/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
#include <net/netdev_queues.h>
#include <net/netlink.h>
#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"

#define BNXT_NVM_ERR_MSG(dev, extack, msg)			\
	do {							\
		if (extack)					\
			NL_SET_ERR_MSG_MOD(extack, msg);	\
		netdev_err(dev, "%s\n", msg);			\
	} while (0)

static u32 bnxt_get_msglevel(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
	struct bnxt *bp = netdev_priv(dev);

	bp->msg_enable = value;
}

static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_rx = true;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_tx = true;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}

static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			goto reset_coalesce;
		}
	}

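	/* CQE coalescing mode maps to the firmware's timer-reset flag, so
	 * it can only be requested when the completion ring capabilities
	 * report timer-reset support.
	 */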
	if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
	    !(bp->coal_cap.cmpl_params &
	      RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
		return -EOPNOTSUPP;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_rx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_tx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
		if (update_stats) {
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}

static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};

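/* Each *_STATS_ENTRY macro pairs a counter's offset within the firmware
 * statistics block with its stringified name for ethtool -S output.
 */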
#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_RX_STATS_EXT_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)			\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),	\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES	\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)

#define BNXT_TX_STATS_EXT_COS_ENTRIES	\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

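/* The totals below accumulate across ring reconfigurations: the current
 * counters are added to the values saved in ring_err_stats_prev when the
 * rings were last torn down (see bnxt_get_ethtool_stats()).
 */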
enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
	RX_NETPOLL_DISCARDS,
};

static const char *const bnxt_ring_err_stats_arr[] = {
	"rx_total_l4_csum_errors",
	"rx_total_resets",
	"rx_total_buf_errors",
	"rx_total_oom_discards",
	"rx_total_netpoll_discards",
	"rx_total_ring_discards",
	"tx_total_resets",
	"tx_total_ring_discards",
	"total_missed_irqs",
};

#define NUM_RING_RX_SW_STATS	ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS	ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS	ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS	ARRAY_SIZE(bnxt_ring_tx_stats_str)

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};

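/* Extended port statistics.  Only the first fw_rx_stats_ext_size /
 * fw_tx_stats_ext_size entries are valid on a given device, so both the
 * string and value loops clamp to those firmware-reported sizes.
 */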
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_RING_ERR_STATS	ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS	ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))

static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
	if (BNXT_SUPPORTS_TPA(bp)) {
		if (bp->max_tpa_v2) {
			if (BNXT_CHIP_P5(bp))
				return BNXT_NUM_TPA_RING_STATS_P5;
			return BNXT_NUM_TPA_RING_STATS_P7;
		}
		return BNXT_NUM_TPA_RING_STATS;
	}
	return 0;
}

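/* Per-ring counters: RX stats (HW, SW, and TPA) are reported for each RX
 * ring, TX stats for each TX ring (including XDP rings), and the common
 * SW stats for each completion ring.
 */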
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
	int rx, tx, cmn;

	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
	     bnxt_get_num_tpa_ring_stats(bp);
	tx = NUM_RING_TX_HW_STATS;
	cmn = NUM_RING_CMN_SW_STATS;
	return rx * bp->rx_nr_rings +
	       tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
	       cmn * bp->cp_nr_rings;
}

static int bnxt_get_num_stats(struct bnxt *bp)
{
	int num_stats = bnxt_get_num_ring_stats(bp);
	int len;

	num_stats += BNXT_NUM_RING_ERR_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS)
		num_stats += BNXT_NUM_PORT_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		len = min_t(int, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		num_stats += len;
		len = min_t(int, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		num_stats += len;
		if (bp->pri2cos_valid)
			num_stats += BNXT_NUM_STATS_PRI;
	}

	return num_stats;
}

static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return bnxt_get_num_stats(bp);
	case ETH_SS_TEST:
		if (!bp->num_tests)
			return -EOPNOTSUPP;
		return bp->num_tests;
	default:
		return -EOPNOTSUPP;
	}
}

static bool is_rx_ring(struct bnxt *bp, int ring_num)
{
	return ring_num < bp->rx_nr_rings;
}

static bool is_tx_ring(struct bnxt *bp, int ring_num)
{
	int tx_base = 0;

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		tx_base = bp->rx_nr_rings;

	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
		return true;
	return false;
}

static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct bnxt_total_ring_err_stats ring_err_stats = {0};
	struct bnxt *bp = netdev_priv(dev);
	u64 *curr, *prev;
	u32 tpa_stats;
	u32 i, j = 0;

	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp);
		goto skip_ring_stats;
	}

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			     j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		sw = (u64 *)&cpr->sw_stats->rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		sw = (u64 *)&cpr->sw_stats->cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];
	}

	bnxt_get_ring_err_stats(bp, &ring_err_stats);

skip_ring_stats:
	curr = &ring_err_stats.rx_total_l4_csum_errors;
	prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
		buf[j] = *curr + *prev;

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
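	/* Extended stats are variable-length; copy only as many entries as
	 * both the firmware and this driver know about.
	 */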
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
		u32 len;

		len = min_t(u32, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		len = min_t(u32, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
		}
	}
}

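/* String order here must match the buffer layout produced by
 * bnxt_get_ethtool_stats() exactly, entry for entry.
 */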
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 i, j, num_str;
	const char *str;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i))
				for (j = 0; j < NUM_RING_RX_HW_STATS; j++) {
					str = bnxt_ring_rx_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			if (is_tx_ring(bp, i))
				for (j = 0; j < NUM_RING_TX_HW_STATS; j++) {
					str = bnxt_ring_tx_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			if (bp->max_tpa_v2)
				for (j = 0; j < num_str; j++) {
					str = bnxt_ring_tpa2_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			else
				for (j = 0; j < num_str; j++) {
					str = bnxt_ring_tpa_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
skip_tpa_stats:
			if (is_rx_ring(bp, i))
				for (j = 0; j < NUM_RING_RX_SW_STATS; j++) {
					str = bnxt_rx_sw_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			for (j = 0; j < NUM_RING_CMN_SW_STATS; j++) {
				str = bnxt_cmn_sw_stats_str[j];
				ethtool_sprintf(&buf, "[%d]: %s", i, str);
			}
		}
		for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++)
			ethtool_puts(&buf, bnxt_ring_err_stats_arr[i]);

		if (bp->flags & BNXT_FLAG_PORT_STATS)
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				str = bnxt_port_stats_arr[i].string;
				ethtool_puts(&buf, str);
			}

		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			u32 len;

			len = min_t(u32, bp->fw_rx_stats_ext_size,
				    ARRAY_SIZE(bnxt_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				str = bnxt_port_stats_ext_arr[i].string;
				ethtool_puts(&buf, str);
			}

			len = min_t(u32, bp->fw_tx_stats_ext_size,
				    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				str = bnxt_tx_port_stats_ext_arr[i].string;
				ethtool_puts(&buf, str);
			}

			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					str = bnxt_rx_bytes_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_rx_pkts_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_tx_bytes_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_tx_pkts_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			for (i = 0; i < bp->num_tests; i++)
				ethtool_puts(&buf, bp->test_info->string[i]);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}

static void bnxt_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
		ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	} else {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
		ering->rx_jumbo_max_pending = 0;
		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
	}
	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
	ering->tx_pending = bp->tx_ring_size;

	kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}

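/* tcp-data-split (header/data split) may be left at "unknown" (device
 * default) or explicitly enabled; an explicit disable is rejected, and
 * enabling it is not allowed while the device is in XDP page mode.
 */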
static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering,
			      struct kernel_ethtool_ringparam *kernel_ering,
			      struct netlink_ext_ack *extack)
{
	u8 tcp_data_split = kernel_ering->tcp_data_split;
	struct bnxt *bp = netdev_priv(dev);
	u8 hds_config_mod;

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
		return -EINVAL;

	hds_config_mod = tcp_data_split != dev->cfg->hds_config;
	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
		return -EINVAL;

	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
	    hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
		NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
		return -EINVAL;
	}

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	if (hds_config_mod) {
		if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
			bp->flags |= BNXT_FLAG_HDS;
		else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
			bp->flags &= ~BNXT_FLAG_HDS;
	}

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	tcs = bp->num_tc;
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
		tx_grps++;
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}

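/* Channel counts must be either combined only, or separate rx/tx counts;
 * mixing the two is rejected, and XDP requires combined mode.
 */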
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;
	int tx_cp;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = bp->num_tc;

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		tx_xdp = req_rx_rings;
	}

	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    netif_is_rxfh_configured(dev)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VFs
			 * before PF unload
			 */
		}
		bnxt_close_nic(bp, true, false);
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
			       tx_cp + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VFs
			 * to re-enable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}

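/* Walk a filter hash table under RCU, collecting software IDs starting
 * at @start until @id_cnt entries have been written to @ids.
 */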
static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
				     int tbl_size, u32 *ids, u32 start,
				     u32 id_cnt)
{
	int i, j = start;

	if (j >= id_cnt)
		return j;
	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (!fltr->flags ||
			    test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
				continue;
			ids[j++] = fltr->sw_id;
			if (j == id_cnt)
				return j;
		}
	}
	return j;
}

static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
						      struct hlist_head tbl[],
						      int tbl_size, u32 id)
{
	int i;

	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->flags && fltr->sw_id == id)
				return fltr;
		}
	}
	return NULL;
}

static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	u32 count;

	cmd->data = bp->ntp_fltr_count;
	rcu_read_lock();
	count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
					  cmd->rule_cnt);
	cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
						  BNXT_NTP_FLTR_HASH_SIZE,
						  rule_locs, count,
						  cmd->rule_cnt);
	rcu_read_unlock();

	return 0;
}

static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	struct bnxt_flow_masks *fmasks;
	struct flow_keys *fkeys;
	int rc = -EINVAL;

	if (fs->location >= bp->max_fltr)
		return rc;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE,
					  fs->location);
	if (fltr_base) {
		struct ethhdr *h_ether = &fs->h_u.ether_spec;
		struct ethhdr *m_ether = &fs->m_u.ether_spec;
		struct bnxt_l2_filter *l2_fltr;
		struct bnxt_l2_key *l2_key;

		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
		l2_key = &l2_fltr->l2_key;
		fs->flow_type = ETHER_FLOW;
		ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
		eth_broadcast_addr(m_ether->h_dest);
		if (l2_key->vlan) {
			struct ethtool_flow_ext *m_ext = &fs->m_ext;
			struct ethtool_flow_ext *h_ext = &fs->h_ext;

			fs->flow_type |= FLOW_EXT;
			m_ext->vlan_tci = htons(0xfff);
			h_ext->vlan_tci = htons(l2_key->vlan);
		}
		if (fltr_base->flags & BNXT_ACT_RING_DST)
			fs->ring_cookie = fltr_base->rxq;
		if (fltr_base->flags & BNXT_ACT_FUNC_DST)
			fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
					  ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		rcu_read_unlock();
		return 0;
	}
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE,
					  fs->location);
	if (!fltr_base) {
		rcu_read_unlock();
		return rc;
	}
	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);

	fkeys = &fltr->fkeys;
	fmasks = &fltr->fmasks;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
			fs->flow_type = IP_USER_FLOW;
			fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
			fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD;
			fs->m_u.usr_ip4_spec.proto = 0;
		} else if (fkeys->basic.ip_proto == IPPROTO_ICMP) {
			fs->flow_type = IP_USER_FLOW;
			fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
			fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
			fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V4_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V4_FLOW;
		} else {
			goto fltr_err;
		}

		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
		if (fs->flow_type == TCP_V4_FLOW ||
		    fs->flow_type == UDP_V4_FLOW) {
			fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
		}
	} else {
		if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
			fs->flow_type = IPV6_USER_FLOW;
			fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD;
			fs->m_u.usr_ip6_spec.l4_proto = 0;
		} else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) {
			fs->flow_type = IPV6_USER_FLOW;
			fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
			fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V6_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V6_FLOW;
		} else {
			goto fltr_err;
		}

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
			fmasks->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
			fmasks->addrs.v6addrs.dst;
		if (fs->flow_type == TCP_V6_FLOW ||
		    fs->flow_type == UDP_V6_FLOW) {
			fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
		}
	}

	if (fltr->base.flags & BNXT_ACT_DROP) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
		fs->flow_type |= FLOW_RSS;
		cmd->rss_context = fltr->base.fw_vnic_id;
	} else {
		fs->ring_cookie = fltr->base.rxq;
	}
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}

static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
							u32 index)
{
	struct ethtool_rxfh_context *ctx;

	ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
	if (!ctx)
		return NULL;
	return ethtool_rxfh_context_priv(ctx);
}

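/* The RSS indirection table and the hash key share one DMA-coherent
 * allocation; the key is placed at a cache-aligned offset past the
 * maximum P5 table size.
 */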
static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
				     struct bnxt_vnic_info *vnic)
{
	int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);

	vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
	vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
					     vnic->rss_table_size,
					     &vnic->rss_table_dma_addr,
					     GFP_KERNEL);
	if (!vnic->rss_table)
		return -ENOMEM;

	vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
	vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	return 0;
}

static int bnxt_add_l2_cls_rule(struct bnxt *bp,
				struct ethtool_rx_flow_spec *fs)
{
	u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	struct ethhdr *h_ether = &fs->h_u.ether_spec;
	struct ethhdr *m_ether = &fs->m_u.ether_spec;
	struct bnxt_l2_filter *fltr;
	struct bnxt_l2_key key;
	u16 vnic_id;
	u8 flags;
	int rc;

	if (BNXT_CHIP_P5_PLUS(bp))
		return -EOPNOTSUPP;

	if (!is_broadcast_ether_addr(m_ether->h_dest))
		return -EINVAL;
	ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
	key.vlan = 0;
	if (fs->flow_type & FLOW_EXT) {
		struct ethtool_flow_ext *m_ext = &fs->m_ext;
		struct ethtool_flow_ext *h_ext = &fs->h_ext;

		if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
			return -EINVAL;
		key.vlan = ntohs(h_ext->vlan_tci);
	}

	if (vf) {
		flags = BNXT_ACT_FUNC_DST;
		vnic_id = 0xffff;
		vf--;
	} else {
		flags = BNXT_ACT_RING_DST;
		vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
	}
	fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
	if (IS_ERR(fltr))
		return PTR_ERR(fltr);

	fltr->base.fw_vnic_id = vnic_id;
	fltr->base.rxq = ring;
	fltr->base.vf_idx = vf;
	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
	if (rc)
		bnxt_del_l2_filter(bp, fltr);
	else
		fs->location = fltr->base.sw_id;
	return rc;
}

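/* User-defined IP flows may either wildcard the IP protocol (empty mask)
 * or fully mask it to match ICMP/ICMPv6; anything else is rejected.
 */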
static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
					struct ethtool_usrip4_spec *ip_mask)
{
	u8 mproto = ip_mask->proto;
	u8 sproto = ip_spec->proto;

	if (ip_mask->l4_4_bytes || ip_mask->tos ||
	    ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
	    (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP)))
		return false;
	return true;
}

static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
					struct ethtool_usrip6_spec *ip_mask)
{
	u8 mproto = ip_mask->l4_proto;
	u8 sproto = ip_spec->l4_proto;

	if (ip_mask->l4_4_bytes || ip_mask->tclass ||
	    (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6)))
		return false;
	return true;
}

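/* Build an ntuple filter from the ethtool flow spec.  The rule is hashed
 * for duplicate detection, inserted into the software table, then
 * programmed into the hardware; at least one field must be masked in.
 */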
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
				    struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct bnxt_ntuple_filter *new_fltr, *fltr;
	u32 flow_type = fs->flow_type & 0xff;
	struct bnxt_l2_filter *l2_fltr;
	struct bnxt_flow_masks *fmasks;
	struct flow_keys *fkeys;
	u32 idx, ring;
	int rc;
	u8 vf;

	if (!bp->vnic_info)
		return -EAGAIN;

	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
		return -EOPNOTSUPP;

	if (flow_type == IP_USER_FLOW) {
		if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
						 &fs->m_u.usr_ip4_spec))
			return -EOPNOTSUPP;
	}

	if (flow_type == IPV6_USER_FLOW) {
		if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
						 &fs->m_u.usr_ip6_spec))
			return -EOPNOTSUPP;
	}

	new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
	if (!new_fltr)
		return -ENOMEM;

	l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
	atomic_inc(&l2_fltr->refcnt);
	new_fltr->l2_fltr = l2_fltr;
	fmasks = &new_fltr->fmasks;
	fkeys = &new_fltr->fkeys;

	rc = -EOPNOTSUPP;
	switch (flow_type) {
	case IP_USER_FLOW: {
		struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
		struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;

		fkeys->basic.ip_proto = ip_mask->proto ? ip_spec->proto
						       : BNXT_IP_PROTO_WILDCARD;
		fkeys->basic.n_proto = htons(ETH_P_IP);
		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
		break;
	}
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
		struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
		struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V4_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IP);
		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
		fkeys->ports.src = ip_spec->psrc;
		fmasks->ports.src = ip_mask->psrc;
		fkeys->ports.dst = ip_spec->pdst;
		fmasks->ports.dst = ip_mask->pdst;
		break;
	}
	case IPV6_USER_FLOW: {
		struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
		struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;

		fkeys->basic.ip_proto = ip_mask->l4_proto ? ip_spec->l4_proto
							  : BNXT_IP_PROTO_WILDCARD;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);
		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
		break;
	}
	case TCP_V6_FLOW:
	case UDP_V6_FLOW: {
		struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
		struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V6_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);

		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
		fkeys->ports.src = ip_spec->psrc;
		fmasks->ports.src = ip_mask->psrc;
		fkeys->ports.dst = ip_spec->pdst;
		fmasks->ports.dst = ip_mask->pdst;
		break;
	}
	default:
		rc = -EOPNOTSUPP;
		goto ntuple_err;
	}
	if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
		goto ntuple_err;

	idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
	rcu_read_lock();
	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
	if (fltr) {
		rcu_read_unlock();
		rc = -EEXIST;
		goto ntuple_err;
	}
	rcu_read_unlock();

	new_fltr->base.flags = BNXT_ACT_NO_AGING;
	if (fs->flow_type & FLOW_RSS) {
		struct bnxt_rss_ctx *rss_ctx;

		new_fltr->base.fw_vnic_id = 0;
		new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
		rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
		if (rss_ctx) {
			new_fltr->base.fw_vnic_id = rss_ctx->index;
		} else {
			rc = -EINVAL;
			goto ntuple_err;
		}
	}
	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
		new_fltr->base.flags |= BNXT_ACT_DROP;
	else
		new_fltr->base.rxq = ring;
	__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
	if (!rc) {
		rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
		if (rc) {
			bnxt_del_ntp_filter(bp, new_fltr);
			return rc;
		}
		fs->location = new_fltr->base.sw_id;
		return 0;
	}

ntuple_err:
	atomic_dec(&l2_fltr->refcnt);
	kfree(new_fltr);
	return rc;
}

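/* Rule insertion entry point: ETHER_FLOW specs become L2 filters and
 * everything else becomes an ntuple filter; a discard (drop) destination
 * is routed to the ntuple path.
 */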
static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	u32 ring, flow_type;
	int rc;
	u8 vf;

	if (!netif_running(bp->dev))
		return -EAGAIN;
	if (!(bp->flags & BNXT_FLAG_RFS))
		return -EPERM;
	if (fs->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	flow_type = fs->flow_type;
	if ((flow_type == IP_USER_FLOW ||
	     flow_type == IPV6_USER_FLOW) &&
	    !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
		return -EOPNOTSUPP;
	if (flow_type & FLOW_MAC_EXT)
		return -EINVAL;
	flow_type &= ~FLOW_EXT;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
		return bnxt_add_ntuple_cls_rule(bp, cmd);

	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	if (BNXT_VF(bp) && vf)
		return -EINVAL;
	if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
		return -EINVAL;
	if (!vf && ring >= bp->rx_nr_rings)
		return -EINVAL;

	if (flow_type == ETHER_FLOW)
		rc = bnxt_add_l2_cls_rule(bp, fs);
	else
		rc = bnxt_add_ntuple_cls_rule(bp, cmd);
	return rc;
}

static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	u32 id = fs->location;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, id);
	if (fltr_base) {
		struct bnxt_l2_filter *l2_fltr;

		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
		rcu_read_unlock();
		bnxt_hwrm_l2_filter_free(bp, l2_fltr);
		bnxt_del_l2_filter(bp, l2_fltr);
		return 0;
	}
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE, id);
	if (!fltr_base) {
		rcu_read_unlock();
		return -ENOENT;
	}

	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
	if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();
	bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
	bnxt_del_ntp_filter(bp, fltr);
	return 0;
}

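/* Report which packet header fields feed the RSS hash for each ethtool
 * flow type, derived from the current VNIC RSS configuration.
 */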
static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static int bnxt_get_rxfh_fields(struct net_device *dev,
				struct ethtool_rxfh_fields *cmd)
{
	struct bnxt *bp = netdev_priv(dev);

	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case AH_ESP_V4_FLOW:
		if (bp->rss_hash_cfg &
		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case AH_ESP_V6_FLOW:
		if (bp->rss_hash_cfg &
		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}

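/* Only three hash-field combinations are accepted per flow type:
 * 4-tuple (addresses plus L4 ports/SPI), 2-tuple (addresses only), or
 * none.
 */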
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)

static int bnxt_set_rxfh_fields(struct net_device *dev,
				const struct ethtool_rxfh_fields *cmd,
				struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int tuple, rc = 0;
	u32 rss_hash_cfg;

	rss_hash_cfg = bp->rss_hash_cfg;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (cmd->flow_type == AH_ESP_V4_FLOW) {
		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
			return -EINVAL;
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
	} else if (cmd->flow_type == AH_ESP_V6_FLOW) {
		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
			return -EINVAL;
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		rc = bnxt_srxclsrlins(bp, cmd);
		break;

	case ETHTOOL_SRXCLSRLDEL:
		rc = bnxt_srxclsrldel(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}

u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
		       BNXT_RSS_TABLE_ENTRIES_P5;
	return HW_HASH_INDEX_SIZE;
}

static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}

static int bnxt_get_rxfh(struct net_device *dev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct bnxt_rss_ctx *rss_ctx = NULL;
	struct bnxt *bp = netdev_priv(dev);
	u32 *indir_tbl = bp->rss_indir_tbl;
	struct bnxt_vnic_info *vnic;
	u32 i, tbl_size;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (!bp->vnic_info)
		return 0;

	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
	if (rxfh->rss_context) {
		struct ethtool_rxfh_context *ctx;

		ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
		if (!ctx)
			return -EINVAL;
		indir_tbl = ethtool_rxfh_context_indir(ctx);
		rss_ctx = ethtool_rxfh_context_priv(ctx);
		vnic = &rss_ctx->vnic;
	}

	if (rxfh->indir && indir_tbl) {
		tbl_size = bnxt_get_rxfh_indir_size(dev);
		for (i = 0; i < tbl_size; i++)
			rxfh->indir[i] = indir_tbl[i];
	}

	if (rxfh->key && vnic->rss_hash_key)
		memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);

	return 0;
}

static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
			    struct bnxt_rss_ctx *rss_ctx,
			    const struct ethtool_rxfh_param *rxfh)
{
	if (rxfh->key) {
		if (rss_ctx) {
			memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
			       HW_HASH_KEY_SIZE);
		} else {
			memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
			bp->rss_hash_key_updated = true;
		}
	}
	if (rxfh->indir) {
		u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
		u32 *indir_tbl = bp->rss_indir_tbl;

		if (rss_ctx)
			indir_tbl = ethtool_rxfh_context_indir(ctx);
		for (i = 0; i < tbl_size; i++)
			indir_tbl[i] = rxfh->indir[i];
		pad = bp->rss_indir_tbl_entries - tbl_size;
		if (pad)
			memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl));
	}
}

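/* Common validation for RSS context create/modify: only Toeplitz hashing
 * is supported, the device must support multiple RSS contexts, and the
 * interface must be up.
 */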
static int bnxt_rxfh_context_check(struct bnxt *bp,
				   const struct ethtool_rxfh_param *rxfh,
				   struct netlink_ext_ack *extack)
{
	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
		NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
		return -EOPNOTSUPP;
	}

	if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
		NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
		return -EOPNOTSUPP;
	}

	if (!netif_running(bp->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down");
		return -EAGAIN;
	}

	return 0;
}

static int bnxt_create_rxfh_context(struct net_device *dev,
				    struct ethtool_rxfh_context *ctx,
				    const struct ethtool_rxfh_param *rxfh,
				    struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_rss_ctx *rss_ctx;
	struct bnxt_vnic_info *vnic;
	int rc;

	rc = bnxt_rxfh_context_check(bp, rxfh, extack);
	if (rc)
		return rc;

	if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
				       BNXT_MAX_ETH_RSS_CTX);
		return -EINVAL;
	}

	if (!bnxt_rfs_capable(bp, true)) {
		NL_SET_ERR_MSG_MOD(extack, "Out of hardware resources");
		return -ENOMEM;
	}

	rss_ctx = ethtool_rxfh_context_priv(ctx);

	bp->num_rss_ctx++;

	vnic = &rss_ctx->vnic;
	vnic->rss_ctx = ctx;
	vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
	vnic->vnic_id = BNXT_VNIC_ID_INVALID;
	rc = bnxt_alloc_vnic_rss_table(bp, vnic);
	if (rc)
		goto out;

	/* Populate defaults in the context */
	bnxt_set_dflt_rss_indir_tbl(bp, ctx);
	ctx->hfunc = ETH_RSS_HASH_TOP;
	memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
	memcpy(ethtool_rxfh_context_key(ctx),
	       bp->rss_hash_key, HW_HASH_KEY_SIZE);

	rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC");
		goto out;
	}

	rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
		goto out;
	}
	bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);

	rc = __bnxt_setup_vnic_p5(bp, vnic);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to setup VNIC");
		goto out;
	}

	rss_ctx->index = rxfh->rss_context;
	return 0;
out:
	bnxt_del_one_rss_ctx(bp, rss_ctx, true);
	return rc;
}

static int bnxt_modify_rxfh_context(struct net_device *dev,
				    struct ethtool_rxfh_context *ctx,
				    const struct ethtool_rxfh_param *rxfh,
				    struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_rss_ctx *rss_ctx;
	int rc;

	rc = bnxt_rxfh_context_check(bp, rxfh, extack);
	if (rc)
		return rc;

	rss_ctx = ethtool_rxfh_context_priv(ctx);

	bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);

	return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic);
}

static int bnxt_remove_rxfh_context(struct net_device *dev,
				    struct ethtool_rxfh_context *ctx,
				    u32 rss_context,
				    struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_rss_ctx *rss_ctx;

	rss_ctx = ethtool_rxfh_context_priv(ctx);

	bnxt_del_one_rss_ctx(bp, rss_ctx, true);
	return 0;
}
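/* For the create/modify/remove callbacks above, the ethtool core owns the
 * struct ethtool_rxfh_context; the driver only manages the bnxt_rss_ctx
 * private area returned by ethtool_rxfh_context_priv() and the backing
 * VNIC embedded in it.
 */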
static int bnxt_set_rxfh(struct net_device *dev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	bnxt_modify_rss(bp, NULL, NULL, rxfh);

	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}

static int bnxt_get_regs_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int reg_len;

	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	reg_len = BNXT_PXP_REG_LEN;

	if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
		reg_len += sizeof(struct pcie_ctx_hw_stats);

	return reg_len;
}

#define BNXT_PCIE_32B_ENTRY(start, end)			\
	{ offsetof(struct pcie_ctx_hw_stats, start),	\
	  offsetof(struct pcie_ctx_hw_stats, end) }

static const struct {
	u16 start;
	u16 end;
} bnxt_pcie_32b_entries[] = {
	BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
};

static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *_p)
{
	struct pcie_ctx_hw_stats *hw_pcie_stats;
	struct hwrm_pcie_qstats_input *req;
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t hw_pcie_stats_addr;
	int rc;

	regs->version = 0;
	if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
		bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);

	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
		return;

	hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
					   &hw_pcie_stats_addr);
	if (!hw_pcie_stats) {
		hwrm_req_drop(bp, req);
		return;
	}

	regs->version = 1;
	hwrm_req_hold(bp, req); /* hold on to slice */
	req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
		u8 *src = (u8 *)hw_pcie_stats;
		int i, j;

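		/* The PCIe stats are mostly little-endian 64-bit counters,
		 * except for the ranges listed in bnxt_pcie_32b_entries[]
		 * (the LTSSM histogram), which are 32 bits wide.  Walk the
		 * buffer and byte-swap each field at its native width.
		 */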
		for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
			if (i >= bnxt_pcie_32b_entries[j].start &&
			    i <= bnxt_pcie_32b_entries[j].end) {
				u32 *dst32 = (u32 *)(dst + i);

				*dst32 = le32_to_cpu(*(__le32 *)(src + i));
				i += 4;
				if (i > bnxt_pcie_32b_entries[j].end &&
				    j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
					j++;
			} else {
				u64 *dst64 = (u64 *)(dst + i);

				*dst64 = le64_to_cpu(*(__le64 *)(src + i));
				i += 8;
			}
		}
	}
	hwrm_req_drop(bp, req);
}

static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
	if (bp->flags & BNXT_FLAG_WOL_CAP) {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
	}
}

static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
			return -EINVAL;
		if (!bp->wol) {
			if (bnxt_hwrm_alloc_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 1;
		}
	} else {
		if (bp->wol) {
			if (bnxt_hwrm_free_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 0;
		}
	}
	return 0;
}

/* TODO: support 25GB, 40GB, 50GB with different cable type */
void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
{
	linkmode_zero(mode);

	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
}

enum bnxt_media_type {
	BNXT_MEDIA_UNKNOWN = 0,
	BNXT_MEDIA_TP,
	BNXT_MEDIA_CR,
	BNXT_MEDIA_SR,
	BNXT_MEDIA_LR_ER_FR,
	BNXT_MEDIA_KR,
	BNXT_MEDIA_KX,
	BNXT_MEDIA_X,
	__BNXT_MEDIA_END,
};

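/* Map firmware PHY types to media classes.  Firmware values missing from
 * this table are implicitly zero, i.e. BNXT_MEDIA_UNKNOWN.
 */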
static const enum bnxt_media_type bnxt_phy_types[] = {
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
};

static enum bnxt_media_type
bnxt_get_media(struct bnxt_link_info *link_info)
{
	switch (link_info->media_type) {
	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
		return BNXT_MEDIA_TP;
	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
		return BNXT_MEDIA_CR;
	default:
		if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
			return bnxt_phy_types[link_info->phy_type];
		return BNXT_MEDIA_UNKNOWN;
	}
}

enum bnxt_link_speed_indices {
	BNXT_LINK_SPEED_UNKNOWN = 0,
	BNXT_LINK_SPEED_100MB_IDX,
	BNXT_LINK_SPEED_1GB_IDX,
	BNXT_LINK_SPEED_10GB_IDX,
	BNXT_LINK_SPEED_25GB_IDX,
	BNXT_LINK_SPEED_40GB_IDX,
	BNXT_LINK_SPEED_50GB_IDX,
	BNXT_LINK_SPEED_100GB_IDX,
	BNXT_LINK_SPEED_200GB_IDX,
	BNXT_LINK_SPEED_400GB_IDX,
	__BNXT_LINK_SPEED_END
};

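/* Collapse the firmware speed encodings onto one index per line rate; e.g.
 * 100GB, 100GB_PAM4 and 100GB_PAM4_112 all map to BNXT_LINK_SPEED_100GB_IDX.
 * The signalling mode is tracked separately.
 */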
static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
{
	switch (speed) {
	case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
	case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
	case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
	case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
	case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
	case BNXT_LINK_SPEED_50GB:
	case BNXT_LINK_SPEED_50GB_PAM4:
		return BNXT_LINK_SPEED_50GB_IDX;
	case BNXT_LINK_SPEED_100GB:
	case BNXT_LINK_SPEED_100GB_PAM4:
	case BNXT_LINK_SPEED_100GB_PAM4_112:
		return BNXT_LINK_SPEED_100GB_IDX;
	case BNXT_LINK_SPEED_200GB:
	case BNXT_LINK_SPEED_200GB_PAM4:
	case BNXT_LINK_SPEED_200GB_PAM4_112:
		return BNXT_LINK_SPEED_200GB_IDX;
	case BNXT_LINK_SPEED_400GB:
	case BNXT_LINK_SPEED_400GB_PAM4:
	case BNXT_LINK_SPEED_400GB_PAM4_112:
		return BNXT_LINK_SPEED_400GB_IDX;
	default: return BNXT_LINK_SPEED_UNKNOWN;
	}
}

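/* 3-D lookup table: [speed index][signalling mode][media type] -> ethtool
 * link mode bit.  A zero entry means the combination has no defined link
 * mode; see the note in bnxt_get_link_mode() below.
 */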
static const enum ethtool_link_mode_bit_indices
bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
	[BNXT_LINK_SPEED_100MB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_1GB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			/* historically baseT, but DAC is more correctly baseX */
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
			[BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_10GB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_25GB_IDX] = {
		{
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_40GB_IDX] = {
		{
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_50GB_IDX] = {
		[BNXT_SIG_MODE_NRZ] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_100GB_IDX] = {
		[BNXT_SIG_MODE_NRZ] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_200GB_IDX] = {
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_400GB_IDX] = {
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
		},
	},
};

#define BNXT_LINK_MODE_UNKNOWN -1

static enum ethtool_link_mode_bit_indices
bnxt_get_link_mode(struct bnxt_link_info *link_info)
{
	enum ethtool_link_mode_bit_indices link_mode;
	enum bnxt_link_speed_indices speed;
	enum bnxt_media_type media;
	u8 sig_mode;

	if (link_info->phy_link_status != BNXT_LINK_LINK)
		return BNXT_LINK_MODE_UNKNOWN;

	media = bnxt_get_media(link_info);
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		speed = bnxt_fw_speed_idx(link_info->link_speed);
		sig_mode = link_info->active_fec_sig_mode &
			   PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
	} else {
		speed = bnxt_fw_speed_idx(link_info->req_link_speed);
		sig_mode = link_info->req_signal_mode;
	}
	if (sig_mode >= BNXT_SIG_MODE_MAX)
		return BNXT_LINK_MODE_UNKNOWN;

	/* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
	 * link mode, but since no such devices exist, the zeroes in the
	 * map can be conveniently used to represent unknown link modes.
	 */
	link_mode = bnxt_link_modes[speed][sig_mode][media];
	if (!link_mode)
		return BNXT_LINK_MODE_UNKNOWN;

	switch (link_mode) {
	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT;
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT;
		break;
	default:
		break;
	}

	return link_mode;
}

static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);

	if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				 lk_ksettings->link_modes.supported);
	}

	if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
	    link_info->support_pam4_auto_speeds)
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				 lk_ksettings->link_modes.supported);

	if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		return;

	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX)
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				 lk_ksettings->link_modes.advertising);
	if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1)
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				 lk_ksettings->link_modes.advertising);
	if (link_info->lp_pause & BNXT_LINK_PAUSE_RX)
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				 lk_ksettings->link_modes.lp_advertising);
	if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1)
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				 lk_ksettings->link_modes.lp_advertising);
}

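/* Note on the pause bits above: Pause is advertised when RX pause is set
 * and Asym_Pause when exactly one of RX/TX pause is set (hweight8() == 1),
 * which mirrors the usual Linux Pause/Asym_Pause convention.
 */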
static const u16 bnxt_nrz_speed_masks[] = {
	[BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};

static const u16 bnxt_pam4_speed_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};

static const u16 bnxt_nrz_speeds2_masks[] = {
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};

static const u16 bnxt_pam4_speeds2_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
};

static const u16 bnxt_pam4_112_speeds2_masks[] = {
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
};

static enum bnxt_link_speed_indices
bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
{
	const u16 *speeds;
	int idx, len;

	switch (sig_mode) {
	case BNXT_SIG_MODE_NRZ:
		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
			speeds = bnxt_nrz_speeds2_masks;
			len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
		} else {
			speeds = bnxt_nrz_speed_masks;
			len = ARRAY_SIZE(bnxt_nrz_speed_masks);
		}
		break;
	case BNXT_SIG_MODE_PAM4:
		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
			speeds = bnxt_pam4_speeds2_masks;
			len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
		} else {
			speeds = bnxt_pam4_speed_masks;
			len = ARRAY_SIZE(bnxt_pam4_speed_masks);
		}
		break;
	case BNXT_SIG_MODE_PAM4_112:
		speeds = bnxt_pam4_112_speeds2_masks;
		len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
		break;
	default:
		return BNXT_LINK_SPEED_UNKNOWN;
	}

	for (idx = 0; idx < len; idx++) {
		if (speeds[idx] == speed_msk)
			return idx;
	}

	return BNXT_LINK_SPEED_UNKNOWN;
}

#define BNXT_FW_SPEED_MSK_BITS 16

static void
__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
			  u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
	enum ethtool_link_mode_bit_indices link_mode;
	enum bnxt_link_speed_indices speed;
	u8 bit;

	for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
		speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
		if (!speed)
			continue;

		link_mode = bnxt_link_modes[speed][sig_mode][media];
		if (!link_mode)
			continue;

		linkmode_set_bit(link_mode, et_mask);
	}
}

static void
bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
			u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
	if (media) {
		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
					  et_mask);
		return;
	}

	/* list speeds for all media if unknown */
	for (media = 1; media < __BNXT_MEDIA_END; media++)
		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
					  et_mask);
}

static void
bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
				    enum bnxt_media_type media,
				    struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
	u16 phy_flags = bp->phy_flags;

	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
		sp_nrz = link_info->support_speeds2;
		sp_pam4 = link_info->support_speeds2;
		sp_pam4_112 = link_info->support_speeds2;
	} else {
		sp_nrz = link_info->support_speeds;
		sp_pam4 = link_info->support_pam4_speeds;
	}
	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.supported);
	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.supported);
	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
				phy_flags, lk_ksettings->link_modes.supported);
}

static void
bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
				enum bnxt_media_type media,
				struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
	u16 phy_flags = bp->phy_flags;

	sp_nrz = link_info->advertising;
	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
		sp_pam4 = link_info->advertising;
		sp_pam4_112 = link_info->advertising;
	} else {
		sp_pam4 = link_info->advertising_pam4;
	}
	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.advertising);
	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.advertising);
	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
				phy_flags, lk_ksettings->link_modes.advertising);
}

static void
bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
			       enum bnxt_media_type media,
			       struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 phy_flags = bp->phy_flags;

	bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
				BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.lp_advertising);
	bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
				BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.lp_advertising);
}

static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
			      u16 speed_msk, const unsigned long *et_mask,
			      enum ethtool_link_mode_bit_indices mode)
{
	bool mode_desired = linkmode_test_bit(mode, et_mask);

	if (!mode)
		return;

	/* enabled speeds for installed media should override */
	if (installed_media && mode_desired) {
		*speeds |= speed_msk;
		*delta |= speed_msk;
		return;
	}

	/* many to one mapping, only allow one change per fw_speed bit */
	if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
		*speeds ^= speed_msk;
		*delta |= speed_msk;
	}
}

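/* Example of the many-to-one mapping (illustrative): with NRZ signalling,
 * the 50GbaseCR2/SR2/KR2 ethtool modes all share one firmware speed bit, so
 * a request that clears one of them while setting another must not toggle
 * the shared bit twice; the delta mask above allows at most one change per
 * firmware bit.
 */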
static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
				    const unsigned long *et_mask)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
	enum bnxt_media_type media = bnxt_get_media(link_info);
	u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
	u32 delta_pam4_112 = 0;
	u32 delta_pam4 = 0;
	u32 delta_nrz = 0;
	int i, m;

	adv = &link_info->advertising;
	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		adv_pam4 = &link_info->advertising;
		adv_pam4_112 = &link_info->advertising;
		sp_msks = bnxt_nrz_speeds2_masks;
		sp_pam4_msks = bnxt_pam4_speeds2_masks;
		sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
	} else {
		adv_pam4 = &link_info->advertising_pam4;
		sp_msks = bnxt_nrz_speed_masks;
		sp_pam4_msks = bnxt_pam4_speed_masks;
	}
	for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
		/* accept any legal media from user */
		for (m = 1; m < __BNXT_MEDIA_END; m++) {
			bnxt_update_speed(&delta_nrz, m == media,
					  adv, sp_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
			bnxt_update_speed(&delta_pam4, m == media,
					  adv_pam4, sp_pam4_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
			if (!adv_pam4_112)
				continue;

			bnxt_update_speed(&delta_pam4_112, m == media,
					  adv_pam4_112, sp_pam4_112_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
		}
	}
}

static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
					      struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.advertising);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_RS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.advertising);
}

static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
					   struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if (fec_cfg & BNXT_FEC_NONE) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.supported);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.supported);
}

u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case BNXT_LINK_SPEED_100MB:
		return SPEED_100;
	case BNXT_LINK_SPEED_1GB:
		return SPEED_1000;
	case BNXT_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case BNXT_LINK_SPEED_10GB:
		return SPEED_10000;
	case BNXT_LINK_SPEED_20GB:
		return SPEED_20000;
	case BNXT_LINK_SPEED_25GB:
		return SPEED_25000;
	case BNXT_LINK_SPEED_40GB:
		return SPEED_40000;
	case BNXT_LINK_SPEED_50GB:
	case BNXT_LINK_SPEED_50GB_PAM4:
		return SPEED_50000;
	case BNXT_LINK_SPEED_100GB:
	case BNXT_LINK_SPEED_100GB_PAM4:
	case BNXT_LINK_SPEED_100GB_PAM4_112:
		return SPEED_100000;
	case BNXT_LINK_SPEED_200GB:
	case BNXT_LINK_SPEED_200GB_PAM4:
	case BNXT_LINK_SPEED_200GB_PAM4_112:
		return SPEED_200000;
	case BNXT_LINK_SPEED_400GB:
	case BNXT_LINK_SPEED_400GB_PAM4:
	case BNXT_LINK_SPEED_400GB_PAM4_112:
		return SPEED_400000;
	default:
		return SPEED_UNKNOWN;
	}
}

static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
				    struct bnxt_link_info *link_info)
{
	struct ethtool_link_settings *base = &lk_ksettings->base;

	if (link_info->link_state == BNXT_LINK_STATE_UP) {
		base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
		lk_ksettings->lanes = link_info->active_lanes;
	} else if (!link_info->autoneg) {
		base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
}

static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct ethtool_link_settings *base = &lk_ksettings->base;
	enum ethtool_link_mode_bit_indices link_mode;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	enum bnxt_media_type media;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	base->duplex = DUPLEX_UNKNOWN;
	base->speed = SPEED_UNKNOWN;
	link_info = &bp->link_info;

	mutex_lock(&bp->link_lock);
	bnxt_get_ethtool_modes(link_info, lk_ksettings);
	media = bnxt_get_media(link_info);
	bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
	bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
	link_mode = bnxt_get_link_mode(link_info);
	if (link_mode != BNXT_LINK_MODE_UNKNOWN)
		ethtool_params_from_link_mode(lk_ksettings, link_mode);
	else
		bnxt_get_default_speeds(lk_ksettings, link_info);

	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				 lk_ksettings->link_modes.advertising);
		base->autoneg = AUTONEG_ENABLE;
		bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_get_all_ethtool_lp_speeds(link_info, media,
						       lk_ksettings);
	} else {
		base->autoneg = AUTONEG_DISABLE;
	}

	base->port = PORT_NONE;
	if (media == BNXT_MEDIA_TP) {
		base->port = PORT_TP;
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.advertising);
	} else if (media == BNXT_MEDIA_KR) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
				 lk_ksettings->link_modes.advertising);
	} else {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.advertising);

		if (media == BNXT_MEDIA_CR)
			base->port = PORT_DA;
		else
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}

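/* In bnxt_force_link_speed() below, lanes == 0 from ethtool means "no
 * preference"; a non-zero value is used both to choose between encodings
 * that share a speed (e.g. 50G NRZ x2 vs. 50G PAM4 x1) and to reject lane
 * counts the selected encoding cannot provide.
 */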
static int
bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds2 = link_info->support_speeds2;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;
	u32 lanes_needed = 1;
	u16 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
			lanes_needed = 2;
		}
		break;
	case SPEED_25000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
			lanes_needed = 4;
		}
		break;
	case SPEED_50000:
		if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
		     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
		    lanes != 1) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
			lanes_needed = 2;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
			fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_100000:
		if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
		     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
		    lanes != 2 && lanes != 1) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
			lanes_needed = 4;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 2;
		} else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
			   lanes != 1) {
			fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 2;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
		}
		break;
	case SPEED_200000:
		if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 4;
		} else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
			   lanes != 2) {
			fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 4;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
			lanes_needed = 2;
		}
		break;
	case SPEED_400000:
		if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
		    lanes != 4) {
			fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 8;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
			lanes_needed = 4;
		}
		break;
	}

	if (!fw_speed) {
		netdev_err(dev, "unsupported speed!\n");
		return -EINVAL;
	}

	if (lanes && lanes != lanes_needed) {
		netdev_err(dev, "unsupported number of lanes for speed\n");
		return -EINVAL;
	}

	if (link_info->req_link_speed == fw_speed &&
	    link_info->req_signal_mode == sig_mode &&
	    link_info->autoneg == 0)
		return -EALREADY;

	link_info->req_link_speed = fw_speed;
	link_info->req_signal_mode = sig_mode;
	link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
	link_info->autoneg = 0;
	link_info->advertising = 0;
	link_info->advertising_pam4 = 0;

	return 0;
}

u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode)
{
	u16 fw_speed_mask = 0;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
	    linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
	    linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;

	return fw_speed_mask;
}

static int bnxt_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed, lanes = 0;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		bnxt_set_ethtool_speeds(link_info,
					lk_ksettings->link_modes.advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		}
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
			set_pause = true;
	} else {
		u8 phy_type = link_info->phy_type;

		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		lanes = lk_ksettings->lanes;
		rc = bnxt_force_link_speed(dev, speed, lanes);
		if (rc) {
			if (rc == -EALREADY)
				rc = 0;
			goto set_setting_exit;
		}
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

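/* The FEC reporting below folds the firmware's RS544 variants into
 * ETHTOOL_FEC_RS and its RS272 variants into ETHTOOL_FEC_LLRS.
 */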
static int bnxt_get_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fec)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u8 active_fec;
	u16 fec_cfg;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	active_fec = link_info->active_fec_sig_mode &
		     PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
	if (fec_cfg & BNXT_FEC_NONE) {
		fec->fec = ETHTOOL_FEC_NONE;
		fec->active_fec = ETHTOOL_FEC_NONE;
		return 0;
	}
	if (fec_cfg & BNXT_FEC_AUTONEG)
		fec->fec |= ETHTOOL_FEC_AUTO;
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		fec->fec |= ETHTOOL_FEC_BASER;
	if (fec_cfg & BNXT_FEC_ENC_RS)
		fec->fec |= ETHTOOL_FEC_RS;
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		fec->fec |= ETHTOOL_FEC_LLRS;

	switch (active_fec) {
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_BASER;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_RS;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_LLRS;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_OFF;
		break;
	}
	return 0;
}

static void bnxt_get_fec_stats(struct net_device *dev,
			       struct ethtool_fec_stats *fec_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	fec_stats->corrected_bits.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));

	if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
		return;

	fec_stats->corrected_blocks.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
	fec_stats->uncorrectable_blocks.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
}

static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
					 u32 fec)
{
	u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;

	if (fec & ETHTOOL_FEC_BASER)
		fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
	else if (fec & ETHTOOL_FEC_RS)
		fw_fec |= BNXT_FEC_RS_ON(link_info);
	else if (fec & ETHTOOL_FEC_LLRS)
		fw_fec |= BNXT_FEC_LLRS_ON;
	return fw_fec;
}

static int bnxt_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
{
	struct hwrm_port_phy_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u32 new_cfg, fec = fecparam->fec;
	u16 fec_cfg;
	int rc;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	if (fec_cfg & BNXT_FEC_NONE)
		return -EOPNOTSUPP;

	if (fec & ETHTOOL_FEC_OFF) {
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
			  BNXT_FEC_ALL_OFF(link_info);
		goto apply_fec;
	}
	if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
	    ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
	    ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
	    ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
		return -EINVAL;

	if (fec & ETHTOOL_FEC_AUTO) {
		if (!link_info->autoneg)
			return -EINVAL;
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
	} else {
		new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
	}

apply_fec:
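	/* PORT_PHY_CFG_REQ_FLAGS_RESET_PHY is OR'ed in below so that, as the
	 * code suggests, the new FEC configuration is applied by forcing a
	 * PHY reset rather than waiting for the next link transition.
	 */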
	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;
	req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* update current settings */
	if (!rc) {
		mutex_lock(&bp->link_lock);
		bnxt_update_link(bp, false);
		mutex_unlock(&bp->link_lock);
	}
	return rc;
}

static void bnxt_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (BNXT_VF(bp))
		return;
	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}

static void bnxt_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *epstat)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
	epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
}

static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (epause->autoneg) {
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			rc = -EINVAL;
			goto pause_exit;
		}

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);

pause_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return BNXT_LINK_IS_UP(bp);
}

int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
			       struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
{
	struct hwrm_nvm_get_dev_info_output *resp;
	struct hwrm_nvm_get_dev_info_input *req;
	int rc;

	if (BNXT_VF(bp))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(nvm_dev_info, resp, sizeof(*resp));
	hwrm_req_drop(bp, req);
	return rc;
}

static void bnxt_print_admin_err(struct bnxt *bp)
{
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}

int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length);

int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
		     u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
		     u32 dir_item_len, const u8 *data,
		     size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_write_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
	if (rc)
		return rc;

	if (data_len && data) {
		dma_addr_t dma_handle;
		u8 *kmem;

		kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
		if (!kmem) {
			hwrm_req_drop(bp, req);
			return -ENOMEM;
		}

		req->dir_data_length = cpu_to_le32(data_len);

		memcpy(kmem, data, data_len);
		req->host_src_addr = cpu_to_le64(dma_handle);
	}

	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
	req->dir_type = cpu_to_le16(dir_type);
	req->dir_ordinal = cpu_to_le16(dir_ordinal);
	req->dir_ext = cpu_to_le16(dir_ext);
	req->dir_attr = cpu_to_le16(dir_attr);
	req->dir_item_length = cpu_to_le32(dir_item_len);
	rc = hwrm_req_send(bp, req);

	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}

int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
			     u8 self_reset, u8 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input *req;
	int rc;

	if (!bnxt_hwrm_reset_permitted(bp)) {
		netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
		return -EPERM;
	}

	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
	if (rc)
		return rc;

	req->embedded_proc_type = proc_type;
	req->selfrst_status = self_reset;
	req->flags = flags;

	if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
		rc = hwrm_req_send_silent(bp, req);
	} else {
		rc = hwrm_req_send(bp, req);
		if (rc == -EACCES)
			bnxt_print_admin_err(bp);
	}
	return rc;
}

static int bnxt_firmware_reset(struct net_device *dev,
			       enum bnxt_nvm_directory_type dir_type)
{
	u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
	u8 proc_type, flags = 0;

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	default:
		return -EINVAL;
	}

	return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
}

static int bnxt_firmware_reset_chip(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 flags = 0;

	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
		flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;

	return bnxt_hwrm_firmware_reset(dev,
					FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
					flags);
}

static int bnxt_firmware_reset_ap(struct net_device *dev)
{
	return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
					0);
}

static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int rc = 0;
	u16 code_type;
	u32 stored_crc;
	u32 calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
        /* Confirm the CRC32 checksum of the file: */
        stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
                                             sizeof(stored_crc)));
        calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
        if (calculated_crc != stored_crc) {
                netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
                           (unsigned long)stored_crc,
                           (unsigned long)calculated_crc);
                return -EINVAL;
        }
        rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
                              0, 0, 0, fw_data, fw_size);
        if (rc == 0) /* Firmware update successful */
                rc = bnxt_firmware_reset(dev, dir_type);

        return rc;
}

static int bnxt_flash_microcode(struct net_device *dev,
                                u16 dir_type,
                                const u8 *fw_data,
                                size_t fw_size)
{
        struct bnxt_ucode_trailer *trailer;
        u32 calculated_crc;
        u32 stored_crc;
        int rc = 0;

        if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
                netdev_err(dev, "Invalid microcode file size: %u\n",
                           (unsigned int)fw_size);
                return -EINVAL;
        }
        trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
                                                sizeof(*trailer)));
        if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
                netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
                           le32_to_cpu(trailer->sig));
                return -EINVAL;
        }
        if (le16_to_cpu(trailer->dir_type) != dir_type) {
                netdev_err(dev, "Expected microcode type: %d, read: %d\n",
                           dir_type, le16_to_cpu(trailer->dir_type));
                return -EINVAL;
        }
        if (le16_to_cpu(trailer->trailer_length) <
            sizeof(struct bnxt_ucode_trailer)) {
                netdev_err(dev, "Invalid microcode trailer length: %d\n",
                           le16_to_cpu(trailer->trailer_length));
                return -EINVAL;
        }

        /* Confirm the CRC32 checksum of the file: */
        stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
                                             sizeof(stored_crc)));
        calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
        if (calculated_crc != stored_crc) {
                netdev_err(dev,
                           "CRC32 (%08lX) does not match calculated: %08lX\n",
                           (unsigned long)stored_crc,
                           (unsigned long)calculated_crc);
                return -EINVAL;
        }
        rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
                              0, 0, 0, fw_data, fw_size);

        return rc;
}

static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
        switch (dir_type) {
        case BNX_DIR_TYPE_CHIMP_PATCH:
        case BNX_DIR_TYPE_BOOTCODE:
        case BNX_DIR_TYPE_BOOTCODE_2:
        case BNX_DIR_TYPE_APE_FW:
        case BNX_DIR_TYPE_APE_PATCH:
        case BNX_DIR_TYPE_KONG_FW:
        case BNX_DIR_TYPE_KONG_PATCH:
        case BNX_DIR_TYPE_BONO_FW:
        case BNX_DIR_TYPE_BONO_PATCH:
                return true;
        }

        return false;
}

static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
        switch (dir_type) {
        case BNX_DIR_TYPE_AVS:
        case BNX_DIR_TYPE_EXP_ROM_MBA:
        case BNX_DIR_TYPE_PCIE:
        case BNX_DIR_TYPE_TSCF_UCODE:
        case BNX_DIR_TYPE_EXT_PHY:
        case BNX_DIR_TYPE_CCM:
        case BNX_DIR_TYPE_ISCSI_BOOT:
        case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
        case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
                return true;
        }

        return false;
}

static bool bnxt_dir_type_is_executable(u16 dir_type)
{
        return bnxt_dir_type_is_ape_bin_format(dir_type) ||
               bnxt_dir_type_is_other_exec_format(dir_type);
}

static int bnxt_flash_firmware_from_file(struct net_device *dev,
                                         u16 dir_type,
                                         const char *filename)
{
        const struct firmware *fw;
        int rc;

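        /* Fetch the image through the kernel firmware loader, then dispatch
         * on the directory type to pick the right validation path.
         */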
        rc = request_firmware(&fw, filename, &dev->dev);
        if (rc != 0) {
                netdev_err(dev, "Error %d requesting firmware file: %s\n",
                           rc, filename);
                return rc;
        }
        if (bnxt_dir_type_is_ape_bin_format(dir_type))
                rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
        else if (bnxt_dir_type_is_other_exec_format(dir_type))
                rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
        else
                rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
                                      0, 0, 0, fw->data, fw->size);
        release_firmware(fw);
        return rc;
}

#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
#define MSG_INVALID_PKG "PKG install error : Invalid package"
#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
#define MSG_INVALID_DEV "PKG install error : Invalid device"
#define MSG_INTERNAL_ERR "PKG install error : Internal error"
#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"

static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
                                    struct netlink_ext_ack *extack)
{
        switch (result) {
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
        case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
        case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
                BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
                return -EINVAL;
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
        case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
        case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
        case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
                BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
                return -ENOPKG;
        case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
                BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
                return -EPERM;
        case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
        case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
        case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
        case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
        case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
                BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
                return -EOPNOTSUPP;
        default:
                BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
                return -EIO;
        }
}
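/* Package flashing stages the image into the NVM UPDATE area with batched
 * HWRM_NVM_MODIFY requests and then asks firmware to install it with
 * HWRM_NVM_INSTALL_UPDATE.
 */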
#define BNXT_PKG_DMA_SIZE 0x40000
#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))

static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
                                    struct netlink_ext_ack *extack)
{
        u32 item_len;
        int rc;

        rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
                                  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
                                  &item_len, NULL);
        if (rc) {
                BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
                return rc;
        }

        if (fw_size > item_len) {
                rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
                                      BNX_DIR_ORDINAL_FIRST, 0, 1,
                                      round_up(fw_size, 4096), NULL, 0);
                if (rc) {
                        BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
                        return rc;
                }
        }
        return 0;
}

int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
                                   u32 install_type, struct netlink_ext_ack *extack)
{
        struct hwrm_nvm_install_update_input *install;
        struct hwrm_nvm_install_update_output *resp;
        struct hwrm_nvm_modify_input *modify;
        struct bnxt *bp = netdev_priv(dev);
        bool defrag_attempted = false;
        dma_addr_t dma_handle;
        u8 *kmem = NULL;
        u32 modify_len;
        u32 item_len;
        u8 cmd_err;
        u16 index;
        int rc;

        /* resize before flashing larger image than available space */
        rc = bnxt_resize_update_entry(dev, fw->size, extack);
        if (rc)
                return rc;

        bnxt_hwrm_fw_set_time(bp);

        rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
        if (rc)
                return rc;

        /* Try allocating a large DMA buffer first. Older fw will
         * cause excessive NVRAM erases when using small blocks.
         */
        modify_len = roundup_pow_of_two(fw->size);
        modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
        while (1) {
                kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
                if (!kmem && modify_len > PAGE_SIZE)
                        modify_len /= 2;
                else
                        break;
        }
        if (!kmem) {
                hwrm_req_drop(bp, modify);
                return -ENOMEM;
        }

        rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
        if (rc) {
                hwrm_req_drop(bp, modify);
                return rc;
        }

        hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
        hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);

        hwrm_req_hold(bp, modify);
        modify->host_src_addr = cpu_to_le64(dma_handle);

        resp = hwrm_req_hold(bp, install);
        if ((install_type & 0xffff) == 0)
                install_type >>= 16;
        install->install_type = cpu_to_le32(install_type);

        do {
                u32 copied = 0, len = modify_len;

                rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
                                          BNX_DIR_ORDINAL_FIRST,
                                          BNX_DIR_EXT_NONE,
                                          &index, &item_len, NULL);
                if (rc) {
                        BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
                        break;
                }
                if (fw->size > item_len) {
                        BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
                        rc = -EFBIG;
                        break;
                }

                modify->dir_idx = cpu_to_le16(index);

                if (fw->size > modify_len)
                        modify->flags = BNXT_NVM_MORE_FLAG;
                while (copied < fw->size) {
                        u32 balance = fw->size - copied;

                        if (balance <= modify_len) {
                                len = balance;
                                if (copied)
                                        modify->flags |= BNXT_NVM_LAST_FLAG;
                        }
                        memcpy(kmem, fw->data + copied, len);
                        modify->len = cpu_to_le32(len);
                        modify->offset = cpu_to_le32(copied);
                        rc = hwrm_req_send(bp, modify);
                        if (rc)
                                goto pkg_abort;
                        copied += len;
                }
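                /* The whole package is now staged in the UPDATE area;
                 * ask firmware to install it.
                 */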
                rc = hwrm_req_send_silent(bp, install);
                if (!rc)
                        break;

                if (defrag_attempted) {
                        /* We have tried to defragment already in the previous
                         * iteration. Return with the result for INSTALL_UPDATE
                         */
                        break;
                }

                cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

                switch (cmd_err) {
                case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
                        BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
                        rc = -EALREADY;
                        break;
                case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
                        install->flags =
                                cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);

                        rc = hwrm_req_send_silent(bp, install);
                        if (!rc)
                                break;

                        cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

                        if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
                                /* FW has cleared NVM area, driver will create
                                 * UPDATE directory and try the flash again
                                 */
                                defrag_attempted = true;
                                install->flags = 0;
                                rc = bnxt_flash_nvram(bp->dev,
                                                      BNX_DIR_TYPE_UPDATE,
                                                      BNX_DIR_ORDINAL_FIRST,
                                                      0, 0, item_len, NULL, 0);
                                if (!rc)
                                        break;
                        }
                        fallthrough;
                default:
                        BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
                }
        } while (defrag_attempted && !rc);

pkg_abort:
        hwrm_req_drop(bp, modify);
        hwrm_req_drop(bp, install);

        if (resp->result) {
                netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
                           (s8)resp->result, (int)resp->problem_item);
                rc = nvm_update_err_to_stderr(dev, resp->result, extack);
        }
        if (rc == -EACCES)
                bnxt_print_admin_err(bp);
        return rc;
}

static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
                                        u32 install_type, struct netlink_ext_ack *extack)
{
        const struct firmware *fw;
        int rc;

        rc = request_firmware(&fw, filename, &dev->dev);
        if (rc != 0) {
                netdev_err(dev, "PKG error %d requesting file: %s\n",
                           rc, filename);
                return rc;
        }

        rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);

        release_firmware(fw);

        return rc;
}

static int bnxt_flash_device(struct net_device *dev,
                             struct ethtool_flash *flash)
{
        if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
                netdev_err(dev, "flashdev not supported from a virtual function\n");
                return -EINVAL;
        }

        if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
            flash->region > 0xffff)
                return bnxt_flash_package_from_file(dev, flash->data,
                                                    flash->region, NULL);

        return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}

static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
        struct hwrm_nvm_get_dir_info_output *output;
        struct hwrm_nvm_get_dir_info_input *req;
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
        if (rc)
                return rc;

        output = hwrm_req_hold(bp, req);
        rc = hwrm_req_send(bp, req);
        if (!rc) {
                *entries = le32_to_cpu(output->entries);
                *length = le32_to_cpu(output->entry_length);
        }
        hwrm_req_drop(bp, req);
        return rc;
}

static int bnxt_get_eeprom_len(struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);

        if (BNXT_VF(bp))
                return 0;

        /* The -1 return value allows the entire 32-bit range of offsets to be
         * passed via the ethtool command-line utility.
         */
        return -1;
}
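/* Copy the NVM directory into the user buffer: a two-byte header with the
 * entry count and entry size, followed by the raw directory entries.
 */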
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;
        u32 dir_entries;
        u32 entry_length;
        u8 *buf;
        size_t buflen;
        dma_addr_t dma_handle;
        struct hwrm_nvm_get_dir_entries_input *req;

        rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
        if (rc != 0)
                return rc;

        if (!dir_entries || !entry_length)
                return -EIO;

        /* Insert 2 bytes of directory info (count and size of entries) */
        if (len < 2)
                return -EINVAL;

        *data++ = dir_entries;
        *data++ = entry_length;
        len -= 2;
        memset(data, 0xff, len);

        rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
        if (rc)
                return rc;

        buflen = mul_u32_u32(dir_entries, entry_length);
        buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
        if (!buf) {
                hwrm_req_drop(bp, req);
                return -ENOMEM;
        }
        req->host_dest_addr = cpu_to_le64(dma_handle);

        hwrm_req_hold(bp, req); /* hold the slice */
        rc = hwrm_req_send(bp, req);
        if (rc == 0)
                memcpy(data, buf, len > buflen ? buflen : len);
        hwrm_req_drop(bp, req);
        return rc;
}

int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
                        u32 length, u8 *data)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;
        u8 *buf;
        dma_addr_t dma_handle;
        struct hwrm_nvm_read_input *req;

        if (!length)
                return -EINVAL;

        rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
        if (rc)
                return rc;

        buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
        if (!buf) {
                hwrm_req_drop(bp, req);
                return -ENOMEM;
        }

        req->host_dest_addr = cpu_to_le64(dma_handle);
        req->dir_idx = cpu_to_le16(index);
        req->offset = cpu_to_le32(offset);
        req->len = cpu_to_le32(length);

        hwrm_req_hold(bp, req); /* hold the slice */
        rc = hwrm_req_send(bp, req);
        if (rc == 0)
                memcpy(data, buf, length);
        hwrm_req_drop(bp, req);
        return rc;
}

int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
                         u16 ext, u16 *index, u32 *item_length,
                         u32 *data_length)
{
        struct hwrm_nvm_find_dir_entry_output *output;
        struct hwrm_nvm_find_dir_entry_input *req;
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
        if (rc)
                return rc;

        req->enables = 0;
        req->dir_idx = 0;
        req->dir_type = cpu_to_le16(type);
        req->dir_ordinal = cpu_to_le16(ordinal);
        req->dir_ext = cpu_to_le16(ext);
        req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
        output = hwrm_req_hold(bp, req);
        rc = hwrm_req_send_silent(bp, req);
        if (rc == 0) {
                if (index)
                        *index = le16_to_cpu(output->dir_idx);
                if (item_length)
                        *item_length = le32_to_cpu(output->dir_item_length);
                if (data_length)
                        *data_length = le32_to_cpu(output->dir_data_length);
        }
        hwrm_req_drop(bp, req);
        return rc;
}

static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
        char *retval = NULL;
        char *p;
        char *value;
        int field = 0;

        if (datalen < 1)
                return NULL;
        /* null-terminate the log data (removing last '\n'): */
        data[datalen - 1] = 0;
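        /* Walk the package log line by line; fields within a line are
         * tab-separated, and the caller asks for one field by index.
         */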
        for (p = data; *p != 0; p++) {
                field = 0;
                retval = NULL;
                while (*p != 0 && *p != '\n') {
                        value = p;
                        while (*p != 0 && *p != '\t' && *p != '\n')
                                p++;
                        if (field == desired_field)
                                retval = value;
                        if (*p != '\t')
                                break;
                        *p = 0;
                        field++;
                        p++;
                }
                if (*p == 0)
                        break;
                *p = 0;
        }
        return retval;
}

int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
{
        struct bnxt *bp = netdev_priv(dev);
        u16 index = 0;
        char *pkgver;
        u32 pkglen;
        u8 *pkgbuf;
        int rc;

        rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
                                  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
                                  &index, NULL, &pkglen);
        if (rc)
                return rc;

        pkgbuf = kzalloc(pkglen, GFP_KERNEL);
        if (!pkgbuf) {
                dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
                        pkglen);
                return -ENOMEM;
        }

        rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
        if (rc)
                goto err;

        pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
                                   pkglen);
        if (pkgver && *pkgver != 0 && isdigit(*pkgver))
                strscpy(ver, pkgver, size);
        else
                rc = -ENOENT;

err:
        kfree(pkgbuf);

        return rc;
}

static void bnxt_get_pkgver(struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        char buf[FW_VER_STR_LEN - 5];
        int len;

        if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
                len = strlen(bp->fw_ver_str);
                snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
                         "/pkg %s", buf);
        }
}

static int bnxt_get_eeprom(struct net_device *dev,
                           struct ethtool_eeprom *eeprom,
                           u8 *data)
{
        u32 index;
        u32 offset;

        if (eeprom->offset == 0) /* special offset value to get directory */
                return bnxt_get_nvram_directory(dev, eeprom->len, data);

        index = eeprom->offset >> 24;
        offset = eeprom->offset & 0xffffff;

        if (index == 0) {
                netdev_err(dev, "unsupported index value: %d\n", index);
                return -EINVAL;
        }

        return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
}

static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
        struct hwrm_nvm_erase_dir_entry_input *req;
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
        if (rc)
                return rc;

        req->dir_idx = cpu_to_le16(index);
        return hwrm_req_send(bp, req);
}

static int bnxt_set_eeprom(struct net_device *dev,
                           struct ethtool_eeprom *eeprom,
                           u8 *data)
{
        struct bnxt *bp = netdev_priv(dev);
        u8 index, dir_op;
        u16 type, ext, ordinal, attr;

        if (!BNXT_PF(bp)) {
                netdev_err(dev, "NVM write not supported from a virtual function\n");
                return -EINVAL;
        }

        type = eeprom->magic >> 16;

        if (type == 0xffff) { /* special value for directory operations */
                index = eeprom->magic & 0xff;
                dir_op = eeprom->magic >> 8;
                if (index == 0)
                        return -EINVAL;
                switch (dir_op) {
                case 0x0e: /* erase */
                        if (eeprom->offset != ~eeprom->magic)
                                return -EINVAL;
                        return bnxt_erase_nvram_directory(dev, index - 1);
                default:
                        return -EINVAL;
                }
        }

        /* Create or re-write an NVM item: */
        if (bnxt_dir_type_is_executable(type))
                return -EOPNOTSUPP;
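        /* The remaining magic/offset bits encode the directory ext, ordinal
         * and attr fields of the new item.
         */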
        ext = eeprom->magic & 0xffff;
        ordinal = eeprom->offset >> 16;
        attr = eeprom->offset & 0xffff;

        return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
                                eeprom->len);
}

static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
        __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
        __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
        struct bnxt *bp = netdev_priv(dev);
        struct ethtool_keee *eee = &bp->eee;
        struct bnxt_link_info *link_info = &bp->link_info;
        int rc = 0;

        if (!BNXT_PHY_CFG_ABLE(bp))
                return -EOPNOTSUPP;

        if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
                return -EOPNOTSUPP;

        mutex_lock(&bp->link_lock);
        _bnxt_fw_to_linkmode(advertising, link_info->advertising);
        if (!edata->eee_enabled)
                goto eee_ok;

        if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
                netdev_warn(dev, "EEE requires autoneg\n");
                rc = -EINVAL;
                goto eee_exit;
        }
        if (edata->tx_lpi_enabled) {
                if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
                                       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
                        netdev_warn(dev, "Valid LPI timer range is %d to %d microsecs\n",
                                    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
                        rc = -EINVAL;
                        goto eee_exit;
                } else if (!bp->lpi_tmr_hi) {
                        edata->tx_lpi_timer = eee->tx_lpi_timer;
                }
        }
        if (linkmode_empty(edata->advertised)) {
                linkmode_and(edata->advertised, advertising, eee->supported);
        } else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
                netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
                rc = -EINVAL;
                goto eee_exit;
        }

        linkmode_copy(eee->advertised, edata->advertised);
        eee->tx_lpi_enabled = edata->tx_lpi_enabled;
        eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
        eee->eee_enabled = edata->eee_enabled;

        if (netif_running(dev))
                rc = bnxt_hwrm_set_link_setting(bp, false, true);

eee_exit:
        mutex_unlock(&bp->link_lock);
        return rc;
}
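/* Report the cached EEE state; advertised and link-partner data are masked
 * out while EEE is disabled or inactive.
 */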
static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
        struct bnxt *bp = netdev_priv(dev);

        if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
                return -EOPNOTSUPP;

        *edata = bp->eee;
        if (!bp->eee.eee_enabled) {
                /* Preserve tx_lpi_timer so that the last value will be used
                 * by default when it is re-enabled.
                 */
                linkmode_zero(edata->advertised);
                edata->tx_lpi_enabled = 0;
        }

        if (!bp->eee.eee_active)
                linkmode_zero(edata->lp_advertised);

        return 0;
}

static int bnxt_set_tunable(struct net_device *dev,
                            const struct ethtool_tunable *tuna,
                            const void *data)
{
        struct bnxt *bp = netdev_priv(dev);
        u32 rx_copybreak;

        switch (tuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                rx_copybreak = *(u32 *)data;
                if (rx_copybreak > BNXT_MAX_RX_COPYBREAK)
                        return -ERANGE;
                if (rx_copybreak != bp->rx_copybreak) {
                        if (netif_running(dev))
                                return -EBUSY;
                        bp->rx_copybreak = rx_copybreak;
                }
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int bnxt_get_tunable(struct net_device *dev,
                            const struct ethtool_tunable *tuna, void *data)
{
        struct bnxt *bp = netdev_priv(dev);

        switch (tuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                *(u32 *)data = bp->rx_copybreak;
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
                                            u16 page_number, u8 bank,
                                            u16 start_addr, u16 data_length,
                                            u8 *buf)
{
        struct hwrm_port_phy_i2c_read_output *output;
        struct hwrm_port_phy_i2c_read_input *req;
        int rc, byte_offset = 0;

        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
        if (rc)
                return rc;

        output = hwrm_req_hold(bp, req);
        req->i2c_slave_addr = i2c_addr;
        req->page_number = cpu_to_le16(page_number);
        req->port_id = cpu_to_le16(bp->pf.port_id);
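        /* Firmware bounds each I2C read, so transfer the EEPROM contents in
         * chunks of at most BNXT_MAX_PHY_I2C_RESP_SIZE bytes.
         */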
        do {
                u16 xfer_size;

                xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
                data_length -= xfer_size;
                req->page_offset = cpu_to_le16(start_addr + byte_offset);
                req->data_length = xfer_size;
                req->enables =
                        cpu_to_le32((start_addr + byte_offset ?
                                     PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
                                     0) |
                                    (bank ?
                                     PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
                                     0));
                rc = hwrm_req_send(bp, req);
                if (!rc)
                        memcpy(buf + byte_offset, output->data, xfer_size);
                byte_offset += xfer_size;
        } while (!rc && data_length > 0);
        hwrm_req_drop(bp, req);

        return rc;
}

static int bnxt_get_module_info(struct net_device *dev,
                                struct ethtool_modinfo *modinfo)
{
        u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
                return -EPERM;

        /* No point in going further if phy status indicates
         * module is not inserted or if it is powered down or
         * if it is of type 10GBase-T
         */
        if (bp->link_info.module_status >
            PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
                return -EOPNOTSUPP;

        /* This feature is not supported in older firmware versions */
        if (bp->hwrm_spec_code < 0x10202)
                return -EOPNOTSUPP;

        rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
                                              SFF_DIAG_SUPPORT_OFFSET + 1,
                                              data);
        if (!rc) {
                u8 module_id = data[0];
                u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];

                switch (module_id) {
                case SFF_MODULE_ID_SFP:
                        modinfo->type = ETH_MODULE_SFF_8472;
                        modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
                        if (!diag_supported)
                                modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
                        break;
                case SFF_MODULE_ID_QSFP:
                case SFF_MODULE_ID_QSFP_PLUS:
                        modinfo->type = ETH_MODULE_SFF_8436;
                        modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
                        break;
                case SFF_MODULE_ID_QSFP28:
                        modinfo->type = ETH_MODULE_SFF_8636;
                        modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
                        break;
                default:
                        rc = -EOPNOTSUPP;
                        break;
                }
        }
        return rc;
}

static int bnxt_get_module_eeprom(struct net_device *dev,
                                  struct ethtool_eeprom *eeprom,
                                  u8 *data)
{
        struct bnxt *bp = netdev_priv(dev);
        u16 start = eeprom->offset, length = eeprom->len;
        int rc = 0;

        if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
                return -EPERM;

        memset(data, 0, eeprom->len);

        /* Read A0 portion of the EEPROM */
        if (start < ETH_MODULE_SFF_8436_LEN) {
                if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
                        length = ETH_MODULE_SFF_8436_LEN - start;
                rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
                                                      start, length, data);
                if (rc)
                        return rc;
                start += length;
                data += length;
                length = eeprom->len - length;
        }

        /* Read A2 portion of the EEPROM */
        if (length) {
                start -= ETH_MODULE_SFF_8436_LEN;
                rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
                                                      start, length, data);
        }
        return rc;
}
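/* Translate the PHY's module_status into an extack message; any status worse
 * than a warning fails the EEPROM access.
 */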
static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
{
        if (bp->link_info.module_status <=
            PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
                return 0;

        switch (bp->link_info.module_status) {
        case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
                NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
                break;
        case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
                NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
                break;
        case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
                NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
                break;
        default:
                NL_SET_ERR_MSG_MOD(extack, "Unknown error");
                break;
        }
        return -EINVAL;
}

static int
bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp,
                                 const struct ethtool_module_eeprom *page_data,
                                 struct netlink_ext_ack *extack)
{
        int rc;

        if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Module read/write not permitted on untrusted VF");
                return -EPERM;
        }

        rc = bnxt_get_module_status(bp, extack);
        if (rc)
                return rc;

        if (bp->hwrm_spec_code < 0x10202) {
                NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
                return -EINVAL;
        }

        if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
                NL_SET_ERR_MSG_MOD(extack, "Firmware not capable of bank selection");
                return -EINVAL;
        }
        return 0;
}

static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
                                          const struct ethtool_module_eeprom *page_data,
                                          struct netlink_ext_ack *extack)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
        if (rc)
                return rc;

        rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
                                              page_data->page, page_data->bank,
                                              page_data->offset,
                                              page_data->length,
                                              page_data->data);
        if (rc) {
                NL_SET_ERR_MSG_MOD(extack, "Module's EEPROM read failed");
                return rc;
        }
        return page_data->length;
}

static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp,
                                             const struct ethtool_module_eeprom *page)
{
        struct hwrm_port_phy_i2c_write_input *req;
        int bytes_written = 0;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE);
        if (rc)
                return rc;

        hwrm_req_hold(bp, req);
        req->i2c_slave_addr = page->i2c_address << 1;
        req->page_number = cpu_to_le16(page->page);
        req->bank_number = page->bank;
        req->port_id = cpu_to_le16(bp->pf.port_id);
        req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET |
                                   PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER);

        while (bytes_written < page->length) {
                u16 xfer_size;

                xfer_size = min_t(u16, page->length - bytes_written,
                                  BNXT_MAX_PHY_I2C_RESP_SIZE);
                req->page_offset = cpu_to_le16(page->offset + bytes_written);
                req->data_length = xfer_size;
                memcpy(req->data, page->data + bytes_written, xfer_size);
                rc = hwrm_req_send(bp, req);
                if (rc)
                        break;
                bytes_written += xfer_size;
        }

        hwrm_req_drop(bp, req);
        return rc;
}

static int bnxt_set_module_eeprom_by_page(struct net_device *dev,
                                          const struct ethtool_module_eeprom *page_data,
                                          struct netlink_ext_ack *extack)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
        if (rc)
                return rc;

        rc = bnxt_write_sfp_module_eeprom_info(bp, page_data);
        if (rc) {
                NL_SET_ERR_MSG_MOD(extack, "Module's EEPROM write failed");
                return rc;
        }
        return page_data->length;
}
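/* Restart autonegotiation; only meaningful on a configurable PHY with
 * autoneg enabled.
 */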
static int bnxt_nway_reset(struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_link_info *link_info = &bp->link_info;
        int rc = 0;

        if (!BNXT_PHY_CFG_ABLE(bp))
                return -EOPNOTSUPP;

        if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
                return -EINVAL;

        if (netif_running(dev))
                rc = bnxt_hwrm_set_link_setting(bp, true, false);

        return rc;
}

static int bnxt_set_phys_id(struct net_device *dev,
                            enum ethtool_phys_id_state state)
{
        struct hwrm_port_led_cfg_input *req;
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_pf_info *pf = &bp->pf;
        struct bnxt_led_cfg *led_cfg;
        u8 led_state;
        __le16 duration;
        int rc, i;

        if (!bp->num_leds || BNXT_VF(bp))
                return -EOPNOTSUPP;

        if (state == ETHTOOL_ID_ACTIVE) {
                led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
                duration = cpu_to_le16(500);
        } else if (state == ETHTOOL_ID_INACTIVE) {
                led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
                duration = cpu_to_le16(0);
        } else {
                return -EINVAL;
        }
        rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
        if (rc)
                return rc;

        req->port_id = cpu_to_le16(pf->port_id);
        req->num_leds = bp->num_leds;
        led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
        for (i = 0; i < bp->num_leds; i++, led_cfg++) {
                req->enables |= BNXT_LED_DFLT_ENABLES(i);
                led_cfg->led_id = bp->leds[i].led_id;
                led_cfg->led_state = led_state;
                led_cfg->led_blink_on = duration;
                led_cfg->led_blink_off = duration;
                led_cfg->led_group_id = bp->leds[i].led_group_id;
        }
        return hwrm_req_send(bp, req);
}

static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
{
        struct hwrm_selftest_irq_input *req;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
        if (rc)
                return rc;

        req->cmpl_ring = cpu_to_le16(cmpl_ring);
        return hwrm_req_send(bp, req);
}

static int bnxt_test_irq(struct bnxt *bp)
{
        int i;

        for (i = 0; i < bp->cp_nr_rings; i++) {
                u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
                int rc;

                rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
                if (rc)
                        return rc;
        }
        return 0;
}

static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
{
        struct hwrm_port_mac_cfg_input *req;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
        if (rc)
                return rc;

        req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
        if (enable)
                req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
        else
                req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
        return hwrm_req_send(bp, req);
}

static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
        struct hwrm_port_phy_qcaps_output *resp;
        struct hwrm_port_phy_qcaps_input *req;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
        if (rc)
                return rc;

        resp = hwrm_req_hold(bp, req);
        rc = hwrm_req_send(bp, req);
        if (!rc)
                *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);

        hwrm_req_drop(bp, req);
        return rc;
}

static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
                                    struct hwrm_port_phy_cfg_input *req)
{
        struct bnxt_link_info *link_info = &bp->link_info;
        u16 fw_advertising;
        u16 fw_speed;
        int rc;

        if (!link_info->autoneg ||
            (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
                return 0;

        rc = bnxt_query_force_speeds(bp, &fw_advertising);
        if (rc)
                return rc;

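        /* Pick a forced speed for the loopback: the current link speed if the
         * link is up, otherwise the first supported speed that is advertised.
         */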
        fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
        if (BNXT_LINK_IS_UP(bp))
                fw_speed = bp->link_info.link_speed;
        else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
                fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
        else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
                fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
        else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
                fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
        else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
                fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

        req->force_link_speed = cpu_to_le16(fw_speed);
        req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
                                  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
        rc = hwrm_req_send(bp, req);
        req->flags = 0;
        req->force_link_speed = cpu_to_le16(0);
        return rc;
}

static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
        struct hwrm_port_phy_cfg_input *req;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
        if (rc)
                return rc;

        /* prevent bnxt_disable_an_for_lpbk() from consuming the request */
        hwrm_req_hold(bp, req);

        if (enable) {
                bnxt_disable_an_for_lpbk(bp, req);
                if (ext)
                        req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
                else
                        req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
        } else {
                req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
        }
        req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
        rc = hwrm_req_send(bp, req);
        hwrm_req_drop(bp, req);
        return rc;
}

static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                            u32 raw_cons, int pkt_size)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt_rx_ring_info *rxr;
        struct bnxt_sw_rx_bd *rx_buf;
        struct rx_cmp *rxcmp;
        u16 cp_cons, cons;
        u8 *data;
        u32 len;
        int i;

        rxr = bnapi->rx_ring;
        cp_cons = RING_CMP(raw_cons);
        rxcmp = (struct rx_cmp *)
                &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
        cons = rxcmp->rx_cmp_opaque;
        rx_buf = &rxr->rx_buf_ring[cons];
        data = rx_buf->data_ptr;
        len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
        if (len != pkt_size)
                return -EIO;
        i = ETH_ALEN;
        if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
                return -EIO;
        i += ETH_ALEN;
        for ( ; i < pkt_size; i++) {
                if (data[i] != (u8)(i & 0xff))
                        return -EIO;
        }
        return 0;
}
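/* Busy-poll the completion ring for the looped-back frame, waiting up to
 * roughly a millisecond before giving up.
 */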
static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                              int pkt_size)
{
        struct tx_cmp *txcmp;
        int rc = -EIO;
        u32 raw_cons;
        u32 cons;
        int i;

        raw_cons = cpr->cp_raw_cons;
        for (i = 0; i < 200; i++) {
                cons = RING_CMP(raw_cons);
                txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

                if (!TX_CMP_VALID(txcmp, raw_cons)) {
                        udelay(5);
                        continue;
                }

                /* The valid test of the entry must be done first before
                 * reading any further.
                 */
                dma_rmb();
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
                    TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
                        rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
                        raw_cons = NEXT_RAW_CMP(raw_cons);
                        raw_cons = NEXT_RAW_CMP(raw_cons);
                        break;
                }
                raw_cons = NEXT_RAW_CMP(raw_cons);
        }
        cpr->cp_raw_cons = raw_cons;
        return rc;
}

static int bnxt_run_loopback(struct bnxt *bp)
{
        struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
        struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
        struct bnxt_cp_ring_info *cpr;
        int pkt_size, i = 0;
        struct sk_buff *skb;
        dma_addr_t map;
        u8 *data;
        int rc;

        cpr = &rxr->bnapi->cp_ring;
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
                cpr = rxr->rx_cpr;
        pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK,
                                                    bp->rx_copybreak));
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        data = skb_put(skb, pkt_size);
        ether_addr_copy(&data[i], bp->dev->dev_addr);
        i += ETH_ALEN;
        ether_addr_copy(&data[i], bp->dev->dev_addr);
        i += ETH_ALEN;
        for ( ; i < pkt_size; i++)
                data[i] = (u8)(i & 0xff);

        map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
                             DMA_TO_DEVICE);
        if (dma_mapping_error(&bp->pdev->dev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }
        bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);

        /* Sync BD data before updating doorbell */
        wmb();

        bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
        rc = bnxt_poll_loopback(bp, cpr, pkt_size);

        dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
        dev_kfree_skb(skb);
        return rc;
}

static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
        struct hwrm_selftest_exec_output *resp;
        struct hwrm_selftest_exec_input *req;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
        if (rc)
                return rc;

        hwrm_req_timeout(bp, req, bp->test_info->timeout);
        req->flags = test_mask;

        resp = hwrm_req_hold(bp, req);
        rc = hwrm_req_send(bp, req);
        *test_results = resp->test_success;
        hwrm_req_drop(bp, req);
        return rc;
}

#define BNXT_DRV_TESTS 4
#define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3)

static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
                           u64 *buf)
{
        struct bnxt *bp = netdev_priv(dev);
        bool do_ext_lpbk = false;
        bool offline = false;
        u8 test_results = 0;
        u8 test_mask = 0;
        int rc = 0, i;

        if (!bp->num_tests || !BNXT_PF(bp))
                return;

        memset(buf, 0, sizeof(u64) * bp->num_tests);
        if (etest->flags & ETH_TEST_FL_OFFLINE &&
            bnxt_ulp_registered(bp->edev)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n");
                return;
        }

        if (!netif_running(dev)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                return;
        }

        if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
            (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
                do_ext_lpbk = true;
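        /* Offline tests take the interface down, so refuse them while VFs are
         * active or the PF is shared.
         */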
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
                        return;
                }
                offline = true;
        }

        for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
                u8 bit_val = 1 << i;

                if (!(bp->test_info->offline_mask & bit_val))
                        test_mask |= bit_val;
                else if (offline)
                        test_mask |= bit_val;
        }
        if (!offline) {
                bnxt_run_fw_tests(bp, test_mask, &test_results);
        } else {
                bnxt_close_nic(bp, true, false);
                bnxt_run_fw_tests(bp, test_mask, &test_results);

                rc = bnxt_half_open_nic(bp);
                if (rc) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        return;
                }
                buf[BNXT_MACLPBK_TEST_IDX] = 1;
                if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK)
                        goto skip_mac_loopback;

                bnxt_hwrm_mac_loopback(bp, true);
                msleep(250);
                if (bnxt_run_loopback(bp))
                        etest->flags |= ETH_TEST_FL_FAILED;
                else
                        buf[BNXT_MACLPBK_TEST_IDX] = 0;

                bnxt_hwrm_mac_loopback(bp, false);
skip_mac_loopback:
                buf[BNXT_PHYLPBK_TEST_IDX] = 1;
                if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK)
                        goto skip_phy_loopback;

                bnxt_hwrm_phy_loopback(bp, true, false);
                msleep(1000);
                if (bnxt_run_loopback(bp))
                        etest->flags |= ETH_TEST_FL_FAILED;
                else
                        buf[BNXT_PHYLPBK_TEST_IDX] = 0;
skip_phy_loopback:
                buf[BNXT_EXTLPBK_TEST_IDX] = 1;
                if (do_ext_lpbk) {
                        etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
                        bnxt_hwrm_phy_loopback(bp, true, true);
                        msleep(1000);
                        if (bnxt_run_loopback(bp))
                                etest->flags |= ETH_TEST_FL_FAILED;
                        else
                                buf[BNXT_EXTLPBK_TEST_IDX] = 0;
                }
                bnxt_hwrm_phy_loopback(bp, false, false);
                bnxt_half_close_nic(bp);
                rc = bnxt_open_nic(bp, true, true);
        }
        if (rc || bnxt_test_irq(bp)) {
                buf[BNXT_IRQ_TEST_IDX] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
                u8 bit_val = 1 << i;

                if ((test_mask & bit_val) && !(test_results & bit_val)) {
                        buf[i] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
        }
}

static int bnxt_reset(struct net_device *dev, u32 *flags)
{
        struct bnxt *bp = netdev_priv(dev);
        bool reload = false;
        u32 req = *flags;

        if (!req)
                return -EINVAL;

        if (!BNXT_PF(bp)) {
                netdev_err(dev, "Reset is not supported from a VF\n");
                return -EOPNOTSUPP;
        }

        if (pci_vfs_assigned(bp->pdev) &&
            !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
                netdev_err(dev,
                           "Reset not allowed when VFs are assigned to VMs\n");
                return -EBUSY;
        }

        if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
                /* This feature is not supported in older firmware versions */
                if (bp->hwrm_spec_code >= 0x10803) {
                        if (!bnxt_firmware_reset_chip(dev)) {
                                netdev_info(dev, "Firmware reset request successful.\n");
                                if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
                                        reload = true;
                                *flags &= ~BNXT_FW_RESET_CHIP;
                        }
                } else if (req == BNXT_FW_RESET_CHIP) {
                        return -EOPNOTSUPP; /* only request, fail hard */
                }
        }
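        /* AP reset is attempted separately, and only on pre-P4+ chips. */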
        if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
                /* This feature is not supported in older firmware versions */
                if (bp->hwrm_spec_code >= 0x10803) {
                        if (!bnxt_firmware_reset_ap(dev)) {
                                netdev_info(dev, "Reset application processor successful.\n");
                                reload = true;
                                *flags &= ~BNXT_FW_RESET_AP;
                        }
                } else if (req == BNXT_FW_RESET_AP) {
                        return -EOPNOTSUPP; /* only request, fail hard */
                }
        }

        if (reload)
                netdev_info(dev, "Reload driver to complete reset\n");

        return 0;
}

static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
        struct bnxt *bp = netdev_priv(dev);

        if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) {
                netdev_info(dev,
                            "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n");
                return -EINVAL;
        }

        if (dump->flag == BNXT_DUMP_CRASH) {
                if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
                    (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
                        netdev_info(dev,
                                    "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
                        return -EOPNOTSUPP;
                } else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
                        netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
                        return -EOPNOTSUPP;
                }
        }

        bp->dump_flag = dump->flag;
        return 0;
}

static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
{
        struct bnxt *bp = netdev_priv(dev);

        if (bp->hwrm_spec_code < 0x10801)
                return -EOPNOTSUPP;

        dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
                        bp->ver_resp.hwrm_fw_min_8b << 16 |
                        bp->ver_resp.hwrm_fw_bld_8b << 8 |
                        bp->ver_resp.hwrm_fw_rsvd_8b;

        dump->flag = bp->dump_flag;
        dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
        return 0;
}

static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
                              void *buf)
{
        struct bnxt *bp = netdev_priv(dev);

        if (bp->hwrm_spec_code < 0x10801)
                return -EOPNOTSUPP;

        memset(buf, 0, dump->len);

        dump->flag = bp->dump_flag;
        return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
}

static int bnxt_get_ts_info(struct net_device *dev,
                            struct kernel_ethtool_ts_info *info)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_ptp_cfg *ptp;

        ptp = bp->ptp_cfg;
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;

        if (!ptp)
                return 0;

        info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
                                 SOF_TIMESTAMPING_RX_HARDWARE |
                                 SOF_TIMESTAMPING_RAW_HARDWARE;
        if (ptp->ptp_clock)
                info->phc_index = ptp_clock_index(ptp->ptp_clock);

        info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
                           (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
                           (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

        if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
                info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
        return 0;
}

void bnxt_ethtool_init(struct bnxt *bp)
{
        struct hwrm_selftest_qlist_output *resp;
        struct hwrm_selftest_qlist_input *req;
        struct bnxt_test_info *test_info;
        struct net_device *dev = bp->dev;
        int i, rc;

        if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
                bnxt_get_pkgver(dev);

        bp->num_tests = 0;
        if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
                return;

        test_info = bp->test_info;
        if (!test_info) {
                test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
                if (!test_info)
                        return;
                bp->test_info = test_info;
        }

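        /* Query the firmware self-test list to size and label the ethtool
         * self-test entries; the driver appends its own loopback/IRQ tests.
         */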
        if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
                return;

        resp = hwrm_req_hold(bp, req);
        rc = hwrm_req_send_silent(bp, req);
        if (rc)
                goto ethtool_init_exit;

        bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
        if (bp->num_tests > BNXT_MAX_TEST)
                bp->num_tests = BNXT_MAX_TEST;

        test_info->offline_mask = resp->offline_tests;
        test_info->timeout = le16_to_cpu(resp->test_timeout);
        if (!test_info->timeout)
                test_info->timeout = HWRM_CMD_TIMEOUT;
        for (i = 0; i < bp->num_tests; i++) {
                char *str = test_info->string[i];
                char *fw_str = resp->test_name[i];

                if (i == BNXT_MACLPBK_TEST_IDX) {
                        strcpy(str, "Mac loopback test (offline)");
                } else if (i == BNXT_PHYLPBK_TEST_IDX) {
                        strcpy(str, "Phy loopback test (offline)");
                } else if (i == BNXT_EXTLPBK_TEST_IDX) {
                        strcpy(str, "Ext loopback test (offline)");
                } else if (i == BNXT_IRQ_TEST_IDX) {
                        strcpy(str, "Interrupt test (offline)");
                } else {
                        snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
                                 fw_str, test_info->offline_mask & (1 << i) ?
                                         "offline" : "online");
                }
        }

ethtool_init_exit:
        hwrm_req_drop(bp, req);
}

static void bnxt_get_eth_phy_stats(struct net_device *dev,
                                   struct ethtool_eth_phy_stats *phy_stats)
{
        struct bnxt *bp = netdev_priv(dev);
        u64 *rx;

        if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
                return;

        rx = bp->rx_port_stats_ext.sw_stats;
        phy_stats->SymbolErrorDuringCarrier =
                *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
}

static void bnxt_get_eth_mac_stats(struct net_device *dev,
                                   struct ethtool_eth_mac_stats *mac_stats)
{
        struct bnxt *bp = netdev_priv(dev);
        u64 *rx, *tx;

        if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
                return;

        rx = bp->port_stats.sw_stats;
        tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

        mac_stats->FramesReceivedOK =
                BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
        mac_stats->FramesTransmittedOK =
                BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
        mac_stats->FrameCheckSequenceErrors =
                BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
        mac_stats->AlignmentErrors =
                BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
        mac_stats->OutOfRangeLengthField =
                BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
}

static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
                                    struct ethtool_eth_ctrl_stats *ctrl_stats)
{
        struct bnxt *bp = netdev_priv(dev);
        u64 *rx;

        if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
                return;

        rx = bp->port_stats.sw_stats;
        ctrl_stats->MACControlFramesReceived =
                BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
}

static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
        { 0, 64 },
        { 65, 127 },
        { 128, 255 },
        { 256, 511 },
        { 512, 1023 },
        { 1024, 1518 },
        { 1519, 2047 },
        { 2048, 4095 },
        { 4096, 9216 },
        { 9217, 16383 },
        {}
};

static void bnxt_get_rmon_stats(struct net_device *dev,
                                struct ethtool_rmon_stats *rmon_stats,
                                const struct ethtool_rmon_hist_range **ranges)
{
        struct bnxt *bp = netdev_priv(dev);
        u64 *rx, *tx;

        if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
                return;

        rx = bp->port_stats.sw_stats;
        tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

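        /* The histogram buckets below mirror bnxt_rmon_ranges above. */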
        rmon_stats->jabbers =
                BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
        rmon_stats->oversize_pkts =
                BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
        rmon_stats->undersize_pkts =
                BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);

        rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
        rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
        rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
        rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
        rmon_stats->hist[4] =
                BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
        rmon_stats->hist[5] =
                BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
        rmon_stats->hist[6] =
                BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
        rmon_stats->hist[7] =
                BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
        rmon_stats->hist[8] =
                BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
        rmon_stats->hist[9] =
                BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);

        rmon_stats->hist_tx[0] =
                BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
        rmon_stats->hist_tx[1] =
                BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
        rmon_stats->hist_tx[2] =
                BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
        rmon_stats->hist_tx[3] =
                BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
        rmon_stats->hist_tx[4] =
                BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
        rmon_stats->hist_tx[5] =
                BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
        rmon_stats->hist_tx[6] =
                BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
        rmon_stats->hist_tx[7] =
                BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
        rmon_stats->hist_tx[8] =
                BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
        rmon_stats->hist_tx[9] =
                BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);

        *ranges = bnxt_rmon_ranges;
}

static void bnxt_get_ptp_stats(struct net_device *dev,
                               struct ethtool_ts_stats *ts_stats)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

        if (ptp) {
                ts_stats->pkts = ptp->stats.ts_pkts;
                ts_stats->lost = ptp->stats.ts_lost;
                ts_stats->err = atomic64_read(&ptp->stats.ts_err);
        }
}

static void bnxt_get_link_ext_stats(struct net_device *dev,
                                    struct ethtool_link_ext_stats *stats)
{
        struct bnxt *bp = netdev_priv(dev);
        u64 *rx;

        if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
                return;

        rx = bp->rx_port_stats_ext.sw_stats;
        stats->link_down_events =
                *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
}

void bnxt_ethtool_free(struct bnxt *bp)
{
        kfree(bp->test_info);
        bp->test_info = NULL;
}
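/* Ethtool entry points exported by the driver. */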
const struct ethtool_ops bnxt_ethtool_ops = {
        .cap_link_lanes_supported = 1,
        .rxfh_per_ctx_key = 1,
        .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1,
        .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
        .rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_MAX_FRAMES |
                                     ETHTOOL_COALESCE_USECS_IRQ |
                                     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
                                     ETHTOOL_COALESCE_STATS_BLOCK_USECS |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
                                     ETHTOOL_COALESCE_USE_CQE,
        .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
                                 ETHTOOL_RING_USE_HDS_THRS,
        .get_link_ksettings = bnxt_get_link_ksettings,
        .set_link_ksettings = bnxt_set_link_ksettings,
        .get_fec_stats = bnxt_get_fec_stats,
        .get_fecparam = bnxt_get_fecparam,
        .set_fecparam = bnxt_set_fecparam,
        .get_pause_stats = bnxt_get_pause_stats,
        .get_pauseparam = bnxt_get_pauseparam,
        .set_pauseparam = bnxt_set_pauseparam,
        .get_drvinfo = bnxt_get_drvinfo,
        .get_regs_len = bnxt_get_regs_len,
        .get_regs = bnxt_get_regs,
        .get_wol = bnxt_get_wol,
        .set_wol = bnxt_set_wol,
        .get_coalesce = bnxt_get_coalesce,
        .set_coalesce = bnxt_set_coalesce,
        .get_msglevel = bnxt_get_msglevel,
        .set_msglevel = bnxt_set_msglevel,
        .get_sset_count = bnxt_get_sset_count,
        .get_strings = bnxt_get_strings,
        .get_ethtool_stats = bnxt_get_ethtool_stats,
        .set_ringparam = bnxt_set_ringparam,
        .get_ringparam = bnxt_get_ringparam,
        .get_channels = bnxt_get_channels,
        .set_channels = bnxt_set_channels,
        .get_rxnfc = bnxt_get_rxnfc,
        .set_rxnfc = bnxt_set_rxnfc,
        .get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
        .get_rxfh_key_size = bnxt_get_rxfh_key_size,
        .get_rxfh = bnxt_get_rxfh,
        .set_rxfh = bnxt_set_rxfh,
        .get_rxfh_fields = bnxt_get_rxfh_fields,
        .set_rxfh_fields = bnxt_set_rxfh_fields,
        .create_rxfh_context = bnxt_create_rxfh_context,
        .modify_rxfh_context = bnxt_modify_rxfh_context,
        .remove_rxfh_context = bnxt_remove_rxfh_context,
        .flash_device = bnxt_flash_device,
        .get_eeprom_len = bnxt_get_eeprom_len,
        .get_eeprom = bnxt_get_eeprom,
        .set_eeprom = bnxt_set_eeprom,
        .get_link = bnxt_get_link,
        .get_link_ext_stats = bnxt_get_link_ext_stats,
        .get_eee = bnxt_get_eee,
        .set_eee = bnxt_set_eee,
        .get_tunable = bnxt_get_tunable,
        .set_tunable = bnxt_set_tunable,
        .get_module_info = bnxt_get_module_info,
        .get_module_eeprom = bnxt_get_module_eeprom,
        .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
        .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page,
        .nway_reset = bnxt_nway_reset,
        .set_phys_id = bnxt_set_phys_id,
        .self_test = bnxt_self_test,
        .get_ts_info = bnxt_get_ts_info,
        .reset = bnxt_reset,
        .set_dump = bnxt_set_dump,
        .get_dump_flag = bnxt_get_dump_flag,
        .get_dump_data = bnxt_get_dump_data,
        .get_eth_phy_stats = bnxt_get_eth_phy_stats,
        .get_eth_mac_stats = bnxt_get_eth_mac_stats,
        .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
        .get_rmon_stats = bnxt_get_rmon_stats,
        .get_ts_stats = bnxt_get_ptp_stats,
};