1 /* Broadcom NetXtreme-C/E network driver. 2 * 3 * Copyright (c) 2014-2016 Broadcom Corporation 4 * Copyright (c) 2016-2017 Broadcom Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 */ 10 11 #include <linux/ctype.h> 12 #include <linux/stringify.h> 13 #include <linux/ethtool.h> 14 #include <linux/linkmode.h> 15 #include <linux/interrupt.h> 16 #include <linux/pci.h> 17 #include <linux/etherdevice.h> 18 #include <linux/crc32.h> 19 #include <linux/firmware.h> 20 #include <linux/utsname.h> 21 #include <linux/time.h> 22 #include <linux/ptp_clock_kernel.h> 23 #include <linux/net_tstamp.h> 24 #include <linux/timecounter.h> 25 #include "bnxt_hsi.h" 26 #include "bnxt.h" 27 #include "bnxt_xdp.h" 28 #include "bnxt_ptp.h" 29 #include "bnxt_ethtool.h" 30 #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */ 31 #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ 32 #include "bnxt_coredump.h" 33 #define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100) 34 #define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) 35 #define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) 36 37 static u32 bnxt_get_msglevel(struct net_device *dev) 38 { 39 struct bnxt *bp = netdev_priv(dev); 40 41 return bp->msg_enable; 42 } 43 44 static void bnxt_set_msglevel(struct net_device *dev, u32 value) 45 { 46 struct bnxt *bp = netdev_priv(dev); 47 48 bp->msg_enable = value; 49 } 50 51 static int bnxt_get_coalesce(struct net_device *dev, 52 struct ethtool_coalesce *coal) 53 { 54 struct bnxt *bp = netdev_priv(dev); 55 struct bnxt_coal *hw_coal; 56 u16 mult; 57 58 memset(coal, 0, sizeof(*coal)); 59 60 coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM; 61 62 hw_coal = &bp->rx_coal; 63 mult = hw_coal->bufs_per_record; 64 coal->rx_coalesce_usecs = hw_coal->coal_ticks; 65 coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult; 66 coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; 67 coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; 68 69 hw_coal = &bp->tx_coal; 70 mult = hw_coal->bufs_per_record; 71 coal->tx_coalesce_usecs = hw_coal->coal_ticks; 72 coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult; 73 coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; 74 coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; 75 76 coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; 77 78 return 0; 79 } 80 81 static int bnxt_set_coalesce(struct net_device *dev, 82 struct ethtool_coalesce *coal) 83 { 84 struct bnxt *bp = netdev_priv(dev); 85 bool update_stats = false; 86 struct bnxt_coal *hw_coal; 87 int rc = 0; 88 u16 mult; 89 90 if (coal->use_adaptive_rx_coalesce) { 91 bp->flags |= BNXT_FLAG_DIM; 92 } else { 93 if (bp->flags & BNXT_FLAG_DIM) { 94 bp->flags &= ~(BNXT_FLAG_DIM); 95 goto reset_coalesce; 96 } 97 } 98 99 hw_coal = &bp->rx_coal; 100 mult = hw_coal->bufs_per_record; 101 hw_coal->coal_ticks = coal->rx_coalesce_usecs; 102 hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult; 103 hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq; 104 hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult; 105 106 hw_coal = &bp->tx_coal; 107 mult = hw_coal->bufs_per_record; 108 hw_coal->coal_ticks = coal->tx_coalesce_usecs; 109 hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult; 110 hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq; 111 hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult; 112 113 if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { 114 u32 stats_ticks = coal->stats_block_coalesce_usecs; 115 116 /* Allow 0, which means disable. */ 117 if (stats_ticks) 118 stats_ticks = clamp_t(u32, stats_ticks, 119 BNXT_MIN_STATS_COAL_TICKS, 120 BNXT_MAX_STATS_COAL_TICKS); 121 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); 122 bp->stats_coal_ticks = stats_ticks; 123 if (bp->stats_coal_ticks) 124 bp->current_interval = 125 bp->stats_coal_ticks * HZ / 1000000; 126 else 127 bp->current_interval = BNXT_TIMER_INTERVAL; 128 update_stats = true; 129 } 130 131 reset_coalesce: 132 if (netif_running(dev)) { 133 if (update_stats) { 134 rc = bnxt_close_nic(bp, true, false); 135 if (!rc) 136 rc = bnxt_open_nic(bp, true, false); 137 } else { 138 rc = bnxt_hwrm_set_coal(bp); 139 } 140 } 141 142 return rc; 143 } 144 145 static const char * const bnxt_ring_rx_stats_str[] = { 146 "rx_ucast_packets", 147 "rx_mcast_packets", 148 "rx_bcast_packets", 149 "rx_discards", 150 "rx_errors", 151 "rx_ucast_bytes", 152 "rx_mcast_bytes", 153 "rx_bcast_bytes", 154 }; 155 156 static const char * const bnxt_ring_tx_stats_str[] = { 157 "tx_ucast_packets", 158 "tx_mcast_packets", 159 "tx_bcast_packets", 160 "tx_errors", 161 "tx_discards", 162 "tx_ucast_bytes", 163 "tx_mcast_bytes", 164 "tx_bcast_bytes", 165 }; 166 167 static const char * const bnxt_ring_tpa_stats_str[] = { 168 "tpa_packets", 169 "tpa_bytes", 170 "tpa_events", 171 "tpa_aborts", 172 }; 173 174 static const char * const bnxt_ring_tpa2_stats_str[] = { 175 "rx_tpa_eligible_pkt", 176 "rx_tpa_eligible_bytes", 177 "rx_tpa_pkt", 178 "rx_tpa_bytes", 179 "rx_tpa_errors", 180 "rx_tpa_events", 181 }; 182 183 static const char * const bnxt_rx_sw_stats_str[] = { 184 "rx_l4_csum_errors", 185 "rx_resets", 186 "rx_buf_errors", 187 }; 188 189 static const char * const bnxt_cmn_sw_stats_str[] = { 190 "missed_irqs", 191 }; 192 193 #define BNXT_RX_STATS_ENTRY(counter) \ 194 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } 195 196 #define BNXT_TX_STATS_ENTRY(counter) \ 197 { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) } 198 199 #define BNXT_RX_STATS_EXT_ENTRY(counter) \ 200 { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } 201 202 #define BNXT_TX_STATS_EXT_ENTRY(counter) \ 203 { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) } 204 205 #define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \ 206 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \ 207 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions) 208 209 #define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \ 210 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \ 211 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions) 212 213 #define BNXT_RX_STATS_EXT_PFC_ENTRIES \ 214 BNXT_RX_STATS_EXT_PFC_ENTRY(0), \ 215 BNXT_RX_STATS_EXT_PFC_ENTRY(1), \ 216 BNXT_RX_STATS_EXT_PFC_ENTRY(2), \ 217 BNXT_RX_STATS_EXT_PFC_ENTRY(3), \ 218 BNXT_RX_STATS_EXT_PFC_ENTRY(4), \ 219 BNXT_RX_STATS_EXT_PFC_ENTRY(5), \ 220 BNXT_RX_STATS_EXT_PFC_ENTRY(6), \ 221 BNXT_RX_STATS_EXT_PFC_ENTRY(7) 222 223 #define BNXT_TX_STATS_EXT_PFC_ENTRIES \ 224 BNXT_TX_STATS_EXT_PFC_ENTRY(0), \ 225 BNXT_TX_STATS_EXT_PFC_ENTRY(1), \ 226 BNXT_TX_STATS_EXT_PFC_ENTRY(2), \ 227 BNXT_TX_STATS_EXT_PFC_ENTRY(3), \ 228 BNXT_TX_STATS_EXT_PFC_ENTRY(4), \ 229 BNXT_TX_STATS_EXT_PFC_ENTRY(5), \ 230 BNXT_TX_STATS_EXT_PFC_ENTRY(6), \ 231 BNXT_TX_STATS_EXT_PFC_ENTRY(7) 232 233 #define BNXT_RX_STATS_EXT_COS_ENTRY(n) \ 234 BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \ 235 BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n) 236 237 #define BNXT_TX_STATS_EXT_COS_ENTRY(n) \ 238 BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \ 239 BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n) 240 241 #define BNXT_RX_STATS_EXT_COS_ENTRIES \ 242 BNXT_RX_STATS_EXT_COS_ENTRY(0), \ 243 BNXT_RX_STATS_EXT_COS_ENTRY(1), \ 244 BNXT_RX_STATS_EXT_COS_ENTRY(2), \ 245 BNXT_RX_STATS_EXT_COS_ENTRY(3), \ 246 BNXT_RX_STATS_EXT_COS_ENTRY(4), \ 247 BNXT_RX_STATS_EXT_COS_ENTRY(5), \ 248 BNXT_RX_STATS_EXT_COS_ENTRY(6), \ 249 BNXT_RX_STATS_EXT_COS_ENTRY(7) \ 250 251 #define BNXT_TX_STATS_EXT_COS_ENTRIES \ 252 BNXT_TX_STATS_EXT_COS_ENTRY(0), \ 253 BNXT_TX_STATS_EXT_COS_ENTRY(1), \ 254 BNXT_TX_STATS_EXT_COS_ENTRY(2), \ 255 BNXT_TX_STATS_EXT_COS_ENTRY(3), \ 256 BNXT_TX_STATS_EXT_COS_ENTRY(4), \ 257 BNXT_TX_STATS_EXT_COS_ENTRY(5), \ 258 BNXT_TX_STATS_EXT_COS_ENTRY(6), \ 259 BNXT_TX_STATS_EXT_COS_ENTRY(7) \ 260 261 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \ 262 BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \ 263 BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n) 264 265 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \ 266 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \ 267 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \ 268 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \ 269 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \ 270 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \ 271 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \ 272 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \ 273 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7) 274 275 #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \ 276 { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \ 277 __stringify(counter##_pri##n) } 278 279 #define BNXT_TX_STATS_PRI_ENTRY(counter, n) \ 280 { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \ 281 __stringify(counter##_pri##n) } 282 283 #define BNXT_RX_STATS_PRI_ENTRIES(counter) \ 284 BNXT_RX_STATS_PRI_ENTRY(counter, 0), \ 285 BNXT_RX_STATS_PRI_ENTRY(counter, 1), \ 286 BNXT_RX_STATS_PRI_ENTRY(counter, 2), \ 287 BNXT_RX_STATS_PRI_ENTRY(counter, 3), \ 288 BNXT_RX_STATS_PRI_ENTRY(counter, 4), \ 289 BNXT_RX_STATS_PRI_ENTRY(counter, 5), \ 290 BNXT_RX_STATS_PRI_ENTRY(counter, 6), \ 291 BNXT_RX_STATS_PRI_ENTRY(counter, 7) 292 293 #define BNXT_TX_STATS_PRI_ENTRIES(counter) \ 294 BNXT_TX_STATS_PRI_ENTRY(counter, 0), \ 295 BNXT_TX_STATS_PRI_ENTRY(counter, 1), \ 296 BNXT_TX_STATS_PRI_ENTRY(counter, 2), \ 297 BNXT_TX_STATS_PRI_ENTRY(counter, 3), \ 298 BNXT_TX_STATS_PRI_ENTRY(counter, 4), \ 299 BNXT_TX_STATS_PRI_ENTRY(counter, 5), \ 300 BNXT_TX_STATS_PRI_ENTRY(counter, 6), \ 301 BNXT_TX_STATS_PRI_ENTRY(counter, 7) 302 303 enum { 304 RX_TOTAL_DISCARDS, 305 TX_TOTAL_DISCARDS, 306 }; 307 308 static struct { 309 u64 counter; 310 char string[ETH_GSTRING_LEN]; 311 } bnxt_sw_func_stats[] = { 312 {0, "rx_total_discard_pkts"}, 313 {0, "tx_total_discard_pkts"}, 314 }; 315 316 #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) 317 #define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str) 318 #define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str) 319 #define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str) 320 321 static const struct { 322 long offset; 323 char string[ETH_GSTRING_LEN]; 324 } bnxt_port_stats_arr[] = { 325 BNXT_RX_STATS_ENTRY(rx_64b_frames), 326 BNXT_RX_STATS_ENTRY(rx_65b_127b_frames), 327 BNXT_RX_STATS_ENTRY(rx_128b_255b_frames), 328 BNXT_RX_STATS_ENTRY(rx_256b_511b_frames), 329 BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames), 330 BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames), 331 BNXT_RX_STATS_ENTRY(rx_good_vlan_frames), 332 BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames), 333 BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames), 334 BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames), 335 BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames), 336 BNXT_RX_STATS_ENTRY(rx_total_frames), 337 BNXT_RX_STATS_ENTRY(rx_ucast_frames), 338 BNXT_RX_STATS_ENTRY(rx_mcast_frames), 339 BNXT_RX_STATS_ENTRY(rx_bcast_frames), 340 BNXT_RX_STATS_ENTRY(rx_fcs_err_frames), 341 BNXT_RX_STATS_ENTRY(rx_ctrl_frames), 342 BNXT_RX_STATS_ENTRY(rx_pause_frames), 343 BNXT_RX_STATS_ENTRY(rx_pfc_frames), 344 BNXT_RX_STATS_ENTRY(rx_align_err_frames), 345 BNXT_RX_STATS_ENTRY(rx_ovrsz_frames), 346 BNXT_RX_STATS_ENTRY(rx_jbr_frames), 347 BNXT_RX_STATS_ENTRY(rx_mtu_err_frames), 348 BNXT_RX_STATS_ENTRY(rx_tagged_frames), 349 BNXT_RX_STATS_ENTRY(rx_double_tagged_frames), 350 BNXT_RX_STATS_ENTRY(rx_good_frames), 351 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0), 352 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1), 353 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2), 354 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3), 355 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4), 356 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5), 357 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6), 358 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7), 359 BNXT_RX_STATS_ENTRY(rx_undrsz_frames), 360 BNXT_RX_STATS_ENTRY(rx_eee_lpi_events), 361 BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration), 362 BNXT_RX_STATS_ENTRY(rx_bytes), 363 BNXT_RX_STATS_ENTRY(rx_runt_bytes), 364 BNXT_RX_STATS_ENTRY(rx_runt_frames), 365 BNXT_RX_STATS_ENTRY(rx_stat_discard), 366 BNXT_RX_STATS_ENTRY(rx_stat_err), 367 368 BNXT_TX_STATS_ENTRY(tx_64b_frames), 369 BNXT_TX_STATS_ENTRY(tx_65b_127b_frames), 370 BNXT_TX_STATS_ENTRY(tx_128b_255b_frames), 371 BNXT_TX_STATS_ENTRY(tx_256b_511b_frames), 372 BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames), 373 BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames), 374 BNXT_TX_STATS_ENTRY(tx_good_vlan_frames), 375 BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames), 376 BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames), 377 BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames), 378 BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames), 379 BNXT_TX_STATS_ENTRY(tx_good_frames), 380 BNXT_TX_STATS_ENTRY(tx_total_frames), 381 BNXT_TX_STATS_ENTRY(tx_ucast_frames), 382 BNXT_TX_STATS_ENTRY(tx_mcast_frames), 383 BNXT_TX_STATS_ENTRY(tx_bcast_frames), 384 BNXT_TX_STATS_ENTRY(tx_pause_frames), 385 BNXT_TX_STATS_ENTRY(tx_pfc_frames), 386 BNXT_TX_STATS_ENTRY(tx_jabber_frames), 387 BNXT_TX_STATS_ENTRY(tx_fcs_err_frames), 388 BNXT_TX_STATS_ENTRY(tx_err), 389 BNXT_TX_STATS_ENTRY(tx_fifo_underruns), 390 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0), 391 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1), 392 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2), 393 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3), 394 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4), 395 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5), 396 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6), 397 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7), 398 BNXT_TX_STATS_ENTRY(tx_eee_lpi_events), 399 BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration), 400 BNXT_TX_STATS_ENTRY(tx_total_collisions), 401 BNXT_TX_STATS_ENTRY(tx_bytes), 402 BNXT_TX_STATS_ENTRY(tx_xthol_frames), 403 BNXT_TX_STATS_ENTRY(tx_stat_discard), 404 BNXT_TX_STATS_ENTRY(tx_stat_error), 405 }; 406 407 static const struct { 408 long offset; 409 char string[ETH_GSTRING_LEN]; 410 } bnxt_port_stats_ext_arr[] = { 411 BNXT_RX_STATS_EXT_ENTRY(link_down_events), 412 BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events), 413 BNXT_RX_STATS_EXT_ENTRY(resume_pause_events), 414 BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events), 415 BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), 416 BNXT_RX_STATS_EXT_COS_ENTRIES, 417 BNXT_RX_STATS_EXT_PFC_ENTRIES, 418 BNXT_RX_STATS_EXT_ENTRY(rx_bits), 419 BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold), 420 BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err), 421 BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits), 422 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES, 423 }; 424 425 static const struct { 426 long offset; 427 char string[ETH_GSTRING_LEN]; 428 } bnxt_tx_port_stats_ext_arr[] = { 429 BNXT_TX_STATS_EXT_COS_ENTRIES, 430 BNXT_TX_STATS_EXT_PFC_ENTRIES, 431 }; 432 433 static const struct { 434 long base_off; 435 char string[ETH_GSTRING_LEN]; 436 } bnxt_rx_bytes_pri_arr[] = { 437 BNXT_RX_STATS_PRI_ENTRIES(rx_bytes), 438 }; 439 440 static const struct { 441 long base_off; 442 char string[ETH_GSTRING_LEN]; 443 } bnxt_rx_pkts_pri_arr[] = { 444 BNXT_RX_STATS_PRI_ENTRIES(rx_packets), 445 }; 446 447 static const struct { 448 long base_off; 449 char string[ETH_GSTRING_LEN]; 450 } bnxt_tx_bytes_pri_arr[] = { 451 BNXT_TX_STATS_PRI_ENTRIES(tx_bytes), 452 }; 453 454 static const struct { 455 long base_off; 456 char string[ETH_GSTRING_LEN]; 457 } bnxt_tx_pkts_pri_arr[] = { 458 BNXT_TX_STATS_PRI_ENTRIES(tx_packets), 459 }; 460 461 #define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) 462 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) 463 #define BNXT_NUM_STATS_PRI \ 464 (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \ 465 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \ 466 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \ 467 ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) 468 469 static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) 470 { 471 if (BNXT_SUPPORTS_TPA(bp)) { 472 if (bp->max_tpa_v2) { 473 if (BNXT_CHIP_P5_THOR(bp)) 474 return BNXT_NUM_TPA_RING_STATS_P5; 475 return BNXT_NUM_TPA_RING_STATS_P5_SR2; 476 } 477 return BNXT_NUM_TPA_RING_STATS; 478 } 479 return 0; 480 } 481 482 static int bnxt_get_num_ring_stats(struct bnxt *bp) 483 { 484 int rx, tx, cmn; 485 486 rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS + 487 bnxt_get_num_tpa_ring_stats(bp); 488 tx = NUM_RING_TX_HW_STATS; 489 cmn = NUM_RING_CMN_SW_STATS; 490 return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings + 491 cmn * bp->cp_nr_rings; 492 } 493 494 static int bnxt_get_num_stats(struct bnxt *bp) 495 { 496 int num_stats = bnxt_get_num_ring_stats(bp); 497 498 num_stats += BNXT_NUM_SW_FUNC_STATS; 499 500 if (bp->flags & BNXT_FLAG_PORT_STATS) 501 num_stats += BNXT_NUM_PORT_STATS; 502 503 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 504 num_stats += bp->fw_rx_stats_ext_size + 505 bp->fw_tx_stats_ext_size; 506 if (bp->pri2cos_valid) 507 num_stats += BNXT_NUM_STATS_PRI; 508 } 509 510 return num_stats; 511 } 512 513 static int bnxt_get_sset_count(struct net_device *dev, int sset) 514 { 515 struct bnxt *bp = netdev_priv(dev); 516 517 switch (sset) { 518 case ETH_SS_STATS: 519 return bnxt_get_num_stats(bp); 520 case ETH_SS_TEST: 521 if (!bp->num_tests) 522 return -EOPNOTSUPP; 523 return bp->num_tests; 524 default: 525 return -EOPNOTSUPP; 526 } 527 } 528 529 static bool is_rx_ring(struct bnxt *bp, int ring_num) 530 { 531 return ring_num < bp->rx_nr_rings; 532 } 533 534 static bool is_tx_ring(struct bnxt *bp, int ring_num) 535 { 536 int tx_base = 0; 537 538 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 539 tx_base = bp->rx_nr_rings; 540 541 if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings)) 542 return true; 543 return false; 544 } 545 546 static void bnxt_get_ethtool_stats(struct net_device *dev, 547 struct ethtool_stats *stats, u64 *buf) 548 { 549 u32 i, j = 0; 550 struct bnxt *bp = netdev_priv(dev); 551 u32 tpa_stats; 552 553 if (!bp->bnapi) { 554 j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS; 555 goto skip_ring_stats; 556 } 557 558 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) 559 bnxt_sw_func_stats[i].counter = 0; 560 561 tpa_stats = bnxt_get_num_tpa_ring_stats(bp); 562 for (i = 0; i < bp->cp_nr_rings; i++) { 563 struct bnxt_napi *bnapi = bp->bnapi[i]; 564 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 565 u64 *sw_stats = cpr->stats.sw_stats; 566 u64 *sw; 567 int k; 568 569 if (is_rx_ring(bp, i)) { 570 for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++) 571 buf[j] = sw_stats[k]; 572 } 573 if (is_tx_ring(bp, i)) { 574 k = NUM_RING_RX_HW_STATS; 575 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; 576 j++, k++) 577 buf[j] = sw_stats[k]; 578 } 579 if (!tpa_stats || !is_rx_ring(bp, i)) 580 goto skip_tpa_ring_stats; 581 582 k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; 583 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS + 584 tpa_stats; j++, k++) 585 buf[j] = sw_stats[k]; 586 587 skip_tpa_ring_stats: 588 sw = (u64 *)&cpr->sw_stats.rx; 589 if (is_rx_ring(bp, i)) { 590 for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++) 591 buf[j] = sw[k]; 592 } 593 594 sw = (u64 *)&cpr->sw_stats.cmn; 595 for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++) 596 buf[j] = sw[k]; 597 598 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += 599 BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts); 600 bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += 601 BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts); 602 } 603 604 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) 605 buf[j] = bnxt_sw_func_stats[i].counter; 606 607 skip_ring_stats: 608 if (bp->flags & BNXT_FLAG_PORT_STATS) { 609 u64 *port_stats = bp->port_stats.sw_stats; 610 611 for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) 612 buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset); 613 } 614 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 615 u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats; 616 u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats; 617 618 for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) { 619 buf[j] = *(rx_port_stats_ext + 620 bnxt_port_stats_ext_arr[i].offset); 621 } 622 for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) { 623 buf[j] = *(tx_port_stats_ext + 624 bnxt_tx_port_stats_ext_arr[i].offset); 625 } 626 if (bp->pri2cos_valid) { 627 for (i = 0; i < 8; i++, j++) { 628 long n = bnxt_rx_bytes_pri_arr[i].base_off + 629 bp->pri2cos_idx[i]; 630 631 buf[j] = *(rx_port_stats_ext + n); 632 } 633 for (i = 0; i < 8; i++, j++) { 634 long n = bnxt_rx_pkts_pri_arr[i].base_off + 635 bp->pri2cos_idx[i]; 636 637 buf[j] = *(rx_port_stats_ext + n); 638 } 639 for (i = 0; i < 8; i++, j++) { 640 long n = bnxt_tx_bytes_pri_arr[i].base_off + 641 bp->pri2cos_idx[i]; 642 643 buf[j] = *(tx_port_stats_ext + n); 644 } 645 for (i = 0; i < 8; i++, j++) { 646 long n = bnxt_tx_pkts_pri_arr[i].base_off + 647 bp->pri2cos_idx[i]; 648 649 buf[j] = *(tx_port_stats_ext + n); 650 } 651 } 652 } 653 } 654 655 static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 656 { 657 struct bnxt *bp = netdev_priv(dev); 658 static const char * const *str; 659 u32 i, j, num_str; 660 661 switch (stringset) { 662 case ETH_SS_STATS: 663 for (i = 0; i < bp->cp_nr_rings; i++) { 664 if (is_rx_ring(bp, i)) { 665 num_str = NUM_RING_RX_HW_STATS; 666 for (j = 0; j < num_str; j++) { 667 sprintf(buf, "[%d]: %s", i, 668 bnxt_ring_rx_stats_str[j]); 669 buf += ETH_GSTRING_LEN; 670 } 671 } 672 if (is_tx_ring(bp, i)) { 673 num_str = NUM_RING_TX_HW_STATS; 674 for (j = 0; j < num_str; j++) { 675 sprintf(buf, "[%d]: %s", i, 676 bnxt_ring_tx_stats_str[j]); 677 buf += ETH_GSTRING_LEN; 678 } 679 } 680 num_str = bnxt_get_num_tpa_ring_stats(bp); 681 if (!num_str || !is_rx_ring(bp, i)) 682 goto skip_tpa_stats; 683 684 if (bp->max_tpa_v2) 685 str = bnxt_ring_tpa2_stats_str; 686 else 687 str = bnxt_ring_tpa_stats_str; 688 689 for (j = 0; j < num_str; j++) { 690 sprintf(buf, "[%d]: %s", i, str[j]); 691 buf += ETH_GSTRING_LEN; 692 } 693 skip_tpa_stats: 694 if (is_rx_ring(bp, i)) { 695 num_str = NUM_RING_RX_SW_STATS; 696 for (j = 0; j < num_str; j++) { 697 sprintf(buf, "[%d]: %s", i, 698 bnxt_rx_sw_stats_str[j]); 699 buf += ETH_GSTRING_LEN; 700 } 701 } 702 num_str = NUM_RING_CMN_SW_STATS; 703 for (j = 0; j < num_str; j++) { 704 sprintf(buf, "[%d]: %s", i, 705 bnxt_cmn_sw_stats_str[j]); 706 buf += ETH_GSTRING_LEN; 707 } 708 } 709 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { 710 strcpy(buf, bnxt_sw_func_stats[i].string); 711 buf += ETH_GSTRING_LEN; 712 } 713 714 if (bp->flags & BNXT_FLAG_PORT_STATS) { 715 for (i = 0; i < BNXT_NUM_PORT_STATS; i++) { 716 strcpy(buf, bnxt_port_stats_arr[i].string); 717 buf += ETH_GSTRING_LEN; 718 } 719 } 720 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 721 for (i = 0; i < bp->fw_rx_stats_ext_size; i++) { 722 strcpy(buf, bnxt_port_stats_ext_arr[i].string); 723 buf += ETH_GSTRING_LEN; 724 } 725 for (i = 0; i < bp->fw_tx_stats_ext_size; i++) { 726 strcpy(buf, 727 bnxt_tx_port_stats_ext_arr[i].string); 728 buf += ETH_GSTRING_LEN; 729 } 730 if (bp->pri2cos_valid) { 731 for (i = 0; i < 8; i++) { 732 strcpy(buf, 733 bnxt_rx_bytes_pri_arr[i].string); 734 buf += ETH_GSTRING_LEN; 735 } 736 for (i = 0; i < 8; i++) { 737 strcpy(buf, 738 bnxt_rx_pkts_pri_arr[i].string); 739 buf += ETH_GSTRING_LEN; 740 } 741 for (i = 0; i < 8; i++) { 742 strcpy(buf, 743 bnxt_tx_bytes_pri_arr[i].string); 744 buf += ETH_GSTRING_LEN; 745 } 746 for (i = 0; i < 8; i++) { 747 strcpy(buf, 748 bnxt_tx_pkts_pri_arr[i].string); 749 buf += ETH_GSTRING_LEN; 750 } 751 } 752 } 753 break; 754 case ETH_SS_TEST: 755 if (bp->num_tests) 756 memcpy(buf, bp->test_info->string, 757 bp->num_tests * ETH_GSTRING_LEN); 758 break; 759 default: 760 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n", 761 stringset); 762 break; 763 } 764 } 765 766 static void bnxt_get_ringparam(struct net_device *dev, 767 struct ethtool_ringparam *ering) 768 { 769 struct bnxt *bp = netdev_priv(dev); 770 771 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 772 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 773 ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT; 774 } else { 775 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT; 776 ering->rx_jumbo_max_pending = 0; 777 } 778 ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT; 779 780 ering->rx_pending = bp->rx_ring_size; 781 ering->rx_jumbo_pending = bp->rx_agg_ring_size; 782 ering->tx_pending = bp->tx_ring_size; 783 } 784 785 static int bnxt_set_ringparam(struct net_device *dev, 786 struct ethtool_ringparam *ering) 787 { 788 struct bnxt *bp = netdev_priv(dev); 789 790 if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) || 791 (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) || 792 (ering->tx_pending <= MAX_SKB_FRAGS)) 793 return -EINVAL; 794 795 if (netif_running(dev)) 796 bnxt_close_nic(bp, false, false); 797 798 bp->rx_ring_size = ering->rx_pending; 799 bp->tx_ring_size = ering->tx_pending; 800 bnxt_set_ring_params(bp); 801 802 if (netif_running(dev)) 803 return bnxt_open_nic(bp, false, false); 804 805 return 0; 806 } 807 808 static void bnxt_get_channels(struct net_device *dev, 809 struct ethtool_channels *channel) 810 { 811 struct bnxt *bp = netdev_priv(dev); 812 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 813 int max_rx_rings, max_tx_rings, tcs; 814 int max_tx_sch_inputs, tx_grps; 815 816 /* Get the most up-to-date max_tx_sch_inputs. */ 817 if (netif_running(dev) && BNXT_NEW_RM(bp)) 818 bnxt_hwrm_func_resc_qcaps(bp, false); 819 max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; 820 821 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); 822 if (max_tx_sch_inputs) 823 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); 824 825 tcs = netdev_get_num_tc(dev); 826 tx_grps = max(tcs, 1); 827 if (bp->tx_nr_rings_xdp) 828 tx_grps++; 829 max_tx_rings /= tx_grps; 830 channel->max_combined = min_t(int, max_rx_rings, max_tx_rings); 831 832 if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) { 833 max_rx_rings = 0; 834 max_tx_rings = 0; 835 } 836 if (max_tx_sch_inputs) 837 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); 838 839 if (tcs > 1) 840 max_tx_rings /= tcs; 841 842 channel->max_rx = max_rx_rings; 843 channel->max_tx = max_tx_rings; 844 channel->max_other = 0; 845 if (bp->flags & BNXT_FLAG_SHARED_RINGS) { 846 channel->combined_count = bp->rx_nr_rings; 847 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 848 channel->combined_count--; 849 } else { 850 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) { 851 channel->rx_count = bp->rx_nr_rings; 852 channel->tx_count = bp->tx_nr_rings_per_tc; 853 } 854 } 855 } 856 857 static int bnxt_set_channels(struct net_device *dev, 858 struct ethtool_channels *channel) 859 { 860 struct bnxt *bp = netdev_priv(dev); 861 int req_tx_rings, req_rx_rings, tcs; 862 bool sh = false; 863 int tx_xdp = 0; 864 int rc = 0; 865 866 if (channel->other_count) 867 return -EINVAL; 868 869 if (!channel->combined_count && 870 (!channel->rx_count || !channel->tx_count)) 871 return -EINVAL; 872 873 if (channel->combined_count && 874 (channel->rx_count || channel->tx_count)) 875 return -EINVAL; 876 877 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count || 878 channel->tx_count)) 879 return -EINVAL; 880 881 if (channel->combined_count) 882 sh = true; 883 884 tcs = netdev_get_num_tc(dev); 885 886 req_tx_rings = sh ? channel->combined_count : channel->tx_count; 887 req_rx_rings = sh ? channel->combined_count : channel->rx_count; 888 if (bp->tx_nr_rings_xdp) { 889 if (!sh) { 890 netdev_err(dev, "Only combined mode supported when XDP is enabled.\n"); 891 return -EINVAL; 892 } 893 tx_xdp = req_rx_rings; 894 } 895 rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp); 896 if (rc) { 897 netdev_warn(dev, "Unable to allocate the requested rings\n"); 898 return rc; 899 } 900 901 if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != 902 bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && 903 (dev->priv_flags & IFF_RXFH_CONFIGURED)) { 904 netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n"); 905 return -EINVAL; 906 } 907 908 if (netif_running(dev)) { 909 if (BNXT_PF(bp)) { 910 /* TODO CHIMP_FW: Send message to all VF's 911 * before PF unload 912 */ 913 } 914 rc = bnxt_close_nic(bp, true, false); 915 if (rc) { 916 netdev_err(bp->dev, "Set channel failure rc :%x\n", 917 rc); 918 return rc; 919 } 920 } 921 922 if (sh) { 923 bp->flags |= BNXT_FLAG_SHARED_RINGS; 924 bp->rx_nr_rings = channel->combined_count; 925 bp->tx_nr_rings_per_tc = channel->combined_count; 926 } else { 927 bp->flags &= ~BNXT_FLAG_SHARED_RINGS; 928 bp->rx_nr_rings = channel->rx_count; 929 bp->tx_nr_rings_per_tc = channel->tx_count; 930 } 931 bp->tx_nr_rings_xdp = tx_xdp; 932 bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp; 933 if (tcs > 1) 934 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp; 935 936 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 937 bp->tx_nr_rings + bp->rx_nr_rings; 938 939 /* After changing number of rx channels, update NTUPLE feature. */ 940 netdev_update_features(dev); 941 if (netif_running(dev)) { 942 rc = bnxt_open_nic(bp, true, false); 943 if ((!rc) && BNXT_PF(bp)) { 944 /* TODO CHIMP_FW: Send message to all VF's 945 * to renable 946 */ 947 } 948 } else { 949 rc = bnxt_reserve_rings(bp, true); 950 } 951 952 return rc; 953 } 954 955 #ifdef CONFIG_RFS_ACCEL 956 static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd, 957 u32 *rule_locs) 958 { 959 int i, j = 0; 960 961 cmd->data = bp->ntp_fltr_count; 962 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 963 struct hlist_head *head; 964 struct bnxt_ntuple_filter *fltr; 965 966 head = &bp->ntp_fltr_hash_tbl[i]; 967 rcu_read_lock(); 968 hlist_for_each_entry_rcu(fltr, head, hash) { 969 if (j == cmd->rule_cnt) 970 break; 971 rule_locs[j++] = fltr->sw_id; 972 } 973 rcu_read_unlock(); 974 if (j == cmd->rule_cnt) 975 break; 976 } 977 cmd->rule_cnt = j; 978 return 0; 979 } 980 981 static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) 982 { 983 struct ethtool_rx_flow_spec *fs = 984 (struct ethtool_rx_flow_spec *)&cmd->fs; 985 struct bnxt_ntuple_filter *fltr; 986 struct flow_keys *fkeys; 987 int i, rc = -EINVAL; 988 989 if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR) 990 return rc; 991 992 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 993 struct hlist_head *head; 994 995 head = &bp->ntp_fltr_hash_tbl[i]; 996 rcu_read_lock(); 997 hlist_for_each_entry_rcu(fltr, head, hash) { 998 if (fltr->sw_id == fs->location) 999 goto fltr_found; 1000 } 1001 rcu_read_unlock(); 1002 } 1003 return rc; 1004 1005 fltr_found: 1006 fkeys = &fltr->fkeys; 1007 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 1008 if (fkeys->basic.ip_proto == IPPROTO_TCP) 1009 fs->flow_type = TCP_V4_FLOW; 1010 else if (fkeys->basic.ip_proto == IPPROTO_UDP) 1011 fs->flow_type = UDP_V4_FLOW; 1012 else 1013 goto fltr_err; 1014 1015 fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; 1016 fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); 1017 1018 fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; 1019 fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); 1020 1021 fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; 1022 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); 1023 1024 fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; 1025 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); 1026 } else { 1027 int i; 1028 1029 if (fkeys->basic.ip_proto == IPPROTO_TCP) 1030 fs->flow_type = TCP_V6_FLOW; 1031 else if (fkeys->basic.ip_proto == IPPROTO_UDP) 1032 fs->flow_type = UDP_V6_FLOW; 1033 else 1034 goto fltr_err; 1035 1036 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = 1037 fkeys->addrs.v6addrs.src; 1038 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = 1039 fkeys->addrs.v6addrs.dst; 1040 for (i = 0; i < 4; i++) { 1041 fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0); 1042 fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0); 1043 } 1044 fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src; 1045 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0); 1046 1047 fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst; 1048 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0); 1049 } 1050 1051 fs->ring_cookie = fltr->rxq; 1052 rc = 0; 1053 1054 fltr_err: 1055 rcu_read_unlock(); 1056 1057 return rc; 1058 } 1059 #endif 1060 1061 static u64 get_ethtool_ipv4_rss(struct bnxt *bp) 1062 { 1063 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) 1064 return RXH_IP_SRC | RXH_IP_DST; 1065 return 0; 1066 } 1067 1068 static u64 get_ethtool_ipv6_rss(struct bnxt *bp) 1069 { 1070 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) 1071 return RXH_IP_SRC | RXH_IP_DST; 1072 return 0; 1073 } 1074 1075 static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) 1076 { 1077 cmd->data = 0; 1078 switch (cmd->flow_type) { 1079 case TCP_V4_FLOW: 1080 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) 1081 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1082 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1083 cmd->data |= get_ethtool_ipv4_rss(bp); 1084 break; 1085 case UDP_V4_FLOW: 1086 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4) 1087 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1088 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1089 fallthrough; 1090 case SCTP_V4_FLOW: 1091 case AH_ESP_V4_FLOW: 1092 case AH_V4_FLOW: 1093 case ESP_V4_FLOW: 1094 case IPV4_FLOW: 1095 cmd->data |= get_ethtool_ipv4_rss(bp); 1096 break; 1097 1098 case TCP_V6_FLOW: 1099 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) 1100 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1101 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1102 cmd->data |= get_ethtool_ipv6_rss(bp); 1103 break; 1104 case UDP_V6_FLOW: 1105 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6) 1106 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1107 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1108 fallthrough; 1109 case SCTP_V6_FLOW: 1110 case AH_ESP_V6_FLOW: 1111 case AH_V6_FLOW: 1112 case ESP_V6_FLOW: 1113 case IPV6_FLOW: 1114 cmd->data |= get_ethtool_ipv6_rss(bp); 1115 break; 1116 } 1117 return 0; 1118 } 1119 1120 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) 1121 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST) 1122 1123 static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) 1124 { 1125 u32 rss_hash_cfg = bp->rss_hash_cfg; 1126 int tuple, rc = 0; 1127 1128 if (cmd->data == RXH_4TUPLE) 1129 tuple = 4; 1130 else if (cmd->data == RXH_2TUPLE) 1131 tuple = 2; 1132 else if (!cmd->data) 1133 tuple = 0; 1134 else 1135 return -EINVAL; 1136 1137 if (cmd->flow_type == TCP_V4_FLOW) { 1138 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4; 1139 if (tuple == 4) 1140 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4; 1141 } else if (cmd->flow_type == UDP_V4_FLOW) { 1142 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP)) 1143 return -EINVAL; 1144 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4; 1145 if (tuple == 4) 1146 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4; 1147 } else if (cmd->flow_type == TCP_V6_FLOW) { 1148 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 1149 if (tuple == 4) 1150 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 1151 } else if (cmd->flow_type == UDP_V6_FLOW) { 1152 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP)) 1153 return -EINVAL; 1154 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 1155 if (tuple == 4) 1156 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 1157 } else if (tuple == 4) { 1158 return -EINVAL; 1159 } 1160 1161 switch (cmd->flow_type) { 1162 case TCP_V4_FLOW: 1163 case UDP_V4_FLOW: 1164 case SCTP_V4_FLOW: 1165 case AH_ESP_V4_FLOW: 1166 case AH_V4_FLOW: 1167 case ESP_V4_FLOW: 1168 case IPV4_FLOW: 1169 if (tuple == 2) 1170 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4; 1171 else if (!tuple) 1172 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4; 1173 break; 1174 1175 case TCP_V6_FLOW: 1176 case UDP_V6_FLOW: 1177 case SCTP_V6_FLOW: 1178 case AH_ESP_V6_FLOW: 1179 case AH_V6_FLOW: 1180 case ESP_V6_FLOW: 1181 case IPV6_FLOW: 1182 if (tuple == 2) 1183 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6; 1184 else if (!tuple) 1185 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6; 1186 break; 1187 } 1188 1189 if (bp->rss_hash_cfg == rss_hash_cfg) 1190 return 0; 1191 1192 bp->rss_hash_cfg = rss_hash_cfg; 1193 if (netif_running(bp->dev)) { 1194 bnxt_close_nic(bp, false, false); 1195 rc = bnxt_open_nic(bp, false, false); 1196 } 1197 return rc; 1198 } 1199 1200 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 1201 u32 *rule_locs) 1202 { 1203 struct bnxt *bp = netdev_priv(dev); 1204 int rc = 0; 1205 1206 switch (cmd->cmd) { 1207 #ifdef CONFIG_RFS_ACCEL 1208 case ETHTOOL_GRXRINGS: 1209 cmd->data = bp->rx_nr_rings; 1210 break; 1211 1212 case ETHTOOL_GRXCLSRLCNT: 1213 cmd->rule_cnt = bp->ntp_fltr_count; 1214 cmd->data = BNXT_NTP_FLTR_MAX_FLTR; 1215 break; 1216 1217 case ETHTOOL_GRXCLSRLALL: 1218 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs); 1219 break; 1220 1221 case ETHTOOL_GRXCLSRULE: 1222 rc = bnxt_grxclsrule(bp, cmd); 1223 break; 1224 #endif 1225 1226 case ETHTOOL_GRXFH: 1227 rc = bnxt_grxfh(bp, cmd); 1228 break; 1229 1230 default: 1231 rc = -EOPNOTSUPP; 1232 break; 1233 } 1234 1235 return rc; 1236 } 1237 1238 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 1239 { 1240 struct bnxt *bp = netdev_priv(dev); 1241 int rc; 1242 1243 switch (cmd->cmd) { 1244 case ETHTOOL_SRXFH: 1245 rc = bnxt_srxfh(bp, cmd); 1246 break; 1247 1248 default: 1249 rc = -EOPNOTSUPP; 1250 break; 1251 } 1252 return rc; 1253 } 1254 1255 u32 bnxt_get_rxfh_indir_size(struct net_device *dev) 1256 { 1257 struct bnxt *bp = netdev_priv(dev); 1258 1259 if (bp->flags & BNXT_FLAG_CHIP_P5) 1260 return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5); 1261 return HW_HASH_INDEX_SIZE; 1262 } 1263 1264 static u32 bnxt_get_rxfh_key_size(struct net_device *dev) 1265 { 1266 return HW_HASH_KEY_SIZE; 1267 } 1268 1269 static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, 1270 u8 *hfunc) 1271 { 1272 struct bnxt *bp = netdev_priv(dev); 1273 struct bnxt_vnic_info *vnic; 1274 u32 i, tbl_size; 1275 1276 if (hfunc) 1277 *hfunc = ETH_RSS_HASH_TOP; 1278 1279 if (!bp->vnic_info) 1280 return 0; 1281 1282 vnic = &bp->vnic_info[0]; 1283 if (indir && bp->rss_indir_tbl) { 1284 tbl_size = bnxt_get_rxfh_indir_size(dev); 1285 for (i = 0; i < tbl_size; i++) 1286 indir[i] = bp->rss_indir_tbl[i]; 1287 } 1288 1289 if (key && vnic->rss_hash_key) 1290 memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE); 1291 1292 return 0; 1293 } 1294 1295 static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir, 1296 const u8 *key, const u8 hfunc) 1297 { 1298 struct bnxt *bp = netdev_priv(dev); 1299 int rc = 0; 1300 1301 if (hfunc && hfunc != ETH_RSS_HASH_TOP) 1302 return -EOPNOTSUPP; 1303 1304 if (key) 1305 return -EOPNOTSUPP; 1306 1307 if (indir) { 1308 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev); 1309 1310 for (i = 0; i < tbl_size; i++) 1311 bp->rss_indir_tbl[i] = indir[i]; 1312 pad = bp->rss_indir_tbl_entries - tbl_size; 1313 if (pad) 1314 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); 1315 } 1316 1317 if (netif_running(bp->dev)) { 1318 bnxt_close_nic(bp, false, false); 1319 rc = bnxt_open_nic(bp, false, false); 1320 } 1321 return rc; 1322 } 1323 1324 static void bnxt_get_drvinfo(struct net_device *dev, 1325 struct ethtool_drvinfo *info) 1326 { 1327 struct bnxt *bp = netdev_priv(dev); 1328 1329 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 1330 strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); 1331 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); 1332 info->n_stats = bnxt_get_num_stats(bp); 1333 info->testinfo_len = bp->num_tests; 1334 /* TODO CHIMP_FW: eeprom dump details */ 1335 info->eedump_len = 0; 1336 /* TODO CHIMP FW: reg dump details */ 1337 info->regdump_len = 0; 1338 } 1339 1340 static int bnxt_get_regs_len(struct net_device *dev) 1341 { 1342 struct bnxt *bp = netdev_priv(dev); 1343 int reg_len; 1344 1345 if (!BNXT_PF(bp)) 1346 return -EOPNOTSUPP; 1347 1348 reg_len = BNXT_PXP_REG_LEN; 1349 1350 if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED) 1351 reg_len += sizeof(struct pcie_ctx_hw_stats); 1352 1353 return reg_len; 1354 } 1355 1356 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, 1357 void *_p) 1358 { 1359 struct pcie_ctx_hw_stats *hw_pcie_stats; 1360 struct hwrm_pcie_qstats_input req = {0}; 1361 struct bnxt *bp = netdev_priv(dev); 1362 dma_addr_t hw_pcie_stats_addr; 1363 int rc; 1364 1365 regs->version = 0; 1366 bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p); 1367 1368 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 1369 return; 1370 1371 hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev, 1372 sizeof(*hw_pcie_stats), 1373 &hw_pcie_stats_addr, GFP_KERNEL); 1374 if (!hw_pcie_stats) 1375 return; 1376 1377 regs->version = 1; 1378 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); 1379 req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); 1380 req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); 1381 mutex_lock(&bp->hwrm_cmd_lock); 1382 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1383 if (!rc) { 1384 __le64 *src = (__le64 *)hw_pcie_stats; 1385 u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); 1386 int i; 1387 1388 for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) 1389 dst[i] = le64_to_cpu(src[i]); 1390 } 1391 mutex_unlock(&bp->hwrm_cmd_lock); 1392 dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats, 1393 hw_pcie_stats_addr); 1394 } 1395 1396 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 1397 { 1398 struct bnxt *bp = netdev_priv(dev); 1399 1400 wol->supported = 0; 1401 wol->wolopts = 0; 1402 memset(&wol->sopass, 0, sizeof(wol->sopass)); 1403 if (bp->flags & BNXT_FLAG_WOL_CAP) { 1404 wol->supported = WAKE_MAGIC; 1405 if (bp->wol) 1406 wol->wolopts = WAKE_MAGIC; 1407 } 1408 } 1409 1410 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 1411 { 1412 struct bnxt *bp = netdev_priv(dev); 1413 1414 if (wol->wolopts & ~WAKE_MAGIC) 1415 return -EINVAL; 1416 1417 if (wol->wolopts & WAKE_MAGIC) { 1418 if (!(bp->flags & BNXT_FLAG_WOL_CAP)) 1419 return -EINVAL; 1420 if (!bp->wol) { 1421 if (bnxt_hwrm_alloc_wol_fltr(bp)) 1422 return -EBUSY; 1423 bp->wol = 1; 1424 } 1425 } else { 1426 if (bp->wol) { 1427 if (bnxt_hwrm_free_wol_fltr(bp)) 1428 return -EBUSY; 1429 bp->wol = 0; 1430 } 1431 } 1432 return 0; 1433 } 1434 1435 u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) 1436 { 1437 u32 speed_mask = 0; 1438 1439 /* TODO: support 25GB, 40GB, 50GB with different cable type */ 1440 /* set the advertised speeds */ 1441 if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) 1442 speed_mask |= ADVERTISED_100baseT_Full; 1443 if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) 1444 speed_mask |= ADVERTISED_1000baseT_Full; 1445 if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) 1446 speed_mask |= ADVERTISED_2500baseX_Full; 1447 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 1448 speed_mask |= ADVERTISED_10000baseT_Full; 1449 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 1450 speed_mask |= ADVERTISED_40000baseCR4_Full; 1451 1452 if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH) 1453 speed_mask |= ADVERTISED_Pause; 1454 else if (fw_pause & BNXT_LINK_PAUSE_TX) 1455 speed_mask |= ADVERTISED_Asym_Pause; 1456 else if (fw_pause & BNXT_LINK_PAUSE_RX) 1457 speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; 1458 1459 return speed_mask; 1460 } 1461 1462 #define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\ 1463 { \ 1464 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \ 1465 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1466 100baseT_Full); \ 1467 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \ 1468 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1469 1000baseT_Full); \ 1470 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \ 1471 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1472 10000baseT_Full); \ 1473 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \ 1474 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1475 25000baseCR_Full); \ 1476 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \ 1477 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1478 40000baseCR4_Full);\ 1479 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \ 1480 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1481 50000baseCR2_Full);\ 1482 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB) \ 1483 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1484 100000baseCR4_Full);\ 1485 if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \ 1486 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1487 Pause); \ 1488 if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \ 1489 ethtool_link_ksettings_add_link_mode( \ 1490 lk_ksettings, name, Asym_Pause);\ 1491 } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \ 1492 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1493 Asym_Pause); \ 1494 } \ 1495 } 1496 1497 #define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \ 1498 { \ 1499 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1500 100baseT_Full) || \ 1501 ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1502 100baseT_Half)) \ 1503 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \ 1504 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1505 1000baseT_Full) || \ 1506 ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1507 1000baseT_Half)) \ 1508 (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \ 1509 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1510 10000baseT_Full)) \ 1511 (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \ 1512 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1513 25000baseCR_Full)) \ 1514 (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \ 1515 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1516 40000baseCR4_Full)) \ 1517 (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \ 1518 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1519 50000baseCR2_Full)) \ 1520 (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \ 1521 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1522 100000baseCR4_Full)) \ 1523 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \ 1524 } 1525 1526 #define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) \ 1527 { \ 1528 if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB) \ 1529 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1530 50000baseCR_Full); \ 1531 if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB) \ 1532 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1533 100000baseCR2_Full);\ 1534 if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB) \ 1535 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 1536 200000baseCR4_Full);\ 1537 } 1538 1539 #define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) \ 1540 { \ 1541 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1542 50000baseCR_Full)) \ 1543 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB; \ 1544 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1545 100000baseCR2_Full)) \ 1546 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB; \ 1547 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 1548 200000baseCR4_Full)) \ 1549 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB; \ 1550 } 1551 1552 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info, 1553 struct ethtool_link_ksettings *lk_ksettings) 1554 { 1555 u16 fec_cfg = link_info->fec_cfg; 1556 1557 if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) { 1558 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 1559 lk_ksettings->link_modes.advertising); 1560 return; 1561 } 1562 if (fec_cfg & BNXT_FEC_ENC_BASE_R) 1563 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 1564 lk_ksettings->link_modes.advertising); 1565 if (fec_cfg & BNXT_FEC_ENC_RS) 1566 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 1567 lk_ksettings->link_modes.advertising); 1568 if (fec_cfg & BNXT_FEC_ENC_LLRS) 1569 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 1570 lk_ksettings->link_modes.advertising); 1571 } 1572 1573 static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, 1574 struct ethtool_link_ksettings *lk_ksettings) 1575 { 1576 u16 fw_speeds = link_info->advertising; 1577 u8 fw_pause = 0; 1578 1579 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 1580 fw_pause = link_info->auto_pause_setting; 1581 1582 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising); 1583 fw_speeds = link_info->advertising_pam4; 1584 BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising); 1585 bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings); 1586 } 1587 1588 static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info, 1589 struct ethtool_link_ksettings *lk_ksettings) 1590 { 1591 u16 fw_speeds = link_info->lp_auto_link_speeds; 1592 u8 fw_pause = 0; 1593 1594 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 1595 fw_pause = link_info->lp_pause; 1596 1597 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, 1598 lp_advertising); 1599 fw_speeds = link_info->lp_auto_pam4_link_speeds; 1600 BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising); 1601 } 1602 1603 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info, 1604 struct ethtool_link_ksettings *lk_ksettings) 1605 { 1606 u16 fec_cfg = link_info->fec_cfg; 1607 1608 if (fec_cfg & BNXT_FEC_NONE) { 1609 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 1610 lk_ksettings->link_modes.supported); 1611 return; 1612 } 1613 if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP) 1614 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 1615 lk_ksettings->link_modes.supported); 1616 if (fec_cfg & BNXT_FEC_ENC_RS_CAP) 1617 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 1618 lk_ksettings->link_modes.supported); 1619 if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP) 1620 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 1621 lk_ksettings->link_modes.supported); 1622 } 1623 1624 static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, 1625 struct ethtool_link_ksettings *lk_ksettings) 1626 { 1627 u16 fw_speeds = link_info->support_speeds; 1628 1629 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported); 1630 fw_speeds = link_info->support_pam4_speeds; 1631 BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported); 1632 1633 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause); 1634 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 1635 Asym_Pause); 1636 1637 if (link_info->support_auto_speeds || 1638 link_info->support_pam4_auto_speeds) 1639 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 1640 Autoneg); 1641 bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings); 1642 } 1643 1644 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) 1645 { 1646 switch (fw_link_speed) { 1647 case BNXT_LINK_SPEED_100MB: 1648 return SPEED_100; 1649 case BNXT_LINK_SPEED_1GB: 1650 return SPEED_1000; 1651 case BNXT_LINK_SPEED_2_5GB: 1652 return SPEED_2500; 1653 case BNXT_LINK_SPEED_10GB: 1654 return SPEED_10000; 1655 case BNXT_LINK_SPEED_20GB: 1656 return SPEED_20000; 1657 case BNXT_LINK_SPEED_25GB: 1658 return SPEED_25000; 1659 case BNXT_LINK_SPEED_40GB: 1660 return SPEED_40000; 1661 case BNXT_LINK_SPEED_50GB: 1662 return SPEED_50000; 1663 case BNXT_LINK_SPEED_100GB: 1664 return SPEED_100000; 1665 default: 1666 return SPEED_UNKNOWN; 1667 } 1668 } 1669 1670 static int bnxt_get_link_ksettings(struct net_device *dev, 1671 struct ethtool_link_ksettings *lk_ksettings) 1672 { 1673 struct bnxt *bp = netdev_priv(dev); 1674 struct bnxt_link_info *link_info = &bp->link_info; 1675 struct ethtool_link_settings *base = &lk_ksettings->base; 1676 u32 ethtool_speed; 1677 1678 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); 1679 mutex_lock(&bp->link_lock); 1680 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); 1681 1682 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); 1683 if (link_info->autoneg) { 1684 bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings); 1685 ethtool_link_ksettings_add_link_mode(lk_ksettings, 1686 advertising, Autoneg); 1687 base->autoneg = AUTONEG_ENABLE; 1688 base->duplex = DUPLEX_UNKNOWN; 1689 if (link_info->phy_link_status == BNXT_LINK_LINK) { 1690 bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings); 1691 if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) 1692 base->duplex = DUPLEX_FULL; 1693 else 1694 base->duplex = DUPLEX_HALF; 1695 } 1696 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); 1697 } else { 1698 base->autoneg = AUTONEG_DISABLE; 1699 ethtool_speed = 1700 bnxt_fw_to_ethtool_speed(link_info->req_link_speed); 1701 base->duplex = DUPLEX_HALF; 1702 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) 1703 base->duplex = DUPLEX_FULL; 1704 } 1705 base->speed = ethtool_speed; 1706 1707 base->port = PORT_NONE; 1708 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { 1709 base->port = PORT_TP; 1710 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 1711 TP); 1712 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, 1713 TP); 1714 } else { 1715 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 1716 FIBRE); 1717 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, 1718 FIBRE); 1719 1720 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) 1721 base->port = PORT_DA; 1722 else if (link_info->media_type == 1723 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) 1724 base->port = PORT_FIBRE; 1725 } 1726 base->phy_address = link_info->phy_addr; 1727 mutex_unlock(&bp->link_lock); 1728 1729 return 0; 1730 } 1731 1732 static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed) 1733 { 1734 struct bnxt *bp = netdev_priv(dev); 1735 struct bnxt_link_info *link_info = &bp->link_info; 1736 u16 support_pam4_spds = link_info->support_pam4_speeds; 1737 u16 support_spds = link_info->support_speeds; 1738 u8 sig_mode = BNXT_SIG_MODE_NRZ; 1739 u16 fw_speed = 0; 1740 1741 switch (ethtool_speed) { 1742 case SPEED_100: 1743 if (support_spds & BNXT_LINK_SPEED_MSK_100MB) 1744 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB; 1745 break; 1746 case SPEED_1000: 1747 if (support_spds & BNXT_LINK_SPEED_MSK_1GB) 1748 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; 1749 break; 1750 case SPEED_2500: 1751 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB) 1752 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB; 1753 break; 1754 case SPEED_10000: 1755 if (support_spds & BNXT_LINK_SPEED_MSK_10GB) 1756 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 1757 break; 1758 case SPEED_20000: 1759 if (support_spds & BNXT_LINK_SPEED_MSK_20GB) 1760 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB; 1761 break; 1762 case SPEED_25000: 1763 if (support_spds & BNXT_LINK_SPEED_MSK_25GB) 1764 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 1765 break; 1766 case SPEED_40000: 1767 if (support_spds & BNXT_LINK_SPEED_MSK_40GB) 1768 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 1769 break; 1770 case SPEED_50000: 1771 if (support_spds & BNXT_LINK_SPEED_MSK_50GB) { 1772 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 1773 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) { 1774 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB; 1775 sig_mode = BNXT_SIG_MODE_PAM4; 1776 } 1777 break; 1778 case SPEED_100000: 1779 if (support_spds & BNXT_LINK_SPEED_MSK_100GB) { 1780 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; 1781 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) { 1782 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB; 1783 sig_mode = BNXT_SIG_MODE_PAM4; 1784 } 1785 break; 1786 case SPEED_200000: 1787 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) { 1788 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB; 1789 sig_mode = BNXT_SIG_MODE_PAM4; 1790 } 1791 break; 1792 } 1793 1794 if (!fw_speed) { 1795 netdev_err(dev, "unsupported speed!\n"); 1796 return -EINVAL; 1797 } 1798 1799 if (link_info->req_link_speed == fw_speed && 1800 link_info->req_signal_mode == sig_mode && 1801 link_info->autoneg == 0) 1802 return -EALREADY; 1803 1804 link_info->req_link_speed = fw_speed; 1805 link_info->req_signal_mode = sig_mode; 1806 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 1807 link_info->autoneg = 0; 1808 link_info->advertising = 0; 1809 link_info->advertising_pam4 = 0; 1810 1811 return 0; 1812 } 1813 1814 u16 bnxt_get_fw_auto_link_speeds(u32 advertising) 1815 { 1816 u16 fw_speed_mask = 0; 1817 1818 /* only support autoneg at speed 100, 1000, and 10000 */ 1819 if (advertising & (ADVERTISED_100baseT_Full | 1820 ADVERTISED_100baseT_Half)) { 1821 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB; 1822 } 1823 if (advertising & (ADVERTISED_1000baseT_Full | 1824 ADVERTISED_1000baseT_Half)) { 1825 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB; 1826 } 1827 if (advertising & ADVERTISED_10000baseT_Full) 1828 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; 1829 1830 if (advertising & ADVERTISED_40000baseCR4_Full) 1831 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB; 1832 1833 return fw_speed_mask; 1834 } 1835 1836 static int bnxt_set_link_ksettings(struct net_device *dev, 1837 const struct ethtool_link_ksettings *lk_ksettings) 1838 { 1839 struct bnxt *bp = netdev_priv(dev); 1840 struct bnxt_link_info *link_info = &bp->link_info; 1841 const struct ethtool_link_settings *base = &lk_ksettings->base; 1842 bool set_pause = false; 1843 u32 speed; 1844 int rc = 0; 1845 1846 if (!BNXT_PHY_CFG_ABLE(bp)) 1847 return -EOPNOTSUPP; 1848 1849 mutex_lock(&bp->link_lock); 1850 if (base->autoneg == AUTONEG_ENABLE) { 1851 link_info->advertising = 0; 1852 link_info->advertising_pam4 = 0; 1853 BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings, 1854 advertising); 1855 BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4, 1856 lk_ksettings, advertising); 1857 link_info->autoneg |= BNXT_AUTONEG_SPEED; 1858 if (!link_info->advertising && !link_info->advertising_pam4) { 1859 link_info->advertising = link_info->support_auto_speeds; 1860 link_info->advertising_pam4 = 1861 link_info->support_pam4_auto_speeds; 1862 } 1863 /* any change to autoneg will cause link change, therefore the 1864 * driver should put back the original pause setting in autoneg 1865 */ 1866 set_pause = true; 1867 } else { 1868 u8 phy_type = link_info->phy_type; 1869 1870 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || 1871 phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE || 1872 link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { 1873 netdev_err(dev, "10GBase-T devices must autoneg\n"); 1874 rc = -EINVAL; 1875 goto set_setting_exit; 1876 } 1877 if (base->duplex == DUPLEX_HALF) { 1878 netdev_err(dev, "HALF DUPLEX is not supported!\n"); 1879 rc = -EINVAL; 1880 goto set_setting_exit; 1881 } 1882 speed = base->speed; 1883 rc = bnxt_force_link_speed(dev, speed); 1884 if (rc) { 1885 if (rc == -EALREADY) 1886 rc = 0; 1887 goto set_setting_exit; 1888 } 1889 } 1890 1891 if (netif_running(dev)) 1892 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); 1893 1894 set_setting_exit: 1895 mutex_unlock(&bp->link_lock); 1896 return rc; 1897 } 1898 1899 static int bnxt_get_fecparam(struct net_device *dev, 1900 struct ethtool_fecparam *fec) 1901 { 1902 struct bnxt *bp = netdev_priv(dev); 1903 struct bnxt_link_info *link_info; 1904 u8 active_fec; 1905 u16 fec_cfg; 1906 1907 link_info = &bp->link_info; 1908 fec_cfg = link_info->fec_cfg; 1909 active_fec = link_info->active_fec_sig_mode & 1910 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 1911 if (fec_cfg & BNXT_FEC_NONE) { 1912 fec->fec = ETHTOOL_FEC_NONE; 1913 fec->active_fec = ETHTOOL_FEC_NONE; 1914 return 0; 1915 } 1916 if (fec_cfg & BNXT_FEC_AUTONEG) 1917 fec->fec |= ETHTOOL_FEC_AUTO; 1918 if (fec_cfg & BNXT_FEC_ENC_BASE_R) 1919 fec->fec |= ETHTOOL_FEC_BASER; 1920 if (fec_cfg & BNXT_FEC_ENC_RS) 1921 fec->fec |= ETHTOOL_FEC_RS; 1922 if (fec_cfg & BNXT_FEC_ENC_LLRS) 1923 fec->fec |= ETHTOOL_FEC_LLRS; 1924 1925 switch (active_fec) { 1926 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 1927 fec->active_fec |= ETHTOOL_FEC_BASER; 1928 break; 1929 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 1930 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 1931 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 1932 fec->active_fec |= ETHTOOL_FEC_RS; 1933 break; 1934 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 1935 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 1936 fec->active_fec |= ETHTOOL_FEC_LLRS; 1937 break; 1938 } 1939 return 0; 1940 } 1941 1942 static void bnxt_get_fec_stats(struct net_device *dev, 1943 struct ethtool_fec_stats *fec_stats) 1944 { 1945 struct bnxt *bp = netdev_priv(dev); 1946 u64 *rx; 1947 1948 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 1949 return; 1950 1951 rx = bp->rx_port_stats_ext.sw_stats; 1952 fec_stats->corrected_bits.total = 1953 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits)); 1954 } 1955 1956 static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, 1957 u32 fec) 1958 { 1959 u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE; 1960 1961 if (fec & ETHTOOL_FEC_BASER) 1962 fw_fec |= BNXT_FEC_BASE_R_ON(link_info); 1963 else if (fec & ETHTOOL_FEC_RS) 1964 fw_fec |= BNXT_FEC_RS_ON(link_info); 1965 else if (fec & ETHTOOL_FEC_LLRS) 1966 fw_fec |= BNXT_FEC_LLRS_ON; 1967 return fw_fec; 1968 } 1969 1970 static int bnxt_set_fecparam(struct net_device *dev, 1971 struct ethtool_fecparam *fecparam) 1972 { 1973 struct hwrm_port_phy_cfg_input req = {0}; 1974 struct bnxt *bp = netdev_priv(dev); 1975 struct bnxt_link_info *link_info; 1976 u32 new_cfg, fec = fecparam->fec; 1977 u16 fec_cfg; 1978 int rc; 1979 1980 link_info = &bp->link_info; 1981 fec_cfg = link_info->fec_cfg; 1982 if (fec_cfg & BNXT_FEC_NONE) 1983 return -EOPNOTSUPP; 1984 1985 if (fec & ETHTOOL_FEC_OFF) { 1986 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE | 1987 BNXT_FEC_ALL_OFF(link_info); 1988 goto apply_fec; 1989 } 1990 if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) || 1991 ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) || 1992 ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) || 1993 ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP))) 1994 return -EINVAL; 1995 1996 if (fec & ETHTOOL_FEC_AUTO) { 1997 if (!link_info->autoneg) 1998 return -EINVAL; 1999 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE; 2000 } else { 2001 new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec); 2002 } 2003 2004 apply_fec: 2005 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 2006 req.flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 2007 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2008 /* update current settings */ 2009 if (!rc) { 2010 mutex_lock(&bp->link_lock); 2011 bnxt_update_link(bp, false); 2012 mutex_unlock(&bp->link_lock); 2013 } 2014 return rc; 2015 } 2016 2017 static void bnxt_get_pauseparam(struct net_device *dev, 2018 struct ethtool_pauseparam *epause) 2019 { 2020 struct bnxt *bp = netdev_priv(dev); 2021 struct bnxt_link_info *link_info = &bp->link_info; 2022 2023 if (BNXT_VF(bp)) 2024 return; 2025 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); 2026 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX); 2027 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX); 2028 } 2029 2030 static void bnxt_get_pause_stats(struct net_device *dev, 2031 struct ethtool_pause_stats *epstat) 2032 { 2033 struct bnxt *bp = netdev_priv(dev); 2034 u64 *rx, *tx; 2035 2036 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 2037 return; 2038 2039 rx = bp->port_stats.sw_stats; 2040 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 2041 2042 epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames); 2043 epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames); 2044 } 2045 2046 static int bnxt_set_pauseparam(struct net_device *dev, 2047 struct ethtool_pauseparam *epause) 2048 { 2049 int rc = 0; 2050 struct bnxt *bp = netdev_priv(dev); 2051 struct bnxt_link_info *link_info = &bp->link_info; 2052 2053 if (!BNXT_PHY_CFG_ABLE(bp)) 2054 return -EOPNOTSUPP; 2055 2056 mutex_lock(&bp->link_lock); 2057 if (epause->autoneg) { 2058 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 2059 rc = -EINVAL; 2060 goto pause_exit; 2061 } 2062 2063 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 2064 if (bp->hwrm_spec_code >= 0x10201) 2065 link_info->req_flow_ctrl = 2066 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 2067 } else { 2068 /* when transition from auto pause to force pause, 2069 * force a link change 2070 */ 2071 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 2072 link_info->force_link_chng = true; 2073 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL; 2074 link_info->req_flow_ctrl = 0; 2075 } 2076 if (epause->rx_pause) 2077 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX; 2078 2079 if (epause->tx_pause) 2080 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; 2081 2082 if (netif_running(dev)) 2083 rc = bnxt_hwrm_set_pause(bp); 2084 2085 pause_exit: 2086 mutex_unlock(&bp->link_lock); 2087 return rc; 2088 } 2089 2090 static u32 bnxt_get_link(struct net_device *dev) 2091 { 2092 struct bnxt *bp = netdev_priv(dev); 2093 2094 /* TODO: handle MF, VF, driver close case */ 2095 return bp->link_info.link_up; 2096 } 2097 2098 int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, 2099 struct hwrm_nvm_get_dev_info_output *nvm_dev_info) 2100 { 2101 struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr; 2102 struct hwrm_nvm_get_dev_info_input req = {0}; 2103 int rc; 2104 2105 if (BNXT_VF(bp)) 2106 return -EOPNOTSUPP; 2107 2108 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1); 2109 mutex_lock(&bp->hwrm_cmd_lock); 2110 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2111 if (!rc) 2112 memcpy(nvm_dev_info, resp, sizeof(*resp)); 2113 mutex_unlock(&bp->hwrm_cmd_lock); 2114 return rc; 2115 } 2116 2117 static void bnxt_print_admin_err(struct bnxt *bp) 2118 { 2119 netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n"); 2120 } 2121 2122 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, 2123 u16 ext, u16 *index, u32 *item_length, 2124 u32 *data_length); 2125 2126 static int __bnxt_flash_nvram(struct net_device *dev, u16 dir_type, 2127 u16 dir_ordinal, u16 dir_ext, u16 dir_attr, 2128 u32 dir_item_len, const u8 *data, 2129 size_t data_len) 2130 { 2131 struct bnxt *bp = netdev_priv(dev); 2132 int rc; 2133 struct hwrm_nvm_write_input req = {0}; 2134 dma_addr_t dma_handle; 2135 u8 *kmem = NULL; 2136 2137 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1); 2138 2139 req.dir_type = cpu_to_le16(dir_type); 2140 req.dir_ordinal = cpu_to_le16(dir_ordinal); 2141 req.dir_ext = cpu_to_le16(dir_ext); 2142 req.dir_attr = cpu_to_le16(dir_attr); 2143 req.dir_item_length = cpu_to_le32(dir_item_len); 2144 if (data_len && data) { 2145 req.dir_data_length = cpu_to_le32(data_len); 2146 2147 kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle, 2148 GFP_KERNEL); 2149 if (!kmem) 2150 return -ENOMEM; 2151 2152 memcpy(kmem, data, data_len); 2153 req.host_src_addr = cpu_to_le64(dma_handle); 2154 } 2155 2156 rc = _hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); 2157 if (kmem) 2158 dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); 2159 2160 if (rc == -EACCES) 2161 bnxt_print_admin_err(bp); 2162 return rc; 2163 } 2164 2165 static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, 2166 u16 dir_ordinal, u16 dir_ext, u16 dir_attr, 2167 const u8 *data, size_t data_len) 2168 { 2169 struct bnxt *bp = netdev_priv(dev); 2170 int rc; 2171 2172 mutex_lock(&bp->hwrm_cmd_lock); 2173 rc = __bnxt_flash_nvram(dev, dir_type, dir_ordinal, dir_ext, dir_attr, 2174 0, data, data_len); 2175 mutex_unlock(&bp->hwrm_cmd_lock); 2176 return rc; 2177 } 2178 2179 static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, 2180 u8 self_reset, u8 flags) 2181 { 2182 struct hwrm_fw_reset_input req = {0}; 2183 struct bnxt *bp = netdev_priv(dev); 2184 int rc; 2185 2186 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); 2187 2188 req.embedded_proc_type = proc_type; 2189 req.selfrst_status = self_reset; 2190 req.flags = flags; 2191 2192 if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) { 2193 rc = hwrm_send_message_silent(bp, &req, sizeof(req), 2194 HWRM_CMD_TIMEOUT); 2195 } else { 2196 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2197 if (rc == -EACCES) 2198 bnxt_print_admin_err(bp); 2199 } 2200 return rc; 2201 } 2202 2203 static int bnxt_firmware_reset(struct net_device *dev, 2204 enum bnxt_nvm_directory_type dir_type) 2205 { 2206 u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE; 2207 u8 proc_type, flags = 0; 2208 2209 /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ 2210 /* (e.g. when firmware isn't already running) */ 2211 switch (dir_type) { 2212 case BNX_DIR_TYPE_CHIMP_PATCH: 2213 case BNX_DIR_TYPE_BOOTCODE: 2214 case BNX_DIR_TYPE_BOOTCODE_2: 2215 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; 2216 /* Self-reset ChiMP upon next PCIe reset: */ 2217 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; 2218 break; 2219 case BNX_DIR_TYPE_APE_FW: 2220 case BNX_DIR_TYPE_APE_PATCH: 2221 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; 2222 /* Self-reset APE upon next PCIe reset: */ 2223 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; 2224 break; 2225 case BNX_DIR_TYPE_KONG_FW: 2226 case BNX_DIR_TYPE_KONG_PATCH: 2227 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; 2228 break; 2229 case BNX_DIR_TYPE_BONO_FW: 2230 case BNX_DIR_TYPE_BONO_PATCH: 2231 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; 2232 break; 2233 default: 2234 return -EINVAL; 2235 } 2236 2237 return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags); 2238 } 2239 2240 static int bnxt_firmware_reset_chip(struct net_device *dev) 2241 { 2242 struct bnxt *bp = netdev_priv(dev); 2243 u8 flags = 0; 2244 2245 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 2246 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 2247 2248 return bnxt_hwrm_firmware_reset(dev, 2249 FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP, 2250 FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP, 2251 flags); 2252 } 2253 2254 static int bnxt_firmware_reset_ap(struct net_device *dev) 2255 { 2256 return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP, 2257 FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE, 2258 0); 2259 } 2260 2261 static int bnxt_flash_firmware(struct net_device *dev, 2262 u16 dir_type, 2263 const u8 *fw_data, 2264 size_t fw_size) 2265 { 2266 int rc = 0; 2267 u16 code_type; 2268 u32 stored_crc; 2269 u32 calculated_crc; 2270 struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data; 2271 2272 switch (dir_type) { 2273 case BNX_DIR_TYPE_BOOTCODE: 2274 case BNX_DIR_TYPE_BOOTCODE_2: 2275 code_type = CODE_BOOT; 2276 break; 2277 case BNX_DIR_TYPE_CHIMP_PATCH: 2278 code_type = CODE_CHIMP_PATCH; 2279 break; 2280 case BNX_DIR_TYPE_APE_FW: 2281 code_type = CODE_MCTP_PASSTHRU; 2282 break; 2283 case BNX_DIR_TYPE_APE_PATCH: 2284 code_type = CODE_APE_PATCH; 2285 break; 2286 case BNX_DIR_TYPE_KONG_FW: 2287 code_type = CODE_KONG_FW; 2288 break; 2289 case BNX_DIR_TYPE_KONG_PATCH: 2290 code_type = CODE_KONG_PATCH; 2291 break; 2292 case BNX_DIR_TYPE_BONO_FW: 2293 code_type = CODE_BONO_FW; 2294 break; 2295 case BNX_DIR_TYPE_BONO_PATCH: 2296 code_type = CODE_BONO_PATCH; 2297 break; 2298 default: 2299 netdev_err(dev, "Unsupported directory entry type: %u\n", 2300 dir_type); 2301 return -EINVAL; 2302 } 2303 if (fw_size < sizeof(struct bnxt_fw_header)) { 2304 netdev_err(dev, "Invalid firmware file size: %u\n", 2305 (unsigned int)fw_size); 2306 return -EINVAL; 2307 } 2308 if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) { 2309 netdev_err(dev, "Invalid firmware signature: %08X\n", 2310 le32_to_cpu(header->signature)); 2311 return -EINVAL; 2312 } 2313 if (header->code_type != code_type) { 2314 netdev_err(dev, "Expected firmware type: %d, read: %d\n", 2315 code_type, header->code_type); 2316 return -EINVAL; 2317 } 2318 if (header->device != DEVICE_CUMULUS_FAMILY) { 2319 netdev_err(dev, "Expected firmware device family %d, read: %d\n", 2320 DEVICE_CUMULUS_FAMILY, header->device); 2321 return -EINVAL; 2322 } 2323 /* Confirm the CRC32 checksum of the file: */ 2324 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - 2325 sizeof(stored_crc))); 2326 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); 2327 if (calculated_crc != stored_crc) { 2328 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n", 2329 (unsigned long)stored_crc, 2330 (unsigned long)calculated_crc); 2331 return -EINVAL; 2332 } 2333 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 2334 0, 0, fw_data, fw_size); 2335 if (rc == 0) /* Firmware update successful */ 2336 rc = bnxt_firmware_reset(dev, dir_type); 2337 2338 return rc; 2339 } 2340 2341 static int bnxt_flash_microcode(struct net_device *dev, 2342 u16 dir_type, 2343 const u8 *fw_data, 2344 size_t fw_size) 2345 { 2346 struct bnxt_ucode_trailer *trailer; 2347 u32 calculated_crc; 2348 u32 stored_crc; 2349 int rc = 0; 2350 2351 if (fw_size < sizeof(struct bnxt_ucode_trailer)) { 2352 netdev_err(dev, "Invalid microcode file size: %u\n", 2353 (unsigned int)fw_size); 2354 return -EINVAL; 2355 } 2356 trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size - 2357 sizeof(*trailer))); 2358 if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) { 2359 netdev_err(dev, "Invalid microcode trailer signature: %08X\n", 2360 le32_to_cpu(trailer->sig)); 2361 return -EINVAL; 2362 } 2363 if (le16_to_cpu(trailer->dir_type) != dir_type) { 2364 netdev_err(dev, "Expected microcode type: %d, read: %d\n", 2365 dir_type, le16_to_cpu(trailer->dir_type)); 2366 return -EINVAL; 2367 } 2368 if (le16_to_cpu(trailer->trailer_length) < 2369 sizeof(struct bnxt_ucode_trailer)) { 2370 netdev_err(dev, "Invalid microcode trailer length: %d\n", 2371 le16_to_cpu(trailer->trailer_length)); 2372 return -EINVAL; 2373 } 2374 2375 /* Confirm the CRC32 checksum of the file: */ 2376 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - 2377 sizeof(stored_crc))); 2378 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); 2379 if (calculated_crc != stored_crc) { 2380 netdev_err(dev, 2381 "CRC32 (%08lX) does not match calculated: %08lX\n", 2382 (unsigned long)stored_crc, 2383 (unsigned long)calculated_crc); 2384 return -EINVAL; 2385 } 2386 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 2387 0, 0, fw_data, fw_size); 2388 2389 return rc; 2390 } 2391 2392 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type) 2393 { 2394 switch (dir_type) { 2395 case BNX_DIR_TYPE_CHIMP_PATCH: 2396 case BNX_DIR_TYPE_BOOTCODE: 2397 case BNX_DIR_TYPE_BOOTCODE_2: 2398 case BNX_DIR_TYPE_APE_FW: 2399 case BNX_DIR_TYPE_APE_PATCH: 2400 case BNX_DIR_TYPE_KONG_FW: 2401 case BNX_DIR_TYPE_KONG_PATCH: 2402 case BNX_DIR_TYPE_BONO_FW: 2403 case BNX_DIR_TYPE_BONO_PATCH: 2404 return true; 2405 } 2406 2407 return false; 2408 } 2409 2410 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type) 2411 { 2412 switch (dir_type) { 2413 case BNX_DIR_TYPE_AVS: 2414 case BNX_DIR_TYPE_EXP_ROM_MBA: 2415 case BNX_DIR_TYPE_PCIE: 2416 case BNX_DIR_TYPE_TSCF_UCODE: 2417 case BNX_DIR_TYPE_EXT_PHY: 2418 case BNX_DIR_TYPE_CCM: 2419 case BNX_DIR_TYPE_ISCSI_BOOT: 2420 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 2421 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 2422 return true; 2423 } 2424 2425 return false; 2426 } 2427 2428 static bool bnxt_dir_type_is_executable(u16 dir_type) 2429 { 2430 return bnxt_dir_type_is_ape_bin_format(dir_type) || 2431 bnxt_dir_type_is_other_exec_format(dir_type); 2432 } 2433 2434 static int bnxt_flash_firmware_from_file(struct net_device *dev, 2435 u16 dir_type, 2436 const char *filename) 2437 { 2438 const struct firmware *fw; 2439 int rc; 2440 2441 rc = request_firmware(&fw, filename, &dev->dev); 2442 if (rc != 0) { 2443 netdev_err(dev, "Error %d requesting firmware file: %s\n", 2444 rc, filename); 2445 return rc; 2446 } 2447 if (bnxt_dir_type_is_ape_bin_format(dir_type)) 2448 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size); 2449 else if (bnxt_dir_type_is_other_exec_format(dir_type)) 2450 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size); 2451 else 2452 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 2453 0, 0, fw->data, fw->size); 2454 release_firmware(fw); 2455 return rc; 2456 } 2457 2458 #define BNXT_PKG_DMA_SIZE 0x40000 2459 #define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE)) 2460 #define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST)) 2461 2462 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, 2463 u32 install_type) 2464 { 2465 struct hwrm_nvm_install_update_input install = {0}; 2466 struct hwrm_nvm_install_update_output resp = {0}; 2467 struct hwrm_nvm_modify_input modify = {0}; 2468 struct bnxt *bp = netdev_priv(dev); 2469 bool defrag_attempted = false; 2470 dma_addr_t dma_handle; 2471 u8 *kmem = NULL; 2472 u32 modify_len; 2473 u32 item_len; 2474 int rc = 0; 2475 u16 index; 2476 2477 bnxt_hwrm_fw_set_time(bp); 2478 2479 bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1); 2480 2481 /* Try allocating a large DMA buffer first. Older fw will 2482 * cause excessive NVRAM erases when using small blocks. 2483 */ 2484 modify_len = roundup_pow_of_two(fw->size); 2485 modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE); 2486 while (1) { 2487 kmem = dma_alloc_coherent(&bp->pdev->dev, modify_len, 2488 &dma_handle, GFP_KERNEL); 2489 if (!kmem && modify_len > PAGE_SIZE) 2490 modify_len /= 2; 2491 else 2492 break; 2493 } 2494 if (!kmem) 2495 return -ENOMEM; 2496 2497 modify.host_src_addr = cpu_to_le64(dma_handle); 2498 2499 bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1); 2500 if ((install_type & 0xffff) == 0) 2501 install_type >>= 16; 2502 install.install_type = cpu_to_le32(install_type); 2503 2504 do { 2505 u32 copied = 0, len = modify_len; 2506 2507 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 2508 BNX_DIR_ORDINAL_FIRST, 2509 BNX_DIR_EXT_NONE, 2510 &index, &item_len, NULL); 2511 if (rc) { 2512 netdev_err(dev, "PKG update area not created in nvram\n"); 2513 break; 2514 } 2515 if (fw->size > item_len) { 2516 netdev_err(dev, "PKG insufficient update area in nvram: %lu\n", 2517 (unsigned long)fw->size); 2518 rc = -EFBIG; 2519 break; 2520 } 2521 2522 modify.dir_idx = cpu_to_le16(index); 2523 2524 if (fw->size > modify_len) 2525 modify.flags = BNXT_NVM_MORE_FLAG; 2526 while (copied < fw->size) { 2527 u32 balance = fw->size - copied; 2528 2529 if (balance <= modify_len) { 2530 len = balance; 2531 if (copied) 2532 modify.flags |= BNXT_NVM_LAST_FLAG; 2533 } 2534 memcpy(kmem, fw->data + copied, len); 2535 modify.len = cpu_to_le32(len); 2536 modify.offset = cpu_to_le32(copied); 2537 rc = hwrm_send_message(bp, &modify, sizeof(modify), 2538 FLASH_PACKAGE_TIMEOUT); 2539 if (rc) 2540 goto pkg_abort; 2541 copied += len; 2542 } 2543 mutex_lock(&bp->hwrm_cmd_lock); 2544 rc = _hwrm_send_message_silent(bp, &install, sizeof(install), 2545 INSTALL_PACKAGE_TIMEOUT); 2546 memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); 2547 2548 if (defrag_attempted) { 2549 /* We have tried to defragment already in the previous 2550 * iteration. Return with the result for INSTALL_UPDATE 2551 */ 2552 mutex_unlock(&bp->hwrm_cmd_lock); 2553 break; 2554 } 2555 2556 if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == 2557 NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { 2558 install.flags = 2559 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); 2560 2561 rc = _hwrm_send_message_silent(bp, &install, 2562 sizeof(install), 2563 INSTALL_PACKAGE_TIMEOUT); 2564 memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); 2565 2566 if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == 2567 NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { 2568 /* FW has cleared NVM area, driver will create 2569 * UPDATE directory and try the flash again 2570 */ 2571 defrag_attempted = true; 2572 install.flags = 0; 2573 rc = __bnxt_flash_nvram(bp->dev, 2574 BNX_DIR_TYPE_UPDATE, 2575 BNX_DIR_ORDINAL_FIRST, 2576 0, 0, item_len, NULL, 2577 0); 2578 } else if (rc) { 2579 netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); 2580 } 2581 } else if (rc) { 2582 netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); 2583 } 2584 mutex_unlock(&bp->hwrm_cmd_lock); 2585 } while (defrag_attempted && !rc); 2586 2587 pkg_abort: 2588 dma_free_coherent(&bp->pdev->dev, modify_len, kmem, dma_handle); 2589 if (resp.result) { 2590 netdev_err(dev, "PKG install error = %d, problem_item = %d\n", 2591 (s8)resp.result, (int)resp.problem_item); 2592 rc = -ENOPKG; 2593 } 2594 if (rc == -EACCES) 2595 bnxt_print_admin_err(bp); 2596 return rc; 2597 } 2598 2599 static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, 2600 u32 install_type) 2601 { 2602 const struct firmware *fw; 2603 int rc; 2604 2605 rc = request_firmware(&fw, filename, &dev->dev); 2606 if (rc != 0) { 2607 netdev_err(dev, "PKG error %d requesting file: %s\n", 2608 rc, filename); 2609 return rc; 2610 } 2611 2612 rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type); 2613 2614 release_firmware(fw); 2615 2616 return rc; 2617 } 2618 2619 static int bnxt_flash_device(struct net_device *dev, 2620 struct ethtool_flash *flash) 2621 { 2622 if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) { 2623 netdev_err(dev, "flashdev not supported from a virtual function\n"); 2624 return -EINVAL; 2625 } 2626 2627 if (flash->region == ETHTOOL_FLASH_ALL_REGIONS || 2628 flash->region > 0xffff) 2629 return bnxt_flash_package_from_file(dev, flash->data, 2630 flash->region); 2631 2632 return bnxt_flash_firmware_from_file(dev, flash->region, flash->data); 2633 } 2634 2635 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) 2636 { 2637 struct bnxt *bp = netdev_priv(dev); 2638 int rc; 2639 struct hwrm_nvm_get_dir_info_input req = {0}; 2640 struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr; 2641 2642 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1); 2643 2644 mutex_lock(&bp->hwrm_cmd_lock); 2645 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2646 if (!rc) { 2647 *entries = le32_to_cpu(output->entries); 2648 *length = le32_to_cpu(output->entry_length); 2649 } 2650 mutex_unlock(&bp->hwrm_cmd_lock); 2651 return rc; 2652 } 2653 2654 static int bnxt_get_eeprom_len(struct net_device *dev) 2655 { 2656 struct bnxt *bp = netdev_priv(dev); 2657 2658 if (BNXT_VF(bp)) 2659 return 0; 2660 2661 /* The -1 return value allows the entire 32-bit range of offsets to be 2662 * passed via the ethtool command-line utility. 2663 */ 2664 return -1; 2665 } 2666 2667 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) 2668 { 2669 struct bnxt *bp = netdev_priv(dev); 2670 int rc; 2671 u32 dir_entries; 2672 u32 entry_length; 2673 u8 *buf; 2674 size_t buflen; 2675 dma_addr_t dma_handle; 2676 struct hwrm_nvm_get_dir_entries_input req = {0}; 2677 2678 rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); 2679 if (rc != 0) 2680 return rc; 2681 2682 if (!dir_entries || !entry_length) 2683 return -EIO; 2684 2685 /* Insert 2 bytes of directory info (count and size of entries) */ 2686 if (len < 2) 2687 return -EINVAL; 2688 2689 *data++ = dir_entries; 2690 *data++ = entry_length; 2691 len -= 2; 2692 memset(data, 0xff, len); 2693 2694 buflen = dir_entries * entry_length; 2695 buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle, 2696 GFP_KERNEL); 2697 if (!buf) { 2698 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", 2699 (unsigned)buflen); 2700 return -ENOMEM; 2701 } 2702 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1); 2703 req.host_dest_addr = cpu_to_le64(dma_handle); 2704 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2705 if (rc == 0) 2706 memcpy(data, buf, len > buflen ? buflen : len); 2707 dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle); 2708 return rc; 2709 } 2710 2711 static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, 2712 u32 length, u8 *data) 2713 { 2714 struct bnxt *bp = netdev_priv(dev); 2715 int rc; 2716 u8 *buf; 2717 dma_addr_t dma_handle; 2718 struct hwrm_nvm_read_input req = {0}; 2719 2720 if (!length) 2721 return -EINVAL; 2722 2723 buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle, 2724 GFP_KERNEL); 2725 if (!buf) { 2726 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", 2727 (unsigned)length); 2728 return -ENOMEM; 2729 } 2730 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1); 2731 req.host_dest_addr = cpu_to_le64(dma_handle); 2732 req.dir_idx = cpu_to_le16(index); 2733 req.offset = cpu_to_le32(offset); 2734 req.len = cpu_to_le32(length); 2735 2736 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2737 if (rc == 0) 2738 memcpy(data, buf, length); 2739 dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle); 2740 return rc; 2741 } 2742 2743 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, 2744 u16 ext, u16 *index, u32 *item_length, 2745 u32 *data_length) 2746 { 2747 struct bnxt *bp = netdev_priv(dev); 2748 int rc; 2749 struct hwrm_nvm_find_dir_entry_input req = {0}; 2750 struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr; 2751 2752 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1); 2753 req.enables = 0; 2754 req.dir_idx = 0; 2755 req.dir_type = cpu_to_le16(type); 2756 req.dir_ordinal = cpu_to_le16(ordinal); 2757 req.dir_ext = cpu_to_le16(ext); 2758 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; 2759 mutex_lock(&bp->hwrm_cmd_lock); 2760 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2761 if (rc == 0) { 2762 if (index) 2763 *index = le16_to_cpu(output->dir_idx); 2764 if (item_length) 2765 *item_length = le32_to_cpu(output->dir_item_length); 2766 if (data_length) 2767 *data_length = le32_to_cpu(output->dir_data_length); 2768 } 2769 mutex_unlock(&bp->hwrm_cmd_lock); 2770 return rc; 2771 } 2772 2773 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) 2774 { 2775 char *retval = NULL; 2776 char *p; 2777 char *value; 2778 int field = 0; 2779 2780 if (datalen < 1) 2781 return NULL; 2782 /* null-terminate the log data (removing last '\n'): */ 2783 data[datalen - 1] = 0; 2784 for (p = data; *p != 0; p++) { 2785 field = 0; 2786 retval = NULL; 2787 while (*p != 0 && *p != '\n') { 2788 value = p; 2789 while (*p != 0 && *p != '\t' && *p != '\n') 2790 p++; 2791 if (field == desired_field) 2792 retval = value; 2793 if (*p != '\t') 2794 break; 2795 *p = 0; 2796 field++; 2797 p++; 2798 } 2799 if (*p == 0) 2800 break; 2801 *p = 0; 2802 } 2803 return retval; 2804 } 2805 2806 static void bnxt_get_pkgver(struct net_device *dev) 2807 { 2808 struct bnxt *bp = netdev_priv(dev); 2809 u16 index = 0; 2810 char *pkgver; 2811 u32 pkglen; 2812 u8 *pkgbuf; 2813 int len; 2814 2815 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, 2816 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 2817 &index, NULL, &pkglen) != 0) 2818 return; 2819 2820 pkgbuf = kzalloc(pkglen, GFP_KERNEL); 2821 if (!pkgbuf) { 2822 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", 2823 pkglen); 2824 return; 2825 } 2826 2827 if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf)) 2828 goto err; 2829 2830 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, 2831 pkglen); 2832 if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { 2833 len = strlen(bp->fw_ver_str); 2834 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, 2835 "/pkg %s", pkgver); 2836 } 2837 err: 2838 kfree(pkgbuf); 2839 } 2840 2841 static int bnxt_get_eeprom(struct net_device *dev, 2842 struct ethtool_eeprom *eeprom, 2843 u8 *data) 2844 { 2845 u32 index; 2846 u32 offset; 2847 2848 if (eeprom->offset == 0) /* special offset value to get directory */ 2849 return bnxt_get_nvram_directory(dev, eeprom->len, data); 2850 2851 index = eeprom->offset >> 24; 2852 offset = eeprom->offset & 0xffffff; 2853 2854 if (index == 0) { 2855 netdev_err(dev, "unsupported index value: %d\n", index); 2856 return -EINVAL; 2857 } 2858 2859 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data); 2860 } 2861 2862 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) 2863 { 2864 struct bnxt *bp = netdev_priv(dev); 2865 struct hwrm_nvm_erase_dir_entry_input req = {0}; 2866 2867 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1); 2868 req.dir_idx = cpu_to_le16(index); 2869 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 2870 } 2871 2872 static int bnxt_set_eeprom(struct net_device *dev, 2873 struct ethtool_eeprom *eeprom, 2874 u8 *data) 2875 { 2876 struct bnxt *bp = netdev_priv(dev); 2877 u8 index, dir_op; 2878 u16 type, ext, ordinal, attr; 2879 2880 if (!BNXT_PF(bp)) { 2881 netdev_err(dev, "NVM write not supported from a virtual function\n"); 2882 return -EINVAL; 2883 } 2884 2885 type = eeprom->magic >> 16; 2886 2887 if (type == 0xffff) { /* special value for directory operations */ 2888 index = eeprom->magic & 0xff; 2889 dir_op = eeprom->magic >> 8; 2890 if (index == 0) 2891 return -EINVAL; 2892 switch (dir_op) { 2893 case 0x0e: /* erase */ 2894 if (eeprom->offset != ~eeprom->magic) 2895 return -EINVAL; 2896 return bnxt_erase_nvram_directory(dev, index - 1); 2897 default: 2898 return -EINVAL; 2899 } 2900 } 2901 2902 /* Create or re-write an NVM item: */ 2903 if (bnxt_dir_type_is_executable(type)) 2904 return -EOPNOTSUPP; 2905 ext = eeprom->magic & 0xffff; 2906 ordinal = eeprom->offset >> 16; 2907 attr = eeprom->offset & 0xffff; 2908 2909 return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data, 2910 eeprom->len); 2911 } 2912 2913 static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) 2914 { 2915 struct bnxt *bp = netdev_priv(dev); 2916 struct ethtool_eee *eee = &bp->eee; 2917 struct bnxt_link_info *link_info = &bp->link_info; 2918 u32 advertising; 2919 int rc = 0; 2920 2921 if (!BNXT_PHY_CFG_ABLE(bp)) 2922 return -EOPNOTSUPP; 2923 2924 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 2925 return -EOPNOTSUPP; 2926 2927 mutex_lock(&bp->link_lock); 2928 advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 2929 if (!edata->eee_enabled) 2930 goto eee_ok; 2931 2932 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 2933 netdev_warn(dev, "EEE requires autoneg\n"); 2934 rc = -EINVAL; 2935 goto eee_exit; 2936 } 2937 if (edata->tx_lpi_enabled) { 2938 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || 2939 edata->tx_lpi_timer < bp->lpi_tmr_lo)) { 2940 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", 2941 bp->lpi_tmr_lo, bp->lpi_tmr_hi); 2942 rc = -EINVAL; 2943 goto eee_exit; 2944 } else if (!bp->lpi_tmr_hi) { 2945 edata->tx_lpi_timer = eee->tx_lpi_timer; 2946 } 2947 } 2948 if (!edata->advertised) { 2949 edata->advertised = advertising & eee->supported; 2950 } else if (edata->advertised & ~advertising) { 2951 netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", 2952 edata->advertised, advertising); 2953 rc = -EINVAL; 2954 goto eee_exit; 2955 } 2956 2957 eee->advertised = edata->advertised; 2958 eee->tx_lpi_enabled = edata->tx_lpi_enabled; 2959 eee->tx_lpi_timer = edata->tx_lpi_timer; 2960 eee_ok: 2961 eee->eee_enabled = edata->eee_enabled; 2962 2963 if (netif_running(dev)) 2964 rc = bnxt_hwrm_set_link_setting(bp, false, true); 2965 2966 eee_exit: 2967 mutex_unlock(&bp->link_lock); 2968 return rc; 2969 } 2970 2971 static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata) 2972 { 2973 struct bnxt *bp = netdev_priv(dev); 2974 2975 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 2976 return -EOPNOTSUPP; 2977 2978 *edata = bp->eee; 2979 if (!bp->eee.eee_enabled) { 2980 /* Preserve tx_lpi_timer so that the last value will be used 2981 * by default when it is re-enabled. 2982 */ 2983 edata->advertised = 0; 2984 edata->tx_lpi_enabled = 0; 2985 } 2986 2987 if (!bp->eee.eee_active) 2988 edata->lp_advertised = 0; 2989 2990 return 0; 2991 } 2992 2993 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, 2994 u16 page_number, u16 start_addr, 2995 u16 data_length, u8 *buf) 2996 { 2997 struct hwrm_port_phy_i2c_read_input req = {0}; 2998 struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; 2999 int rc, byte_offset = 0; 3000 3001 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); 3002 req.i2c_slave_addr = i2c_addr; 3003 req.page_number = cpu_to_le16(page_number); 3004 req.port_id = cpu_to_le16(bp->pf.port_id); 3005 do { 3006 u16 xfer_size; 3007 3008 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); 3009 data_length -= xfer_size; 3010 req.page_offset = cpu_to_le16(start_addr + byte_offset); 3011 req.data_length = xfer_size; 3012 req.enables = cpu_to_le32(start_addr + byte_offset ? 3013 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0); 3014 mutex_lock(&bp->hwrm_cmd_lock); 3015 rc = _hwrm_send_message(bp, &req, sizeof(req), 3016 HWRM_CMD_TIMEOUT); 3017 if (!rc) 3018 memcpy(buf + byte_offset, output->data, xfer_size); 3019 mutex_unlock(&bp->hwrm_cmd_lock); 3020 byte_offset += xfer_size; 3021 } while (!rc && data_length > 0); 3022 3023 return rc; 3024 } 3025 3026 static int bnxt_get_module_info(struct net_device *dev, 3027 struct ethtool_modinfo *modinfo) 3028 { 3029 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1]; 3030 struct bnxt *bp = netdev_priv(dev); 3031 int rc; 3032 3033 /* No point in going further if phy status indicates 3034 * module is not inserted or if it is powered down or 3035 * if it is of type 10GBase-T 3036 */ 3037 if (bp->link_info.module_status > 3038 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) 3039 return -EOPNOTSUPP; 3040 3041 /* This feature is not supported in older firmware versions */ 3042 if (bp->hwrm_spec_code < 0x10202) 3043 return -EOPNOTSUPP; 3044 3045 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3046 SFF_DIAG_SUPPORT_OFFSET + 1, 3047 data); 3048 if (!rc) { 3049 u8 module_id = data[0]; 3050 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET]; 3051 3052 switch (module_id) { 3053 case SFF_MODULE_ID_SFP: 3054 modinfo->type = ETH_MODULE_SFF_8472; 3055 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 3056 if (!diag_supported) 3057 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 3058 break; 3059 case SFF_MODULE_ID_QSFP: 3060 case SFF_MODULE_ID_QSFP_PLUS: 3061 modinfo->type = ETH_MODULE_SFF_8436; 3062 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 3063 break; 3064 case SFF_MODULE_ID_QSFP28: 3065 modinfo->type = ETH_MODULE_SFF_8636; 3066 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 3067 break; 3068 default: 3069 rc = -EOPNOTSUPP; 3070 break; 3071 } 3072 } 3073 return rc; 3074 } 3075 3076 static int bnxt_get_module_eeprom(struct net_device *dev, 3077 struct ethtool_eeprom *eeprom, 3078 u8 *data) 3079 { 3080 struct bnxt *bp = netdev_priv(dev); 3081 u16 start = eeprom->offset, length = eeprom->len; 3082 int rc = 0; 3083 3084 memset(data, 0, eeprom->len); 3085 3086 /* Read A0 portion of the EEPROM */ 3087 if (start < ETH_MODULE_SFF_8436_LEN) { 3088 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) 3089 length = ETH_MODULE_SFF_8436_LEN - start; 3090 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 3091 start, length, data); 3092 if (rc) 3093 return rc; 3094 start += length; 3095 data += length; 3096 length = eeprom->len - length; 3097 } 3098 3099 /* Read A2 portion of the EEPROM */ 3100 if (length) { 3101 start -= ETH_MODULE_SFF_8436_LEN; 3102 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 3103 start, length, data); 3104 } 3105 return rc; 3106 } 3107 3108 static int bnxt_nway_reset(struct net_device *dev) 3109 { 3110 int rc = 0; 3111 3112 struct bnxt *bp = netdev_priv(dev); 3113 struct bnxt_link_info *link_info = &bp->link_info; 3114 3115 if (!BNXT_PHY_CFG_ABLE(bp)) 3116 return -EOPNOTSUPP; 3117 3118 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) 3119 return -EINVAL; 3120 3121 if (netif_running(dev)) 3122 rc = bnxt_hwrm_set_link_setting(bp, true, false); 3123 3124 return rc; 3125 } 3126 3127 static int bnxt_set_phys_id(struct net_device *dev, 3128 enum ethtool_phys_id_state state) 3129 { 3130 struct hwrm_port_led_cfg_input req = {0}; 3131 struct bnxt *bp = netdev_priv(dev); 3132 struct bnxt_pf_info *pf = &bp->pf; 3133 struct bnxt_led_cfg *led_cfg; 3134 u8 led_state; 3135 __le16 duration; 3136 int i; 3137 3138 if (!bp->num_leds || BNXT_VF(bp)) 3139 return -EOPNOTSUPP; 3140 3141 if (state == ETHTOOL_ID_ACTIVE) { 3142 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; 3143 duration = cpu_to_le16(500); 3144 } else if (state == ETHTOOL_ID_INACTIVE) { 3145 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; 3146 duration = cpu_to_le16(0); 3147 } else { 3148 return -EINVAL; 3149 } 3150 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1); 3151 req.port_id = cpu_to_le16(pf->port_id); 3152 req.num_leds = bp->num_leds; 3153 led_cfg = (struct bnxt_led_cfg *)&req.led0_id; 3154 for (i = 0; i < bp->num_leds; i++, led_cfg++) { 3155 req.enables |= BNXT_LED_DFLT_ENABLES(i); 3156 led_cfg->led_id = bp->leds[i].led_id; 3157 led_cfg->led_state = led_state; 3158 led_cfg->led_blink_on = duration; 3159 led_cfg->led_blink_off = duration; 3160 led_cfg->led_group_id = bp->leds[i].led_group_id; 3161 } 3162 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3163 } 3164 3165 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring) 3166 { 3167 struct hwrm_selftest_irq_input req = {0}; 3168 3169 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1); 3170 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3171 } 3172 3173 static int bnxt_test_irq(struct bnxt *bp) 3174 { 3175 int i; 3176 3177 for (i = 0; i < bp->cp_nr_rings; i++) { 3178 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id; 3179 int rc; 3180 3181 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring); 3182 if (rc) 3183 return rc; 3184 } 3185 return 0; 3186 } 3187 3188 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) 3189 { 3190 struct hwrm_port_mac_cfg_input req = {0}; 3191 3192 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); 3193 3194 req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); 3195 if (enable) 3196 req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; 3197 else 3198 req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; 3199 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3200 } 3201 3202 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) 3203 { 3204 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 3205 struct hwrm_port_phy_qcaps_input req = {0}; 3206 int rc; 3207 3208 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 3209 mutex_lock(&bp->hwrm_cmd_lock); 3210 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3211 if (!rc) 3212 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); 3213 3214 mutex_unlock(&bp->hwrm_cmd_lock); 3215 return rc; 3216 } 3217 3218 static int bnxt_disable_an_for_lpbk(struct bnxt *bp, 3219 struct hwrm_port_phy_cfg_input *req) 3220 { 3221 struct bnxt_link_info *link_info = &bp->link_info; 3222 u16 fw_advertising; 3223 u16 fw_speed; 3224 int rc; 3225 3226 if (!link_info->autoneg || 3227 (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK)) 3228 return 0; 3229 3230 rc = bnxt_query_force_speeds(bp, &fw_advertising); 3231 if (rc) 3232 return rc; 3233 3234 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; 3235 if (bp->link_info.link_up) 3236 fw_speed = bp->link_info.link_speed; 3237 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB) 3238 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 3239 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB) 3240 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 3241 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB) 3242 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 3243 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB) 3244 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 3245 3246 req->force_link_speed = cpu_to_le16(fw_speed); 3247 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE | 3248 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 3249 rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT); 3250 req->flags = 0; 3251 req->force_link_speed = cpu_to_le16(0); 3252 return rc; 3253 } 3254 3255 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) 3256 { 3257 struct hwrm_port_phy_cfg_input req = {0}; 3258 3259 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 3260 3261 if (enable) { 3262 bnxt_disable_an_for_lpbk(bp, &req); 3263 if (ext) 3264 req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; 3265 else 3266 req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; 3267 } else { 3268 req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; 3269 } 3270 req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); 3271 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3272 } 3273 3274 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 3275 u32 raw_cons, int pkt_size) 3276 { 3277 struct bnxt_napi *bnapi = cpr->bnapi; 3278 struct bnxt_rx_ring_info *rxr; 3279 struct bnxt_sw_rx_bd *rx_buf; 3280 struct rx_cmp *rxcmp; 3281 u16 cp_cons, cons; 3282 u8 *data; 3283 u32 len; 3284 int i; 3285 3286 rxr = bnapi->rx_ring; 3287 cp_cons = RING_CMP(raw_cons); 3288 rxcmp = (struct rx_cmp *) 3289 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3290 cons = rxcmp->rx_cmp_opaque; 3291 rx_buf = &rxr->rx_buf_ring[cons]; 3292 data = rx_buf->data_ptr; 3293 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 3294 if (len != pkt_size) 3295 return -EIO; 3296 i = ETH_ALEN; 3297 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr)) 3298 return -EIO; 3299 i += ETH_ALEN; 3300 for ( ; i < pkt_size; i++) { 3301 if (data[i] != (u8)(i & 0xff)) 3302 return -EIO; 3303 } 3304 return 0; 3305 } 3306 3307 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 3308 int pkt_size) 3309 { 3310 struct tx_cmp *txcmp; 3311 int rc = -EIO; 3312 u32 raw_cons; 3313 u32 cons; 3314 int i; 3315 3316 raw_cons = cpr->cp_raw_cons; 3317 for (i = 0; i < 200; i++) { 3318 cons = RING_CMP(raw_cons); 3319 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 3320 3321 if (!TX_CMP_VALID(txcmp, raw_cons)) { 3322 udelay(5); 3323 continue; 3324 } 3325 3326 /* The valid test of the entry must be done first before 3327 * reading any further. 3328 */ 3329 dma_rmb(); 3330 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) { 3331 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size); 3332 raw_cons = NEXT_RAW_CMP(raw_cons); 3333 raw_cons = NEXT_RAW_CMP(raw_cons); 3334 break; 3335 } 3336 raw_cons = NEXT_RAW_CMP(raw_cons); 3337 } 3338 cpr->cp_raw_cons = raw_cons; 3339 return rc; 3340 } 3341 3342 static int bnxt_run_loopback(struct bnxt *bp) 3343 { 3344 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0]; 3345 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 3346 struct bnxt_cp_ring_info *cpr; 3347 int pkt_size, i = 0; 3348 struct sk_buff *skb; 3349 dma_addr_t map; 3350 u8 *data; 3351 int rc; 3352 3353 cpr = &rxr->bnapi->cp_ring; 3354 if (bp->flags & BNXT_FLAG_CHIP_P5) 3355 cpr = cpr->cp_ring_arr[BNXT_RX_HDL]; 3356 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh); 3357 skb = netdev_alloc_skb(bp->dev, pkt_size); 3358 if (!skb) 3359 return -ENOMEM; 3360 data = skb_put(skb, pkt_size); 3361 eth_broadcast_addr(data); 3362 i += ETH_ALEN; 3363 ether_addr_copy(&data[i], bp->dev->dev_addr); 3364 i += ETH_ALEN; 3365 for ( ; i < pkt_size; i++) 3366 data[i] = (u8)(i & 0xff); 3367 3368 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, 3369 PCI_DMA_TODEVICE); 3370 if (dma_mapping_error(&bp->pdev->dev, map)) { 3371 dev_kfree_skb(skb); 3372 return -EIO; 3373 } 3374 bnxt_xmit_bd(bp, txr, map, pkt_size); 3375 3376 /* Sync BD data before updating doorbell */ 3377 wmb(); 3378 3379 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); 3380 rc = bnxt_poll_loopback(bp, cpr, pkt_size); 3381 3382 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); 3383 dev_kfree_skb(skb); 3384 return rc; 3385 } 3386 3387 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) 3388 { 3389 struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr; 3390 struct hwrm_selftest_exec_input req = {0}; 3391 int rc; 3392 3393 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1); 3394 mutex_lock(&bp->hwrm_cmd_lock); 3395 resp->test_success = 0; 3396 req.flags = test_mask; 3397 rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout); 3398 *test_results = resp->test_success; 3399 mutex_unlock(&bp->hwrm_cmd_lock); 3400 return rc; 3401 } 3402 3403 #define BNXT_DRV_TESTS 4 3404 #define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS) 3405 #define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1) 3406 #define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2) 3407 #define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3) 3408 3409 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, 3410 u64 *buf) 3411 { 3412 struct bnxt *bp = netdev_priv(dev); 3413 bool do_ext_lpbk = false; 3414 bool offline = false; 3415 u8 test_results = 0; 3416 u8 test_mask = 0; 3417 int rc = 0, i; 3418 3419 if (!bp->num_tests || !BNXT_PF(bp)) 3420 return; 3421 memset(buf, 0, sizeof(u64) * bp->num_tests); 3422 if (!netif_running(dev)) { 3423 etest->flags |= ETH_TEST_FL_FAILED; 3424 return; 3425 } 3426 3427 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) && 3428 (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK)) 3429 do_ext_lpbk = true; 3430 3431 if (etest->flags & ETH_TEST_FL_OFFLINE) { 3432 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) { 3433 etest->flags |= ETH_TEST_FL_FAILED; 3434 netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n"); 3435 return; 3436 } 3437 offline = true; 3438 } 3439 3440 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 3441 u8 bit_val = 1 << i; 3442 3443 if (!(bp->test_info->offline_mask & bit_val)) 3444 test_mask |= bit_val; 3445 else if (offline) 3446 test_mask |= bit_val; 3447 } 3448 if (!offline) { 3449 bnxt_run_fw_tests(bp, test_mask, &test_results); 3450 } else { 3451 rc = bnxt_close_nic(bp, false, false); 3452 if (rc) 3453 return; 3454 bnxt_run_fw_tests(bp, test_mask, &test_results); 3455 3456 buf[BNXT_MACLPBK_TEST_IDX] = 1; 3457 bnxt_hwrm_mac_loopback(bp, true); 3458 msleep(250); 3459 rc = bnxt_half_open_nic(bp); 3460 if (rc) { 3461 bnxt_hwrm_mac_loopback(bp, false); 3462 etest->flags |= ETH_TEST_FL_FAILED; 3463 return; 3464 } 3465 if (bnxt_run_loopback(bp)) 3466 etest->flags |= ETH_TEST_FL_FAILED; 3467 else 3468 buf[BNXT_MACLPBK_TEST_IDX] = 0; 3469 3470 bnxt_hwrm_mac_loopback(bp, false); 3471 bnxt_hwrm_phy_loopback(bp, true, false); 3472 msleep(1000); 3473 if (bnxt_run_loopback(bp)) { 3474 buf[BNXT_PHYLPBK_TEST_IDX] = 1; 3475 etest->flags |= ETH_TEST_FL_FAILED; 3476 } 3477 if (do_ext_lpbk) { 3478 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 3479 bnxt_hwrm_phy_loopback(bp, true, true); 3480 msleep(1000); 3481 if (bnxt_run_loopback(bp)) { 3482 buf[BNXT_EXTLPBK_TEST_IDX] = 1; 3483 etest->flags |= ETH_TEST_FL_FAILED; 3484 } 3485 } 3486 bnxt_hwrm_phy_loopback(bp, false, false); 3487 bnxt_half_close_nic(bp); 3488 rc = bnxt_open_nic(bp, false, true); 3489 } 3490 if (rc || bnxt_test_irq(bp)) { 3491 buf[BNXT_IRQ_TEST_IDX] = 1; 3492 etest->flags |= ETH_TEST_FL_FAILED; 3493 } 3494 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 3495 u8 bit_val = 1 << i; 3496 3497 if ((test_mask & bit_val) && !(test_results & bit_val)) { 3498 buf[i] = 1; 3499 etest->flags |= ETH_TEST_FL_FAILED; 3500 } 3501 } 3502 } 3503 3504 static int bnxt_reset(struct net_device *dev, u32 *flags) 3505 { 3506 struct bnxt *bp = netdev_priv(dev); 3507 bool reload = false; 3508 u32 req = *flags; 3509 3510 if (!req) 3511 return -EINVAL; 3512 3513 if (!BNXT_PF(bp)) { 3514 netdev_err(dev, "Reset is not supported from a VF\n"); 3515 return -EOPNOTSUPP; 3516 } 3517 3518 if (pci_vfs_assigned(bp->pdev) && 3519 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) { 3520 netdev_err(dev, 3521 "Reset not allowed when VFs are assigned to VMs\n"); 3522 return -EBUSY; 3523 } 3524 3525 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) { 3526 /* This feature is not supported in older firmware versions */ 3527 if (bp->hwrm_spec_code >= 0x10803) { 3528 if (!bnxt_firmware_reset_chip(dev)) { 3529 netdev_info(dev, "Firmware reset request successful.\n"); 3530 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) 3531 reload = true; 3532 *flags &= ~BNXT_FW_RESET_CHIP; 3533 } 3534 } else if (req == BNXT_FW_RESET_CHIP) { 3535 return -EOPNOTSUPP; /* only request, fail hard */ 3536 } 3537 } 3538 3539 if (req & BNXT_FW_RESET_AP) { 3540 /* This feature is not supported in older firmware versions */ 3541 if (bp->hwrm_spec_code >= 0x10803) { 3542 if (!bnxt_firmware_reset_ap(dev)) { 3543 netdev_info(dev, "Reset application processor successful.\n"); 3544 reload = true; 3545 *flags &= ~BNXT_FW_RESET_AP; 3546 } 3547 } else if (req == BNXT_FW_RESET_AP) { 3548 return -EOPNOTSUPP; /* only request, fail hard */ 3549 } 3550 } 3551 3552 if (reload) 3553 netdev_info(dev, "Reload driver to complete reset\n"); 3554 3555 return 0; 3556 } 3557 3558 static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, 3559 struct bnxt_hwrm_dbg_dma_info *info) 3560 { 3561 struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr; 3562 struct hwrm_dbg_cmn_input *cmn_req = msg; 3563 __le16 *seq_ptr = msg + info->seq_off; 3564 u16 seq = 0, len, segs_off; 3565 void *resp = cmn_resp; 3566 dma_addr_t dma_handle; 3567 int rc, off = 0; 3568 void *dma_buf; 3569 3570 dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle, 3571 GFP_KERNEL); 3572 if (!dma_buf) 3573 return -ENOMEM; 3574 3575 segs_off = offsetof(struct hwrm_dbg_coredump_list_output, 3576 total_segments); 3577 cmn_req->host_dest_addr = cpu_to_le64(dma_handle); 3578 cmn_req->host_buf_len = cpu_to_le32(info->dma_len); 3579 mutex_lock(&bp->hwrm_cmd_lock); 3580 while (1) { 3581 *seq_ptr = cpu_to_le16(seq); 3582 rc = _hwrm_send_message(bp, msg, msg_len, 3583 HWRM_COREDUMP_TIMEOUT); 3584 if (rc) 3585 break; 3586 3587 len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off))); 3588 if (!seq && 3589 cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { 3590 info->segs = le16_to_cpu(*((__le16 *)(resp + 3591 segs_off))); 3592 if (!info->segs) { 3593 rc = -EIO; 3594 break; 3595 } 3596 3597 info->dest_buf_size = info->segs * 3598 sizeof(struct coredump_segment_record); 3599 info->dest_buf = kmalloc(info->dest_buf_size, 3600 GFP_KERNEL); 3601 if (!info->dest_buf) { 3602 rc = -ENOMEM; 3603 break; 3604 } 3605 } 3606 3607 if (info->dest_buf) { 3608 if ((info->seg_start + off + len) <= 3609 BNXT_COREDUMP_BUF_LEN(info->buf_len)) { 3610 memcpy(info->dest_buf + off, dma_buf, len); 3611 } else { 3612 rc = -ENOBUFS; 3613 break; 3614 } 3615 } 3616 3617 if (cmn_req->req_type == 3618 cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) 3619 info->dest_buf_size += len; 3620 3621 if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) 3622 break; 3623 3624 seq++; 3625 off += len; 3626 } 3627 mutex_unlock(&bp->hwrm_cmd_lock); 3628 dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle); 3629 return rc; 3630 } 3631 3632 static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, 3633 struct bnxt_coredump *coredump) 3634 { 3635 struct hwrm_dbg_coredump_list_input req = {0}; 3636 struct bnxt_hwrm_dbg_dma_info info = {NULL}; 3637 int rc; 3638 3639 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1); 3640 3641 info.dma_len = COREDUMP_LIST_BUF_LEN; 3642 info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); 3643 info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, 3644 data_len); 3645 3646 rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); 3647 if (!rc) { 3648 coredump->data = info.dest_buf; 3649 coredump->data_size = info.dest_buf_size; 3650 coredump->total_segs = info.segs; 3651 } 3652 return rc; 3653 } 3654 3655 static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, 3656 u16 segment_id) 3657 { 3658 struct hwrm_dbg_coredump_initiate_input req = {0}; 3659 3660 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1); 3661 req.component_id = cpu_to_le16(component_id); 3662 req.segment_id = cpu_to_le16(segment_id); 3663 3664 return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT); 3665 } 3666 3667 static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, 3668 u16 segment_id, u32 *seg_len, 3669 void *buf, u32 buf_len, u32 offset) 3670 { 3671 struct hwrm_dbg_coredump_retrieve_input req = {0}; 3672 struct bnxt_hwrm_dbg_dma_info info = {NULL}; 3673 int rc; 3674 3675 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1); 3676 req.component_id = cpu_to_le16(component_id); 3677 req.segment_id = cpu_to_le16(segment_id); 3678 3679 info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; 3680 info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, 3681 seq_no); 3682 info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, 3683 data_len); 3684 if (buf) { 3685 info.dest_buf = buf + offset; 3686 info.buf_len = buf_len; 3687 info.seg_start = offset; 3688 } 3689 3690 rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); 3691 if (!rc) 3692 *seg_len = info.dest_buf_size; 3693 3694 return rc; 3695 } 3696 3697 static void 3698 bnxt_fill_coredump_seg_hdr(struct bnxt *bp, 3699 struct bnxt_coredump_segment_hdr *seg_hdr, 3700 struct coredump_segment_record *seg_rec, u32 seg_len, 3701 int status, u32 duration, u32 instance) 3702 { 3703 memset(seg_hdr, 0, sizeof(*seg_hdr)); 3704 memcpy(seg_hdr->signature, "sEgM", 4); 3705 if (seg_rec) { 3706 seg_hdr->component_id = (__force __le32)seg_rec->component_id; 3707 seg_hdr->segment_id = (__force __le32)seg_rec->segment_id; 3708 seg_hdr->low_version = seg_rec->version_low; 3709 seg_hdr->high_version = seg_rec->version_hi; 3710 } else { 3711 /* For hwrm_ver_get response Component id = 2 3712 * and Segment id = 0 3713 */ 3714 seg_hdr->component_id = cpu_to_le32(2); 3715 seg_hdr->segment_id = 0; 3716 } 3717 seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn); 3718 seg_hdr->length = cpu_to_le32(seg_len); 3719 seg_hdr->status = cpu_to_le32(status); 3720 seg_hdr->duration = cpu_to_le32(duration); 3721 seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr)); 3722 seg_hdr->instance = cpu_to_le32(instance); 3723 } 3724 3725 static void 3726 bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, 3727 time64_t start, s16 start_utc, u16 total_segs, 3728 int status) 3729 { 3730 time64_t end = ktime_get_real_seconds(); 3731 u32 os_ver_major = 0, os_ver_minor = 0; 3732 struct tm tm; 3733 3734 time64_to_tm(start, 0, &tm); 3735 memset(record, 0, sizeof(*record)); 3736 memcpy(record->signature, "cOrE", 4); 3737 record->flags = 0; 3738 record->low_version = 0; 3739 record->high_version = 1; 3740 record->asic_state = 0; 3741 strlcpy(record->system_name, utsname()->nodename, 3742 sizeof(record->system_name)); 3743 record->year = cpu_to_le16(tm.tm_year + 1900); 3744 record->month = cpu_to_le16(tm.tm_mon + 1); 3745 record->day = cpu_to_le16(tm.tm_mday); 3746 record->hour = cpu_to_le16(tm.tm_hour); 3747 record->minute = cpu_to_le16(tm.tm_min); 3748 record->second = cpu_to_le16(tm.tm_sec); 3749 record->utc_bias = cpu_to_le16(start_utc); 3750 strcpy(record->commandline, "ethtool -w"); 3751 record->total_segments = cpu_to_le32(total_segs); 3752 3753 sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor); 3754 record->os_ver_major = cpu_to_le32(os_ver_major); 3755 record->os_ver_minor = cpu_to_le32(os_ver_minor); 3756 3757 strlcpy(record->os_name, utsname()->sysname, 32); 3758 time64_to_tm(end, 0, &tm); 3759 record->end_year = cpu_to_le16(tm.tm_year + 1900); 3760 record->end_month = cpu_to_le16(tm.tm_mon + 1); 3761 record->end_day = cpu_to_le16(tm.tm_mday); 3762 record->end_hour = cpu_to_le16(tm.tm_hour); 3763 record->end_minute = cpu_to_le16(tm.tm_min); 3764 record->end_second = cpu_to_le16(tm.tm_sec); 3765 record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60); 3766 record->asic_id1 = cpu_to_le32(bp->chip_num << 16 | 3767 bp->ver_resp.chip_rev << 8 | 3768 bp->ver_resp.chip_metal); 3769 record->asic_id2 = 0; 3770 record->coredump_status = cpu_to_le32(status); 3771 record->ioctl_low_version = 0; 3772 record->ioctl_high_version = 0; 3773 } 3774 3775 static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) 3776 { 3777 u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); 3778 u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0; 3779 struct coredump_segment_record *seg_record = NULL; 3780 struct bnxt_coredump_segment_hdr seg_hdr; 3781 struct bnxt_coredump coredump = {NULL}; 3782 time64_t start_time; 3783 u16 start_utc; 3784 int rc = 0, i; 3785 3786 if (buf) 3787 buf_len = *dump_len; 3788 3789 start_time = ktime_get_real_seconds(); 3790 start_utc = sys_tz.tz_minuteswest * 60; 3791 seg_hdr_len = sizeof(seg_hdr); 3792 3793 /* First segment should be hwrm_ver_get response */ 3794 *dump_len = seg_hdr_len + ver_get_resp_len; 3795 if (buf) { 3796 bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len, 3797 0, 0, 0); 3798 memcpy(buf + offset, &seg_hdr, seg_hdr_len); 3799 offset += seg_hdr_len; 3800 memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len); 3801 offset += ver_get_resp_len; 3802 } 3803 3804 rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump); 3805 if (rc) { 3806 netdev_err(bp->dev, "Failed to get coredump segment list\n"); 3807 goto err; 3808 } 3809 3810 *dump_len += seg_hdr_len * coredump.total_segs; 3811 3812 seg_record = (struct coredump_segment_record *)coredump.data; 3813 seg_record_len = sizeof(*seg_record); 3814 3815 for (i = 0; i < coredump.total_segs; i++) { 3816 u16 comp_id = le16_to_cpu(seg_record->component_id); 3817 u16 seg_id = le16_to_cpu(seg_record->segment_id); 3818 u32 duration = 0, seg_len = 0; 3819 unsigned long start, end; 3820 3821 if (buf && ((offset + seg_hdr_len) > 3822 BNXT_COREDUMP_BUF_LEN(buf_len))) { 3823 rc = -ENOBUFS; 3824 goto err; 3825 } 3826 3827 start = jiffies; 3828 3829 rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); 3830 if (rc) { 3831 netdev_err(bp->dev, 3832 "Failed to initiate coredump for seg = %d\n", 3833 seg_record->segment_id); 3834 goto next_seg; 3835 } 3836 3837 /* Write segment data into the buffer */ 3838 rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, 3839 &seg_len, buf, buf_len, 3840 offset + seg_hdr_len); 3841 if (rc && rc == -ENOBUFS) 3842 goto err; 3843 else if (rc) 3844 netdev_err(bp->dev, 3845 "Failed to retrieve coredump for seg = %d\n", 3846 seg_record->segment_id); 3847 3848 next_seg: 3849 end = jiffies; 3850 duration = jiffies_to_msecs(end - start); 3851 bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len, 3852 rc, duration, 0); 3853 3854 if (buf) { 3855 /* Write segment header into the buffer */ 3856 memcpy(buf + offset, &seg_hdr, seg_hdr_len); 3857 offset += seg_hdr_len + seg_len; 3858 } 3859 3860 *dump_len += seg_len; 3861 seg_record = 3862 (struct coredump_segment_record *)((u8 *)seg_record + 3863 seg_record_len); 3864 } 3865 3866 err: 3867 if (buf) 3868 bnxt_fill_coredump_record(bp, buf + offset, start_time, 3869 start_utc, coredump.total_segs + 1, 3870 rc); 3871 kfree(coredump.data); 3872 *dump_len += sizeof(struct bnxt_coredump_record); 3873 if (rc == -ENOBUFS) 3874 netdev_err(bp->dev, "Firmware returned large coredump buffer\n"); 3875 return rc; 3876 } 3877 3878 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump) 3879 { 3880 struct bnxt *bp = netdev_priv(dev); 3881 3882 if (dump->flag > BNXT_DUMP_CRASH) { 3883 netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n"); 3884 return -EINVAL; 3885 } 3886 3887 if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) { 3888 netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n"); 3889 return -EOPNOTSUPP; 3890 } 3891 3892 bp->dump_flag = dump->flag; 3893 return 0; 3894 } 3895 3896 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) 3897 { 3898 struct bnxt *bp = netdev_priv(dev); 3899 3900 if (bp->hwrm_spec_code < 0x10801) 3901 return -EOPNOTSUPP; 3902 3903 dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 | 3904 bp->ver_resp.hwrm_fw_min_8b << 16 | 3905 bp->ver_resp.hwrm_fw_bld_8b << 8 | 3906 bp->ver_resp.hwrm_fw_rsvd_8b; 3907 3908 dump->flag = bp->dump_flag; 3909 if (bp->dump_flag == BNXT_DUMP_CRASH) 3910 dump->len = BNXT_CRASH_DUMP_LEN; 3911 else 3912 bnxt_get_coredump(bp, NULL, &dump->len); 3913 return 0; 3914 } 3915 3916 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, 3917 void *buf) 3918 { 3919 struct bnxt *bp = netdev_priv(dev); 3920 3921 if (bp->hwrm_spec_code < 0x10801) 3922 return -EOPNOTSUPP; 3923 3924 memset(buf, 0, dump->len); 3925 3926 dump->flag = bp->dump_flag; 3927 if (dump->flag == BNXT_DUMP_CRASH) { 3928 #ifdef CONFIG_TEE_BNXT_FW 3929 return tee_bnxt_copy_coredump(buf, 0, dump->len); 3930 #endif 3931 } else { 3932 return bnxt_get_coredump(bp, buf, &dump->len); 3933 } 3934 3935 return 0; 3936 } 3937 3938 static int bnxt_get_ts_info(struct net_device *dev, 3939 struct ethtool_ts_info *info) 3940 { 3941 struct bnxt *bp = netdev_priv(dev); 3942 struct bnxt_ptp_cfg *ptp; 3943 3944 ptp = bp->ptp_cfg; 3945 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 3946 SOF_TIMESTAMPING_RX_SOFTWARE | 3947 SOF_TIMESTAMPING_SOFTWARE; 3948 3949 info->phc_index = -1; 3950 if (!ptp) 3951 return 0; 3952 3953 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 3954 SOF_TIMESTAMPING_RX_HARDWARE | 3955 SOF_TIMESTAMPING_RAW_HARDWARE; 3956 if (ptp->ptp_clock) 3957 info->phc_index = ptp_clock_index(ptp->ptp_clock); 3958 3959 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 3960 3961 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 3962 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 3963 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 3964 return 0; 3965 } 3966 3967 void bnxt_ethtool_init(struct bnxt *bp) 3968 { 3969 struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr; 3970 struct hwrm_selftest_qlist_input req = {0}; 3971 struct bnxt_test_info *test_info; 3972 struct net_device *dev = bp->dev; 3973 int i, rc; 3974 3975 if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER)) 3976 bnxt_get_pkgver(dev); 3977 3978 bp->num_tests = 0; 3979 if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp)) 3980 return; 3981 3982 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1); 3983 mutex_lock(&bp->hwrm_cmd_lock); 3984 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3985 if (rc) 3986 goto ethtool_init_exit; 3987 3988 test_info = bp->test_info; 3989 if (!test_info) 3990 test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); 3991 if (!test_info) 3992 goto ethtool_init_exit; 3993 3994 bp->test_info = test_info; 3995 bp->num_tests = resp->num_tests + BNXT_DRV_TESTS; 3996 if (bp->num_tests > BNXT_MAX_TEST) 3997 bp->num_tests = BNXT_MAX_TEST; 3998 3999 test_info->offline_mask = resp->offline_tests; 4000 test_info->timeout = le16_to_cpu(resp->test_timeout); 4001 if (!test_info->timeout) 4002 test_info->timeout = HWRM_CMD_TIMEOUT; 4003 for (i = 0; i < bp->num_tests; i++) { 4004 char *str = test_info->string[i]; 4005 char *fw_str = resp->test0_name + i * 32; 4006 4007 if (i == BNXT_MACLPBK_TEST_IDX) { 4008 strcpy(str, "Mac loopback test (offline)"); 4009 } else if (i == BNXT_PHYLPBK_TEST_IDX) { 4010 strcpy(str, "Phy loopback test (offline)"); 4011 } else if (i == BNXT_EXTLPBK_TEST_IDX) { 4012 strcpy(str, "Ext loopback test (offline)"); 4013 } else if (i == BNXT_IRQ_TEST_IDX) { 4014 strcpy(str, "Interrupt_test (offline)"); 4015 } else { 4016 strlcpy(str, fw_str, ETH_GSTRING_LEN); 4017 strncat(str, " test", ETH_GSTRING_LEN - strlen(str)); 4018 if (test_info->offline_mask & (1 << i)) 4019 strncat(str, " (offline)", 4020 ETH_GSTRING_LEN - strlen(str)); 4021 else 4022 strncat(str, " (online)", 4023 ETH_GSTRING_LEN - strlen(str)); 4024 } 4025 } 4026 4027 ethtool_init_exit: 4028 mutex_unlock(&bp->hwrm_cmd_lock); 4029 } 4030 4031 static void bnxt_get_eth_phy_stats(struct net_device *dev, 4032 struct ethtool_eth_phy_stats *phy_stats) 4033 { 4034 struct bnxt *bp = netdev_priv(dev); 4035 u64 *rx; 4036 4037 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 4038 return; 4039 4040 rx = bp->rx_port_stats_ext.sw_stats; 4041 phy_stats->SymbolErrorDuringCarrier = 4042 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err)); 4043 } 4044 4045 static void bnxt_get_eth_mac_stats(struct net_device *dev, 4046 struct ethtool_eth_mac_stats *mac_stats) 4047 { 4048 struct bnxt *bp = netdev_priv(dev); 4049 u64 *rx, *tx; 4050 4051 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 4052 return; 4053 4054 rx = bp->port_stats.sw_stats; 4055 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4056 4057 mac_stats->FramesReceivedOK = 4058 BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames); 4059 mac_stats->FramesTransmittedOK = 4060 BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames); 4061 mac_stats->FrameCheckSequenceErrors = 4062 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 4063 mac_stats->AlignmentErrors = 4064 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 4065 mac_stats->OutOfRangeLengthField = 4066 BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames); 4067 } 4068 4069 static void bnxt_get_eth_ctrl_stats(struct net_device *dev, 4070 struct ethtool_eth_ctrl_stats *ctrl_stats) 4071 { 4072 struct bnxt *bp = netdev_priv(dev); 4073 u64 *rx; 4074 4075 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 4076 return; 4077 4078 rx = bp->port_stats.sw_stats; 4079 ctrl_stats->MACControlFramesReceived = 4080 BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames); 4081 } 4082 4083 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = { 4084 { 0, 64 }, 4085 { 65, 127 }, 4086 { 128, 255 }, 4087 { 256, 511 }, 4088 { 512, 1023 }, 4089 { 1024, 1518 }, 4090 { 1519, 2047 }, 4091 { 2048, 4095 }, 4092 { 4096, 9216 }, 4093 { 9217, 16383 }, 4094 {} 4095 }; 4096 4097 static void bnxt_get_rmon_stats(struct net_device *dev, 4098 struct ethtool_rmon_stats *rmon_stats, 4099 const struct ethtool_rmon_hist_range **ranges) 4100 { 4101 struct bnxt *bp = netdev_priv(dev); 4102 u64 *rx, *tx; 4103 4104 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 4105 return; 4106 4107 rx = bp->port_stats.sw_stats; 4108 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 4109 4110 rmon_stats->jabbers = 4111 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 4112 rmon_stats->oversize_pkts = 4113 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames); 4114 rmon_stats->undersize_pkts = 4115 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames); 4116 4117 rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames); 4118 rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames); 4119 rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames); 4120 rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames); 4121 rmon_stats->hist[4] = 4122 BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames); 4123 rmon_stats->hist[5] = 4124 BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames); 4125 rmon_stats->hist[6] = 4126 BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames); 4127 rmon_stats->hist[7] = 4128 BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames); 4129 rmon_stats->hist[8] = 4130 BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames); 4131 rmon_stats->hist[9] = 4132 BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames); 4133 4134 rmon_stats->hist_tx[0] = 4135 BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames); 4136 rmon_stats->hist_tx[1] = 4137 BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames); 4138 rmon_stats->hist_tx[2] = 4139 BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames); 4140 rmon_stats->hist_tx[3] = 4141 BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames); 4142 rmon_stats->hist_tx[4] = 4143 BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames); 4144 rmon_stats->hist_tx[5] = 4145 BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames); 4146 rmon_stats->hist_tx[6] = 4147 BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames); 4148 rmon_stats->hist_tx[7] = 4149 BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames); 4150 rmon_stats->hist_tx[8] = 4151 BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames); 4152 rmon_stats->hist_tx[9] = 4153 BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames); 4154 4155 *ranges = bnxt_rmon_ranges; 4156 } 4157 4158 void bnxt_ethtool_free(struct bnxt *bp) 4159 { 4160 kfree(bp->test_info); 4161 bp->test_info = NULL; 4162 } 4163 4164 const struct ethtool_ops bnxt_ethtool_ops = { 4165 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 4166 ETHTOOL_COALESCE_MAX_FRAMES | 4167 ETHTOOL_COALESCE_USECS_IRQ | 4168 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 4169 ETHTOOL_COALESCE_STATS_BLOCK_USECS | 4170 ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 4171 .get_link_ksettings = bnxt_get_link_ksettings, 4172 .set_link_ksettings = bnxt_set_link_ksettings, 4173 .get_fec_stats = bnxt_get_fec_stats, 4174 .get_fecparam = bnxt_get_fecparam, 4175 .set_fecparam = bnxt_set_fecparam, 4176 .get_pause_stats = bnxt_get_pause_stats, 4177 .get_pauseparam = bnxt_get_pauseparam, 4178 .set_pauseparam = bnxt_set_pauseparam, 4179 .get_drvinfo = bnxt_get_drvinfo, 4180 .get_regs_len = bnxt_get_regs_len, 4181 .get_regs = bnxt_get_regs, 4182 .get_wol = bnxt_get_wol, 4183 .set_wol = bnxt_set_wol, 4184 .get_coalesce = bnxt_get_coalesce, 4185 .set_coalesce = bnxt_set_coalesce, 4186 .get_msglevel = bnxt_get_msglevel, 4187 .set_msglevel = bnxt_set_msglevel, 4188 .get_sset_count = bnxt_get_sset_count, 4189 .get_strings = bnxt_get_strings, 4190 .get_ethtool_stats = bnxt_get_ethtool_stats, 4191 .set_ringparam = bnxt_set_ringparam, 4192 .get_ringparam = bnxt_get_ringparam, 4193 .get_channels = bnxt_get_channels, 4194 .set_channels = bnxt_set_channels, 4195 .get_rxnfc = bnxt_get_rxnfc, 4196 .set_rxnfc = bnxt_set_rxnfc, 4197 .get_rxfh_indir_size = bnxt_get_rxfh_indir_size, 4198 .get_rxfh_key_size = bnxt_get_rxfh_key_size, 4199 .get_rxfh = bnxt_get_rxfh, 4200 .set_rxfh = bnxt_set_rxfh, 4201 .flash_device = bnxt_flash_device, 4202 .get_eeprom_len = bnxt_get_eeprom_len, 4203 .get_eeprom = bnxt_get_eeprom, 4204 .set_eeprom = bnxt_set_eeprom, 4205 .get_link = bnxt_get_link, 4206 .get_eee = bnxt_get_eee, 4207 .set_eee = bnxt_set_eee, 4208 .get_module_info = bnxt_get_module_info, 4209 .get_module_eeprom = bnxt_get_module_eeprom, 4210 .nway_reset = bnxt_nway_reset, 4211 .set_phys_id = bnxt_set_phys_id, 4212 .self_test = bnxt_self_test, 4213 .get_ts_info = bnxt_get_ts_info, 4214 .reset = bnxt_reset, 4215 .set_dump = bnxt_set_dump, 4216 .get_dump_flag = bnxt_get_dump_flag, 4217 .get_dump_data = bnxt_get_dump_data, 4218 .get_eth_phy_stats = bnxt_get_eth_phy_stats, 4219 .get_eth_mac_stats = bnxt_get_eth_mac_stats, 4220 .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, 4221 .get_rmon_stats = bnxt_get_rmon_stats, 4222 }; 4223