/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define BXE_DRIVER_VERSION "1.78.89"

#include "bxe.h"
#include "ecore_sp.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"

/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#ifndef CTLTYPE_U64
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero (0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif

/*
 * pci_find_cap was added in r219865. Re-define it as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msec */
#define FLR_WAIT_INTERVAL 50    /* usec */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
    int pN;
    uint32_t init_crd;
    uint32_t crd;
    uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
    int pN;
    uint32_t lines_occup;
    uint32_t lines_freed;
};

/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
    {
        BRCM_VENDORID,
        CHIP_NUM_57710,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57710 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711E,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711E 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_4_10,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_2_20,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 2x20GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 MF 10GbE"
    },
    {
        0, 0, 0, 0, NULL
    }
};

MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);
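
/*
 * Illustrative sketch (assumed, not the driver's actual implementation): a
 * probe routine like bxe_probe() would typically walk bxe_devs[] and match
 * the PCI vendor/device IDs read from the candidate device, e.g.:
 *
 *   uint16_t vid = pci_get_vendor(dev);
 *   uint16_t did = pci_get_device(dev);
 *   struct bxe_device_type *t;
 *   for (t = bxe_devs; t->bxe_name != NULL; t++) {
 *       if ((vid == t->bxe_vid) && (did == t->bxe_did)) {
 *           device_set_desc(dev, t->bxe_name);
 *           return (BUS_PROBE_DEFAULT);
 *       }
 *   }
 *   return (ENXIO);
 *
 * The field names (bxe_vid, bxe_did, bxe_name) are assumptions here for
 * illustration; the sketch also ignores the subvendor/subdevice IDs. See
 * struct bxe_device_type in bxe.h for the real layout.
 */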
/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer; /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list =
    LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");
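
/*
 * The CTLFLAG_RDTUN knobs above are boot-time loader tunables as well as
 * read-only sysctls, so they are normally set in /boot/loader.conf, e.g.
 * (illustrative values only):
 *
 *   hw.bxe.interrupt_mode=2   # prefer MSI-X
 *   hw.bxe.queue_count=4      # four queues
 *   hw.bxe.debug=0            # no debug logging
 */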

#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name)                       \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name)                     \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)

static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
        8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
        8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
        8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
        8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
        8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
        8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
        8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
        8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
        8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
        8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
        8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
        8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
        4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
        4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
        8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
        8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
        8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
        8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
        8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
        8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
        4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
        8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
        8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
        8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
        8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
        8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
        8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
        8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
        8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
        8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
        8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
        8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
        8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
        8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
        8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
        8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
        8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
        8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
        8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
        8, STATS_FLAGS_FUNC, "tpa_aggregated_frames" },
    { STATS_OFFSET32(total_tpa_bytes_hi),
        8, STATS_FLAGS_FUNC, "tpa_bytes" },
    { STATS_OFFSET32(eee_tx_lpi),
        4, STATS_FLAGS_PORT, "eee_tx_lpi" },
    { STATS_OFFSET32(rx_calls),
        4, STATS_FLAGS_FUNC, "rx_calls" },
    { STATS_OFFSET32(rx_pkts),
        4, STATS_FLAGS_FUNC, "rx_pkts" },
    { STATS_OFFSET32(rx_tpa_pkts),
        4, STATS_FLAGS_FUNC, "rx_tpa_pkts" },
    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
        4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts" },
    { STATS_OFFSET32(rx_bxe_service_rxsgl),
        4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl" },
    { STATS_OFFSET32(rx_jumbo_sge_pkts),
        4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts" },
    { STATS_OFFSET32(rx_soft_errors),
        4, STATS_FLAGS_FUNC, "rx_soft_errors" },
    { STATS_OFFSET32(rx_hw_csum_errors),
        4, STATS_FLAGS_FUNC, "rx_hw_csum_errors" },
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
        4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip" },
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
        4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp" },
    { STATS_OFFSET32(rx_budget_reached),
        4, STATS_FLAGS_FUNC, "rx_budget_reached" },
    { STATS_OFFSET32(tx_pkts),
        4, STATS_FLAGS_FUNC, "tx_pkts" },
    { STATS_OFFSET32(tx_soft_errors),
        4, STATS_FLAGS_FUNC, "tx_soft_errors" },
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
        4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip" },
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
        4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp" },
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
        4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp" },
    { STATS_OFFSET32(tx_ofld_frames_lso),
        4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso" },
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
        4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits" },
    { STATS_OFFSET32(tx_encap_failures),
        4, STATS_FLAGS_FUNC, "tx_encap_failures" },
    { STATS_OFFSET32(tx_hw_queue_full),
        4, STATS_FLAGS_FUNC, "tx_hw_queue_full" },
    { STATS_OFFSET32(tx_hw_max_queue_depth),
        4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth" },
    { STATS_OFFSET32(tx_dma_mapping_failure),
        4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure" },
    { STATS_OFFSET32(tx_max_drbr_queue_depth),
        4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth" },
    { STATS_OFFSET32(tx_window_violation_std),
        4, STATS_FLAGS_FUNC, "tx_window_violation_std" },
    { STATS_OFFSET32(tx_window_violation_tso),
        4, STATS_FLAGS_FUNC, "tx_window_violation_tso" },
    { STATS_OFFSET32(tx_chain_lost_mbuf),
        4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf" },
    { STATS_OFFSET32(tx_frames_deferred),
        4, STATS_FLAGS_FUNC, "tx_frames_deferred" },
    { STATS_OFFSET32(tx_queue_xoff),
        4, STATS_FLAGS_FUNC, "tx_queue_xoff" },
    { STATS_OFFSET32(mbuf_defrag_attempts),
        4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts" },
    { STATS_OFFSET32(mbuf_defrag_failures),
        4, STATS_FLAGS_FUNC, "mbuf_defrag_failures" },
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed" },
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed" },
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed" },
    { STATS_OFFSET32(mbuf_alloc_tx),
        4, STATS_FLAGS_FUNC, "mbuf_alloc_tx" },
    { STATS_OFFSET32(mbuf_alloc_rx),
        4, STATS_FLAGS_FUNC, "mbuf_alloc_rx" },
    { STATS_OFFSET32(mbuf_alloc_sge),
        4, STATS_FLAGS_FUNC, "mbuf_alloc_sge" },
    { STATS_OFFSET32(mbuf_alloc_tpa),
        4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa" },
    { STATS_OFFSET32(tx_queue_full_return),
        4, STATS_FLAGS_FUNC, "tx_queue_full_return" },
    { STATS_OFFSET32(tx_request_link_down_failures),
        4, STATS_FLAGS_FUNC, "tx_request_link_down_failures" },
    { STATS_OFFSET32(bd_avail_too_less_failures),
        4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures" },
    { STATS_OFFSET32(tx_mq_not_empty),
        4, STATS_FLAGS_FUNC, "tx_mq_not_empty" }
};

static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi),
        8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
        8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
        8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
        8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi),
        8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
        8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
        8, "tx_ucast_packets" },
"tx_mcast_packets" }, 529 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 530 8, "tx_bcast_packets" }, 531 { Q_STATS_OFFSET32(total_tpa_aggregations_hi), 532 8, "tpa_aggregations" }, 533 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), 534 8, "tpa_aggregated_frames"}, 535 { Q_STATS_OFFSET32(total_tpa_bytes_hi), 536 8, "tpa_bytes"}, 537 { Q_STATS_OFFSET32(rx_calls), 538 4, "rx_calls"}, 539 { Q_STATS_OFFSET32(rx_pkts), 540 4, "rx_pkts"}, 541 { Q_STATS_OFFSET32(rx_tpa_pkts), 542 4, "rx_tpa_pkts"}, 543 { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 544 4, "rx_erroneous_jumbo_sge_pkts"}, 545 { Q_STATS_OFFSET32(rx_bxe_service_rxsgl), 546 4, "rx_bxe_service_rxsgl"}, 547 { Q_STATS_OFFSET32(rx_jumbo_sge_pkts), 548 4, "rx_jumbo_sge_pkts"}, 549 { Q_STATS_OFFSET32(rx_soft_errors), 550 4, "rx_soft_errors"}, 551 { Q_STATS_OFFSET32(rx_hw_csum_errors), 552 4, "rx_hw_csum_errors"}, 553 { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip), 554 4, "rx_ofld_frames_csum_ip"}, 555 { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 556 4, "rx_ofld_frames_csum_tcp_udp"}, 557 { Q_STATS_OFFSET32(rx_budget_reached), 558 4, "rx_budget_reached"}, 559 { Q_STATS_OFFSET32(tx_pkts), 560 4, "tx_pkts"}, 561 { Q_STATS_OFFSET32(tx_soft_errors), 562 4, "tx_soft_errors"}, 563 { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip), 564 4, "tx_ofld_frames_csum_ip"}, 565 { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp), 566 4, "tx_ofld_frames_csum_tcp"}, 567 { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp), 568 4, "tx_ofld_frames_csum_udp"}, 569 { Q_STATS_OFFSET32(tx_ofld_frames_lso), 570 4, "tx_ofld_frames_lso"}, 571 { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 572 4, "tx_ofld_frames_lso_hdr_splits"}, 573 { Q_STATS_OFFSET32(tx_encap_failures), 574 4, "tx_encap_failures"}, 575 { Q_STATS_OFFSET32(tx_hw_queue_full), 576 4, "tx_hw_queue_full"}, 577 { Q_STATS_OFFSET32(tx_hw_max_queue_depth), 578 4, "tx_hw_max_queue_depth"}, 579 { Q_STATS_OFFSET32(tx_dma_mapping_failure), 580 4, "tx_dma_mapping_failure"}, 581 { Q_STATS_OFFSET32(tx_max_drbr_queue_depth), 582 4, "tx_max_drbr_queue_depth"}, 583 { Q_STATS_OFFSET32(tx_window_violation_std), 584 4, "tx_window_violation_std"}, 585 { Q_STATS_OFFSET32(tx_window_violation_tso), 586 4, "tx_window_violation_tso"}, 587 { Q_STATS_OFFSET32(tx_chain_lost_mbuf), 588 4, "tx_chain_lost_mbuf"}, 589 { Q_STATS_OFFSET32(tx_frames_deferred), 590 4, "tx_frames_deferred"}, 591 { Q_STATS_OFFSET32(tx_queue_xoff), 592 4, "tx_queue_xoff"}, 593 { Q_STATS_OFFSET32(mbuf_defrag_attempts), 594 4, "mbuf_defrag_attempts"}, 595 { Q_STATS_OFFSET32(mbuf_defrag_failures), 596 4, "mbuf_defrag_failures"}, 597 { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 598 4, "mbuf_rx_bd_alloc_failed"}, 599 { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 600 4, "mbuf_rx_bd_mapping_failed"}, 601 { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 602 4, "mbuf_rx_tpa_alloc_failed"}, 603 { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 604 4, "mbuf_rx_tpa_mapping_failed"}, 605 { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 606 4, "mbuf_rx_sge_alloc_failed"}, 607 { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 608 4, "mbuf_rx_sge_mapping_failed"}, 609 { Q_STATS_OFFSET32(mbuf_alloc_tx), 610 4, "mbuf_alloc_tx"}, 611 { Q_STATS_OFFSET32(mbuf_alloc_rx), 612 4, "mbuf_alloc_rx"}, 613 { Q_STATS_OFFSET32(mbuf_alloc_sge), 614 4, "mbuf_alloc_sge"}, 615 { Q_STATS_OFFSET32(mbuf_alloc_tpa), 616 4, "mbuf_alloc_tpa"}, 617 { Q_STATS_OFFSET32(tx_queue_full_return), 618 4, "tx_queue_full_return"}, 619 { Q_STATS_OFFSET32(tx_request_link_down_failures), 620 4, 
"tx_request_link_down_failures"}, 621 { Q_STATS_OFFSET32(bd_avail_too_less_failures), 622 4, "bd_avail_too_less_failures"}, 623 { Q_STATS_OFFSET32(tx_mq_not_empty), 624 4, "tx_mq_not_empty"} 625 626 }; 627 628 #define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr) 629 #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr) 630 631 632 static void bxe_cmng_fns_init(struct bxe_softc *sc, 633 uint8_t read_cfg, 634 uint8_t cmng_type); 635 static int bxe_get_cmng_fns_mode(struct bxe_softc *sc); 636 static void storm_memset_cmng(struct bxe_softc *sc, 637 struct cmng_init *cmng, 638 uint8_t port); 639 static void bxe_set_reset_global(struct bxe_softc *sc); 640 static void bxe_set_reset_in_progress(struct bxe_softc *sc); 641 static uint8_t bxe_reset_is_done(struct bxe_softc *sc, 642 int engine); 643 static uint8_t bxe_clear_pf_load(struct bxe_softc *sc); 644 static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, 645 uint8_t *global, 646 uint8_t print); 647 static void bxe_int_disable(struct bxe_softc *sc); 648 static int bxe_release_leader_lock(struct bxe_softc *sc); 649 static void bxe_pf_disable(struct bxe_softc *sc); 650 static void bxe_free_fp_buffers(struct bxe_softc *sc); 651 static inline void bxe_update_rx_prod(struct bxe_softc *sc, 652 struct bxe_fastpath *fp, 653 uint16_t rx_bd_prod, 654 uint16_t rx_cq_prod, 655 uint16_t rx_sge_prod); 656 static void bxe_link_report_locked(struct bxe_softc *sc); 657 static void bxe_link_report(struct bxe_softc *sc); 658 static void bxe_link_status_update(struct bxe_softc *sc); 659 static void bxe_periodic_callout_func(void *xsc); 660 static void bxe_periodic_start(struct bxe_softc *sc); 661 static void bxe_periodic_stop(struct bxe_softc *sc); 662 static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, 663 uint16_t prev_index, 664 uint16_t index); 665 static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 666 int queue); 667 static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 668 uint16_t index); 669 static uint8_t bxe_txeof(struct bxe_softc *sc, 670 struct bxe_fastpath *fp); 671 static void bxe_task_fp(struct bxe_fastpath *fp); 672 static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, 673 struct mbuf *m, 674 uint8_t contents); 675 static int bxe_alloc_mem(struct bxe_softc *sc); 676 static void bxe_free_mem(struct bxe_softc *sc); 677 static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc); 678 static void bxe_free_fw_stats_mem(struct bxe_softc *sc); 679 static int bxe_interrupt_attach(struct bxe_softc *sc); 680 static void bxe_interrupt_detach(struct bxe_softc *sc); 681 static void bxe_set_rx_mode(struct bxe_softc *sc); 682 static int bxe_init_locked(struct bxe_softc *sc); 683 static int bxe_stop_locked(struct bxe_softc *sc); 684 static __noinline int bxe_nic_load(struct bxe_softc *sc, 685 int load_mode); 686 static __noinline int bxe_nic_unload(struct bxe_softc *sc, 687 uint32_t unload_mode, 688 uint8_t keep_link); 689 690 static void bxe_handle_sp_tq(void *context, int pending); 691 static void bxe_handle_fp_tq(void *context, int pending); 692 693 static int bxe_add_cdev(struct bxe_softc *sc); 694 static void bxe_del_cdev(struct bxe_softc *sc); 695 static int bxe_alloc_buf_rings(struct bxe_softc *sc); 696 static void bxe_free_buf_rings(struct bxe_softc *sc); 697 698 /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */ 699 uint32_t 700 calc_crc32(uint8_t *crc32_packet, 701 uint32_t crc32_length, 702 uint32_t crc32_seed, 703 uint8_t complement) 704 { 705 uint32_t byte = 0; 706 uint32_t bit = 0; 707 uint8_t msb 

/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
uint32_t
calc_crc32(uint8_t *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t complement)
{
    uint32_t byte = 0;
    uint32_t bit = 0;
    uint8_t msb = 0;
    uint32_t temp = 0;
    uint32_t shft = 0;
    uint8_t current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
    {
        return (crc32_result);
    }

    for (byte = 0; byte < crc32_length; byte = byte + 1)
    {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
        {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
            {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }

    /* Last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* Mirror */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
    {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* Swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* Complement */
    if (complement)
    {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}

int
bxe_test_bit(int nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

void
bxe_set_bit(unsigned int nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

void
bxe_clear_bit(int nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

int
bxe_test_and_set_bit(int nr,
                     volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    /* if (x & nr) bit_was_set; else bit_was_not_set; */
    return (x & nr);
}

int
bxe_test_and_clear_bit(int nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    /* if (x & nr) bit_was_set; else bit_was_not_set; */
    return (x & nr);
}

int
bxe_cmpxchg(volatile int *addr,
            int old,
            int new)
{
    int x;
    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}
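
/*
 * Usage sketch (assumed, for illustration only): calc_crc32 requires a
 * length that is a multiple of 8 bytes and returns the seed unchanged
 * otherwise, so a caller checksumming a 16-byte block would do:
 *
 *   uint8_t block[16] = { 0 };
 *   uint32_t crc = calc_crc32(block, sizeof(block), 0xffffffff, 1);
 *
 * Likewise, the atomic helpers above follow the Linux-style bit-op contract:
 * bxe_test_and_set_bit(3, &flags) sets bit 3 and returns non-zero only if it
 * was already set, which is how callers implement "first one in wins" logic.
 */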

/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load), so the callback records any failure in the bxe_dma
 * structure (paddr/nseg are zeroed) for the caller to check.
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
    }
}

/*
 * Allocate a block of memory and map it for DMA. No partial completions are
 * allowed; release any resources acquired if we can't acquire all of them.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t size,
              struct bxe_dma *dma,
              const char *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}

void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma *dma)
{
    if (dma->size > 0) {
        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}
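
/*
 * Usage sketch (assumed, for illustration only): callers pair bxe_dma_alloc
 * with bxe_dma_free and treat a non-zero return as total failure, e.g.:
 *
 *   struct bxe_dma spq_dma;   // hypothetical variable name
 *   if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &spq_dma, "slowpath queue") != 0) {
 *       return (ENOMEM);
 *   }
 *   // ... hardware uses spq_dma.paddr, the driver uses spq_dma.vaddr ...
 *   bxe_dma_free(sc, &spq_dma);
 *
 * Because bus_dmamap_load() is called with BUS_DMA_NOWAIT, the callback runs
 * synchronously and spq_dma.paddr is valid as soon as the load returns 0.
 */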

/*
 * These indirect read and write routines are only used during init.
 * The locking is handled by the MCP.
 */

void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t addr,
               uint32_t val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}

static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
                  " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
          resource, resource_bit);
    return (-1);
}

static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
                  " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}

static void
bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    BXE_PHY_LOCK(sc);
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}

static void
bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
    BXE_PHY_UNLOCK(sc);
}
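
/*
 * Note the acquire/release ordering in the PHY helpers above: the software
 * mutex is taken before the hardware MDIO lock and dropped after it, so the
 * two locks nest symmetrically. An illustrative caller (assumed, not actual
 * driver code) would bracket MDIO register access the same way:
 *
 *   bxe_acquire_phy_lock(sc);
 *   // ... elink/MDIO register reads and writes ...
 *   bxe_release_phy_lock(sc);
 */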

/*
 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * would attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B).
 */
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }

        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface "
                  "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
              port, val);
        return (-1);
    }

    return (0);
}

static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }

        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface "
                  "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
              port, val);
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}

static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
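
/*
 * The four helpers above define the canonical NVRAM access sequence that
 * bxe_nvram_read()/bxe_nvram_write() below follow: take the shared HW lock,
 * win the per-port SW arbitration, enable the interface, perform the dword
 * accesses, then tear everything down in reverse order. A sketch of the
 * pattern (illustrative only):
 *
 *   if (bxe_acquire_nvram_lock(sc) == 0) {
 *       bxe_enable_nvram_access(sc);
 *       // ... bxe_nvram_read_dword()/bxe_nvram_write_dword() calls ...
 *       bxe_disable_nvram_access(sc);
 *       bxe_release_nvram_lock(sc);
 *   }
 */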

static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *ret_val,
                     uint32_t cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired "
                  "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
              offset, cmd_flags, val);
    }

    return (rc);
}

static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t offset,
               uint8_t *ret_buf,
               int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset   += sizeof(uint32_t);
        ret_buf  += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
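
/*
 * Usage sketch (assumed, for illustration only): both offset and length must
 * be dword-aligned and the data comes back as a big-endian byte stream, so
 * reading eight bytes from the start of flash looks like:
 *
 *   uint8_t buf[8];
 *   if (bxe_nvram_read(sc, 0, buf, sizeof(buf)) == 0) {
 *       // buf[] now holds the first 8 bytes of NVRAM, byte-for-byte
 *   }
 */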

static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t val,
                      uint32_t cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired "
                  "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
              offset, cmd_flags, val);
    }

    return (rc);
}

#define BYTE_OFFSET(offset) (8 * (offset & 0x03))

static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t offset,
                 uint8_t *data_buf,
                 int buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t offset,
                uint8_t *data_buf,
                int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset         += sizeof(uint32_t);
        data_buf       += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc *sc,
              struct dmae_cmd *dmae,
              int idx)
{
    uint32_t cmd_offset;
    int i;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    REG_WR(sc, dmae_reg_go_c[idx], 1);
}

uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t comp_type)
{
    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
                      DMAE_CMD_C_TYPE_ENABLE));
}

uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_CMD_SRC_RESET);
}

uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t src_type,
                uint8_t dst_type,
                uint8_t with_comp,
                uint8_t comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
               (dst_type << DMAE_CMD_DST_SHIFT));

    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}

static void
bxe_prep_dmae_with_comp(struct bxe_softc *sc,
                        struct dmae_cmd *dmae,
                        uint8_t src_type,
                        uint8_t dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_cmd));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}
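
/*
 * Illustrative sketch (assumed, not actual driver code): a caller that wants
 * a GRC-to-PCI copy with a PCI completion write builds its command just like
 * bxe_prep_dmae_with_comp() does above:
 *
 *   struct dmae_cmd dmae;
 *   bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
 *   // fill in src/dst addresses and length, then post it
 *
 * Passing with_comp=TRUE routes a DMAE_COMP_VAL write to the wb_comp
 * slowpath word when the copy finishes, which is exactly what the polling
 * loop in bxe_issue_dmae_with_comp() below waits on.
 */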

/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc *sc,
                         struct dmae_cmd *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    DELAY(5);

    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
                  *wb_comp, sc->recovery_state);
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
              *wb_comp, sc->recovery_state);
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);
    return (0);
}

void
bxe_read_dmae(struct bxe_softc *sc,
              uint32_t src_addr,
              uint32_t len32)
{
    struct dmae_cmd dmae;
    uint32_t *data;
    int i, rc;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                REG_RD(sc, (src_addr + (i * 4)));
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t dma_addr,
               uint32_t dst_addr,
               uint32_t len32)
{
    struct dmae_cmd dmae;
    int rc;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        } else {
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t phys_addr,
                        uint32_t addr,
                        uint32_t len)
{
    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
    int offset = 0;

    while (len > dmae_wr_max) {
        bxe_write_dmae(sc,
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
                       dmae_wr_max);
        offset += (dmae_wr_max * 4);
        len -= dmae_wr_max;
    }

    bxe_write_dmae(sc,
                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
                   len);
}

void
bxe_set_ctx_validation(struct bxe_softc *sc,
                       struct eth_context *cxt,
                       uint32_t cid)
{
    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void
bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
                            uint8_t port,
                            uint8_t fw_sb_id,
                            uint8_t sb_index,
                            uint8_t ticks)
{
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, addr, ticks);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
}

static void
bxe_storm_memset_hc_disable(struct bxe_softc *sc,
                            uint8_t port,
                            uint16_t fw_sb_id,
                            uint8_t sb_index,
                            uint8_t disable)
{
    uint32_t enable_flag =
        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
    uint8_t flags;

    /* clear and set */
    flags = REG_RD8(sc, addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    flags |= enable_flag;
    REG_WR8(sc, addr, flags);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);
}

void
bxe_update_coalesce_sb_index(struct bxe_softc *sc,
                             uint8_t fw_sb_id,
                             uint8_t sb_index,
                             uint8_t disable,
                             uint16_t usec)
{
    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
}

void
elink_cb_udelay(struct bxe_softc *sc,
                uint32_t usecs)
{
    DELAY(usecs);
}

uint32_t
elink_cb_reg_read(struct bxe_softc *sc,
                  uint32_t reg_addr)
{
    return (REG_RD(sc, reg_addr));
}

void
elink_cb_reg_write(struct bxe_softc *sc,
                   uint32_t reg_addr,
                   uint32_t val)
{
    REG_WR(sc, reg_addr, val);
}

void
elink_cb_reg_wb_write(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t *wb_write,
                      uint16_t len)
{
    REG_WR_DMAE(sc, offset, wb_write, len);
}

void
elink_cb_reg_wb_read(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *wb_write,
                     uint16_t len)
{
    REG_RD_DMAE(sc, offset, wb_write, len);
}

uint8_t
elink_cb_path_id(struct bxe_softc *sc)
{
    return (SC_PATH(sc));
}
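
/*
 * The elink_cb_* functions here are the driver-side callbacks that the
 * shared elink PHY/link library invokes for delays, register access, and
 * path lookup; they simply forward to the bxe register primitives. Also note
 * the unit conversion in bxe_update_coalesce_sb_index(): under the 4
 * usec/tick assumption flagged by its XXX comment, a usec argument of 100
 * would program a 25-tick status block timeout, e.g. (illustrative only):
 *
 *   bxe_update_coalesce_sb_index(sc, fw_sb_id, sb_index, FALSE, 100);
 */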

void
elink_cb_event_log(struct bxe_softc *sc,
                   const elink_log_id_t elink_log_id,
                   ...)
{
    /* XXX */
    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
}

static int
bxe_set_spio(struct bxe_softc *sc,
             int spio,
             uint32_t mode)
{
    uint32_t spio_reg;

    /* Only 2 SPIOs are configurable */
    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    /* read SPIO and mask except the float bits */
    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

    switch (mode) {
    case MISC_SPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
        /* clear FLOAT and set CLR */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
        break;

    case MISC_SPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
        /* clear FLOAT and set SET */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |=  (spio << MISC_SPIO_SET_POS);
        break;

    case MISC_SPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
        /* set FLOAT */
        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_SPIO, spio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    return (0);
}

static int
bxe_gpio_read(struct bxe_softc *sc,
              int gpio_num,
              uint8_t port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
                  " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
              gpio_mask);
        return (-1);
    }

    /* read GPIO value */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);

    /* get the requested pin value */
    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
}
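
/*
 * A note on the swap logic used by the GPIO helpers here and below: the
 * expression ((PORT_SWAP && STRAP_OVERRIDE) ^ port) flips the effective port
 * only when both NIG swap strap registers read non-zero, and pins belonging
 * to the other port live MISC_REGISTERS_GPIO_PORT_SHIFT bits higher in
 * MISC_REG_GPIO. So, illustratively, reading GPIO 2 on a swapped port 0
 * resolves to bit (2 + MISC_REGISTERS_GPIO_PORT_SHIFT) of the register.
 */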
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 1926 uint32_t gpio_mask = (1 << gpio_shift); 1927 uint32_t gpio_reg; 1928 1929 if (gpio_num > MISC_REGISTERS_GPIO_3) { 1930 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" 1931 " gpio_shift %d gpio_mask 0x%x\n", 1932 gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); 1933 return (-1); 1934 } 1935 1936 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 1937 1938 /* read GPIO and mask except the float bits */ 1939 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 1940 1941 switch (mode) { 1942 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 1943 BLOGD(sc, DBG_PHY, 1944 "Set GPIO %d (shift %d) -> output low\n", 1945 gpio_num, gpio_shift); 1946 /* clear FLOAT and set CLR */ 1947 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1948 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 1949 break; 1950 1951 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 1952 BLOGD(sc, DBG_PHY, 1953 "Set GPIO %d (shift %d) -> output high\n", 1954 gpio_num, gpio_shift); 1955 /* clear FLOAT and set SET */ 1956 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1957 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 1958 break; 1959 1960 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 1961 BLOGD(sc, DBG_PHY, 1962 "Set GPIO %d (shift %d) -> input\n", 1963 gpio_num, gpio_shift); 1964 /* set FLOAT */ 1965 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1966 break; 1967 1968 default: 1969 break; 1970 } 1971 1972 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 1973 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 1974 1975 return (0); 1976 } 1977 1978 static int 1979 bxe_gpio_mult_write(struct bxe_softc *sc, 1980 uint8_t pins, 1981 uint32_t mode) 1982 { 1983 uint32_t gpio_reg; 1984 1985 /* any port swapping should be handled by caller */ 1986 1987 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 1988 1989 /* read GPIO and mask except the float bits */ 1990 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 1991 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 1992 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 1993 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 1994 1995 switch (mode) { 1996 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 1997 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); 1998 /* set CLR */ 1999 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2000 break; 2001 2002 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2003 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); 2004 /* set SET */ 2005 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2006 break; 2007 2008 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2009 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); 2010 /* set FLOAT */ 2011 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2012 break; 2013 2014 default: 2015 BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x" 2016 " gpio_reg 0x%x\n", pins, mode, gpio_reg); 2017 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2018 return (-1); 2019 } 2020 2021 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2022 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2023 2024 return (0); 2025 } 2026 2027 static int 2028 bxe_gpio_int_write(struct bxe_softc *sc, 2029 int gpio_num, 2030 uint32_t mode, 2031 uint8_t port) 2032 { 2033 /* The GPIO should be swapped if swap register is set and active */ 2034 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2035 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2036 int gpio_shift = (gpio_num + 2037 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2038 uint32_t gpio_mask = (1 << gpio_shift); 2039 uint32_t gpio_reg; 2040 2041 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2042 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" 2043 " gpio_shift %d gpio_mask 0x%x\n", 2044 gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); 2045 return (-1); 2046 } 2047 2048 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2049 2050 /* read GPIO int */ 2051 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); 2052 2053 switch (mode) { 2054 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 2055 BLOGD(sc, DBG_PHY, 2056 "Clear GPIO INT %d (shift %d) -> output low\n", 2057 gpio_num, gpio_shift); 2058 /* clear SET and set CLR */ 2059 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2060 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2061 break; 2062 2063 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2064 BLOGD(sc, DBG_PHY, 2065 "Set GPIO INT %d (shift %d) -> output high\n", 2066 gpio_num, gpio_shift); 2067 /* clear CLR and set SET */ 2068 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2069 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2070 break; 2071 2072 default: 2073 break; 2074 } 2075 2076 REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); 2077 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2078 2079 return (0); 2080 } 2081 2082 uint32_t 2083 elink_cb_gpio_read(struct bxe_softc *sc, 2084 uint16_t gpio_num, 2085 uint8_t port) 2086 { 2087 return (bxe_gpio_read(sc, gpio_num, port)); 2088 } 2089 2090 uint8_t 2091 elink_cb_gpio_write(struct bxe_softc *sc, 2092 uint16_t gpio_num, 2093 uint8_t mode, /* 0=low 1=high */ 2094 uint8_t port) 2095 { 2096 return (bxe_gpio_write(sc, gpio_num, mode, port)); 2097 } 2098 2099 uint8_t 2100 elink_cb_gpio_mult_write(struct bxe_softc *sc, 2101 uint8_t pins, 2102 uint8_t mode) /* 0=low 1=high */ 2103 { 2104 return (bxe_gpio_mult_write(sc, pins, mode)); 2105 } 2106 2107 uint8_t 2108 elink_cb_gpio_int_write(struct bxe_softc *sc, 2109 uint16_t gpio_num, 2110 uint8_t mode, /* 0=low 1=high */ 2111 uint8_t port) 2112 { 2113 return (bxe_gpio_int_write(sc, gpio_num, mode, port)); 2114 } 2115 2116 void 2117 elink_cb_notify_link_changed(struct bxe_softc *sc) 2118 { 2119 REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + 2120 (SC_FUNC(sc) * sizeof(uint32_t))), 1); 2121 } 2122 2123 /* send the MCP a request, block until there is a reply */ 2124 uint32_t 2125 elink_cb_fw_command(struct bxe_softc *sc, 2126 uint32_t command, 2127 uint32_t param) 2128 { 2129 int mb_idx = SC_FW_MB_IDX(sc); 2130 uint32_t seq; 2131 uint32_t rc = 0; 2132 uint32_t cnt = 1; 2133 uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; 2134 2135 BXE_FWMB_LOCK(sc); 2136 2137 seq = ++sc->fw_seq; 2138 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); 2139 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); 2140 2141 BLOGD(sc, DBG_PHY, 2142 "wrote command 0x%08x to FW MB param 0x%08x\n", 2143 (command | seq), param); 2144 2145 /* Let the FW do its magic. Give it up to 5 seconds... */ 2146 do { 2147 DELAY(delay * 1000); 2148 rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); 2149 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 2150 2151 BLOGD(sc, DBG_PHY, 2152 "[after %d ms] read 0x%x seq 0x%x from FW MB\n", 2153 cnt*delay, rc, seq); 2154 2155 /* is this a reply to our command? */ 2156 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 2157 rc &= FW_MSG_CODE_MASK; 2158 } else { 2159 /* Ruh-roh!
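 * The MCP never echoed our sequence number within the ~5 second window, so treat the mailbox command as failed and return 0 to the caller.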
*/ 2160 BLOGE(sc, "FW failed to respond!\n"); 2161 // XXX bxe_fw_dump(sc); 2162 rc = 0; 2163 } 2164 2165 BXE_FWMB_UNLOCK(sc); 2166 return (rc); 2167 } 2168 2169 static uint32_t 2170 bxe_fw_command(struct bxe_softc *sc, 2171 uint32_t command, 2172 uint32_t param) 2173 { 2174 return (elink_cb_fw_command(sc, command, param)); 2175 } 2176 2177 static void 2178 __storm_memset_dma_mapping(struct bxe_softc *sc, 2179 uint32_t addr, 2180 bus_addr_t mapping) 2181 { 2182 REG_WR(sc, addr, U64_LO(mapping)); 2183 REG_WR(sc, (addr + 4), U64_HI(mapping)); 2184 } 2185 2186 static void 2187 storm_memset_spq_addr(struct bxe_softc *sc, 2188 bus_addr_t mapping, 2189 uint16_t abs_fid) 2190 { 2191 uint32_t addr = (XSEM_REG_FAST_MEMORY + 2192 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); 2193 __storm_memset_dma_mapping(sc, addr, mapping); 2194 } 2195 2196 static void 2197 storm_memset_vf_to_pf(struct bxe_softc *sc, 2198 uint16_t abs_fid, 2199 uint16_t pf_id) 2200 { 2201 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2202 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2203 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2204 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2205 } 2206 2207 static void 2208 storm_memset_func_en(struct bxe_softc *sc, 2209 uint16_t abs_fid, 2210 uint8_t enable) 2211 { 2212 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2213 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2214 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2215 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2216 } 2217 2218 static void 2219 storm_memset_eq_data(struct bxe_softc *sc, 2220 struct event_ring_data *eq_data, 2221 uint16_t pfid) 2222 { 2223 uint32_t addr; 2224 size_t size; 2225 2226 addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); 2227 size = sizeof(struct event_ring_data); 2228 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); 2229 } 2230 2231 static void 2232 storm_memset_eq_prod(struct bxe_softc *sc, 2233 uint16_t eq_prod, 2234 uint16_t pfid) 2235 { 2236 uint32_t addr = (BAR_CSTRORM_INTMEM + 2237 CSTORM_EVENT_RING_PROD_OFFSET(pfid)); 2238 REG_WR16(sc, addr, eq_prod); 2239 } 2240 2241 /* 2242 * Post a slowpath command. 2243 * 2244 * A slowpath command is used to propagate a configuration change through 2245 * the controller in a controlled manner, allowing each STORM processor and 2246 * other H/W blocks to phase in the change. The commands sent on the 2247 * slowpath are referred to as ramrods. Depending on the ramrod used the 2248 * completion of the ramrod will occur in different ways. Here's a 2249 * breakdown of ramrods and how they complete: 2250 * 2251 * RAMROD_CMD_ID_ETH_PORT_SETUP 2252 * Used to setup the leading connection on a port. Completes on the 2253 * Receive Completion Queue (RCQ) of that port (typically fp[0]). 2254 * 2255 * RAMROD_CMD_ID_ETH_CLIENT_SETUP 2256 * Used to setup an additional connection on a port. Completes on the 2257 * RCQ of the multi-queue/RSS connection being initialized. 2258 * 2259 * RAMROD_CMD_ID_ETH_STAT_QUERY 2260 * Used to force the storm processors to update the statistics database 2261 * in host memory. This ramrod is sent on the leading connection CID and 2262 * completes as an index increment of the CSTORM on the default status 2263 * block.
 2264 * 2265 * RAMROD_CMD_ID_ETH_UPDATE 2266 * Used to update the state of the leading connection, usually to update 2267 * the RSS indirection table. Completes on the RCQ of the leading 2268 * connection. (Not currently used under FreeBSD until OS support becomes 2269 * available.) 2270 * 2271 * RAMROD_CMD_ID_ETH_HALT 2272 * Used when tearing down a connection prior to driver unload. Completes 2273 * on the RCQ of the multi-queue/RSS connection being torn down. Don't 2274 * use this on the leading connection. 2275 * 2276 * RAMROD_CMD_ID_ETH_SET_MAC 2277 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on 2278 * the RCQ of the leading connection. 2279 * 2280 * RAMROD_CMD_ID_ETH_CFC_DEL 2281 * Used when tearing down a connection prior to driver unload. Completes 2282 * on the RCQ of the leading connection (since the current connection 2283 * has been completely removed from controller memory). 2284 * 2285 * RAMROD_CMD_ID_ETH_PORT_DEL 2286 * Used to tear down the leading connection prior to driver unload, 2287 * typically fp[0]. Completes as an index increment of the CSTORM on the 2288 * default status block. 2289 * 2290 * RAMROD_CMD_ID_ETH_FORWARD_SETUP 2291 * Used for connection offload. Completes on the RCQ of the multi-queue 2292 * RSS connection that is being offloaded. (Not currently used under 2293 * FreeBSD.) 2294 * 2295 * There can only be one command pending per function. 2296 * 2297 * Returns: 2298 * 0 = Success, !0 = Failure. 2299 */ 2300 2301 /* must be called under the spq lock */ 2302 static inline 2303 struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) 2304 { 2305 struct eth_spe *next_spe = sc->spq_prod_bd; 2306 2307 if (sc->spq_prod_bd == sc->spq_last_bd) { 2308 /* wrap back to the first eth_spq */ 2309 sc->spq_prod_bd = sc->spq; 2310 sc->spq_prod_idx = 0; 2311 } else { 2312 sc->spq_prod_bd++; 2313 sc->spq_prod_idx++; 2314 } 2315 2316 return (next_spe); 2317 } 2318 2319 /* must be called under the spq lock */ 2320 static inline 2321 void bxe_sp_prod_update(struct bxe_softc *sc) 2322 { 2323 int func = SC_FUNC(sc); 2324 2325 /* 2326 * Make sure that BD data is updated before writing the producer. 2327 * BD data is written to the memory, the producer is read from the 2328 * memory, thus we need a full memory barrier to ensure the ordering. 2329 */ 2330 mb(); 2331 2332 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), 2333 sc->spq_prod_idx); 2334 2335 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 2336 BUS_SPACE_BARRIER_WRITE); 2337 } 2338 2339 /** 2340 * bxe_is_contextless_ramrod - check if the current command ends on EQ 2341 * 2342 * @cmd: command to check 2343 * @cmd_type: command type 2344 */ 2345 static inline 2346 int bxe_is_contextless_ramrod(int cmd, 2347 int cmd_type) 2348 { 2349 if ((cmd_type == NONE_CONNECTION_TYPE) || 2350 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 2351 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 2352 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 2353 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 2354 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 2355 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { 2356 return (TRUE); 2357 } else { 2358 return (FALSE); 2359 } 2360 } 2361 2362 /** 2363 * bxe_sp_post - place a single command on an SP ring 2364 * 2365 * @sc: driver handle 2366 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
* 2367 * @cid: SW CID the command is related to 2368 * @data_hi: command private data address (high 32 bits) 2369 * @data_lo: command private data address (low 32 bits) 2370 * @cmd_type: command type (e.g. NONE, ETH) 2371 * 2372 * SP data is handled as if it's always an address pair, thus data fields are 2373 * not swapped to little endian in upper functions. Instead this function swaps 2374 * data as if it's two uint32 fields. 2375 */ 2376 int 2377 bxe_sp_post(struct bxe_softc *sc, 2378 int command, 2379 int cid, 2380 uint32_t data_hi, 2381 uint32_t data_lo, 2382 int cmd_type) 2383 { 2384 struct eth_spe *spe; 2385 uint16_t type; 2386 int common; 2387 2388 common = bxe_is_contextless_ramrod(command, cmd_type); 2389 2390 BXE_SP_LOCK(sc); 2391 2392 if (common) { 2393 if (!atomic_load_acq_long(&sc->eq_spq_left)) { 2394 BLOGE(sc, "EQ ring is full!\n"); 2395 BXE_SP_UNLOCK(sc); 2396 return (-1); 2397 } 2398 } else { 2399 if (!atomic_load_acq_long(&sc->cq_spq_left)) { 2400 BLOGE(sc, "SPQ ring is full!\n"); 2401 BXE_SP_UNLOCK(sc); 2402 return (-1); 2403 } 2404 } 2405 2406 spe = bxe_sp_get_next(sc); 2407 2408 /* CID needs port number to be encoded in it */ 2409 spe->hdr.conn_and_cmd_data = 2410 htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid)); 2411 2412 type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE; 2413 2414 /* TBD: Check if it works for VFs */ 2415 type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) & 2416 SPE_HDR_T_FUNCTION_ID); 2417 2418 spe->hdr.type = htole16(type); 2419 2420 spe->data.update_data_addr.hi = htole32(data_hi); 2421 spe->data.update_data_addr.lo = htole32(data_lo); 2422 2423 /* 2424 * It's ok if the actual decrement is issued towards the memory 2425 * somewhere between the lock and unlock. Thus no more explicit 2426 * memory barrier is needed. 2427 */ 2428 if (common) { 2429 atomic_subtract_acq_long(&sc->eq_spq_left, 1); 2430 } else { 2431 atomic_subtract_acq_long(&sc->cq_spq_left, 1); 2432 } 2433 2434 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); 2435 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", 2436 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata)); 2437 BLOGD(sc, DBG_SP, 2438 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n", 2439 sc->spq_prod_idx, 2440 (uint32_t)U64_HI(sc->spq_dma.paddr), 2441 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), 2442 command, 2443 common, 2444 HW_CID(sc, cid), 2445 data_hi, 2446 data_lo, 2447 type, 2448 atomic_load_acq_long(&sc->cq_spq_left), 2449 atomic_load_acq_long(&sc->eq_spq_left)); 2450 2451 bxe_sp_prod_update(sc); 2452 2453 BXE_SP_UNLOCK(sc); 2454 return (0); 2455 } 2456 2457 /** 2458 * bxe_debug_print_ind_table - prints the indirection table configuration. 2459 * 2460 * @sc: driver handle 2461 * @p: pointer to rss configuration 2462 */ 2463 2464 /* 2465 * FreeBSD Device probe function. 2466 * 2467 * Compares the device found to the driver's list of supported devices and 2468 * reports back to the BSD loader whether this is the right driver for the device. 2469 * This is the driver entry function called from the "kldload" command. 2470 * 2471 * Returns: 2472 * BUS_PROBE_DEFAULT on success, positive value on failure.
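 * (ENXIO when no supported device is matched; ENOMEM if the device description buffer cannot be allocated.)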
2473 */ 2474 static int 2475 bxe_probe(device_t dev) 2476 { 2477 struct bxe_device_type *t; 2478 char *descbuf; 2479 uint16_t did, sdid, svid, vid; 2480 2481 /* Find our device structure */ 2482 t = bxe_devs; 2483 2484 /* Get the data for the device to be probed. */ 2485 vid = pci_get_vendor(dev); 2486 did = pci_get_device(dev); 2487 svid = pci_get_subvendor(dev); 2488 sdid = pci_get_subdevice(dev); 2489 2490 /* Look through the list of known devices for a match. */ 2491 while (t->bxe_name != NULL) { 2492 if ((vid == t->bxe_vid) && (did == t->bxe_did) && 2493 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && 2494 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { 2495 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 2496 if (descbuf == NULL) 2497 return (ENOMEM); 2498 2499 /* Print out the device identity. */ 2500 snprintf(descbuf, BXE_DEVDESC_MAX, 2501 "%s (%c%d) BXE v:%s\n", t->bxe_name, 2502 (((pci_read_config(dev, PCIR_REVID, 4) & 2503 0xf0) >> 4) + 'A'), 2504 (pci_read_config(dev, PCIR_REVID, 4) & 0xf), 2505 BXE_DRIVER_VERSION); 2506 2507 device_set_desc_copy(dev, descbuf); 2508 free(descbuf, M_TEMP); 2509 return (BUS_PROBE_DEFAULT); 2510 } 2511 t++; 2512 } 2513 2514 return (ENXIO); 2515 } 2516 2517 static void 2518 bxe_init_mutexes(struct bxe_softc *sc) 2519 { 2520 #ifdef BXE_CORE_LOCK_SX 2521 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), 2522 "bxe%d_core_lock", sc->unit); 2523 sx_init(&sc->core_sx, sc->core_sx_name); 2524 #else 2525 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), 2526 "bxe%d_core_lock", sc->unit); 2527 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); 2528 #endif 2529 2530 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), 2531 "bxe%d_sp_lock", sc->unit); 2532 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); 2533 2534 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), 2535 "bxe%d_dmae_lock", sc->unit); 2536 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); 2537 2538 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), 2539 "bxe%d_phy_lock", sc->unit); 2540 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); 2541 2542 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), 2543 "bxe%d_fwmb_lock", sc->unit); 2544 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); 2545 2546 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), 2547 "bxe%d_print_lock", sc->unit); 2548 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); 2549 2550 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), 2551 "bxe%d_stats_lock", sc->unit); 2552 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); 2553 2554 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), 2555 "bxe%d_mcast_lock", sc->unit); 2556 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); 2557 } 2558 2559 static void 2560 bxe_release_mutexes(struct bxe_softc *sc) 2561 { 2562 #ifdef BXE_CORE_LOCK_SX 2563 sx_destroy(&sc->core_sx); 2564 #else 2565 if (mtx_initialized(&sc->core_mtx)) { 2566 mtx_destroy(&sc->core_mtx); 2567 } 2568 #endif 2569 2570 if (mtx_initialized(&sc->sp_mtx)) { 2571 mtx_destroy(&sc->sp_mtx); 2572 } 2573 2574 if (mtx_initialized(&sc->dmae_mtx)) { 2575 mtx_destroy(&sc->dmae_mtx); 2576 } 2577 2578 if (mtx_initialized(&sc->port.phy_mtx)) { 2579 mtx_destroy(&sc->port.phy_mtx); 2580 } 2581 2582 if (mtx_initialized(&sc->fwmb_mtx)) { 2583 mtx_destroy(&sc->fwmb_mtx); 2584 } 2585 2586 if (mtx_initialized(&sc->print_mtx)) { 2587 mtx_destroy(&sc->print_mtx); 2588 } 2589 2590 if 
(mtx_initialized(&sc->stats_mtx)) { 2591 mtx_destroy(&sc->stats_mtx); 2592 } 2593 2594 if (mtx_initialized(&sc->mcast_mtx)) { 2595 mtx_destroy(&sc->mcast_mtx); 2596 } 2597 } 2598 2599 static void 2600 bxe_tx_disable(struct bxe_softc* sc) 2601 { 2602 if_t ifp = sc->ifp; 2603 2604 /* tell the stack the driver is stopped and TX queue is full */ 2605 if (ifp != NULL) { 2606 if_setdrvflags(ifp, 0); 2607 } 2608 } 2609 2610 static void 2611 bxe_drv_pulse(struct bxe_softc *sc) 2612 { 2613 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, 2614 sc->fw_drv_pulse_wr_seq); 2615 } 2616 2617 static inline uint16_t 2618 bxe_tx_avail(struct bxe_softc *sc, 2619 struct bxe_fastpath *fp) 2620 { 2621 int16_t used; 2622 uint16_t prod; 2623 uint16_t cons; 2624 2625 prod = fp->tx_bd_prod; 2626 cons = fp->tx_bd_cons; 2627 2628 used = SUB_S16(prod, cons); 2629 2630 return ((int16_t)(sc->tx_ring_size) - used); 2631 } 2632 2633 static inline int 2634 bxe_tx_queue_has_work(struct bxe_fastpath *fp) 2635 { 2636 uint16_t hw_cons; 2637 2638 mb(); /* status block fields can change */ 2639 hw_cons = le16toh(*fp->tx_cons_sb); 2640 return (hw_cons != fp->tx_pkt_cons); 2641 } 2642 2643 static inline uint8_t 2644 bxe_has_tx_work(struct bxe_fastpath *fp) 2645 { 2646 /* expand this for multi-cos if ever supported */ 2647 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE; 2648 } 2649 2650 static inline int 2651 bxe_has_rx_work(struct bxe_fastpath *fp) 2652 { 2653 uint16_t rx_cq_cons_sb; 2654 2655 mb(); /* status block fields can change */ 2656 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); 2657 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) 2658 rx_cq_cons_sb++; 2659 return (fp->rx_cq_cons != rx_cq_cons_sb); 2660 } 2661 2662 static void 2663 bxe_sp_event(struct bxe_softc *sc, 2664 struct bxe_fastpath *fp, 2665 union eth_rx_cqe *rr_cqe) 2666 { 2667 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2668 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2669 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; 2670 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 2671 2672 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", 2673 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); 2674 2675 switch (command) { 2676 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): 2677 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid); 2678 drv_cmd = ECORE_Q_CMD_UPDATE; 2679 break; 2680 2681 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): 2682 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid); 2683 drv_cmd = ECORE_Q_CMD_SETUP; 2684 break; 2685 2686 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): 2687 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); 2688 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; 2689 break; 2690 2691 case (RAMROD_CMD_ID_ETH_HALT): 2692 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid); 2693 drv_cmd = ECORE_Q_CMD_HALT; 2694 break; 2695 2696 case (RAMROD_CMD_ID_ETH_TERMINATE): 2697 BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid); 2698 drv_cmd = ECORE_Q_CMD_TERMINATE; 2699 break; 2700 2701 case (RAMROD_CMD_ID_ETH_EMPTY): 2702 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid); 2703 drv_cmd = ECORE_Q_CMD_EMPTY; 2704 break; 2705 2706 default: 2707 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n", 2708 command, fp->index); 2709 return; 2710 } 2711 2712 if ((drv_cmd != ECORE_Q_CMD_MAX) && 2713 q_obj->complete_cmd(sc, q_obj, drv_cmd)) { 2714 /* 2715 * q_obj->complete_cmd() failure means that this was 2716 * an unexpected completion.
* 2717 * 2718 * In this case we don't want to increase the sc->spq_left 2719 * because apparently we haven't sent this command in the first 2720 * place. 2721 */ 2722 // bxe_panic(sc, ("Unexpected SP completion\n")); 2723 return; 2724 } 2725 2726 atomic_add_acq_long(&sc->cq_spq_left, 1); 2727 2728 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", 2729 atomic_load_acq_long(&sc->cq_spq_left)); 2730 } 2731 2732 /* 2733 * The current mbuf is part of an aggregation. Move the mbuf into the TPA 2734 * aggregation queue, put an empty mbuf back onto the receive chain, and mark 2735 * the current aggregation queue as in-progress. 2736 */ 2737 static void 2738 bxe_tpa_start(struct bxe_softc *sc, 2739 struct bxe_fastpath *fp, 2740 uint16_t queue, 2741 uint16_t cons, 2742 uint16_t prod, 2743 struct eth_fast_path_rx_cqe *cqe) 2744 { 2745 struct bxe_sw_rx_bd tmp_bd; 2746 struct bxe_sw_rx_bd *rx_buf; 2747 struct eth_rx_bd *rx_bd; 2748 int max_agg_queues; 2749 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 2750 uint16_t index; 2751 2752 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " 2753 "cons=%d prod=%d\n", 2754 fp->index, queue, cons, prod); 2755 2756 max_agg_queues = MAX_AGG_QS(sc); 2757 2758 KASSERT((queue < max_agg_queues), 2759 ("fp[%02d] invalid aggr queue (%d >= %d)!", 2760 fp->index, queue, max_agg_queues)); 2761 2762 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), 2763 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", 2764 fp->index, queue)); 2765 2766 /* copy the existing mbuf and mapping from the TPA pool */ 2767 tmp_bd = tpa_info->bd; 2768 2769 if (tmp_bd.m == NULL) { 2770 uint32_t *tmp; 2771 2772 tmp = (uint32_t *)cqe; 2773 2774 BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n", 2775 fp->index, queue, cons, prod); 2776 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", 2777 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); 2778 2779 /* XXX Error handling? */ 2780 return; 2781 } 2782 2783 /* change the TPA queue to the start state */ 2784 tpa_info->state = BXE_TPA_STATE_START; 2785 tpa_info->placement_offset = cqe->placement_offset; 2786 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); 2787 tpa_info->vlan_tag = le16toh(cqe->vlan_tag); 2788 tpa_info->len_on_bd = le16toh(cqe->len_on_bd); 2789 2790 fp->rx_tpa_queue_used |= (1 << queue); 2791 2792 /* 2793 * If all the buffer descriptors are filled with mbufs then fill in 2794 * the current consumer index with a new BD. Else if a maximum Rx 2795 * buffer limit is imposed then fill in the next producer index. 2796 */ 2797 index = (sc->max_rx_bufs != RX_BD_USABLE) ? 2798 prod : cons; 2799 2800 /* move the received mbuf and mapping to TPA pool */ 2801 tpa_info->bd = fp->rx_mbuf_chain[cons]; 2802 2803 /* release any existing RX BD mbuf mappings */ 2804 if (cons != index) { 2805 rx_buf = &fp->rx_mbuf_chain[cons]; 2806 2807 if (rx_buf->m_map != NULL) { 2808 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 2809 BUS_DMASYNC_POSTREAD); 2810 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 2811 } 2812 2813 /* 2814 * We get here when the maximum number of rx buffers is less than 2815 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL 2816 * it out here without concern of a memory leak.
2817 */ 2818 fp->rx_mbuf_chain[cons].m = NULL; 2819 } 2820 2821 /* update the Rx SW BD with the mbuf info from the TPA pool */ 2822 fp->rx_mbuf_chain[index] = tmp_bd; 2823 2824 /* update the Rx BD with the empty mbuf phys address from the TPA pool */ 2825 rx_bd = &fp->rx_chain[index]; 2826 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); 2827 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); 2828 } 2829 2830 /* 2831 * When a TPA aggregation is completed, loop through the individual mbufs 2832 * of the aggregation, combining them into a single mbuf which will be sent 2833 * up the stack. Refill all freed SGEs with mbufs as we go along. 2834 */ 2835 static int 2836 bxe_fill_frag_mbuf(struct bxe_softc *sc, 2837 struct bxe_fastpath *fp, 2838 struct bxe_sw_tpa_info *tpa_info, 2839 uint16_t queue, 2840 uint16_t pages, 2841 struct mbuf *m, 2842 struct eth_end_agg_rx_cqe *cqe, 2843 uint16_t cqe_idx) 2844 { 2845 struct mbuf *m_frag; 2846 uint32_t frag_len, frag_size, i; 2847 uint16_t sge_idx; 2848 int rc = 0; 2849 int j; 2850 2851 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; 2852 2853 BLOGD(sc, DBG_LRO, 2854 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", 2855 fp->index, queue, tpa_info->len_on_bd, frag_size, pages); 2856 2857 /* make sure the aggregated frame is not too big to handle */ 2858 if (pages > 8 * PAGES_PER_SGE) { 2859 2860 uint32_t *tmp = (uint32_t *)cqe; 2861 2862 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " 2863 "pkt_len=%d len_on_bd=%d frag_size=%d\n", 2864 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), 2865 tpa_info->len_on_bd, frag_size); 2866 2867 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", 2868 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); 2869 2870 bxe_panic(sc, ("sge page count error\n")); 2871 return (EINVAL); 2872 } 2873 2874 /* 2875 * Scan through the scatter gather list pulling individual mbufs into a 2876 * single mbuf for the host stack. 2877 */ 2878 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 2879 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); 2880 2881 /* 2882 * Firmware gives the indices of the SGE as if the ring is an array 2883 * (meaning that the "next" element will consume 2 indices). 2884 */ 2885 frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); 2886 2887 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " 2888 "sge_idx=%d frag_size=%d frag_len=%d\n", 2889 fp->index, queue, i, j, sge_idx, frag_size, frag_len); 2890 2891 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 2892 2893 /* allocate a new mbuf for the SGE */ 2894 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 2895 if (rc) { 2896 /* Leave all remaining SGEs in the ring! 
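 * bxe_tpa_stop() will then drop the aggregated frame and count it under rx_soft_errors.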
*/ 2897 return (rc); 2898 } 2899 2900 /* update the fragment length */ 2901 m_frag->m_len = frag_len; 2902 2903 /* concatenate the fragment to the head mbuf */ 2904 m_cat(m, m_frag); 2905 fp->eth_q_stats.mbuf_alloc_sge--; 2906 2907 /* update the TPA mbuf size and remaining fragment size */ 2908 m->m_pkthdr.len += frag_len; 2909 frag_size -= frag_len; 2910 } 2911 2912 BLOGD(sc, DBG_LRO, 2913 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", 2914 fp->index, queue, frag_size); 2915 2916 return (rc); 2917 } 2918 2919 static inline void 2920 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) 2921 { 2922 int i, j; 2923 2924 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { 2925 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; 2926 2927 for (j = 0; j < 2; j++) { 2928 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); 2929 idx--; 2930 } 2931 } 2932 } 2933 2934 static inline void 2935 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) 2936 { 2937 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ 2938 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); 2939 2940 /* 2941 * Clear the last two indices in each page. These are the indices that 2942 * correspond to the "next" element, hence will never be indicated and 2943 * should be removed from the calculations. 2944 */ 2945 bxe_clear_sge_mask_next_elems(fp); 2946 } 2947 2948 static inline void 2949 bxe_update_last_max_sge(struct bxe_fastpath *fp, 2950 uint16_t idx) 2951 { 2952 uint16_t last_max = fp->last_max_sge; 2953 2954 if (SUB_S16(idx, last_max) > 0) { 2955 fp->last_max_sge = idx; 2956 } 2957 } 2958 2959 static inline void 2960 bxe_update_sge_prod(struct bxe_softc *sc, 2961 struct bxe_fastpath *fp, 2962 uint16_t sge_len, 2963 union eth_sgl_or_raw_data *cqe) 2964 { 2965 uint16_t last_max, last_elem, first_elem; 2966 uint16_t delta = 0; 2967 uint16_t i; 2968 2969 if (!sge_len) { 2970 return; 2971 } 2972 2973 /* first mark all used pages */ 2974 for (i = 0; i < sge_len; i++) { 2975 BIT_VEC64_CLEAR_BIT(fp->sge_mask, 2976 RX_SGE(le16toh(cqe->sgl[i]))); 2977 } 2978 2979 BLOGD(sc, DBG_LRO, 2980 "fp[%02d] fp_cqe->sgl[%d] = %d\n", 2981 fp->index, sge_len - 1, 2982 le16toh(cqe->sgl[sge_len - 1])); 2983 2984 /* assume that the last SGE index is the biggest */ 2985 bxe_update_last_max_sge(fp, 2986 le16toh(cqe->sgl[sge_len - 1])); 2987 2988 last_max = RX_SGE(fp->last_max_sge); 2989 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; 2990 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; 2991 2992 /* if ring is not full */ 2993 if (last_elem + 1 != first_elem) { 2994 last_elem++; 2995 } 2996 2997 /* now update the prod */ 2998 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) { 2999 if (__predict_true(fp->sge_mask[i])) { 3000 break; 3001 } 3002 3003 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; 3004 delta += BIT_VEC64_ELEM_SZ; 3005 } 3006 3007 if (delta > 0) { 3008 fp->rx_sge_prod += delta; 3009 /* clear page-end entries */ 3010 bxe_clear_sge_mask_next_elems(fp); 3011 } 3012 3013 BLOGD(sc, DBG_LRO, 3014 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", 3015 fp->index, fp->last_max_sge, fp->rx_sge_prod); 3016 } 3017 3018 /* 3019 * The aggregation on the current TPA queue has completed. Pull the individual 3020 * mbuf fragments together into a single mbuf, perform all necessary checksum 3021 * calculations, and send the resulting mbuf to the stack.
 */ 3023 static void 3024 bxe_tpa_stop(struct bxe_softc *sc, 3025 struct bxe_fastpath *fp, 3026 struct bxe_sw_tpa_info *tpa_info, 3027 uint16_t queue, 3028 uint16_t pages, 3029 struct eth_end_agg_rx_cqe *cqe, 3030 uint16_t cqe_idx) 3031 { 3032 if_t ifp = sc->ifp; 3033 struct mbuf *m; 3034 int rc = 0; 3035 3036 BLOGD(sc, DBG_LRO, 3037 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n", 3038 fp->index, queue, tpa_info->placement_offset, 3039 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); 3040 3041 m = tpa_info->bd.m; 3042 3043 /* allocate a replacement before modifying existing mbuf */ 3044 rc = bxe_alloc_rx_tpa_mbuf(fp, queue); 3045 if (rc) { 3046 /* drop the frame and log an error */ 3047 fp->eth_q_stats.rx_soft_errors++; 3048 goto bxe_tpa_stop_exit; 3049 } 3050 3051 /* we have a replacement, fixup the current mbuf */ 3052 m_adj(m, tpa_info->placement_offset); 3053 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; 3054 3055 /* mark the checksums valid (taken care of by the firmware) */ 3056 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3057 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3058 m->m_pkthdr.csum_data = 0xffff; 3059 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | 3060 CSUM_IP_VALID | 3061 CSUM_DATA_VALID | 3062 CSUM_PSEUDO_HDR); 3063 3064 /* aggregate all of the SGEs into a single mbuf */ 3065 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx); 3066 if (rc) { 3067 /* drop the packet and log an error */ 3068 fp->eth_q_stats.rx_soft_errors++; 3069 m_freem(m); 3070 } else { 3071 if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) { 3072 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; 3073 m->m_flags |= M_VLANTAG; 3074 } 3075 3076 /* assign the packet to this interface */ 3077 if_setrcvif(m, ifp); 3078 3079 #if __FreeBSD_version >= 800000 3080 /* specify what RSS queue was used for this flow */ 3081 m->m_pkthdr.flowid = fp->index; 3082 BXE_SET_FLOWID(m); 3083 #endif 3084 3085 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3086 fp->eth_q_stats.rx_tpa_pkts++; 3087 3088 /* pass the frame to the stack */ 3089 if_input(ifp, m); 3090 } 3091 3092 /* we passed an mbuf up the stack or dropped the frame */ 3093 fp->eth_q_stats.mbuf_alloc_tpa--; 3094 3095 bxe_tpa_stop_exit: 3096 3097 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; 3098 fp->rx_tpa_queue_used &= ~(1 << queue); 3099 } 3100 3101 static uint8_t 3102 bxe_service_rxsgl( 3103 struct bxe_fastpath *fp, 3104 uint16_t len, 3105 uint16_t lenonbd, 3106 struct mbuf *m, 3107 struct eth_fast_path_rx_cqe *cqe_fp) 3108 { 3109 struct mbuf *m_frag; 3110 uint16_t frags, frag_len; 3111 uint16_t sge_idx = 0; 3112 uint16_t j; 3113 uint8_t i, rc = 0; 3114 uint32_t frag_size; 3115 3116 /* adjust the mbuf */ 3117 m->m_len = lenonbd; 3118 3119 frag_size = len - lenonbd; 3120 frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3121 3122 for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) { 3123 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j])); 3124 3125 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 3126 frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE)); 3127 m_frag->m_len = frag_len; 3128 3129 /* allocate a new mbuf for the SGE */ 3130 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 3131 if (rc) { 3132 /* Leave all remaining SGEs in the ring!
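 * bxe_rxeof() treats a non-zero return as fatal for this pass and stops servicing the completion queue.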
*/ 3133 return (rc); 3134 } 3135 fp->eth_q_stats.mbuf_alloc_sge--; 3136 3137 /* concatenate the fragment to the head mbuf */ 3138 m_cat(m, m_frag); 3139 3140 frag_size -= frag_len; 3141 } 3142 3143 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); 3144 3145 return rc; 3146 } 3147 3148 static uint8_t 3149 bxe_rxeof(struct bxe_softc *sc, 3150 struct bxe_fastpath *fp) 3151 { 3152 if_t ifp = sc->ifp; 3153 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 3154 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 3155 int rx_pkts = 0; 3156 int rc = 0; 3157 3158 BXE_FP_RX_LOCK(fp); 3159 3160 /* CQ "next element" is of the size of the regular element */ 3161 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 3162 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { 3163 hw_cq_cons++; 3164 } 3165 3166 bd_cons = fp->rx_bd_cons; 3167 bd_prod = fp->rx_bd_prod; 3168 bd_prod_fw = bd_prod; 3169 sw_cq_cons = fp->rx_cq_cons; 3170 sw_cq_prod = fp->rx_cq_prod; 3171 3172 /* 3173 * Memory barrier necessary as speculative reads of the rx 3174 * buffer can be ahead of the index in the status block 3175 */ 3176 rmb(); 3177 3178 BLOGD(sc, DBG_RX, 3179 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", 3180 fp->index, hw_cq_cons, sw_cq_cons); 3181 3182 while (sw_cq_cons != hw_cq_cons) { 3183 struct bxe_sw_rx_bd *rx_buf = NULL; 3184 union eth_rx_cqe *cqe; 3185 struct eth_fast_path_rx_cqe *cqe_fp; 3186 uint8_t cqe_fp_flags; 3187 enum eth_rx_cqe_type cqe_fp_type; 3188 uint16_t len, lenonbd, pad; 3189 struct mbuf *m = NULL; 3190 3191 comp_ring_cons = RCQ(sw_cq_cons); 3192 bd_prod = RX_BD(bd_prod); 3193 bd_cons = RX_BD(bd_cons); 3194 3195 cqe = &fp->rcq_chain[comp_ring_cons]; 3196 cqe_fp = &cqe->fast_path_cqe; 3197 cqe_fp_flags = cqe_fp->type_error_flags; 3198 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 3199 3200 BLOGD(sc, DBG_RX, 3201 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " 3202 "BD prod=%d cons=%d CQE type=0x%x err=0x%x " 3203 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", 3204 fp->index, 3205 hw_cq_cons, 3206 sw_cq_cons, 3207 bd_prod, 3208 bd_cons, 3209 CQE_TYPE(cqe_fp_flags), 3210 cqe_fp_flags, 3211 cqe_fp->status_flags, 3212 le32toh(cqe_fp->rss_hash_result), 3213 le16toh(cqe_fp->vlan_tag), 3214 le16toh(cqe_fp->pkt_len_or_gro_seg_len), 3215 le16toh(cqe_fp->len_on_bd)); 3216 3217 /* is this a slowpath msg? */ 3218 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { 3219 bxe_sp_event(sc, fp, cqe); 3220 goto next_cqe; 3221 } 3222 3223 rx_buf = &fp->rx_mbuf_chain[bd_cons]; 3224 3225 if (!CQE_TYPE_FAST(cqe_fp_type)) { 3226 struct bxe_sw_tpa_info *tpa_info; 3227 uint16_t frag_size, pages; 3228 uint8_t queue; 3229 3230 if (CQE_TYPE_START(cqe_fp_type)) { 3231 bxe_tpa_start(sc, fp, cqe_fp->queue_index, 3232 bd_cons, bd_prod, cqe_fp); 3233 m = NULL; /* packet not ready yet */ 3234 goto next_rx; 3235 } 3236 3237 KASSERT(CQE_TYPE_STOP(cqe_fp_type), 3238 ("CQE type is not STOP! (0x%x)\n", cqe_fp_type)); 3239 3240 queue = cqe->end_agg_cqe.queue_index; 3241 tpa_info = &fp->rx_tpa_info[queue]; 3242 3243 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", 3244 fp->index, queue); 3245 3246 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - 3247 tpa_info->len_on_bd); 3248 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3249 3250 bxe_tpa_stop(sc, fp, tpa_info, queue, pages, 3251 &cqe->end_agg_cqe, comp_ring_cons); 3252 3253 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); 3254 3255 goto next_cqe; 3256 } 3257 3258 /* non TPA */ 3259 3260 /* is this an error packet? 
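 * Count the frame under rx_soft_errors and recycle the BD via next_rx; nothing is handed to the stack.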
*/ 3261 if (__predict_false(cqe_fp_flags & 3262 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { 3263 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); 3264 fp->eth_q_stats.rx_soft_errors++; 3265 goto next_rx; 3266 } 3267 3268 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); 3269 lenonbd = le16toh(cqe_fp->len_on_bd); 3270 pad = cqe_fp->placement_offset; 3271 3272 m = rx_buf->m; 3273 3274 if (__predict_false(m == NULL)) { 3275 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", 3276 bd_cons, fp->index); 3277 goto next_rx; 3278 } 3279 3280 /* XXX double copy if packet length under a threshold */ 3281 3282 /* 3283 * If all the buffer descriptors are filled with mbufs then fill in 3284 * the current consumer index with a new BD. Else if a maximum Rx 3285 * buffer limit is imposed then fill in the next producer index. 3286 */ 3287 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons, 3288 (sc->max_rx_bufs != RX_BD_USABLE) ? 3289 bd_prod : bd_cons); 3290 if (rc != 0) { 3291 3292 /* we simply reuse the received mbuf and don't post it to the stack */ 3293 m = NULL; 3294 3295 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 3296 fp->index, rc); 3297 fp->eth_q_stats.rx_soft_errors++; 3298 3299 if (sc->max_rx_bufs != RX_BD_USABLE) { 3300 /* copy this consumer index to the producer index */ 3301 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, 3302 sizeof(struct bxe_sw_rx_bd)); 3303 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd)); 3304 } 3305 3306 goto next_rx; 3307 } 3308 3309 /* current mbuf was detached from the bd */ 3310 fp->eth_q_stats.mbuf_alloc_rx--; 3311 3312 /* we allocated a replacement mbuf, fixup the current one */ 3313 m_adj(m, pad); 3314 m->m_pkthdr.len = m->m_len = len; 3315 3316 if ((len > 60) && (len > lenonbd)) { 3317 fp->eth_q_stats.rx_bxe_service_rxsgl++; 3318 rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp); 3319 if (rc) 3320 break; 3321 fp->eth_q_stats.rx_jumbo_sge_pkts++; 3322 } else if (lenonbd < len) { 3323 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++; 3324 } 3325 3326 /* assign the packet to this interface */ 3327 if_setrcvif(m, ifp); 3328 3329 /* assume no hardware checksum has completed */ 3330 m->m_pkthdr.csum_flags = 0; 3331 3332 /* validate checksum if offload enabled */ 3333 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 3334 /* check for a valid IP frame */ 3335 if (!(cqe->fast_path_cqe.status_flags & 3336 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) { 3337 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3338 if (__predict_false(cqe_fp_flags & 3339 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) { 3340 fp->eth_q_stats.rx_hw_csum_errors++; 3341 } else { 3342 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3343 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3344 } 3345 } 3346 3347 /* check for a valid TCP/UDP frame */ 3348 if (!(cqe->fast_path_cqe.status_flags & 3349 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) { 3350 if (__predict_false(cqe_fp_flags & 3351 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { 3352 fp->eth_q_stats.rx_hw_csum_errors++; 3353 } else { 3354 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3355 m->m_pkthdr.csum_data = 0xFFFF; 3356 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | 3357 CSUM_PSEUDO_HDR); 3358 } 3359 } 3360 } 3361 3362 /* if there is a VLAN tag then flag that info */ 3363 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) { 3364 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; 3365 m->m_flags |= M_VLANTAG; 3366 } 3367 3368 #if __FreeBSD_version >= 800000 3369 /* specify what RSS queue was used for this flow */ 3370
m->m_pkthdr.flowid = fp->index; 3371 BXE_SET_FLOWID(m); 3372 #endif 3373 3374 next_rx: 3375 3376 bd_cons = RX_BD_NEXT(bd_cons); 3377 bd_prod = RX_BD_NEXT(bd_prod); 3378 bd_prod_fw = RX_BD_NEXT(bd_prod_fw); 3379 3380 /* pass the frame to the stack */ 3381 if (__predict_true(m != NULL)) { 3382 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3383 rx_pkts++; 3384 if_input(ifp, m); 3385 } 3386 3387 next_cqe: 3388 3389 sw_cq_prod = RCQ_NEXT(sw_cq_prod); 3390 sw_cq_cons = RCQ_NEXT(sw_cq_cons); 3391 3392 /* limit spinning on the queue */ 3393 if (rc != 0) 3394 break; 3395 3396 if (rx_pkts == sc->rx_budget) { 3397 fp->eth_q_stats.rx_budget_reached++; 3398 break; 3399 } 3400 } /* while work to do */ 3401 3402 fp->rx_bd_cons = bd_cons; 3403 fp->rx_bd_prod = bd_prod_fw; 3404 fp->rx_cq_cons = sw_cq_cons; 3405 fp->rx_cq_prod = sw_cq_prod; 3406 3407 /* Update producers */ 3408 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); 3409 3410 fp->eth_q_stats.rx_pkts += rx_pkts; 3411 fp->eth_q_stats.rx_calls++; 3412 3413 BXE_FP_RX_UNLOCK(fp); 3414 3415 return (sw_cq_cons != hw_cq_cons); 3416 } 3417 3418 static uint16_t 3419 bxe_free_tx_pkt(struct bxe_softc *sc, 3420 struct bxe_fastpath *fp, 3421 uint16_t idx) 3422 { 3423 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; 3424 struct eth_tx_start_bd *tx_start_bd; 3425 uint16_t bd_idx = TX_BD(tx_buf->first_bd); 3426 uint16_t new_cons; 3427 int nbd; 3428 3429 /* unmap the mbuf from non-paged memory */ 3430 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 3431 3432 tx_start_bd = &fp->tx_chain[bd_idx].start_bd; 3433 nbd = le16toh(tx_start_bd->nbd) - 1; 3434 3435 new_cons = (tx_buf->first_bd + nbd); 3436 3437 /* free the mbuf */ 3438 if (__predict_true(tx_buf->m != NULL)) { 3439 m_freem(tx_buf->m); 3440 fp->eth_q_stats.mbuf_alloc_tx--; 3441 } else { 3442 fp->eth_q_stats.tx_chain_lost_mbuf++; 3443 } 3444 3445 tx_buf->m = NULL; 3446 tx_buf->first_bd = 0; 3447 3448 return (new_cons); 3449 } 3450 3451 /* transmit timeout watchdog */ 3452 static int 3453 bxe_watchdog(struct bxe_softc *sc, 3454 struct bxe_fastpath *fp) 3455 { 3456 BXE_FP_TX_LOCK(fp); 3457 3458 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { 3459 BXE_FP_TX_UNLOCK(fp); 3460 return (0); 3461 } 3462 3463 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); 3464 if(sc->trigger_grcdump) { 3465 /* taking grcdump */ 3466 bxe_grc_dump(sc); 3467 } 3468 3469 BXE_FP_TX_UNLOCK(fp); 3470 3471 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); 3472 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); 3473 3474 return (-1); 3475 } 3476 3477 /* processes transmit completions */ 3478 static uint8_t 3479 bxe_txeof(struct bxe_softc *sc, 3480 struct bxe_fastpath *fp) 3481 { 3482 if_t ifp = sc->ifp; 3483 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; 3484 uint16_t tx_bd_avail; 3485 3486 BXE_FP_TX_LOCK_ASSERT(fp); 3487 3488 bd_cons = fp->tx_bd_cons; 3489 hw_cons = le16toh(*fp->tx_cons_sb); 3490 sw_cons = fp->tx_pkt_cons; 3491 3492 while (sw_cons != hw_cons) { 3493 pkt_cons = TX_BD(sw_cons); 3494 3495 BLOGD(sc, DBG_TX, 3496 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", 3497 fp->index, hw_cons, sw_cons, pkt_cons); 3498 3499 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); 3500 3501 sw_cons++; 3502 } 3503 3504 fp->tx_pkt_cons = sw_cons; 3505 fp->tx_bd_cons = bd_cons; 3506 3507 BLOGD(sc, DBG_TX, 3508 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", 3509 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); 3510 3511 mb(); 3512 3513 tx_bd_avail = bxe_tx_avail(sc, fp); 3514 
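/* backpressure the stack (IFF_DRV_OACTIVE) while free TX BDs are scarce; clear it once cleanup has reclaimed enough descriptors */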
3515 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 3516 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 3517 } else { 3518 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 3519 } 3520 3521 if (fp->tx_pkt_prod != fp->tx_pkt_cons) { 3522 /* reset the watchdog timer if there are pending transmits */ 3523 fp->watchdog_timer = BXE_TX_TIMEOUT; 3524 return (TRUE); 3525 } else { 3526 /* clear watchdog when there are no pending transmits */ 3527 fp->watchdog_timer = 0; 3528 return (FALSE); 3529 } 3530 } 3531 3532 static void 3533 bxe_drain_tx_queues(struct bxe_softc *sc) 3534 { 3535 struct bxe_fastpath *fp; 3536 int i, count; 3537 3538 /* wait until all TX fastpath tasks have completed */ 3539 for (i = 0; i < sc->num_queues; i++) { 3540 fp = &sc->fp[i]; 3541 3542 count = 1000; 3543 3544 while (bxe_has_tx_work(fp)) { 3545 3546 BXE_FP_TX_LOCK(fp); 3547 bxe_txeof(sc, fp); 3548 BXE_FP_TX_UNLOCK(fp); 3549 3550 if (count == 0) { 3551 BLOGE(sc, "Timeout waiting for fp[%d] " 3552 "transmits to complete!\n", i); 3553 bxe_panic(sc, ("tx drain failure\n")); 3554 return; 3555 } 3556 3557 count--; 3558 DELAY(1000); 3559 rmb(); 3560 } 3561 } 3562 3563 return; 3564 } 3565 3566 static int 3567 bxe_del_all_macs(struct bxe_softc *sc, 3568 struct ecore_vlan_mac_obj *mac_obj, 3569 int mac_type, 3570 uint8_t wait_for_comp) 3571 { 3572 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 3573 int rc; 3574 3575 /* wait for completion of the requested command */ 3576 if (wait_for_comp) { 3577 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3578 } 3579 3580 /* Set the mac type of addresses we want to clear */ 3581 bxe_set_bit(mac_type, &vlan_mac_flags); 3582 3583 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); 3584 if (rc < 0) { 3585 BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n", 3586 rc, mac_type, wait_for_comp); 3587 } 3588 3589 return (rc); 3590 } 3591 3592 static int 3593 bxe_fill_accept_flags(struct bxe_softc *sc, 3594 uint32_t rx_mode, 3595 unsigned long *rx_accept_flags, 3596 unsigned long *tx_accept_flags) 3597 { 3598 /* Clear the flags first */ 3599 *rx_accept_flags = 0; 3600 *tx_accept_flags = 0; 3601 3602 switch (rx_mode) { 3603 case BXE_RX_MODE_NONE: 3604 /* 3605 * 'drop all' supersedes any accept flags that may have been 3606 * passed to the function. 3607 */ 3608 break; 3609 3610 case BXE_RX_MODE_NORMAL: 3611 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3612 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); 3613 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3614 3615 /* internal switching mode */ 3616 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3617 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); 3618 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3619 3620 break; 3621 3622 case BXE_RX_MODE_ALLMULTI: 3623 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3624 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3625 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3626 3627 /* internal switching mode */ 3628 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3629 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3630 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3631 3632 break; 3633 3634 case BXE_RX_MODE_PROMISC: 3635 /* 3636 * According to the definition of SI mode, iface in promisc mode 3637 * should receive matched and unmatched (in resolution of port) 3638 * unicast packets.
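 * The ECORE_ACCEPT_UNMATCHED flag set below covers the unmatched-unicast case.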
3639 */ 3640 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); 3641 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3642 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3643 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3644 3645 /* internal switching mode */ 3646 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3647 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3648 3649 if (IS_MF_SI(sc)) { 3650 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); 3651 } else { 3652 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3653 } 3654 3655 break; 3656 3657 default: 3658 BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode); 3659 return (-1); 3660 } 3661 3662 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 3663 if (rx_mode != BXE_RX_MODE_NONE) { 3664 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); 3665 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); 3666 } 3667 3668 return (0); 3669 } 3670 3671 static int 3672 bxe_set_q_rx_mode(struct bxe_softc *sc, 3673 uint8_t cl_id, 3674 unsigned long rx_mode_flags, 3675 unsigned long rx_accept_flags, 3676 unsigned long tx_accept_flags, 3677 unsigned long ramrod_flags) 3678 { 3679 struct ecore_rx_mode_ramrod_params ramrod_param; 3680 int rc; 3681 3682 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3683 3684 /* Prepare ramrod parameters */ 3685 ramrod_param.cid = 0; 3686 ramrod_param.cl_id = cl_id; 3687 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; 3688 ramrod_param.func_id = SC_FUNC(sc); 3689 3690 ramrod_param.pstate = &sc->sp_state; 3691 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; 3692 3693 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); 3694 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); 3695 3696 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 3697 3698 ramrod_param.ramrod_flags = ramrod_flags; 3699 ramrod_param.rx_mode_flags = rx_mode_flags; 3700 3701 ramrod_param.rx_accept_flags = rx_accept_flags; 3702 ramrod_param.tx_accept_flags = tx_accept_flags; 3703 3704 rc = ecore_config_rx_mode(sc, &ramrod_param); 3705 if (rc < 0) { 3706 BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x " 3707 "rx_accept_flags 0x%x tx_accept_flags 0x%x " 3708 "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id, 3709 (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags, 3710 (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc); 3711 return (rc); 3712 } 3713 3714 return (0); 3715 } 3716 3717 static int 3718 bxe_set_storm_rx_mode(struct bxe_softc *sc) 3719 { 3720 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 3721 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 3722 int rc; 3723 3724 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 3725 &tx_accept_flags); 3726 if (rc) { 3727 return (rc); 3728 } 3729 3730 bxe_set_bit(RAMROD_RX, &ramrod_flags); 3731 bxe_set_bit(RAMROD_TX, &ramrod_flags); 3732 3733 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ 3734 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 3735 rx_accept_flags, tx_accept_flags, 3736 ramrod_flags)); 3737 } 3738 3739 /* returns the "mcp load_code" according to global load_count array */ 3740 static int 3741 bxe_nic_load_no_mcp(struct bxe_softc *sc) 3742 { 3743 int path = SC_PATH(sc); 3744 int port = SC_PORT(sc); 3745 3746 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3747 path, load_count[path][0], load_count[path][1], 3748 load_count[path][2]); 3749 load_count[path][0]++; 3750 load_count[path][1 + port]++; 3751 BLOGI(sc, "NO MCP - new load counts[%d] %d, 
%d, %d\n", 3752 path, load_count[path][0], load_count[path][1], 3753 load_count[path][2]); 3754 if (load_count[path][0] == 1) { 3755 return (FW_MSG_CODE_DRV_LOAD_COMMON); 3756 } else if (load_count[path][1 + port] == 1) { 3757 return (FW_MSG_CODE_DRV_LOAD_PORT); 3758 } else { 3759 return (FW_MSG_CODE_DRV_LOAD_FUNCTION); 3760 } 3761 } 3762 3763 /* returns the "mcp load_code" according to global load_count array */ 3764 static int 3765 bxe_nic_unload_no_mcp(struct bxe_softc *sc) 3766 { 3767 int port = SC_PORT(sc); 3768 int path = SC_PATH(sc); 3769 3770 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3771 path, load_count[path][0], load_count[path][1], 3772 load_count[path][2]); 3773 load_count[path][0]--; 3774 load_count[path][1 + port]--; 3775 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3776 path, load_count[path][0], load_count[path][1], 3777 load_count[path][2]); 3778 if (load_count[path][0] == 0) { 3779 return (FW_MSG_CODE_DRV_UNLOAD_COMMON); 3780 } else if (load_count[path][1 + port] == 0) { 3781 return (FW_MSG_CODE_DRV_UNLOAD_PORT); 3782 } else { 3783 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); 3784 } 3785 } 3786 3787 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ 3788 static uint32_t 3789 bxe_send_unload_req(struct bxe_softc *sc, 3790 int unload_mode) 3791 { 3792 uint32_t reset_code = 0; 3793 3794 /* Select the UNLOAD request mode */ 3795 if (unload_mode == UNLOAD_NORMAL) { 3796 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 3797 } else { 3798 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 3799 } 3800 3801 /* Send the request to the MCP */ 3802 if (!BXE_NOMCP(sc)) { 3803 reset_code = bxe_fw_command(sc, reset_code, 0); 3804 } else { 3805 reset_code = bxe_nic_unload_no_mcp(sc); 3806 } 3807 3808 return (reset_code); 3809 } 3810 3811 /* send UNLOAD_DONE command to the MCP */ 3812 static void 3813 bxe_send_unload_done(struct bxe_softc *sc, 3814 uint8_t keep_link) 3815 { 3816 uint32_t reset_param = 3817 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 3818 3819 /* Report UNLOAD_DONE to MCP */ 3820 if (!BXE_NOMCP(sc)) { 3821 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 3822 } 3823 } 3824 3825 static int 3826 bxe_func_wait_started(struct bxe_softc *sc) 3827 { 3828 int tout = 50; 3829 3830 if (!sc->port.pmf) { 3831 return (0); 3832 } 3833 3834 /* 3835 * (assumption: No Attention from MCP at this stage) 3836 * PMF probably in the middle of TX disable/enable transaction 3837 * 1. Sync IRS for default SB 3838 * 2. Sync SP queue - this guarantees us that attention handling started 3839 * 3. Wait, that TX disable/enable transaction completes 3840 * 3841 * 1+2 guarantee that if DCBX attention was scheduled it already changed 3842 * pending bit of transaction from STARTED-->TX_STOPPED, if we already 3843 * received completion for the transaction the state is TX_STOPPED. 3844 * State will return to STARTED after completion of TX_STOPPED-->STARTED 3845 * transaction. 3846 */ 3847 3848 /* XXX make sure default SB ISR is done */ 3849 /* need a way to synchronize an irq (intr_mtx?) */ 3850 3851 /* XXX flush any work queues */ 3852 3853 while (ecore_func_get_state(sc, &sc->func_obj) != 3854 ECORE_F_STATE_STARTED && tout--) { 3855 DELAY(20000); 3856 } 3857 3858 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { 3859 /* 3860 * Failed to complete the transaction in a "good way" 3861 * Force both transactions with CLR bit. 3862 */ 3863 struct ecore_func_state_params func_params = { NULL }; 3864 3865 BLOGE(sc, "Unexpected function state! 
" 3866 "Forcing STARTED-->TX_STOPPED-->STARTED\n"); 3867 3868 func_params.f_obj = &sc->func_obj; 3869 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 3870 3871 /* STARTED-->TX_STOPPED */ 3872 func_params.cmd = ECORE_F_CMD_TX_STOP; 3873 ecore_func_state_change(sc, &func_params); 3874 3875 /* TX_STOPPED-->STARTED */ 3876 func_params.cmd = ECORE_F_CMD_TX_START; 3877 return (ecore_func_state_change(sc, &func_params)); 3878 } 3879 3880 return (0); 3881 } 3882 3883 static int 3884 bxe_stop_queue(struct bxe_softc *sc, 3885 int index) 3886 { 3887 struct bxe_fastpath *fp = &sc->fp[index]; 3888 struct ecore_queue_state_params q_params = { NULL }; 3889 int rc; 3890 3891 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); 3892 3893 q_params.q_obj = &sc->sp_objs[fp->index].q_obj; 3894 /* We want to wait for completion in this context */ 3895 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3896 3897 /* Stop the primary connection: */ 3898 3899 /* ...halt the connection */ 3900 q_params.cmd = ECORE_Q_CMD_HALT; 3901 rc = ecore_queue_state_change(sc, &q_params); 3902 if (rc) { 3903 return (rc); 3904 } 3905 3906 /* ...terminate the connection */ 3907 q_params.cmd = ECORE_Q_CMD_TERMINATE; 3908 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate)); 3909 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 3910 rc = ecore_queue_state_change(sc, &q_params); 3911 if (rc) { 3912 return (rc); 3913 } 3914 3915 /* ...delete cfc entry */ 3916 q_params.cmd = ECORE_Q_CMD_CFC_DEL; 3917 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); 3918 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 3919 return (ecore_queue_state_change(sc, &q_params)); 3920 } 3921 3922 /* wait for the outstanding SP commands */ 3923 static inline uint8_t 3924 bxe_wait_sp_comp(struct bxe_softc *sc, 3925 unsigned long mask) 3926 { 3927 unsigned long tmp; 3928 int tout = 5000; /* wait for 5 secs tops */ 3929 3930 while (tout--) { 3931 mb(); 3932 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { 3933 return (TRUE); 3934 } 3935 3936 DELAY(1000); 3937 } 3938 3939 mb(); 3940 3941 tmp = atomic_load_acq_long(&sc->sp_state); 3942 if (tmp & mask) { 3943 BLOGE(sc, "Filtering completion timed out: " 3944 "sp_state 0x%lx, mask 0x%lx\n", 3945 tmp, mask); 3946 return (FALSE); 3947 } 3948 3949 return (FALSE); 3950 } 3951 3952 static int 3953 bxe_func_stop(struct bxe_softc *sc) 3954 { 3955 struct ecore_func_state_params func_params = { NULL }; 3956 int rc; 3957 3958 /* prepare parameters for function state transitions */ 3959 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 3960 func_params.f_obj = &sc->func_obj; 3961 func_params.cmd = ECORE_F_CMD_STOP; 3962 3963 /* 3964 * Try to stop the function the 'good way'. If it fails (in case 3965 * of a parity error during bxe_chip_cleanup()) and we are 3966 * not in a debug mode, perform a state transaction in order to 3967 * enable further HW_RESET transaction. 3968 */ 3969 rc = ecore_func_state_change(sc, &func_params); 3970 if (rc) { 3971 BLOGE(sc, "FUNC_STOP ramrod failed. 
" 3972 "Running a dry transaction (%d)\n", rc); 3973 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 3974 return (ecore_func_state_change(sc, &func_params)); 3975 } 3976 3977 return (0); 3978 } 3979 3980 static int 3981 bxe_reset_hw(struct bxe_softc *sc, 3982 uint32_t load_code) 3983 { 3984 struct ecore_func_state_params func_params = { NULL }; 3985 3986 /* Prepare parameters for function state transitions */ 3987 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 3988 3989 func_params.f_obj = &sc->func_obj; 3990 func_params.cmd = ECORE_F_CMD_HW_RESET; 3991 3992 func_params.params.hw_init.load_phase = load_code; 3993 3994 return (ecore_func_state_change(sc, &func_params)); 3995 } 3996 3997 static void 3998 bxe_int_disable_sync(struct bxe_softc *sc, 3999 int disable_hw) 4000 { 4001 if (disable_hw) { 4002 /* prevent the HW from sending interrupts */ 4003 bxe_int_disable(sc); 4004 } 4005 4006 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */ 4007 /* make sure all ISRs are done */ 4008 4009 /* XXX make sure sp_task is not running */ 4010 /* cancel and flush work queues */ 4011 } 4012 4013 static void 4014 bxe_chip_cleanup(struct bxe_softc *sc, 4015 uint32_t unload_mode, 4016 uint8_t keep_link) 4017 { 4018 int port = SC_PORT(sc); 4019 struct ecore_mcast_ramrod_params rparam = { NULL }; 4020 uint32_t reset_code; 4021 int i, rc = 0; 4022 4023 bxe_drain_tx_queues(sc); 4024 4025 /* give HW time to discard old tx messages */ 4026 DELAY(1000); 4027 4028 /* Clean all ETH MACs */ 4029 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); 4030 if (rc < 0) { 4031 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc); 4032 } 4033 4034 /* Clean up UC list */ 4035 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); 4036 if (rc < 0) { 4037 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc); 4038 } 4039 4040 /* Disable LLH */ 4041 if (!CHIP_IS_E1(sc)) { 4042 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 4043 } 4044 4045 /* Set "drop all" to stop Rx */ 4046 4047 /* 4048 * We need to take the BXE_MCAST_LOCK() here in order to prevent 4049 * a race between the completion code and this code. 4050 */ 4051 BXE_MCAST_LOCK(sc); 4052 4053 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 4054 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 4055 } else { 4056 bxe_set_storm_rx_mode(sc); 4057 } 4058 4059 /* Clean up multicast configuration */ 4060 rparam.mcast_obj = &sc->mcast_obj; 4061 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 4062 if (rc < 0) { 4063 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); 4064 } 4065 4066 BXE_MCAST_UNLOCK(sc); 4067 4068 // XXX bxe_iov_chip_cleanup(sc); 4069 4070 /* 4071 * Send the UNLOAD_REQUEST to the MCP. This will return if 4072 * this function should perform FUNCTION, PORT, or COMMON HW 4073 * reset. 4074 */ 4075 reset_code = bxe_send_unload_req(sc, unload_mode); 4076 4077 /* 4078 * (assumption: No Attention from MCP at this stage) 4079 * PMF probably in the middle of TX disable/enable transaction 4080 */ 4081 rc = bxe_func_wait_started(sc); 4082 if (rc) { 4083 BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc); 4084 } 4085 4086 /* 4087 * Close multi and leading connections 4088 * Completions for ramrods are collected in a synchronous way 4089 */ 4090 for (i = 0; i < sc->num_queues; i++) { 4091 if (bxe_stop_queue(sc, i)) { 4092 goto unload_error; 4093 } 4094 } 4095 4096 /* 4097 * If SP settings didn't get completed so far - something 4098 * very wrong has happen. 
4099      */
4100     if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4101         BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4102     }
4103
4104 unload_error:
4105
4106     rc = bxe_func_stop(sc);
4107     if (rc) {
4108         BLOGE(sc, "Function stop failed!(%d)\n", rc);
4109     }
4110
4111     /* disable HW interrupts */
4112     bxe_int_disable_sync(sc, TRUE);
4113
4114     /* detach interrupts */
4115     bxe_interrupt_detach(sc);
4116
4117     /* Reset the chip */
4118     rc = bxe_reset_hw(sc, reset_code);
4119     if (rc) {
4120         BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4121     }
4122
4123     /* Report UNLOAD_DONE to the MCP */
4124     bxe_send_unload_done(sc, keep_link);
4125 }
4126
4127 static void
4128 bxe_disable_close_the_gate(struct bxe_softc *sc)
4129 {
4130     uint32_t val;
4131     int port = SC_PORT(sc);
4132
4133     BLOGD(sc, DBG_LOAD,
4134           "Disabling 'close the gates'\n");
4135
4136     if (CHIP_IS_E1(sc)) {
4137         uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4138                                MISC_REG_AEU_MASK_ATTN_FUNC_0;
4139         val = REG_RD(sc, addr);
4140         val &= ~(0x300);
4141         REG_WR(sc, addr, val);
4142     } else {
4143         val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4144         val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4145                  MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4146         REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4147     }
4148 }
4149
4150 /*
4151  * Cleans the objects that have internal lists without sending
4152  * ramrods. Should be run when interrupts are disabled.
4153  */
4154 static void
4155 bxe_squeeze_objects(struct bxe_softc *sc)
4156 {
4157     unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4158     struct ecore_mcast_ramrod_params rparam = { NULL };
4159     struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4160     int rc;
4161
4162     /* Clean up the MACs' object first... */
4163
4164     /* Wait for the completion of the requested commands */
4165     bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4166     /* Perform a dry cleanup */
4167     bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4168
4169     /* Clean the ETH primary MAC */
4170     bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4171     rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4172                              &ramrod_flags);
4173     if (rc != 0) {
4174         BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4175     }
4176
4177     /* Clean up the UC list */
4178     vlan_mac_flags = 0;
4179     bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4180     rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4181                              &ramrod_flags);
4182     if (rc != 0) {
4183         BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4184     }
4185
4186     /* Now clean the mcast object... */
4187
4188     rparam.mcast_obj = &sc->mcast_obj;
4189     bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4190
4191     /* Add a DEL command... 
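      * which clears the multicast configuration recorded in the object.
      * Because RAMROD_DRV_CLR_ONLY was set in rparam.ramrod_flags above,
      * this is expected to stay inside the driver: no ramrod is sent to
      * the chip, only the object's internal command list is walked. On
      * that reading of the ecore state machine, ecore_config_mcast()
      * returns 0 once the object is clean, a positive value while
      * commands are still pending, and a negative value on error, which
      * is why the loop below keeps re-issuing ECORE_MCAST_CMD_CONT until
      * rc reaches 0.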
     */
4192     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4193     if (rc < 0) {
4194         BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4195     }
4196
4197     /* now wait until all pending commands are cleared */
4198
4199     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4200     while (rc != 0) {
4201         if (rc < 0) {
4202             BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4203             return;
4204         }
4205
4206         rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4207     }
4208 }
4209
4210 /* stop the controller */
4211 static __noinline int
4212 bxe_nic_unload(struct bxe_softc *sc,
4213                uint32_t unload_mode,
4214                uint8_t keep_link)
4215 {
4216     uint8_t global = FALSE;
4217     uint32_t val;
4218     int i;
4219
4220     BXE_CORE_LOCK_ASSERT(sc);
4221
4222     if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4223
4224     for (i = 0; i < sc->num_queues; i++) {
4225         struct bxe_fastpath *fp;
4226
4227         fp = &sc->fp[i];
4228         BXE_FP_TX_LOCK(fp);
4229         BXE_FP_TX_UNLOCK(fp);
4230     }
4231
4232     BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4233
4234     /* mark the driver as unloaded in shmem2 */
4235     if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4236         val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4237         SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4238                   val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4239     }
4240
4241     if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4242         (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4243         /*
4244          * We can get here if the driver has been unloaded
4245          * during parity error recovery and is either waiting for a
4246          * leader to complete or for other functions to unload and
4247          * then ifconfig down has been issued. In this case we want to
4248          * unload and let the other functions complete the recovery
4249          * process.
4250          */
4251         sc->recovery_state = BXE_RECOVERY_DONE;
4252         sc->is_leader = 0;
4253         bxe_release_leader_lock(sc);
4254         mb();
4255
4256         BLOGD(sc, DBG_LOAD, "Releasing leadership...\n");
4257         BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4258               " state = 0x%x\n", sc->recovery_state, sc->state);
4259         return (-1);
4260     }
4261
4262     /*
4263      * Nothing to do during unload if the previous bxe_nic_load()
4264      * did not complete successfully - all resources were already released.
4265      */
4266     if ((sc->state == BXE_STATE_CLOSED) ||
4267         (sc->state == BXE_STATE_ERROR)) {
4268         return (0);
4269     }
4270
4271     sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4272     mb();
4273
4274     /* stop tx */
4275     bxe_tx_disable(sc);
4276
4277     sc->rx_mode = BXE_RX_MODE_NONE;
4278     /* XXX set rx mode ??? 
     */
4279
4280     if (IS_PF(sc) && !sc->grcdump_done) {
4281         /* set the ALWAYS_ALIVE bit in shmem */
4282         sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4283
4284         bxe_drv_pulse(sc);
4285
4286         bxe_stats_handle(sc, STATS_EVENT_STOP);
4287         bxe_save_statistics(sc);
4288     }
4289
4290     /* wait till the consumers catch up with the producers in all queues */
4291     bxe_drain_tx_queues(sc);
4292
4293     /* If a VF, indicate to the PF that this function is going down
4294      * (the PF will delete the sp elements and clear the initializations).
4295      */
4296     if (IS_VF(sc)) {
4297         ; /* bxe_vfpf_close_vf(sc); */
4298     } else if (unload_mode != UNLOAD_RECOVERY) {
4299         /* if this is a normal/close unload we need to clean up the chip */
4300         if (!sc->grcdump_done)
4301             bxe_chip_cleanup(sc, unload_mode, keep_link);
4302     } else {
4303         /* Send the UNLOAD_REQUEST to the MCP */
4304         bxe_send_unload_req(sc, unload_mode);
4305
4306         /*
4307          * Prevent host transactions from the functions on the engine
4308          * that does not reset the global blocks in case of a global
4309          * attention, once the global blocks are reset and the gates are
4310          * opened (the engine whose leader will perform the recovery
4311          * last).
4312          */
4313         if (!CHIP_IS_E1x(sc)) {
4314             bxe_pf_disable(sc);
4315         }
4316
4317         /* disable HW interrupts */
4318         bxe_int_disable_sync(sc, TRUE);
4319
4320         /* detach interrupts */
4321         bxe_interrupt_detach(sc);
4322
4323         /* Report UNLOAD_DONE to the MCP */
4324         bxe_send_unload_done(sc, FALSE);
4325     }
4326
4327     /*
4328      * At this stage no more interrupts will arrive, so we may safely clean
4329      * the queueable objects here in case they failed to get cleaned so far.
4330      */
4331     if (IS_PF(sc)) {
4332         bxe_squeeze_objects(sc);
4333     }
4334
4335     /* There should be no more pending SP commands at this stage */
4336     sc->sp_state = 0;
4337
4338     sc->port.pmf = 0;
4339
4340     bxe_free_fp_buffers(sc);
4341
4342     if (IS_PF(sc)) {
4343         bxe_free_mem(sc);
4344     }
4345
4346     bxe_free_fw_stats_mem(sc);
4347
4348     sc->state = BXE_STATE_CLOSED;
4349
4350     /*
4351      * Check if there are pending parity attentions. If there are, set
4352      * RECOVERY_IN_PROGRESS.
4353      */
4354     if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4355         bxe_set_reset_in_progress(sc);
4356
4357         /* Set RESET_IS_GLOBAL if needed */
4358         if (global) {
4359             bxe_set_reset_global(sc);
4360         }
4361     }
4362
4363     /*
4364      * The last driver must disable "close the gate" if there is no
4365      * parity attention or "process kill" pending.
4366      */
4367     if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4368         bxe_reset_is_done(sc, SC_PATH(sc))) {
4369         bxe_disable_close_the_gate(sc);
4370     }
4371
4372     BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4373
4374     return (0);
4375 }
4376
4377 /*
4378  * Called by the OS to set various media options (i.e. link, speed, etc.) when
4379  * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4380  */
4381 static int
4382 bxe_ifmedia_update(struct ifnet *ifp)
4383 {
4384     struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4385     struct ifmedia *ifm;
4386
4387     ifm = &sc->ifmedia;
4388
4389     /* We only support the Ethernet media type. */
4390     if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4391         return (EINVAL);
4392     }
4393
4394     switch (IFM_SUBTYPE(ifm->ifm_media)) {
4395     case IFM_AUTO:
4396         break;
4397     case IFM_10G_CX4:
4398     case IFM_10G_SR:
4399     case IFM_10G_T:
4400     case IFM_10G_TWINAX:
4401     default:
4402         /* We don't support changing the media type. 
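          * Only "autoselect" is accepted; a fixed media request, e.g.
          * (hypothetical ifconfig usage, assuming an interface bxe0):
          *
          *     # ifconfig bxe0 media autoselect   <- accepted (no-op)
          *     # ifconfig bxe0 media 10Gbase-SR   <- rejected, EINVAL
          *
          * falls through to the error return below.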
*/ 4403 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", 4404 IFM_SUBTYPE(ifm->ifm_media)); 4405 return (EINVAL); 4406 } 4407 4408 return (0); 4409 } 4410 4411 /* 4412 * Called by the OS to get the current media status (i.e. link, speed, etc.). 4413 */ 4414 static void 4415 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) 4416 { 4417 struct bxe_softc *sc = if_getsoftc(ifp); 4418 4419 /* Report link down if the driver isn't running. */ 4420 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 4421 ifmr->ifm_active |= IFM_NONE; 4422 return; 4423 } 4424 4425 /* Setup the default interface info. */ 4426 ifmr->ifm_status = IFM_AVALID; 4427 ifmr->ifm_active = IFM_ETHER; 4428 4429 if (sc->link_vars.link_up) { 4430 ifmr->ifm_status |= IFM_ACTIVE; 4431 } else { 4432 ifmr->ifm_active |= IFM_NONE; 4433 return; 4434 } 4435 4436 ifmr->ifm_active |= sc->media; 4437 4438 if (sc->link_vars.duplex == DUPLEX_FULL) { 4439 ifmr->ifm_active |= IFM_FDX; 4440 } else { 4441 ifmr->ifm_active |= IFM_HDX; 4442 } 4443 } 4444 4445 static void 4446 bxe_handle_chip_tq(void *context, 4447 int pending) 4448 { 4449 struct bxe_softc *sc = (struct bxe_softc *)context; 4450 long work = atomic_load_acq_long(&sc->chip_tq_flags); 4451 4452 switch (work) 4453 { 4454 4455 case CHIP_TQ_REINIT: 4456 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 4457 /* restart the interface */ 4458 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); 4459 bxe_periodic_stop(sc); 4460 BXE_CORE_LOCK(sc); 4461 bxe_stop_locked(sc); 4462 bxe_init_locked(sc); 4463 BXE_CORE_UNLOCK(sc); 4464 } 4465 break; 4466 4467 default: 4468 break; 4469 } 4470 } 4471 4472 /* 4473 * Handles any IOCTL calls from the operating system. 4474 * 4475 * Returns: 4476 * 0 = Success, >0 Failure 4477 */ 4478 static int 4479 bxe_ioctl(if_t ifp, 4480 u_long command, 4481 caddr_t data) 4482 { 4483 struct bxe_softc *sc = if_getsoftc(ifp); 4484 struct ifreq *ifr = (struct ifreq *)data; 4485 int mask = 0; 4486 int reinit = 0; 4487 int error = 0; 4488 4489 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); 4490 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); 4491 4492 switch (command) 4493 { 4494 case SIOCSIFMTU: 4495 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", 4496 ifr->ifr_mtu); 4497 4498 if (sc->mtu == ifr->ifr_mtu) { 4499 /* nothing to change */ 4500 break; 4501 } 4502 4503 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { 4504 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", 4505 ifr->ifr_mtu, mtu_min, mtu_max); 4506 error = EINVAL; 4507 break; 4508 } 4509 4510 atomic_store_rel_int((volatile unsigned int *)&sc->mtu, 4511 (unsigned long)ifr->ifr_mtu); 4512 /* 4513 atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp), 4514 (unsigned long)ifr->ifr_mtu); 4515 XXX - Not sure why it needs to be atomic 4516 */ 4517 if_setmtu(ifp, ifr->ifr_mtu); 4518 reinit = 1; 4519 break; 4520 4521 case SIOCSIFFLAGS: 4522 /* toggle the interface state up or down */ 4523 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); 4524 4525 BXE_CORE_LOCK(sc); 4526 /* check if the interface is up */ 4527 if (if_getflags(ifp) & IFF_UP) { 4528 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4529 /* set the receive mode flags */ 4530 bxe_set_rx_mode(sc); 4531 } else if(sc->state != BXE_STATE_DISABLED) { 4532 bxe_init_locked(sc); 4533 } 4534 } else { 4535 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4536 bxe_periodic_stop(sc); 4537 bxe_stop_locked(sc); 4538 } 4539 } 4540 BXE_CORE_UNLOCK(sc); 4541 4542 break; 4543 4544 case SIOCADDMULTI: 
4545     case SIOCDELMULTI:
4546         /* add/delete multicast addresses */
4547         BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4548
4549         /* check if the interface is up */
4550         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4551             /* set the receive mode flags */
4552             BXE_CORE_LOCK(sc);
4553             bxe_set_rx_mode(sc);
4554             BXE_CORE_UNLOCK(sc);
4555         }
4556
4557         break;
4558
4559     case SIOCSIFCAP:
4560         /* find out which capabilities have changed */
4561         mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4562
4563         BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4564               mask);
4565
4566         /* toggle the LRO capabilities enable flag */
4567         if (mask & IFCAP_LRO) {
4568             if_togglecapenable(ifp, IFCAP_LRO);
4569             BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4570                   (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4571             reinit = 1;
4572         }
4573
4574         /* toggle the TXCSUM checksum capabilities enable flag */
4575         if (mask & IFCAP_TXCSUM) {
4576             if_togglecapenable(ifp, IFCAP_TXCSUM);
4577             BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4578                   (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4579             if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4580                 if_sethwassistbits(ifp, (CSUM_IP |
4581                                          CSUM_TCP |
4582                                          CSUM_UDP |
4583                                          CSUM_TSO |
4584                                          CSUM_TCP_IPV6 |
4585                                          CSUM_UDP_IPV6), 0);
4586             } else {
4587                 if_clearhwassist(ifp); /* XXX */
4588             }
4589         }
4590
4591         /* toggle the RXCSUM checksum capabilities enable flag */
4592         if (mask & IFCAP_RXCSUM) {
4593             if_togglecapenable(ifp, IFCAP_RXCSUM);
4594             BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4595                   (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4596             if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4597                 if_sethwassistbits(ifp, (CSUM_IP |
4598                                          CSUM_TCP |
4599                                          CSUM_UDP |
4600                                          CSUM_TSO |
4601                                          CSUM_TCP_IPV6 |
4602                                          CSUM_UDP_IPV6), 0);
4603             } else {
4604                 if_clearhwassist(ifp); /* XXX */
4605             }
4606         }
4607
4608         /* toggle the TSO4 capabilities enabled flag */
4609         if (mask & IFCAP_TSO4) {
4610             if_togglecapenable(ifp, IFCAP_TSO4);
4611             BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4612                   (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4613         }
4614
4615         /* toggle the TSO6 capabilities enabled flag */
4616         if (mask & IFCAP_TSO6) {
4617             if_togglecapenable(ifp, IFCAP_TSO6);
4618             BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4619                   (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4620         }
4621
4622         /* toggle the VLAN_HWTSO capabilities enabled flag */
4623         if (mask & IFCAP_VLAN_HWTSO) {
4625             if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4626             BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4627                   (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4628         }
4629
4630         /* toggle the VLAN_HWCSUM capabilities enabled flag */
4631         if (mask & IFCAP_VLAN_HWCSUM) {
4632             /* XXX investigate this... */
4633             BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4634             error = EINVAL;
4635         }
4636
4637         /* toggle the VLAN_MTU capabilities enabled flag */
4638         if (mask & IFCAP_VLAN_MTU) {
4639             /* XXX investigate this... */
4640             BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4641             error = EINVAL;
4642         }
4643
4644         /* toggle the VLAN_HWTAGGING capabilities enabled flag */
4645         if (mask & IFCAP_VLAN_HWTAGGING) {
4646             /* XXX investigate this... */
4647             BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4648             error = EINVAL;
4649         }
4650
4651         /* toggle the VLAN_HWFILTER capabilities enabled flag */
4652         if (mask & IFCAP_VLAN_HWFILTER) {
4653             /* XXX investigate this... */
4654             BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4655             error = EINVAL;
4656         }
4657
4658         /* XXX not yet... 
4659 * IFCAP_WOL_MAGIC 4660 */ 4661 4662 break; 4663 4664 case SIOCSIFMEDIA: 4665 case SIOCGIFMEDIA: 4666 /* set/get interface media */ 4667 BLOGD(sc, DBG_IOCTL, 4668 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", 4669 (command & 0xff)); 4670 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 4671 break; 4672 4673 default: 4674 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", 4675 (command & 0xff)); 4676 error = ether_ioctl(ifp, command, data); 4677 break; 4678 } 4679 4680 if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { 4681 BLOGD(sc, DBG_LOAD | DBG_IOCTL, 4682 "Re-initializing hardware from IOCTL change\n"); 4683 bxe_periodic_stop(sc); 4684 BXE_CORE_LOCK(sc); 4685 bxe_stop_locked(sc); 4686 bxe_init_locked(sc); 4687 BXE_CORE_UNLOCK(sc); 4688 } 4689 4690 return (error); 4691 } 4692 4693 static __noinline void 4694 bxe_dump_mbuf(struct bxe_softc *sc, 4695 struct mbuf *m, 4696 uint8_t contents) 4697 { 4698 char * type; 4699 int i = 0; 4700 4701 if (!(sc->debug & DBG_MBUF)) { 4702 return; 4703 } 4704 4705 if (m == NULL) { 4706 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); 4707 return; 4708 } 4709 4710 while (m) { 4711 4712 #if __FreeBSD_version >= 1000000 4713 BLOGD(sc, DBG_MBUF, 4714 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", 4715 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); 4716 4717 if (m->m_flags & M_PKTHDR) { 4718 BLOGD(sc, DBG_MBUF, 4719 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", 4720 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, 4721 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 4722 } 4723 #else 4724 BLOGD(sc, DBG_MBUF, 4725 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", 4726 i, m, m->m_len, m->m_flags, 4727 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data); 4728 4729 if (m->m_flags & M_PKTHDR) { 4730 BLOGD(sc, DBG_MBUF, 4731 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", 4732 i, m->m_pkthdr.len, m->m_flags, 4733 "\20\12M_BCAST\13M_MCAST\14M_FRAG" 4734 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG" 4735 "\22M_PROMISC\23M_NOFREE", 4736 (int)m->m_pkthdr.csum_flags, 4737 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS" 4738 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED" 4739 "\12CSUM_IP_VALID\13CSUM_DATA_VALID" 4740 "\14CSUM_PSEUDO_HDR"); 4741 } 4742 #endif /* #if __FreeBSD_version >= 1000000 */ 4743 4744 if (m->m_flags & M_EXT) { 4745 switch (m->m_ext.ext_type) { 4746 case EXT_CLUSTER: type = "EXT_CLUSTER"; break; 4747 case EXT_SFBUF: type = "EXT_SFBUF"; break; 4748 case EXT_JUMBOP: type = "EXT_JUMBOP"; break; 4749 case EXT_JUMBO9: type = "EXT_JUMBO9"; break; 4750 case EXT_JUMBO16: type = "EXT_JUMBO16"; break; 4751 case EXT_PACKET: type = "EXT_PACKET"; break; 4752 case EXT_MBUF: type = "EXT_MBUF"; break; 4753 case EXT_NET_DRV: type = "EXT_NET_DRV"; break; 4754 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break; 4755 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break; 4756 case EXT_EXTREF: type = "EXT_EXTREF"; break; 4757 default: type = "UNKNOWN"; break; 4758 } 4759 4760 BLOGD(sc, DBG_MBUF, 4761 "%02d: - m_ext: %p ext_size=%d type=%s\n", 4762 i, m->m_ext.ext_buf, m->m_ext.ext_size, type); 4763 } 4764 4765 if (contents) { 4766 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE); 4767 } 4768 4769 m = m->m_next; 4770 i++; 4771 } 4772 } 4773 4774 /* 4775 * Checks to ensure the 13 bd sliding window is >= MSS for TSO. 4776 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS. 4777 * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD 4778 * The headers comes in a separate bd in FreeBSD so 13-3=10. 
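 *
 * Worked example (illustrative numbers only): with an MSS of 1448 bytes
 * and 16 DMA segments, the first window sums the lengths of segments
 * 1..10 (segment 0 is assumed to hold the headers). The window then
 * slides one segment at a time, dropping the oldest length and adding
 * the next. If any window sums to fewer bytes than the MSS, the frame
 * must be defragmented before the chip can segment it correctly.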
4779 * Returns: 0 if OK to send, 1 if packet needs further defragmentation 4780 */ 4781 static int 4782 bxe_chktso_window(struct bxe_softc *sc, 4783 int nsegs, 4784 bus_dma_segment_t *segs, 4785 struct mbuf *m) 4786 { 4787 uint32_t num_wnds, wnd_size, wnd_sum; 4788 int32_t frag_idx, wnd_idx; 4789 unsigned short lso_mss; 4790 int defrag; 4791 4792 defrag = 0; 4793 wnd_sum = 0; 4794 wnd_size = 10; 4795 num_wnds = nsegs - wnd_size; 4796 lso_mss = htole16(m->m_pkthdr.tso_segsz); 4797 4798 /* 4799 * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the 4800 * first window sum of data while skipping the first assuming it is the 4801 * header in FreeBSD. 4802 */ 4803 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) { 4804 wnd_sum += htole16(segs[frag_idx].ds_len); 4805 } 4806 4807 /* check the first 10 bd window size */ 4808 if (wnd_sum < lso_mss) { 4809 return (1); 4810 } 4811 4812 /* run through the windows */ 4813 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) { 4814 /* subtract the first mbuf->m_len of the last wndw(-header) */ 4815 wnd_sum -= htole16(segs[wnd_idx+1].ds_len); 4816 /* add the next mbuf len to the len of our new window */ 4817 wnd_sum += htole16(segs[frag_idx].ds_len); 4818 if (wnd_sum < lso_mss) { 4819 return (1); 4820 } 4821 } 4822 4823 return (0); 4824 } 4825 4826 static uint8_t 4827 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp, 4828 struct mbuf *m, 4829 uint32_t *parsing_data) 4830 { 4831 struct ether_vlan_header *eh = NULL; 4832 struct ip *ip4 = NULL; 4833 struct ip6_hdr *ip6 = NULL; 4834 caddr_t ip = NULL; 4835 struct tcphdr *th = NULL; 4836 int e_hlen, ip_hlen, l4_off; 4837 uint16_t proto; 4838 4839 if (m->m_pkthdr.csum_flags == CSUM_IP) { 4840 /* no L4 checksum offload needed */ 4841 return (0); 4842 } 4843 4844 /* get the Ethernet header */ 4845 eh = mtod(m, struct ether_vlan_header *); 4846 4847 /* handle VLAN encapsulation if present */ 4848 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 4849 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 4850 proto = ntohs(eh->evl_proto); 4851 } else { 4852 e_hlen = ETHER_HDR_LEN; 4853 proto = ntohs(eh->evl_encap_proto); 4854 } 4855 4856 switch (proto) { 4857 case ETHERTYPE_IP: 4858 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 4859 ip4 = (m->m_len < sizeof(struct ip)) ? 4860 (struct ip *)m->m_next->m_data : 4861 (struct ip *)(m->m_data + e_hlen); 4862 /* ip_hl is number of 32-bit words */ 4863 ip_hlen = (ip4->ip_hl << 2); 4864 ip = (caddr_t)ip4; 4865 break; 4866 case ETHERTYPE_IPV6: 4867 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 4868 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 4869 (struct ip6_hdr *)m->m_next->m_data : 4870 (struct ip6_hdr *)(m->m_data + e_hlen); 4871 /* XXX cannot support offload with IPv6 extensions */ 4872 ip_hlen = sizeof(struct ip6_hdr); 4873 ip = (caddr_t)ip6; 4874 break; 4875 default: 4876 /* We can't offload in this case... */ 4877 /* XXX error stat ??? 
*/ 4878 return (0); 4879 } 4880 4881 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 4882 l4_off = (e_hlen + ip_hlen); 4883 4884 *parsing_data |= 4885 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 4886 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); 4887 4888 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 4889 CSUM_TSO | 4890 CSUM_TCP_IPV6)) { 4891 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 4892 th = (struct tcphdr *)(ip + ip_hlen); 4893 /* th_off is number of 32-bit words */ 4894 *parsing_data |= ((th->th_off << 4895 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 4896 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); 4897 return (l4_off + (th->th_off << 2)); /* entire header length */ 4898 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 4899 CSUM_UDP_IPV6)) { 4900 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 4901 return (l4_off + sizeof(struct udphdr)); /* entire header length */ 4902 } else { 4903 /* XXX error stat ??? */ 4904 return (0); 4905 } 4906 } 4907 4908 static uint8_t 4909 bxe_set_pbd_csum(struct bxe_fastpath *fp, 4910 struct mbuf *m, 4911 struct eth_tx_parse_bd_e1x *pbd) 4912 { 4913 struct ether_vlan_header *eh = NULL; 4914 struct ip *ip4 = NULL; 4915 struct ip6_hdr *ip6 = NULL; 4916 caddr_t ip = NULL; 4917 struct tcphdr *th = NULL; 4918 struct udphdr *uh = NULL; 4919 int e_hlen, ip_hlen; 4920 uint16_t proto; 4921 uint8_t hlen; 4922 uint16_t tmp_csum; 4923 uint32_t *tmp_uh; 4924 4925 /* get the Ethernet header */ 4926 eh = mtod(m, struct ether_vlan_header *); 4927 4928 /* handle VLAN encapsulation if present */ 4929 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 4930 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 4931 proto = ntohs(eh->evl_proto); 4932 } else { 4933 e_hlen = ETHER_HDR_LEN; 4934 proto = ntohs(eh->evl_encap_proto); 4935 } 4936 4937 switch (proto) { 4938 case ETHERTYPE_IP: 4939 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 4940 ip4 = (m->m_len < sizeof(struct ip)) ? 4941 (struct ip *)m->m_next->m_data : 4942 (struct ip *)(m->m_data + e_hlen); 4943 /* ip_hl is number of 32-bit words */ 4944 ip_hlen = (ip4->ip_hl << 1); 4945 ip = (caddr_t)ip4; 4946 break; 4947 case ETHERTYPE_IPV6: 4948 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 4949 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 4950 (struct ip6_hdr *)m->m_next->m_data : 4951 (struct ip6_hdr *)(m->m_data + e_hlen); 4952 /* XXX cannot support offload with IPv6 extensions */ 4953 ip_hlen = (sizeof(struct ip6_hdr) >> 1); 4954 ip = (caddr_t)ip6; 4955 break; 4956 default: 4957 /* We can't offload in this case... */ 4958 /* XXX error stat ??? 
*/ 4959 return (0); 4960 } 4961 4962 hlen = (e_hlen >> 1); 4963 4964 /* note that rest of global_data is indirectly zeroed here */ 4965 if (m->m_flags & M_VLANTAG) { 4966 pbd->global_data = 4967 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 4968 } else { 4969 pbd->global_data = htole16(hlen); 4970 } 4971 4972 pbd->ip_hlen_w = ip_hlen; 4973 4974 hlen += pbd->ip_hlen_w; 4975 4976 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 4977 4978 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 4979 CSUM_TSO | 4980 CSUM_TCP_IPV6)) { 4981 th = (struct tcphdr *)(ip + (ip_hlen << 1)); 4982 /* th_off is number of 32-bit words */ 4983 hlen += (uint16_t)(th->th_off << 1); 4984 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 4985 CSUM_UDP_IPV6)) { 4986 uh = (struct udphdr *)(ip + (ip_hlen << 1)); 4987 hlen += (sizeof(struct udphdr) / 2); 4988 } else { 4989 /* valid case as only CSUM_IP was set */ 4990 return (0); 4991 } 4992 4993 pbd->total_hlen_w = htole16(hlen); 4994 4995 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 4996 CSUM_TSO | 4997 CSUM_TCP_IPV6)) { 4998 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 4999 pbd->tcp_pseudo_csum = ntohs(th->th_sum); 5000 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5001 CSUM_UDP_IPV6)) { 5002 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5003 5004 /* 5005 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP 5006 * checksums and does not know anything about the UDP header and where 5007 * the checksum field is located. It only knows about TCP. Therefore 5008 * we "lie" to the hardware for outgoing UDP packets w/ checksum 5009 * offload. Since the checksum field offset for TCP is 16 bytes and 5010 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 5011 * bytes less than the start of the UDP header. This allows the 5012 * hardware to write the checksum in the correct spot. But the 5013 * hardware will compute a checksum which includes the last 10 bytes 5014 * of the IP header. To correct this we tweak the stack computed 5015 * pseudo checksum by folding in the calculation of the inverse 5016 * checksum for those final 10 bytes of the IP header. This allows 5017 * the correct checksum to be computed by the hardware. 5018 */ 5019 5020 /* set pointer 10 bytes before UDP header */ 5021 tmp_uh = (uint32_t *)((uint8_t *)uh - 10); 5022 5023 /* calculate a pseudo header checksum over the first 10 bytes */ 5024 tmp_csum = in_pseudo(*tmp_uh, 5025 *(tmp_uh + 1), 5026 *(uint16_t *)(tmp_uh + 2)); 5027 5028 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); 5029 } 5030 5031 return (hlen * 2); /* entire header length, number of bytes */ 5032 } 5033 5034 static void 5035 bxe_set_pbd_lso_e2(struct mbuf *m, 5036 uint32_t *parsing_data) 5037 { 5038 *parsing_data |= ((m->m_pkthdr.tso_segsz << 5039 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 5040 ETH_TX_PARSE_BD_E2_LSO_MSS); 5041 5042 /* XXX test for IPv6 with extension header... */ 5043 } 5044 5045 static void 5046 bxe_set_pbd_lso(struct mbuf *m, 5047 struct eth_tx_parse_bd_e1x *pbd) 5048 { 5049 struct ether_vlan_header *eh = NULL; 5050 struct ip *ip = NULL; 5051 struct tcphdr *th = NULL; 5052 int e_hlen; 5053 5054 /* get the Ethernet header */ 5055 eh = mtod(m, struct ether_vlan_header *); 5056 5057 /* handle VLAN encapsulation if present */ 5058 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? 
5059              (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5060
5061     /* get the IP and TCP header; with LSO the entire header is in the first mbuf */
5062     /* XXX assuming IPv4 */
5063     ip = (struct ip *)(m->m_data + e_hlen);
5064     th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5065
5066     pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5067     pbd->tcp_send_seq = ntohl(th->th_seq);
5068     pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5069
5070 #if 1
5071     /* XXX IPv4 */
5072     pbd->ip_id = ntohs(ip->ip_id);
5073     pbd->tcp_pseudo_csum =
5074         ntohs(in_pseudo(ip->ip_src.s_addr,
5075                         ip->ip_dst.s_addr,
5076                         htons(IPPROTO_TCP)));
5077 #else
5078     /* XXX IPv6 */
5079     pbd->tcp_pseudo_csum =
5080         ntohs(in_pseudo(&ip6->ip6_src,
5081                         &ip6->ip6_dst,
5082                         htons(IPPROTO_TCP)));
5083 #endif
5084
5085     pbd->global_data |=
5086         htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5087 }
5088
5089 /*
5090  * Encapsulates an mbuf cluster into the tx bd chain and makes the memory
5091  * visible to the controller.
5092  *
5093  * If an mbuf is submitted to this routine and cannot be given to the
5094  * controller (e.g. it has too many fragments) then the function may free
5095  * the mbuf and return to the caller.
5096  *
5097  * Returns:
5098  *   0 = Success, !0 = Failure
5099  *   Note the side effect that an mbuf may be freed if it causes a problem.
5100  */
5101 static int
5102 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5103 {
5104     bus_dma_segment_t segs[32];
5105     struct mbuf *m0;
5106     struct bxe_sw_tx_bd *tx_buf;
5107     struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5108     struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5109     /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5110     struct eth_tx_bd *tx_data_bd;
5111     struct eth_tx_bd *tx_total_pkt_size_bd;
5112     struct eth_tx_start_bd *tx_start_bd;
5113     uint16_t bd_prod, pkt_prod, total_pkt_size;
5114     uint8_t mac_type;
5115     int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5116     struct bxe_softc *sc;
5117     uint16_t tx_bd_avail;
5118     struct ether_vlan_header *eh;
5119     uint32_t pbd_e2_parsing_data = 0;
5120     uint8_t hlen = 0;
5121     int tmp_bd;
5122     int i;
5123
5124     sc = fp->sc;
5125
5126 #if __FreeBSD_version >= 800000
5127     M_ASSERTPKTHDR(*m_head);
5128 #endif /* #if __FreeBSD_version >= 800000 */
5129
5130     m0 = *m_head;
5131     rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5132     tx_start_bd = NULL;
5133     tx_data_bd = NULL;
5134     tx_total_pkt_size_bd = NULL;
5135
5136     /* get the H/W pointer for packets and BDs */
5137     pkt_prod = fp->tx_pkt_prod;
5138     bd_prod = fp->tx_bd_prod;
5139
5140     mac_type = UNICAST_ADDRESS;
5141
5142     /* map the mbuf into the next open DMAable memory */
5143     tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5144     error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5145                                     tx_buf->m_map, m0,
5146                                     segs, &nsegs, BUS_DMA_NOWAIT);
5147
5148     /* mapping errors */
5149     if (__predict_false(error != 0)) {
5150         fp->eth_q_stats.tx_dma_mapping_failure++;
5151         if (error == ENOMEM) {
5152             /* resource issue, try again later */
5153             rc = ENOMEM;
5154         } else if (error == EFBIG) {
5155             /* possibly recoverable with defragmentation */
5156             fp->eth_q_stats.mbuf_defrag_attempts++;
5157             m0 = m_defrag(*m_head, M_NOWAIT);
5158             if (m0 == NULL) {
5159                 fp->eth_q_stats.mbuf_defrag_failures++;
5160                 rc = ENOBUFS;
5161             } else {
5162                 /* defrag successful, try mapping again */
5163                 *m_head = m0;
5164                 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5165                                                 tx_buf->m_map, m0,
5166                                                 segs, &nsegs, BUS_DMA_NOWAIT);
5167                 if (error) {
5168                     fp->eth_q_stats.tx_dma_mapping_failure++;
5169                     rc = error;
5170                 }
5171             }
5172         } else {
5173             /* unknown, unrecoverable mapping error */
5174             BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5175             bxe_dump_mbuf(sc, m0, FALSE);
5176             rc = error;
5177         }
5178
5179         goto bxe_tx_encap_continue;
5180     }
5181
5182     tx_bd_avail = bxe_tx_avail(sc, fp);
5183
5184     /* make sure there is enough room in the send queue */
5185     if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5186         /* Recoverable, try again later. */
5187         fp->eth_q_stats.tx_hw_queue_full++;
5188         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5189         rc = ENOMEM;
5190         goto bxe_tx_encap_continue;
5191     }
5192
5193     /* capture the current H/W TX chain high watermark */
5194     if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5195                         (TX_BD_USABLE - tx_bd_avail))) {
5196         fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5197     }
5198
5199     /* make sure it fits in the packet window */
5200     if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5201         /*
5202          * The mbuf may be too big for the controller to handle. If the
5203          * frame is a TSO frame we'll need to do an additional check.
5204          */
5205         if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5206             if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5207                 goto bxe_tx_encap_continue; /* OK to send */
5208             } else {
5209                 fp->eth_q_stats.tx_window_violation_tso++;
5210             }
5211         } else {
5212             fp->eth_q_stats.tx_window_violation_std++;
5213         }
5214
5215         /* let's try to defragment this mbuf and remap it */
5216         fp->eth_q_stats.mbuf_defrag_attempts++;
5217         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5218
5219         m0 = m_defrag(*m_head, M_NOWAIT);
5220         if (m0 == NULL) {
5221             fp->eth_q_stats.mbuf_defrag_failures++;
5222             /* Ugh, just drop the frame... :( */
5223             rc = ENOBUFS;
5224         } else {
5225             /* defrag successful, try mapping again */
5226             *m_head = m0;
5227             error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5228                                             tx_buf->m_map, m0,
5229                                             segs, &nsegs, BUS_DMA_NOWAIT);
5230             if (error) {
5231                 fp->eth_q_stats.tx_dma_mapping_failure++;
5232                 /* No sense in trying to defrag/copy the chain, drop it. 
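                 * A second mapping failure, after m_defrag() has already
                 * collapsed the chain into a contiguous buffer, is assumed
                 * not to be recoverable by further copying.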
:( */ 5233 rc = error; 5234 } 5235 else { 5236 /* if the chain is still too long then drop it */ 5237 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5238 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5239 rc = ENODEV; 5240 } 5241 } 5242 } 5243 } 5244 5245 bxe_tx_encap_continue: 5246 5247 /* Check for errors */ 5248 if (rc) { 5249 if (rc == ENOMEM) { 5250 /* recoverable try again later */ 5251 } else { 5252 fp->eth_q_stats.tx_soft_errors++; 5253 fp->eth_q_stats.mbuf_alloc_tx--; 5254 m_freem(*m_head); 5255 *m_head = NULL; 5256 } 5257 5258 return (rc); 5259 } 5260 5261 /* set flag according to packet type (UNICAST_ADDRESS is default) */ 5262 if (m0->m_flags & M_BCAST) { 5263 mac_type = BROADCAST_ADDRESS; 5264 } else if (m0->m_flags & M_MCAST) { 5265 mac_type = MULTICAST_ADDRESS; 5266 } 5267 5268 /* store the mbuf into the mbuf ring */ 5269 tx_buf->m = m0; 5270 tx_buf->first_bd = fp->tx_bd_prod; 5271 tx_buf->flags = 0; 5272 5273 /* prepare the first transmit (start) BD for the mbuf */ 5274 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; 5275 5276 BLOGD(sc, DBG_TX, 5277 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", 5278 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 5279 5280 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 5281 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 5282 tx_start_bd->nbytes = htole16(segs[0].ds_len); 5283 total_pkt_size += tx_start_bd->nbytes; 5284 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 5285 5286 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 5287 5288 /* all frames have at least Start BD + Parsing BD */ 5289 nbds = nsegs + 1; 5290 tx_start_bd->nbd = htole16(nbds); 5291 5292 if (m0->m_flags & M_VLANTAG) { 5293 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); 5294 tx_start_bd->bd_flags.as_bitfield |= 5295 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 5296 } else { 5297 /* vf tx, start bd must hold the ethertype for fw to enforce it */ 5298 if (IS_VF(sc)) { 5299 /* map ethernet header to find type and header length */ 5300 eh = mtod(m0, struct ether_vlan_header *); 5301 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; 5302 } else { 5303 /* used by FW for packet accounting */ 5304 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); 5305 } 5306 } 5307 5308 /* 5309 * add a parsing BD from the chain. 
The parsing BD is always added 5310 * though it is only used for TSO and chksum 5311 */ 5312 bd_prod = TX_BD_NEXT(bd_prod); 5313 5314 if (m0->m_pkthdr.csum_flags) { 5315 if (m0->m_pkthdr.csum_flags & CSUM_IP) { 5316 fp->eth_q_stats.tx_ofld_frames_csum_ip++; 5317 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; 5318 } 5319 5320 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { 5321 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5322 ETH_TX_BD_FLAGS_L4_CSUM); 5323 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { 5324 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5325 ETH_TX_BD_FLAGS_IS_UDP | 5326 ETH_TX_BD_FLAGS_L4_CSUM); 5327 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || 5328 (m0->m_pkthdr.csum_flags & CSUM_TSO)) { 5329 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 5330 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { 5331 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | 5332 ETH_TX_BD_FLAGS_IS_UDP); 5333 } 5334 } 5335 5336 if (!CHIP_IS_E1x(sc)) { 5337 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; 5338 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 5339 5340 if (m0->m_pkthdr.csum_flags) { 5341 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); 5342 } 5343 5344 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, 5345 mac_type); 5346 } else { 5347 uint16_t global_data = 0; 5348 5349 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; 5350 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 5351 5352 if (m0->m_pkthdr.csum_flags) { 5353 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); 5354 } 5355 5356 SET_FLAG(global_data, 5357 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); 5358 pbd_e1x->global_data |= htole16(global_data); 5359 } 5360 5361 /* setup the parsing BD with TSO specific info */ 5362 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5363 fp->eth_q_stats.tx_ofld_frames_lso++; 5364 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 5365 5366 if (__predict_false(tx_start_bd->nbytes > hlen)) { 5367 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; 5368 5369 /* split the first BD into header/data making the fw job easy */ 5370 nbds++; 5371 tx_start_bd->nbd = htole16(nbds); 5372 tx_start_bd->nbytes = htole16(hlen); 5373 5374 bd_prod = TX_BD_NEXT(bd_prod); 5375 5376 /* new transmit BD after the tx_parse_bd */ 5377 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5378 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); 5379 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); 5380 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); 5381 if (tx_total_pkt_size_bd == NULL) { 5382 tx_total_pkt_size_bd = tx_data_bd; 5383 } 5384 5385 BLOGD(sc, DBG_TX, 5386 "TSO split header size is %d (%x:%x) nbds %d\n", 5387 le16toh(tx_start_bd->nbytes), 5388 le32toh(tx_start_bd->addr_hi), 5389 le32toh(tx_start_bd->addr_lo), 5390 nbds); 5391 } 5392 5393 if (!CHIP_IS_E1x(sc)) { 5394 bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); 5395 } else { 5396 bxe_set_pbd_lso(m0, pbd_e1x); 5397 } 5398 } 5399 5400 if (pbd_e2_parsing_data) { 5401 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); 5402 } 5403 5404 /* prepare remaining BDs, start tx bd contains first seg/frag */ 5405 for (i = 1; i < nsegs ; i++) { 5406 bd_prod = TX_BD_NEXT(bd_prod); 5407 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5408 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); 5409 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); 5410 tx_data_bd->nbytes = htole16(segs[i].ds_len); 5411 if 
(tx_total_pkt_size_bd == NULL) { 5412 tx_total_pkt_size_bd = tx_data_bd; 5413 } 5414 total_pkt_size += tx_data_bd->nbytes; 5415 } 5416 5417 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); 5418 5419 if (tx_total_pkt_size_bd != NULL) { 5420 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; 5421 } 5422 5423 if (__predict_false(sc->debug & DBG_TX)) { 5424 tmp_bd = tx_buf->first_bd; 5425 for (i = 0; i < nbds; i++) 5426 { 5427 if (i == 0) { 5428 BLOGD(sc, DBG_TX, 5429 "TX Strt: %p bd=%d nbd=%d vlan=0x%x " 5430 "bd_flags=0x%x hdr_nbds=%d\n", 5431 tx_start_bd, 5432 tmp_bd, 5433 le16toh(tx_start_bd->nbd), 5434 le16toh(tx_start_bd->vlan_or_ethertype), 5435 tx_start_bd->bd_flags.as_bitfield, 5436 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); 5437 } else if (i == 1) { 5438 if (pbd_e1x) { 5439 BLOGD(sc, DBG_TX, 5440 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " 5441 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " 5442 "tcp_seq=%u total_hlen_w=%u\n", 5443 pbd_e1x, 5444 tmp_bd, 5445 pbd_e1x->global_data, 5446 pbd_e1x->ip_hlen_w, 5447 pbd_e1x->ip_id, 5448 pbd_e1x->lso_mss, 5449 pbd_e1x->tcp_flags, 5450 pbd_e1x->tcp_pseudo_csum, 5451 pbd_e1x->tcp_send_seq, 5452 le16toh(pbd_e1x->total_hlen_w)); 5453 } else { /* if (pbd_e2) */ 5454 BLOGD(sc, DBG_TX, 5455 "-> Parse: %p bd=%d dst=%02x:%02x:%02x " 5456 "src=%02x:%02x:%02x parsing_data=0x%x\n", 5457 pbd_e2, 5458 tmp_bd, 5459 pbd_e2->data.mac_addr.dst_hi, 5460 pbd_e2->data.mac_addr.dst_mid, 5461 pbd_e2->data.mac_addr.dst_lo, 5462 pbd_e2->data.mac_addr.src_hi, 5463 pbd_e2->data.mac_addr.src_mid, 5464 pbd_e2->data.mac_addr.src_lo, 5465 pbd_e2->parsing_data); 5466 } 5467 } 5468 5469 if (i != 1) { /* skip parse db as it doesn't hold data */ 5470 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; 5471 BLOGD(sc, DBG_TX, 5472 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n", 5473 tx_data_bd, 5474 tmp_bd, 5475 le16toh(tx_data_bd->nbytes), 5476 le32toh(tx_data_bd->addr_hi), 5477 le32toh(tx_data_bd->addr_lo)); 5478 } 5479 5480 tmp_bd = TX_BD_NEXT(tmp_bd); 5481 } 5482 } 5483 5484 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); 5485 5486 /* update TX BD producer index value for next TX */ 5487 bd_prod = TX_BD_NEXT(bd_prod); 5488 5489 /* 5490 * If the chain of tx_bd's describing this frame is adjacent to or spans 5491 * an eth_tx_next_bd element then we need to increment the nbds value. 5492 */ 5493 if (TX_BD_IDX(bd_prod) < nbds) { 5494 nbds++; 5495 } 5496 5497 /* don't allow reordering of writes for nbd and packets */ 5498 mb(); 5499 5500 fp->tx_db.data.prod += nbds; 5501 5502 /* producer points to the next free tx_bd at this point */ 5503 fp->tx_pkt_prod++; 5504 fp->tx_bd_prod = bd_prod; 5505 5506 DOORBELL(sc, fp->index, fp->tx_db.raw); 5507 5508 fp->eth_q_stats.tx_pkts++; 5509 5510 /* Prevent speculative reads from getting ahead of the status block. */ 5511 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 5512 0, 0, BUS_SPACE_BARRIER_READ); 5513 5514 /* Prevent speculative reads from getting ahead of the doorbell. 
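     * A note on the pairing: the BAR0 barrier above orders status block
     * reads while this BAR2 barrier orders doorbell accesses. On strongly
     * ordered platforms these may compile to very little, but on weakly
     * ordered CPUs they keep a later status block poll from being
     * speculated past the DOORBELL() write that published the new
     * tx_bd_prod value to the chip.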
*/ 5515 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 5516 0, 0, BUS_SPACE_BARRIER_READ); 5517 5518 return (0); 5519 } 5520 5521 static void 5522 bxe_tx_start_locked(struct bxe_softc *sc, 5523 if_t ifp, 5524 struct bxe_fastpath *fp) 5525 { 5526 struct mbuf *m = NULL; 5527 int tx_count = 0; 5528 uint16_t tx_bd_avail; 5529 5530 BXE_FP_TX_LOCK_ASSERT(fp); 5531 5532 /* keep adding entries while there are frames to send */ 5533 while (!if_sendq_empty(ifp)) { 5534 5535 /* 5536 * check for any frames to send 5537 * dequeue can still be NULL even if queue is not empty 5538 */ 5539 m = if_dequeue(ifp); 5540 if (__predict_false(m == NULL)) { 5541 break; 5542 } 5543 5544 /* the mbuf now belongs to us */ 5545 fp->eth_q_stats.mbuf_alloc_tx++; 5546 5547 /* 5548 * Put the frame into the transmit ring. If we don't have room, 5549 * place the mbuf back at the head of the TX queue, set the 5550 * OACTIVE flag, and wait for the NIC to drain the chain. 5551 */ 5552 if (__predict_false(bxe_tx_encap(fp, &m))) { 5553 fp->eth_q_stats.tx_encap_failures++; 5554 if (m != NULL) { 5555 /* mark the TX queue as full and return the frame */ 5556 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 5557 if_sendq_prepend(ifp, m); 5558 fp->eth_q_stats.mbuf_alloc_tx--; 5559 fp->eth_q_stats.tx_queue_xoff++; 5560 } 5561 5562 /* stop looking for more work */ 5563 break; 5564 } 5565 5566 /* the frame was enqueued successfully */ 5567 tx_count++; 5568 5569 /* send a copy of the frame to any BPF listeners. */ 5570 if_etherbpfmtap(ifp, m); 5571 5572 tx_bd_avail = bxe_tx_avail(sc, fp); 5573 5574 /* handle any completions if we're running low */ 5575 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5576 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5577 bxe_txeof(sc, fp); 5578 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 5579 break; 5580 } 5581 } 5582 } 5583 5584 /* all TX packets were dequeued and/or the tx ring is full */ 5585 if (tx_count > 0) { 5586 /* reset the TX watchdog timeout timer */ 5587 fp->watchdog_timer = BXE_TX_TIMEOUT; 5588 } 5589 } 5590 5591 /* Legacy (non-RSS) dispatch routine */ 5592 static void 5593 bxe_tx_start(if_t ifp) 5594 { 5595 struct bxe_softc *sc; 5596 struct bxe_fastpath *fp; 5597 5598 sc = if_getsoftc(ifp); 5599 5600 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 5601 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 5602 return; 5603 } 5604 5605 if (!sc->link_vars.link_up) { 5606 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 5607 return; 5608 } 5609 5610 fp = &sc->fp[0]; 5611 5612 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 5613 fp->eth_q_stats.tx_queue_full_return++; 5614 return; 5615 } 5616 5617 BXE_FP_TX_LOCK(fp); 5618 bxe_tx_start_locked(sc, ifp, fp); 5619 BXE_FP_TX_UNLOCK(fp); 5620 } 5621 5622 #if __FreeBSD_version >= 901504 5623 5624 static int 5625 bxe_tx_mq_start_locked(struct bxe_softc *sc, 5626 if_t ifp, 5627 struct bxe_fastpath *fp, 5628 struct mbuf *m) 5629 { 5630 struct buf_ring *tx_br = fp->tx_br; 5631 struct mbuf *next; 5632 int depth, rc, tx_count; 5633 uint16_t tx_bd_avail; 5634 5635 rc = tx_count = 0; 5636 5637 BXE_FP_TX_LOCK_ASSERT(fp); 5638 5639 if (!tx_br) { 5640 BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); 5641 return (EINVAL); 5642 } 5643 5644 if (m != NULL) { 5645 rc = drbr_enqueue(ifp, tx_br, m); 5646 if (rc != 0) { 5647 fp->eth_q_stats.tx_soft_errors++; 5648 goto bxe_tx_mq_start_locked_exit; 5649 } 5650 } 5651 5652 if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 5653 
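        /*
         * Note: a frame that was enqueued to the drbr above is not
         * dropped here; the non-empty drbr check at the exit label
         * schedules tx_timeout_task, which retries the drain later.
         */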
fp->eth_q_stats.tx_request_link_down_failures++; 5654 goto bxe_tx_mq_start_locked_exit; 5655 } 5656 5657 /* fetch the depth of the driver queue */ 5658 depth = drbr_inuse_drv(ifp, tx_br); 5659 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { 5660 fp->eth_q_stats.tx_max_drbr_queue_depth = depth; 5661 } 5662 5663 /* keep adding entries while there are frames to send */ 5664 while ((next = drbr_peek(ifp, tx_br)) != NULL) { 5665 /* handle any completions if we're running low */ 5666 tx_bd_avail = bxe_tx_avail(sc, fp); 5667 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5668 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5669 bxe_txeof(sc, fp); 5670 tx_bd_avail = bxe_tx_avail(sc, fp); 5671 if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) { 5672 fp->eth_q_stats.bd_avail_too_less_failures++; 5673 m_freem(next); 5674 drbr_advance(ifp, tx_br); 5675 rc = ENOBUFS; 5676 break; 5677 } 5678 } 5679 5680 /* the mbuf now belongs to us */ 5681 fp->eth_q_stats.mbuf_alloc_tx++; 5682 5683 /* 5684 * Put the frame into the transmit ring. If we don't have room, 5685 * place the mbuf back at the head of the TX queue, set the 5686 * OACTIVE flag, and wait for the NIC to drain the chain. 5687 */ 5688 rc = bxe_tx_encap(fp, &next); 5689 if (__predict_false(rc != 0)) { 5690 fp->eth_q_stats.tx_encap_failures++; 5691 if (next != NULL) { 5692 /* mark the TX queue as full and save the frame */ 5693 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 5694 drbr_putback(ifp, tx_br, next); 5695 fp->eth_q_stats.mbuf_alloc_tx--; 5696 fp->eth_q_stats.tx_frames_deferred++; 5697 } else 5698 drbr_advance(ifp, tx_br); 5699 5700 /* stop looking for more work */ 5701 break; 5702 } 5703 5704 /* the transmit frame was enqueued successfully */ 5705 tx_count++; 5706 5707 /* send a copy of the frame to any BPF listeners */ 5708 if_etherbpfmtap(ifp, next); 5709 5710 drbr_advance(ifp, tx_br); 5711 } 5712 5713 /* all TX packets were dequeued and/or the tx ring is full */ 5714 if (tx_count > 0) { 5715 /* reset the TX watchdog timeout timer */ 5716 fp->watchdog_timer = BXE_TX_TIMEOUT; 5717 } 5718 5719 bxe_tx_mq_start_locked_exit: 5720 /* If we didn't drain the drbr, enqueue a task in the future to do it. */ 5721 if (!drbr_empty(ifp, tx_br)) { 5722 fp->eth_q_stats.tx_mq_not_empty++; 5723 taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1); 5724 } 5725 5726 return (rc); 5727 } 5728 5729 static void 5730 bxe_tx_mq_start_deferred(void *arg, 5731 int pending) 5732 { 5733 struct bxe_fastpath *fp = (struct bxe_fastpath *)arg; 5734 struct bxe_softc *sc = fp->sc; 5735 if_t ifp = sc->ifp; 5736 5737 BXE_FP_TX_LOCK(fp); 5738 bxe_tx_mq_start_locked(sc, ifp, fp, NULL); 5739 BXE_FP_TX_UNLOCK(fp); 5740 } 5741 5742 /* Multiqueue (TSS) dispatch routine. 
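 *
 * Frames carrying a valid flowid (e.g. an RSS hash assigned by the
 * receive path or the stack) are pinned to a TX queue by simple modulo:
 *
 *     fp_index = m->m_pkthdr.flowid % sc->num_queues;
 *
 * so with num_queues = 4, flowids 0, 4, 8, ... all map to fp[0].
 * Frames without a flowid fall back to fp[0] as well.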
*/ 5743 static int 5744 bxe_tx_mq_start(struct ifnet *ifp, 5745 struct mbuf *m) 5746 { 5747 struct bxe_softc *sc = if_getsoftc(ifp); 5748 struct bxe_fastpath *fp; 5749 int fp_index, rc; 5750 5751 fp_index = 0; /* default is the first queue */ 5752 5753 /* check if flowid is set */ 5754 5755 if (BXE_VALID_FLOWID(m)) 5756 fp_index = (m->m_pkthdr.flowid % sc->num_queues); 5757 5758 fp = &sc->fp[fp_index]; 5759 5760 if (BXE_FP_TX_TRYLOCK(fp)) { 5761 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); 5762 BXE_FP_TX_UNLOCK(fp); 5763 } else { 5764 rc = drbr_enqueue(ifp, fp->tx_br, m); 5765 taskqueue_enqueue(fp->tq, &fp->tx_task); 5766 } 5767 5768 return (rc); 5769 } 5770 5771 static void 5772 bxe_mq_flush(struct ifnet *ifp) 5773 { 5774 struct bxe_softc *sc = if_getsoftc(ifp); 5775 struct bxe_fastpath *fp; 5776 struct mbuf *m; 5777 int i; 5778 5779 for (i = 0; i < sc->num_queues; i++) { 5780 fp = &sc->fp[i]; 5781 5782 if (fp->state != BXE_FP_STATE_OPEN) { 5783 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", 5784 fp->index, fp->state); 5785 continue; 5786 } 5787 5788 if (fp->tx_br != NULL) { 5789 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); 5790 BXE_FP_TX_LOCK(fp); 5791 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { 5792 m_freem(m); 5793 } 5794 BXE_FP_TX_UNLOCK(fp); 5795 } 5796 } 5797 5798 if_qflush(ifp); 5799 } 5800 5801 #endif /* FreeBSD_version >= 901504 */ 5802 5803 static uint16_t 5804 bxe_cid_ilt_lines(struct bxe_softc *sc) 5805 { 5806 if (IS_SRIOV(sc)) { 5807 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); 5808 } 5809 return (L2_ILT_LINES(sc)); 5810 } 5811 5812 static void 5813 bxe_ilt_set_info(struct bxe_softc *sc) 5814 { 5815 struct ilt_client_info *ilt_client; 5816 struct ecore_ilt *ilt = sc->ilt; 5817 uint16_t line = 0; 5818 5819 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 5820 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); 5821 5822 /* CDU */ 5823 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 5824 ilt_client->client_num = ILT_CLIENT_CDU; 5825 ilt_client->page_size = CDU_ILT_PAGE_SZ; 5826 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 5827 ilt_client->start = line; 5828 line += bxe_cid_ilt_lines(sc); 5829 5830 if (CNIC_SUPPORT(sc)) { 5831 line += CNIC_ILT_LINES; 5832 } 5833 5834 ilt_client->end = (line - 1); 5835 5836 BLOGD(sc, DBG_LOAD, 5837 "ilt client[CDU]: start %d, end %d, " 5838 "psz 0x%x, flags 0x%x, hw psz %d\n", 5839 ilt_client->start, ilt_client->end, 5840 ilt_client->page_size, 5841 ilt_client->flags, 5842 ilog2(ilt_client->page_size >> 12)); 5843 5844 /* QM */ 5845 if (QM_INIT(sc->qm_cid_count)) { 5846 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 5847 ilt_client->client_num = ILT_CLIENT_QM; 5848 ilt_client->page_size = QM_ILT_PAGE_SZ; 5849 ilt_client->flags = 0; 5850 ilt_client->start = line; 5851 5852 /* 4 bytes for each cid */ 5853 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 5854 QM_ILT_PAGE_SZ); 5855 5856 ilt_client->end = (line - 1); 5857 5858 BLOGD(sc, DBG_LOAD, 5859 "ilt client[QM]: start %d, end %d, " 5860 "psz 0x%x, flags 0x%x, hw psz %d\n", 5861 ilt_client->start, ilt_client->end, 5862 ilt_client->page_size, ilt_client->flags, 5863 ilog2(ilt_client->page_size >> 12)); 5864 } 5865 5866 if (CNIC_SUPPORT(sc)) { 5867 /* SRC */ 5868 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 5869 ilt_client->client_num = ILT_CLIENT_SRC; 5870 ilt_client->page_size = SRC_ILT_PAGE_SZ; 5871 ilt_client->flags = 0; 5872 ilt_client->start = line; 5873 line += SRC_ILT_LINES; 5874 ilt_client->end = (line - 1); 5875 
5876 BLOGD(sc, DBG_LOAD, 5877 "ilt client[SRC]: start %d, end %d, " 5878 "psz 0x%x, flags 0x%x, hw psz %d\n", 5879 ilt_client->start, ilt_client->end, 5880 ilt_client->page_size, ilt_client->flags, 5881 ilog2(ilt_client->page_size >> 12)); 5882 5883 /* TM */ 5884 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 5885 ilt_client->client_num = ILT_CLIENT_TM; 5886 ilt_client->page_size = TM_ILT_PAGE_SZ; 5887 ilt_client->flags = 0; 5888 ilt_client->start = line; 5889 line += TM_ILT_LINES; 5890 ilt_client->end = (line - 1); 5891 5892 BLOGD(sc, DBG_LOAD, 5893 "ilt client[TM]: start %d, end %d, " 5894 "psz 0x%x, flags 0x%x, hw psz %d\n", 5895 ilt_client->start, ilt_client->end, 5896 ilt_client->page_size, ilt_client->flags, 5897 ilog2(ilt_client->page_size >> 12)); 5898 } 5899 5900 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); 5901 } 5902 5903 static void 5904 bxe_set_fp_rx_buf_size(struct bxe_softc *sc) 5905 { 5906 int i; 5907 uint32_t rx_buf_size; 5908 5909 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); 5910 5911 for (i = 0; i < sc->num_queues; i++) { 5912 if (rx_buf_size <= MCLBYTES) { 5913 sc->fp[i].rx_buf_size = rx_buf_size; 5914 sc->fp[i].mbuf_alloc_size = MCLBYTES; 5915 } else if (rx_buf_size <= MJUMPAGESIZE) { 5916 sc->fp[i].rx_buf_size = rx_buf_size; 5917 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 5918 } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) { 5919 sc->fp[i].rx_buf_size = MCLBYTES; 5920 sc->fp[i].mbuf_alloc_size = MCLBYTES; 5921 } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) { 5922 sc->fp[i].rx_buf_size = MJUMPAGESIZE; 5923 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 5924 } else { 5925 sc->fp[i].rx_buf_size = MCLBYTES; 5926 sc->fp[i].mbuf_alloc_size = MCLBYTES; 5927 } 5928 } 5929 } 5930 5931 static int 5932 bxe_alloc_ilt_mem(struct bxe_softc *sc) 5933 { 5934 int rc = 0; 5935 5936 if ((sc->ilt = 5937 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), 5938 M_BXE_ILT, 5939 (M_NOWAIT | M_ZERO))) == NULL) { 5940 rc = 1; 5941 } 5942 5943 return (rc); 5944 } 5945 5946 static int 5947 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) 5948 { 5949 int rc = 0; 5950 5951 if ((sc->ilt->lines = 5952 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), 5953 M_BXE_ILT, 5954 (M_NOWAIT | M_ZERO))) == NULL) { 5955 rc = 1; 5956 } 5957 5958 return (rc); 5959 } 5960 5961 static void 5962 bxe_free_ilt_mem(struct bxe_softc *sc) 5963 { 5964 if (sc->ilt != NULL) { 5965 free(sc->ilt, M_BXE_ILT); 5966 sc->ilt = NULL; 5967 } 5968 } 5969 5970 static void 5971 bxe_free_ilt_lines_mem(struct bxe_softc *sc) 5972 { 5973 if (sc->ilt->lines != NULL) { 5974 free(sc->ilt->lines, M_BXE_ILT); 5975 sc->ilt->lines = NULL; 5976 } 5977 } 5978 5979 static void 5980 bxe_free_mem(struct bxe_softc *sc) 5981 { 5982 int i; 5983 5984 for (i = 0; i < L2_ILT_LINES(sc); i++) { 5985 bxe_dma_free(sc, &sc->context[i].vcxt_dma); 5986 sc->context[i].vcxt = NULL; 5987 sc->context[i].size = 0; 5988 } 5989 5990 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); 5991 5992 bxe_free_ilt_lines_mem(sc); 5993 5994 } 5995 5996 static int 5997 bxe_alloc_mem(struct bxe_softc *sc) 5998 { 5999 int context_size; 6000 int allocated; 6001 int i; 6002 6003 /* 6004 * Allocate memory for CDU context: 6005 * This memory is allocated separately and not in the generic ILT 6006 * functions because CDU differs in a few aspects: 6007 * 1. There can be multiple entities allocating memory for context - 6008 * regular L2, CNIC, and SRIOV drivers. Each separately controls 6009 * its own ILT lines. 6010 * 2.
Since CDU page-size is not a single 4KB page (which is the case 6011 * for the other ILT clients), to be efficient we want to support 6012 * allocation of sub-page-size in the last entry. 6013 * 3. Context pointers are used by the driver to pass to FW / update 6014 * the context (for the other ILT clients the pointers are used just to 6015 * free the memory during unload). 6016 */ 6017 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); 6018 for (i = 0, allocated = 0; allocated < context_size; i++) { 6019 sc->context[i].size = min(CDU_ILT_PAGE_SZ, 6020 (context_size - allocated)); 6021 6022 if (bxe_dma_alloc(sc, sc->context[i].size, 6023 &sc->context[i].vcxt_dma, 6024 "cdu context") != 0) { 6025 bxe_free_mem(sc); 6026 return (-1); 6027 } 6028 6029 sc->context[i].vcxt = 6030 (union cdu_context *)sc->context[i].vcxt_dma.vaddr; 6031 6032 allocated += sc->context[i].size; 6033 } 6034 6035 /* bail out if the ILT lines array could not be allocated */ if (bxe_alloc_ilt_lines_mem(sc) != 0) { bxe_free_mem(sc); return (-1); } 6036 6037 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n", 6038 sc->ilt, sc->ilt->start_line, sc->ilt->lines); 6039 { 6040 for (i = 0; i < 4; i++) { 6041 BLOGD(sc, DBG_LOAD, 6042 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n", 6043 i, 6044 sc->ilt->clients[i].page_size, 6045 sc->ilt->clients[i].start, 6046 sc->ilt->clients[i].end, 6047 sc->ilt->clients[i].client_num, 6048 sc->ilt->clients[i].flags); 6049 } 6050 } 6051 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { 6052 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n"); 6053 bxe_free_mem(sc); 6054 return (-1); 6055 } 6056 6057 return (0); 6058 } 6059 6060 static void 6061 bxe_free_rx_bd_chain(struct bxe_fastpath *fp) 6062 { 6063 struct bxe_softc *sc; 6064 int i; 6065 6066 sc = fp->sc; 6067 6068 if (fp->rx_mbuf_tag == NULL) { 6069 return; 6070 } 6071 6072 /* free all mbufs and unload all maps */ 6073 for (i = 0; i < RX_BD_TOTAL; i++) { 6074 if (fp->rx_mbuf_chain[i].m_map != NULL) { 6075 bus_dmamap_sync(fp->rx_mbuf_tag, 6076 fp->rx_mbuf_chain[i].m_map, 6077 BUS_DMASYNC_POSTREAD); 6078 bus_dmamap_unload(fp->rx_mbuf_tag, 6079 fp->rx_mbuf_chain[i].m_map); 6080 } 6081 6082 if (fp->rx_mbuf_chain[i].m != NULL) { 6083 m_freem(fp->rx_mbuf_chain[i].m); 6084 fp->rx_mbuf_chain[i].m = NULL; 6085 fp->eth_q_stats.mbuf_alloc_rx--; 6086 } 6087 } 6088 } 6089 6090 static void 6091 bxe_free_tpa_pool(struct bxe_fastpath *fp) 6092 { 6093 struct bxe_softc *sc; 6094 int i, max_agg_queues; 6095 6096 sc = fp->sc; 6097 6098 if (fp->rx_mbuf_tag == NULL) { 6099 return; 6100 } 6101 6102 max_agg_queues = MAX_AGG_QS(sc); 6103 6104 /* release all mbufs and unload all DMA maps in the TPA pool */ 6105 for (i = 0; i < max_agg_queues; i++) { 6106 if (fp->rx_tpa_info[i].bd.m_map != NULL) { 6107 bus_dmamap_sync(fp->rx_mbuf_tag, 6108 fp->rx_tpa_info[i].bd.m_map, 6109 BUS_DMASYNC_POSTREAD); 6110 bus_dmamap_unload(fp->rx_mbuf_tag, 6111 fp->rx_tpa_info[i].bd.m_map); 6112 } 6113 6114 if (fp->rx_tpa_info[i].bd.m != NULL) { 6115 m_freem(fp->rx_tpa_info[i].bd.m); 6116 fp->rx_tpa_info[i].bd.m = NULL; 6117 fp->eth_q_stats.mbuf_alloc_tpa--; 6118 } 6119 } 6120 } 6121 6122 static void 6123 bxe_free_sge_chain(struct bxe_fastpath *fp) 6124 { 6125 struct bxe_softc *sc; 6126 int i; 6127 6128 sc = fp->sc; 6129 6130 if (fp->rx_sge_mbuf_tag == NULL) { 6131 return; 6132 } 6133 6134 /* free all mbufs and unload all maps */ 6135 for (i = 0; i < RX_SGE_TOTAL; i++) { 6136 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { 6137 bus_dmamap_sync(fp->rx_sge_mbuf_tag, 6138 fp->rx_sge_mbuf_chain[i].m_map, 6139 BUS_DMASYNC_POSTREAD); 6140
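/* unload the DMA map before the mbuf backing it is freed below */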
bus_dmamap_unload(fp->rx_sge_mbuf_tag, 6141 fp->rx_sge_mbuf_chain[i].m_map); 6142 } 6143 6144 if (fp->rx_sge_mbuf_chain[i].m != NULL) { 6145 m_freem(fp->rx_sge_mbuf_chain[i].m); 6146 fp->rx_sge_mbuf_chain[i].m = NULL; 6147 fp->eth_q_stats.mbuf_alloc_sge--; 6148 } 6149 } 6150 } 6151 6152 static void 6153 bxe_free_fp_buffers(struct bxe_softc *sc) 6154 { 6155 struct bxe_fastpath *fp; 6156 int i; 6157 6158 for (i = 0; i < sc->num_queues; i++) { 6159 fp = &sc->fp[i]; 6160 6161 #if __FreeBSD_version >= 901504 6162 if (fp->tx_br != NULL) { 6163 /* just in case bxe_mq_flush() wasn't called */ 6164 if (mtx_initialized(&fp->tx_mtx)) { 6165 struct mbuf *m; 6166 6167 BXE_FP_TX_LOCK(fp); 6168 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) 6169 m_freem(m); 6170 BXE_FP_TX_UNLOCK(fp); 6171 } 6172 } 6173 #endif 6174 6175 /* free all RX buffers */ 6176 bxe_free_rx_bd_chain(fp); 6177 bxe_free_tpa_pool(fp); 6178 bxe_free_sge_chain(fp); 6179 6180 if (fp->eth_q_stats.mbuf_alloc_rx != 0) { 6181 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n", 6182 fp->eth_q_stats.mbuf_alloc_rx); 6183 } 6184 6185 if (fp->eth_q_stats.mbuf_alloc_sge != 0) { 6186 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", 6187 fp->eth_q_stats.mbuf_alloc_sge); 6188 } 6189 6190 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { 6191 BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n", 6192 fp->eth_q_stats.mbuf_alloc_tpa); 6193 } 6194 6195 if (fp->eth_q_stats.mbuf_alloc_tx != 0) { 6196 BLOGE(sc, "failed to release tx mbufs (%d left)\n", 6197 fp->eth_q_stats.mbuf_alloc_tx); 6198 } 6199 6200 /* XXX verify all mbufs were reclaimed */ 6201 } 6202 } 6203 6204 static int 6205 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, 6206 uint16_t prev_index, 6207 uint16_t index) 6208 { 6209 struct bxe_sw_rx_bd *rx_buf; 6210 struct eth_rx_bd *rx_bd; 6211 bus_dma_segment_t segs[1]; 6212 bus_dmamap_t map; 6213 struct mbuf *m; 6214 int nsegs, rc; 6215 6216 rc = 0; 6217 6218 /* allocate the new RX BD mbuf */ 6219 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6220 if (__predict_false(m == NULL)) { 6221 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++; 6222 return (ENOBUFS); 6223 } 6224 6225 fp->eth_q_stats.mbuf_alloc_rx++; 6226 6227 /* initialize the mbuf buffer length */ 6228 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6229 6230 /* map the mbuf into non-paged pool */ 6231 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6232 fp->rx_mbuf_spare_map, 6233 m, segs, &nsegs, BUS_DMA_NOWAIT); 6234 if (__predict_false(rc != 0)) { 6235 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; 6236 m_freem(m); 6237 fp->eth_q_stats.mbuf_alloc_rx--; 6238 return (rc); 6239 } 6240 6241 /* all mbufs must map to a single segment */ 6242 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6243 6244 /* release any existing RX BD mbuf mappings */ 6245 6246 if (prev_index != index) { 6247 rx_buf = &fp->rx_mbuf_chain[prev_index]; 6248 6249 if (rx_buf->m_map != NULL) { 6250 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6251 BUS_DMASYNC_POSTREAD); 6252 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6253 } 6254 6255 /* 6256 * We only get here from bxe_rxeof() when the maximum number 6257 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already 6258 * holds the mbuf in the prev_index so it's OK to NULL it out 6259 * here without concern of a memory leak.
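 * Note that the replacement mbuf was loaded using the spare map above,
 * so a slot's previous mapping is only torn down after the new mapping
 * has already succeeded.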
6260 */ 6261 fp->rx_mbuf_chain[prev_index].m = NULL; 6262 } 6263 6264 rx_buf = &fp->rx_mbuf_chain[index]; 6265 6266 if (rx_buf->m_map != NULL) { 6267 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6268 BUS_DMASYNC_POSTREAD); 6269 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6270 } 6271 6272 /* save the mbuf and mapping info for a future packet */ 6273 map = (prev_index != index) ? 6274 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; 6275 rx_buf->m_map = fp->rx_mbuf_spare_map; 6276 fp->rx_mbuf_spare_map = map; 6277 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6278 BUS_DMASYNC_PREREAD); 6279 rx_buf->m = m; 6280 6281 rx_bd = &fp->rx_chain[index]; 6282 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6283 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6284 6285 return (rc); 6286 } 6287 6288 static int 6289 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 6290 int queue) 6291 { 6292 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 6293 bus_dma_segment_t segs[1]; 6294 bus_dmamap_t map; 6295 struct mbuf *m; 6296 int nsegs; 6297 int rc = 0; 6298 6299 /* allocate the new TPA mbuf */ 6300 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6301 if (__predict_false(m == NULL)) { 6302 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; 6303 return (ENOBUFS); 6304 } 6305 6306 fp->eth_q_stats.mbuf_alloc_tpa++; 6307 6308 /* initialize the mbuf buffer length */ 6309 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6310 6311 /* map the mbuf into non-paged pool */ 6312 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6313 fp->rx_tpa_info_mbuf_spare_map, 6314 m, segs, &nsegs, BUS_DMA_NOWAIT); 6315 if (__predict_false(rc != 0)) { 6316 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; 6317 m_free(m); 6318 fp->eth_q_stats.mbuf_alloc_tpa--; 6319 return (rc); 6320 } 6321 6322 /* all mbufs must map to a single segment */ 6323 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6324 6325 /* release any existing TPA mbuf mapping */ 6326 if (tpa_info->bd.m_map != NULL) { 6327 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6328 BUS_DMASYNC_POSTREAD); 6329 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); 6330 } 6331 6332 /* save the mbuf and mapping info for the TPA mbuf */ 6333 map = tpa_info->bd.m_map; 6334 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; 6335 fp->rx_tpa_info_mbuf_spare_map = map; 6336 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6337 BUS_DMASYNC_PREREAD); 6338 tpa_info->bd.m = m; 6339 tpa_info->seg = segs[0]; 6340 6341 return (rc); 6342 } 6343 6344 /* 6345 * Allocate an mbuf and assign it to the receive scatter gather chain. The 6346 * caller must take care to save a copy of the existing mbuf in the SG mbuf 6347 * chain. 
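 * SGE pages back the TPA (hardware LRO) aggregation queues; the SGE
 * chain is only populated when TPA is enabled on the fastpath.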
6348 */ 6349 static int 6350 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 6351 uint16_t index) 6352 { 6353 struct bxe_sw_rx_bd *sge_buf; 6354 struct eth_rx_sge *sge; 6355 bus_dma_segment_t segs[1]; 6356 bus_dmamap_t map; 6357 struct mbuf *m; 6358 int nsegs; 6359 int rc = 0; 6360 6361 /* allocate a new SGE mbuf */ 6362 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); 6363 if (__predict_false(m == NULL)) { 6364 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; 6365 return (ENOMEM); 6366 } 6367 6368 fp->eth_q_stats.mbuf_alloc_sge++; 6369 6370 /* initialize the mbuf buffer length */ 6371 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; 6372 6373 /* map the SGE mbuf into non-paged pool */ 6374 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, 6375 fp->rx_sge_mbuf_spare_map, 6376 m, segs, &nsegs, BUS_DMA_NOWAIT); 6377 if (__predict_false(rc != 0)) { 6378 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; 6379 m_freem(m); 6380 fp->eth_q_stats.mbuf_alloc_sge--; 6381 return (rc); 6382 } 6383 6384 /* all mbufs must map to a single segment */ 6385 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6386 6387 sge_buf = &fp->rx_sge_mbuf_chain[index]; 6388 6389 /* release any existing SGE mbuf mapping */ 6390 if (sge_buf->m_map != NULL) { 6391 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6392 BUS_DMASYNC_POSTREAD); 6393 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); 6394 } 6395 6396 /* save the mbuf and mapping info for a future packet */ 6397 map = sge_buf->m_map; 6398 sge_buf->m_map = fp->rx_sge_mbuf_spare_map; 6399 fp->rx_sge_mbuf_spare_map = map; 6400 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6401 BUS_DMASYNC_PREREAD); 6402 sge_buf->m = m; 6403 6404 sge = &fp->rx_sge_chain[index]; 6405 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6406 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6407 6408 return (rc); 6409 } 6410 6411 static __noinline int 6412 bxe_alloc_fp_buffers(struct bxe_softc *sc) 6413 { 6414 struct bxe_fastpath *fp; 6415 int i, j, rc = 0; 6416 int ring_prod, cqe_ring_prod; 6417 int max_agg_queues; 6418 6419 for (i = 0; i < sc->num_queues; i++) { 6420 fp = &sc->fp[i]; 6421 6422 ring_prod = cqe_ring_prod = 0; 6423 fp->rx_bd_cons = 0; 6424 fp->rx_cq_cons = 0; 6425 6426 /* allocate buffers for the RX BDs in RX BD chain */ 6427 for (j = 0; j < sc->max_rx_bufs; j++) { 6428 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); 6429 if (rc != 0) { 6430 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 6431 i, rc); 6432 goto bxe_alloc_fp_buffers_error; 6433 } 6434 6435 ring_prod = RX_BD_NEXT(ring_prod); 6436 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); 6437 } 6438 6439 fp->rx_bd_prod = ring_prod; 6440 fp->rx_cq_prod = cqe_ring_prod; 6441 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; 6442 6443 max_agg_queues = MAX_AGG_QS(sc); 6444 6445 fp->tpa_enable = TRUE; 6446 6447 /* fill the TPA pool */ 6448 for (j = 0; j < max_agg_queues; j++) { 6449 rc = bxe_alloc_rx_tpa_mbuf(fp, j); 6450 if (rc != 0) { 6451 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", 6452 i, j); 6453 fp->tpa_enable = FALSE; 6454 goto bxe_alloc_fp_buffers_error; 6455 } 6456 6457 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; 6458 } 6459 6460 if (fp->tpa_enable) { 6461 /* fill the RX SGE chain */ 6462 ring_prod = 0; 6463 for (j = 0; j < RX_SGE_USABLE; j++) { 6464 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); 6465 if (rc != 0) { 6466 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", 6467 i, ring_prod); 6468 fp->tpa_enable = FALSE; 6469 ring_prod = 0; 6470 goto 
bxe_alloc_fp_buffers_error; 6471 } 6472 6473 ring_prod = RX_SGE_NEXT(ring_prod); 6474 } 6475 6476 fp->rx_sge_prod = ring_prod; 6477 } 6478 } 6479 6480 return (0); 6481 6482 bxe_alloc_fp_buffers_error: 6483 6484 /* unwind what was already allocated */ 6485 bxe_free_rx_bd_chain(fp); 6486 bxe_free_tpa_pool(fp); 6487 bxe_free_sge_chain(fp); 6488 6489 return (ENOBUFS); 6490 } 6491 6492 static void 6493 bxe_free_fw_stats_mem(struct bxe_softc *sc) 6494 { 6495 bxe_dma_free(sc, &sc->fw_stats_dma); 6496 6497 sc->fw_stats_num = 0; 6498 6499 sc->fw_stats_req_size = 0; 6500 sc->fw_stats_req = NULL; 6501 sc->fw_stats_req_mapping = 0; 6502 6503 sc->fw_stats_data_size = 0; 6504 sc->fw_stats_data = NULL; 6505 sc->fw_stats_data_mapping = 0; 6506 } 6507 6508 static int 6509 bxe_alloc_fw_stats_mem(struct bxe_softc *sc) 6510 { 6511 uint8_t num_queue_stats; 6512 int num_groups; 6513 6514 /* number of queues for statistics is number of eth queues */ 6515 num_queue_stats = BXE_NUM_ETH_QUEUES(sc); 6516 6517 /* 6518 * Total number of FW statistics requests = 6519 * 1 for port stats + 1 for PF stats + num of queues 6520 */ 6521 sc->fw_stats_num = (2 + num_queue_stats); 6522 6523 /* 6524 * Request is built from stats_query_header and an array of 6525 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT 6526 * rules. The real number of requests is configured in the 6527 * stats_query_header. 6528 */ 6529 num_groups = 6530 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + 6531 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0)); 6532 6533 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n", 6534 sc->fw_stats_num, num_groups); 6535 6536 sc->fw_stats_req_size = 6537 (sizeof(struct stats_query_header) + 6538 (num_groups * sizeof(struct stats_query_cmd_group))); 6539 6540 /* 6541 * Data for statistics requests + stats_counter. 6542 * stats_counter holds per-STORM counters that are incremented when 6543 * STORM has finished with the current request. Memory for FCoE 6544 * offloaded statistics is counted anyway, even if it will not be sent. 6545 * VF stats are not accounted for here as the data of VF stats is stored 6546 * in memory allocated by the VF, not here. 6547 */ 6548 sc->fw_stats_data_size = 6549 (sizeof(struct stats_counter) + 6550 sizeof(struct per_port_stats) + 6551 sizeof(struct per_pf_stats) + 6552 /* sizeof(struct fcoe_statistics_params) + */ 6553 (sizeof(struct per_queue_stats) * num_queue_stats)); 6554 6555 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), 6556 &sc->fw_stats_dma, "fw stats") != 0) { 6557 bxe_free_fw_stats_mem(sc); 6558 return (-1); 6559 } 6560 6561 /* set up the shortcuts */ 6562 6563 sc->fw_stats_req = 6564 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; 6565 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; 6566 6567 sc->fw_stats_data = 6568 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + 6569 sc->fw_stats_req_size); 6570 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + 6571 sc->fw_stats_req_size); 6572 6573 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n", 6574 (uintmax_t)sc->fw_stats_req_mapping); 6575 6576 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n", 6577 (uintmax_t)sc->fw_stats_data_mapping); 6578 6579 return (0); 6580 } 6581 6582 /* 6583 * Bits map: 6584 * 0-7 - Engine0 load counter. 6585 * 8-15 - Engine1 load counter. 6586 * 16 - Engine0 RESET_IN_PROGRESS bit. 6587 * 17 - Engine1 RESET_IN_PROGRESS bit. 6588 * 18 - Engine0 ONE_IS_LOADED.
Set when there is at least one active 6589 * function on the engine 6590 * 19 - Engine1 ONE_IS_LOADED. 6591 * 20 - Chip reset flow bit. When set, a non-leader must wait for both 6592 * engines' leaders to complete (check for both RESET_IN_PROGRESS bits, 6593 * not just the one belonging to its engine). 6594 */ 6595 #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 6596 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff 6597 #define BXE_PATH0_LOAD_CNT_SHIFT 0 6598 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00 6599 #define BXE_PATH1_LOAD_CNT_SHIFT 8 6600 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000 6601 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000 6602 #define BXE_GLOBAL_RESET_BIT 0x00040000 6603 6604 /* set the GLOBAL_RESET bit, should be run under rtnl lock */ 6605 static void 6606 bxe_set_reset_global(struct bxe_softc *sc) 6607 { 6608 uint32_t val; 6609 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6610 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6611 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT); 6612 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6613 } 6614 6615 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */ 6616 static void 6617 bxe_clear_reset_global(struct bxe_softc *sc) 6618 { 6619 uint32_t val; 6620 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6621 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6622 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT)); 6623 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6624 } 6625 6626 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */ 6627 static uint8_t 6628 bxe_reset_is_global(struct bxe_softc *sc) 6629 { 6630 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6631 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); 6632 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; 6633 } 6634 6635 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ 6636 static void 6637 bxe_set_reset_done(struct bxe_softc *sc) 6638 { 6639 uint32_t val; 6640 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 6641 BXE_PATH0_RST_IN_PROG_BIT; 6642 6643 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6644 6645 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6646 /* Clear the bit */ 6647 val &= ~bit; 6648 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6649 6650 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6651 } 6652 6653 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 6654 static void 6655 bxe_set_reset_in_progress(struct bxe_softc *sc) 6656 { 6657 uint32_t val; 6658 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 6659 BXE_PATH0_RST_IN_PROG_BIT; 6660 6661 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6662 6663 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6664 /* Set the bit */ 6665 val |= bit; 6666 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6667 6668 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6669 } 6670 6671 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 6672 static uint8_t 6673 bxe_reset_is_done(struct bxe_softc *sc, 6674 int engine) 6675 { 6676 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6677 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : 6678 BXE_PATH0_RST_IN_PROG_BIT; 6679 6680 /* return false if bit is set */ 6681 return (val & bit) ? FALSE : TRUE; 6682 } 6683 6684 /* get the load status for an engine, should be run under rtnl lock */ 6685 static uint8_t 6686 bxe_get_load_status(struct bxe_softc *sc, 6687 int engine) 6688 { 6689 uint32_t mask = engine ?
BXE_PATH1_LOAD_CNT_MASK : 6690 BXE_PATH0_LOAD_CNT_MASK; 6691 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : 6692 BXE_PATH0_LOAD_CNT_SHIFT; 6693 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6694 6695 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 6696 6697 val = ((val & mask) >> shift); 6698 6699 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); 6700 6701 return (val != 0); 6702 } 6703 6704 /* set pf load mark */ 6705 /* XXX needs to be under rtnl lock */ 6706 static void 6707 bxe_set_pf_load(struct bxe_softc *sc) 6708 { 6709 uint32_t val; 6710 uint32_t val1; 6711 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 6712 BXE_PATH0_LOAD_CNT_MASK; 6713 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 6714 BXE_PATH0_LOAD_CNT_SHIFT; 6715 6716 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6717 6718 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6719 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 6720 6721 /* get the current counter value */ 6722 val1 = ((val & mask) >> shift); 6723 6724 /* set bit of this PF */ 6725 val1 |= (1 << SC_ABS_FUNC(sc)); 6726 6727 /* clear the old value */ 6728 val &= ~mask; 6729 6730 /* set the new one */ 6731 val |= ((val1 << shift) & mask); 6732 6733 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6734 6735 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6736 } 6737 6738 /* clear pf load mark */ 6739 /* XXX needs to be under rtnl lock */ 6740 static uint8_t 6741 bxe_clear_pf_load(struct bxe_softc *sc) 6742 { 6743 uint32_t val1, val; 6744 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 6745 BXE_PATH0_LOAD_CNT_MASK; 6746 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 6747 BXE_PATH0_LOAD_CNT_SHIFT; 6748 6749 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6750 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6751 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val); 6752 6753 /* get the current counter value */ 6754 val1 = (val & mask) >> shift; 6755 6756 /* clear bit of that PF */ 6757 val1 &= ~(1 << SC_ABS_FUNC(sc)); 6758 6759 /* clear the old value */ 6760 val &= ~mask; 6761 6762 /* set the new one */ 6763 val |= ((val1 << shift) & mask); 6764 6765 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 6766 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6767 return (val1 != 0); 6768 } 6769 6770 /* send load request to the MCP and analyze the response */ 6771 static int 6772 bxe_nic_load_request(struct bxe_softc *sc, 6773 uint32_t *load_code) 6774 { 6775 /* init fw_seq */ 6776 sc->fw_seq = 6777 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 6778 DRV_MSG_SEQ_NUMBER_MASK); 6779 6780 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq); 6781 6782 /* get the current FW pulse sequence */ 6783 sc->fw_drv_pulse_wr_seq = 6784 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & 6785 DRV_PULSE_SEQ_MASK); 6786 6787 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n", 6788 sc->fw_drv_pulse_wr_seq); 6789 6790 /* load request */ 6791 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 6792 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 6793 6794 /* if the MCP fails to respond we must abort */ 6795 if (!(*load_code)) { 6796 BLOGE(sc, "MCP response failure!\n"); 6797 return (-1); 6798 } 6799 6800 /* if the MCP refused then we must abort */ 6801 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 6802 BLOGE(sc, "MCP refused load request\n"); 6803 return (-1); 6804 } 6805 6806 return (0); 6807 } 6808 6809 /* 6810 * Check whether another PF has already loaded FW to the chip.
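 * The MCP load response indicates whether this function is the first
 * to load on the engine/chip and must therefore perform the common
 * initialization.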
In virtualized 6811 * environments a PF from another VM may have already initialized the device 6812 * including loading FW. 6813 */ 6814 static int 6815 bxe_nic_load_analyze_req(struct bxe_softc *sc, 6816 uint32_t load_code) 6817 { 6818 uint32_t my_fw, loaded_fw; 6819 6820 /* is another pf loaded on this engine? */ 6821 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 6822 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 6823 /* build my FW version dword */ 6824 my_fw = (BCM_5710_FW_MAJOR_VERSION + 6825 (BCM_5710_FW_MINOR_VERSION << 8) + 6826 (BCM_5710_FW_REVISION_VERSION << 16) + 6827 (BCM_5710_FW_ENGINEERING_VERSION << 24)); 6828 6829 /* read loaded FW from chip */ 6830 loaded_fw = REG_RD(sc, XSEM_REG_PRAM); 6831 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n", 6832 loaded_fw, my_fw); 6833 6834 /* abort nic load if version mismatch */ 6835 if (my_fw != loaded_fw) { 6836 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n", 6837 loaded_fw, my_fw); 6838 return (-1); 6839 } 6840 } 6841 6842 return (0); 6843 } 6844 6845 /* mark PMF if applicable */ 6846 static void 6847 bxe_nic_load_pmf(struct bxe_softc *sc, 6848 uint32_t load_code) 6849 { 6850 uint32_t ncsi_oem_data_addr; 6851 6852 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 6853 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 6854 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 6855 /* 6856 * Barrier here for ordering between the writing to sc->port.pmf here 6857 * and reading it from the periodic task. 6858 */ 6859 sc->port.pmf = 1; 6860 mb(); 6861 } else { 6862 sc->port.pmf = 0; 6863 } 6864 6865 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf); 6866 6867 /* XXX needed? */ 6868 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { 6869 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { 6870 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); 6871 if (ncsi_oem_data_addr) { 6872 REG_WR(sc, 6873 (ncsi_oem_data_addr + 6874 offsetof(struct glob_ncsi_oem_data, driver_version)), 6875 0); 6876 } 6877 } 6878 } 6879 } 6880 6881 static void 6882 bxe_read_mf_cfg(struct bxe_softc *sc) 6883 { 6884 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); 6885 int abs_func; 6886 int vn; 6887 6888 if (BXE_NOMCP(sc)) { 6889 return; /* what should be the default value in this case? */ 6890 } 6891 6892 /* 6893 * The formula for computing the absolute function number is...
6894 * For 2 port configuration (4 functions per port): 6895 * abs_func = 2 * vn + SC_PORT + SC_PATH 6896 * For 4 port configuration (2 functions per port): 6897 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH 6898 */ 6899 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 6900 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); 6901 if (abs_func >= E1H_FUNC_MAX) { 6902 break; 6903 } 6904 sc->devinfo.mf_info.mf_config[vn] = 6905 MFCFG_RD(sc, func_mf_config[abs_func].config); 6906 } 6907 6908 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & 6909 FUNC_MF_CFG_FUNC_DISABLED) { 6910 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); 6911 sc->flags |= BXE_MF_FUNC_DIS; 6912 } else { 6913 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); 6914 sc->flags &= ~BXE_MF_FUNC_DIS; 6915 } 6916 } 6917 6918 /* acquire split MCP access lock register */ 6919 static int bxe_acquire_alr(struct bxe_softc *sc) 6920 { 6921 uint32_t j, val; 6922 6923 for (j = 0; j < 1000; j++) { 6924 val = (1UL << 31); 6925 REG_WR(sc, GRCBASE_MCP + 0x9c, val); 6926 val = REG_RD(sc, GRCBASE_MCP + 0x9c); 6927 if (val & (1L << 31)) 6928 break; 6929 6930 DELAY(5000); 6931 } 6932 6933 if (!(val & (1L << 31))) { 6934 BLOGE(sc, "Cannot acquire MCP access lock register\n"); 6935 return (-1); 6936 } 6937 6938 return (0); 6939 } 6940 6941 /* release split MCP access lock register */ 6942 static void bxe_release_alr(struct bxe_softc *sc) 6943 { 6944 REG_WR(sc, GRCBASE_MCP + 0x9c, 0); 6945 } 6946 6947 static void 6948 bxe_fan_failure(struct bxe_softc *sc) 6949 { 6950 int port = SC_PORT(sc); 6951 uint32_t ext_phy_config; 6952 6953 /* mark the failure */ 6954 ext_phy_config = 6955 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 6956 6957 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 6958 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 6959 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, 6960 ext_phy_config); 6961 6962 /* log the failure */ 6963 BLOGW(sc, "Fan Failure has caused the driver to shut down " 6964 "the card to prevent permanent damage. " 6965 "Please contact OEM Support for assistance\n"); 6966 6967 /* XXX */ 6968 #if 1 6969 bxe_panic(sc, ("Schedule task to handle fan failure\n")); 6970 #else 6971 /* 6972 * Schedule device reset (unload) 6973 * Some boards consume enough power while the driver is up to 6974 * overheat if the fan fails.
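 * (This branch is not compiled in; the #if 1 branch above panics
 * instead.)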
6975 */ 6976 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); 6977 schedule_delayed_work(&sc->sp_rtnl_task, 0); 6978 #endif 6979 } 6980 6981 /* this function is called upon a link interrupt */ 6982 static void 6983 bxe_link_attn(struct bxe_softc *sc) 6984 { 6985 uint32_t pause_enabled = 0; 6986 struct host_port_stats *pstats; 6987 int cmng_fns; 6988 struct bxe_fastpath *fp; 6989 int i; 6990 6991 /* Make sure that we are synced with the current statistics */ 6992 bxe_stats_handle(sc, STATS_EVENT_STOP); 6993 6994 elink_link_update(&sc->link_params, &sc->link_vars); 6995 6996 if (sc->link_vars.link_up) { 6997 6998 /* dropless flow control */ 6999 if (!CHIP_IS_E1(sc) && sc->dropless_fc) { 7000 pause_enabled = 0; 7001 7002 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 7003 pause_enabled = 1; 7004 } 7005 7006 REG_WR(sc, 7007 (BAR_USTRORM_INTMEM + 7008 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 7009 pause_enabled); 7010 } 7011 7012 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 7013 pstats = BXE_SP(sc, port_stats); 7014 /* reset old mac stats */ 7015 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); 7016 } 7017 7018 if (sc->state == BXE_STATE_OPEN) { 7019 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 7020 } 7021 7022 /* Restart tx when the link comes back. */ 7023 FOR_EACH_ETH_QUEUE(sc, i) { 7024 fp = &sc->fp[i]; 7025 taskqueue_enqueue(fp->tq, &fp->tx_task); 7026 } 7027 } 7028 7029 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 7030 cmng_fns = bxe_get_cmng_fns_mode(sc); 7031 7032 if (cmng_fns != CMNG_FNS_NONE) { 7033 bxe_cmng_fns_init(sc, FALSE, cmng_fns); 7034 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7035 } else { 7036 /* rate shaping and fairness are disabled */ 7037 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); 7038 } 7039 } 7040 7041 bxe_link_report_locked(sc); 7042 7043 if (IS_MF(sc)) { 7044 ; // XXX bxe_link_sync_notify(sc); 7045 } 7046 } 7047 7048 static void 7049 bxe_attn_int_asserted(struct bxe_softc *sc, 7050 uint32_t asserted) 7051 { 7052 int port = SC_PORT(sc); 7053 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 7054 MISC_REG_AEU_MASK_ATTN_FUNC_0; 7055 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 7056 NIG_REG_MASK_INTERRUPT_PORT0; 7057 uint32_t aeu_mask; 7058 uint32_t nig_mask = 0; 7059 uint32_t reg_addr; 7060 uint32_t igu_acked; 7061 uint32_t cnt; 7062 7063 if (sc->attn_state & asserted) { 7064 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); 7065 } 7066 7067 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7068 7069 aeu_mask = REG_RD(sc, aeu_addr); 7070 7071 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", 7072 aeu_mask, asserted); 7073 7074 aeu_mask &= ~(asserted & 0x3ff); 7075 7076 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 7077 7078 REG_WR(sc, aeu_addr, aeu_mask); 7079 7080 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7081 7082 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 7083 sc->attn_state |= asserted; 7084 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 7085 7086 if (asserted & ATTN_HARD_WIRED_MASK) { 7087 if (asserted & ATTN_NIG_FOR_FUNC) { 7088 7089 bxe_acquire_phy_lock(sc); 7090 /* save nig interrupt mask */ 7091 nig_mask = REG_RD(sc, nig_int_mask_addr); 7092 7093 /* If nig_mask is not set, no need to call the update function */ 7094 if (nig_mask) { 7095 REG_WR(sc, nig_int_mask_addr, 0); 7096 7097 bxe_link_attn(sc); 7098 } 7099 7100 /* handle unicore attn? 
*/ 7101 } 7102 7103 if (asserted & ATTN_SW_TIMER_4_FUNC) { 7104 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); 7105 } 7106 7107 if (asserted & GPIO_2_FUNC) { 7108 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); 7109 } 7110 7111 if (asserted & GPIO_3_FUNC) { 7112 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); 7113 } 7114 7115 if (asserted & GPIO_4_FUNC) { 7116 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); 7117 } 7118 7119 if (port == 0) { 7120 if (asserted & ATTN_GENERAL_ATTN_1) { 7121 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); 7122 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 7123 } 7124 if (asserted & ATTN_GENERAL_ATTN_2) { 7125 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); 7126 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 7127 } 7128 if (asserted & ATTN_GENERAL_ATTN_3) { 7129 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); 7130 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 7131 } 7132 } else { 7133 if (asserted & ATTN_GENERAL_ATTN_4) { 7134 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); 7135 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 7136 } 7137 if (asserted & ATTN_GENERAL_ATTN_5) { 7138 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); 7139 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 7140 } 7141 if (asserted & ATTN_GENERAL_ATTN_6) { 7142 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); 7143 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 7144 } 7145 } 7146 } /* hardwired */ 7147 7148 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7149 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); 7150 } else { 7151 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 7152 } 7153 7154 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", 7155 asserted, 7156 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 7157 REG_WR(sc, reg_addr, asserted); 7158 7159 /* now set back the mask */ 7160 if (asserted & ATTN_NIG_FOR_FUNC) { 7161 /* 7162 * Verify that IGU ack through BAR was written before restoring 7163 * NIG mask. This loop should exit after 2-3 iterations max. 7164 */ 7165 if (sc->devinfo.int_block != INT_BLOCK_HC) { 7166 cnt = 0; 7167 7168 do { 7169 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 7170 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 7171 (++cnt < MAX_IGU_ATTN_ACK_TO)); 7172 7173 if (!igu_acked) { 7174 BLOGE(sc, "Failed to verify IGU ack on time\n"); 7175 } 7176 7177 mb(); 7178 } 7179 7180 REG_WR(sc, nig_int_mask_addr, nig_mask); 7181 7182 bxe_release_phy_lock(sc); 7183 } 7184 } 7185 7186 static void 7187 bxe_print_next_block(struct bxe_softc *sc, 7188 int idx, 7189 const char *blk) 7190 { 7191 BLOGI(sc, "%s%s", idx ? 
", " : "", blk); 7192 } 7193 7194 static int 7195 bxe_check_blocks_with_parity0(struct bxe_softc *sc, 7196 uint32_t sig, 7197 int par_num, 7198 uint8_t print) 7199 { 7200 uint32_t cur_bit = 0; 7201 int i = 0; 7202 7203 for (i = 0; sig; i++) { 7204 cur_bit = ((uint32_t)0x1 << i); 7205 if (sig & cur_bit) { 7206 switch (cur_bit) { 7207 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 7208 if (print) 7209 bxe_print_next_block(sc, par_num++, "BRB"); 7210 break; 7211 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 7212 if (print) 7213 bxe_print_next_block(sc, par_num++, "PARSER"); 7214 break; 7215 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 7216 if (print) 7217 bxe_print_next_block(sc, par_num++, "TSDM"); 7218 break; 7219 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 7220 if (print) 7221 bxe_print_next_block(sc, par_num++, "SEARCHER"); 7222 break; 7223 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 7224 if (print) 7225 bxe_print_next_block(sc, par_num++, "TCM"); 7226 break; 7227 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 7228 if (print) 7229 bxe_print_next_block(sc, par_num++, "TSEMI"); 7230 break; 7231 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 7232 if (print) 7233 bxe_print_next_block(sc, par_num++, "XPB"); 7234 break; 7235 } 7236 7237 /* Clear the bit */ 7238 sig &= ~cur_bit; 7239 } 7240 } 7241 7242 return (par_num); 7243 } 7244 7245 static int 7246 bxe_check_blocks_with_parity1(struct bxe_softc *sc, 7247 uint32_t sig, 7248 int par_num, 7249 uint8_t *global, 7250 uint8_t print) 7251 { 7252 int i = 0; 7253 uint32_t cur_bit = 0; 7254 for (i = 0; sig; i++) { 7255 cur_bit = ((uint32_t)0x1 << i); 7256 if (sig & cur_bit) { 7257 switch (cur_bit) { 7258 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 7259 if (print) 7260 bxe_print_next_block(sc, par_num++, "PBF"); 7261 break; 7262 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 7263 if (print) 7264 bxe_print_next_block(sc, par_num++, "QM"); 7265 break; 7266 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 7267 if (print) 7268 bxe_print_next_block(sc, par_num++, "TM"); 7269 break; 7270 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 7271 if (print) 7272 bxe_print_next_block(sc, par_num++, "XSDM"); 7273 break; 7274 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 7275 if (print) 7276 bxe_print_next_block(sc, par_num++, "XCM"); 7277 break; 7278 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 7279 if (print) 7280 bxe_print_next_block(sc, par_num++, "XSEMI"); 7281 break; 7282 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 7283 if (print) 7284 bxe_print_next_block(sc, par_num++, "DOORBELLQ"); 7285 break; 7286 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 7287 if (print) 7288 bxe_print_next_block(sc, par_num++, "NIG"); 7289 break; 7290 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 7291 if (print) 7292 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); 7293 *global = TRUE; 7294 break; 7295 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 7296 if (print) 7297 bxe_print_next_block(sc, par_num++, "DEBUG"); 7298 break; 7299 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 7300 if (print) 7301 bxe_print_next_block(sc, par_num++, "USDM"); 7302 break; 7303 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 7304 if (print) 7305 bxe_print_next_block(sc, par_num++, "UCM"); 7306 break; 7307 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 7308 if (print) 7309 bxe_print_next_block(sc, par_num++, "USEMI"); 7310 break; 7311 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 7312 if (print) 7313 bxe_print_next_block(sc, par_num++, "UPB"); 7314 break; 7315 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 
7316 if (print) 7317 bxe_print_next_block(sc, par_num++, "CSDM"); 7318 break; 7319 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 7320 if (print) 7321 bxe_print_next_block(sc, par_num++, "CCM"); 7322 break; 7323 } 7324 7325 /* Clear the bit */ 7326 sig &= ~cur_bit; 7327 } 7328 } 7329 7330 return (par_num); 7331 } 7332 7333 static int 7334 bxe_check_blocks_with_parity2(struct bxe_softc *sc, 7335 uint32_t sig, 7336 int par_num, 7337 uint8_t print) 7338 { 7339 uint32_t cur_bit = 0; 7340 int i = 0; 7341 7342 for (i = 0; sig; i++) { 7343 cur_bit = ((uint32_t)0x1 << i); 7344 if (sig & cur_bit) { 7345 switch (cur_bit) { 7346 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 7347 if (print) 7348 bxe_print_next_block(sc, par_num++, "CSEMI"); 7349 break; 7350 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 7351 if (print) 7352 bxe_print_next_block(sc, par_num++, "PXP"); 7353 break; 7354 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 7355 if (print) 7356 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); 7357 break; 7358 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 7359 if (print) 7360 bxe_print_next_block(sc, par_num++, "CFC"); 7361 break; 7362 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 7363 if (print) 7364 bxe_print_next_block(sc, par_num++, "CDU"); 7365 break; 7366 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 7367 if (print) 7368 bxe_print_next_block(sc, par_num++, "DMAE"); 7369 break; 7370 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 7371 if (print) 7372 bxe_print_next_block(sc, par_num++, "IGU"); 7373 break; 7374 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 7375 if (print) 7376 bxe_print_next_block(sc, par_num++, "MISC"); 7377 break; 7378 } 7379 7380 /* Clear the bit */ 7381 sig &= ~cur_bit; 7382 } 7383 } 7384 7385 return (par_num); 7386 } 7387 7388 static int 7389 bxe_check_blocks_with_parity3(struct bxe_softc *sc, 7390 uint32_t sig, 7391 int par_num, 7392 uint8_t *global, 7393 uint8_t print) 7394 { 7395 uint32_t cur_bit = 0; 7396 int i = 0; 7397 7398 for (i = 0; sig; i++) { 7399 cur_bit = ((uint32_t)0x1 << i); 7400 if (sig & cur_bit) { 7401 switch (cur_bit) { 7402 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 7403 if (print) 7404 bxe_print_next_block(sc, par_num++, "MCP ROM"); 7405 *global = TRUE; 7406 break; 7407 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 7408 if (print) 7409 bxe_print_next_block(sc, par_num++, 7410 "MCP UMP RX"); 7411 *global = TRUE; 7412 break; 7413 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 7414 if (print) 7415 bxe_print_next_block(sc, par_num++, 7416 "MCP UMP TX"); 7417 *global = TRUE; 7418 break; 7419 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 7420 if (print) 7421 bxe_print_next_block(sc, par_num++, 7422 "MCP SCPAD"); 7423 *global = TRUE; 7424 break; 7425 } 7426 7427 /* Clear the bit */ 7428 sig &= ~cur_bit; 7429 } 7430 } 7431 7432 return (par_num); 7433 } 7434 7435 static int 7436 bxe_check_blocks_with_parity4(struct bxe_softc *sc, 7437 uint32_t sig, 7438 int par_num, 7439 uint8_t print) 7440 { 7441 uint32_t cur_bit = 0; 7442 int i = 0; 7443 7444 for (i = 0; sig; i++) { 7445 cur_bit = ((uint32_t)0x1 << i); 7446 if (sig & cur_bit) { 7447 switch (cur_bit) { 7448 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 7449 if (print) 7450 bxe_print_next_block(sc, par_num++, "PGLUE_B"); 7451 break; 7452 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 7453 if (print) 7454 bxe_print_next_block(sc, par_num++, "ATC"); 7455 break; 7456 } 7457 7458 /* Clear the bit */ 7459 sig &= ~cur_bit; 7460 } 7461 } 7462 7463 return (par_num); 7464 } 7465 7466 static 
uint8_t 7467 bxe_parity_attn(struct bxe_softc *sc, 7468 uint8_t *global, 7469 uint8_t print, 7470 uint32_t *sig) 7471 { 7472 int par_num = 0; 7473 7474 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 7475 (sig[1] & HW_PRTY_ASSERT_SET_1) || 7476 (sig[2] & HW_PRTY_ASSERT_SET_2) || 7477 (sig[3] & HW_PRTY_ASSERT_SET_3) || 7478 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 7479 BLOGE(sc, "Parity error: HW block parity attention:\n" 7480 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 7481 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), 7482 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), 7483 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), 7484 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), 7485 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); 7486 7487 if (print) 7488 BLOGI(sc, "Parity errors detected in blocks: "); 7489 7490 par_num = 7491 bxe_check_blocks_with_parity0(sc, sig[0] & 7492 HW_PRTY_ASSERT_SET_0, 7493 par_num, print); 7494 par_num = 7495 bxe_check_blocks_with_parity1(sc, sig[1] & 7496 HW_PRTY_ASSERT_SET_1, 7497 par_num, global, print); 7498 par_num = 7499 bxe_check_blocks_with_parity2(sc, sig[2] & 7500 HW_PRTY_ASSERT_SET_2, 7501 par_num, print); 7502 par_num = 7503 bxe_check_blocks_with_parity3(sc, sig[3] & 7504 HW_PRTY_ASSERT_SET_3, 7505 par_num, global, print); 7506 par_num = 7507 bxe_check_blocks_with_parity4(sc, sig[4] & 7508 HW_PRTY_ASSERT_SET_4, 7509 par_num, print); 7510 7511 if (print) 7512 BLOGI(sc, "\n"); 7513 7514 return (TRUE); 7515 } 7516 7517 return (FALSE); 7518 } 7519 7520 static uint8_t 7521 bxe_chk_parity_attn(struct bxe_softc *sc, 7522 uint8_t *global, 7523 uint8_t print) 7524 { 7525 struct attn_route attn = { {0} }; 7526 int port = SC_PORT(sc); 7527 7528 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 7529 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 7530 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 7531 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 7532 7533 /* 7534 * Since MCP attentions can't be disabled inside the block, we need to 7535 * read AEU registers to see whether they're currently disabled 7536 */ 7537 attn.sig[3] &= ((REG_RD(sc, (!port ? 
MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 7538 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) & 7539 MISC_AEU_ENABLE_MCP_PRTY_BITS) | 7540 ~MISC_AEU_ENABLE_MCP_PRTY_BITS); 7541 7542 7543 if (!CHIP_IS_E1x(sc)) 7544 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 7545 7546 return (bxe_parity_attn(sc, global, print, attn.sig)); 7547 } 7548 7549 static void 7550 bxe_attn_int_deasserted4(struct bxe_softc *sc, 7551 uint32_t attn) 7552 { 7553 uint32_t val; 7554 7555 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 7556 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 7557 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); 7558 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 7559 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 7560 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 7561 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 7562 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 7563 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 7564 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 7565 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 7566 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 7567 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 7568 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 7569 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 7570 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 7571 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 7572 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 7573 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 7574 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 7575 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 7576 } 7577 7578 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 7579 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); 7580 BLOGE(sc, "ATC hw attention 0x%08x\n", val); 7581 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 7582 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 7583 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 7584 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 7585 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 7586 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 7587 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 7588 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 7589 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 7590 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 7591 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 7592 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 7593 } 7594 7595 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7596 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 7597 BLOGE(sc, "FATAL parity attention set4 0x%08x\n", 7598 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7599 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 7600 } 7601 } 7602 7603 static void 7604 bxe_e1h_disable(struct bxe_softc *sc) 7605 { 7606 int port = SC_PORT(sc); 7607 7608 bxe_tx_disable(sc); 7609 7610 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 7611 } 7612 7613 static void 7614 bxe_e1h_enable(struct bxe_softc *sc) 7615 { 7616 int port = SC_PORT(sc); 7617 7618 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 7619 7620 // XXX bxe_tx_enable(sc); 7621 } 7622 7623 /* 7624 * called due to MCP event (on pmf): 7625 * reread new bandwidth configuration 7626 * configure FW 7627 * notify other functions
about the change 7628 */ 7629 static void 7630 bxe_config_mf_bw(struct bxe_softc *sc) 7631 { 7632 if (sc->link_vars.link_up) { 7633 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); 7634 // XXX bxe_link_sync_notify(sc); 7635 } 7636 7637 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7638 } 7639 7640 static void 7641 bxe_set_mf_bw(struct bxe_softc *sc) 7642 { 7643 bxe_config_mf_bw(sc); 7644 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 7645 } 7646 7647 static void 7648 bxe_handle_eee_event(struct bxe_softc *sc) 7649 { 7650 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); 7651 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 7652 } 7653 7654 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 7655 7656 static void 7657 bxe_drv_info_ether_stat(struct bxe_softc *sc) 7658 { 7659 struct eth_stats_info *ether_stat = 7660 &sc->sp->drv_info_to_mcp.ether_stat; 7661 7662 strlcpy(ether_stat->version, BXE_DRIVER_VERSION, 7663 ETH_STAT_INFO_VERSION_LEN); 7664 7665 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ 7666 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, 7667 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 7668 ether_stat->mac_local + MAC_PAD, 7669 MAC_PAD, ETH_ALEN); 7670 7671 ether_stat->mtu_size = sc->mtu; 7672 7673 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 7674 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { 7675 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 7676 } 7677 7678 // XXX ether_stat->feature_flags |= ???; 7679 7680 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; 7681 7682 ether_stat->txq_size = sc->tx_ring_size; 7683 ether_stat->rxq_size = sc->rx_ring_size; 7684 } 7685 7686 static void 7687 bxe_handle_drv_info_req(struct bxe_softc *sc) 7688 { 7689 enum drv_info_opcode op_code; 7690 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 7691 7692 /* if drv_info version supported by MFW doesn't match - send NACK */ 7693 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 7694 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 7695 return; 7696 } 7697 7698 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 7699 DRV_INFO_CONTROL_OP_CODE_SHIFT); 7700 7701 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 7702 7703 switch (op_code) { 7704 case ETH_STATS_OPCODE: 7705 bxe_drv_info_ether_stat(sc); 7706 break; 7707 case FCOE_STATS_OPCODE: 7708 case ISCSI_STATS_OPCODE: 7709 default: 7710 /* if op code isn't supported - send NACK */ 7711 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 7712 return; 7713 } 7714 7715 /* 7716 * If we got drv_info attn from MFW then these fields are defined in 7717 * shmem2 for sure 7718 */ 7719 SHMEM2_WR(sc, drv_info_host_addr_lo, 7720 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 7721 SHMEM2_WR(sc, drv_info_host_addr_hi, 7722 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 7723 7724 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 7725 } 7726 7727 static void 7728 bxe_dcc_event(struct bxe_softc *sc, 7729 uint32_t dcc_event) 7730 { 7731 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); 7732 7733 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 7734 /* 7735 * This is the only place besides the function initialization 7736 * where the sc->flags can change so it is done without any 7737 * locks 7738 */ 7739 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 7740 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); 7741 sc->flags |= BXE_MF_FUNC_DIS; 7742 bxe_e1h_disable(sc); 7743 } else { 7744 BLOGD(sc, DBG_INTR, 
"mf_cfg function enabled\n"); 7745 sc->flags &= ~BXE_MF_FUNC_DIS; 7746 bxe_e1h_enable(sc); 7747 } 7748 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 7749 } 7750 7751 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 7752 bxe_config_mf_bw(sc); 7753 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 7754 } 7755 7756 /* Report results to MCP */ 7757 if (dcc_event) 7758 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 7759 else 7760 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 7761 } 7762 7763 static void 7764 bxe_pmf_update(struct bxe_softc *sc) 7765 { 7766 int port = SC_PORT(sc); 7767 uint32_t val; 7768 7769 sc->port.pmf = 1; 7770 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); 7771 7772 /* 7773 * We need the mb() to ensure the ordering between the writing to 7774 * sc->port.pmf here and reading it from the bxe_periodic_task(). 7775 */ 7776 mb(); 7777 7778 /* queue a periodic task */ 7779 // XXX schedule task... 7780 7781 // XXX bxe_dcbx_pmf_update(sc); 7782 7783 /* enable nig attention */ 7784 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 7785 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7786 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); 7787 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); 7788 } else if (!CHIP_IS_E1x(sc)) { 7789 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 7790 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 7791 } 7792 7793 bxe_stats_handle(sc, STATS_EVENT_PMF); 7794 } 7795 7796 static int 7797 bxe_mc_assert(struct bxe_softc *sc) 7798 { 7799 char last_idx; 7800 int i, rc = 0; 7801 uint32_t row0, row1, row2, row3; 7802 7803 /* XSTORM */ 7804 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 7805 if (last_idx) 7806 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7807 7808 /* print the asserts */ 7809 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7810 7811 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 7812 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); 7813 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); 7814 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); 7815 7816 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 7817 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 7818 i, row3, row2, row1, row0); 7819 rc++; 7820 } else { 7821 break; 7822 } 7823 } 7824 7825 /* TSTORM */ 7826 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 7827 if (last_idx) { 7828 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7829 } 7830 7831 /* print the asserts */ 7832 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7833 7834 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 7835 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); 7836 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); 7837 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); 7838 7839 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 7840 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 7841 i, row3, row2, row1, row0); 7842 rc++; 7843 } else { 7844 break; 7845 } 7846 } 7847 7848 /* CSTORM */ 7849 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 7850 if (last_idx) { 7851 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7852 } 7853 7854 /* print the asserts */ 7855 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7856 7857 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 7858 
row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); 7859 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); 7860 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); 7861 7862 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 7863 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 7864 i, row3, row2, row1, row0); 7865 rc++; 7866 } else { 7867 break; 7868 } 7869 } 7870 7871 /* USTORM */ 7872 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 7873 if (last_idx) { 7874 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 7875 } 7876 7877 /* print the asserts */ 7878 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 7879 7880 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 7881 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); 7882 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8); 7883 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); 7884 7885 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 7886 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 7887 i, row3, row2, row1, row0); 7888 rc++; 7889 } else { 7890 break; 7891 } 7892 } 7893 7894 return (rc); 7895 } 7896 7897 static void 7898 bxe_attn_int_deasserted3(struct bxe_softc *sc, 7899 uint32_t attn) 7900 { 7901 int func = SC_FUNC(sc); 7902 uint32_t val; 7903 7904 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 7905 7906 if (attn & BXE_PMF_LINK_ASSERT(sc)) { 7907 7908 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 7909 bxe_read_mf_cfg(sc); 7910 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = 7911 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 7912 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); 7913 7914 if (val & DRV_STATUS_DCC_EVENT_MASK) 7915 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); 7916 7917 if (val & DRV_STATUS_SET_MF_BW) 7918 bxe_set_mf_bw(sc); 7919 7920 if (val & DRV_STATUS_DRV_INFO_REQ) 7921 bxe_handle_drv_info_req(sc); 7922 7923 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) 7924 bxe_pmf_update(sc); 7925 7926 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 7927 bxe_handle_eee_event(sc); 7928 7929 if (sc->link_vars.periodic_flags & 7930 ELINK_PERIODIC_FLAGS_LINK_EVENT) { 7931 /* sync with link */ 7932 bxe_acquire_phy_lock(sc); 7933 sc->link_vars.periodic_flags &= 7934 ~ELINK_PERIODIC_FLAGS_LINK_EVENT; 7935 bxe_release_phy_lock(sc); 7936 if (IS_MF(sc)) 7937 ; // XXX bxe_link_sync_notify(sc); 7938 bxe_link_report(sc); 7939 } 7940 7941 /* 7942 * Always call it here: bxe_link_report() will 7943 * prevent the link indication duplication. 7944 */ 7945 bxe_link_status_update(sc); 7946 7947 } else if (attn & BXE_MC_ASSERT_BITS) { 7948 7949 BLOGE(sc, "MC assert!\n"); 7950 bxe_mc_assert(sc); 7951 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); 7952 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); 7953 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); 7954 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); 7955 bxe_panic(sc, ("MC assert!\n")); 7956 7957 } else if (attn & BXE_MCP_ASSERT) { 7958 7959 BLOGE(sc, "MCP assert!\n"); 7960 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); 7961 // XXX bxe_fw_dump(sc); 7962 7963 } else { 7964 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); 7965 } 7966 } 7967 7968 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 7969 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); 7970 if (attn & BXE_GRC_TIMEOUT) { 7971 val = CHIP_IS_E1(sc) ? 
0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); 7972 BLOGE(sc, "GRC time-out 0x%08x\n", val); 7973 } 7974 if (attn & BXE_GRC_RSV) { 7975 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN); 7976 BLOGE(sc, "GRC reserved 0x%08x\n", val); 7977 } 7978 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 7979 } 7980 } 7981 7982 static void 7983 bxe_attn_int_deasserted2(struct bxe_softc *sc, 7984 uint32_t attn) 7985 { 7986 int port = SC_PORT(sc); 7987 int reg_offset; 7988 uint32_t val0, mask0, val1, mask1; 7989 uint32_t val; 7990 7991 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 7992 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); 7993 BLOGE(sc, "CFC hw attention 0x%08x\n", val); 7994 /* CFC error attention */ 7995 if (val & 0x2) { 7996 BLOGE(sc, "FATAL error from CFC\n"); 7997 } 7998 } 7999 8000 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 8001 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); 8002 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val); 8003 /* RQ_USDMDP_FIFO_OVERFLOW */ 8004 if (val & 0x18000) { 8005 BLOGE(sc, "FATAL error from PXP\n"); 8006 } 8007 8008 if (!CHIP_IS_E1x(sc)) { 8009 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); 8010 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val); 8011 } 8012 } 8013 8014 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR 8015 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT 8016 8017 if (attn & AEU_PXP2_HW_INT_BIT) { 8018 /* CQ47854 workaround: do not panic on 8019 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8020 */ 8021 if (!CHIP_IS_E1x(sc)) { 8022 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); 8023 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); 8024 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); 8025 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); 8026 /* 8027 * If only PXP2_EOP_ERROR_BIT is set in 8028 * STS0 and STS1, clear it. 8029 * 8030 * We probably lose additional attentions between 8031 * STS0 and STS_CLR0; in this case the user will not 8032 * be notified about them. 8033 */ 8034 if (val0 & mask0 & PXP2_EOP_ERROR_BIT && 8035 !(val1 & mask1)) 8036 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 8037 8038 /* print the register, since no one can restore it */ 8039 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0); 8040 8041 /* 8042 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8043 * then notify 8044 */ 8045 if (val0 & PXP2_EOP_ERROR_BIT) { 8046 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n"); 8047 8048 /* 8049 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is 8050 * set then clear the attention from the PXP2 block without panic 8051 */ 8052 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && 8053 ((val1 & mask1) == 0)) 8054 attn &= ~AEU_PXP2_HW_INT_BIT; 8055 } 8056 } 8057 } 8058 8059 if (attn & HW_INTERRUT_ASSERT_SET_2) { 8060 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 8061 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 8062 8063 val = REG_RD(sc, reg_offset); 8064 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 8065 REG_WR(sc, reg_offset, val); 8066 8067 BLOGE(sc, "FATAL HW block attention set2 0x%x\n", 8068 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2)); 8069 bxe_panic(sc, ("HW block attention set2\n")); 8070 } 8071 } 8072 8073 static void 8074 bxe_attn_int_deasserted1(struct bxe_softc *sc, 8075 uint32_t attn) 8076 { 8077 int port = SC_PORT(sc); 8078 int reg_offset; 8079 uint32_t val; 8080 8081 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 8082 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); 8083 BLOGE(sc, "DB hw attention 0x%08x\n", val); 8084 /* DORQ discard attention */ 8085 if (val & 0x2) { 8086 BLOGE(sc, "FATAL error from DORQ\n"); 8087 } 8088 } 8089 8090 if (attn & HW_INTERRUT_ASSERT_SET_1) { 8091 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 8092 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 8093 8094 val = REG_RD(sc, reg_offset); 8095 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 8096 REG_WR(sc, reg_offset, val); 8097 8098 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", 8099 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); 8100 bxe_panic(sc, ("HW block attention set1\n")); 8101 } 8102 } 8103 8104 static void 8105 bxe_attn_int_deasserted0(struct bxe_softc *sc, 8106 uint32_t attn) 8107 { 8108 int port = SC_PORT(sc); 8109 int reg_offset; 8110 uint32_t val; 8111 8112 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 8113 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 8114 8115 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 8116 val = REG_RD(sc, reg_offset); 8117 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 8118 REG_WR(sc, reg_offset, val); 8119 8120 BLOGW(sc, "SPIO5 hw attention\n"); 8121 8122 /* Fan failure attention */ 8123 elink_hw_reset_phy(&sc->link_params); 8124 bxe_fan_failure(sc); 8125 } 8126 8127 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { 8128 bxe_acquire_phy_lock(sc); 8129 elink_handle_module_detect_int(&sc->link_params); 8130 bxe_release_phy_lock(sc); 8131 } 8132 8133 if (attn & HW_INTERRUT_ASSERT_SET_0) { 8134 val = REG_RD(sc, reg_offset); 8135 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 8136 REG_WR(sc, reg_offset, val); 8137 8138 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", 8139 (attn & HW_INTERRUT_ASSERT_SET_0))); 8140 } 8141 } 8142 8143 static void 8144 bxe_attn_int_deasserted(struct bxe_softc *sc, 8145 uint32_t deasserted) 8146 { 8147 struct attn_route attn; 8148 struct attn_route *group_mask; 8149 int port = SC_PORT(sc); 8150 int index; 8151 uint32_t reg_addr; 8152 uint32_t val; 8153 uint32_t aeu_mask; 8154 uint8_t global = FALSE; 8155 8156 /* 8157 * Need to take HW lock because MCP or other port might also 8158 * try to handle this event. 8159 */ 8160 bxe_acquire_alr(sc); 8161 8162 if (bxe_chk_parity_attn(sc, &global, TRUE)) { 8163 /* XXX 8164 * In case of parity errors don't handle attentions so that 8165 * other function would "see" parity errors. 8166 */ 8167 sc->recovery_state = BXE_RECOVERY_INIT; 8168 // XXX schedule a recovery task... 
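// (no recovery task is wired up yet; until one is, we simply quiesce interrupts below and return, leaving the parity attentions visible to the other function)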
8169 /* disable HW interrupts */ 8170 bxe_int_disable(sc); 8171 bxe_release_alr(sc); 8172 return; 8173 } 8174 8175 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 8176 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 8177 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 8178 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 8179 if (!CHIP_IS_E1x(sc)) { 8180 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 8181 } else { 8182 attn.sig[4] = 0; 8183 } 8184 8185 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 8186 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 8187 8188 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 8189 if (deasserted & (1 << index)) { 8190 group_mask = &sc->attn_group[index]; 8191 8192 BLOGD(sc, DBG_INTR, 8193 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, 8194 group_mask->sig[0], group_mask->sig[1], 8195 group_mask->sig[2], group_mask->sig[3], 8196 group_mask->sig[4]); 8197 8198 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); 8199 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); 8200 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); 8201 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); 8202 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); 8203 } 8204 } 8205 8206 bxe_release_alr(sc); 8207 8208 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8209 reg_addr = (HC_REG_COMMAND_REG + port*32 + 8210 COMMAND_REG_ATTN_BITS_CLR); 8211 } else { 8212 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 8213 } 8214 8215 val = ~deasserted; 8216 BLOGD(sc, DBG_INTR, 8217 "about to mask 0x%08x at %s addr 0x%08x\n", val, 8218 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 8219 REG_WR(sc, reg_addr, val); 8220 8221 if (~sc->attn_state & deasserted) { 8222 BLOGE(sc, "IGU error\n"); 8223 } 8224 8225 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 8226 MISC_REG_AEU_MASK_ATTN_FUNC_0; 8227 8228 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8229 8230 aeu_mask = REG_RD(sc, reg_addr); 8231 8232 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", 8233 aeu_mask, deasserted); 8234 aeu_mask |= (deasserted & 0x3ff); 8235 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 8236 8237 REG_WR(sc, reg_addr, aeu_mask); 8238 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8239 8240 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 8241 sc->attn_state &= ~deasserted; 8242 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 8243 } 8244 8245 static void 8246 bxe_attn_int(struct bxe_softc *sc) 8247 { 8248 /* read local copy of bits */ 8249 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 8250 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 8251 uint32_t attn_state = sc->attn_state; 8252 8253 /* look for changed bits */ 8254 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 8255 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 8256 8257 BLOGD(sc, DBG_INTR, 8258 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", 8259 attn_bits, attn_ack, asserted, deasserted); 8260 8261 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 8262 BLOGE(sc, "BAD attention state\n"); 8263 } 8264 8265 /* handle bits that were raised */ 8266 if (asserted) { 8267 bxe_attn_int_asserted(sc, asserted); 8268 } 8269 8270 if (deasserted) { 8271 bxe_attn_int_deasserted(sc, deasserted); 8272 } 8273 } 8274 8275 static uint16_t 8276 bxe_update_dsb_idx(struct bxe_softc *sc) 8277 { 8278 struct host_sp_status_block *def_sb = sc->def_sb; 8279 uint16_t rc = 0; 8280 8281 mb(); /* status block is written to by the chip */ 8282 8283 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 8284 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 8285 rc |= BXE_DEF_SB_ATT_IDX; 8286 } 8287 8288 if (sc->def_idx != def_sb->sp_sb.running_index) { 8289 sc->def_idx = def_sb->sp_sb.running_index; 8290 rc |= BXE_DEF_SB_IDX; 8291 } 8292 8293 mb(); 8294 8295 return (rc); 8296 } 8297 8298 static inline struct ecore_queue_sp_obj * 8299 bxe_cid_to_q_obj(struct bxe_softc *sc, 8300 uint32_t cid) 8301 { 8302 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); 8303 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); 8304 } 8305 8306 static void 8307 bxe_handle_mcast_eqe(struct bxe_softc *sc) 8308 { 8309 struct ecore_mcast_ramrod_params rparam; 8310 int rc; 8311 8312 memset(&rparam, 0, sizeof(rparam)); 8313 8314 rparam.mcast_obj = &sc->mcast_obj; 8315 8316 BXE_MCAST_LOCK(sc); 8317 8318 /* clear pending state for the last command */ 8319 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 8320 8321 /* if there are pending mcast commands - send them */ 8322 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 8323 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 8324 if (rc < 0) { 8325 BLOGD(sc, DBG_SP, 8326 "ERROR: Failed to send pending mcast commands (%d)\n", rc); 8327 } 8328 } 8329 8330 BXE_MCAST_UNLOCK(sc); 8331 } 8332 8333 static void 8334 bxe_handle_classification_eqe(struct bxe_softc *sc, 8335 union event_ring_elem *elem) 8336 { 8337 unsigned long ramrod_flags = 0; 8338 int rc = 0; 8339 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8340 struct ecore_vlan_mac_obj *vlan_mac_obj; 8341 8342 /* always push next commands out, don't wait here */ 8343 bit_set(&ramrod_flags, 
RAMROD_CONT); 8344 8345 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { 8346 case ECORE_FILTER_MAC_PENDING: 8347 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n"); 8348 vlan_mac_obj = &sc->sp_objs[cid].mac_obj; 8349 break; 8350 8351 case ECORE_FILTER_MCAST_PENDING: 8352 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n"); 8353 /* 8354 * This is only relevant for 57710 where multicast MACs are 8355 * configured as unicast MACs using the same ramrod. 8356 */ 8357 bxe_handle_mcast_eqe(sc); 8358 return; 8359 8360 default: 8361 BLOGE(sc, "Unsupported classification command: %d\n", 8362 elem->message.data.eth_event.echo); 8363 return; 8364 } 8365 8366 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); 8367 8368 if (rc < 0) { 8369 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); 8370 } else if (rc > 0) { 8371 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); 8372 } 8373 } 8374 8375 static void 8376 bxe_handle_rx_mode_eqe(struct bxe_softc *sc, 8377 union event_ring_elem *elem) 8378 { 8379 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 8380 8381 /* send the rx_mode command again if it was requested */ 8382 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, 8383 &sc->sp_state)) { 8384 bxe_set_storm_rx_mode(sc); 8385 } 8386 } 8387 8388 static void 8389 bxe_update_eq_prod(struct bxe_softc *sc, 8390 uint16_t prod) 8391 { 8392 storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); 8393 wmb(); /* keep prod updates ordered */ 8394 } 8395 8396 static void 8397 bxe_eq_int(struct bxe_softc *sc) 8398 { 8399 uint16_t hw_cons, sw_cons, sw_prod; 8400 union event_ring_elem *elem; 8401 uint8_t echo; 8402 uint32_t cid; 8403 uint8_t opcode; 8404 int spqe_cnt = 0; 8405 struct ecore_queue_sp_obj *q_obj; 8406 struct ecore_func_sp_obj *f_obj = &sc->func_obj; 8407 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; 8408 8409 hw_cons = le16toh(*sc->eq_cons_sb); 8410 8411 /* 8412 * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256. 8413 * When we get to the next page we need to adjust so the loop 8414 * condition below is met. The next element is the size of a 8415 * regular element, hence we increment by 1. 8416 */ 8417 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { 8418 hw_cons++; 8419 } 8420 8421 /* 8422 * This function never runs in parallel with itself for a 8423 * specific sc, so there is no need for a read memory barrier here. 
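 * Serialization comes from the single-threaded slowpath taskqueue (see bxe_handle_sp_tq() and bxe_interrupt_attach() below), which is the only context that calls bxe_eq_int().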
8424 */ 8425 sw_cons = sc->eq_cons; 8426 sw_prod = sc->eq_prod; 8427 8428 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", 8429 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); 8430 8431 for (; 8432 sw_cons != hw_cons; 8433 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 8434 8435 elem = &sc->eq[EQ_DESC(sw_cons)]; 8436 8437 /* elem CID originates from FW, actually LE */ 8438 cid = SW_CID(elem->message.data.cfc_del_event.cid); 8439 opcode = elem->message.opcode; 8440 8441 /* handle eq element */ 8442 switch (opcode) { 8443 8444 case EVENT_RING_OPCODE_STAT_QUERY: 8445 BLOGD(sc, DBG_SP, "got statistics completion event %d\n", 8446 sc->stats_comp++); 8447 /* nothing to do with stats comp */ 8448 goto next_spqe; 8449 8450 case EVENT_RING_OPCODE_CFC_DEL: 8451 /* handle according to cid range */ 8452 /* we may want to verify here that the sc state is HALTING */ 8453 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); 8454 q_obj = bxe_cid_to_q_obj(sc, cid); 8455 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 8456 break; 8457 } 8458 goto next_spqe; 8459 8460 case EVENT_RING_OPCODE_STOP_TRAFFIC: 8461 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); 8462 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 8463 break; 8464 } 8465 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); 8466 goto next_spqe; 8467 8468 case EVENT_RING_OPCODE_START_TRAFFIC: 8469 BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); 8470 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { 8471 break; 8472 } 8473 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); 8474 goto next_spqe; 8475 8476 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 8477 echo = elem->message.data.function_update_event.echo; 8478 if (echo == SWITCH_UPDATE) { 8479 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); 8480 if (f_obj->complete_cmd(sc, f_obj, 8481 ECORE_F_CMD_SWITCH_UPDATE)) { 8482 break; 8483 } 8484 } 8485 else { 8486 BLOGD(sc, DBG_SP, 8487 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 8488 } 8489 goto next_spqe; 8490 8491 case EVENT_RING_OPCODE_FORWARD_SETUP: 8492 q_obj = &bxe_fwd_sp_obj(sc, q_obj); 8493 if (q_obj->complete_cmd(sc, q_obj, 8494 ECORE_Q_CMD_SETUP_TX_ONLY)) { 8495 break; 8496 } 8497 goto next_spqe; 8498 8499 case EVENT_RING_OPCODE_FUNCTION_START: 8500 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); 8501 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 8502 break; 8503 } 8504 goto next_spqe; 8505 8506 case EVENT_RING_OPCODE_FUNCTION_STOP: 8507 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); 8508 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 8509 break; 8510 } 8511 goto next_spqe; 8512 } 8513 8514 switch (opcode | sc->state) { 8515 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): 8516 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): 8517 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8518 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", cid); 8519 rss_raw->clear_pending(rss_raw); 8520 break; 8521 8522 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): 8523 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): 8524 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): 8525 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): 8526 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): 8527 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8528 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); 8529 bxe_handle_classification_eqe(sc, elem); 8530 break; 8531 8532 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): 8533 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): 8534 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8535 BLOGD(sc, DBG_SP, "got mcast ramrod\n"); 8536 bxe_handle_mcast_eqe(sc); 8537 break; 8538 8539 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): 8540 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): 8541 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8542 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); 8543 bxe_handle_rx_mode_eqe(sc, elem); 8544 break; 8545 8546 default: 8547 /* unknown event; log an error and continue */ 8548 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", 8549 elem->message.opcode, sc->state); 8550 } 8551 8552 next_spqe: 8553 spqe_cnt++; 8554 } /* for */ 8555 8556 mb(); 8557 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); 8558 8559 sc->eq_cons = sw_cons; 8560 sc->eq_prod = sw_prod; 8561 8562 /* make sure the above memory writes complete before the producer update */ 8563 wmb(); 8564 8565 /* update producer */ 8566 bxe_update_eq_prod(sc, sc->eq_prod); 8567 } 8568 8569 static void 8570 bxe_handle_sp_tq(void *context, 8571 int pending) 8572 { 8573 struct bxe_softc *sc = (struct bxe_softc *)context; 8574 uint16_t status; 8575 8576 BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); 8577 8578 /* what work needs to be performed? */ 8579 status = bxe_update_dsb_idx(sc); 8580 8581 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); 8582 8583 /* HW attentions */ 8584 if (status & BXE_DEF_SB_ATT_IDX) { 8585 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); 8586 bxe_attn_int(sc); 8587 status &= ~BXE_DEF_SB_ATT_IDX; 8588 } 8589 8590 /* SP events: STAT_QUERY and others */ 8591 if (status & BXE_DEF_SB_IDX) { 8592 /* handle EQ completions */ 8593 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); 8594 bxe_eq_int(sc); 8595 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 8596 le16toh(sc->def_idx), IGU_INT_NOP, 1); 8597 status &= ~BXE_DEF_SB_IDX; 8598 } 8599 8600 /* if status is non-zero then something went wrong */ 8601 if (__predict_false(status)) { 8602 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); 8603 } 8604 8605 /* ack status block only if something was actually handled */ 8606 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 8607 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); 8608 8609 /* 8610 * Must be called after the EQ processing (since eq leads to sriov 8611 * ramrod completion flows). 8612 * This flow may have been scheduled by the arrival of a ramrod 8613 * completion, or by the sriov code rescheduling itself. 
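 * (The SR-IOV slowpath task itself is still stubbed out in this driver; see the XXX below.)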
8614 */ 8615 // XXX bxe_iov_sp_task(sc); 8616 8617 } 8618 8619 static void 8620 bxe_handle_fp_tq(void *context, 8621 int pending) 8622 { 8623 struct bxe_fastpath *fp = (struct bxe_fastpath *)context; 8624 struct bxe_softc *sc = fp->sc; 8625 uint8_t more_tx = FALSE; 8626 uint8_t more_rx = FALSE; 8627 8628 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); 8629 8630 /* XXX 8631 * IFF_DRV_RUNNING state can't be checked here since we process 8632 * slowpath events on a client queue during setup. Instead 8633 * we need to add a "process/continue" flag here that the driver 8634 * can use to tell the task not to do anything. 8635 */ 8636 #if 0 8637 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { 8638 return; 8639 } 8640 #endif 8641 8642 /* update the fastpath index */ 8643 bxe_update_fp_sb_idx(fp); 8644 8645 /* XXX add loop here if ever support multiple tx CoS */ 8646 /* fp->txdata[cos] */ 8647 if (bxe_has_tx_work(fp)) { 8648 BXE_FP_TX_LOCK(fp); 8649 more_tx = bxe_txeof(sc, fp); 8650 BXE_FP_TX_UNLOCK(fp); 8651 } 8652 8653 if (bxe_has_rx_work(fp)) { 8654 more_rx = bxe_rxeof(sc, fp); 8655 } 8656 8657 if (more_rx /*|| more_tx*/) { 8658 /* still more work to do */ 8659 taskqueue_enqueue(fp->tq, &fp->tq_task); 8660 return; 8661 } 8662 8663 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 8664 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 8665 } 8666 8667 static void 8668 bxe_task_fp(struct bxe_fastpath *fp) 8669 { 8670 struct bxe_softc *sc = fp->sc; 8671 uint8_t more_tx = FALSE; 8672 uint8_t more_rx = FALSE; 8673 8674 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); 8675 8676 /* update the fastpath index */ 8677 bxe_update_fp_sb_idx(fp); 8678 8679 /* XXX add loop here if ever support multiple tx CoS */ 8680 /* fp->txdata[cos] */ 8681 if (bxe_has_tx_work(fp)) { 8682 BXE_FP_TX_LOCK(fp); 8683 more_tx = bxe_txeof(sc, fp); 8684 BXE_FP_TX_UNLOCK(fp); 8685 } 8686 8687 if (bxe_has_rx_work(fp)) { 8688 more_rx = bxe_rxeof(sc, fp); 8689 } 8690 8691 if (more_rx /*|| more_tx*/) { 8692 /* still more work to do, bail out of this ISR and process later */ 8693 taskqueue_enqueue(fp->tq, &fp->tq_task); 8694 return; 8695 } 8696 8697 /* 8698 * Here we write the fastpath index that was taken before doing any tx or 8699 * rx work. It is quite possible that other hw events occurred up to this 8700 * point and were already processed above. Since we are acking an older 8701 * fastpath index, another interrupt may arrive in which there is no work 8702 * left to do. 8703 */ 8704 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 8705 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 8706 } 8707 8708 /* 8709 * Legacy interrupt entry point. 8710 * 8711 * Verifies that the controller generated the interrupt and 8712 * then calls a separate routine to handle the various 8713 * interrupt causes: link, RX, and TX. 
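 * With INTx, and likewise with a single MSI vector, this one handler must service both the slowpath and all of the fastpath queues.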
8714 */ 8715 static void 8716 bxe_intr_legacy(void *xsc) 8717 { 8718 struct bxe_softc *sc = (struct bxe_softc *)xsc; 8719 struct bxe_fastpath *fp; 8720 uint16_t status, mask; 8721 int i; 8722 8723 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); 8724 8725 /* 8726 * 0 for ustorm, 1 for cstorm 8727 * the bits returned from ack_int() are 0-15 8728 * bit 0 = attention status block 8729 * bit 1 = fast path status block 8730 * a mask of 0x2 or more = tx/rx event 8731 * a mask of 1 = slow path event 8732 */ 8733 8734 status = bxe_ack_int(sc); 8735 8736 /* the interrupt is not for us */ 8737 if (__predict_false(status == 0)) { 8738 BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); 8739 return; 8740 } 8741 8742 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); 8743 8744 FOR_EACH_ETH_QUEUE(sc, i) { 8745 fp = &sc->fp[i]; 8746 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); 8747 if (status & mask) { 8748 /* acknowledge and disable further fastpath interrupts */ 8749 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8750 bxe_task_fp(fp); 8751 status &= ~mask; 8752 } 8753 } 8754 8755 if (__predict_false(status & 0x1)) { 8756 /* acknowledge and disable further slowpath interrupts */ 8757 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8758 8759 /* schedule slowpath handler */ 8760 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); 8761 8762 status &= ~0x1; 8763 } 8764 8765 if (__predict_false(status)) { 8766 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); 8767 } 8768 } 8769 8770 /* slowpath interrupt entry point */ 8771 static void 8772 bxe_intr_sp(void *xsc) 8773 { 8774 struct bxe_softc *sc = (struct bxe_softc *)xsc; 8775 8776 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); 8777 8778 /* acknowledge and disable further slowpath interrupts */ 8779 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8780 8781 /* schedule slowpath handler */ 8782 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); 8783 } 8784 8785 /* fastpath interrupt entry point */ 8786 static void 8787 bxe_intr_fp(void *xfp) 8788 { 8789 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; 8790 struct bxe_softc *sc = fp->sc; 8791 8792 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); 8793 8794 BLOGD(sc, DBG_INTR, 8795 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", 8796 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); 8797 8798 /* acknowledge and disable further fastpath interrupts */ 8799 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 8800 8801 bxe_task_fp(fp); 8802 } 8803 8804 /* Release all interrupts allocated by the driver. 
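 * Note that the interrupt handlers themselves are torn down separately in bxe_interrupt_detach(); this routine only returns the bus IRQ resources and any MSI/MSI-X allocations to the system.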
*/ 8805 static void 8806 bxe_interrupt_free(struct bxe_softc *sc) 8807 { 8808 int i; 8809 8810 switch (sc->interrupt_mode) { 8811 case INTR_MODE_INTX: 8812 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); 8813 if (sc->intr[0].resource != NULL) { 8814 bus_release_resource(sc->dev, 8815 SYS_RES_IRQ, 8816 sc->intr[0].rid, 8817 sc->intr[0].resource); 8818 } 8819 break; 8820 case INTR_MODE_MSI: 8821 for (i = 0; i < sc->intr_count; i++) { 8822 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); 8823 if (sc->intr[i].resource && sc->intr[i].rid) { 8824 bus_release_resource(sc->dev, 8825 SYS_RES_IRQ, 8826 sc->intr[i].rid, 8827 sc->intr[i].resource); 8828 } 8829 } 8830 pci_release_msi(sc->dev); 8831 break; 8832 case INTR_MODE_MSIX: 8833 for (i = 0; i < sc->intr_count; i++) { 8834 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); 8835 if (sc->intr[i].resource && sc->intr[i].rid) { 8836 bus_release_resource(sc->dev, 8837 SYS_RES_IRQ, 8838 sc->intr[i].rid, 8839 sc->intr[i].resource); 8840 } 8841 } 8842 pci_release_msi(sc->dev); 8843 break; 8844 default: 8845 /* nothing to do as initial allocation failed */ 8846 break; 8847 } 8848 } 8849 8850 /* 8851 * This function determines and allocates the appropriate 8852 * interrupt based on system capabilities and user request. 8853 * 8854 * The user may force a particular interrupt mode, specify 8855 * the number of receive queues, specify the method for 8856 * distributing received frames to receive queues, or use 8857 * the default settings which will automatically select the 8858 * best supported combination. In addition, the OS may or 8859 * may not support certain combinations of these settings. 8860 * This routine attempts to reconcile the settings requested 8861 * by the user with the capabilities available from the system 8862 * to select the optimal combination of features. 8863 * 8864 * Returns: 8865 * 0 = Success, !0 = Failure. 8866 */ 8867 static int 8868 bxe_interrupt_alloc(struct bxe_softc *sc) 8869 { 8870 int msix_count = 0; 8871 int msi_count = 0; 8872 int num_requested = 0; 8873 int num_allocated = 0; 8874 int rid, i, j; 8875 int rc; 8876 8877 /* get the number of available MSI/MSI-X interrupts from the OS */ 8878 if (sc->interrupt_mode > 0) { 8879 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { 8880 msix_count = pci_msix_count(sc->dev); 8881 } 8882 8883 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { 8884 msi_count = pci_msi_count(sc->dev); 8885 } 8886 8887 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", 8888 msi_count, msix_count); 8889 } 8890 8891 do { /* try allocating MSI-X interrupt resources (at least 2) */ 8892 if (sc->interrupt_mode != INTR_MODE_MSIX) { 8893 break; 8894 } 8895 8896 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || 8897 (msix_count < 2)) { 8898 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 8899 break; 8900 } 8901 8902 /* ask for the necessary number of MSI-X vectors */ 8903 num_requested = min((sc->num_queues + 1), msix_count); 8904 8905 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); 8906 8907 num_allocated = num_requested; 8908 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { 8909 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); 8910 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 8911 break; 8912 } 8913 8914 if (num_allocated < 2) { /* possible? 
*/ 8915 BLOGE(sc, "MSI-X allocation less than 2!\n"); 8916 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 8917 pci_release_msi(sc->dev); 8918 break; 8919 } 8920 8921 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", 8922 num_requested, num_allocated); 8923 8924 /* best effort so use the number of vectors allocated to us */ 8925 sc->intr_count = num_allocated; 8926 sc->num_queues = num_allocated - 1; 8927 8928 rid = 1; /* initial resource identifier */ 8929 8930 /* allocate the MSI-X vectors */ 8931 for (i = 0; i < num_allocated; i++) { 8932 sc->intr[i].rid = (rid + i); 8933 8934 if ((sc->intr[i].resource = 8935 bus_alloc_resource_any(sc->dev, 8936 SYS_RES_IRQ, 8937 &sc->intr[i].rid, 8938 RF_ACTIVE)) == NULL) { 8939 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", 8940 i, (rid + i)); 8941 8942 for (j = (i - 1); j >= 0; j--) { 8943 bus_release_resource(sc->dev, 8944 SYS_RES_IRQ, 8945 sc->intr[j].rid, 8946 sc->intr[j].resource); 8947 } 8948 8949 sc->intr_count = 0; 8950 sc->num_queues = 0; 8951 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 8952 pci_release_msi(sc->dev); 8953 break; 8954 } 8955 8956 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); 8957 } 8958 } while (0); 8959 8960 do { /* try allocating MSI vector resources (at least 1) */ 8961 if (sc->interrupt_mode != INTR_MODE_MSI) { 8962 break; 8963 } 8964 8965 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || 8966 (msi_count < 1)) { 8967 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 8968 break; 8969 } 8970 8971 /* ask for a single MSI vector */ 8972 num_requested = 1; 8973 8974 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); 8975 8976 num_allocated = num_requested; 8977 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { 8978 BLOGE(sc, "MSI alloc failed (%d)!\n", rc); 8979 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 8980 break; 8981 } 8982 8983 if (num_allocated != 1) { /* possible? 
*/ 8984 BLOGE(sc, "MSI allocation is not 1!\n"); 8985 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 8986 pci_release_msi(sc->dev); 8987 break; 8988 } 8989 8990 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", 8991 num_requested, num_allocated); 8992 8993 /* best effort so use the number of vectors allocated to us */ 8994 sc->intr_count = num_allocated; 8995 sc->num_queues = num_allocated; 8996 8997 rid = 1; /* initial resource identifier */ 8998 8999 sc->intr[0].rid = rid; 9000 9001 if ((sc->intr[0].resource = 9002 bus_alloc_resource_any(sc->dev, 9003 SYS_RES_IRQ, 9004 &sc->intr[0].rid, 9005 RF_ACTIVE)) == NULL) { 9006 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); 9007 sc->intr_count = 0; 9008 sc->num_queues = 0; 9009 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9010 pci_release_msi(sc->dev); 9011 break; 9012 } 9013 9014 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); 9015 } while (0); 9016 9017 do { /* try allocating INTx vector resources */ 9018 if (sc->interrupt_mode != INTR_MODE_INTX) { 9019 break; 9020 } 9021 9022 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); 9023 9024 /* only one vector for INTx */ 9025 sc->intr_count = 1; 9026 sc->num_queues = 1; 9027 9028 rid = 0; /* initial resource identifier */ 9029 9030 sc->intr[0].rid = rid; 9031 9032 if ((sc->intr[0].resource = 9033 bus_alloc_resource_any(sc->dev, 9034 SYS_RES_IRQ, 9035 &sc->intr[0].rid, 9036 (RF_ACTIVE | RF_SHAREABLE))) == NULL) { 9037 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); 9038 sc->intr_count = 0; 9039 sc->num_queues = 0; 9040 sc->interrupt_mode = -1; /* Failed! */ 9041 break; 9042 } 9043 9044 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); 9045 } while (0); 9046 9047 if (sc->interrupt_mode == -1) { 9048 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); 9049 rc = 1; 9050 } else { 9051 BLOGD(sc, DBG_LOAD, 9052 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", 9053 sc->interrupt_mode, sc->num_queues); 9054 rc = 0; 9055 } 9056 9057 return (rc); 9058 } 9059 9060 static void 9061 bxe_interrupt_detach(struct bxe_softc *sc) 9062 { 9063 struct bxe_fastpath *fp; 9064 int i; 9065 9066 /* release interrupt resources */ 9067 for (i = 0; i < sc->intr_count; i++) { 9068 if (sc->intr[i].resource && sc->intr[i].tag) { 9069 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); 9070 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); 9071 } 9072 } 9073 9074 for (i = 0; i < sc->num_queues; i++) { 9075 fp = &sc->fp[i]; 9076 if (fp->tq) { 9077 taskqueue_drain(fp->tq, &fp->tq_task); 9078 taskqueue_drain(fp->tq, &fp->tx_task); 9079 while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task, 9080 NULL)) 9081 taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task); 9082 taskqueue_free(fp->tq); 9083 fp->tq = NULL; 9084 } 9085 } 9086 9087 9088 if (sc->sp_tq) { 9089 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); 9090 taskqueue_free(sc->sp_tq); 9091 sc->sp_tq = NULL; 9092 } 9093 } 9094 9095 /* 9096 * Enables interrupts and attach to the ISR. 9097 * 9098 * When using multiple MSI/MSI-X vectors the first vector 9099 * is used for slowpath operations while all remaining 9100 * vectors are used for fastpath operations. If only a 9101 * single MSI/MSI-X vector is used (SINGLE_ISR) then the 9102 * ISR must look for both slowpath and fastpath completions. 
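 * For example (an illustrative layout): if five MSI-X vectors are allocated, vector 0 services the slowpath and vectors 1-4 service fastpath queues 0-3.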
9103 */ 9104 static int 9105 bxe_interrupt_attach(struct bxe_softc *sc) 9106 { 9107 struct bxe_fastpath *fp; 9108 int rc = 0; 9109 int i; 9110 9111 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), 9112 "bxe%d_sp_tq", sc->unit); 9113 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); 9114 sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT, 9115 taskqueue_thread_enqueue, 9116 &sc->sp_tq); 9117 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ 9118 "%s", sc->sp_tq_name); 9119 9120 9121 for (i = 0; i < sc->num_queues; i++) { 9122 fp = &sc->fp[i]; 9123 snprintf(fp->tq_name, sizeof(fp->tq_name), 9124 "bxe%d_fp%d_tq", sc->unit, i); 9125 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); 9126 TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp); 9127 fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT, 9128 taskqueue_thread_enqueue, 9129 &fp->tq); 9130 TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0, 9131 bxe_tx_mq_start_deferred, fp); 9132 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ 9133 "%s", fp->tq_name); 9134 } 9135 9136 /* setup interrupt handlers */ 9137 if (sc->interrupt_mode == INTR_MODE_MSIX) { 9138 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); 9139 9140 /* 9141 * Setup the interrupt handler. Note that we pass the driver instance 9142 * to the interrupt handler for the slowpath. 9143 */ 9144 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9145 (INTR_TYPE_NET | INTR_MPSAFE), 9146 NULL, bxe_intr_sp, sc, 9147 &sc->intr[0].tag)) != 0) { 9148 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); 9149 goto bxe_interrupt_attach_exit; 9150 } 9151 9152 bus_describe_intr(sc->dev, sc->intr[0].resource, 9153 sc->intr[0].tag, "sp"); 9154 9155 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ 9156 9157 /* initialize the fastpath vectors (note the first was used for sp) */ 9158 for (i = 0; i < sc->num_queues; i++) { 9159 fp = &sc->fp[i]; 9160 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); 9161 9162 /* 9163 * Setup the interrupt handler. Note that we pass the 9164 * fastpath context to the interrupt handler in this 9165 * case. 9166 */ 9167 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, 9168 (INTR_TYPE_NET | INTR_MPSAFE), 9169 NULL, bxe_intr_fp, fp, 9170 &sc->intr[i + 1].tag)) != 0) { 9171 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", 9172 (i + 1), rc); 9173 goto bxe_interrupt_attach_exit; 9174 } 9175 9176 bus_describe_intr(sc->dev, sc->intr[i + 1].resource, 9177 sc->intr[i + 1].tag, "fp%02d", i); 9178 9179 /* bind the fastpath instance to a cpu */ 9180 if (sc->num_queues > 1) { 9181 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); 9182 } 9183 9184 fp->state = BXE_FP_STATE_IRQ; 9185 } 9186 } else if (sc->interrupt_mode == INTR_MODE_MSI) { 9187 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); 9188 9189 /* 9190 * Setup the interrupt handler. Note that we pass the 9191 * driver instance to the interrupt handler which 9192 * will handle both the slowpath and fastpath. 9193 */ 9194 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9195 (INTR_TYPE_NET | INTR_MPSAFE), 9196 NULL, bxe_intr_legacy, sc, 9197 &sc->intr[0].tag)) != 0) { 9198 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); 9199 goto bxe_interrupt_attach_exit; 9200 } 9201 9202 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ 9203 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); 9204 9205 /* 9206 * Setup the interrupt handler. 
Note that we pass the 9207 * driver instance to the interrupt handler which 9208 * will handle both the slowpath and fastpath. 9209 */ 9210 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9211 (INTR_TYPE_NET | INTR_MPSAFE), 9212 NULL, bxe_intr_legacy, sc, 9213 &sc->intr[0].tag)) != 0) { 9214 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); 9215 goto bxe_interrupt_attach_exit; 9216 } 9217 } 9218 9219 bxe_interrupt_attach_exit: 9220 9221 return (rc); 9222 } 9223 9224 static int bxe_init_hw_common_chip(struct bxe_softc *sc); 9225 static int bxe_init_hw_common(struct bxe_softc *sc); 9226 static int bxe_init_hw_port(struct bxe_softc *sc); 9227 static int bxe_init_hw_func(struct bxe_softc *sc); 9228 static void bxe_reset_common(struct bxe_softc *sc); 9229 static void bxe_reset_port(struct bxe_softc *sc); 9230 static void bxe_reset_func(struct bxe_softc *sc); 9231 static int bxe_gunzip_init(struct bxe_softc *sc); 9232 static void bxe_gunzip_end(struct bxe_softc *sc); 9233 static int bxe_init_firmware(struct bxe_softc *sc); 9234 static void bxe_release_firmware(struct bxe_softc *sc); 9235 9236 static struct 9237 ecore_func_sp_drv_ops bxe_func_sp_drv = { 9238 .init_hw_cmn_chip = bxe_init_hw_common_chip, 9239 .init_hw_cmn = bxe_init_hw_common, 9240 .init_hw_port = bxe_init_hw_port, 9241 .init_hw_func = bxe_init_hw_func, 9242 9243 .reset_hw_cmn = bxe_reset_common, 9244 .reset_hw_port = bxe_reset_port, 9245 .reset_hw_func = bxe_reset_func, 9246 9247 .gunzip_init = bxe_gunzip_init, 9248 .gunzip_end = bxe_gunzip_end, 9249 9250 .init_fw = bxe_init_firmware, 9251 .release_fw = bxe_release_firmware, 9252 }; 9253 9254 static void 9255 bxe_init_func_obj(struct bxe_softc *sc) 9256 { 9257 sc->dmae_ready = 0; 9258 9259 ecore_init_func_obj(sc, 9260 &sc->func_obj, 9261 BXE_SP(sc, func_rdata), 9262 BXE_SP_MAPPING(sc, func_rdata), 9263 BXE_SP(sc, func_afex_rdata), 9264 BXE_SP_MAPPING(sc, func_afex_rdata), 9265 &bxe_func_sp_drv); 9266 } 9267 9268 static int 9269 bxe_init_hw(struct bxe_softc *sc, 9270 uint32_t load_code) 9271 { 9272 struct ecore_func_state_params func_params = { NULL }; 9273 int rc; 9274 9275 /* prepare the parameters for function state transitions */ 9276 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 9277 9278 func_params.f_obj = &sc->func_obj; 9279 func_params.cmd = ECORE_F_CMD_HW_INIT; 9280 9281 func_params.params.hw_init.load_phase = load_code; 9282 9283 /* 9284 * Via a plethora of function pointers, we will eventually reach 9285 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
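 * The dispatch goes through the bxe_func_sp_drv ops table registered by bxe_init_func_obj() above, which ecore_func_state_change() invokes for the ECORE_F_CMD_HW_INIT transition.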
9286 */ 9287 rc = ecore_func_state_change(sc, &func_params); 9288 9289 return (rc); 9290 } 9291 9292 static void 9293 bxe_fill(struct bxe_softc *sc, 9294 uint32_t addr, 9295 int fill, 9296 uint32_t len) 9297 { 9298 uint32_t i; 9299 9300 if (!(len % 4) && !(addr % 4)) { 9301 for (i = 0; i < len; i += 4) { 9302 REG_WR(sc, (addr + i), fill); 9303 } 9304 } else { 9305 for (i = 0; i < len; i++) { 9306 REG_WR8(sc, (addr + i), fill); 9307 } 9308 } 9309 } 9310 9311 /* writes FP SP data to FW - data_size in dwords */ 9312 static void 9313 bxe_wr_fp_sb_data(struct bxe_softc *sc, 9314 int fw_sb_id, 9315 uint32_t *sb_data_p, 9316 uint32_t data_size) 9317 { 9318 int index; 9319 9320 for (index = 0; index < data_size; index++) { 9321 REG_WR(sc, 9322 (BAR_CSTRORM_INTMEM + 9323 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 9324 (sizeof(uint32_t) * index)), 9325 *(sb_data_p + index)); 9326 } 9327 } 9328 9329 static void 9330 bxe_zero_fp_sb(struct bxe_softc *sc, 9331 int fw_sb_id) 9332 { 9333 struct hc_status_block_data_e2 sb_data_e2; 9334 struct hc_status_block_data_e1x sb_data_e1x; 9335 uint32_t *sb_data_p; 9336 uint32_t data_size = 0; 9337 9338 if (!CHIP_IS_E1x(sc)) { 9339 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9340 sb_data_e2.common.state = SB_DISABLED; 9341 sb_data_e2.common.p_func.vf_valid = FALSE; 9342 sb_data_p = (uint32_t *)&sb_data_e2; 9343 data_size = (sizeof(struct hc_status_block_data_e2) / 9344 sizeof(uint32_t)); 9345 } else { 9346 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9347 sb_data_e1x.common.state = SB_DISABLED; 9348 sb_data_e1x.common.p_func.vf_valid = FALSE; 9349 sb_data_p = (uint32_t *)&sb_data_e1x; 9350 data_size = (sizeof(struct hc_status_block_data_e1x) / 9351 sizeof(uint32_t)); 9352 } 9353 9354 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9355 9356 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 9357 0, CSTORM_STATUS_BLOCK_SIZE); 9358 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 9359 0, CSTORM_SYNC_BLOCK_SIZE); 9360 } 9361 9362 static void 9363 bxe_wr_sp_sb_data(struct bxe_softc *sc, 9364 struct hc_sp_status_block_data *sp_sb_data) 9365 { 9366 int i; 9367 9368 for (i = 0; 9369 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 9370 i++) { 9371 REG_WR(sc, 9372 (BAR_CSTRORM_INTMEM + 9373 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 9374 (i * sizeof(uint32_t))), 9375 *((uint32_t *)sp_sb_data + i)); 9376 } 9377 } 9378 9379 static void 9380 bxe_zero_sp_sb(struct bxe_softc *sc) 9381 { 9382 struct hc_sp_status_block_data sp_sb_data; 9383 9384 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 9385 9386 sp_sb_data.state = SB_DISABLED; 9387 sp_sb_data.p_func.vf_valid = FALSE; 9388 9389 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9390 9391 bxe_fill(sc, 9392 (BAR_CSTRORM_INTMEM + 9393 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 9394 0, CSTORM_SP_STATUS_BLOCK_SIZE); 9395 bxe_fill(sc, 9396 (BAR_CSTRORM_INTMEM + 9397 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 9398 0, CSTORM_SP_SYNC_BLOCK_SIZE); 9399 } 9400 9401 static void 9402 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 9403 int igu_sb_id, 9404 int igu_seg_id) 9405 { 9406 hc_sm->igu_sb_id = igu_sb_id; 9407 hc_sm->igu_seg_id = igu_seg_id; 9408 hc_sm->timer_value = 0xFF; 9409 hc_sm->time_to_expire = 0xFFFFFFFF; 9410 } 9411 9412 static void 9413 bxe_map_sb_state_machines(struct hc_index_data *index_data) 9414 { 9415 /* zero out state machine indices */ 9416 9417 /* rx indices */ 9418 
index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 9419 9420 /* tx indices */ 9421 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 9422 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 9423 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 9424 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 9425 9426 /* map indices */ 9427 9428 /* rx indices */ 9429 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 9430 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9431 9432 /* tx indices */ 9433 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 9434 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9435 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 9436 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9437 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 9438 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9439 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 9440 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9441 } 9442 9443 static void 9444 bxe_init_sb(struct bxe_softc *sc, 9445 bus_addr_t busaddr, 9446 int vfid, 9447 uint8_t vf_valid, 9448 int fw_sb_id, 9449 int igu_sb_id) 9450 { 9451 struct hc_status_block_data_e2 sb_data_e2; 9452 struct hc_status_block_data_e1x sb_data_e1x; 9453 struct hc_status_block_sm *hc_sm_p; 9454 uint32_t *sb_data_p; 9455 int igu_seg_id; 9456 int data_size; 9457 9458 if (CHIP_INT_MODE_IS_BC(sc)) { 9459 igu_seg_id = HC_SEG_ACCESS_NORM; 9460 } else { 9461 igu_seg_id = IGU_SEG_ACCESS_NORM; 9462 } 9463 9464 bxe_zero_fp_sb(sc, fw_sb_id); 9465 9466 if (!CHIP_IS_E1x(sc)) { 9467 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9468 sb_data_e2.common.state = SB_ENABLED; 9469 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); 9470 sb_data_e2.common.p_func.vf_id = vfid; 9471 sb_data_e2.common.p_func.vf_valid = vf_valid; 9472 sb_data_e2.common.p_func.vnic_id = SC_VN(sc); 9473 sb_data_e2.common.same_igu_sb_1b = TRUE; 9474 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); 9475 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); 9476 hc_sm_p = sb_data_e2.common.state_machine; 9477 sb_data_p = (uint32_t *)&sb_data_e2; 9478 data_size = (sizeof(struct hc_status_block_data_e2) / 9479 sizeof(uint32_t)); 9480 bxe_map_sb_state_machines(sb_data_e2.index_data); 9481 } else { 9482 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9483 sb_data_e1x.common.state = SB_ENABLED; 9484 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); 9485 sb_data_e1x.common.p_func.vf_id = 0xff; 9486 sb_data_e1x.common.p_func.vf_valid = FALSE; 9487 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); 9488 sb_data_e1x.common.same_igu_sb_1b = TRUE; 9489 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); 9490 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); 9491 hc_sm_p = sb_data_e1x.common.state_machine; 9492 sb_data_p = (uint32_t *)&sb_data_e1x; 9493 data_size = (sizeof(struct hc_status_block_data_e1x) / 9494 sizeof(uint32_t)); 9495 bxe_map_sb_state_machines(sb_data_e1x.index_data); 9496 } 9497 9498 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); 9499 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); 9500 9501 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); 9502 9503 /* write indices to HW - PCI guarantees endianity of regpairs */ 9504 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9505 } 9506 9507 static inline uint8_t 9508 bxe_fp_qzone_id(struct bxe_fastpath *fp) 9509 { 9510 if (CHIP_IS_E1x(fp->sc)) { 9511 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); 9512 } 
else { 9513 return (fp->cl_id); 9514 } 9515 } 9516 9517 static inline uint32_t 9518 bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, 9519 struct bxe_fastpath *fp) 9520 { 9521 uint32_t offset = BAR_USTRORM_INTMEM; 9522 9523 if (!CHIP_IS_E1x(sc)) { 9524 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 9525 } else { 9526 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); 9527 } 9528 9529 return (offset); 9530 } 9531 9532 static void 9533 bxe_init_eth_fp(struct bxe_softc *sc, 9534 int idx) 9535 { 9536 struct bxe_fastpath *fp = &sc->fp[idx]; 9537 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; 9538 unsigned long q_type = 0; 9539 int cos; 9540 9541 fp->sc = sc; 9542 fp->index = idx; 9543 9544 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); 9545 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); 9546 9547 fp->cl_id = (CHIP_IS_E1x(sc)) ? 9548 (SC_L_ID(sc) + idx) : 9549 /* want client ID same as IGU SB ID for non-E1 */ 9550 fp->igu_sb_id; 9551 fp->cl_qzone_id = bxe_fp_qzone_id(fp); 9552 9553 /* setup sb indices */ 9554 if (!CHIP_IS_E1x(sc)) { 9555 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; 9556 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; 9557 } else { 9558 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; 9559 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; 9560 } 9561 9562 /* init shortcut */ 9563 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); 9564 9565 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; 9566 9567 /* 9568 * XXX If multiple CoS is ever supported then each fastpath structure 9569 * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 9570 */ 9571 for (cos = 0; cos < sc->max_cos; cos++) { 9572 cids[cos] = idx; 9573 } 9574 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; 9575 9576 /* nothing more for a VF to do */ 9577 if (IS_VF(sc)) { 9578 return; 9579 } 9580 9581 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE, 9582 fp->fw_sb_id, fp->igu_sb_id); 9583 9584 bxe_update_fp_sb_idx(fp); 9585 9586 /* Configure Queue State object */ 9587 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX); 9588 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX); 9589 9590 ecore_init_queue_obj(sc, 9591 &sc->sp_objs[idx].q_obj, 9592 fp->cl_id, 9593 cids, 9594 sc->max_cos, 9595 SC_FUNC(sc), 9596 BXE_SP(sc, q_rdata), 9597 BXE_SP_MAPPING(sc, q_rdata), 9598 q_type); 9599 9600 /* configure classification DBs */ 9601 ecore_init_mac_obj(sc, 9602 &sc->sp_objs[idx].mac_obj, 9603 fp->cl_id, 9604 idx, 9605 SC_FUNC(sc), 9606 BXE_SP(sc, mac_rdata), 9607 BXE_SP_MAPPING(sc, mac_rdata), 9608 ECORE_FILTER_MAC_PENDING, 9609 &sc->sp_state, 9610 ECORE_OBJ_TYPE_RX_TX, 9611 &sc->macs_pool); 9612 9613 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n", 9614 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); 9615 } 9616 9617 static inline void 9618 bxe_update_rx_prod(struct bxe_softc *sc, 9619 struct bxe_fastpath *fp, 9620 uint16_t rx_bd_prod, 9621 uint16_t rx_cq_prod, 9622 uint16_t rx_sge_prod) 9623 { 9624 struct ustorm_eth_rx_producers rx_prods = { 0 }; 9625 uint32_t i; 9626 9627 /* update producers */ 9628 rx_prods.bd_prod = rx_bd_prod; 9629 rx_prods.cqe_prod = rx_cq_prod; 9630 rx_prods.sge_prod = rx_sge_prod; 9631 9632 /* 9633 * Make sure that the BD and SGE data is updated before updating the 9634 * producers since FW might read the BD/SGE right after the producer 9635 * is updated. 
9636 * This is only applicable for weak-ordered memory model archs such 9637 * as IA-64. The following barrier is also mandatory since the FW 9638 * assumes BDs must have buffers. 9639 */ 9640 wmb(); 9641 9642 for (i = 0; i < (sizeof(rx_prods) / 4); i++) { 9643 REG_WR(sc, 9644 (fp->ustorm_rx_prods_offset + (i * 4)), 9645 ((uint32_t *)&rx_prods)[i]); 9646 } 9647 9648 wmb(); /* keep prod updates ordered */ 9649 9650 BLOGD(sc, DBG_RX, 9651 "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n", 9652 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod); 9653 } 9654 9655 static void 9656 bxe_init_rx_rings(struct bxe_softc *sc) 9657 { 9658 struct bxe_fastpath *fp; 9659 int i; 9660 9661 for (i = 0; i < sc->num_queues; i++) { 9662 fp = &sc->fp[i]; 9663 9664 fp->rx_bd_cons = 0; 9665 9666 /* 9667 * Activate the BD ring... 9668 * Warning: this will generate an interrupt (to the TSTORM), 9669 * so this can only be done after the chip is initialized. 9670 */ 9671 bxe_update_rx_prod(sc, fp, 9672 fp->rx_bd_prod, 9673 fp->rx_cq_prod, 9674 fp->rx_sge_prod); 9675 9676 if (i != 0) { 9677 continue; 9678 } 9679 9680 if (CHIP_IS_E1(sc)) { 9681 REG_WR(sc, 9682 (BAR_USTRORM_INTMEM + 9683 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))), 9684 U64_LO(fp->rcq_dma.paddr)); 9685 REG_WR(sc, 9686 (BAR_USTRORM_INTMEM + 9687 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4), 9688 U64_HI(fp->rcq_dma.paddr)); 9689 } 9690 } 9691 } 9692 9693 static void 9694 bxe_init_tx_ring_one(struct bxe_fastpath *fp) 9695 { 9696 SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1); 9697 fp->tx_db.data.zero_fill1 = 0; 9698 fp->tx_db.data.prod = 0; 9699 9700 fp->tx_pkt_prod = 0; 9701 fp->tx_pkt_cons = 0; 9702 fp->tx_bd_prod = 0; 9703 fp->tx_bd_cons = 0; 9704 fp->eth_q_stats.tx_pkts = 0; 9705 } 9706 9707 static inline void 9708 bxe_init_tx_rings(struct bxe_softc *sc) 9709 { 9710 int i; 9711 9712 for (i = 0; i < sc->num_queues; i++) { 9713 bxe_init_tx_ring_one(&sc->fp[i]); 9714 } 9715 } 9716 9717 static void 9718 bxe_init_def_sb(struct bxe_softc *sc) 9719 { 9720 struct host_sp_status_block *def_sb = sc->def_sb; 9721 bus_addr_t mapping = sc->def_sb_dma.paddr; 9722 int igu_sp_sb_index; 9723 int igu_seg_id; 9724 int port = SC_PORT(sc); 9725 int func = SC_FUNC(sc); 9726 int reg_offset, reg_offset_en5; 9727 uint64_t section; 9728 int index, sindex; 9729 struct hc_sp_status_block_data sp_sb_data; 9730 9731 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 9732 9733 if (CHIP_INT_MODE_IS_BC(sc)) { 9734 igu_sp_sb_index = DEF_SB_IGU_ID; 9735 igu_seg_id = HC_SEG_ACCESS_DEF; 9736 } else { 9737 igu_sp_sb_index = sc->igu_dsb_id; 9738 igu_seg_id = IGU_SEG_ACCESS_DEF; 9739 } 9740 9741 /* attentions */ 9742 section = ((uint64_t)mapping + 9743 offsetof(struct host_sp_status_block, atten_status_block)); 9744 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 9745 sc->attn_state = 0; 9746 9747 reg_offset = (port) ? 9748 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 9749 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 9750 reg_offset_en5 = (port) ? 
9751 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 9752 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; 9753 9754 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 9755 /* take care of sig[0]..sig[4] */ 9756 for (sindex = 0; sindex < 4; sindex++) { 9757 sc->attn_group[index].sig[sindex] = 9758 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); 9759 } 9760 9761 if (!CHIP_IS_E1x(sc)) { 9762 /* 9763 * enable5 is separate from the rest of the registers, 9764 * and the address skip is 4 and not 16 between the 9765 * different groups 9766 */ 9767 sc->attn_group[index].sig[4] = 9768 REG_RD(sc, (reg_offset_en5 + (0x4 * index))); 9769 } else { 9770 sc->attn_group[index].sig[4] = 0; 9771 } 9772 } 9773 9774 if (sc->devinfo.int_block == INT_BLOCK_HC) { 9775 reg_offset = (port) ? 9776 HC_REG_ATTN_MSG1_ADDR_L : 9777 HC_REG_ATTN_MSG0_ADDR_L; 9778 REG_WR(sc, reg_offset, U64_LO(section)); 9779 REG_WR(sc, (reg_offset + 4), U64_HI(section)); 9780 } else if (!CHIP_IS_E1x(sc)) { 9781 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 9782 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 9783 } 9784 9785 section = ((uint64_t)mapping + 9786 offsetof(struct host_sp_status_block, sp_sb)); 9787 9788 bxe_zero_sp_sb(sc); 9789 9790 /* PCI guarantees endianity of regpair */ 9791 sp_sb_data.state = SB_ENABLED; 9792 sp_sb_data.host_sb_addr.lo = U64_LO(section); 9793 sp_sb_data.host_sb_addr.hi = U64_HI(section); 9794 sp_sb_data.igu_sb_id = igu_sp_sb_index; 9795 sp_sb_data.igu_seg_id = igu_seg_id; 9796 sp_sb_data.p_func.pf_id = func; 9797 sp_sb_data.p_func.vnic_id = SC_VN(sc); 9798 sp_sb_data.p_func.vf_id = 0xff; 9799 9800 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9801 9802 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 9803 } 9804 9805 static void 9806 bxe_init_sp_ring(struct bxe_softc *sc) 9807 { 9808 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); 9809 sc->spq_prod_idx = 0; 9810 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; 9811 sc->spq_prod_bd = sc->spq; 9812 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); 9813 } 9814 9815 static void 9816 bxe_init_eq_ring(struct bxe_softc *sc) 9817 { 9818 union event_ring_elem *elem; 9819 int i; 9820 9821 for (i = 1; i <= NUM_EQ_PAGES; i++) { 9822 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; 9823 9824 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + 9825 BCM_PAGE_SIZE * 9826 (i % NUM_EQ_PAGES))); 9827 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + 9828 BCM_PAGE_SIZE * 9829 (i % NUM_EQ_PAGES))); 9830 } 9831 9832 sc->eq_cons = 0; 9833 sc->eq_prod = NUM_EQ_DESC; 9834 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; 9835 9836 atomic_store_rel_long(&sc->eq_spq_left, 9837 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), 9838 NUM_EQ_DESC) - 1)); 9839 } 9840 9841 static void 9842 bxe_init_internal_common(struct bxe_softc *sc) 9843 { 9844 int i; 9845 9846 /* 9847 * Zero this manually as its initialization is currently missing 9848 * in the initTool. 9849 */ 9850 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { 9851 REG_WR(sc, 9852 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 9853 0); 9854 } 9855 9856 if (!CHIP_IS_E1x(sc)) { 9857 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), 9858 CHIP_INT_MODE_IS_BC(sc) ? 
HC_IGU_BC_MODE : HC_IGU_NBC_MODE); 9859 } 9860 } 9861 9862 static void 9863 bxe_init_internal(struct bxe_softc *sc, 9864 uint32_t load_code) 9865 { 9866 switch (load_code) { 9867 case FW_MSG_CODE_DRV_LOAD_COMMON: 9868 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 9869 bxe_init_internal_common(sc); 9870 /* no break */ 9871 9872 case FW_MSG_CODE_DRV_LOAD_PORT: 9873 /* nothing to do */ 9874 /* no break */ 9875 9876 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 9877 /* internal memory per function is initialized inside bxe_pf_init */ 9878 break; 9879 9880 default: 9881 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code); 9882 break; 9883 } 9884 } 9885 9886 static void 9887 storm_memset_func_cfg(struct bxe_softc *sc, 9888 struct tstorm_eth_function_common_config *tcfg, 9889 uint16_t abs_fid) 9890 { 9891 uint32_t addr; 9892 size_t size; 9893 9894 addr = (BAR_TSTRORM_INTMEM + 9895 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); 9896 size = sizeof(struct tstorm_eth_function_common_config); 9897 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg); 9898 } 9899 9900 static void 9901 bxe_func_init(struct bxe_softc *sc, 9902 struct bxe_func_init_params *p) 9903 { 9904 struct tstorm_eth_function_common_config tcfg = { 0 }; 9905 9906 if (CHIP_IS_E1x(sc)) { 9907 storm_memset_func_cfg(sc, &tcfg, p->func_id); 9908 } 9909 9910 /* Enable the function in the FW */ 9911 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); 9912 storm_memset_func_en(sc, p->func_id, 1); 9913 9914 /* spq */ 9915 if (p->func_flgs & FUNC_FLG_SPQ) { 9916 storm_memset_spq_addr(sc, p->spq_map, p->func_id); 9917 REG_WR(sc, 9918 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)), 9919 p->spq_prod); 9920 } 9921 } 9922 9923 /* 9924 * Calculates the sum of vn_min_rates. 9925 * It's needed for further normalizing of the min_rates. 9926 * Returns: 9927 * sum of vn_min_rates. 9928 * or 9929 * 0 - if all the min_rates are 0. 9930 * In the latter case the fairness algorithm should be deactivated. 9931 * If not all min rates are zero then those that are zero will be set to 1.
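 *
 * Illustrative example (configuration values assumed, not taken from
 * real hardware): with four vnics whose min BW fields read 20/0/30/0,
 * the loop below raises the zero entries to DEF_MIN_RATE so that every
 * vnic keeps a nonzero share:
 *
 *   input->vnic_min_rate[] = { 2000, DEF_MIN_RATE, 3000, DEF_MIN_RATE }
 *
 * whereas an all-zero configuration leaves all_zero set and fairness
 * is disabled entirely.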
9932 */ 9933 static void 9934 bxe_calc_vn_min(struct bxe_softc *sc, 9935 struct cmng_init_input *input) 9936 { 9937 uint32_t vn_cfg; 9938 uint32_t vn_min_rate; 9939 int all_zero = 1; 9940 int vn; 9941 9942 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 9943 vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 9944 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 9945 FUNC_MF_CFG_MIN_BW_SHIFT) * 100); 9946 9947 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 9948 /* skip hidden VNs */ 9949 vn_min_rate = 0; 9950 } else if (!vn_min_rate) { 9951 /* If min rate is zero - set it to 100 */ 9952 vn_min_rate = DEF_MIN_RATE; 9953 } else { 9954 all_zero = 0; 9955 } 9956 9957 input->vnic_min_rate[vn] = vn_min_rate; 9958 } 9959 9960 /* if ETS or all min rates are zeros - disable fairness */ 9961 if (BXE_IS_ETS_ENABLED(sc)) { 9962 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 9963 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n"); 9964 } else if (all_zero) { 9965 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 9966 BLOGD(sc, DBG_LOAD, 9967 "Fairness disabled (all MIN values are zeroes)\n"); 9968 } else { 9969 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 9970 } 9971 } 9972 9973 static inline uint16_t 9974 bxe_extract_max_cfg(struct bxe_softc *sc, 9975 uint32_t mf_cfg) 9976 { 9977 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 9978 FUNC_MF_CFG_MAX_BW_SHIFT); 9979 9980 if (!max_cfg) { 9981 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n"); 9982 max_cfg = 100; 9983 } 9984 9985 return (max_cfg); 9986 } 9987 9988 static void 9989 bxe_calc_vn_max(struct bxe_softc *sc, 9990 int vn, 9991 struct cmng_init_input *input) 9992 { 9993 uint16_t vn_max_rate; 9994 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 9995 uint32_t max_cfg; 9996 9997 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 9998 vn_max_rate = 0; 9999 } else { 10000 max_cfg = bxe_extract_max_cfg(sc, vn_cfg); 10001 10002 if (IS_MF_SI(sc)) { 10003 /* max_cfg is in percent of link speed */ 10004 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100); 10005 } else { /* SD modes */ 10006 /* max_cfg is absolute in 100Mb units */ 10007 vn_max_rate = (max_cfg * 100); 10008 } 10009 } 10010 10011 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); 10012 10013 input->vnic_max_rate[vn] = vn_max_rate; 10014 } 10015 10016 static void 10017 bxe_cmng_fns_init(struct bxe_softc *sc, 10018 uint8_t read_cfg, 10019 uint8_t cmng_type) 10020 { 10021 struct cmng_init_input input; 10022 int vn; 10023 10024 memset(&input, 0, sizeof(struct cmng_init_input)); 10025 10026 input.port_rate = sc->link_vars.line_speed; 10027 10028 if (cmng_type == CMNG_FNS_MINMAX) { 10029 /* read mf conf from shmem */ 10030 if (read_cfg) { 10031 bxe_read_mf_cfg(sc); 10032 } 10033 10034 /* get VN min rate and enable fairness if not 0 */ 10035 bxe_calc_vn_min(sc, &input); 10036 10037 /* get VN max rate */ 10038 if (sc->port.pmf) { 10039 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10040 bxe_calc_vn_max(sc, vn, &input); 10041 } 10042 } 10043 10044 /* always enable rate shaping and fairness */ 10045 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 10046 10047 ecore_init_cmng(&input, &sc->cmng); 10048 return; 10049 } 10050 10051 /* rate shaping and fairness are disabled */ 10052 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n"); 10053 } 10054 10055 static int 10056 bxe_get_cmng_fns_mode(struct bxe_softc *sc) 10057 { 10058 if (CHIP_REV_IS_SLOW(sc)) { 10059 return (CMNG_FNS_NONE); 10060 } 10061 10062
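/*
 * Min/max congestion management is only meaningful when several
 * functions share the port (multi-function mode); emulation/FPGA
 * (slow) chips skip it entirely, as above. A minimal usage sketch,
 * taken from the pattern in bxe_pf_init() below:
 *
 *   sc->link_vars.line_speed = SPEED_10000;
 *   bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
 */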
if (IS_MF(sc)) { 10063 return (CMNG_FNS_MINMAX); 10064 } 10065 10066 return (CMNG_FNS_NONE); 10067 } 10068 10069 static void 10070 storm_memset_cmng(struct bxe_softc *sc, 10071 struct cmng_init *cmng, 10072 uint8_t port) 10073 { 10074 int vn; 10075 int func; 10076 uint32_t addr; 10077 size_t size; 10078 10079 addr = (BAR_XSTRORM_INTMEM + 10080 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); 10081 size = sizeof(struct cmng_struct_per_port); 10082 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port); 10083 10084 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10085 func = func_by_vn(sc, vn); 10086 10087 addr = (BAR_XSTRORM_INTMEM + 10088 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); 10089 size = sizeof(struct rate_shaping_vars_per_vn); 10090 ecore_storm_memset_struct(sc, addr, size, 10091 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]); 10092 10093 addr = (BAR_XSTRORM_INTMEM + 10094 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); 10095 size = sizeof(struct fairness_vars_per_vn); 10096 ecore_storm_memset_struct(sc, addr, size, 10097 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]); 10098 } 10099 } 10100 10101 static void 10102 bxe_pf_init(struct bxe_softc *sc) 10103 { 10104 struct bxe_func_init_params func_init = { 0 }; 10105 struct event_ring_data eq_data = { { 0 } }; 10106 uint16_t flags; 10107 10108 if (!CHIP_IS_E1x(sc)) { 10109 /* reset IGU PF statistics: MSIX + ATTN */ 10110 /* PF */ 10111 REG_WR(sc, 10112 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 10113 (BXE_IGU_STAS_MSG_VF_CNT * 4) + 10114 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 10115 0); 10116 /* ATTN */ 10117 REG_WR(sc, 10118 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 10119 (BXE_IGU_STAS_MSG_VF_CNT * 4) + 10120 (BXE_IGU_STAS_MSG_PF_CNT * 4) + 10121 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 10122 0); 10123 } 10124 10125 /* function setup flags */ 10126 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); 10127 10128 /* 10129 * This flag is relevant for E1x only. 10130 * E2 doesn't have a TPA configuration at the function level. 10131 */ 10132 flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0; 10133 10134 func_init.func_flgs = flags; 10135 func_init.pf_id = SC_FUNC(sc); 10136 func_init.func_id = SC_FUNC(sc); 10137 func_init.spq_map = sc->spq_dma.paddr; 10138 func_init.spq_prod = sc->spq_prod_idx; 10139 10140 bxe_func_init(sc, &func_init); 10141 10142 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); 10143 10144 /* 10145 * Congestion management values depend on the link rate. 10146 * There is no active link so initial link rate is set to 10Gbps. 10147 * When the link comes up the congestion management values are 10148 * re-calculated according to the actual link rate. 10149 */ 10150 sc->link_vars.line_speed = SPEED_10000; 10151 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc)); 10152 10153 /* Only the PMF sets the HW */ 10154 if (sc->port.pmf) { 10155 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 10156 } 10157 10158 /* init Event Queue - PCI bus guarantees correct endianity */ 10159 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); 10160 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); 10161 eq_data.producer = sc->eq_prod; 10162 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 10163 eq_data.sb_id = DEF_SB_ID; 10164 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); 10165 } 10166 10167 static void 10168 bxe_hc_int_enable(struct bxe_softc *sc) 10169 { 10170 int port = SC_PORT(sc); 10171 uint32_t addr = (port) ?
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10172 uint32_t val = REG_RD(sc, addr); 10173 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10174 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10175 (sc->intr_count == 1)) ? TRUE : FALSE; 10176 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10177 10178 if (msix) { 10179 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10180 HC_CONFIG_0_REG_INT_LINE_EN_0); 10181 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10182 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10183 if (single_msix) { 10184 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 10185 } 10186 } else if (msi) { 10187 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 10188 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10189 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10190 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10191 } else { 10192 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10193 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10194 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10195 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10196 10197 if (!CHIP_IS_E1(sc)) { 10198 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", 10199 val, port, addr); 10200 10201 REG_WR(sc, addr, val); 10202 10203 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 10204 } 10205 } 10206 10207 if (CHIP_IS_E1(sc)) { 10208 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF); 10209 } 10210 10211 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", 10212 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); 10213 10214 REG_WR(sc, addr, val); 10215 10216 /* ensure that HC_CONFIG is written before leading/trailing edge config */ 10217 mb(); 10218 10219 if (!CHIP_IS_E1(sc)) { 10220 /* init leading/trailing edge */ 10221 if (IS_MF(sc)) { 10222 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10223 if (sc->port.pmf) { 10224 /* enable nig and gpio3 attention */ 10225 val |= 0x1100; 10226 } 10227 } else { 10228 val = 0xffff; 10229 } 10230 10231 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val); 10232 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val); 10233 } 10234 10235 /* make sure that interrupts are indeed enabled from here on */ 10236 mb(); 10237 } 10238 10239 static void 10240 bxe_igu_int_enable(struct bxe_softc *sc) 10241 { 10242 uint32_t val; 10243 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10244 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10245 (sc->intr_count == 1)) ? TRUE : FALSE; 10246 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10247 10248 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10249 10250 if (msix) { 10251 val &= ~(IGU_PF_CONF_INT_LINE_EN | 10252 IGU_PF_CONF_SINGLE_ISR_EN); 10253 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10254 IGU_PF_CONF_ATTN_BIT_EN); 10255 if (single_msix) { 10256 val |= IGU_PF_CONF_SINGLE_ISR_EN; 10257 } 10258 } else if (msi) { 10259 val &= ~IGU_PF_CONF_INT_LINE_EN; 10260 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10261 IGU_PF_CONF_ATTN_BIT_EN | 10262 IGU_PF_CONF_SINGLE_ISR_EN); 10263 } else { 10264 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 10265 val |= (IGU_PF_CONF_INT_LINE_EN | 10266 IGU_PF_CONF_ATTN_BIT_EN | 10267 IGU_PF_CONF_SINGLE_ISR_EN); 10268 } 10269 10270 /* clean previous status - need to configure igu prior to ack*/ 10271 if ((!msix) || single_msix) { 10272 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10273 bxe_ack_int(sc); 10274 } 10275 10276 val |= IGU_PF_CONF_FUNC_EN; 10277 10278 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n", 10279 val, ((msix) ? "MSI-X" : ((msi) ? 
"MSI" : "INTx"))); 10280 10281 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10282 10283 mb(); 10284 10285 /* init leading/trailing edge */ 10286 if (IS_MF(sc)) { 10287 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10288 if (sc->port.pmf) { 10289 /* enable nig and gpio3 attention */ 10290 val |= 0x1100; 10291 } 10292 } else { 10293 val = 0xffff; 10294 } 10295 10296 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 10297 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 10298 10299 /* make sure that interrupts are indeed enabled from here on */ 10300 mb(); 10301 } 10302 10303 static void 10304 bxe_int_enable(struct bxe_softc *sc) 10305 { 10306 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10307 bxe_hc_int_enable(sc); 10308 } else { 10309 bxe_igu_int_enable(sc); 10310 } 10311 } 10312 10313 static void 10314 bxe_hc_int_disable(struct bxe_softc *sc) 10315 { 10316 int port = SC_PORT(sc); 10317 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10318 uint32_t val = REG_RD(sc, addr); 10319 10320 /* 10321 * In E1 we must use only PCI configuration space to disable MSI/MSIX 10322 * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC 10323 * block 10324 */ 10325 if (CHIP_IS_E1(sc)) { 10326 /* 10327 * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register 10328 * to prevent from HC sending interrupts after we exit the function 10329 */ 10330 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0); 10331 10332 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10333 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10334 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10335 } else { 10336 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10337 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10338 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10339 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10340 } 10341 10342 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); 10343 10344 /* flush all outstanding writes */ 10345 mb(); 10346 10347 REG_WR(sc, addr, val); 10348 if (REG_RD(sc, addr) != val) { 10349 BLOGE(sc, "proper val not read from HC IGU!\n"); 10350 } 10351 } 10352 10353 static void 10354 bxe_igu_int_disable(struct bxe_softc *sc) 10355 { 10356 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10357 10358 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 10359 IGU_PF_CONF_INT_LINE_EN | 10360 IGU_PF_CONF_ATTN_BIT_EN); 10361 10362 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val); 10363 10364 /* flush all outstanding writes */ 10365 mb(); 10366 10367 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10368 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { 10369 BLOGE(sc, "proper val not read from IGU!\n"); 10370 } 10371 } 10372 10373 static void 10374 bxe_int_disable(struct bxe_softc *sc) 10375 { 10376 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10377 bxe_hc_int_disable(sc); 10378 } else { 10379 bxe_igu_int_disable(sc); 10380 } 10381 } 10382 10383 static void 10384 bxe_nic_init(struct bxe_softc *sc, 10385 int load_code) 10386 { 10387 int i; 10388 10389 for (i = 0; i < sc->num_queues; i++) { 10390 bxe_init_eth_fp(sc, i); 10391 } 10392 10393 rmb(); /* ensure status block indices were read */ 10394 10395 bxe_init_rx_rings(sc); 10396 bxe_init_tx_rings(sc); 10397 10398 if (IS_VF(sc)) { 10399 return; 10400 } 10401 10402 /* initialize MOD_ABS interrupts */ 10403 elink_init_mod_abs_int(sc, &sc->link_vars, 10404 sc->devinfo.chip_id, 10405 sc->devinfo.shmem_base, 10406 sc->devinfo.shmem2_base, 10407 SC_PORT(sc)); 10408 10409 bxe_init_def_sb(sc); 10410 bxe_update_dsb_idx(sc); 10411 bxe_init_sp_ring(sc); 10412 bxe_init_eq_ring(sc); 10413 bxe_init_internal(sc, load_code); 10414 bxe_pf_init(sc); 10415 
bxe_stats_init(sc); 10416 10417 /* flush all before enabling interrupts */ 10418 mb(); 10419 10420 bxe_int_enable(sc); 10421 10422 /* check for SPIO5 */ 10423 bxe_attn_int_deasserted0(sc, 10424 REG_RD(sc, 10425 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 10426 SC_PORT(sc)*4)) & 10427 AEU_INPUTS_ATTN_BITS_SPIO5); 10428 } 10429 10430 static inline void 10431 bxe_init_objs(struct bxe_softc *sc) 10432 { 10433 /* mcast rules must be added to tx if tx switching is enabled */ 10434 ecore_obj_type o_type = 10435 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX : 10436 ECORE_OBJ_TYPE_RX; 10437 10438 /* RX_MODE controlling object */ 10439 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); 10440 10441 /* multicast configuration controlling object */ 10442 ecore_init_mcast_obj(sc, 10443 &sc->mcast_obj, 10444 sc->fp[0].cl_id, 10445 sc->fp[0].index, 10446 SC_FUNC(sc), 10447 SC_FUNC(sc), 10448 BXE_SP(sc, mcast_rdata), 10449 BXE_SP_MAPPING(sc, mcast_rdata), 10450 ECORE_FILTER_MCAST_PENDING, 10451 &sc->sp_state, 10452 o_type); 10453 10454 /* Setup CAM credit pools */ 10455 ecore_init_mac_credit_pool(sc, 10456 &sc->macs_pool, 10457 SC_FUNC(sc), 10458 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10459 VNICS_PER_PATH(sc)); 10460 10461 ecore_init_vlan_credit_pool(sc, 10462 &sc->vlans_pool, 10463 SC_ABS_FUNC(sc) >> 1, 10464 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10465 VNICS_PER_PATH(sc)); 10466 10467 /* RSS configuration object */ 10468 ecore_init_rss_config_obj(sc, 10469 &sc->rss_conf_obj, 10470 sc->fp[0].cl_id, 10471 sc->fp[0].index, 10472 SC_FUNC(sc), 10473 SC_FUNC(sc), 10474 BXE_SP(sc, rss_rdata), 10475 BXE_SP_MAPPING(sc, rss_rdata), 10476 ECORE_FILTER_RSS_CONF_PENDING, 10477 &sc->sp_state, ECORE_OBJ_TYPE_RX); 10478 } 10479 10480 /* 10481 * Initialize the function. This must be called before sending CLIENT_SETUP 10482 * for the first client. 
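 *
 * A minimal usage sketch (mirroring how the load path drives this
 * transition; error handling abbreviated):
 *
 *   rc = bxe_func_start(sc);
 *   if (rc) {
 *       BLOGE(sc, "Function start failed!\n");
 *       return (rc);
 *   }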
10483 */ 10484 static inline int 10485 bxe_func_start(struct bxe_softc *sc) 10486 { 10487 struct ecore_func_state_params func_params = { NULL }; 10488 struct ecore_func_start_params *start_params = &func_params.params.start; 10489 10490 /* Prepare parameters for function state transitions */ 10491 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 10492 10493 func_params.f_obj = &sc->func_obj; 10494 func_params.cmd = ECORE_F_CMD_START; 10495 10496 /* Function parameters */ 10497 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; 10498 start_params->sd_vlan_tag = OVLAN(sc); 10499 10500 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 10501 start_params->network_cos_mode = STATIC_COS; 10502 } else { /* CHIP_IS_E1X */ 10503 start_params->network_cos_mode = FW_WRR; 10504 } 10505 10506 //start_params->gre_tunnel_mode = 0; 10507 //start_params->gre_tunnel_rss = 0; 10508 10509 return (ecore_func_state_change(sc, &func_params)); 10510 } 10511 10512 static int 10513 bxe_set_power_state(struct bxe_softc *sc, 10514 uint8_t state) 10515 { 10516 uint16_t pmcsr; 10517 10518 /* If there is no power capability, silently succeed */ 10519 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { 10520 BLOGW(sc, "No power capability\n"); 10521 return (0); 10522 } 10523 10524 pmcsr = pci_read_config(sc->dev, 10525 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 10526 2); 10527 10528 switch (state) { 10529 case PCI_PM_D0: 10530 pci_write_config(sc->dev, 10531 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 10532 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); 10533 10534 if (pmcsr & PCIM_PSTAT_DMASK) { 10535 /* delay required during transition out of D3hot */ 10536 DELAY(20000); 10537 } 10538 10539 break; 10540 10541 case PCI_PM_D3hot: 10542 /* XXX if there are other clients above don't shut down the power */ 10543 10544 /* don't shut down the power for emulation and FPGA */ 10545 if (CHIP_REV_IS_SLOW(sc)) { 10546 return (0); 10547 } 10548 10549 pmcsr &= ~PCIM_PSTAT_DMASK; 10550 pmcsr |= PCIM_PSTAT_D3; 10551 10552 if (sc->wol) { 10553 pmcsr |= PCIM_PSTAT_PMEENABLE; 10554 } 10555 10556 pci_write_config(sc->dev, 10557 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 10558 pmcsr, 4); 10559 10560 /* 10561 * No more memory access after this point until device is brought back 10562 * to D0 state. 
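 *
 * A hedged usage sketch of this helper (the attach and unload paths in
 * this file follow this pattern):
 *
 *   bxe_set_power_state(sc, PCI_PM_D0);     wake device before any
 *                                           register access
 *   ...
 *   bxe_set_power_state(sc, PCI_PM_D3hot);  power down; PME is armed
 *                                           only when sc->wol is set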
10563 */ 10564 break; 10565 10566 default: 10567 BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n", 10568 state, pmcsr); 10569 return (-1); 10570 } 10571 10572 return (0); 10573 } 10574 10575 10576 /* return TRUE if we succeeded in acquiring the lock */ 10577 static uint8_t 10578 bxe_trylock_hw_lock(struct bxe_softc *sc, 10579 uint32_t resource) 10580 { 10581 uint32_t lock_status; 10582 uint32_t resource_bit = (1 << resource); 10583 int func = SC_FUNC(sc); 10584 uint32_t hw_lock_control_reg; 10585 10586 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource); 10587 10588 /* Validating that the resource is within range */ 10589 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 10590 BLOGD(sc, DBG_LOAD, 10591 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 10592 resource, HW_LOCK_MAX_RESOURCE_VALUE); 10593 return (FALSE); 10594 } 10595 10596 if (func <= 5) { 10597 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 10598 } else { 10599 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 10600 } 10601 10602 /* try to acquire the lock */ 10603 REG_WR(sc, hw_lock_control_reg + 4, resource_bit); 10604 lock_status = REG_RD(sc, hw_lock_control_reg); 10605 if (lock_status & resource_bit) { 10606 return (TRUE); 10607 } 10608 10609 BLOGE(sc, "Failed to get a resource lock 0x%x func %d " 10610 "lock_status 0x%x resource_bit 0x%x\n", resource, func, 10611 lock_status, resource_bit); 10612 10613 return (FALSE); 10614 } 10615 10616 /* 10617 * Get the recovery leader resource id according to the engine this function 10618 * belongs to. Currently only 2 engines are supported. 10619 */ 10620 static int 10621 bxe_get_leader_lock_resource(struct bxe_softc *sc) 10622 { 10623 if (SC_PATH(sc)) { 10624 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1); 10625 } else { 10626 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0); 10627 } 10628 } 10629 10630 /* try to acquire a leader lock for current engine */ 10631 static uint8_t 10632 bxe_trylock_leader_lock(struct bxe_softc *sc) 10633 { 10634 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc))); 10635 } 10636 10637 static int 10638 bxe_release_leader_lock(struct bxe_softc *sc) 10639 { 10640 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc))); 10641 } 10642 10643 /* close gates #2, #3 and #4 */ 10644 static void 10645 bxe_set_234_gates(struct bxe_softc *sc, 10646 uint8_t close) 10647 { 10648 uint32_t val; 10649 10650 /* gates #2 and #4a are closed/opened for "not E1" only */ 10651 if (!CHIP_IS_E1(sc)) { 10652 /* #4 */ 10653 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close); 10654 /* #2 */ 10655 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); 10656 } 10657 10658 /* #3 */ 10659 if (CHIP_IS_E1x(sc)) { 10660 /* prevent interrupts from HC on both ports */ 10661 val = REG_RD(sc, HC_REG_CONFIG_1); 10662 REG_WR(sc, HC_REG_CONFIG_1, 10663 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : 10664 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 10665 10666 val = REG_RD(sc, HC_REG_CONFIG_0); 10667 REG_WR(sc, HC_REG_CONFIG_0, 10668 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : 10669 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 10670 } else { 10671 /* Prevent incoming interrupts in IGU */ 10672 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 10673 10674 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, 10675 (!close) ?
10676 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : 10677 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 10678 } 10679 10680 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n", 10681 close ? "closing" : "opening"); 10682 10683 wmb(); 10684 } 10685 10686 /* poll for pending writes bit, it should get cleared in no more than 1s */ 10687 static int 10688 bxe_er_poll_igu_vq(struct bxe_softc *sc) 10689 { 10690 uint32_t cnt = 1000; 10691 uint32_t pend_bits = 0; 10692 10693 do { 10694 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); 10695 10696 if (pend_bits == 0) { 10697 break; 10698 } 10699 10700 DELAY(1000); 10701 } while (--cnt > 0); 10702 10703 if (cnt == 0) { 10704 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); 10705 return (-1); 10706 } 10707 10708 return (0); 10709 } 10710 10711 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ 10712 10713 static void 10714 bxe_clp_reset_prep(struct bxe_softc *sc, 10715 uint32_t *magic_val) 10716 { 10717 /* Do some magic... */ 10718 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 10719 *magic_val = val & SHARED_MF_CLP_MAGIC; 10720 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 10721 } 10722 10723 /* restore the value of the 'magic' bit */ 10724 static void 10725 bxe_clp_reset_done(struct bxe_softc *sc, 10726 uint32_t magic_val) 10727 { 10728 /* Restore the 'magic' bit value... */ 10729 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 10730 MFCFG_WR(sc, shared_mf_config.clp_mb, 10731 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 10732 } 10733 10734 /* prepare for MCP reset, takes care of CLP configurations */ 10735 static void 10736 bxe_reset_mcp_prep(struct bxe_softc *sc, 10737 uint32_t *magic_val) 10738 { 10739 uint32_t shmem; 10740 uint32_t validity_offset; 10741 10742 /* set `magic' bit in order to save MF config */ 10743 if (!CHIP_IS_E1(sc)) { 10744 bxe_clp_reset_prep(sc, magic_val); 10745 } 10746 10747 /* get shmem offset */ 10748 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 10749 validity_offset = 10750 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); 10751 10752 /* Clear validity map flags */ 10753 if (shmem > 0) { 10754 REG_WR(sc, shmem + validity_offset, 0); 10755 } 10756 } 10757 10758 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 10759 #define MCP_ONE_TIMEOUT 100 /* 100 ms */ 10760 10761 static void 10762 bxe_mcp_wait_one(struct bxe_softc *sc) 10763 { 10764 /* special handling for emulation and FPGA (10 times longer) */ 10765 if (CHIP_REV_IS_SLOW(sc)) { 10766 DELAY((MCP_ONE_TIMEOUT*10) * 1000); 10767 } else { 10768 DELAY((MCP_ONE_TIMEOUT) * 1000); 10769 } 10770 } 10771 10772 /* initialize shmem_base and waits for validity signature to appear */ 10773 static int 10774 bxe_init_shmem(struct bxe_softc *sc) 10775 { 10776 int cnt = 0; 10777 uint32_t val = 0; 10778 10779 do { 10780 sc->devinfo.shmem_base = 10781 sc->link_params.shmem_base = 10782 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 10783 10784 if (sc->devinfo.shmem_base) { 10785 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 10786 if (val & SHR_MEM_VALIDITY_MB) 10787 return (0); 10788 } 10789 10790 bxe_mcp_wait_one(sc); 10791 10792 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 10793 10794 BLOGE(sc, "BAD MCP validity signature\n"); 10795 10796 return (-1); 10797 } 10798 10799 static int 10800 bxe_reset_mcp_comp(struct bxe_softc *sc, 10801 uint32_t magic_val) 10802 { 10803 int rc = bxe_init_shmem(sc); 10804 10805 /* Restore the `magic' bit value */ 10806 if (!CHIP_IS_E1(sc)) { 10807 bxe_clp_reset_done(sc, 
magic_val); 10808 } 10809 10810 return (rc); 10811 } 10812 10813 static void 10814 bxe_pxp_prep(struct bxe_softc *sc) 10815 { 10816 if (!CHIP_IS_E1(sc)) { 10817 REG_WR(sc, PXP2_REG_RD_START_INIT, 0); 10818 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); 10819 wmb(); 10820 } 10821 } 10822 10823 /* 10824 * Reset the whole chip except for: 10825 * - PCIE core 10826 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) 10827 * - IGU 10828 * - MISC (including AEU) 10829 * - GRC 10830 * - RBCN, RBCP 10831 */ 10832 static void 10833 bxe_process_kill_chip_reset(struct bxe_softc *sc, 10834 uint8_t global) 10835 { 10836 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 10837 uint32_t global_bits2, stay_reset2; 10838 10839 /* 10840 * Bits that have to be set in reset_mask2 if we want to reset 'global' 10841 * (per chip) blocks. 10842 */ 10843 global_bits2 = 10844 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 10845 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 10846 10847 /* 10848 * Don't reset the following blocks. 10849 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 10850 * reset, as in 4 port device they might still be owned 10851 * by the MCP (there is only one leader per path). 10852 */ 10853 not_reset_mask1 = 10854 MISC_REGISTERS_RESET_REG_1_RST_HC | 10855 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 10856 MISC_REGISTERS_RESET_REG_1_RST_PXP; 10857 10858 not_reset_mask2 = 10859 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 10860 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 10861 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 10862 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 10863 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 10864 MISC_REGISTERS_RESET_REG_2_RST_GRC | 10865 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 10866 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 10867 MISC_REGISTERS_RESET_REG_2_RST_ATC | 10868 MISC_REGISTERS_RESET_REG_2_PGLC | 10869 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 10870 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 10871 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 10872 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 10873 MISC_REGISTERS_RESET_REG_2_UMAC0 | 10874 MISC_REGISTERS_RESET_REG_2_UMAC1; 10875 10876 /* 10877 * Keep the following blocks in reset: 10878 * - all xxMACs are handled by the elink code. 10879 */ 10880 stay_reset2 = 10881 MISC_REGISTERS_RESET_REG_2_XMAC | 10882 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 10883 10884 /* Full reset masks according to the chip */ 10885 reset_mask1 = 0xffffffff; 10886 10887 if (CHIP_IS_E1(sc)) 10888 reset_mask2 = 0xffff; 10889 else if (CHIP_IS_E1H(sc)) 10890 reset_mask2 = 0x1ffff; 10891 else if (CHIP_IS_E2(sc)) 10892 reset_mask2 = 0xfffff; 10893 else /* CHIP_IS_E3 */ 10894 reset_mask2 = 0x3ffffff; 10895 10896 /* Don't reset global blocks unless we need to */ 10897 if (!global) 10898 reset_mask2 &= ~global_bits2; 10899 10900 /* 10901 * In case of attention in the QM, we need to reset PXP 10902 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 10903 * because otherwise QM reset would release 'close the gates' shortly 10904 * before resetting the PXP, then the PSWRQ would send a write 10905 * request to PGLUE. Then when PXP is reset, PGLUE would try to 10906 * read the payload data from PSWWR, but PSWWR would not 10907 * respond. The write queue in PGLUE would get stuck and DMAE commands 10908 would not return.
Therefore it's important to reset the second 10909 * reset register (containing the 10910 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 10911 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 10912 * bit). 10913 */ 10914 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 10915 reset_mask2 & (~not_reset_mask2)); 10916 10917 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 10918 reset_mask1 & (~not_reset_mask1)); 10919 10920 mb(); 10921 wmb(); 10922 10923 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 10924 reset_mask2 & (~stay_reset2)); 10925 10926 mb(); 10927 wmb(); 10928 10929 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 10930 wmb(); 10931 } 10932 10933 static int 10934 bxe_process_kill(struct bxe_softc *sc, 10935 uint8_t global) 10936 { 10937 int cnt = 1000; 10938 uint32_t val = 0; 10939 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 10940 uint32_t tags_63_32 = 0; 10941 10942 /* Empty the Tetris buffer, wait for 1s */ 10943 do { 10944 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); 10945 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); 10946 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); 10947 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); 10948 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); 10949 if (CHIP_IS_E3(sc)) { 10950 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); 10951 } 10952 10953 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 10954 ((port_is_idle_0 & 0x1) == 0x1) && 10955 ((port_is_idle_1 & 0x1) == 0x1) && 10956 (pgl_exp_rom2 == 0xffffffff) && 10957 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) 10958 break; 10959 DELAY(1000); 10960 } while (cnt-- > 0); 10961 10962 if (cnt <= 0) { 10963 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there " 10964 "are still outstanding read requests after 1s! " 10965 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " 10966 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 10967 sr_cnt, blk_cnt, port_is_idle_0, 10968 port_is_idle_1, pgl_exp_rom2); 10969 return (-1); 10970 } 10971 10972 mb(); 10973 10974 /* Close gates #2, #3 and #4 */ 10975 bxe_set_234_gates(sc, TRUE); 10976 10977 /* Poll for IGU VQs for 57712 and newer chips */ 10978 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { 10979 return (-1); 10980 } 10981 10982 /* XXX indicate that "process kill" is in progress to MCP */ 10983 10984 /* clear "unprepared" bit */ 10985 REG_WR(sc, MISC_REG_UNPREPARED, 0); 10986 mb(); 10987 10988 /* Make sure all is written to the chip before the reset */ 10989 wmb(); 10990 10991 /* 10992 * Wait for 1ms to empty GLUE and PCI-E core queues, 10993 * PSWHST, GRC and PSWRD Tetris buffer. 
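 * (Note: DELAY() takes microseconds on FreeBSD, hence the DELAY(1000)
 * below for the 1ms wait.)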
10994 */ 10995 DELAY(1000); 10996 10997 /* Prepare to chip reset: */ 10998 /* MCP */ 10999 if (global) { 11000 bxe_reset_mcp_prep(sc, &val); 11001 } 11002 11003 /* PXP */ 11004 bxe_pxp_prep(sc); 11005 mb(); 11006 11007 /* reset the chip */ 11008 bxe_process_kill_chip_reset(sc, global); 11009 mb(); 11010 11011 /* clear errors in PGB */ 11012 if (!CHIP_IS_E1(sc)) 11013 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); 11014 11015 /* Recover after reset: */ 11016 /* MCP */ 11017 if (global && bxe_reset_mcp_comp(sc, val)) { 11018 return (-1); 11019 } 11020 11021 /* XXX add resetting the NO_MCP mode DB here */ 11022 11023 /* Open the gates #2, #3 and #4 */ 11024 bxe_set_234_gates(sc, FALSE); 11025 11026 /* XXX 11027 * IGU/AEU preparation bring back the AEU/IGU to a reset state 11028 * re-enable attentions 11029 */ 11030 11031 return (0); 11032 } 11033 11034 static int 11035 bxe_leader_reset(struct bxe_softc *sc) 11036 { 11037 int rc = 0; 11038 uint8_t global = bxe_reset_is_global(sc); 11039 uint32_t load_code; 11040 11041 /* 11042 * If not going to reset MCP, load "fake" driver to reset HW while 11043 * driver is owner of the HW. 11044 */ 11045 if (!global && !BXE_NOMCP(sc)) { 11046 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 11047 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 11048 if (!load_code) { 11049 BLOGE(sc, "MCP response failure, aborting\n"); 11050 rc = -1; 11051 goto exit_leader_reset; 11052 } 11053 11054 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 11055 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 11056 BLOGE(sc, "MCP unexpected response, aborting\n"); 11057 rc = -1; 11058 goto exit_leader_reset2; 11059 } 11060 11061 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 11062 if (!load_code) { 11063 BLOGE(sc, "MCP response failure, aborting\n"); 11064 rc = -1; 11065 goto exit_leader_reset2; 11066 } 11067 } 11068 11069 /* try to recover after the failure */ 11070 if (bxe_process_kill(sc, global)) { 11071 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc)); 11072 rc = -1; 11073 goto exit_leader_reset2; 11074 } 11075 11076 /* 11077 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver 11078 * state. 11079 */ 11080 bxe_set_reset_done(sc); 11081 if (global) { 11082 bxe_clear_reset_global(sc); 11083 } 11084 11085 exit_leader_reset2: 11086 11087 /* unload "fake driver" if it was loaded */ 11088 if (!global && !BXE_NOMCP(sc)) { 11089 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 11090 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 11091 } 11092 11093 exit_leader_reset: 11094 11095 sc->is_leader = 0; 11096 bxe_release_leader_lock(sc); 11097 11098 mb(); 11099 return (rc); 11100 } 11101 11102 /* 11103 * prepare INIT transition, parameters configured: 11104 * - HC configuration 11105 * - Queue's CDU context 11106 */ 11107 static void 11108 bxe_pf_q_prep_init(struct bxe_softc *sc, 11109 struct bxe_fastpath *fp, 11110 struct ecore_queue_init_params *init_params) 11111 { 11112 uint8_t cos; 11113 int cxt_index, cxt_offset; 11114 11115 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); 11116 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); 11117 11118 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); 11119 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); 11120 11121 /* HC rate */ 11122 init_params->rx.hc_rate = 11123 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; 11124 init_params->tx.hc_rate = 11125 sc->hc_tx_ticks ? 
(1000000 / sc->hc_tx_ticks) : 0; 11126 11127 /* FW SB ID */ 11128 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; 11129 11130 /* CQ index among the SB indices */ 11131 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11132 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 11133 11134 /* set maximum number of COSs supported by this queue */ 11135 init_params->max_cos = sc->max_cos; 11136 11137 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", 11138 fp->index, init_params->max_cos); 11139 11140 /* set the context pointers queue object */ 11141 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 11142 /* XXX change index/cid here if ever support multiple tx CoS */ 11143 /* fp->txdata[cos]->cid */ 11144 cxt_index = fp->index / ILT_PAGE_CIDS; 11145 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); 11146 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; 11147 } 11148 } 11149 11150 /* set flags that are common for the Tx-only and not normal connections */ 11151 static unsigned long 11152 bxe_get_common_flags(struct bxe_softc *sc, 11153 struct bxe_fastpath *fp, 11154 uint8_t zero_stats) 11155 { 11156 unsigned long flags = 0; 11157 11158 /* PF driver will always initialize the Queue to an ACTIVE state */ 11159 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); 11160 11161 /* 11162 * tx only connections collect statistics (on the same index as the 11163 * parent connection). The statistics are zeroed when the parent 11164 * connection is initialized. 11165 */ 11166 11167 bxe_set_bit(ECORE_Q_FLG_STATS, &flags); 11168 if (zero_stats) { 11169 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); 11170 } 11171 11172 /* 11173 * tx only connections can support tx-switching, though their 11174 * CoS-ness doesn't survive the loopback 11175 */ 11176 if (sc->flags & BXE_TX_SWITCHING) { 11177 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); 11178 } 11179 11180 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); 11181 11182 return (flags); 11183 } 11184 11185 static unsigned long 11186 bxe_get_q_flags(struct bxe_softc *sc, 11187 struct bxe_fastpath *fp, 11188 uint8_t leading) 11189 { 11190 unsigned long flags = 0; 11191 11192 if (IS_MF_SD(sc)) { 11193 bxe_set_bit(ECORE_Q_FLG_OV, &flags); 11194 } 11195 11196 if (if_getcapenable(sc->ifp) & IFCAP_LRO) { 11197 bxe_set_bit(ECORE_Q_FLG_TPA, &flags); 11198 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); 11199 } 11200 11201 if (leading) { 11202 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); 11203 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); 11204 } 11205 11206 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); 11207 11208 /* merge with common flags */ 11209 return (flags | bxe_get_common_flags(sc, fp, TRUE)); 11210 } 11211 11212 static void 11213 bxe_pf_q_prep_general(struct bxe_softc *sc, 11214 struct bxe_fastpath *fp, 11215 struct ecore_general_setup_params *gen_init, 11216 uint8_t cos) 11217 { 11218 gen_init->stat_id = bxe_stats_id(fp); 11219 gen_init->spcl_id = fp->cl_id; 11220 gen_init->mtu = sc->mtu; 11221 gen_init->cos = cos; 11222 } 11223 11224 static void 11225 bxe_pf_rx_q_prep(struct bxe_softc *sc, 11226 struct bxe_fastpath *fp, 11227 struct rxq_pause_params *pause, 11228 struct ecore_rxq_setup_params *rxq_init) 11229 { 11230 uint8_t max_sge = 0; 11231 uint16_t sge_sz = 0; 11232 uint16_t tpa_agg_size = 0; 11233 11234 pause->sge_th_lo = SGE_TH_LO(sc); 11235 pause->sge_th_hi = SGE_TH_HI(sc); 11236 11237 /* validate SGE ring has enough to cross high threshold */ 11238 if (sc->dropless_fc && 11239 (pause->sge_th_hi + 
FW_PREFETCH_CNT) > 11240 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) { 11241 BLOGW(sc, "sge ring threshold limit\n"); 11242 } 11243 11244 /* minimum max_aggregation_size is 2*MTU (two full buffers) */ 11245 tpa_agg_size = (2 * sc->mtu); 11246 if (tpa_agg_size < sc->max_aggregation_size) { 11247 tpa_agg_size = sc->max_aggregation_size; 11248 } 11249 11250 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT; 11251 max_sge = ((max_sge + PAGES_PER_SGE - 1) & 11252 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; 11253 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff); 11254 11255 /* pause - not for e1 */ 11256 if (!CHIP_IS_E1(sc)) { 11257 pause->bd_th_lo = BD_TH_LO(sc); 11258 pause->bd_th_hi = BD_TH_HI(sc); 11259 11260 pause->rcq_th_lo = RCQ_TH_LO(sc); 11261 pause->rcq_th_hi = RCQ_TH_HI(sc); 11262 11263 /* validate rings have enough entries to cross high thresholds */ 11264 if (sc->dropless_fc && 11265 pause->bd_th_hi + FW_PREFETCH_CNT > 11266 sc->rx_ring_size) { 11267 BLOGW(sc, "rx bd ring threshold limit\n"); 11268 } 11269 11270 if (sc->dropless_fc && 11271 pause->rcq_th_hi + FW_PREFETCH_CNT > 11272 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) { 11273 BLOGW(sc, "rcq ring threshold limit\n"); 11274 } 11275 11276 pause->pri_map = 1; 11277 } 11278 11279 /* rxq setup */ 11280 rxq_init->dscr_map = fp->rx_dma.paddr; 11281 rxq_init->sge_map = fp->rx_sge_dma.paddr; 11282 rxq_init->rcq_map = fp->rcq_dma.paddr; 11283 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE); 11284 11285 /* 11286 * This should be a maximum number of data bytes that may be 11287 * placed on the BD (not including padding). 11288 */ 11289 rxq_init->buf_sz = (fp->rx_buf_size - 11290 IP_HEADER_ALIGNMENT_PADDING); 11291 11292 rxq_init->cl_qzone_id = fp->cl_qzone_id; 11293 rxq_init->tpa_agg_sz = tpa_agg_size; 11294 rxq_init->sge_buf_sz = sge_sz; 11295 rxq_init->max_sges_pkt = max_sge; 11296 rxq_init->rss_engine_id = SC_FUNC(sc); 11297 rxq_init->mcast_engine_id = SC_FUNC(sc); 11298 11299 /* 11300 * Maximum number of simultaneous TPA aggregations for this Queue. 11301 * For PF Clients it should be the maximum available number. 11302 * VF driver(s) may want to define it to a smaller value. 11303 */ 11304 rxq_init->max_tpa_queues = MAX_AGG_QS(sc); 11305 11306 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT; 11307 rxq_init->fw_sb_id = fp->fw_sb_id; 11308 11309 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11310 11311 /* 11312 * configure silent vlan removal 11313 * if multi function mode is afex, then mask default vlan 11314 */ 11315 if (IS_MF_AFEX(sc)) { 11316 rxq_init->silent_removal_value = 11317 sc->devinfo.mf_info.afex_def_vlan_tag; 11318 rxq_init->silent_removal_mask = EVL_VLID_MASK; 11319 } 11320 } 11321 11322 static void 11323 bxe_pf_tx_q_prep(struct bxe_softc *sc, 11324 struct bxe_fastpath *fp, 11325 struct ecore_txq_setup_params *txq_init, 11326 uint8_t cos) 11327 { 11328 /* 11329 * XXX If multiple CoS is ever supported then each fastpath structure 11330 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
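 * If that ever happens, the descriptor map set below would come from a
 * per-CoS tx ring instead, e.g. (hypothetical field, sketch only):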
11331 * fp->txdata[cos]->tx_dma.paddr; 11332 */ 11333 txq_init->dscr_map = fp->tx_dma.paddr; 11334 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 11335 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 11336 txq_init->fw_sb_id = fp->fw_sb_id; 11337 11338 /* 11339 * set the TSS leading client id for TX classification to the 11340 * leading RSS client id 11341 */ 11342 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); 11343 } 11344 11345 /* 11346 * This function performs 2 steps in a queue state machine: 11347 * 1) RESET->INIT 11348 * 2) INIT->SETUP 11349 */ 11350 static int 11351 bxe_setup_queue(struct bxe_softc *sc, 11352 struct bxe_fastpath *fp, 11353 uint8_t leading) 11354 { 11355 struct ecore_queue_state_params q_params = { NULL }; 11356 struct ecore_queue_setup_params *setup_params = 11357 &q_params.params.setup; 11358 int rc; 11359 11360 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); 11361 11362 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 11363 11364 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 11365 11366 /* we want to wait for completion in this context */ 11367 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 11368 11369 /* prepare the INIT parameters */ 11370 bxe_pf_q_prep_init(sc, fp, &q_params.params.init); 11371 11372 /* Set the command */ 11373 q_params.cmd = ECORE_Q_CMD_INIT; 11374 11375 /* Change the state to INIT */ 11376 rc = ecore_queue_state_change(sc, &q_params); 11377 if (rc) { 11378 BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc); 11379 return (rc); 11380 } 11381 11382 BLOGD(sc, DBG_LOAD, "init complete\n"); 11383 11384 /* now move the Queue to the SETUP state */ 11385 memset(setup_params, 0, sizeof(*setup_params)); 11386 11387 /* set Queue flags */ 11388 setup_params->flags = bxe_get_q_flags(sc, fp, leading); 11389 11390 /* set general SETUP parameters */ 11391 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, 11392 FIRST_TX_COS_INDEX); 11393 11394 bxe_pf_rx_q_prep(sc, fp, 11395 &setup_params->pause_params, 11396 &setup_params->rxq_params); 11397 11398 bxe_pf_tx_q_prep(sc, fp, 11399 &setup_params->txq_params, 11400 FIRST_TX_COS_INDEX); 11401 11402 /* Set the command */ 11403 q_params.cmd = ECORE_Q_CMD_SETUP; 11404 11405 /* change the state to SETUP */ 11406 rc = ecore_queue_state_change(sc, &q_params); 11407 if (rc) { 11408 BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc); 11409 return (rc); 11410 } 11411 11412 return (rc); 11413 } 11414 11415 static int 11416 bxe_setup_leading(struct bxe_softc *sc) 11417 { 11418 return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); 11419 } 11420 11421 static int 11422 bxe_config_rss_pf(struct bxe_softc *sc, 11423 struct ecore_rss_config_obj *rss_obj, 11424 uint8_t config_hash) 11425 { 11426 struct ecore_config_rss_params params = { NULL }; 11427 int i; 11428 11429 /* 11430 * Although RSS is meaningless when there is a single HW queue, we 11431 * still need it enabled in order to have HW Rx hash generated.
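 *
 * The indirection table handed in via params.ind_table spreads flows
 * across the ETH queues; bxe_init_rss_pf() below fills it round-robin
 * as:
 *
 *   ind_table[i] = sc->fp->cl_id + (i % num_eth_queues);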
11432 */ 11433 11434 params.rss_obj = rss_obj; 11435 11436 bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 11437 11438 bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags); 11439 11440 /* RSS configuration */ 11441 bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags); 11442 bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags); 11443 bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags); 11444 bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags); 11445 if (rss_obj->udp_rss_v4) { 11446 bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags); 11447 } 11448 if (rss_obj->udp_rss_v6) { 11449 bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags); 11450 } 11451 11452 /* Hash bits */ 11453 params.rss_result_mask = MULTI_MASK; 11454 11455 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); 11456 11457 if (config_hash) { 11458 /* RSS keys */ 11459 for (i = 0; i < sizeof(params.rss_key) / 4; i++) { 11460 params.rss_key[i] = arc4random(); 11461 } 11462 11463 bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags); 11464 } 11465 11466 return (ecore_config_rss(sc, &params)); 11467 } 11468 11469 static int 11470 bxe_config_rss_eth(struct bxe_softc *sc, 11471 uint8_t config_hash) 11472 { 11473 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); 11474 } 11475 11476 static int 11477 bxe_init_rss_pf(struct bxe_softc *sc) 11478 { 11479 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); 11480 int i; 11481 11482 /* 11483 * Prepare the initial contents of the indirection table if 11484 * RSS is enabled 11485 */ 11486 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { 11487 sc->rss_conf_obj.ind_table[i] = 11488 (sc->fp->cl_id + (i % num_eth_queues)); 11489 } 11490 11491 if (sc->udp_rss) { 11492 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; 11493 } 11494 11495 /* 11496 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 11497 * per-port, so if explicit configuration is needed, do it only 11498 * for a PMF. 11499 * 11500 * For 57712 and newer it's a per-function configuration. 11501 */ 11502 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); 11503 } 11504 11505 static int 11506 bxe_set_mac_one(struct bxe_softc *sc, 11507 uint8_t *mac, 11508 struct ecore_vlan_mac_obj *obj, 11509 uint8_t set, 11510 int mac_type, 11511 unsigned long *ramrod_flags) 11512 { 11513 struct ecore_vlan_mac_ramrod_params ramrod_param; 11514 int rc; 11515 11516 memset(&ramrod_param, 0, sizeof(ramrod_param)); 11517 11518 /* fill in general parameters */ 11519 ramrod_param.vlan_mac_obj = obj; 11520 ramrod_param.ramrod_flags = *ramrod_flags; 11521 11522 /* fill a user request section if needed */ 11523 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) { 11524 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 11525 11526 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 11527 11528 /* Set the command: ADD or DEL */ 11529 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : 11530 ECORE_VLAN_MAC_DEL; 11531 } 11532 11533 rc = ecore_config_vlan_mac(sc, &ramrod_param); 11534 11535 if (rc == ECORE_EXISTS) { 11536 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 11537 /* do not treat adding same MAC as error */ 11538 rc = 0; 11539 } else if (rc < 0) { 11540 BLOGE(sc, "%s MAC failed (%d)\n", (set ?
"Set" : "Delete"), rc); 11541 } 11542 11543 return (rc); 11544 } 11545 11546 static int 11547 bxe_set_eth_mac(struct bxe_softc *sc, 11548 uint8_t set) 11549 { 11550 unsigned long ramrod_flags = 0; 11551 11552 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); 11553 11554 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 11555 11556 /* Eth MAC is set on RSS leading client (fp[0]) */ 11557 return (bxe_set_mac_one(sc, sc->link_params.mac_addr, 11558 &sc->sp_objs->mac_obj, 11559 set, ECORE_ETH_MAC, &ramrod_flags)); 11560 } 11561 11562 static int 11563 bxe_get_cur_phy_idx(struct bxe_softc *sc) 11564 { 11565 uint32_t sel_phy_idx = 0; 11566 11567 if (sc->link_params.num_phys <= 1) { 11568 return (ELINK_INT_PHY); 11569 } 11570 11571 if (sc->link_vars.link_up) { 11572 sel_phy_idx = ELINK_EXT_PHY1; 11573 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 11574 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 11575 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 11576 ELINK_SUPPORTED_FIBRE)) 11577 sel_phy_idx = ELINK_EXT_PHY2; 11578 } else { 11579 switch (elink_phy_selection(&sc->link_params)) { 11580 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 11581 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 11582 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 11583 sel_phy_idx = ELINK_EXT_PHY1; 11584 break; 11585 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 11586 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 11587 sel_phy_idx = ELINK_EXT_PHY2; 11588 break; 11589 } 11590 } 11591 11592 return (sel_phy_idx); 11593 } 11594 11595 static int 11596 bxe_get_link_cfg_idx(struct bxe_softc *sc) 11597 { 11598 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); 11599 11600 /* 11601 * The selected activated PHY is always after swapping (in case PHY 11602 * swapping is enabled). 
So when swapping is enabled, we need to reverse 11603 * the configuration 11604 */ 11605 11606 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 11607 if (sel_phy_idx == ELINK_EXT_PHY1) 11608 sel_phy_idx = ELINK_EXT_PHY2; 11609 else if (sel_phy_idx == ELINK_EXT_PHY2) 11610 sel_phy_idx = ELINK_EXT_PHY1; 11611 } 11612 11613 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); 11614 } 11615 11616 static void 11617 bxe_set_requested_fc(struct bxe_softc *sc) 11618 { 11619 /* 11620 * Initialize link parameters structure variables 11621 * It is recommended to turn off RX FC for jumbo frames 11622 * for better performance 11623 */ 11624 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { 11625 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; 11626 } else { 11627 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; 11628 } 11629 } 11630 11631 static void 11632 bxe_calc_fc_adv(struct bxe_softc *sc) 11633 { 11634 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); 11635 switch (sc->link_vars.ieee_fc & 11636 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 11637 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 11638 default: 11639 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 11640 ADVERTISED_Pause); 11641 break; 11642 11643 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 11644 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 11645 ADVERTISED_Pause); 11646 break; 11647 11648 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 11649 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 11650 break; 11651 } 11652 } 11653 11654 static uint16_t 11655 bxe_get_mf_speed(struct bxe_softc *sc) 11656 { 11657 uint16_t line_speed = sc->link_vars.line_speed; 11658 if (IS_MF(sc)) { 11659 uint16_t maxCfg = 11660 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); 11661 11662 /* calculate the current MAX line speed limit for the MF devices */ 11663 if (IS_MF_SI(sc)) { 11664 line_speed = (line_speed * maxCfg) / 100; 11665 } else { /* SD mode */ 11666 uint16_t vn_max_rate = maxCfg * 100; 11667 11668 if (vn_max_rate < line_speed) { 11669 line_speed = vn_max_rate; 11670 } 11671 } 11672 } 11673 11674 return (line_speed); 11675 } 11676 11677 static void 11678 bxe_fill_report_data(struct bxe_softc *sc, 11679 struct bxe_link_report_data *data) 11680 { 11681 uint16_t line_speed = bxe_get_mf_speed(sc); 11682 11683 memset(data, 0, sizeof(*data)); 11684 11685 /* fill the report data with the effective line speed */ 11686 data->line_speed = line_speed; 11687 11688 /* Link is down */ 11689 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { 11690 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); 11691 } 11692 11693 /* Full DUPLEX */ 11694 if (sc->link_vars.duplex == DUPLEX_FULL) { 11695 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); 11696 } 11697 11698 /* Rx Flow Control is ON */ 11699 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { 11700 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); 11701 } 11702 11703 /* Tx Flow Control is ON */ 11704 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 11705 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); 11706 } 11707 } 11708 11709 /* report link status to OS, should be called under phy_lock */ 11710 static void 11711 bxe_link_report_locked(struct bxe_softc *sc) 11712 { 11713 struct bxe_link_report_data cur_data; 11714 11715 /* reread mf_cfg */ 11716 if (IS_PF(sc) && !CHIP_IS_E1(sc)) { 11717 bxe_read_mf_cfg(sc); 11718 } 11719 11720 /* Read the current link report info 
*/ 11721 bxe_fill_report_data(sc, &cur_data); 11722 11723 /* Don't report link down or exactly the same link status twice */ 11724 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || 11725 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 11726 &sc->last_reported_link.link_report_flags) && 11727 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 11728 &cur_data.link_report_flags))) { 11729 return; 11730 } 11731 11732 sc->link_cnt++; 11733 11734 /* report new link params and remember the state for the next time */ 11735 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); 11736 11737 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 11738 &cur_data.link_report_flags)) { 11739 if_link_state_change(sc->ifp, LINK_STATE_DOWN); 11740 BLOGI(sc, "NIC Link is Down\n"); 11741 } else { 11742 const char *duplex; 11743 const char *flow; 11744 11745 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, 11746 &cur_data.link_report_flags)) { 11747 duplex = "full"; 11748 } else { 11749 duplex = "half"; 11750 } 11751 11752 /* 11753 * Handle the FC at the end so that only the FC flags can still 11754 * be set. This way we can easily check whether any FC is 11755 * enabled. 11756 */ 11757 if (cur_data.link_report_flags) { 11758 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 11759 &cur_data.link_report_flags) && 11760 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 11761 &cur_data.link_report_flags)) { 11762 flow = "ON - receive & transmit"; 11763 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 11764 &cur_data.link_report_flags) && 11765 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 11766 &cur_data.link_report_flags)) { 11767 flow = "ON - receive"; 11768 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 11769 &cur_data.link_report_flags) && 11770 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 11771 &cur_data.link_report_flags)) { 11772 flow = "ON - transmit"; 11773 } else { 11774 flow = "none"; /* possible?
*/ 11775 } 11776 } else { 11777 flow = "none"; 11778 } 11779 11780 if_link_state_change(sc->ifp, LINK_STATE_UP); 11781 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 11782 cur_data.line_speed, duplex, flow); 11783 } 11784 } 11785 11786 static void 11787 bxe_link_report(struct bxe_softc *sc) 11788 { 11789 bxe_acquire_phy_lock(sc); 11790 bxe_link_report_locked(sc); 11791 bxe_release_phy_lock(sc); 11792 } 11793 11794 static void 11795 bxe_link_status_update(struct bxe_softc *sc) 11796 { 11797 if (sc->state != BXE_STATE_OPEN) { 11798 return; 11799 } 11800 11801 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 11802 elink_link_status_update(&sc->link_params, &sc->link_vars); 11803 } else { 11804 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 11805 ELINK_SUPPORTED_10baseT_Full | 11806 ELINK_SUPPORTED_100baseT_Half | 11807 ELINK_SUPPORTED_100baseT_Full | 11808 ELINK_SUPPORTED_1000baseT_Full | 11809 ELINK_SUPPORTED_2500baseX_Full | 11810 ELINK_SUPPORTED_10000baseT_Full | 11811 ELINK_SUPPORTED_TP | 11812 ELINK_SUPPORTED_FIBRE | 11813 ELINK_SUPPORTED_Autoneg | 11814 ELINK_SUPPORTED_Pause | 11815 ELINK_SUPPORTED_Asym_Pause); 11816 sc->port.advertising[0] = sc->port.supported[0]; 11817 11818 sc->link_params.sc = sc; 11819 sc->link_params.port = SC_PORT(sc); 11820 sc->link_params.req_duplex[0] = DUPLEX_FULL; 11821 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 11822 sc->link_params.req_line_speed[0] = SPEED_10000; 11823 sc->link_params.speed_cap_mask[0] = 0x7f0000; 11824 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 11825 11826 if (CHIP_REV_IS_FPGA(sc)) { 11827 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 11828 sc->link_vars.line_speed = ELINK_SPEED_1000; 11829 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 11830 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 11831 } else { 11832 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 11833 sc->link_vars.line_speed = ELINK_SPEED_10000; 11834 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 11835 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 11836 } 11837 11838 sc->link_vars.link_up = 1; 11839 11840 sc->link_vars.duplex = DUPLEX_FULL; 11841 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 11842 11843 if (IS_PF(sc)) { 11844 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); 11845 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 11846 bxe_link_report(sc); 11847 } 11848 } 11849 11850 if (IS_PF(sc)) { 11851 if (sc->link_vars.link_up) { 11852 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 11853 } else { 11854 bxe_stats_handle(sc, STATS_EVENT_STOP); 11855 } 11856 bxe_link_report(sc); 11857 } else { 11858 bxe_link_report(sc); 11859 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 11860 } 11861 } 11862 11863 static int 11864 bxe_initial_phy_init(struct bxe_softc *sc, 11865 int load_mode) 11866 { 11867 int rc, cfg_idx = bxe_get_link_cfg_idx(sc); 11868 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 11869 struct elink_params *lp = &sc->link_params; 11870 11871 bxe_set_requested_fc(sc); 11872 11873 if (CHIP_REV_IS_SLOW(sc)) { 11874 uint32_t bond = CHIP_BOND_ID(sc); 11875 uint32_t feat = 0; 11876 11877 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { 11878 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 11879 } else if (bond & 0x4) { 11880 if (CHIP_IS_E3(sc)) { 11881 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; 11882 } else { 11883 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 11884 } 11885 } else if (bond & 0x8) { 11886 if (CHIP_IS_E3(sc)) { 11887 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; 11888 } else { 11889 feat 
|= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 11890 } 11891 } 11892 11893 /* disable EMAC for E3 and above */ 11894 if (bond & 0x2) { 11895 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 11896 } 11897 11898 sc->link_params.feature_config_flags |= feat; 11899 } 11900 11901 bxe_acquire_phy_lock(sc); 11902 11903 if (load_mode == LOAD_DIAG) { 11904 lp->loopback_mode = ELINK_LOOPBACK_XGXS; 11905 /* Prefer doing PHY loopback at 10G speed, if possible */ 11906 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { 11907 if (lp->speed_cap_mask[cfg_idx] & 11908 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 11909 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; 11910 } else { 11911 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; 11912 } 11913 } 11914 } 11915 11916 if (load_mode == LOAD_LOOPBACK_EXT) { 11917 lp->loopback_mode = ELINK_LOOPBACK_EXT; 11918 } 11919 11920 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 11921 11922 bxe_release_phy_lock(sc); 11923 11924 bxe_calc_fc_adv(sc); 11925 11926 if (sc->link_vars.link_up) { 11927 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 11928 bxe_link_report(sc); 11929 } 11930 11931 if (!CHIP_REV_IS_SLOW(sc)) { 11932 bxe_periodic_start(sc); 11933 } 11934 11935 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; 11936 return (rc); 11937 } 11938 11939 /* must be called under IF_ADDR_LOCK */ 11940 11941 static int 11942 bxe_set_mc_list(struct bxe_softc *sc) 11943 { 11944 struct ecore_mcast_ramrod_params rparam = { NULL }; 11945 int rc = 0; 11946 int mc_count = 0; 11947 int mcnt, i; 11948 struct ecore_mcast_list_elem *mc_mac, *mc_mac_start; 11949 unsigned char *mta; 11950 if_t ifp = sc->ifp; 11951 11952 mc_count = if_multiaddr_count(ifp, -1); /* XXX they don't have a limit */ 11953 if (!mc_count) 11954 return (0); 11955 11956 mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * 11957 mc_count, M_DEVBUF, M_NOWAIT); 11958 11959 if (mta == NULL) { 11960 BLOGE(sc, "Failed to allocate temp mcast list\n"); 11961 return (-1); 11962 } 11963 bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count)); 11964 11965 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO)); 11966 mc_mac_start = mc_mac; 11967 11968 if (!mc_mac) { 11969 free(mta, M_DEVBUF); 11970 BLOGE(sc, "Failed to allocate temp mcast list\n"); 11971 return (-1); 11972 } 11973 bzero(mc_mac, (sizeof(*mc_mac) * mc_count)); 11974 11975 /* mta and mcnt not expected to be different */ 11976 if_multiaddr_array(ifp, mta, &mcnt, mc_count); 11977 11978 11979 rparam.mcast_obj = &sc->mcast_obj; 11980 ECORE_LIST_INIT(&rparam.mcast_list); 11981 11982 for (i = 0; i < mcnt; i++) { 11983 11984 mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN)); 11985 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list); 11986 11987 BLOGD(sc, DBG_LOAD, 11988 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n", 11989 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2], 11990 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]); 11991 11992 mc_mac++; 11993 } 11994 rparam.mcast_list_len = mc_count; 11995 11996 BXE_MCAST_LOCK(sc); 11997 11998 /* first, clear all configured multicast MACs */ 11999 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 12000 if (rc < 0) { 12001 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc); 12002 BXE_MCAST_UNLOCK(sc); 12003 free(mc_mac_start, M_DEVBUF); 12004 free(mta, M_DEVBUF); 12005 return (rc); 12006 } 12007 12008 /* Now add the new MACs */ 12009 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD); 12010 if (rc < 0) { 12011 BLOGE(sc, "Failed to set new mcast config (%d)\n",
rc); 12012 } 12013 12014 BXE_MCAST_UNLOCK(sc); 12015 12016 free(mc_mac_start, M_DEVBUF); 12017 free(mta, M_DEVBUF); 12018 12019 return (rc); 12020 } 12021 12022 static int 12023 bxe_set_uc_list(struct bxe_softc *sc) 12024 { 12025 if_t ifp = sc->ifp; 12026 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 12027 struct ifaddr *ifa; 12028 unsigned long ramrod_flags = 0; 12029 int rc; 12030 12031 #if __FreeBSD_version < 800000 12032 IF_ADDR_LOCK(ifp); 12033 #else 12034 if_addr_rlock(ifp); 12035 #endif 12036 12037 /* first, schedule a cleanup of the old configuration */ 12038 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); 12039 if (rc < 0) { 12040 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); 12041 #if __FreeBSD_version < 800000 12042 IF_ADDR_UNLOCK(ifp); 12043 #else 12044 if_addr_runlock(ifp); 12045 #endif 12046 return (rc); 12047 } 12048 12049 ifa = if_getifaddr(ifp); /* XXX Is this structure */ 12050 while (ifa) { 12051 if (ifa->ifa_addr->sa_family != AF_LINK) { 12052 ifa = TAILQ_NEXT(ifa, ifa_link); 12053 continue; 12054 } 12055 12056 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 12057 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags); 12058 if (rc == -EEXIST) { 12059 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12060 /* do not treat adding same MAC as an error */ 12061 rc = 0; 12062 } else if (rc < 0) { 12063 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc); 12064 #if __FreeBSD_version < 800000 12065 IF_ADDR_UNLOCK(ifp); 12066 #else 12067 if_addr_runlock(ifp); 12068 #endif 12069 return (rc); 12070 } 12071 12072 ifa = TAILQ_NEXT(ifa, ifa_link); 12073 } 12074 12075 #if __FreeBSD_version < 800000 12076 IF_ADDR_UNLOCK(ifp); 12077 #else 12078 if_addr_runlock(ifp); 12079 #endif 12080 12081 /* Execute the pending commands */ 12082 bit_set(&ramrod_flags, RAMROD_CONT); 12083 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, 12084 ECORE_UC_LIST_MAC, &ramrod_flags)); 12085 } 12086 12087 static void 12088 bxe_set_rx_mode(struct bxe_softc *sc) 12089 { 12090 if_t ifp = sc->ifp; 12091 uint32_t rx_mode = BXE_RX_MODE_NORMAL; 12092 12093 if (sc->state != BXE_STATE_OPEN) { 12094 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); 12095 return; 12096 } 12097 12098 BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp)); 12099 12100 if (if_getflags(ifp) & IFF_PROMISC) { 12101 rx_mode = BXE_RX_MODE_PROMISC; 12102 } else if ((if_getflags(ifp) & IFF_ALLMULTI) || 12103 ((if_getamcount(ifp) > BXE_MAX_MULTICAST) && 12104 CHIP_IS_E1(sc))) { 12105 rx_mode = BXE_RX_MODE_ALLMULTI; 12106 } else { 12107 if (IS_PF(sc)) { 12108 /* some multicasts */ 12109 if (bxe_set_mc_list(sc) < 0) { 12110 rx_mode = BXE_RX_MODE_ALLMULTI; 12111 } 12112 if (bxe_set_uc_list(sc) < 0) { 12113 rx_mode = BXE_RX_MODE_PROMISC; 12114 } 12115 } 12116 } 12117 12118 sc->rx_mode = rx_mode; 12119 12120 /* schedule the rx_mode command */ 12121 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 12122 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n"); 12123 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 12124 return; 12125 } 12126 12127 if (IS_PF(sc)) { 12128 bxe_set_storm_rx_mode(sc); 12129 } 12130 } 12131 12132 12133 /* update flags in shmem */ 12134 static void 12135 bxe_update_drv_flags(struct bxe_softc *sc, 12136 uint32_t flags, 12137 uint32_t set) 12138 { 12139 uint32_t drv_flags; 12140 12141 if (SHMEM2_HAS(sc, drv_flags)) { 12142 bxe_acquire_hw_lock(sc,
HW_LOCK_RESOURCE_DRV_FLAGS); 12143 drv_flags = SHMEM2_RD(sc, drv_flags); 12144 12145 if (set) { 12146 SET_FLAGS(drv_flags, flags); 12147 } else { 12148 RESET_FLAGS(drv_flags, flags); 12149 } 12150 12151 SHMEM2_WR(sc, drv_flags, drv_flags); 12152 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); 12153 12154 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12155 } 12156 } 12157 12158 /* periodic timer callout routine, only runs when the interface is up */ 12159 12160 static void 12161 bxe_periodic_callout_func(void *xsc) 12162 { 12163 struct bxe_softc *sc = (struct bxe_softc *)xsc; 12164 int i; 12165 12166 if (!BXE_CORE_TRYLOCK(sc)) { 12167 /* just bail and try again next time */ 12168 12169 if ((sc->state == BXE_STATE_OPEN) && 12170 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12171 /* schedule the next periodic callout */ 12172 callout_reset(&sc->periodic_callout, hz, 12173 bxe_periodic_callout_func, sc); 12174 } 12175 12176 return; 12177 } 12178 12179 if ((sc->state != BXE_STATE_OPEN) || 12180 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { 12181 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); 12182 BXE_CORE_UNLOCK(sc); 12183 return; 12184 } 12185 12186 12187 /* Check for TX timeouts on any fastpath. */ 12188 FOR_EACH_QUEUE(sc, i) { 12189 if (bxe_watchdog(sc, &sc->fp[i]) != 0) { 12190 /* Ruh-Roh, chip was reset! */ 12191 break; 12192 } 12193 } 12194 12195 if (!CHIP_REV_IS_SLOW(sc)) { 12196 /* 12197 * This barrier is needed to ensure the ordering between the writing 12198 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and 12199 * the reading here. 12200 */ 12201 mb(); 12202 if (sc->port.pmf) { 12203 bxe_acquire_phy_lock(sc); 12204 elink_period_func(&sc->link_params, &sc->link_vars); 12205 bxe_release_phy_lock(sc); 12206 } 12207 } 12208 12209 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) { 12210 int mb_idx = SC_FW_MB_IDX(sc); 12211 uint32_t drv_pulse; 12212 uint32_t mcp_pulse; 12213 12214 ++sc->fw_drv_pulse_wr_seq; 12215 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 12216 12217 drv_pulse = sc->fw_drv_pulse_wr_seq; 12218 bxe_drv_pulse(sc); 12219 12220 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & 12221 MCP_PULSE_SEQ_MASK); 12222 12223 /* 12224 * The delta between driver pulse and mcp response should 12225 * be 1 (before mcp response) or 0 (after mcp response). 12226 */ 12227 if ((drv_pulse != mcp_pulse) && 12228 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 12229 /* someone lost a heartbeat... 
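     * The pulse sequence numbers wrap at MCP_PULSE_SEQ_MASK, so the
     * comparison above is done modulo the mask; any delta other than
     * 0 (MCP already answered) or 1 (answer still pending) means the
     * driver and the management firmware have fallen out of step.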
*/ 12230 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 12231 drv_pulse, mcp_pulse); 12232 } 12233 } 12234 12235 /* state is BXE_STATE_OPEN */ 12236 bxe_stats_handle(sc, STATS_EVENT_UPDATE); 12237 12238 BXE_CORE_UNLOCK(sc); 12239 12240 if ((sc->state == BXE_STATE_OPEN) && 12241 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12242 /* schedule the next periodic callout */ 12243 callout_reset(&sc->periodic_callout, hz, 12244 bxe_periodic_callout_func, sc); 12245 } 12246 } 12247 12248 static void 12249 bxe_periodic_start(struct bxe_softc *sc) 12250 { 12251 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); 12252 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); 12253 } 12254 12255 static void 12256 bxe_periodic_stop(struct bxe_softc *sc) 12257 { 12258 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); 12259 callout_drain(&sc->periodic_callout); 12260 } 12261 12262 /* start the controller */ 12263 static __noinline int 12264 bxe_nic_load(struct bxe_softc *sc, 12265 int load_mode) 12266 { 12267 uint32_t val; 12268 int load_code = 0; 12269 int i, rc = 0; 12270 12271 BXE_CORE_LOCK_ASSERT(sc); 12272 12273 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n"); 12274 12275 sc->state = BXE_STATE_OPENING_WAITING_LOAD; 12276 12277 if (IS_PF(sc)) { 12278 /* must be called before memory allocation and HW init */ 12279 bxe_ilt_set_info(sc); 12280 } 12281 12282 sc->last_reported_link_state = LINK_STATE_UNKNOWN; 12283 12284 bxe_set_fp_rx_buf_size(sc); 12285 12286 if (bxe_alloc_fp_buffers(sc) != 0) { 12287 BLOGE(sc, "Failed to allocate fastpath memory\n"); 12288 sc->state = BXE_STATE_CLOSED; 12289 rc = ENOMEM; 12290 goto bxe_nic_load_error0; 12291 } 12292 12293 if (bxe_alloc_mem(sc) != 0) { 12294 sc->state = BXE_STATE_CLOSED; 12295 rc = ENOMEM; 12296 goto bxe_nic_load_error0; 12297 } 12298 12299 if (bxe_alloc_fw_stats_mem(sc) != 0) { 12300 sc->state = BXE_STATE_CLOSED; 12301 rc = ENOMEM; 12302 goto bxe_nic_load_error0; 12303 } 12304 12305 if (IS_PF(sc)) { 12306 /* set pf load just before approaching the MCP */ 12307 bxe_set_pf_load(sc); 12308 12309 /* if MCP exists send load request and analyze response */ 12310 if (!BXE_NOMCP(sc)) { 12311 /* attempt to load pf */ 12312 if (bxe_nic_load_request(sc, &load_code) != 0) { 12313 sc->state = BXE_STATE_CLOSED; 12314 rc = ENXIO; 12315 goto bxe_nic_load_error1; 12316 } 12317 12318 /* what did the MCP say? 
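     * bxe_nic_load_analyze_req() vets the response; if the MCP refused
     * the load we release it with a LOAD_DONE command and unwind with
     * ENXIO.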
*/ 12319 if (bxe_nic_load_analyze_req(sc, load_code) != 0) { 12320 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12321 sc->state = BXE_STATE_CLOSED; 12322 rc = ENXIO; 12323 goto bxe_nic_load_error2; 12324 } 12325 } else { 12326 BLOGI(sc, "Device has no MCP!\n"); 12327 load_code = bxe_nic_load_no_mcp(sc); 12328 } 12329 12330 /* mark PMF if applicable */ 12331 bxe_nic_load_pmf(sc, load_code); 12332 12333 /* Init Function state controlling object */ 12334 bxe_init_func_obj(sc); 12335 12336 /* Initialize HW */ 12337 if (bxe_init_hw(sc, load_code) != 0) { 12338 BLOGE(sc, "HW init failed\n"); 12339 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12340 sc->state = BXE_STATE_CLOSED; 12341 rc = ENXIO; 12342 goto bxe_nic_load_error2; 12343 } 12344 } 12345 12346 /* set ALWAYS_ALIVE bit in shmem */ 12347 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 12348 bxe_drv_pulse(sc); 12349 sc->flags |= BXE_NO_PULSE; 12350 12351 /* attach interrupts */ 12352 if (bxe_interrupt_attach(sc) != 0) { 12353 sc->state = BXE_STATE_CLOSED; 12354 rc = ENXIO; 12355 goto bxe_nic_load_error2; 12356 } 12357 12358 bxe_nic_init(sc, load_code); 12359 12360 /* Init per-function objects */ 12361 if (IS_PF(sc)) { 12362 bxe_init_objs(sc); 12363 // XXX bxe_iov_nic_init(sc); 12364 12365 /* set AFEX default VLAN tag to an invalid value */ 12366 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 12367 // XXX bxe_nic_load_afex_dcc(sc, load_code); 12368 12369 sc->state = BXE_STATE_OPENING_WAITING_PORT; 12370 rc = bxe_func_start(sc); 12371 if (rc) { 12372 BLOGE(sc, "Function start failed! rc = %d\n", rc); 12373 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12374 sc->state = BXE_STATE_ERROR; 12375 goto bxe_nic_load_error3; 12376 } 12377 12378 /* send LOAD_DONE command to MCP */ 12379 if (!BXE_NOMCP(sc)) { 12380 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12381 if (!load_code) { 12382 BLOGE(sc, "MCP response failure, aborting\n"); 12383 sc->state = BXE_STATE_ERROR; 12384 rc = ENXIO; 12385 goto bxe_nic_load_error3; 12386 } 12387 } 12388 12389 rc = bxe_setup_leading(sc); 12390 if (rc) { 12391 BLOGE(sc, "Setup leading failed! 
rc = %d\n", rc); 12392 sc->state = BXE_STATE_ERROR; 12393 goto bxe_nic_load_error3; 12394 } 12395 12396 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { 12397 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); 12398 if (rc) { 12399 BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc); 12400 sc->state = BXE_STATE_ERROR; 12401 goto bxe_nic_load_error3; 12402 } 12403 } 12404 12405 rc = bxe_init_rss_pf(sc); 12406 if (rc) { 12407 BLOGE(sc, "PF RSS init failed\n"); 12408 sc->state = BXE_STATE_ERROR; 12409 goto bxe_nic_load_error3; 12410 } 12411 } 12412 /* XXX VF */ 12413 12414 /* now when Clients are configured we are ready to work */ 12415 sc->state = BXE_STATE_OPEN; 12416 12417 /* Configure a ucast MAC */ 12418 if (IS_PF(sc)) { 12419 rc = bxe_set_eth_mac(sc, TRUE); 12420 } 12421 if (rc) { 12422 BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc); 12423 sc->state = BXE_STATE_ERROR; 12424 goto bxe_nic_load_error3; 12425 } 12426 12427 if (sc->port.pmf) { 12428 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); 12429 if (rc) { 12430 sc->state = BXE_STATE_ERROR; 12431 goto bxe_nic_load_error3; 12432 } 12433 } 12434 12435 sc->link_params.feature_config_flags &= 12436 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; 12437 12438 /* start fast path */ 12439 12440 /* Initialize Rx filter */ 12441 bxe_set_rx_mode(sc); 12442 12443 /* start the Tx */ 12444 switch (/* XXX load_mode */LOAD_OPEN) { 12445 case LOAD_NORMAL: 12446 case LOAD_OPEN: 12447 break; 12448 12449 case LOAD_DIAG: 12450 case LOAD_LOOPBACK_EXT: 12451 sc->state = BXE_STATE_DIAG; 12452 break; 12453 12454 default: 12455 break; 12456 } 12457 12458 if (sc->port.pmf) { 12459 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); 12460 } else { 12461 bxe_link_status_update(sc); 12462 } 12463 12464 /* start the periodic timer callout */ 12465 bxe_periodic_start(sc); 12466 12467 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 12468 /* mark driver is loaded in shmem2 */ 12469 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 12470 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 12471 (val | 12472 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 12473 DRV_FLAGS_CAPABILITIES_LOADED_L2)); 12474 } 12475 12476 /* wait for all pending SP commands to complete */ 12477 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { 12478 BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); 12479 bxe_periodic_stop(sc); 12480 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); 12481 return (ENXIO); 12482 } 12483 12484 /* Tell the stack the driver is running! */ 12485 if_setdrvflags(sc->ifp, IFF_DRV_RUNNING); 12486 12487 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); 12488 12489 return (0); 12490 12491 bxe_nic_load_error3: 12492 12493 if (IS_PF(sc)) { 12494 bxe_int_disable_sync(sc, 1); 12495 12496 /* clean out queued objects */ 12497 bxe_squeeze_objects(sc); 12498 } 12499 12500 bxe_interrupt_detach(sc); 12501 12502 bxe_nic_load_error2: 12503 12504 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 12505 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 12506 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 12507 } 12508 12509 sc->port.pmf = 0; 12510 12511 bxe_nic_load_error1: 12512 12513 /* clear pf_load status, as it was already set */ 12514 if (IS_PF(sc)) { 12515 bxe_clear_pf_load(sc); 12516 } 12517 12518 bxe_nic_load_error0: 12519 12520 bxe_free_fw_stats_mem(sc); 12521 bxe_free_fp_buffers(sc); 12522 bxe_free_mem(sc); 12523 12524 return (rc); 12525 } 12526 12527 static int 12528 bxe_init_locked(struct bxe_softc *sc) 12529 { 12530 int other_engine = SC_PATH(sc) ? 
0 : 1; 12531 uint8_t other_load_status, load_status; 12532 uint8_t global = FALSE; 12533 int rc; 12534 12535 BXE_CORE_LOCK_ASSERT(sc); 12536 12537 /* check if the driver is already running */ 12538 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 12539 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n"); 12540 return (0); 12541 } 12542 12543 bxe_set_power_state(sc, PCI_PM_D0); 12544 12545 /* 12546 * If parity occurred during the unload, then attentions and/or 12547 * RECOVERY_IN_PROGRESS may still be set. If so, we want the first function 12548 * loaded on the current engine to complete the recovery. Parity recovery 12549 * is only relevant for the PF driver. 12550 */ 12551 if (IS_PF(sc)) { 12552 other_load_status = bxe_get_load_status(sc, other_engine); 12553 load_status = bxe_get_load_status(sc, SC_PATH(sc)); 12554 12555 if (!bxe_reset_is_done(sc, SC_PATH(sc)) || 12556 bxe_chk_parity_attn(sc, &global, TRUE)) { 12557 do { 12558 /* 12559 * If there are attentions and they are in global blocks, set 12560 * the GLOBAL_RESET bit regardless of whether it will be this 12561 * function that will complete the recovery or not. 12562 */ 12563 if (global) { 12564 bxe_set_reset_global(sc); 12565 } 12566 12567 /* 12568 * Only the first function on the current engine should try 12569 * to recover in open. In case of attentions in global blocks, 12570 * only the first function in the chip should try to recover. 12571 */ 12572 if ((!load_status && (!global || !other_load_status)) && 12573 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) { 12574 BLOGI(sc, "Recovered during init\n"); 12575 break; 12576 } 12577 12578 /* recovery has failed... */ 12579 bxe_set_power_state(sc, PCI_PM_D3hot); 12580 sc->recovery_state = BXE_RECOVERY_FAILED; 12581 12582 BLOGE(sc, "Recovery flow hasn't properly " 12583 "completed yet, try again later. " 12584 "If you still see this message after a " 12585 "few retries then a power cycle is required.\n"); 12586 12587 rc = ENXIO; 12588 goto bxe_init_locked_done; 12589 } while (0); 12590 } 12591 } 12592 12593 sc->recovery_state = BXE_RECOVERY_DONE; 12594 12595 rc = bxe_nic_load(sc, LOAD_OPEN); 12596 12597 bxe_init_locked_done: 12598 12599 if (rc) { 12600 /* Tell the stack the driver is NOT running! */ 12601 BLOGE(sc, "Initialization failed, " 12602 "stack notified driver is NOT running!\n"); 12603 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); 12604 } 12605 12606 return (rc); 12607 } 12608 12609 static int 12610 bxe_stop_locked(struct bxe_softc *sc) 12611 { 12612 BXE_CORE_LOCK_ASSERT(sc); 12613 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE)); 12614 } 12615 12616 /* 12617 * Handles controller initialization when called from an unlocked routine. 12618 * ifconfig calls this function.
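 * (bxe_init() is installed as the if_init handler in bxe_init_ifnet().)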
12619 * 12620 * Returns: 12621 * void 12622 */ 12623 static void 12624 bxe_init(void *xsc) 12625 { 12626 struct bxe_softc *sc = (struct bxe_softc *)xsc; 12627 12628 BXE_CORE_LOCK(sc); 12629 bxe_init_locked(sc); 12630 BXE_CORE_UNLOCK(sc); 12631 } 12632 12633 static int 12634 bxe_init_ifnet(struct bxe_softc *sc) 12635 { 12636 if_t ifp; 12637 int capabilities; 12638 12639 /* ifconfig entrypoint for media type/status reporting */ 12640 ifmedia_init(&sc->ifmedia, IFM_IMASK, 12641 bxe_ifmedia_update, 12642 bxe_ifmedia_status); 12643 12644 /* set the default interface values */ 12645 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); 12646 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); 12647 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); 12648 12649 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */ 12650 12651 /* allocate the ifnet structure */ 12652 if ((ifp = if_gethandle(IFT_ETHER)) == NULL) { 12653 BLOGE(sc, "Interface allocation failed!\n"); 12654 return (ENXIO); 12655 } 12656 12657 if_setsoftc(ifp, sc); 12658 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); 12659 if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); 12660 if_setioctlfn(ifp, bxe_ioctl); 12661 if_setstartfn(ifp, bxe_tx_start); 12662 if_setgetcounterfn(ifp, bxe_get_counter); 12663 #if __FreeBSD_version >= 901504 12664 if_settransmitfn(ifp, bxe_tx_mq_start); 12665 if_setqflushfn(ifp, bxe_mq_flush); 12666 #endif 12667 #ifdef FreeBSD8_0 12668 if_settimer(ifp, 0); 12669 #endif 12670 if_setinitfn(ifp, bxe_init); 12671 if_setmtu(ifp, sc->mtu); 12672 if_sethwassist(ifp, (CSUM_IP | 12673 CSUM_TCP | 12674 CSUM_UDP | 12675 CSUM_TSO | 12676 CSUM_TCP_IPV6 | 12677 CSUM_UDP_IPV6)); 12678 12679 capabilities = 12680 #if __FreeBSD_version < 700000 12681 (IFCAP_VLAN_MTU | 12682 IFCAP_VLAN_HWTAGGING | 12683 IFCAP_HWCSUM | 12684 IFCAP_JUMBO_MTU | 12685 IFCAP_LRO); 12686 #else 12687 (IFCAP_VLAN_MTU | 12688 IFCAP_VLAN_HWTAGGING | 12689 IFCAP_VLAN_HWTSO | 12690 IFCAP_VLAN_HWFILTER | 12691 IFCAP_VLAN_HWCSUM | 12692 IFCAP_HWCSUM | 12693 IFCAP_JUMBO_MTU | 12694 IFCAP_LRO | 12695 IFCAP_TSO4 | 12696 IFCAP_TSO6 | 12697 IFCAP_WOL_MAGIC); 12698 #endif 12699 if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */ 12700 if_setcapenable(ifp, if_getcapabilities(ifp)); 12701 if_setbaudrate(ifp, IF_Gbps(10)); 12702 /* XXX */ 12703 if_setsendqlen(ifp, sc->tx_ring_size); 12704 if_setsendqready(ifp); 12705 /* XXX */ 12706 12707 sc->ifp = ifp; 12708 12709 /* attach to the Ethernet interface list */ 12710 ether_ifattach(ifp, sc->link_params.mac_addr); 12711 12712 return (0); 12713 } 12714 12715 static void 12716 bxe_deallocate_bars(struct bxe_softc *sc) 12717 { 12718 int i; 12719 12720 for (i = 0; i < MAX_BARS; i++) { 12721 if (sc->bar[i].resource != NULL) { 12722 bus_release_resource(sc->dev, 12723 SYS_RES_MEMORY, 12724 sc->bar[i].rid, 12725 sc->bar[i].resource); 12726 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", 12727 i, PCIR_BAR(i)); 12728 } 12729 } 12730 } 12731 12732 static int 12733 bxe_allocate_bars(struct bxe_softc *sc) 12734 { 12735 u_int flags; 12736 int i; 12737 12738 memset(sc->bar, 0, sizeof(sc->bar)); 12739 12740 for (i = 0; i < MAX_BARS; i++) { 12741 12742 /* memory resources reside at BARs 0, 2, 4 */ 12743 /* Run `pciconf -lb` to see mappings */ 12744 if ((i != 0) && (i != 2) && (i != 4)) { 12745 continue; 12746 } 12747 12748 sc->bar[i].rid = PCIR_BAR(i); 12749 12750 flags = RF_ACTIVE; 12751 if (i == 0) { 12752 flags |= RF_SHAREABLE; 12753 } 12754 12755 
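        /*
         * Map the BAR. The bus tag/handle pair saved below is what the
         * driver's register access macros (e.g. REG_RD/REG_WR) ultimately
         * use for all register I/O on this device.
         */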
if ((sc->bar[i].resource = 12756 bus_alloc_resource_any(sc->dev, 12757 SYS_RES_MEMORY, 12758 &sc->bar[i].rid, 12759 flags)) == NULL) { 12760 return (ENXIO); /* non-zero so the caller sees the failure */ 12761 } 12762 12763 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource); 12764 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource); 12765 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource); 12766 12767 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%jd) -> %p\n", 12768 i, PCIR_BAR(i), 12769 (void *)rman_get_start(sc->bar[i].resource), 12770 (void *)rman_get_end(sc->bar[i].resource), 12771 rman_get_size(sc->bar[i].resource), 12772 (void *)sc->bar[i].kva); 12773 } 12774 12775 return (0); 12776 } 12777 12778 static void 12779 bxe_get_function_num(struct bxe_softc *sc) 12780 { 12781 uint32_t val = 0; 12782 12783 /* 12784 * Read the ME register to get the function number. The ME register 12785 * holds the relative-function number and absolute-function number. The 12786 * absolute-function number appears only in E2 and above. Before that 12787 * these bits always contained zero, therefore we cannot blindly use them. 12788 */ 12789 12790 val = REG_RD(sc, BAR_ME_REGISTER); 12791 12792 sc->pfunc_rel = 12793 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); 12794 sc->path_id = 12795 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; 12796 12797 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 12798 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); 12799 } else { 12800 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); 12801 } 12802 12803 BLOGD(sc, DBG_LOAD, 12804 "Relative function %d, Absolute function %d, Path %d\n", 12805 sc->pfunc_rel, sc->pfunc_abs, sc->path_id); 12806 } 12807 12808 static uint32_t 12809 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc) 12810 { 12811 uint32_t shmem2_size; 12812 uint32_t offset; 12813 uint32_t mf_cfg_offset_value; 12814 12815 /* Non 57712 */ 12816 offset = (SHMEM_RD(sc, func_mb) + 12817 (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); 12818 12819 /* 57712 plus */ 12820 if (sc->devinfo.shmem2_base != 0) { 12821 shmem2_size = SHMEM2_RD(sc, size); 12822 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { 12823 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); 12824 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { 12825 offset = mf_cfg_offset_value; 12826 } 12827 } 12828 } 12829 12830 return (offset); 12831 } 12832 12833 static uint32_t 12834 bxe_pcie_capability_read(struct bxe_softc *sc, 12835 int reg, 12836 int width) 12837 { 12838 int pcie_reg; 12839 12840 /* ensure the PCIe capability is present */ 12841 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) { 12842 if (pcie_reg != 0) { 12843 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg); 12844 return (pci_read_config(sc->dev, (pcie_reg + reg), width)); 12845 } 12846 } 12847 12848 BLOGE(sc, "PCIe capability NOT FOUND!!!\n"); 12849 12850 return (0); 12851 } 12852 12853 static uint8_t 12854 bxe_is_pcie_pending(struct bxe_softc *sc) 12855 { 12856 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) & 12857 PCIM_EXP_STA_TRANSACTION_PND); 12858 } 12859 12860 /* 12861 * Walk the PCI capabilities list for the device to find what features are 12862 * supported. These capabilities may be enabled/disabled by firmware so it's 12863 * best to walk the list rather than make assumptions.
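 * pci_find_cap(9) returns zero when the capability is present and passes
 * back its config-space offset, which is all that is needed here.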
12864 */ 12865 static void 12866 bxe_probe_pci_caps(struct bxe_softc *sc) 12867 { 12868 uint16_t link_status; 12869 int reg; 12870 12871 /* check if PCI Power Management is enabled */ 12872 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) { 12873 if (reg != 0) { 12874 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg); 12875 12876 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG; 12877 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg; 12878 } 12879 } 12880 12881 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2); 12882 12883 /* handle PCIe 2.0 workarounds for 57710 */ 12884 if (CHIP_IS_E1(sc)) { 12885 /* workaround for 57710 errata E4_57710_27462 */ 12886 sc->devinfo.pcie_link_speed = 12887 (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1; 12888 12889 /* workaround for 57710 errata E4_57710_27488 */ 12890 sc->devinfo.pcie_link_width = 12891 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 12892 if (sc->devinfo.pcie_link_speed > 1) { 12893 sc->devinfo.pcie_link_width = 12894 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1; 12895 } 12896 } else { 12897 sc->devinfo.pcie_link_speed = 12898 (link_status & PCIM_LINK_STA_SPEED); 12899 sc->devinfo.pcie_link_width = 12900 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 12901 } 12902 12903 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n", 12904 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); 12905 12906 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG; 12907 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg; 12908 12909 /* check if MSI capability is enabled */ 12910 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) { 12911 if (reg != 0) { 12912 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg); 12913 12914 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG; 12915 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg; 12916 } 12917 } 12918 12919 /* check if MSI-X capability is enabled */ 12920 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) { 12921 if (reg != 0) { 12922 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg); 12923 12924 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG; 12925 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg; 12926 } 12927 } 12928 } 12929 12930 static int 12931 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc) 12932 { 12933 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 12934 uint32_t val; 12935 12936 /* get the outer vlan if we're in switch-dependent mode */ 12937 12938 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 12939 mf_info->ext_id = (uint16_t)val; 12940 12941 mf_info->multi_vnics_mode = 1; 12942 12943 if (!VALID_OVLAN(mf_info->ext_id)) { 12944 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id); 12945 return (1); 12946 } 12947 12948 /* get the capabilities */ 12949 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 12950 FUNC_MF_CFG_PROTOCOL_ISCSI) { 12951 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; 12952 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 12953 FUNC_MF_CFG_PROTOCOL_FCOE) { 12954 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; 12955 } else { 12956 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; 12957 } 12958 12959 mf_info->vnics_per_port = 12960 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ?
2 : 4; 12961 12962 return (0); 12963 } 12964 12965 static uint32_t 12966 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) 12967 { 12968 uint32_t retval = 0; 12969 uint32_t val; 12970 12971 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 12972 12973 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { 12974 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { 12975 retval |= MF_PROTO_SUPPORT_ETHERNET; 12976 } 12977 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 12978 retval |= MF_PROTO_SUPPORT_ISCSI; 12979 } 12980 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 12981 retval |= MF_PROTO_SUPPORT_FCOE; 12982 } 12983 } 12984 12985 return (retval); 12986 } 12987 12988 static int 12989 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) 12990 { 12991 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 12992 uint32_t val; 12993 12994 /* 12995 * There is no outer vlan if we're in switch-independent mode. 12996 * If the mac is valid then assume multi-function. 12997 */ 12998 12999 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13000 13001 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 13002 13003 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13004 13005 mf_info->vnics_per_port = 13006 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13007 13008 return (0); 13009 } 13010 13011 static int 13012 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) 13013 { 13014 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13015 uint32_t e1hov_tag; 13016 uint32_t func_config; 13017 uint32_t niv_config; 13018 13019 mf_info->multi_vnics_mode = 1; 13020 13021 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13022 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13023 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 13024 13025 mf_info->ext_id = 13026 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 13027 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 13028 13029 mf_info->default_vlan = 13030 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 13031 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 13032 13033 mf_info->niv_allowed_priorities = 13034 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 13035 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 13036 13037 mf_info->niv_default_cos = 13038 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 13039 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 13040 13041 mf_info->afex_vlan_mode = 13042 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 13043 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 13044 13045 mf_info->niv_mba_enabled = 13046 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 13047 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 13048 13049 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13050 13051 mf_info->vnics_per_port = 13052 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 
2 : 4; 13053 13054 return (0); 13055 } 13056 13057 static int 13058 bxe_check_valid_mf_cfg(struct bxe_softc *sc) 13059 { 13060 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13061 uint32_t mf_cfg1; 13062 uint32_t mf_cfg2; 13063 uint32_t ovlan1; 13064 uint32_t ovlan2; 13065 uint8_t i, j; 13066 13067 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", 13068 SC_PORT(sc)); 13069 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", 13070 mf_info->mf_config[SC_VN(sc)]); 13071 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", 13072 mf_info->multi_vnics_mode); 13073 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", 13074 mf_info->vnics_per_port); 13075 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", 13076 mf_info->ext_id); 13077 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", 13078 mf_info->min_bw[0], mf_info->min_bw[1], 13079 mf_info->min_bw[2], mf_info->min_bw[3]); 13080 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", 13081 mf_info->max_bw[0], mf_info->max_bw[1], 13082 mf_info->max_bw[2], mf_info->max_bw[3]); 13083 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", 13084 sc->mac_addr_str); 13085 13086 /* various MF mode sanity checks... */ 13087 13088 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { 13089 BLOGE(sc, "Enumerated function %d is marked as hidden\n", 13090 SC_PORT(sc)); 13091 return (1); 13092 } 13093 13094 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { 13095 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n", 13096 mf_info->vnics_per_port, mf_info->multi_vnics_mode); 13097 return (1); 13098 } 13099 13100 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13101 /* vnic id > 0 must have valid ovlan in switch-dependent mode */ 13102 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { 13103 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n", 13104 SC_VN(sc), OVLAN(sc)); 13105 return (1); 13106 } 13107 13108 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { 13109 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n", 13110 mf_info->multi_vnics_mode, OVLAN(sc)); 13111 return (1); 13112 } 13113 13114 /* 13115 * Verify all functions are either MF or SF mode. If MF, make 13116 * sure that all non-hidden functions have a valid ovlan. If SF, 13117 * make sure that all non-hidden functions have an invalid ovlan. 13118 */ 13119 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13120 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13121 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13122 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13123 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) || 13124 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) { 13125 BLOGE(sc, "mf_mode=SD function %d MF config " 13126 "mismatch, multi_vnics_mode=%d ovlan=%d\n", 13127 i, mf_info->multi_vnics_mode, ovlan1); 13128 return (1); 13129 } 13130 } 13131 13132 /* Verify all funcs on the same port each have a different ovlan.
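         * Absolute function numbers alternate between the two ports, so
         * stepping j by two in the inner loop below visits only the
         * remaining functions on this same port.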
*/ 13133 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13134 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13135 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13136 /* iterate from the next function on the port to the max func */ 13137 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { 13138 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config); 13139 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag); 13140 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13141 VALID_OVLAN(ovlan1) && 13142 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) && 13143 VALID_OVLAN(ovlan2) && 13144 (ovlan1 == ovlan2)) { 13145 BLOGE(sc, "mf_mode=SD functions %d and %d " 13146 "have the same ovlan (%d)\n", 13147 i, j, ovlan1); 13148 return (1); 13149 } 13150 } 13151 } 13152 } /* MULTI_FUNCTION_SD */ 13153 13154 return (0); 13155 } 13156 13157 static int 13158 bxe_get_mf_cfg_info(struct bxe_softc *sc) 13159 { 13160 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13161 uint32_t val, mac_upper; 13162 uint8_t i, vnic; 13163 13164 /* initialize mf_info defaults */ 13165 mf_info->vnics_per_port = 1; 13166 mf_info->multi_vnics_mode = FALSE; 13167 mf_info->path_has_ovlan = FALSE; 13168 mf_info->mf_mode = SINGLE_FUNCTION; 13169 13170 if (!CHIP_IS_MF_CAP(sc)) { 13171 return (0); 13172 } 13173 13174 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { 13175 BLOGE(sc, "Invalid mf_cfg_base!\n"); 13176 return (1); 13177 } 13178 13179 /* get the MF mode (switch dependent / independent / single-function) */ 13180 13181 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13182 13183 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) 13184 { 13185 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 13186 13187 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13188 13189 /* check for legal upper mac bytes */ 13190 if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) { 13191 mf_info->mf_mode = MULTI_FUNCTION_SI; 13192 } else { 13193 BLOGE(sc, "Invalid config for Switch Independent mode\n"); 13194 } 13195 13196 break; 13197 13198 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 13199 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 13200 13201 /* get outer vlan configuration */ 13202 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13203 13204 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != 13205 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 13206 mf_info->mf_mode = MULTI_FUNCTION_SD; 13207 } else { 13208 BLOGE(sc, "Invalid config for Switch Dependent mode\n"); 13209 } 13210 13211 break; 13212 13213 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 13214 13215 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ 13216 return (0); 13217 13218 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 13219 13220 /* 13221 * Mark MF mode as NIV if MCP version includes NPAR-SD support 13222 * and the MAC address is valid. 
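 * SHMEM2_HAS(sc, afex_driver_support) is the NPAR-SD capability probe
 * used below; an upper MAC word equal to FUNC_MF_CFG_UPPERMAC_DEFAULT
 * is treated as "no address provisioned".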
13223 */ 13224 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13225 13226 if ((SHMEM2_HAS(sc, afex_driver_support)) && 13227 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { 13228 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 13229 } else { 13230 BLOGE(sc, "Invalid config for AFEX mode\n"); 13231 } 13232 13233 break; 13234 13235 default: 13236 13237 BLOGE(sc, "Unknown MF mode (0x%08x)\n", 13238 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); 13239 13240 return (1); 13241 } 13242 13243 /* set path mf_mode (which could be different from function mf_mode) */ 13244 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13245 mf_info->path_has_ovlan = TRUE; 13246 } else if (mf_info->mf_mode == SINGLE_FUNCTION) { 13247 /* 13248 * Decide on the path multi vnics mode. If we're not in MF mode and 13249 * we are in 4-port mode, it is enough to check vnic-0 of the other 13250 * port on the same path. 13251 */ 13252 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13253 uint8_t other_port = !(PORT_ID(sc) & 1); 13254 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); 13255 13256 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); 13257 13258 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0; 13259 } 13260 } 13261 13262 if (mf_info->mf_mode == SINGLE_FUNCTION) { 13263 /* invalid MF config */ 13264 if (SC_VN(sc) >= 1) { 13265 BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); 13266 return (1); 13267 } 13268 13269 return (0); 13270 } 13271 13272 /* get the MF configuration */ 13273 mf_info->mf_config[SC_VN(sc)] = 13274 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13275 13276 switch (mf_info->mf_mode) 13277 { 13278 case MULTI_FUNCTION_SD: 13279 13280 bxe_get_shmem_mf_cfg_info_sd(sc); 13281 break; 13282 13283 case MULTI_FUNCTION_SI: 13284 13285 bxe_get_shmem_mf_cfg_info_si(sc); 13286 break; 13287 13288 case MULTI_FUNCTION_AFEX: 13289 13290 bxe_get_shmem_mf_cfg_info_niv(sc); 13291 break; 13292 13293 default: 13294 13295 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", 13296 mf_info->mf_mode); 13297 return (1); 13298 } 13299 13300 /* get the congestion management parameters */ 13301 13302 vnic = 0; 13303 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13304 /* get min/max bw */ 13305 val = MFCFG_RD(sc, func_mf_config[i].config); 13306 mf_info->min_bw[vnic] = 13307 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); 13308 mf_info->max_bw[vnic] = 13309 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); 13310 vnic++; 13311 } 13312 13313 return (bxe_check_valid_mf_cfg(sc)); 13314 } 13315 13316 static int 13317 bxe_get_shmem_info(struct bxe_softc *sc) 13318 { 13319 int port; 13320 uint32_t mac_hi, mac_lo, val; 13321 13322 port = SC_PORT(sc); 13323 mac_hi = mac_lo = 0; 13324 13325 sc->link_params.sc = sc; 13326 sc->link_params.port = port; 13327 13328 /* get the hardware config info */ 13329 sc->devinfo.hw_config = 13330 SHMEM_RD(sc, dev_info.shared_hw_config.config); 13331 sc->devinfo.hw_config2 = 13332 SHMEM_RD(sc, dev_info.shared_hw_config.config2); 13333 13334 sc->link_params.hw_led_mode = 13335 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> 13336 SHARED_HW_CFG_LED_MODE_SHIFT); 13337 13338 /* get the port feature config */ 13339 sc->port.config = 13340 SHMEM_RD(sc, dev_info.port_feature_config[port].config); 13341 13342 /* get the link params */ 13343 sc->link_params.speed_cap_mask[0] = 13344 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); 13345 sc->link_params.speed_cap_mask[1] = 13346 SHMEM_RD(sc,
dev_info.port_hw_config[port].speed_capability_mask2); 13347 13348 /* get the lane config */ 13349 sc->link_params.lane_config = 13350 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); 13351 13352 /* get the link config */ 13353 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); 13354 sc->port.link_config[ELINK_INT_PHY] = val; 13355 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); 13356 sc->port.link_config[ELINK_EXT_PHY1] = 13357 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); 13358 13359 /* get the override preemphasis flag and enable it or turn it off */ 13360 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13361 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { 13362 sc->link_params.feature_config_flags |= 13363 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 13364 } else { 13365 sc->link_params.feature_config_flags &= 13366 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 13367 } 13368 13369 /* get the initial value of the link params */ 13370 sc->link_params.multi_phy_config = 13371 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); 13372 13373 /* get external phy info */ 13374 sc->port.ext_phy_config = 13375 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 13376 13377 /* get the multifunction configuration */ 13378 bxe_get_mf_cfg_info(sc); 13379 13380 /* get the mac address */ 13381 if (IS_MF(sc)) { 13382 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13383 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 13384 } else { 13385 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 13386 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 13387 } 13388 13389 if ((mac_lo == 0) && (mac_hi == 0)) { 13390 *sc->mac_addr_str = 0; 13391 BLOGE(sc, "No Ethernet address programmed!\n"); 13392 } else { 13393 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); 13394 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); 13395 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); 13396 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); 13397 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); 13398 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); 13399 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 13400 "%02x:%02x:%02x:%02x:%02x:%02x", 13401 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], 13402 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], 13403 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); 13404 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); 13405 } 13406 13407 return (0); 13408 } 13409 13410 static void 13411 bxe_get_tunable_params(struct bxe_softc *sc) 13412 { 13413 /* sanity checks */ 13414 13415 if ((bxe_interrupt_mode != INTR_MODE_INTX) && 13416 (bxe_interrupt_mode != INTR_MODE_MSI) && 13417 (bxe_interrupt_mode != INTR_MODE_MSIX)) { 13418 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); 13419 bxe_interrupt_mode = INTR_MODE_MSIX; 13420 } 13421 13422 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { 13423 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); 13424 bxe_queue_count = 0; 13425 } 13426 13427 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { 13428 if (bxe_max_rx_bufs == 0) { 13429 bxe_max_rx_bufs = RX_BD_USABLE; 13430 } else { 13431 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); 13432 bxe_max_rx_bufs = 2048; 13433 } 13434 } 13435 13436 if ((bxe_hc_rx_ticks < 1) || 
(bxe_hc_rx_ticks > 100)) { 13437 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); 13438 bxe_hc_rx_ticks = 25; 13439 } 13440 13441 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { 13442 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); 13443 bxe_hc_tx_ticks = 50; 13444 } 13445 13446 if (bxe_max_aggregation_size == 0) { 13447 bxe_max_aggregation_size = TPA_AGG_SIZE; 13448 } 13449 13450 if (bxe_max_aggregation_size > 0xffff) { 13451 BLOGW(sc, "invalid max_aggregation_size (%d)\n", 13452 bxe_max_aggregation_size); 13453 bxe_max_aggregation_size = TPA_AGG_SIZE; 13454 } 13455 13456 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { 13457 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); 13458 bxe_mrrs = -1; 13459 } 13460 13461 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { 13462 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); 13463 bxe_autogreeen = 0; 13464 } 13465 13466 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { 13467 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); 13468 bxe_udp_rss = 0; 13469 } 13470 13471 /* pull in user settings */ 13472 13473 sc->interrupt_mode = bxe_interrupt_mode; 13474 sc->max_rx_bufs = bxe_max_rx_bufs; 13475 sc->hc_rx_ticks = bxe_hc_rx_ticks; 13476 sc->hc_tx_ticks = bxe_hc_tx_ticks; 13477 sc->max_aggregation_size = bxe_max_aggregation_size; 13478 sc->mrrs = bxe_mrrs; 13479 sc->autogreeen = bxe_autogreeen; 13480 sc->udp_rss = bxe_udp_rss; 13481 13482 if (bxe_interrupt_mode == INTR_MODE_INTX) { 13483 sc->num_queues = 1; 13484 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ 13485 sc->num_queues = 13486 min((bxe_queue_count ? bxe_queue_count : mp_ncpus), 13487 MAX_RSS_CHAINS); 13488 if (sc->num_queues > mp_ncpus) { 13489 sc->num_queues = mp_ncpus; 13490 } 13491 } 13492 13493 BLOGD(sc, DBG_LOAD, 13494 "User Config: " 13495 "debug=0x%lx " 13496 "interrupt_mode=%d " 13497 "queue_count=%d " 13498 "hc_rx_ticks=%d " 13499 "hc_tx_ticks=%d " 13500 "rx_budget=%d " 13501 "max_aggregation_size=%d " 13502 "mrrs=%d " 13503 "autogreeen=%d " 13504 "udp_rss=%d\n", 13505 bxe_debug, 13506 sc->interrupt_mode, 13507 sc->num_queues, 13508 sc->hc_rx_ticks, 13509 sc->hc_tx_ticks, 13510 bxe_rx_budget, 13511 sc->max_aggregation_size, 13512 sc->mrrs, 13513 sc->autogreeen, 13514 sc->udp_rss); 13515 } 13516 13517 static int 13518 bxe_media_detect(struct bxe_softc *sc) 13519 { 13520 int port_type; 13521 uint32_t phy_idx = bxe_get_cur_phy_idx(sc); 13522 13523 switch (sc->link_params.phy[phy_idx].media_type) { 13524 case ELINK_ETH_PHY_SFPP_10G_FIBER: 13525 case ELINK_ETH_PHY_XFP_FIBER: 13526 BLOGI(sc, "Found 10Gb Fiber media.\n"); 13527 sc->media = IFM_10G_SR; 13528 port_type = PORT_FIBRE; 13529 break; 13530 case ELINK_ETH_PHY_SFP_1G_FIBER: 13531 BLOGI(sc, "Found 1Gb Fiber media.\n"); 13532 sc->media = IFM_1000_SX; 13533 port_type = PORT_FIBRE; 13534 break; 13535 case ELINK_ETH_PHY_KR: 13536 case ELINK_ETH_PHY_CX4: 13537 BLOGI(sc, "Found 10GBase-CX4 media.\n"); 13538 sc->media = IFM_10G_CX4; 13539 port_type = PORT_FIBRE; 13540 break; 13541 case ELINK_ETH_PHY_DA_TWINAX: 13542 BLOGI(sc, "Found 10Gb Twinax media.\n"); 13543 sc->media = IFM_10G_TWINAX; 13544 port_type = PORT_DA; 13545 break; 13546 case ELINK_ETH_PHY_BASE_T: 13547 if (sc->link_params.speed_cap_mask[0] & 13548 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 13549 BLOGI(sc, "Found 10GBase-T media.\n"); 13550 sc->media = IFM_10G_T; 13551 port_type = PORT_TP; 13552 } else { 13553 BLOGI(sc, "Found 1000Base-T media.\n"); 13554 sc->media = IFM_1000_T; 13555 port_type = PORT_TP; 13556 } 13557 break; 13558 case 
ELINK_ETH_PHY_NOT_PRESENT: 13559 BLOGI(sc, "Media not present.\n"); 13560 sc->media = 0; 13561 port_type = PORT_OTHER; 13562 break; 13563 case ELINK_ETH_PHY_UNSPECIFIED: 13564 default: 13565 BLOGI(sc, "Unknown media!\n"); 13566 sc->media = 0; 13567 port_type = PORT_OTHER; 13568 break; 13569 } 13570 return port_type; 13571 } 13572 13573 #define GET_FIELD(value, fname) \ 13574 (((value) & (fname##_MASK)) >> (fname##_SHIFT)) 13575 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 13576 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 13577 13578 static int 13579 bxe_get_igu_cam_info(struct bxe_softc *sc) 13580 { 13581 int pfid = SC_FUNC(sc); 13582 int igu_sb_id; 13583 uint32_t val; 13584 uint8_t fid, igu_sb_cnt = 0; 13585 13586 sc->igu_base_sb = 0xff; 13587 13588 if (CHIP_INT_MODE_IS_BC(sc)) { 13589 int vn = SC_VN(sc); 13590 igu_sb_cnt = sc->igu_sb_cnt; 13591 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * 13592 FP_SB_MAX_E1x); 13593 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + 13594 (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn)); 13595 return (0); 13596 } 13597 13598 /* IGU in normal mode - read CAM */ 13599 for (igu_sb_id = 0; 13600 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; 13601 igu_sb_id++) { 13602 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 13603 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { 13604 continue; 13605 } 13606 fid = IGU_FID(val); 13607 if ((fid & IGU_FID_ENCODE_IS_PF)) { 13608 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { 13609 continue; 13610 } 13611 if (IGU_VEC(val) == 0) { 13612 /* default status block */ 13613 sc->igu_dsb_id = igu_sb_id; 13614 } else { 13615 if (sc->igu_base_sb == 0xff) { 13616 sc->igu_base_sb = igu_sb_id; 13617 } 13618 igu_sb_cnt++; 13619 } 13620 } 13621 } 13622 13623 /* 13624 * Due to new PF resource allocation by MFW T7.4 and above, it's possible 13625 * that the number of CAM entries will not be equal to the value advertised 13626 * in PCI. The driver should use the minimal value of the two as the actual 13627 * status block count. 13628 */ 13629 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); 13630 13631 if (igu_sb_cnt == 0) { 13632 BLOGE(sc, "CAM configuration error\n"); 13633 return (-1); 13634 } 13635 13636 return (0); 13637 } 13638 13639 /* 13640 * Gather various information from the device config space, the device itself, 13641 * shmem, and the user input.
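 * If the shmem base register reads as zero the MCP is taken to be dead:
 * BXE_NO_MCP_FLAG is set and all further shmem probing is skipped.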
13642 */ 13643 static int 13644 bxe_get_device_info(struct bxe_softc *sc) 13645 { 13646 uint32_t val; 13647 int rc; 13648 13649 /* Get the data for the device */ 13650 sc->devinfo.vendor_id = pci_get_vendor(sc->dev); 13651 sc->devinfo.device_id = pci_get_device(sc->dev); 13652 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev); 13653 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev); 13654 13655 /* get the chip revision (chip metal comes from pci config space) */ 13656 sc->devinfo.chip_id = 13657 sc->link_params.chip_id = 13658 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | 13659 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | 13660 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | 13661 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); 13662 13663 /* force 57811 according to MISC register */ 13664 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 13665 if (CHIP_IS_57810(sc)) { 13666 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | 13667 (sc->devinfo.chip_id & 0x0000ffff)); 13668 } else if (CHIP_IS_57810_MF(sc)) { 13669 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | 13670 (sc->devinfo.chip_id & 0x0000ffff)); 13671 } 13672 sc->devinfo.chip_id |= 0x1; 13673 } 13674 13675 BLOGD(sc, DBG_LOAD, 13676 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n", 13677 sc->devinfo.chip_id, 13678 ((sc->devinfo.chip_id >> 16) & 0xffff), 13679 ((sc->devinfo.chip_id >> 12) & 0xf), 13680 ((sc->devinfo.chip_id >> 4) & 0xff), 13681 ((sc->devinfo.chip_id >> 0) & 0xf)); 13682 13683 val = (REG_RD(sc, 0x2874) & 0x55); 13684 if ((sc->devinfo.chip_id & 0x1) || 13685 (CHIP_IS_E1(sc) && val) || 13686 (CHIP_IS_E1H(sc) && (val == 0x55))) { 13687 sc->flags |= BXE_ONE_PORT_FLAG; 13688 BLOGD(sc, DBG_LOAD, "single port device\n"); 13689 } 13690 13691 /* set the doorbell size */ 13692 sc->doorbell_size = (1 << BXE_DB_SHIFT); 13693 13694 /* determine whether the device is in 2 port or 4 port mode */ 13695 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/ 13696 if (CHIP_IS_E2E3(sc)) { 13697 /* 13698 * Read port4mode_en_ovwr[0]: 13699 * If 1, four port mode is in port4mode_en_ovwr[1]. 13700 * If 0, four port mode is in port4mode_en[0]. 13701 */ 13702 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); 13703 if (val & 1) { 13704 val = ((val >> 1) & 1); 13705 } else { 13706 val = REG_RD(sc, MISC_REG_PORT4MODE_EN); 13707 } 13708 13709 sc->devinfo.chip_port_mode = 13710 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; 13711 13712 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); 13713 } 13714 13715 /* get the function and path info for the device */ 13716 bxe_get_function_num(sc); 13717 13718 /* get the shared memory base address */ 13719 sc->devinfo.shmem_base = 13720 sc->link_params.shmem_base = 13721 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 13722 sc->devinfo.shmem2_base = 13723 REG_RD(sc, (SC_PATH(sc) ? 
MISC_REG_GENERIC_CR_1 :
13724                            MISC_REG_GENERIC_CR_0));
13725
13726     BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13727           sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13728
13729     if (!sc->devinfo.shmem_base) {
13730         /* this should ONLY prevent upcoming shmem reads */
13731         BLOGI(sc, "MCP not active\n");
13732         sc->flags |= BXE_NO_MCP_FLAG;
13733         return (0);
13734     }
13735
13736     /* make sure the shared memory contents are valid */
13737     val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13738     if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13739         (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13740         BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13741         return (0);
13742     }
13743     BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13744
13745     /* get the bootcode version */
13746     sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13747     snprintf(sc->devinfo.bc_ver_str,
13748              sizeof(sc->devinfo.bc_ver_str),
13749              "%d.%d.%d",
13750              ((sc->devinfo.bc_ver >> 24) & 0xff),
13751              ((sc->devinfo.bc_ver >> 16) & 0xff),
13752              ((sc->devinfo.bc_ver >> 8) & 0xff));
13753     BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13754
13755     /* get the multi-function config base address from shmem */
13756     sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13757     BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
13758
13759     /* clean indirect addresses as they're not used */
13760     pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13761     if (IS_PF(sc)) {
13762         REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13763         REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13764         REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13765         REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13766         if (CHIP_IS_E1x(sc)) {
13767             REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13768             REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13769             REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13770             REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13771         }
13772
13773         /*
13774          * Enable internal target-read (in case we are probed after PF
13775          * FLR). Must be done prior to any BAR read access.
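An FLR can leave
         * internal target-read disabled, which is why the REG_WR below
         * sets PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ back to 1.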
Only for
13776          * 57712 and up
13777          */
13778         if (!CHIP_IS_E1x(sc)) {
13779             REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13780         }
13781     }
13782
13783     /* get the nvram size */
13784     val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13785     sc->devinfo.flash_size =
13786         (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13787     BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
13788
13789     /* get PCI capabilities */
13790     bxe_probe_pci_caps(sc);
13791
13792     bxe_set_power_state(sc, PCI_PM_D0);
13793
13794     /* get various configuration parameters from shmem */
13795     bxe_get_shmem_info(sc);
13796
13797     if (sc->devinfo.pcie_msix_cap_reg != 0) {
13798         val = pci_read_config(sc->dev,
13799                               (sc->devinfo.pcie_msix_cap_reg +
13800                                PCIR_MSIX_CTRL),
13801                               2);
13802         sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13803     } else {
13804         sc->igu_sb_cnt = 1;
13805     }
13806
13807     sc->igu_base_addr = BAR_IGU_INTMEM;
13808
13809     /* initialize IGU parameters */
13810     if (CHIP_IS_E1x(sc)) {
13811         sc->devinfo.int_block = INT_BLOCK_HC;
13812         sc->igu_dsb_id = DEF_SB_IGU_ID;
13813         sc->igu_base_sb = 0;
13814     } else {
13815         sc->devinfo.int_block = INT_BLOCK_IGU;
13816
13817         /* do not allow device reset during IGU info processing */
13818         bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13819
13820         val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13821
13822         if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13823             int tout = 5000;
13824
13825             BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13826
13827             val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13828             REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13829             REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13830
13831             while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13832                 tout--;
13833                 DELAY(1000);
13834             }
13835
13836             if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13837                 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13838                 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13839                 return (-1);
13840             }
13841         }
13842
13843         if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13844             BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13845             sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13846         } else {
13847             BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13848         }
13849
13850         rc = bxe_get_igu_cam_info(sc);
13851
13852         bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13853
13854         if (rc) {
13855             return (rc);
13856         }
13857     }
13858
13859     /*
13860      * Get base FW non-default (fast path) status block ID. This value is
13861      * used to initialize the fw_sb_id saved on the fp/queue structure to
13862      * determine the id used by the FW.
13863      */
13864     if (CHIP_IS_E1x(sc)) {
13865         sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13866     } else {
13867         /*
13868          * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13869          * the same queue are indicated on the same IGU SB). So we prefer
13870          * FW and IGU SBs to be the same value.
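As an
         * illustrative consequence (assuming the usual per-queue layout),
         * fastpath i then gets fw_sb_id == igu_sb_id == igu_base_sb + i.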
13871 */ 13872 sc->base_fw_ndsb = sc->igu_base_sb; 13873 } 13874 13875 BLOGD(sc, DBG_LOAD, 13876 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", 13877 sc->igu_dsb_id, sc->igu_base_sb, 13878 sc->igu_sb_cnt, sc->base_fw_ndsb); 13879 13880 elink_phy_probe(&sc->link_params); 13881 13882 return (0); 13883 } 13884 13885 static void 13886 bxe_link_settings_supported(struct bxe_softc *sc, 13887 uint32_t switch_cfg) 13888 { 13889 uint32_t cfg_size = 0; 13890 uint32_t idx; 13891 uint8_t port = SC_PORT(sc); 13892 13893 /* aggregation of supported attributes of all external phys */ 13894 sc->port.supported[0] = 0; 13895 sc->port.supported[1] = 0; 13896 13897 switch (sc->link_params.num_phys) { 13898 case 1: 13899 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; 13900 cfg_size = 1; 13901 break; 13902 case 2: 13903 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; 13904 cfg_size = 1; 13905 break; 13906 case 3: 13907 if (sc->link_params.multi_phy_config & 13908 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 13909 sc->port.supported[1] = 13910 sc->link_params.phy[ELINK_EXT_PHY1].supported; 13911 sc->port.supported[0] = 13912 sc->link_params.phy[ELINK_EXT_PHY2].supported; 13913 } else { 13914 sc->port.supported[0] = 13915 sc->link_params.phy[ELINK_EXT_PHY1].supported; 13916 sc->port.supported[1] = 13917 sc->link_params.phy[ELINK_EXT_PHY2].supported; 13918 } 13919 cfg_size = 2; 13920 break; 13921 } 13922 13923 if (!(sc->port.supported[0] || sc->port.supported[1])) { 13924 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", 13925 SHMEM_RD(sc, 13926 dev_info.port_hw_config[port].external_phy_config), 13927 SHMEM_RD(sc, 13928 dev_info.port_hw_config[port].external_phy_config2)); 13929 return; 13930 } 13931 13932 if (CHIP_IS_E3(sc)) 13933 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 13934 else { 13935 switch (switch_cfg) { 13936 case ELINK_SWITCH_CFG_1G: 13937 sc->port.phy_addr = 13938 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 13939 break; 13940 case ELINK_SWITCH_CFG_10G: 13941 sc->port.phy_addr = 13942 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 13943 break; 13944 default: 13945 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", 13946 sc->port.link_config[0]); 13947 return; 13948 } 13949 } 13950 13951 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); 13952 13953 /* mask what we support according to speed_cap_mask per configuration */ 13954 for (idx = 0; idx < cfg_size; idx++) { 13955 if (!(sc->link_params.speed_cap_mask[idx] & 13956 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 13957 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; 13958 } 13959 13960 if (!(sc->link_params.speed_cap_mask[idx] & 13961 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 13962 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; 13963 } 13964 13965 if (!(sc->link_params.speed_cap_mask[idx] & 13966 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 13967 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; 13968 } 13969 13970 if (!(sc->link_params.speed_cap_mask[idx] & 13971 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 13972 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; 13973 } 13974 13975 if (!(sc->link_params.speed_cap_mask[idx] & 13976 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 13977 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; 13978 } 13979 13980 if (!(sc->link_params.speed_cap_mask[idx] & 13981 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 13982 sc->port.supported[idx] &= 
~ELINK_SUPPORTED_2500baseX_Full; 13983 } 13984 13985 if (!(sc->link_params.speed_cap_mask[idx] & 13986 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 13987 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; 13988 } 13989 13990 if (!(sc->link_params.speed_cap_mask[idx] & 13991 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 13992 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; 13993 } 13994 } 13995 13996 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", 13997 sc->port.supported[0], sc->port.supported[1]); 13998 } 13999 14000 static void 14001 bxe_link_settings_requested(struct bxe_softc *sc) 14002 { 14003 uint32_t link_config; 14004 uint32_t idx; 14005 uint32_t cfg_size = 0; 14006 14007 sc->port.advertising[0] = 0; 14008 sc->port.advertising[1] = 0; 14009 14010 switch (sc->link_params.num_phys) { 14011 case 1: 14012 case 2: 14013 cfg_size = 1; 14014 break; 14015 case 3: 14016 cfg_size = 2; 14017 break; 14018 } 14019 14020 for (idx = 0; idx < cfg_size; idx++) { 14021 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 14022 link_config = sc->port.link_config[idx]; 14023 14024 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 14025 case PORT_FEATURE_LINK_SPEED_AUTO: 14026 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 14027 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14028 sc->port.advertising[idx] |= sc->port.supported[idx]; 14029 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 14030 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 14031 sc->port.advertising[idx] |= 14032 (ELINK_SUPPORTED_100baseT_Half | 14033 ELINK_SUPPORTED_100baseT_Full); 14034 } else { 14035 /* force 10G, no AN */ 14036 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14037 sc->port.advertising[idx] |= 14038 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 14039 continue; 14040 } 14041 break; 14042 14043 case PORT_FEATURE_LINK_SPEED_10M_FULL: 14044 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { 14045 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14046 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | 14047 ADVERTISED_TP); 14048 } else { 14049 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14050 "speed_cap_mask=0x%08x\n", 14051 link_config, sc->link_params.speed_cap_mask[idx]); 14052 return; 14053 } 14054 break; 14055 14056 case PORT_FEATURE_LINK_SPEED_10M_HALF: 14057 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { 14058 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14059 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14060 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | 14061 ADVERTISED_TP); 14062 } else { 14063 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14064 "speed_cap_mask=0x%08x\n", 14065 link_config, sc->link_params.speed_cap_mask[idx]); 14066 return; 14067 } 14068 break; 14069 14070 case PORT_FEATURE_LINK_SPEED_100M_FULL: 14071 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { 14072 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14073 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | 14074 ADVERTISED_TP); 14075 } else { 14076 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14077 "speed_cap_mask=0x%08x\n", 14078 link_config, sc->link_params.speed_cap_mask[idx]); 14079 return; 14080 } 14081 break; 14082 14083 case PORT_FEATURE_LINK_SPEED_100M_HALF: 14084 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { 14085 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14086 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14087 sc->port.advertising[idx] |= 
(ADVERTISED_100baseT_Half | 14088 ADVERTISED_TP); 14089 } else { 14090 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14091 "speed_cap_mask=0x%08x\n", 14092 link_config, sc->link_params.speed_cap_mask[idx]); 14093 return; 14094 } 14095 break; 14096 14097 case PORT_FEATURE_LINK_SPEED_1G: 14098 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { 14099 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; 14100 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | 14101 ADVERTISED_TP); 14102 } else { 14103 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14104 "speed_cap_mask=0x%08x\n", 14105 link_config, sc->link_params.speed_cap_mask[idx]); 14106 return; 14107 } 14108 break; 14109 14110 case PORT_FEATURE_LINK_SPEED_2_5G: 14111 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { 14112 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; 14113 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | 14114 ADVERTISED_TP); 14115 } else { 14116 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14117 "speed_cap_mask=0x%08x\n", 14118 link_config, sc->link_params.speed_cap_mask[idx]); 14119 return; 14120 } 14121 break; 14122 14123 case PORT_FEATURE_LINK_SPEED_10G_CX4: 14124 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { 14125 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14126 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | 14127 ADVERTISED_FIBRE); 14128 } else { 14129 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14130 "speed_cap_mask=0x%08x\n", 14131 link_config, sc->link_params.speed_cap_mask[idx]); 14132 return; 14133 } 14134 break; 14135 14136 case PORT_FEATURE_LINK_SPEED_20G: 14137 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 14138 break; 14139 14140 default: 14141 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14142 "speed_cap_mask=0x%08x\n", 14143 link_config, sc->link_params.speed_cap_mask[idx]); 14144 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14145 sc->port.advertising[idx] = sc->port.supported[idx]; 14146 break; 14147 } 14148 14149 sc->link_params.req_flow_ctrl[idx] = 14150 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 14151 14152 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 14153 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 14154 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; 14155 } else { 14156 bxe_set_requested_fc(sc); 14157 } 14158 } 14159 14160 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " 14161 "req_flow_ctrl=0x%x advertising=0x%x\n", 14162 sc->link_params.req_line_speed[idx], 14163 sc->link_params.req_duplex[idx], 14164 sc->link_params.req_flow_ctrl[idx], 14165 sc->port.advertising[idx]); 14166 } 14167 } 14168 14169 static void 14170 bxe_get_phy_info(struct bxe_softc *sc) 14171 { 14172 uint8_t port = SC_PORT(sc); 14173 uint32_t config = sc->port.config; 14174 uint32_t eee_mode; 14175 14176 /* shmem data already read in bxe_get_shmem_info() */ 14177 14178 BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x " 14179 "link_config0=0x%08x\n", 14180 sc->link_params.lane_config, 14181 sc->link_params.speed_cap_mask[0], 14182 sc->port.link_config[0]); 14183 14184 bxe_link_settings_supported(sc, sc->link_params.switch_cfg); 14185 bxe_link_settings_requested(sc); 14186 14187 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { 14188 sc->link_params.feature_config_flags |= 14189 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14190 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { 14191 
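/* AutoGrEEEn explicitly forced off by the user; clear the feature flag */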
sc->link_params.feature_config_flags &= 14192 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14193 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { 14194 sc->link_params.feature_config_flags |= 14195 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14196 } 14197 14198 /* configure link feature according to nvram value */ 14199 eee_mode = 14200 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) & 14201 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 14202 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 14203 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 14204 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | 14205 ELINK_EEE_MODE_ENABLE_LPI | 14206 ELINK_EEE_MODE_OUTPUT_TIME); 14207 } else { 14208 sc->link_params.eee_mode = 0; 14209 } 14210 14211 /* get the media type */ 14212 bxe_media_detect(sc); 14213 } 14214 14215 static void 14216 bxe_get_params(struct bxe_softc *sc) 14217 { 14218 /* get user tunable params */ 14219 bxe_get_tunable_params(sc); 14220 14221 /* select the RX and TX ring sizes */ 14222 sc->tx_ring_size = TX_BD_USABLE; 14223 sc->rx_ring_size = RX_BD_USABLE; 14224 14225 /* XXX disable WoL */ 14226 sc->wol = 0; 14227 } 14228 14229 static void 14230 bxe_set_modes_bitmap(struct bxe_softc *sc) 14231 { 14232 uint32_t flags = 0; 14233 14234 if (CHIP_REV_IS_FPGA(sc)) { 14235 SET_FLAGS(flags, MODE_FPGA); 14236 } else if (CHIP_REV_IS_EMUL(sc)) { 14237 SET_FLAGS(flags, MODE_EMUL); 14238 } else { 14239 SET_FLAGS(flags, MODE_ASIC); 14240 } 14241 14242 if (CHIP_IS_MODE_4_PORT(sc)) { 14243 SET_FLAGS(flags, MODE_PORT4); 14244 } else { 14245 SET_FLAGS(flags, MODE_PORT2); 14246 } 14247 14248 if (CHIP_IS_E2(sc)) { 14249 SET_FLAGS(flags, MODE_E2); 14250 } else if (CHIP_IS_E3(sc)) { 14251 SET_FLAGS(flags, MODE_E3); 14252 if (CHIP_REV(sc) == CHIP_REV_Ax) { 14253 SET_FLAGS(flags, MODE_E3_A0); 14254 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ { 14255 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 14256 } 14257 } 14258 14259 if (IS_MF(sc)) { 14260 SET_FLAGS(flags, MODE_MF); 14261 switch (sc->devinfo.mf_info.mf_mode) { 14262 case MULTI_FUNCTION_SD: 14263 SET_FLAGS(flags, MODE_MF_SD); 14264 break; 14265 case MULTI_FUNCTION_SI: 14266 SET_FLAGS(flags, MODE_MF_SI); 14267 break; 14268 case MULTI_FUNCTION_AFEX: 14269 SET_FLAGS(flags, MODE_MF_AFEX); 14270 break; 14271 } 14272 } else { 14273 SET_FLAGS(flags, MODE_SF); 14274 } 14275 14276 #if defined(__LITTLE_ENDIAN) 14277 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 14278 #else /* __BIG_ENDIAN */ 14279 SET_FLAGS(flags, MODE_BIG_ENDIAN); 14280 #endif 14281 14282 INIT_MODE_FLAGS(sc) = flags; 14283 } 14284 14285 static int 14286 bxe_alloc_hsi_mem(struct bxe_softc *sc) 14287 { 14288 struct bxe_fastpath *fp; 14289 bus_addr_t busaddr; 14290 int max_agg_queues; 14291 int max_segments; 14292 bus_size_t max_size; 14293 bus_size_t max_seg_size; 14294 char buf[32]; 14295 int rc; 14296 int i, j; 14297 14298 /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */ 14299 14300 /* allocate the parent bus DMA tag */ 14301 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */ 14302 1, /* alignment */ 14303 0, /* boundary limit */ 14304 BUS_SPACE_MAXADDR, /* restricted low */ 14305 BUS_SPACE_MAXADDR, /* restricted hi */ 14306 NULL, /* addr filter() */ 14307 NULL, /* addr filter() arg */ 14308 BUS_SPACE_MAXSIZE_32BIT, /* max map size */ 14309 BUS_SPACE_UNRESTRICTED, /* num discontinuous */ 14310 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ 14311 0, /* flags */ 14312 NULL, /* lock() */ 14313 NULL, /* lock() arg */ 14314 &sc->parent_dma_tag); /* returned dma tag 
*/ 14315 if (rc != 0) { 14316 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); 14317 return (1); 14318 } 14319 14320 /************************/ 14321 /* DEFAULT STATUS BLOCK */ 14322 /************************/ 14323 14324 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), 14325 &sc->def_sb_dma, "default status block") != 0) { 14326 /* XXX */ 14327 bus_dma_tag_destroy(sc->parent_dma_tag); 14328 return (1); 14329 } 14330 14331 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 14332 14333 /***************/ 14334 /* EVENT QUEUE */ 14335 /***************/ 14336 14337 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14338 &sc->eq_dma, "event queue") != 0) { 14339 /* XXX */ 14340 bxe_dma_free(sc, &sc->def_sb_dma); 14341 sc->def_sb = NULL; 14342 bus_dma_tag_destroy(sc->parent_dma_tag); 14343 return (1); 14344 } 14345 14346 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; 14347 14348 /*************/ 14349 /* SLOW PATH */ 14350 /*************/ 14351 14352 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), 14353 &sc->sp_dma, "slow path") != 0) { 14354 /* XXX */ 14355 bxe_dma_free(sc, &sc->eq_dma); 14356 sc->eq = NULL; 14357 bxe_dma_free(sc, &sc->def_sb_dma); 14358 sc->def_sb = NULL; 14359 bus_dma_tag_destroy(sc->parent_dma_tag); 14360 return (1); 14361 } 14362 14363 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; 14364 14365 /*******************/ 14366 /* SLOW PATH QUEUE */ 14367 /*******************/ 14368 14369 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14370 &sc->spq_dma, "slow path queue") != 0) { 14371 /* XXX */ 14372 bxe_dma_free(sc, &sc->sp_dma); 14373 sc->sp = NULL; 14374 bxe_dma_free(sc, &sc->eq_dma); 14375 sc->eq = NULL; 14376 bxe_dma_free(sc, &sc->def_sb_dma); 14377 sc->def_sb = NULL; 14378 bus_dma_tag_destroy(sc->parent_dma_tag); 14379 return (1); 14380 } 14381 14382 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 14383 14384 /***************************/ 14385 /* FW DECOMPRESSION BUFFER */ 14386 /***************************/ 14387 14388 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 14389 "fw decompression buffer") != 0) { 14390 /* XXX */ 14391 bxe_dma_free(sc, &sc->spq_dma); 14392 sc->spq = NULL; 14393 bxe_dma_free(sc, &sc->sp_dma); 14394 sc->sp = NULL; 14395 bxe_dma_free(sc, &sc->eq_dma); 14396 sc->eq = NULL; 14397 bxe_dma_free(sc, &sc->def_sb_dma); 14398 sc->def_sb = NULL; 14399 bus_dma_tag_destroy(sc->parent_dma_tag); 14400 return (1); 14401 } 14402 14403 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 14404 14405 if ((sc->gz_strm = 14406 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { 14407 /* XXX */ 14408 bxe_dma_free(sc, &sc->gz_buf_dma); 14409 sc->gz_buf = NULL; 14410 bxe_dma_free(sc, &sc->spq_dma); 14411 sc->spq = NULL; 14412 bxe_dma_free(sc, &sc->sp_dma); 14413 sc->sp = NULL; 14414 bxe_dma_free(sc, &sc->eq_dma); 14415 sc->eq = NULL; 14416 bxe_dma_free(sc, &sc->def_sb_dma); 14417 sc->def_sb = NULL; 14418 bus_dma_tag_destroy(sc->parent_dma_tag); 14419 return (1); 14420 } 14421 14422 /*************/ 14423 /* FASTPATHS */ 14424 /*************/ 14425 14426 /* allocate DMA memory for each fastpath structure */ 14427 for (i = 0; i < sc->num_queues; i++) { 14428 fp = &sc->fp[i]; 14429 fp->sc = sc; 14430 fp->index = i; 14431 14432 /*******************/ 14433 /* FP STATUS BLOCK */ 14434 /*******************/ 14435 14436 snprintf(buf, sizeof(buf), "fp %d status block", i); 14437 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), 14438 &fp->sb_dma, buf) != 0) { 14439 /* XXX unwind and free previous fastpath allocations */ 14440 BLOGE(sc, "Failed 
to alloc %s\n", buf); 14441 return (1); 14442 } else { 14443 if (CHIP_IS_E2E3(sc)) { 14444 fp->status_block.e2_sb = 14445 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; 14446 } else { 14447 fp->status_block.e1x_sb = 14448 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; 14449 } 14450 } 14451 14452 /******************/ 14453 /* FP TX BD CHAIN */ 14454 /******************/ 14455 14456 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); 14457 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), 14458 &fp->tx_dma, buf) != 0) { 14459 /* XXX unwind and free previous fastpath allocations */ 14460 BLOGE(sc, "Failed to alloc %s\n", buf); 14461 return (1); 14462 } else { 14463 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; 14464 } 14465 14466 /* link together the tx bd chain pages */ 14467 for (j = 1; j <= TX_BD_NUM_PAGES; j++) { 14468 /* index into the tx bd chain array to last entry per page */ 14469 struct eth_tx_next_bd *tx_next_bd = 14470 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; 14471 /* point to the next page and wrap from last page */ 14472 busaddr = (fp->tx_dma.paddr + 14473 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); 14474 tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); 14475 tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); 14476 } 14477 14478 /******************/ 14479 /* FP RX BD CHAIN */ 14480 /******************/ 14481 14482 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); 14483 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), 14484 &fp->rx_dma, buf) != 0) { 14485 /* XXX unwind and free previous fastpath allocations */ 14486 BLOGE(sc, "Failed to alloc %s\n", buf); 14487 return (1); 14488 } else { 14489 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; 14490 } 14491 14492 /* link together the rx bd chain pages */ 14493 for (j = 1; j <= RX_BD_NUM_PAGES; j++) { 14494 /* index into the rx bd chain array to last entry per page */ 14495 struct eth_rx_bd *rx_bd = 14496 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; 14497 /* point to the next page and wrap from last page */ 14498 busaddr = (fp->rx_dma.paddr + 14499 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); 14500 rx_bd->addr_hi = htole32(U64_HI(busaddr)); 14501 rx_bd->addr_lo = htole32(U64_LO(busaddr)); 14502 } 14503 14504 /*******************/ 14505 /* FP RX RCQ CHAIN */ 14506 /*******************/ 14507 14508 snprintf(buf, sizeof(buf), "fp %d rcq chain", i); 14509 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), 14510 &fp->rcq_dma, buf) != 0) { 14511 /* XXX unwind and free previous fastpath allocations */ 14512 BLOGE(sc, "Failed to alloc %s\n", buf); 14513 return (1); 14514 } else { 14515 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; 14516 } 14517 14518 /* link together the rcq chain pages */ 14519 for (j = 1; j <= RCQ_NUM_PAGES; j++) { 14520 /* index into the rcq chain array to last entry per page */ 14521 struct eth_rx_cqe_next_page *rx_cqe_next = 14522 (struct eth_rx_cqe_next_page *) 14523 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; 14524 /* point to the next page and wrap from last page */ 14525 busaddr = (fp->rcq_dma.paddr + 14526 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); 14527 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); 14528 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); 14529 } 14530 14531 /*******************/ 14532 /* FP RX SGE CHAIN */ 14533 /*******************/ 14534 14535 snprintf(buf, sizeof(buf), "fp %d sge chain", i); 14536 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), 14537 &fp->rx_sge_dma, buf) != 0) { 14538 /* XXX unwind and free previous fastpath 
allocations */
14539             BLOGE(sc, "Failed to alloc %s\n", buf);
14540             return (1);
14541         } else {
14542             fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14543         }
14544
14545         /* link together the sge chain pages */
14546         for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14547             /* index into the sge chain array to last entry per page */
14548             struct eth_rx_sge *rx_sge =
14549                 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14550             /* point to the next page and wrap from last page */
14551             busaddr = (fp->rx_sge_dma.paddr +
14552                        (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14553             rx_sge->addr_hi = htole32(U64_HI(busaddr));
14554             rx_sge->addr_lo = htole32(U64_LO(busaddr));
14555         }
14556
14557         /***********************/
14558         /* FP TX MBUF DMA MAPS */
14559         /***********************/
14560
14561         /* set required sizes before mapping to conserve resources */
14562         if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14563             max_size = BXE_TSO_MAX_SIZE;
14564             max_segments = BXE_TSO_MAX_SEGMENTS;
14565             max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14566         } else {
14567             max_size = (MCLBYTES * BXE_MAX_SEGMENTS);
14568             max_segments = BXE_MAX_SEGMENTS;
14569             max_seg_size = MCLBYTES;
14570         }
14571
14572         /* create a dma tag for the tx mbufs */
14573         rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14574                                 1,                  /* alignment */
14575                                 0,                  /* boundary limit */
14576                                 BUS_SPACE_MAXADDR,  /* restricted low */
14577                                 BUS_SPACE_MAXADDR,  /* restricted hi */
14578                                 NULL,               /* addr filter() */
14579                                 NULL,               /* addr filter() arg */
14580                                 max_size,           /* max map size */
14581                                 max_segments,       /* num discontinuous */
14582                                 max_seg_size,       /* max seg size */
14583                                 0,                  /* flags */
14584                                 NULL,               /* lock() */
14585                                 NULL,               /* lock() arg */
14586                                 &fp->tx_mbuf_tag);  /* returned dma tag */
14587         if (rc != 0) {
14588             /* XXX unwind and free previous fastpath allocations */
14589             BLOGE(sc, "Failed to create dma tag for "
14590                       "'fp %d tx mbufs' (%d)\n", i, rc);
14591             return (1);
14592         }
14593
14594         /* create dma maps for each of the tx mbuf clusters */
14595         for (j = 0; j < TX_BD_TOTAL; j++) {
14596             if (bus_dmamap_create(fp->tx_mbuf_tag,
14597                                   BUS_DMA_NOWAIT,
14598                                   &fp->tx_mbuf_chain[j].m_map)) {
14599                 /* XXX unwind and free previous fastpath allocations */
14600                 BLOGE(sc, "Failed to create dma map for "
14601                           "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14602                 return (1);
14603             }
14604         }
14605
14606         /***********************/
14607         /* FP RX MBUF DMA MAPS */
14608         /***********************/
14609
14610         /* create a dma tag for the rx mbufs */
14611         rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14612                                 1,                  /* alignment */
14613                                 0,                  /* boundary limit */
14614                                 BUS_SPACE_MAXADDR,  /* restricted low */
14615                                 BUS_SPACE_MAXADDR,  /* restricted hi */
14616                                 NULL,               /* addr filter() */
14617                                 NULL,               /* addr filter() arg */
14618                                 MJUM9BYTES,         /* max map size */
14619                                 1,                  /* num discontinuous */
14620                                 MJUM9BYTES,         /* max seg size */
14621                                 0,                  /* flags */
14622                                 NULL,               /* lock() */
14623                                 NULL,               /* lock() arg */
14624                                 &fp->rx_mbuf_tag);  /* returned dma tag */
14625         if (rc != 0) {
14626             /* XXX unwind and free previous fastpath allocations */
14627             BLOGE(sc, "Failed to create dma tag for "
14628                       "'fp %d rx mbufs' (%d)\n", i, rc);
14629             return (1);
14630         }
14631
14632         /* create dma maps for each of the rx mbuf clusters */
14633         for (j = 0; j < RX_BD_TOTAL; j++) {
14634             if (bus_dmamap_create(fp->rx_mbuf_tag,
14635                                   BUS_DMA_NOWAIT,
14636                                   &fp->rx_mbuf_chain[j].m_map)) {
14637                 /* XXX unwind and free previous fastpath allocations */
14638                 BLOGE(sc, "Failed to create dma map for "
14639
"'fp %d rx mbuf %d' (%d)\n", i, j, rc); 14640 return (1); 14641 } 14642 } 14643 14644 /* create dma map for the spare rx mbuf cluster */ 14645 if (bus_dmamap_create(fp->rx_mbuf_tag, 14646 BUS_DMA_NOWAIT, 14647 &fp->rx_mbuf_spare_map)) { 14648 /* XXX unwind and free previous fastpath allocations */ 14649 BLOGE(sc, "Failed to create dma map for " 14650 "'fp %d spare rx mbuf' (%d)\n", i, rc); 14651 return (1); 14652 } 14653 14654 /***************************/ 14655 /* FP RX SGE MBUF DMA MAPS */ 14656 /***************************/ 14657 14658 /* create a dma tag for the rx sge mbufs */ 14659 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 14660 1, /* alignment */ 14661 0, /* boundary limit */ 14662 BUS_SPACE_MAXADDR, /* restricted low */ 14663 BUS_SPACE_MAXADDR, /* restricted hi */ 14664 NULL, /* addr filter() */ 14665 NULL, /* addr filter() arg */ 14666 BCM_PAGE_SIZE, /* max map size */ 14667 1, /* num discontinuous */ 14668 BCM_PAGE_SIZE, /* max seg size */ 14669 0, /* flags */ 14670 NULL, /* lock() */ 14671 NULL, /* lock() arg */ 14672 &fp->rx_sge_mbuf_tag); /* returned dma tag */ 14673 if (rc != 0) { 14674 /* XXX unwind and free previous fastpath allocations */ 14675 BLOGE(sc, "Failed to create dma tag for " 14676 "'fp %d rx sge mbufs' (%d)\n", i, rc); 14677 return (1); 14678 } 14679 14680 /* create dma maps for the rx sge mbuf clusters */ 14681 for (j = 0; j < RX_SGE_TOTAL; j++) { 14682 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 14683 BUS_DMA_NOWAIT, 14684 &fp->rx_sge_mbuf_chain[j].m_map)) { 14685 /* XXX unwind and free previous fastpath allocations */ 14686 BLOGE(sc, "Failed to create dma map for " 14687 "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc); 14688 return (1); 14689 } 14690 } 14691 14692 /* create dma map for the spare rx sge mbuf cluster */ 14693 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 14694 BUS_DMA_NOWAIT, 14695 &fp->rx_sge_mbuf_spare_map)) { 14696 /* XXX unwind and free previous fastpath allocations */ 14697 BLOGE(sc, "Failed to create dma map for " 14698 "'fp %d spare rx sge mbuf' (%d)\n", i, rc); 14699 return (1); 14700 } 14701 14702 /***************************/ 14703 /* FP RX TPA MBUF DMA MAPS */ 14704 /***************************/ 14705 14706 /* create dma maps for the rx tpa mbuf clusters */ 14707 max_agg_queues = MAX_AGG_QS(sc); 14708 14709 for (j = 0; j < max_agg_queues; j++) { 14710 if (bus_dmamap_create(fp->rx_mbuf_tag, 14711 BUS_DMA_NOWAIT, 14712 &fp->rx_tpa_info[j].bd.m_map)) { 14713 /* XXX unwind and free previous fastpath allocations */ 14714 BLOGE(sc, "Failed to create dma map for " 14715 "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc); 14716 return (1); 14717 } 14718 } 14719 14720 /* create dma map for the spare rx tpa mbuf cluster */ 14721 if (bus_dmamap_create(fp->rx_mbuf_tag, 14722 BUS_DMA_NOWAIT, 14723 &fp->rx_tpa_info_mbuf_spare_map)) { 14724 /* XXX unwind and free previous fastpath allocations */ 14725 BLOGE(sc, "Failed to create dma map for " 14726 "'fp %d spare rx tpa mbuf' (%d)\n", i, rc); 14727 return (1); 14728 } 14729 14730 bxe_init_sge_ring_bit_mask(fp); 14731 } 14732 14733 return (0); 14734 } 14735 14736 static void 14737 bxe_free_hsi_mem(struct bxe_softc *sc) 14738 { 14739 struct bxe_fastpath *fp; 14740 int max_agg_queues; 14741 int i, j; 14742 14743 if (sc->parent_dma_tag == NULL) { 14744 return; /* assume nothing was allocated */ 14745 } 14746 14747 for (i = 0; i < sc->num_queues; i++) { 14748 fp = &sc->fp[i]; 14749 14750 /*******************/ 14751 /* FP STATUS BLOCK */ 14752 /*******************/ 14753 14754 bxe_dma_free(sc, 
&fp->sb_dma); 14755 memset(&fp->status_block, 0, sizeof(fp->status_block)); 14756 14757 /******************/ 14758 /* FP TX BD CHAIN */ 14759 /******************/ 14760 14761 bxe_dma_free(sc, &fp->tx_dma); 14762 fp->tx_chain = NULL; 14763 14764 /******************/ 14765 /* FP RX BD CHAIN */ 14766 /******************/ 14767 14768 bxe_dma_free(sc, &fp->rx_dma); 14769 fp->rx_chain = NULL; 14770 14771 /*******************/ 14772 /* FP RX RCQ CHAIN */ 14773 /*******************/ 14774 14775 bxe_dma_free(sc, &fp->rcq_dma); 14776 fp->rcq_chain = NULL; 14777 14778 /*******************/ 14779 /* FP RX SGE CHAIN */ 14780 /*******************/ 14781 14782 bxe_dma_free(sc, &fp->rx_sge_dma); 14783 fp->rx_sge_chain = NULL; 14784 14785 /***********************/ 14786 /* FP TX MBUF DMA MAPS */ 14787 /***********************/ 14788 14789 if (fp->tx_mbuf_tag != NULL) { 14790 for (j = 0; j < TX_BD_TOTAL; j++) { 14791 if (fp->tx_mbuf_chain[j].m_map != NULL) { 14792 bus_dmamap_unload(fp->tx_mbuf_tag, 14793 fp->tx_mbuf_chain[j].m_map); 14794 bus_dmamap_destroy(fp->tx_mbuf_tag, 14795 fp->tx_mbuf_chain[j].m_map); 14796 } 14797 } 14798 14799 bus_dma_tag_destroy(fp->tx_mbuf_tag); 14800 fp->tx_mbuf_tag = NULL; 14801 } 14802 14803 /***********************/ 14804 /* FP RX MBUF DMA MAPS */ 14805 /***********************/ 14806 14807 if (fp->rx_mbuf_tag != NULL) { 14808 for (j = 0; j < RX_BD_TOTAL; j++) { 14809 if (fp->rx_mbuf_chain[j].m_map != NULL) { 14810 bus_dmamap_unload(fp->rx_mbuf_tag, 14811 fp->rx_mbuf_chain[j].m_map); 14812 bus_dmamap_destroy(fp->rx_mbuf_tag, 14813 fp->rx_mbuf_chain[j].m_map); 14814 } 14815 } 14816 14817 if (fp->rx_mbuf_spare_map != NULL) { 14818 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 14819 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 14820 } 14821 14822 /***************************/ 14823 /* FP RX TPA MBUF DMA MAPS */ 14824 /***************************/ 14825 14826 max_agg_queues = MAX_AGG_QS(sc); 14827 14828 for (j = 0; j < max_agg_queues; j++) { 14829 if (fp->rx_tpa_info[j].bd.m_map != NULL) { 14830 bus_dmamap_unload(fp->rx_mbuf_tag, 14831 fp->rx_tpa_info[j].bd.m_map); 14832 bus_dmamap_destroy(fp->rx_mbuf_tag, 14833 fp->rx_tpa_info[j].bd.m_map); 14834 } 14835 } 14836 14837 if (fp->rx_tpa_info_mbuf_spare_map != NULL) { 14838 bus_dmamap_unload(fp->rx_mbuf_tag, 14839 fp->rx_tpa_info_mbuf_spare_map); 14840 bus_dmamap_destroy(fp->rx_mbuf_tag, 14841 fp->rx_tpa_info_mbuf_spare_map); 14842 } 14843 14844 bus_dma_tag_destroy(fp->rx_mbuf_tag); 14845 fp->rx_mbuf_tag = NULL; 14846 } 14847 14848 /***************************/ 14849 /* FP RX SGE MBUF DMA MAPS */ 14850 /***************************/ 14851 14852 if (fp->rx_sge_mbuf_tag != NULL) { 14853 for (j = 0; j < RX_SGE_TOTAL; j++) { 14854 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { 14855 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 14856 fp->rx_sge_mbuf_chain[j].m_map); 14857 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 14858 fp->rx_sge_mbuf_chain[j].m_map); 14859 } 14860 } 14861 14862 if (fp->rx_sge_mbuf_spare_map != NULL) { 14863 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 14864 fp->rx_sge_mbuf_spare_map); 14865 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 14866 fp->rx_sge_mbuf_spare_map); 14867 } 14868 14869 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); 14870 fp->rx_sge_mbuf_tag = NULL; 14871 } 14872 } 14873 14874 /***************************/ 14875 /* FW DECOMPRESSION BUFFER */ 14876 /***************************/ 14877 14878 bxe_dma_free(sc, &sc->gz_buf_dma); 14879 sc->gz_buf = NULL; 14880 free(sc->gz_strm, M_DEVBUF); 14881 sc->gz_strm = 
NULL; 14882 14883 /*******************/ 14884 /* SLOW PATH QUEUE */ 14885 /*******************/ 14886 14887 bxe_dma_free(sc, &sc->spq_dma); 14888 sc->spq = NULL; 14889 14890 /*************/ 14891 /* SLOW PATH */ 14892 /*************/ 14893 14894 bxe_dma_free(sc, &sc->sp_dma); 14895 sc->sp = NULL; 14896 14897 /***************/ 14898 /* EVENT QUEUE */ 14899 /***************/ 14900 14901 bxe_dma_free(sc, &sc->eq_dma); 14902 sc->eq = NULL; 14903 14904 /************************/ 14905 /* DEFAULT STATUS BLOCK */ 14906 /************************/ 14907 14908 bxe_dma_free(sc, &sc->def_sb_dma); 14909 sc->def_sb = NULL; 14910 14911 bus_dma_tag_destroy(sc->parent_dma_tag); 14912 sc->parent_dma_tag = NULL; 14913 } 14914 14915 /* 14916 * Previous driver DMAE transaction may have occurred when pre-boot stage 14917 * ended and boot began. This would invalidate the addresses of the 14918 * transaction, resulting in was-error bit set in the PCI causing all 14919 * hw-to-host PCIe transactions to timeout. If this happened we want to clear 14920 * the interrupt which detected this from the pglueb and the was-done bit 14921 */ 14922 static void 14923 bxe_prev_interrupted_dmae(struct bxe_softc *sc) 14924 { 14925 uint32_t val; 14926 14927 if (!CHIP_IS_E1x(sc)) { 14928 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); 14929 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 14930 BLOGD(sc, DBG_LOAD, 14931 "Clearing 'was-error' bit that was set in pglueb"); 14932 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); 14933 } 14934 } 14935 } 14936 14937 static int 14938 bxe_prev_mcp_done(struct bxe_softc *sc) 14939 { 14940 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 14941 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 14942 if (!rc) { 14943 BLOGE(sc, "MCP response failure, aborting\n"); 14944 return (-1); 14945 } 14946 14947 return (0); 14948 } 14949 14950 static struct bxe_prev_list_node * 14951 bxe_prev_path_get_entry(struct bxe_softc *sc) 14952 { 14953 struct bxe_prev_list_node *tmp; 14954 14955 LIST_FOREACH(tmp, &bxe_prev_list, node) { 14956 if ((sc->pcie_bus == tmp->bus) && 14957 (sc->pcie_device == tmp->slot) && 14958 (SC_PATH(sc) == tmp->path)) { 14959 return (tmp); 14960 } 14961 } 14962 14963 return (NULL); 14964 } 14965 14966 static uint8_t 14967 bxe_prev_is_path_marked(struct bxe_softc *sc) 14968 { 14969 struct bxe_prev_list_node *tmp; 14970 int rc = FALSE; 14971 14972 mtx_lock(&bxe_prev_mtx); 14973 14974 tmp = bxe_prev_path_get_entry(sc); 14975 if (tmp) { 14976 if (tmp->aer) { 14977 BLOGD(sc, DBG_LOAD, 14978 "Path %d/%d/%d was marked by AER\n", 14979 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 14980 } else { 14981 rc = TRUE; 14982 BLOGD(sc, DBG_LOAD, 14983 "Path %d/%d/%d was already cleaned from previous drivers\n", 14984 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 14985 } 14986 } 14987 14988 mtx_unlock(&bxe_prev_mtx); 14989 14990 return (rc); 14991 } 14992 14993 static int 14994 bxe_prev_mark_path(struct bxe_softc *sc, 14995 uint8_t after_undi) 14996 { 14997 struct bxe_prev_list_node *tmp; 14998 14999 mtx_lock(&bxe_prev_mtx); 15000 15001 /* Check whether the entry for this path already exists */ 15002 tmp = bxe_prev_path_get_entry(sc); 15003 if (tmp) { 15004 if (!tmp->aer) { 15005 BLOGD(sc, DBG_LOAD, 15006 "Re-marking AER in path %d/%d/%d\n", 15007 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15008 } else { 15009 BLOGD(sc, DBG_LOAD, 15010 "Removing AER indication from path %d/%d/%d\n", 15011 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15012 tmp->aer = 0; 15013 } 15014 15015 
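/* an entry for this path already existed; drop the list lock and report success */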
mtx_unlock(&bxe_prev_mtx); 15016 return (0); 15017 } 15018 15019 mtx_unlock(&bxe_prev_mtx); 15020 15021 /* Create an entry for this path and add it */ 15022 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, 15023 (M_NOWAIT | M_ZERO)); 15024 if (!tmp) { 15025 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); 15026 return (-1); 15027 } 15028 15029 tmp->bus = sc->pcie_bus; 15030 tmp->slot = sc->pcie_device; 15031 tmp->path = SC_PATH(sc); 15032 tmp->aer = 0; 15033 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0; 15034 15035 mtx_lock(&bxe_prev_mtx); 15036 15037 BLOGD(sc, DBG_LOAD, 15038 "Marked path %d/%d/%d - finished previous unload\n", 15039 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15040 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); 15041 15042 mtx_unlock(&bxe_prev_mtx); 15043 15044 return (0); 15045 } 15046 15047 static int 15048 bxe_do_flr(struct bxe_softc *sc) 15049 { 15050 int i; 15051 15052 /* only E2 and onwards support FLR */ 15053 if (CHIP_IS_E1x(sc)) { 15054 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); 15055 return (-1); 15056 } 15057 15058 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 15059 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 15060 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", 15061 sc->devinfo.bc_ver); 15062 return (-1); 15063 } 15064 15065 /* Wait for Transaction Pending bit clean */ 15066 for (i = 0; i < 4; i++) { 15067 if (i) { 15068 DELAY(((1 << (i - 1)) * 100) * 1000); 15069 } 15070 15071 if (!bxe_is_pcie_pending(sc)) { 15072 goto clear; 15073 } 15074 } 15075 15076 BLOGE(sc, "PCIE transaction is not cleared, " 15077 "proceeding with reset anyway\n"); 15078 15079 clear: 15080 15081 BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); 15082 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 15083 15084 return (0); 15085 } 15086 15087 struct bxe_mac_vals { 15088 uint32_t xmac_addr; 15089 uint32_t xmac_val; 15090 uint32_t emac_addr; 15091 uint32_t emac_val; 15092 uint32_t umac_addr; 15093 uint32_t umac_val; 15094 uint32_t bmac_addr; 15095 uint32_t bmac_val[2]; 15096 }; 15097 15098 static void 15099 bxe_prev_unload_close_mac(struct bxe_softc *sc, 15100 struct bxe_mac_vals *vals) 15101 { 15102 uint32_t val, base_addr, offset, mask, reset_reg; 15103 uint8_t mac_stopped = FALSE; 15104 uint8_t port = SC_PORT(sc); 15105 uint32_t wb_data[2]; 15106 15107 /* reset addresses as they also mark which values were changed */ 15108 vals->bmac_addr = 0; 15109 vals->umac_addr = 0; 15110 vals->xmac_addr = 0; 15111 vals->emac_addr = 0; 15112 15113 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 15114 15115 if (!CHIP_IS_E3(sc)) { 15116 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 15117 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 15118 if ((mask & reset_reg) && val) { 15119 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); 15120 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 15121 : NIG_REG_INGRESS_BMAC0_MEM; 15122 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL 15123 : BIGMAC_REGISTER_BMAC_CONTROL; 15124 15125 /* 15126 * use rd/wr since we cannot use dmae. This is safe 15127 * since MCP won't access the bus due to the request 15128 * to unload, and no function on the path can be 15129 * loaded at this time. 
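The sequence below is
         * save-then-mask: both 32-bit halves of the BMAC control register
         * are read and stashed in 'vals' (bmac_addr/bmac_val[]) so that
         * bxe_prev_unload_common() can restore them once the reset
         * completes; only then is the Rx enable bit cleared from the first
         * word (wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE).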
15130 */ 15131 wb_data[0] = REG_RD(sc, base_addr + offset); 15132 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); 15133 vals->bmac_addr = base_addr + offset; 15134 vals->bmac_val[0] = wb_data[0]; 15135 vals->bmac_val[1] = wb_data[1]; 15136 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; 15137 REG_WR(sc, vals->bmac_addr, wb_data[0]); 15138 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); 15139 } 15140 15141 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); 15142 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; 15143 vals->emac_val = REG_RD(sc, vals->emac_addr); 15144 REG_WR(sc, vals->emac_addr, 0); 15145 mac_stopped = TRUE; 15146 } else { 15147 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 15148 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); 15149 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 15150 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); 15151 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); 15152 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); 15153 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 15154 vals->xmac_val = REG_RD(sc, vals->xmac_addr); 15155 REG_WR(sc, vals->xmac_addr, 0); 15156 mac_stopped = TRUE; 15157 } 15158 15159 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 15160 if (mask & reset_reg) { 15161 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); 15162 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 15163 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 15164 vals->umac_val = REG_RD(sc, vals->umac_addr); 15165 REG_WR(sc, vals->umac_addr, 0); 15166 mac_stopped = TRUE; 15167 } 15168 } 15169 15170 if (mac_stopped) { 15171 DELAY(20000); 15172 } 15173 } 15174 15175 #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 15176 #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) 15177 #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 15178 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 15179 15180 static void 15181 bxe_prev_unload_undi_inc(struct bxe_softc *sc, 15182 uint8_t port, 15183 uint8_t inc) 15184 { 15185 uint16_t rcq, bd; 15186 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); 15187 15188 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; 15189 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; 15190 15191 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); 15192 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); 15193 15194 BLOGD(sc, DBG_LOAD, 15195 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 15196 port, bd, rcq); 15197 } 15198 15199 static int 15200 bxe_prev_unload_common(struct bxe_softc *sc) 15201 { 15202 uint32_t reset_reg, tmp_reg = 0, rc; 15203 uint8_t prev_undi = FALSE; 15204 struct bxe_mac_vals mac_vals; 15205 uint32_t timer_count = 1000; 15206 uint32_t prev_brb; 15207 15208 /* 15209 * It is possible a previous function received 'common' answer, 15210 * but hasn't loaded yet, therefore creating a scenario of 15211 * multiple functions receiving 'common' on the same path. 
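The cleanup flow below is
     * therefore written to be idempotent: if this path was already marked
     * as cleaned by an earlier function, only the MCP handshake is
     * completed.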
15212 */ 15213 BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); 15214 15215 memset(&mac_vals, 0, sizeof(mac_vals)); 15216 15217 if (bxe_prev_is_path_marked(sc)) { 15218 return (bxe_prev_mcp_done(sc)); 15219 } 15220 15221 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); 15222 15223 /* Reset should be performed after BRB is emptied */ 15224 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 15225 /* Close the MAC Rx to prevent BRB from filling up */ 15226 bxe_prev_unload_close_mac(sc, &mac_vals); 15227 15228 /* close LLH filters towards the BRB */ 15229 elink_set_rx_filter(&sc->link_params, 0); 15230 15231 /* 15232 * Check if the UNDI driver was previously loaded. 15233 * UNDI driver initializes CID offset for normal bell to 0x7 15234 */ 15235 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 15236 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); 15237 if (tmp_reg == 0x7) { 15238 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); 15239 prev_undi = TRUE; 15240 /* clear the UNDI indication */ 15241 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); 15242 /* clear possible idle check errors */ 15243 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); 15244 } 15245 } 15246 15247 /* wait until BRB is empty */ 15248 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15249 while (timer_count) { 15250 prev_brb = tmp_reg; 15251 15252 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15253 if (!tmp_reg) { 15254 break; 15255 } 15256 15257 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); 15258 15259 /* reset timer as long as BRB actually gets emptied */ 15260 if (prev_brb > tmp_reg) { 15261 timer_count = 1000; 15262 } else { 15263 timer_count--; 15264 } 15265 15266 /* If UNDI resides in memory, manually increment it */ 15267 if (prev_undi) { 15268 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); 15269 } 15270 15271 DELAY(10); 15272 } 15273 15274 if (!timer_count) { 15275 BLOGE(sc, "Failed to empty BRB\n"); 15276 } 15277 } 15278 15279 /* No packets are in the pipeline, path is ready for reset */ 15280 bxe_reset_common(sc); 15281 15282 if (mac_vals.xmac_addr) { 15283 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); 15284 } 15285 if (mac_vals.umac_addr) { 15286 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); 15287 } 15288 if (mac_vals.emac_addr) { 15289 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); 15290 } 15291 if (mac_vals.bmac_addr) { 15292 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 15293 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 15294 } 15295 15296 rc = bxe_prev_mark_path(sc, prev_undi); 15297 if (rc) { 15298 bxe_prev_mcp_done(sc); 15299 return (rc); 15300 } 15301 15302 return (bxe_prev_mcp_done(sc)); 15303 } 15304 15305 static int 15306 bxe_prev_unload_uncommon(struct bxe_softc *sc) 15307 { 15308 int rc; 15309 15310 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); 15311 15312 /* Test if previous unload process was already finished for this path */ 15313 if (bxe_prev_is_path_marked(sc)) { 15314 return (bxe_prev_mcp_done(sc)); 15315 } 15316 15317 BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); 15318 15319 /* 15320 * If function has FLR capabilities, and existing FW version matches 15321 * the one required, then FLR will be sufficient to clean any residue 15322 * left by previous driver 15323 */ 15324 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); 15325 if (!rc) { 15326 /* fw version is good */ 15327 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); 15328 rc = bxe_do_flr(sc); 15329 } 15330 15331 if (!rc) { 15332 /* FLR was performed */ 15333 BLOGD(sc, 
DBG_LOAD, "FLR successful\n"); 15334 return (0); 15335 } 15336 15337 BLOGD(sc, DBG_LOAD, "Could not FLR\n"); 15338 15339 /* Close the MCP request, return failure*/ 15340 rc = bxe_prev_mcp_done(sc); 15341 if (!rc) { 15342 rc = BXE_PREV_WAIT_NEEDED; 15343 } 15344 15345 return (rc); 15346 } 15347 15348 static int 15349 bxe_prev_unload(struct bxe_softc *sc) 15350 { 15351 int time_counter = 10; 15352 uint32_t fw, hw_lock_reg, hw_lock_val; 15353 uint32_t rc = 0; 15354 15355 /* 15356 * Clear HW from errors which may have resulted from an interrupted 15357 * DMAE transaction. 15358 */ 15359 bxe_prev_interrupted_dmae(sc); 15360 15361 /* Release previously held locks */ 15362 hw_lock_reg = 15363 (SC_FUNC(sc) <= 5) ? 15364 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : 15365 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); 15366 15367 hw_lock_val = (REG_RD(sc, hw_lock_reg)); 15368 if (hw_lock_val) { 15369 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 15370 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); 15371 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 15372 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); 15373 } 15374 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); 15375 REG_WR(sc, hw_lock_reg, 0xffffffff); 15376 } else { 15377 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); 15378 } 15379 15380 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { 15381 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); 15382 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); 15383 } 15384 15385 do { 15386 /* Lock MCP using an unload request */ 15387 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 15388 if (!fw) { 15389 BLOGE(sc, "MCP response failure, aborting\n"); 15390 rc = -1; 15391 break; 15392 } 15393 15394 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 15395 rc = bxe_prev_unload_common(sc); 15396 break; 15397 } 15398 15399 /* non-common reply from MCP night require looping */ 15400 rc = bxe_prev_unload_uncommon(sc); 15401 if (rc != BXE_PREV_WAIT_NEEDED) { 15402 break; 15403 } 15404 15405 DELAY(20000); 15406 } while (--time_counter); 15407 15408 if (!time_counter || rc) { 15409 BLOGE(sc, "Failed to unload previous driver!" 15410 " time_counter %d rc %d\n", time_counter, rc); 15411 rc = -1; 15412 } 15413 15414 return (rc); 15415 } 15416 15417 void 15418 bxe_dcbx_set_state(struct bxe_softc *sc, 15419 uint8_t dcb_on, 15420 uint32_t dcbx_enabled) 15421 { 15422 if (!CHIP_IS_E1x(sc)) { 15423 sc->dcb_state = dcb_on; 15424 sc->dcbx_enabled = dcbx_enabled; 15425 } else { 15426 sc->dcb_state = FALSE; 15427 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; 15428 } 15429 BLOGD(sc, DBG_LOAD, 15430 "DCB state [%s:%s]\n", 15431 dcb_on ? "ON" : "OFF", 15432 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : 15433 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : 15434 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? 
15435 "on-chip with negotiation" : "invalid"); 15436 } 15437 15438 /* must be called after sriov-enable */ 15439 static int 15440 bxe_set_qm_cid_count(struct bxe_softc *sc) 15441 { 15442 int cid_count = BXE_L2_MAX_CID(sc); 15443 15444 if (IS_SRIOV(sc)) { 15445 cid_count += BXE_VF_CIDS; 15446 } 15447 15448 if (CNIC_SUPPORT(sc)) { 15449 cid_count += CNIC_CID_MAX; 15450 } 15451 15452 return (roundup(cid_count, QM_CID_ROUND)); 15453 } 15454 15455 static void 15456 bxe_init_multi_cos(struct bxe_softc *sc) 15457 { 15458 int pri, cos; 15459 15460 uint32_t pri_map = 0; /* XXX change to user config */ 15461 15462 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { 15463 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 15464 if (cos < sc->max_cos) { 15465 sc->prio_to_cos[pri] = cos; 15466 } else { 15467 BLOGW(sc, "Invalid COS %d for priority %d " 15468 "(max COS is %d), setting to 0\n", 15469 cos, pri, (sc->max_cos - 1)); 15470 sc->prio_to_cos[pri] = 0; 15471 } 15472 } 15473 } 15474 15475 static int 15476 bxe_sysctl_state(SYSCTL_HANDLER_ARGS) 15477 { 15478 struct bxe_softc *sc; 15479 int error, result; 15480 15481 result = 0; 15482 error = sysctl_handle_int(oidp, &result, 0, req); 15483 15484 if (error || !req->newptr) { 15485 return (error); 15486 } 15487 15488 if (result == 1) { 15489 uint32_t temp; 15490 sc = (struct bxe_softc *)arg1; 15491 15492 BLOGI(sc, "... dumping driver state ...\n"); 15493 temp = SHMEM2_RD(sc, temperature_in_half_celsius); 15494 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2)); 15495 } 15496 15497 return (error); 15498 } 15499 15500 static int 15501 bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) 15502 { 15503 struct bxe_softc *sc = (struct bxe_softc *)arg1; 15504 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; 15505 uint32_t *offset; 15506 uint64_t value = 0; 15507 int index = (int)arg2; 15508 15509 if (index >= BXE_NUM_ETH_STATS) { 15510 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); 15511 return (-1); 15512 } 15513 15514 offset = (eth_stats + bxe_eth_stats_arr[index].offset); 15515 15516 switch (bxe_eth_stats_arr[index].size) { 15517 case 4: 15518 value = (uint64_t)*offset; 15519 break; 15520 case 8: 15521 value = HILO_U64(*offset, *(offset + 1)); 15522 break; 15523 default: 15524 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", 15525 index, bxe_eth_stats_arr[index].size); 15526 return (-1); 15527 } 15528 15529 return (sysctl_handle_64(oidp, &value, 0, req)); 15530 } 15531 15532 static int 15533 bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) 15534 { 15535 struct bxe_softc *sc = (struct bxe_softc *)arg1; 15536 uint32_t *eth_stats; 15537 uint32_t *offset; 15538 uint64_t value = 0; 15539 uint32_t q_stat = (uint32_t)arg2; 15540 uint32_t fp_index = ((q_stat >> 16) & 0xffff); 15541 uint32_t index = (q_stat & 0xffff); 15542 15543 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; 15544 15545 if (index >= BXE_NUM_ETH_Q_STATS) { 15546 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); 15547 return (-1); 15548 } 15549 15550 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); 15551 15552 switch (bxe_eth_q_stats_arr[index].size) { 15553 case 4: 15554 value = (uint64_t)*offset; 15555 break; 15556 case 8: 15557 value = HILO_U64(*offset, *(offset + 1)); 15558 break; 15559 default: 15560 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", 15561 index, bxe_eth_q_stats_arr[index].size); 15562 return (-1); 15563 } 15564 15565 return (sysctl_handle_64(oidp, &value, 0, req)); 15566 } 15567 15568 static void 15569 
bxe_add_sysctls(struct bxe_softc *sc) 15570 { 15571 struct sysctl_ctx_list *ctx; 15572 struct sysctl_oid_list *children; 15573 struct sysctl_oid *queue_top, *queue; 15574 struct sysctl_oid_list *queue_top_children, *queue_children; 15575 char queue_num_buf[32]; 15576 uint32_t q_stat; 15577 int i, j; 15578 15579 ctx = device_get_sysctl_ctx(sc->dev); 15580 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 15581 15582 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", 15583 CTLFLAG_RD, BXE_DRIVER_VERSION, 0, 15584 "version"); 15585 15586 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", 15587 BCM_5710_FW_MAJOR_VERSION, 15588 BCM_5710_FW_MINOR_VERSION, 15589 BCM_5710_FW_REVISION_VERSION, 15590 BCM_5710_FW_ENGINEERING_VERSION); 15591 15592 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", 15593 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : 15594 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : 15595 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : 15596 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : 15597 "Unknown")); 15598 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", 15599 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, 15600 "multifunction vnics per port"); 15601 15602 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", 15603 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : 15604 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : 15605 (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" : 15606 "???GT/s"), 15607 sc->devinfo.pcie_link_width); 15608 15609 sc->debug = bxe_debug; 15610 15611 #if __FreeBSD_version >= 900000 15612 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 15613 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, 15614 "bootcode version"); 15615 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 15616 CTLFLAG_RD, sc->fw_ver_str, 0, 15617 "firmware version"); 15618 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 15619 CTLFLAG_RD, sc->mf_mode_str, 0, 15620 "multifunction mode"); 15621 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 15622 CTLFLAG_RD, sc->mac_addr_str, 0, 15623 "mac address"); 15624 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 15625 CTLFLAG_RD, sc->pci_link_str, 0, 15626 "pci link status"); 15627 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", 15628 CTLFLAG_RW, &sc->debug, 15629 "debug logging mode"); 15630 #else 15631 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 15632 CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0, 15633 "bootcode version"); 15634 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 15635 CTLFLAG_RD, &sc->fw_ver_str, 0, 15636 "firmware version"); 15637 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 15638 CTLFLAG_RD, &sc->mf_mode_str, 0, 15639 "multifunction mode"); 15640 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 15641 CTLFLAG_RD, &sc->mac_addr_str, 0, 15642 "mac address"); 15643 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 15644 CTLFLAG_RD, &sc->pci_link_str, 0, 15645 "pci link status"); 15646 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug", 15647 CTLFLAG_RW, &sc->debug, 0, 15648 "debug logging mode"); 15649 #endif /* #if __FreeBSD_version >= 900000 */ 15650 15651 sc->trigger_grcdump = 0; 15652 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump", 15653 CTLFLAG_RW, &sc->trigger_grcdump, 0, 15654 "trigger grcdump should be invoked" 15655 " before collecting grcdump"); 15656 15657 sc->grcdump_started = 0; 15658 sc->grcdump_done = 0; 15659 SYSCTL_ADD_UINT(ctx, 
children, OID_AUTO, "grcdump_done", 15660 CTLFLAG_RD, &sc->grcdump_done, 0, 15661 "set by driver when grcdump is done"); 15662 15663 sc->rx_budget = bxe_rx_budget; 15664 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", 15665 CTLFLAG_RW, &sc->rx_budget, 0, 15666 "rx processing budget"); 15667 15668 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", 15669 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 15670 bxe_sysctl_state, "IU", "dump driver state"); 15671 15672 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 15673 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 15674 bxe_eth_stats_arr[i].string, 15675 CTLTYPE_U64 | CTLFLAG_RD, sc, i, 15676 bxe_sysctl_eth_stat, "LU", 15677 bxe_eth_stats_arr[i].string); 15678 } 15679 15680 /* add a new parent node for all queues "dev.bxe.#.queue" */ 15681 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", 15682 CTLFLAG_RD, NULL, "queue"); 15683 queue_top_children = SYSCTL_CHILDREN(queue_top); 15684 15685 for (i = 0; i < sc->num_queues; i++) { 15686 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ 15687 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); 15688 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, 15689 queue_num_buf, CTLFLAG_RD, NULL, 15690 "single queue"); 15691 queue_children = SYSCTL_CHILDREN(queue); 15692 15693 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { 15694 q_stat = ((i << 16) | j); 15695 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, 15696 bxe_eth_q_stats_arr[j].string, 15697 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, 15698 bxe_sysctl_eth_q_stat, "LU", 15699 bxe_eth_q_stats_arr[j].string); 15700 } 15701 } 15702 } 15703 15704 static int 15705 bxe_alloc_buf_rings(struct bxe_softc *sc) 15706 { 15707 #if __FreeBSD_version >= 901504 15708 15709 int i; 15710 struct bxe_fastpath *fp; 15711 15712 for (i = 0; i < sc->num_queues; i++) { 15713 15714 fp = &sc->fp[i]; 15715 15716 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, 15717 M_NOWAIT, &fp->tx_mtx); 15718 if (fp->tx_br == NULL) 15719 return (-1); 15720 } 15721 #endif 15722 return (0); 15723 } 15724 15725 static void 15726 bxe_free_buf_rings(struct bxe_softc *sc) 15727 { 15728 #if __FreeBSD_version >= 901504 15729 15730 int i; 15731 struct bxe_fastpath *fp; 15732 15733 for (i = 0; i < sc->num_queues; i++) { 15734 15735 fp = &sc->fp[i]; 15736 15737 if (fp->tx_br) { 15738 buf_ring_free(fp->tx_br, M_DEVBUF); 15739 fp->tx_br = NULL; 15740 } 15741 } 15742 15743 #endif 15744 } 15745 15746 static void 15747 bxe_init_fp_mutexs(struct bxe_softc *sc) 15748 { 15749 int i; 15750 struct bxe_fastpath *fp; 15751 15752 for (i = 0; i < sc->num_queues; i++) { 15753 15754 fp = &sc->fp[i]; 15755 15756 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 15757 "bxe%d_fp%d_tx_lock", sc->unit, i); 15758 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 15759 15760 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), 15761 "bxe%d_fp%d_rx_lock", sc->unit, i); 15762 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); 15763 } 15764 } 15765 15766 static void 15767 bxe_destroy_fp_mutexs(struct bxe_softc *sc) 15768 { 15769 int i; 15770 struct bxe_fastpath *fp; 15771 15772 for (i = 0; i < sc->num_queues; i++) { 15773 15774 fp = &sc->fp[i]; 15775 15776 if (mtx_initialized(&fp->tx_mtx)) { 15777 mtx_destroy(&fp->tx_mtx); 15778 } 15779 15780 if (mtx_initialized(&fp->rx_mtx)) { 15781 mtx_destroy(&fp->rx_mtx); 15782 } 15783 } 15784 } 15785 15786 15787 /* 15788 * Device attach function. 
15789 * 15790 * Allocates device resources, performs secondary chip identification, and 15791 * initializes driver instance variables. This function is called from driver 15792 * load after a successful probe. 15793 * 15794 * Returns: 15795 * 0 = Success, >0 = Failure 15796 */ 15797 static int 15798 bxe_attach(device_t dev) 15799 { 15800 struct bxe_softc *sc; 15801 15802 sc = device_get_softc(dev); 15803 15804 BLOGD(sc, DBG_LOAD, "Starting attach...\n"); 15805 15806 sc->state = BXE_STATE_CLOSED; 15807 15808 sc->dev = dev; 15809 sc->unit = device_get_unit(dev); 15810 15811 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); 15812 15813 sc->pcie_bus = pci_get_bus(dev); 15814 sc->pcie_device = pci_get_slot(dev); 15815 sc->pcie_func = pci_get_function(dev); 15816 15817 /* enable bus master capability */ 15818 pci_enable_busmaster(dev); 15819 15820 /* get the BARs */ 15821 if (bxe_allocate_bars(sc) != 0) { 15822 return (ENXIO); 15823 } 15824 15825 /* initialize the mutexes */ 15826 bxe_init_mutexes(sc); 15827 15828 /* prepare the periodic callout */ 15829 callout_init(&sc->periodic_callout, 0); 15830 15831 /* prepare the chip taskqueue */ 15832 sc->chip_tq_flags = CHIP_TQ_NONE; 15833 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), 15834 "bxe%d_chip_tq", sc->unit); 15835 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); 15836 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, 15837 taskqueue_thread_enqueue, 15838 &sc->chip_tq); 15839 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ 15840 "%s", sc->chip_tq_name); 15841 15842 /* get device info and set params */ 15843 if (bxe_get_device_info(sc) != 0) { 15844 BLOGE(sc, "getting device info\n"); 15845 bxe_deallocate_bars(sc); 15846 pci_disable_busmaster(dev); 15847 return (ENXIO); 15848 } 15849 15850 /* get final misc params */ 15851 bxe_get_params(sc); 15852 15853 /* set the default MTU (changed via ifconfig) */ 15854 sc->mtu = ETHERMTU; 15855 15856 bxe_set_modes_bitmap(sc); 15857 15858 /* XXX 15859 * If in AFEX mode and the function is configured for FCoE 15860 * then bail... no L2 allowed. 
15861 */ 15862 15863 /* get phy settings from shmem and 'and' against admin settings */ 15864 bxe_get_phy_info(sc); 15865 15866 /* initialize the FreeBSD ifnet interface */ 15867 if (bxe_init_ifnet(sc) != 0) { 15868 bxe_release_mutexes(sc); 15869 bxe_deallocate_bars(sc); 15870 pci_disable_busmaster(dev); 15871 return (ENXIO); 15872 } 15873 15874 if (bxe_add_cdev(sc) != 0) { 15875 if (sc->ifp != NULL) { 15876 ether_ifdetach(sc->ifp); 15877 } 15878 ifmedia_removeall(&sc->ifmedia); 15879 bxe_release_mutexes(sc); 15880 bxe_deallocate_bars(sc); 15881 pci_disable_busmaster(dev); 15882 return (ENXIO); 15883 } 15884 15885 /* allocate device interrupts */ 15886 if (bxe_interrupt_alloc(sc) != 0) { 15887 bxe_del_cdev(sc); 15888 if (sc->ifp != NULL) { 15889 ether_ifdetach(sc->ifp); 15890 } 15891 ifmedia_removeall(&sc->ifmedia); 15892 bxe_release_mutexes(sc); 15893 bxe_deallocate_bars(sc); 15894 pci_disable_busmaster(dev); 15895 return (ENXIO); 15896 } 15897 15898 bxe_init_fp_mutexs(sc); 15899 15900 if (bxe_alloc_buf_rings(sc) != 0) { 15901 bxe_free_buf_rings(sc); 15902 bxe_interrupt_free(sc); 15903 bxe_del_cdev(sc); 15904 if (sc->ifp != NULL) { 15905 ether_ifdetach(sc->ifp); 15906 } 15907 ifmedia_removeall(&sc->ifmedia); 15908 bxe_release_mutexes(sc); 15909 bxe_deallocate_bars(sc); 15910 pci_disable_busmaster(dev); 15911 return (ENXIO); 15912 } 15913 15914 /* allocate ilt */ 15915 if (bxe_alloc_ilt_mem(sc) != 0) { 15916 bxe_free_buf_rings(sc); 15917 bxe_interrupt_free(sc); 15918 bxe_del_cdev(sc); 15919 if (sc->ifp != NULL) { 15920 ether_ifdetach(sc->ifp); 15921 } 15922 ifmedia_removeall(&sc->ifmedia); 15923 bxe_release_mutexes(sc); 15924 bxe_deallocate_bars(sc); 15925 pci_disable_busmaster(dev); 15926 return (ENXIO); 15927 } 15928 15929 /* allocate the host hardware/software hsi structures */ 15930 if (bxe_alloc_hsi_mem(sc) != 0) { 15931 bxe_free_ilt_mem(sc); 15932 bxe_free_buf_rings(sc); 15933 bxe_interrupt_free(sc); 15934 bxe_del_cdev(sc); 15935 if (sc->ifp != NULL) { 15936 ether_ifdetach(sc->ifp); 15937 } 15938 ifmedia_removeall(&sc->ifmedia); 15939 bxe_release_mutexes(sc); 15940 bxe_deallocate_bars(sc); 15941 pci_disable_busmaster(dev); 15942 return (ENXIO); 15943 } 15944 15945 /* need to reset chip if UNDI was active */ 15946 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 15947 /* init fw_seq */ 15948 sc->fw_seq = 15949 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 15950 DRV_MSG_SEQ_NUMBER_MASK); 15951 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); 15952 bxe_prev_unload(sc); 15953 } 15954 15955 #if 1 15956 /* XXX */ 15957 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 15958 #else 15959 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && 15960 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && 15961 SHMEM2_RD(sc, dcbx_lldp_params_offset) && 15962 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { 15963 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); 15964 bxe_dcbx_init_params(sc); 15965 } else { 15966 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 15967 } 15968 #endif 15969 15970 /* calculate qm_cid_count */ 15971 sc->qm_cid_count = bxe_set_qm_cid_count(sc); 15972 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); 15973 15974 sc->max_cos = 1; 15975 bxe_init_multi_cos(sc); 15976 15977 bxe_add_sysctls(sc); 15978 15979 return (0); 15980 } 15981 15982 /* 15983 * Device detach function. 15984 * 15985 * Stops the controller, resets the controller, and releases resources. 
15986 * 15987 * Returns: 15988 * 0 = Success, >0 = Failure 15989 */ 15990 static int 15991 bxe_detach(device_t dev) 15992 { 15993 struct bxe_softc *sc; 15994 if_t ifp; 15995 15996 sc = device_get_softc(dev); 15997 15998 BLOGD(sc, DBG_LOAD, "Starting detach...\n"); 15999 16000 ifp = sc->ifp; 16001 if (ifp != NULL && if_vlantrunkinuse(ifp)) { 16002 BLOGE(sc, "Cannot detach while VLANs are in use.\n"); 16003 return(EBUSY); 16004 } 16005 16006 bxe_del_cdev(sc); 16007 16008 /* stop the periodic callout */ 16009 bxe_periodic_stop(sc); 16010 16011 /* stop the chip taskqueue */ 16012 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); 16013 if (sc->chip_tq) { 16014 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); 16015 taskqueue_free(sc->chip_tq); 16016 sc->chip_tq = NULL; 16017 } 16018 16019 /* stop and reset the controller if it was open */ 16020 if (sc->state != BXE_STATE_CLOSED) { 16021 BXE_CORE_LOCK(sc); 16022 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); 16023 sc->state = BXE_STATE_DISABLED; 16024 BXE_CORE_UNLOCK(sc); 16025 } 16026 16027 /* release the network interface */ 16028 if (ifp != NULL) { 16029 ether_ifdetach(ifp); 16030 } 16031 ifmedia_removeall(&sc->ifmedia); 16032 16033 /* XXX do the following based on driver state... */ 16034 16035 /* free the host hardware/software hsi structures */ 16036 bxe_free_hsi_mem(sc); 16037 16038 /* free ilt */ 16039 bxe_free_ilt_mem(sc); 16040 16041 bxe_free_buf_rings(sc); 16042 16043 /* release the interrupts */ 16044 bxe_interrupt_free(sc); 16045 16046 /* Release the mutexes*/ 16047 bxe_destroy_fp_mutexs(sc); 16048 bxe_release_mutexes(sc); 16049 16050 16051 /* Release the PCIe BAR mapped memory */ 16052 bxe_deallocate_bars(sc); 16053 16054 /* Release the FreeBSD interface. */ 16055 if (sc->ifp != NULL) { 16056 if_free(sc->ifp); 16057 } 16058 16059 pci_disable_busmaster(dev); 16060 16061 return (0); 16062 } 16063 16064 /* 16065 * Device shutdown function. 16066 * 16067 * Stops and resets the controller. 16068 * 16069 * Returns: 16070 * Nothing 16071 */ 16072 static int 16073 bxe_shutdown(device_t dev) 16074 { 16075 struct bxe_softc *sc; 16076 16077 sc = device_get_softc(dev); 16078 16079 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); 16080 16081 /* stop the periodic callout */ 16082 bxe_periodic_stop(sc); 16083 16084 BXE_CORE_LOCK(sc); 16085 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); 16086 BXE_CORE_UNLOCK(sc); 16087 16088 return (0); 16089 } 16090 16091 void 16092 bxe_igu_ack_sb(struct bxe_softc *sc, 16093 uint8_t igu_sb_id, 16094 uint8_t segment, 16095 uint16_t index, 16096 uint8_t op, 16097 uint8_t update) 16098 { 16099 uint32_t igu_addr = sc->igu_base_addr; 16100 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 16101 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); 16102 } 16103 16104 static void 16105 bxe_igu_clear_sb_gen(struct bxe_softc *sc, 16106 uint8_t func, 16107 uint8_t idu_sb_id, 16108 uint8_t is_pf) 16109 { 16110 uint32_t data, ctl, cnt = 100; 16111 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 16112 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 16113 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 16114 uint32_t sb_bit = 1 << (idu_sb_id%32); 16115 uint32_t func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 16116 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 16117 16118 /* Not supported in BC mode */ 16119 if (CHIP_INT_MODE_IS_BC(sc)) { 16120 return; 16121 } 16122 16123 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 16124 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 16125 IGU_REGULAR_CLEANUP_SET | 16126 IGU_REGULAR_BCLEANUP); 16127 16128 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 16129 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 16130 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 16131 16132 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16133 data, igu_addr_data); 16134 REG_WR(sc, igu_addr_data, data); 16135 16136 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16137 BUS_SPACE_BARRIER_WRITE); 16138 mb(); 16139 16140 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16141 ctl, igu_addr_ctl); 16142 REG_WR(sc, igu_addr_ctl, ctl); 16143 16144 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16145 BUS_SPACE_BARRIER_WRITE); 16146 mb(); 16147 16148 /* wait for clean up to finish */ 16149 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 16150 DELAY(20000); 16151 } 16152 16153 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 16154 BLOGD(sc, DBG_LOAD, 16155 "Unable to finish IGU cleanup: " 16156 "idu_sb_id %d offset %d bit %d (cnt %d)\n", 16157 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 16158 } 16159 } 16160 16161 static void 16162 bxe_igu_clear_sb(struct bxe_softc *sc, 16163 uint8_t idu_sb_id) 16164 { 16165 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 16166 } 16167 16168 16169 16170 16171 16172 16173 16174 /*******************/ 16175 /* ECORE CALLBACKS */ 16176 /*******************/ 16177 16178 static void 16179 bxe_reset_common(struct bxe_softc *sc) 16180 { 16181 uint32_t val = 0x1400; 16182 16183 /* reset_common */ 16184 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); 16185 16186 if (CHIP_IS_E3(sc)) { 16187 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 16188 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 16189 } 16190 16191 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 16192 } 16193 16194 static void 16195 bxe_common_init_phy(struct bxe_softc *sc) 16196 { 16197 uint32_t shmem_base[2]; 16198 uint32_t shmem2_base[2]; 16199 16200 /* Avoid common init in case MFW supports LFA */ 16201 if (SHMEM2_RD(sc, size) > 16202 (uint32_t)offsetof(struct shmem2_region, 16203 lfa_host_addr[SC_PORT(sc)])) { 16204 return; 16205 } 16206 16207 shmem_base[0] = sc->devinfo.shmem_base; 16208 shmem2_base[0] = sc->devinfo.shmem2_base; 16209 16210 if (!CHIP_IS_E1x(sc)) { 16211 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 16212 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 16213 } 16214 16215 bxe_acquire_phy_lock(sc); 16216 elink_common_init_phy(sc, shmem_base, shmem2_base, 16217 sc->devinfo.chip_id, 0); 16218 bxe_release_phy_lock(sc); 16219 } 16220 16221 static void 16222 bxe_pf_disable(struct bxe_softc *sc) 16223 { 16224 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 16225 16226 val &= ~IGU_PF_CONF_FUNC_EN; 16227 16228 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 16229 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 16230 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 16231 } 16232 16233 static void 16234 bxe_init_pxp(struct bxe_softc *sc) 16235 { 16236 uint16_t devctl; 16237 int r_order, w_order; 16238 16239 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); 16240 16241 BLOGD(sc, DBG_LOAD, "read 
0x%08x from devctl\n", devctl); 16242 16243 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); 16244 16245 if (sc->mrrs == -1) { 16246 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); 16247 } else { 16248 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); 16249 r_order = sc->mrrs; 16250 } 16251 16252 ecore_init_pxp_arb(sc, r_order, w_order); 16253 } 16254 16255 static uint32_t 16256 bxe_get_pretend_reg(struct bxe_softc *sc) 16257 { 16258 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 16259 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 16260 return (base + (SC_ABS_FUNC(sc)) * stride); 16261 } 16262 16263 /* 16264 * Called only on E1H or E2. 16265 * When pretending to be PF, the pretend value is the function number 0..7. 16266 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 16267 * combination. 16268 */ 16269 static int 16270 bxe_pretend_func(struct bxe_softc *sc, 16271 uint16_t pretend_func_val) 16272 { 16273 uint32_t pretend_reg; 16274 16275 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { 16276 return (-1); 16277 } 16278 16279 /* get my own pretend register */ 16280 pretend_reg = bxe_get_pretend_reg(sc); 16281 REG_WR(sc, pretend_reg, pretend_func_val); 16282 REG_RD(sc, pretend_reg); 16283 return (0); 16284 } 16285 16286 static void 16287 bxe_iov_init_dmae(struct bxe_softc *sc) 16288 { 16289 return; 16290 } 16291 16292 static void 16293 bxe_iov_init_dq(struct bxe_softc *sc) 16294 { 16295 return; 16296 } 16297 16298 /* send a NIG loopback debug packet */ 16299 static void 16300 bxe_lb_pckt(struct bxe_softc *sc) 16301 { 16302 uint32_t wb_write[3]; 16303 16304 /* Ethernet source and destination addresses */ 16305 wb_write[0] = 0x55555555; 16306 wb_write[1] = 0x55555555; 16307 wb_write[2] = 0x20; /* SOP */ 16308 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16309 16310 /* NON-IP protocol */ 16311 wb_write[0] = 0x09000000; 16312 wb_write[1] = 0x55555555; 16313 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 16314 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16315 } 16316 16317 /* 16318 * Some of the internal memories are not directly readable from the driver. 16319 * To test them we send debug packets. 16320 */ 16321 static int 16322 bxe_int_mem_test(struct bxe_softc *sc) 16323 { 16324 int factor; 16325 int count, i; 16326 uint32_t val = 0; 16327 16328 if (CHIP_REV_IS_FPGA(sc)) { 16329 factor = 120; 16330 } else if (CHIP_REV_IS_EMUL(sc)) { 16331 factor = 200; 16332 } else { 16333 factor = 1; 16334 } 16335 16336 /* disable inputs of parser neighbor blocks */ 16337 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16338 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16339 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16340 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16341 16342 /* write 0 to parser credits for CFC search request */ 16343 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16344 16345 /* send Ethernet packet */ 16346 bxe_lb_pckt(sc); 16347 16348 /* TODO do i reset NIG statistic? 
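 * (a reading of this TODO, not verified behavior: if the BRB octet
 * counter were left uncleared from a previous run, the exact-match
 * checks below, 0x10 and then 0xb0, would fail)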
*/
16349 /* Wait until NIG register shows 1 packet of size 0x10 */
16350 count = 1000 * factor;
16351 while (count) {
16352 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16353 val = *BXE_SP(sc, wb_data[0]);
16354 if (val == 0x10) {
16355 break;
16356 }
16357
16358 DELAY(10000);
16359 count--;
16360 }
16361
16362 if (val != 0x10) {
16363 BLOGE(sc, "NIG timeout val=0x%x\n", val);
16364 return (-1);
16365 }
16366
16367 /* Wait until PRS register shows 1 packet */
16368 count = (1000 * factor);
16369 while (count) {
16370 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16371 if (val == 1) {
16372 break;
16373 }
16374
16375 DELAY(10000);
16376 count--;
16377 }
16378
16379 if (val != 0x1) {
16380 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16381 return (-2);
16382 }
16383
16384 /* Reset and init BRB, PRS */
16385 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16386 DELAY(50000);
16387 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16388 DELAY(50000);
16389 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16390 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16391
16392 /* Disable inputs of parser neighbor blocks */
16393 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16394 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16395 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16396 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16397
16398 /* Write 0 to parser credits for CFC search request */
16399 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16400
16401 /* send 10 Ethernet packets */
16402 for (i = 0; i < 10; i++) {
16403 bxe_lb_pckt(sc);
16404 }
16405
16406 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16407 count = (1000 * factor);
16408 while (count) {
16409 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16410 val = *BXE_SP(sc, wb_data[0]);
16411 if (val == 0xb0) {
16412 break;
16413 }
16414
16415 DELAY(10000);
16416 count--;
16417 }
16418
16419 if (val != 0xb0) {
16420 BLOGE(sc, "NIG timeout val=0x%x\n", val);
16421 return (-3);
16422 }
16423
16424 /* Check that the PRS register shows 2 packets */
16425 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16426 if (val != 2) {
16427 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16428 }
16429
16430 /* Write 1 to parser credits for CFC search request */
16431 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16432
16433 /* Wait until PRS register shows 3 packets */
16434 DELAY(10000 * factor);
16435
16436 /* Check that the PRS register now shows 3 packets */
16437 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16438 if (val != 3) {
16439 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16440 }
16441
16442 /* clear NIG EOP FIFO */
16443 for (i = 0; i < 11; i++) {
16444 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16445 }
16446
16447 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16448 if (val != 1) {
16449 BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16450 return (-4);
16451 }
16452
16453 /* Reset and init BRB, PRS, NIG */
16454 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16455 DELAY(50000);
16456 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16457 DELAY(50000);
16458 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16459 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16460 if (!CNIC_SUPPORT(sc)) {
16461 /* set NIC mode */
16462 REG_WR(sc, PRS_REG_NIC_MODE, 1);
16463 }
16464
16465 /* Enable inputs of parser neighbor blocks */
16466 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16467 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16468 REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16469 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16470
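/*
 * For illustration, the arithmetic behind the exact-match values used
 * above: each NIG loopback debug packet is 0x10 bytes, so the first
 * probe expects the BRB octet counter to read 1 * 0x10 = 0x10, and
 * the second probe expects 10 + 1 packets, i.e. 11 * 0x10 = 0xb0
 * (176) octets.
 */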
16471 return (0); 16472 } 16473 16474 static void 16475 bxe_setup_fan_failure_detection(struct bxe_softc *sc) 16476 { 16477 int is_required; 16478 uint32_t val; 16479 int port; 16480 16481 is_required = 0; 16482 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 16483 SHARED_HW_CFG_FAN_FAILURE_MASK); 16484 16485 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 16486 is_required = 1; 16487 } 16488 /* 16489 * The fan failure mechanism is usually related to the PHY type since 16490 * the power consumption of the board is affected by the PHY. Currently, 16491 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 16492 */ 16493 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 16494 for (port = PORT_0; port < PORT_MAX; port++) { 16495 is_required |= elink_fan_failure_det_req(sc, 16496 sc->devinfo.shmem_base, 16497 sc->devinfo.shmem2_base, 16498 port); 16499 } 16500 } 16501 16502 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); 16503 16504 if (is_required == 0) { 16505 return; 16506 } 16507 16508 /* Fan failure is indicated by SPIO 5 */ 16509 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 16510 16511 /* set to active low mode */ 16512 val = REG_RD(sc, MISC_REG_SPIO_INT); 16513 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 16514 REG_WR(sc, MISC_REG_SPIO_INT, val); 16515 16516 /* enable interrupt to signal the IGU */ 16517 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 16518 val |= MISC_SPIO_SPIO5; 16519 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 16520 } 16521 16522 static void 16523 bxe_enable_blocks_attention(struct bxe_softc *sc) 16524 { 16525 uint32_t val; 16526 16527 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 16528 if (!CHIP_IS_E1x(sc)) { 16529 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 16530 } else { 16531 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 16532 } 16533 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 16534 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 16535 /* 16536 * mask read length error interrupts in brb for parser 16537 * (parsing unit and 'checksum and crc' unit) 16538 * these errors are legal (PU reads fixed length and CAC can cause 16539 * read length error on truncated packets) 16540 */ 16541 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 16542 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 16543 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 16544 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 16545 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 16546 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 16547 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 16548 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 16549 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 16550 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 16551 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 16552 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 16553 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 16554 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 16555 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 16556 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 16557 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 16558 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ 16559 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 16560 16561 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 16562 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 16563 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 16564 if (!CHIP_IS_E1x(sc)) { 16565 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 16566 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 16567 } 16568 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 16569 16570 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 16571 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 16572 REG_WR(sc, 
TCM_REG_TCM_INT_MASK, 0);
16573 /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16574
16575 if (!CHIP_IS_E1x(sc)) {
16576 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16577 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16578 }
16579
16580 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16581 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16582 /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16583 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
16584 }
16585
16586 /**
16587 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16588 *
16589 * @sc: driver handle
16590 */
16591 static int
16592 bxe_init_hw_common(struct bxe_softc *sc)
16593 {
16594 uint8_t abs_func_id;
16595 uint32_t val;
16596
16597 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16598 SC_ABS_FUNC(sc));
16599
16600 /*
16601 * take the RESET lock to protect undi_unload flow from accessing
16602 * registers while we are resetting the chip
16603 */
16604 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16605
16606 bxe_reset_common(sc);
16607
16608 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
16609
16610 val = 0xfffc;
16611 if (CHIP_IS_E3(sc)) {
16612 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16613 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16614 }
16615
16616 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
16617
16618 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16619
16620 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
16621 BLOGD(sc, DBG_LOAD, "after misc block init\n");
16622
16623 if (!CHIP_IS_E1x(sc)) {
16624 /*
16625 * In 4-port or 2-port mode we need to turn off master-enable for
16626 * everyone. After that we turn it back on for self. So, we disregard
16627 * multi-function, and always disable all functions on the given path,
16628 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
16629 */
16630 for (abs_func_id = SC_PATH(sc);
16631 abs_func_id < (E2_FUNC_MAX * 2);
16632 abs_func_id += 2) {
16633 if (abs_func_id == SC_ABS_FUNC(sc)) {
16634 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
16635 continue;
16636 }
16637
16638 bxe_pretend_func(sc, abs_func_id);
16639
16640 /* clear pf enable */
16641 bxe_pf_disable(sc);
16642
16643 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16644 }
16645 }
16646
16647 BLOGD(sc, DBG_LOAD, "after pf disable\n");
16648
16649 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
16650
16651 if (CHIP_IS_E1(sc)) {
16652 /*
16653 * enable HW interrupt from PXP on USDM overflow
16654 * bit 16 on INT_MASK_0
16655 */
16656 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16657 }
16658
16659 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
16660 bxe_init_pxp(sc);
16661
16662 #ifdef __BIG_ENDIAN
16663 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
16664 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
16665 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
16666 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
16667 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
16668 /* make sure this value is 0 */
16669 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
16670
16671 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
16672 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
16673 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
16674 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
16675 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
16676 #endif
16677
16678 ecore_ilt_init_page_size(sc, INITOP_SET);
16679
16680 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
16681 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
16682 }
16683
16684 /* let the HW do its magic... */
16685 DELAY(100000);
16686
16687 /* finish PXP init */
16688 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
16689 if (val != 1) {
16690 BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
16691 val);
16692 return (-1);
16693 }
16694 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
16695 if (val != 1) {
16696 BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
16697 return (-1);
16698 }
16699
16700 BLOGD(sc, DBG_LOAD, "after pxp init\n");
16701
16702 /*
16703 * Timer bug workaround for E2 only. We need to set the entire ILT to have
16704 * entries with value "0" and valid bit on. This needs to be done by the
16705 * first PF that is loaded in a path (i.e. common phase)
16706 */
16707 if (!CHIP_IS_E1x(sc)) {
16708 /*
16709 * In E2 there is a bug in the timers block that can cause function 6 / 7
16710 * (i.e. vnic3) to start even if it is marked as "scan-off".
16711 * This occurs when a different function (func2,3) is being marked
16712 * as "scan-off". Real-life scenario for example: if a driver is being
16713 * load-unloaded while func6,7 are down. This will cause the timer to access
16714 * the ilt, translate to a logical address and send a request to read/write.
16715 * Since the ilt for the function that is down is not valid, this will cause
16716 * a translation error which is unrecoverable.
16717 * The Workaround is intended to make sure that when this happens nothing
16718 * fatal will occur. The workaround:
16719 * 1. First PF driver which loads on a path will:
16720 * a. After taking the chip out of reset, by using pretend,
16721 * it will write "0" to the following registers of
16722 * the other vnics.
16723 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16724 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
16725 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
16726 * And for itself it will write '1' to
16727 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
16728 * dmae-operations (writing to pram for example.)
16729 * note: can be done for only function 6,7 but cleaner this
16730 * way.
16731 * b. Write zero+valid to the entire ILT.
16732 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
16733 * VNIC3 (of that port). The range allocated will be the
16734 * entire ILT. This is needed to prevent ILT range error.
16735 * 2. Any PF driver load flow:
16736 * a. ILT update with the physical addresses of the allocated
16737 * logical pages.
16738 * b. Wait 20msec. - note that this timeout is needed to make
16739 * sure there are no requests in one of the PXP internal
16740 * queues with "old" ILT addresses.
16741 * c. PF enable in the PGLC.
16742 * d. Clear the was_error of the PF in the PGLC. (could have
16743 * occurred while driver was down)
16744 * e. PF enable in the CFC (WEAK + STRONG)
16745 * f. Timers scan enable
16746 * 3. PF driver unload flow:
16747 * a. Clear the Timers scan_en.
16748 * b. Polling for scan_on=0 for that PF.
16749 * c. Clear the PF enable bit in the PXP.
16750 * d. Clear the PF enable in the CFC (WEAK + STRONG)
16751 * e. Write zero+valid to all ILT entries (The valid bit must
16752 * stay set)
16753 * f. If this is VNIC 3 of a port then also init
16754 * first_timers_ilt_entry to zero and last_timers_ilt_entry
16755 * to the last entry in the ILT.
16756 *
16757 * Notes:
16758 * Currently the PF error in the PGLC is non recoverable.
16759 * In the future there will be a recovery routine for this error.
16760 * Currently attention is masked.
16761 * Having an MCP lock on the load/unload process does not guarantee that
16762 * there is no Timer disable during Func6/7 enable. This is because the
16763 * Timers scan is currently being cleared by the MCP on FLR.
16764 * Step 2.d can be done only for PF6/7 and the driver can also check if
16765 * there is an error before clearing it. But the flow above is simpler and
16766 * more general.
16767 * All ILT entries are written by zero+valid and not just PF6/7
16768 * ILT entries since in the future the ILT entries allocation for
16769 * PF-s might be dynamic.
16770 */
16771 struct ilt_client_info ilt_cli;
16772 struct ecore_ilt ilt;
16773
16774 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
16775 memset(&ilt, 0, sizeof(struct ecore_ilt));
16776
16777 /* initialize dummy TM client */
16778 ilt_cli.start = 0;
16779 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
16780 ilt_cli.client_num = ILT_CLIENT_TM;
16781
16782 /*
16783 * Step 1: set zeroes to all ilt page entries with valid bit on
16784 * Step 2: set the timers first/last ilt entry to point
16785 * to the entire range to prevent ILT range error for 3rd/4th
16786 * vnic (this code assumes existence of the vnic)
16787 *
16788 * both steps performed by call to ecore_ilt_client_init_op()
16789 * with dummy TM client
16790 *
16791 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
16792 * and its sibling are split registers
16793 */
16794
16795 bxe_pretend_func(sc, (SC_PATH(sc) + 6));
16796 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
16797 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16798
16799 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
16800 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
16801 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
16802 }
16803
16804 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
16805 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
16806
16807 if (!CHIP_IS_E1x(sc)) {
16808 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
16809 (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
16810
16811 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
16812 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
16813
16814 /* let the HW do its magic...
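 * (the loop below polls ATC_REG_ATC_INIT_DONE roughly every 200 msec;
 * factor allows up to 1000 extra polls on emulation and 400 on FPGA,
 * while on production silicon factor is 0 and a single delayed read
 * is performed)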
*/ 16815 do { 16816 DELAY(200000); 16817 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 16818 } while (factor-- && (val != 1)); 16819 16820 if (val != 1) { 16821 BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val); 16822 return (-1); 16823 } 16824 } 16825 16826 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); 16827 16828 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 16829 16830 bxe_iov_init_dmae(sc); 16831 16832 /* clean the DMAE memory */ 16833 sc->dmae_ready = 1; 16834 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); 16835 16836 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 16837 16838 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 16839 16840 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 16841 16842 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 16843 16844 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 16845 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 16846 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 16847 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 16848 16849 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 16850 16851 /* QM queues pointers table */ 16852 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 16853 16854 /* soft reset pulse */ 16855 REG_WR(sc, QM_REG_SOFT_RESET, 1); 16856 REG_WR(sc, QM_REG_SOFT_RESET, 0); 16857 16858 if (CNIC_SUPPORT(sc)) 16859 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 16860 16861 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 16862 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); 16863 if (!CHIP_REV_IS_SLOW(sc)) { 16864 /* enable hw interrupt from doorbell Q */ 16865 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 16866 } 16867 16868 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 16869 16870 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 16871 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 16872 16873 if (!CHIP_IS_E1(sc)) { 16874 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 16875 } 16876 16877 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 16878 if (IS_MF_AFEX(sc)) { 16879 /* 16880 * configure that AFEX and VLAN headers must be 16881 * received in AFEX mode 16882 */ 16883 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 16884 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 16885 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 16886 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 16887 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 16888 } else { 16889 /* 16890 * Bit-map indicating which L2 hdrs may appear 16891 * after the basic Ethernet header 16892 */ 16893 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 16894 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 16895 } 16896 } 16897 16898 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 16899 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 16900 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 16901 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 16902 16903 if (!CHIP_IS_E1x(sc)) { 16904 /* reset VFC memories */ 16905 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 16906 VFC_MEMORIES_RST_REG_CAM_RST | 16907 VFC_MEMORIES_RST_REG_RAM_RST); 16908 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 16909 VFC_MEMORIES_RST_REG_CAM_RST | 16910 VFC_MEMORIES_RST_REG_RAM_RST); 16911 16912 DELAY(20000); 16913 } 16914 16915 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 16916 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 16917 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 16918 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 16919 16920 /* sync semi rtc */ 16921 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 16922 0x80000000); 16923 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 16924 0x80000000); 16925 16926 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 16927 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 16928 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 16929 16930 if (!CHIP_IS_E1x(sc)) { 16931 if (IS_MF_AFEX(sc)) { 16932 /* 16933 * configure that AFEX and VLAN headers must be 16934 * sent in AFEX mode 16935 */ 16936 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 16937 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 16938 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 16939 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 16940 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 16941 } else { 16942 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 16943 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 16944 } 16945 } 16946 16947 REG_WR(sc, SRC_REG_SOFT_RST, 1); 16948 16949 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 16950 16951 if (CNIC_SUPPORT(sc)) { 16952 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 16953 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 16954 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 16955 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 16956 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 16957 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 16958 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 16959 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 16960 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 16961 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 16962 } 16963 REG_WR(sc, SRC_REG_SOFT_RST, 0); 16964 16965 if (sizeof(union cdu_context) != 1024) { 16966 /* we currently assume that a context is 1024 bytes */ 16967 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", 16968 (long)sizeof(union cdu_context)); 16969 } 16970 16971 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 16972 val = (4 << 24) + (0 << 12) + 1024; 16973 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 16974 16975 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 16976 16977 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 16978 /* enable context validation interrupt from CFC */ 16979 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 16980 16981 /* set the thresholds to prevent CFC/CDU race */ 16982 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 16983 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 16984 16985 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { 16986 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 16987 } 16988 16989 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 16990 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 16991 16992 /* Reset PCIE errors for debug */ 16993 REG_WR(sc, 0x2814, 0xffffffff); 16994 REG_WR(sc, 0x3820, 0xffffffff); 16995 16996 if (!CHIP_IS_E1x(sc)) { 
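/*
 * The PCICFG writes below appear to clear latched "unsupported
 * request" transaction-layer error status for this function and the
 * other PCIe functions; this is an inference from the
 * PXPCS_TL_*_ERR_UNSPPORT* bit names, in the same spirit as the
 * "Reset PCIE errors for debug" writes above.
 */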
16997 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 16998 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 16999 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 17000 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 17001 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 17002 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 17003 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 17004 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 17005 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 17006 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 17007 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 17008 } 17009 17010 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); 17011 17012 if (!CHIP_IS_E1(sc)) { 17013 /* in E3 this done in per-port section */ 17014 if (!CHIP_IS_E3(sc)) 17015 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17016 } 17017 17018 if (CHIP_IS_E1H(sc)) { 17019 /* not applicable for E2 (and above ...) */ 17020 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); 17021 } 17022 17023 if (CHIP_REV_IS_SLOW(sc)) { 17024 DELAY(200000); 17025 } 17026 17027 /* finish CFC init */ 17028 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); 17029 if (val != 1) { 17030 BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val); 17031 return (-1); 17032 } 17033 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); 17034 if (val != 1) { 17035 BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val); 17036 return (-1); 17037 } 17038 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 17039 if (val != 1) { 17040 BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val); 17041 return (-1); 17042 } 17043 REG_WR(sc, CFC_REG_DEBUG0, 0); 17044 17045 if (CHIP_IS_E1(sc)) { 17046 /* read NIG statistic to see if this is our first up since powerup */ 17047 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 17048 val = *BXE_SP(sc, wb_data[0]); 17049 17050 /* do internal memory self test */ 17051 if ((val == 0) && bxe_int_mem_test(sc)) { 17052 BLOGE(sc, "internal mem self test failed val=0x%x\n", val); 17053 return (-1); 17054 } 17055 } 17056 17057 bxe_setup_fan_failure_detection(sc); 17058 17059 /* clear PXP2 attentions */ 17060 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 17061 17062 bxe_enable_blocks_attention(sc); 17063 17064 if (!CHIP_REV_IS_SLOW(sc)) { 17065 ecore_enable_blocks_parity(sc); 17066 } 17067 17068 if (!BXE_NOMCP(sc)) { 17069 if (CHIP_IS_E1x(sc)) { 17070 bxe_common_init_phy(sc); 17071 } 17072 } 17073 17074 return (0); 17075 } 17076 17077 /** 17078 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase. 17079 * 17080 * @sc: driver handle 17081 */ 17082 static int 17083 bxe_init_hw_common_chip(struct bxe_softc *sc) 17084 { 17085 int rc = bxe_init_hw_common(sc); 17086 17087 if (rc) { 17088 BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc); 17089 return (rc); 17090 } 17091 17092 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 17093 if (!BXE_NOMCP(sc)) { 17094 bxe_common_init_phy(sc); 17095 } 17096 17097 return (0); 17098 } 17099 17100 static int 17101 bxe_init_hw_port(struct bxe_softc *sc) 17102 { 17103 int port = SC_PORT(sc); 17104 int init_phase = port ? 
PHASE_PORT1 : PHASE_PORT0; 17105 uint32_t low, high; 17106 uint32_t val; 17107 17108 BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port); 17109 17110 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 17111 17112 ecore_init_block(sc, BLOCK_MISC, init_phase); 17113 ecore_init_block(sc, BLOCK_PXP, init_phase); 17114 ecore_init_block(sc, BLOCK_PXP2, init_phase); 17115 17116 /* 17117 * Timers bug workaround: disables the pf_master bit in pglue at 17118 * common phase, we need to enable it here before any dmae access are 17119 * attempted. Therefore we manually added the enable-master to the 17120 * port phase (it also happens in the function phase) 17121 */ 17122 if (!CHIP_IS_E1x(sc)) { 17123 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17124 } 17125 17126 ecore_init_block(sc, BLOCK_ATC, init_phase); 17127 ecore_init_block(sc, BLOCK_DMAE, init_phase); 17128 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 17129 ecore_init_block(sc, BLOCK_QM, init_phase); 17130 17131 ecore_init_block(sc, BLOCK_TCM, init_phase); 17132 ecore_init_block(sc, BLOCK_UCM, init_phase); 17133 ecore_init_block(sc, BLOCK_CCM, init_phase); 17134 ecore_init_block(sc, BLOCK_XCM, init_phase); 17135 17136 /* QM cid (connection) count */ 17137 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); 17138 17139 if (CNIC_SUPPORT(sc)) { 17140 ecore_init_block(sc, BLOCK_TM, init_phase); 17141 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20); 17142 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 17143 } 17144 17145 ecore_init_block(sc, BLOCK_DORQ, init_phase); 17146 17147 ecore_init_block(sc, BLOCK_BRB1, init_phase); 17148 17149 if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) { 17150 if (IS_MF(sc)) { 17151 low = (BXE_ONE_PORT(sc) ? 160 : 246); 17152 } else if (sc->mtu > 4096) { 17153 if (BXE_ONE_PORT(sc)) { 17154 low = 160; 17155 } else { 17156 val = sc->mtu; 17157 /* (24*1024 + val*4)/256 */ 17158 low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); 17159 } 17160 } else { 17161 low = (BXE_ONE_PORT(sc) ? 80 : 160); 17162 } 17163 high = (low + 56); /* 14*1024/256 */ 17164 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 17165 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 17166 } 17167 17168 if (CHIP_IS_MODE_4_PORT(sc)) { 17169 REG_WR(sc, SC_PORT(sc) ? 17170 BRB1_REG_MAC_GUARANTIED_1 : 17171 BRB1_REG_MAC_GUARANTIED_0, 40); 17172 } 17173 17174 ecore_init_block(sc, BLOCK_PRS, init_phase); 17175 if (CHIP_IS_E3B0(sc)) { 17176 if (IS_MF_AFEX(sc)) { 17177 /* configure headers for AFEX mode */ 17178 REG_WR(sc, SC_PORT(sc) ? 17179 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17180 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 17181 REG_WR(sc, SC_PORT(sc) ? 17182 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 17183 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 17184 REG_WR(sc, SC_PORT(sc) ? 17185 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 17186 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 17187 } else { 17188 /* Ovlan exists only if we are in multi-function + 17189 * switch-dependent mode, in switch-independent there 17190 * is no ovlan headers 17191 */ 17192 REG_WR(sc, SC_PORT(sc) ? 17193 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17194 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 17195 (sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6)); 17196 } 17197 } 17198 17199 ecore_init_block(sc, BLOCK_TSDM, init_phase); 17200 ecore_init_block(sc, BLOCK_CSDM, init_phase); 17201 ecore_init_block(sc, BLOCK_USDM, init_phase); 17202 ecore_init_block(sc, BLOCK_XSDM, init_phase); 17203 17204 ecore_init_block(sc, BLOCK_TSEM, init_phase); 17205 ecore_init_block(sc, BLOCK_USEM, init_phase); 17206 ecore_init_block(sc, BLOCK_CSEM, init_phase); 17207 ecore_init_block(sc, BLOCK_XSEM, init_phase); 17208 17209 ecore_init_block(sc, BLOCK_UPB, init_phase); 17210 ecore_init_block(sc, BLOCK_XPB, init_phase); 17211 17212 ecore_init_block(sc, BLOCK_PBF, init_phase); 17213 17214 if (CHIP_IS_E1x(sc)) { 17215 /* configure PBF to work without PAUSE mtu 9000 */ 17216 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 17217 17218 /* update threshold */ 17219 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 17220 /* update init credit */ 17221 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 17222 17223 /* probe changes */ 17224 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1); 17225 DELAY(50); 17226 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0); 17227 } 17228 17229 if (CNIC_SUPPORT(sc)) { 17230 ecore_init_block(sc, BLOCK_SRC, init_phase); 17231 } 17232 17233 ecore_init_block(sc, BLOCK_CDU, init_phase); 17234 ecore_init_block(sc, BLOCK_CFC, init_phase); 17235 17236 if (CHIP_IS_E1(sc)) { 17237 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 17238 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 17239 } 17240 ecore_init_block(sc, BLOCK_HC, init_phase); 17241 17242 ecore_init_block(sc, BLOCK_IGU, init_phase); 17243 17244 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 17245 /* init aeu_mask_attn_func_0/1: 17246 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 17247 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 17248 * bits 4-7 are used for "per vn group attention" */ 17249 val = IS_MF(sc) ? 0xF7 : 0x7; 17250 /* Enable DCBX attention for all but E1 */ 17251 val |= CHIP_IS_E1(sc) ? 0 : 0x10; 17252 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 17253 17254 ecore_init_block(sc, BLOCK_NIG, init_phase); 17255 17256 if (!CHIP_IS_E1x(sc)) { 17257 /* Bit-map indicating which L2 hdrs may appear after the 17258 * basic Ethernet header 17259 */ 17260 if (IS_MF_AFEX(sc)) { 17261 REG_WR(sc, SC_PORT(sc) ? 17262 NIG_REG_P1_HDRS_AFTER_BASIC : 17263 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 17264 } else { 17265 REG_WR(sc, SC_PORT(sc) ? 17266 NIG_REG_P1_HDRS_AFTER_BASIC : 17267 NIG_REG_P0_HDRS_AFTER_BASIC, 17268 IS_MF_SD(sc) ? 7 : 6); 17269 } 17270 17271 if (CHIP_IS_E3(sc)) { 17272 REG_WR(sc, SC_PORT(sc) ? 17273 NIG_REG_LLH1_MF_MODE : 17274 NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17275 } 17276 } 17277 if (!CHIP_IS_E3(sc)) { 17278 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 17279 } 17280 17281 if (!CHIP_IS_E1(sc)) { 17282 /* 0x2 disable mf_ov, 0x1 enable */ 17283 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 17284 (IS_MF_SD(sc) ? 0x1 : 0x2)); 17285 17286 if (!CHIP_IS_E1x(sc)) { 17287 val = 0; 17288 switch (sc->devinfo.mf_info.mf_mode) { 17289 case MULTI_FUNCTION_SD: 17290 val = 1; 17291 break; 17292 case MULTI_FUNCTION_SI: 17293 case MULTI_FUNCTION_AFEX: 17294 val = 2; 17295 break; 17296 } 17297 17298 REG_WR(sc, (SC_PORT(sc) ? 
NIG_REG_LLH1_CLS_TYPE : 17299 NIG_REG_LLH0_CLS_TYPE), val); 17300 } 17301 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 17302 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 17303 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); 17304 } 17305 17306 /* If SPIO5 is set to generate interrupts, enable it for this port */ 17307 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17308 if (val & MISC_SPIO_SPIO5) { 17309 uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 17310 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 17311 val = REG_RD(sc, reg_addr); 17312 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 17313 REG_WR(sc, reg_addr, val); 17314 } 17315 17316 return (0); 17317 } 17318 17319 static uint32_t 17320 bxe_flr_clnup_reg_poll(struct bxe_softc *sc, 17321 uint32_t reg, 17322 uint32_t expected, 17323 uint32_t poll_count) 17324 { 17325 uint32_t cur_cnt = poll_count; 17326 uint32_t val; 17327 17328 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { 17329 DELAY(FLR_WAIT_INTERVAL); 17330 } 17331 17332 return (val); 17333 } 17334 17335 static int 17336 bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc, 17337 uint32_t reg, 17338 char *msg, 17339 uint32_t poll_cnt) 17340 { 17341 uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); 17342 17343 if (val != 0) { 17344 BLOGE(sc, "%s usage count=%d\n", msg, val); 17345 return (1); 17346 } 17347 17348 return (0); 17349 } 17350 17351 /* Common routines with VF FLR cleanup */ 17352 static uint32_t 17353 bxe_flr_clnup_poll_count(struct bxe_softc *sc) 17354 { 17355 /* adjust polling timeout */ 17356 if (CHIP_REV_IS_EMUL(sc)) { 17357 return (FLR_POLL_CNT * 2000); 17358 } 17359 17360 if (CHIP_REV_IS_FPGA(sc)) { 17361 return (FLR_POLL_CNT * 120); 17362 } 17363 17364 return (FLR_POLL_CNT); 17365 } 17366 17367 static int 17368 bxe_poll_hw_usage_counters(struct bxe_softc *sc, 17369 uint32_t poll_cnt) 17370 { 17371 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ 17372 if (bxe_flr_clnup_poll_hw_counter(sc, 17373 CFC_REG_NUM_LCIDS_INSIDE_PF, 17374 "CFC PF usage counter timed out", 17375 poll_cnt)) { 17376 return (1); 17377 } 17378 17379 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ 17380 if (bxe_flr_clnup_poll_hw_counter(sc, 17381 DORQ_REG_PF_USAGE_CNT, 17382 "DQ PF usage counter timed out", 17383 poll_cnt)) { 17384 return (1); 17385 } 17386 17387 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ 17388 if (bxe_flr_clnup_poll_hw_counter(sc, 17389 QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc), 17390 "QM PF usage counter timed out", 17391 poll_cnt)) { 17392 return (1); 17393 } 17394 17395 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ 17396 if (bxe_flr_clnup_poll_hw_counter(sc, 17397 TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc), 17398 "Timers VNIC usage counter timed out", 17399 poll_cnt)) { 17400 return (1); 17401 } 17402 17403 if (bxe_flr_clnup_poll_hw_counter(sc, 17404 TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc), 17405 "Timers NUM_SCANS usage counter timed out", 17406 poll_cnt)) { 17407 return (1); 17408 } 17409 17410 /* Wait DMAE PF usage counter to zero */ 17411 if (bxe_flr_clnup_poll_hw_counter(sc, 17412 dmae_reg_go_c[INIT_DMAE_C(sc)], 17413 "DMAE dommand register timed out", 17414 poll_cnt)) { 17415 return (1); 17416 } 17417 17418 return (0); 17419 } 17420 17421 #define OP_GEN_PARAM(param) \ 17422 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) 17423 #define OP_GEN_TYPE(type) \ 17424 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) 17425 #define OP_GEN_AGG_VECT(index) \ 17426 (((index) << 
17421 #define OP_GEN_PARAM(param) \ 17422 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) 17423 #define OP_GEN_TYPE(type) \ 17424 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) 17425 #define OP_GEN_AGG_VECT(index) \ 17426 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 17427 17428 static int 17429 bxe_send_final_clnup(struct bxe_softc *sc, 17430 uint8_t clnup_func, 17431 uint32_t poll_cnt) 17432 { 17433 uint32_t op_gen_command = 0; 17434 uint32_t comp_addr = (BAR_CSTRORM_INTMEM + 17435 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); 17436 int ret = 0; 17437 17438 if (REG_RD(sc, comp_addr)) { 17439 BLOGE(sc, "Cleanup complete was not 0 before sending\n"); 17440 return (1); 17441 } 17442 17443 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); 17444 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); 17445 op_gen_command |= OP_GEN_AGG_VECT(clnup_func); 17446 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 17447 17448 BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); 17449 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); 17450 17451 if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { 17452 BLOGE(sc, "FW final cleanup did not succeed\n"); 17453 BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", 17454 (REG_RD(sc, comp_addr))); 17455 bxe_panic(sc, ("FLR cleanup failed\n")); 17456 return (1); 17457 } 17458 17459 /* Zero completion for next FLR */ 17460 REG_WR(sc, comp_addr, 0); 17461 17462 return (ret); 17463 } 17464 17465 static void 17466 bxe_pbf_pN_buf_flushed(struct bxe_softc *sc, 17467 struct pbf_pN_buf_regs *regs, 17468 uint32_t poll_count) 17469 { 17470 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; 17471 uint32_t cur_cnt = poll_count; 17472 17473 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); 17474 crd = crd_start = REG_RD(sc, regs->crd); 17475 init_crd = REG_RD(sc, regs->init_crd); 17476 17477 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); 17478 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); 17479 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); 17480 17481 while ((crd != init_crd) && 17482 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < 17483 (init_crd - crd_start))) { 17484 if (cur_cnt--) { 17485 DELAY(FLR_WAIT_INTERVAL); 17486 crd = REG_RD(sc, regs->crd); 17487 crd_freed = REG_RD(sc, regs->crd_freed); 17488 } else { 17489 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); 17490 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); 17491 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); 17492 break; 17493 } 17494 } 17495 17496 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n", 17497 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 17498 } 17499 17500 static void 17501 bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc, 17502 struct pbf_pN_cmd_regs *regs, 17503 uint32_t poll_count) 17504 { 17505 uint32_t occup, to_free, freed, freed_start; 17506 uint32_t cur_cnt = poll_count; 17507 17508 occup = to_free = REG_RD(sc, regs->lines_occup); 17509 freed = freed_start = REG_RD(sc, regs->lines_freed); 17510 17511 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 17512 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); 17513 17514 while (occup && 17515 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { 17516 if (cur_cnt--) { 17517 DELAY(FLR_WAIT_INTERVAL); 17518 occup = REG_RD(sc, regs->lines_occup); 17519 freed = REG_RD(sc, regs->lines_freed); 17520 } else { 17521 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); 17522 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 17523 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : 
s:%x\n", regs->pN, freed); 17524 break; 17525 } 17526 } 17527 17528 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", 17529 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 17530 } 17531 17532 static void 17533 bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) 17534 { 17535 struct pbf_pN_cmd_regs cmd_regs[] = { 17536 {0, (CHIP_IS_E3B0(sc)) ? 17537 PBF_REG_TQ_OCCUPANCY_Q0 : 17538 PBF_REG_P0_TQ_OCCUPANCY, 17539 (CHIP_IS_E3B0(sc)) ? 17540 PBF_REG_TQ_LINES_FREED_CNT_Q0 : 17541 PBF_REG_P0_TQ_LINES_FREED_CNT}, 17542 {1, (CHIP_IS_E3B0(sc)) ? 17543 PBF_REG_TQ_OCCUPANCY_Q1 : 17544 PBF_REG_P1_TQ_OCCUPANCY, 17545 (CHIP_IS_E3B0(sc)) ? 17546 PBF_REG_TQ_LINES_FREED_CNT_Q1 : 17547 PBF_REG_P1_TQ_LINES_FREED_CNT}, 17548 {4, (CHIP_IS_E3B0(sc)) ? 17549 PBF_REG_TQ_OCCUPANCY_LB_Q : 17550 PBF_REG_P4_TQ_OCCUPANCY, 17551 (CHIP_IS_E3B0(sc)) ? 17552 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 17553 PBF_REG_P4_TQ_LINES_FREED_CNT} 17554 }; 17555 17556 struct pbf_pN_buf_regs buf_regs[] = { 17557 {0, (CHIP_IS_E3B0(sc)) ? 17558 PBF_REG_INIT_CRD_Q0 : 17559 PBF_REG_P0_INIT_CRD , 17560 (CHIP_IS_E3B0(sc)) ? 17561 PBF_REG_CREDIT_Q0 : 17562 PBF_REG_P0_CREDIT, 17563 (CHIP_IS_E3B0(sc)) ? 17564 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 17565 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 17566 {1, (CHIP_IS_E3B0(sc)) ? 17567 PBF_REG_INIT_CRD_Q1 : 17568 PBF_REG_P1_INIT_CRD, 17569 (CHIP_IS_E3B0(sc)) ? 17570 PBF_REG_CREDIT_Q1 : 17571 PBF_REG_P1_CREDIT, 17572 (CHIP_IS_E3B0(sc)) ? 17573 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 17574 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 17575 {4, (CHIP_IS_E3B0(sc)) ? 17576 PBF_REG_INIT_CRD_LB_Q : 17577 PBF_REG_P4_INIT_CRD, 17578 (CHIP_IS_E3B0(sc)) ? 17579 PBF_REG_CREDIT_LB_Q : 17580 PBF_REG_P4_CREDIT, 17581 (CHIP_IS_E3B0(sc)) ? 17582 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 17583 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 17584 }; 17585 17586 int i; 17587 17588 /* Verify the command queues are flushed P0, P1, P4 */ 17589 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { 17590 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); 17591 } 17592 17593 /* Verify the transmission buffers are flushed P0, P1, P4 */ 17594 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { 17595 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); 17596 } 17597 } 17598 17599 static void 17600 bxe_hw_enable_status(struct bxe_softc *sc) 17601 { 17602 uint32_t val; 17603 17604 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); 17605 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); 17606 17607 val = REG_RD(sc, PBF_REG_DISABLE_PF); 17608 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); 17609 17610 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); 17611 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); 17612 17613 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); 17614 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); 17615 17616 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 17617 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); 17618 17619 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 17620 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); 17621 17622 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 17623 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); 17624 17625 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 17626 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); 17627 } 17628 17629 static int 17630 bxe_pf_flr_clnup(struct bxe_softc *sc) 17631 { 17632 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); 17633 17634 
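/*
 * Overview of the cleanup performed below: re-enable PF target reads,
 * poll the CFC/DQ/QM/Timers/DMAE usage counters until they drain, send
 * the FW final-cleanup command, verify the PBF TX buffers and command
 * queues have flushed, then re-enable this PF as master on PGLUE_B.
 */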
BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); 17635 17636 /* Re-enable PF target read access */ 17637 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 17638 17639 /* Poll HW usage counters */ 17640 BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); 17641 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { 17642 return (-1); 17643 } 17644 17645 /* Zero the igu 'trailing edge' and 'leading edge' */ 17646 17647 /* Send the FW cleanup command */ 17648 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { 17649 return (-1); 17650 } 17651 17652 /* ATC cleanup */ 17653 17654 /* Verify TX hw is flushed */ 17655 bxe_tx_hw_flushed(sc, poll_cnt); 17656 17657 /* Wait 100ms (not adjusted according to platform) */ 17658 DELAY(100000); 17659 17660 /* Verify no pending pci transactions */ 17661 if (bxe_is_pcie_pending(sc)) { 17662 BLOGE(sc, "PCIE Transactions still pending\n"); 17663 } 17664 17665 /* Debug */ 17666 bxe_hw_enable_status(sc); 17667 17668 /* 17669 * Master enable - Due to WB DMAE writes performed before this 17670 * register is re-initialized as part of the regular function init 17671 */ 17672 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17673 17674 return (0); 17675 } 17676 17677 static int 17678 bxe_init_hw_func(struct bxe_softc *sc) 17679 { 17680 int port = SC_PORT(sc); 17681 int func = SC_FUNC(sc); 17682 int init_phase = PHASE_PF0 + func; 17683 struct ecore_ilt *ilt = sc->ilt; 17684 uint16_t cdu_ilt_start; 17685 uint32_t addr, val; 17686 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; 17687 int i, main_mem_width, rc; 17688 17689 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); 17690 17691 /* FLR cleanup */ 17692 if (!CHIP_IS_E1x(sc)) { 17693 rc = bxe_pf_flr_clnup(sc); 17694 if (rc) { 17695 BLOGE(sc, "FLR cleanup failed!\n"); 17696 // XXX bxe_fw_dump(sc); 17697 // XXX bxe_idle_chk(sc); 17698 return (rc); 17699 } 17700 } 17701 17702 /* set MSI reconfigure capability */ 17703 if (sc->devinfo.int_block == INT_BLOCK_HC) { 17704 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 17705 val = REG_RD(sc, addr); 17706 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 17707 REG_WR(sc, addr, val); 17708 } 17709 17710 ecore_init_block(sc, BLOCK_PXP, init_phase); 17711 ecore_init_block(sc, BLOCK_PXP2, init_phase); 17712 17713 ilt = sc->ilt; 17714 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 17715 17716 for (i = 0; i < L2_ILT_LINES(sc); i++) { 17717 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; 17718 ilt->lines[cdu_ilt_start + i].page_mapping = 17719 sc->context[i].vcxt_dma.paddr; 17720 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; 17721 } 17722 ecore_ilt_init_op(sc, INITOP_SET); 17723 17724 /* Set NIC mode */ 17725 REG_WR(sc, PRS_REG_NIC_MODE, 1); 17726 BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); 17727 17728 if (!CHIP_IS_E1x(sc)) { 17729 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; 17730 17731 /* Turn on a single ISR mode in IGU if driver is going to use 17732 * INT#x or MSI 17733 */ 17734 if (sc->interrupt_mode != INTR_MODE_MSIX) { 17735 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 17736 } 17737 17738 /* 17739 * Timers workaround bug: function init part. 
17740 * Need to wait 20msec after initializing ILT, 17741 * needed to make sure there are no requests in 17742 * one of the PXP internal queues with "old" ILT addresses 17743 */ 17744 DELAY(20000); 17745 17746 /* 17747 * Master enable - Due to WB DMAE writes performed before this 17748 * register is re-initialized as part of the regular function 17749 * init 17750 */ 17751 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17752 /* Enable the function in IGU */ 17753 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); 17754 } 17755 17756 sc->dmae_ready = 1; 17757 17758 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 17759 17760 if (!CHIP_IS_E1x(sc)) 17761 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 17762 17763 ecore_init_block(sc, BLOCK_ATC, init_phase); 17764 ecore_init_block(sc, BLOCK_DMAE, init_phase); 17765 ecore_init_block(sc, BLOCK_NIG, init_phase); 17766 ecore_init_block(sc, BLOCK_SRC, init_phase); 17767 ecore_init_block(sc, BLOCK_MISC, init_phase); 17768 ecore_init_block(sc, BLOCK_TCM, init_phase); 17769 ecore_init_block(sc, BLOCK_UCM, init_phase); 17770 ecore_init_block(sc, BLOCK_CCM, init_phase); 17771 ecore_init_block(sc, BLOCK_XCM, init_phase); 17772 ecore_init_block(sc, BLOCK_TSEM, init_phase); 17773 ecore_init_block(sc, BLOCK_USEM, init_phase); 17774 ecore_init_block(sc, BLOCK_CSEM, init_phase); 17775 ecore_init_block(sc, BLOCK_XSEM, init_phase); 17776 17777 if (!CHIP_IS_E1x(sc)) 17778 REG_WR(sc, QM_REG_PF_EN, 1); 17779 17780 if (!CHIP_IS_E1x(sc)) { 17781 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 17782 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 17783 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 17784 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 17785 } 17786 ecore_init_block(sc, BLOCK_QM, init_phase); 17787 17788 ecore_init_block(sc, BLOCK_TM, init_phase); 17789 ecore_init_block(sc, BLOCK_DORQ, init_phase); 17790 17791 bxe_iov_init_dq(sc); 17792 17793 ecore_init_block(sc, BLOCK_BRB1, init_phase); 17794 ecore_init_block(sc, BLOCK_PRS, init_phase); 17795 ecore_init_block(sc, BLOCK_TSDM, init_phase); 17796 ecore_init_block(sc, BLOCK_CSDM, init_phase); 17797 ecore_init_block(sc, BLOCK_USDM, init_phase); 17798 ecore_init_block(sc, BLOCK_XSDM, init_phase); 17799 ecore_init_block(sc, BLOCK_UPB, init_phase); 17800 ecore_init_block(sc, BLOCK_XPB, init_phase); 17801 ecore_init_block(sc, BLOCK_PBF, init_phase); 17802 if (!CHIP_IS_E1x(sc)) 17803 REG_WR(sc, PBF_REG_DISABLE_PF, 0); 17804 17805 ecore_init_block(sc, BLOCK_CDU, init_phase); 17806 17807 ecore_init_block(sc, BLOCK_CFC, init_phase); 17808 17809 if (!CHIP_IS_E1x(sc)) 17810 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); 17811 17812 if (IS_MF(sc)) { 17813 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 17814 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); 17815 } 17816 17817 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 17818 17819 /* HC init per function */ 17820 if (sc->devinfo.int_block == INT_BLOCK_HC) { 17821 if (CHIP_IS_E1H(sc)) { 17822 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 17823 17824 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 17825 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 17826 } 17827 ecore_init_block(sc, BLOCK_HC, init_phase); 17828 17829 } else { 17830 int num_segs, sb_idx, prod_offset; 17831 17832 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 17833 17834 if (!CHIP_IS_E1x(sc)) { 17835 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 17836 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 17837 } 17838 17839 
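/*
 * IGU mode: after the block init below, every producer owned by this
 * function (all of its non-default status blocks plus its default SB)
 * is zeroed in IGU_REG_PROD_CONS_MEMORY and acked with an IGU_INT_NOP
 * update so that hardware and driver indices start out in sync.
 */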
ecore_init_block(sc, BLOCK_IGU, init_phase); 17840 17841 if (!CHIP_IS_E1x(sc)) { 17842 int dsb_idx = 0; 17843 /** 17844 * Producer memory: 17845 * E2 mode: addresses 0-135 match the mapping memory; 17846 * 136 - PF0 default prod; 137 - PF1 default prod; 17847 * 138 - PF2 default prod; 139 - PF3 default prod; 17848 * 140 - PF0 attn prod; 141 - PF1 attn prod; 17849 * 142 - PF2 attn prod; 143 - PF3 attn prod; 17850 * 144-147 reserved. 17851 * 17852 * E1.5 mode - In backward compatible mode; 17853 * for non default SB; each even line in the memory 17854 * holds the U producer and each odd line holds 17855 * the C producer. The first 128 producers are for 17856 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 17857 * producers are for the DSB for each PF. 17858 * Each PF has five segments: (the order inside each 17859 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 17860 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 17861 * 144-147 attn prods; 17862 */ 17863 /* non-default-status-blocks */ 17864 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 17865 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 17866 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { 17867 prod_offset = (sc->igu_base_sb + sb_idx) * 17868 num_segs; 17869 17870 for (i = 0; i < num_segs; i++) { 17871 addr = IGU_REG_PROD_CONS_MEMORY + 17872 (prod_offset + i) * 4; 17873 REG_WR(sc, addr, 0); 17874 } 17875 /* send consumer update with value 0 */ 17876 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, 17877 USTORM_ID, 0, IGU_INT_NOP, 1); 17878 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); 17879 } 17880 17881 /* default-status-blocks */ 17882 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 17883 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 17884 17885 if (CHIP_IS_MODE_4_PORT(sc)) 17886 dsb_idx = SC_FUNC(sc); 17887 else 17888 dsb_idx = SC_VN(sc); 17889 17890 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? 17891 IGU_BC_BASE_DSB_PROD + dsb_idx : 17892 IGU_NORM_BASE_DSB_PROD + dsb_idx); 17893 17894 /* 17895 * igu prods come in chunks of E1HVN_MAX (4) - 17896 * it does not matter what the current chip mode is 17897 */ 17898 for (i = 0; i < (num_segs * E1HVN_MAX); 17899 i += E1HVN_MAX) { 17900 addr = IGU_REG_PROD_CONS_MEMORY + 17901 (prod_offset + i)*4; 17902 REG_WR(sc, addr, 0); 17903 } 17904 /* send consumer update with 0 */ 17905 if (CHIP_INT_MODE_IS_BC(sc)) { 17906 bxe_ack_sb(sc, sc->igu_dsb_id, 17907 USTORM_ID, 0, IGU_INT_NOP, 1); 17908 bxe_ack_sb(sc, sc->igu_dsb_id, 17909 CSTORM_ID, 0, IGU_INT_NOP, 1); 17910 bxe_ack_sb(sc, sc->igu_dsb_id, 17911 XSTORM_ID, 0, IGU_INT_NOP, 1); 17912 bxe_ack_sb(sc, sc->igu_dsb_id, 17913 TSTORM_ID, 0, IGU_INT_NOP, 1); 17914 bxe_ack_sb(sc, sc->igu_dsb_id, 17915 ATTENTION_ID, 0, IGU_INT_NOP, 1); 17916 } else { 17917 bxe_ack_sb(sc, sc->igu_dsb_id, 17918 USTORM_ID, 0, IGU_INT_NOP, 1); 17919 bxe_ack_sb(sc, sc->igu_dsb_id, 17920 ATTENTION_ID, 0, IGU_INT_NOP, 1); 17921 } 17922 bxe_igu_clear_sb(sc, sc->igu_dsb_id); 17923 17924 /* !!! 
these should become driver const once 17925 rf-tool supports split-68 const */ 17926 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 17927 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 17928 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); 17929 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); 17930 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); 17931 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); 17932 } 17933 } 17934 17935 /* Reset PCIE errors for debug */ 17936 REG_WR(sc, 0x2114, 0xffffffff); 17937 REG_WR(sc, 0x2120, 0xffffffff); 17938 17939 if (CHIP_IS_E1x(sc)) { 17940 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ 17941 main_mem_base = HC_REG_MAIN_MEMORY + 17942 SC_PORT(sc) * (main_mem_size * 4); 17943 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 17944 main_mem_width = 8; 17945 17946 val = REG_RD(sc, main_mem_prty_clr); 17947 if (val) { 17948 BLOGD(sc, DBG_LOAD, 17949 "Parity errors in HC block during function init (0x%x)!\n", 17950 val); 17951 } 17952 17953 /* Clear "false" parity errors in MSI-X table */ 17954 for (i = main_mem_base; 17955 i < main_mem_base + main_mem_size * 4; 17956 i += main_mem_width) { 17957 bxe_read_dmae(sc, i, main_mem_width / 4); 17958 bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), 17959 i, main_mem_width / 4); 17960 } 17961 /* Clear HC parity attention */ 17962 REG_RD(sc, main_mem_prty_clr); 17963 } 17964 17965 #if 1 17966 /* Enable STORMs SP logging */ 17967 REG_WR8(sc, BAR_USTRORM_INTMEM + 17968 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 17969 REG_WR8(sc, BAR_TSTRORM_INTMEM + 17970 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 17971 REG_WR8(sc, BAR_CSTRORM_INTMEM + 17972 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 17973 REG_WR8(sc, BAR_XSTRORM_INTMEM + 17974 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 17975 #endif 17976 17977 elink_phy_probe(&sc->link_params); 17978 17979 return (0); 17980 } 17981 17982 static void 17983 bxe_link_reset(struct bxe_softc *sc) 17984 { 17985 if (!BXE_NOMCP(sc)) { 17986 bxe_acquire_phy_lock(sc); 17987 elink_lfa_reset(&sc->link_params, &sc->link_vars); 17988 bxe_release_phy_lock(sc); 17989 } else { 17990 if (!CHIP_REV_IS_SLOW(sc)) { 17991 BLOGW(sc, "Bootcode is missing - cannot reset link\n"); 17992 } 17993 } 17994 } 17995 17996 static void 17997 bxe_reset_port(struct bxe_softc *sc) 17998 { 17999 int port = SC_PORT(sc); 18000 uint32_t val; 18001 18002 /* reset physical Link */ 18003 bxe_link_reset(sc); 18004 18005 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 18006 18007 /* Do not rcv packets to BRB */ 18008 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 18009 /* Do not direct rcv packets that are not for MCP to the BRB */ 18010 REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 18011 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 18012 18013 /* Configure AEU */ 18014 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 18015 18016 DELAY(100000); 18017 18018 /* Check for BRB port occupancy */ 18019 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 18020 if (val) { 18021 BLOGD(sc, DBG_LOAD, 18022 "BRB1 is not empty, %d blocks are occupied\n", val); 18023 } 18024 18025 /* TODO: Close Doorbell port? 
*/ 18026 } 18027 18028 static void 18029 bxe_ilt_wr(struct bxe_softc *sc, 18030 uint32_t index, 18031 bus_addr_t addr) 18032 { 18033 int reg; 18034 uint32_t wb_write[2]; 18035 18036 if (CHIP_IS_E1(sc)) { 18037 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 18038 } else { 18039 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 18040 } 18041 18042 wb_write[0] = ONCHIP_ADDR1(addr); 18043 wb_write[1] = ONCHIP_ADDR2(addr); 18044 REG_WR_DMAE(sc, reg, wb_write, 2); 18045 } 18046 18047 static void 18048 bxe_clear_func_ilt(struct bxe_softc *sc, 18049 uint32_t func) 18050 { 18051 uint32_t i, base = FUNC_ILT_BASE(func); 18052 for (i = base; i < base + ILT_PER_FUNC; i++) { 18053 bxe_ilt_wr(sc, i, 0); 18054 } 18055 } 18056 18057 static void 18058 bxe_reset_func(struct bxe_softc *sc) 18059 { 18060 struct bxe_fastpath *fp; 18061 int port = SC_PORT(sc); 18062 int func = SC_FUNC(sc); 18063 int i; 18064 18065 /* Disable the function in the FW */ 18066 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 18067 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 18068 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 18069 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 18070 18071 /* FP SBs */ 18072 FOR_EACH_ETH_QUEUE(sc, i) { 18073 fp = &sc->fp[i]; 18074 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18075 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 18076 SB_DISABLED); 18077 } 18078 18079 /* SP SB */ 18080 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18081 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 18082 SB_DISABLED); 18083 18084 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { 18085 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0); 18086 } 18087 18088 /* Configure IGU */ 18089 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18090 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18091 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18092 } else { 18093 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18094 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18095 } 18096 18097 if (CNIC_LOADED(sc)) { 18098 /* Disable Timer scan */ 18099 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 18100 /* 18101 * Wait for at least 10ms and up to 2 seconds for the timers 18102 * scan to complete 18103 */ 18104 for (i = 0; i < 200; i++) { 18105 DELAY(10000); 18106 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4)) 18107 break; 18108 } 18109 } 18110 18111 /* Clear ILT */ 18112 bxe_clear_func_ilt(sc, func); 18113 18114 /* 18115 * Timers workaround bug for E2: if this is vnic-3, 18116 * we need to set the entire ilt range for these timers. 
18117 */ 18118 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { 18119 struct ilt_client_info ilt_cli; 18120 /* use dummy TM client */ 18121 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 18122 ilt_cli.start = 0; 18123 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 18124 ilt_cli.client_num = ILT_CLIENT_TM; 18125 18126 ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); 18127 } 18128 18129 /* this assumes that reset_port() was called before reset_func() */ 18130 if (!CHIP_IS_E1x(sc)) { 18131 bxe_pf_disable(sc); 18132 } 18133 18134 sc->dmae_ready = 0; 18135 } 18136 18137 static int 18138 bxe_gunzip_init(struct bxe_softc *sc) 18139 { 18140 return (0); 18141 } 18142 18143 static void 18144 bxe_gunzip_end(struct bxe_softc *sc) 18145 { 18146 return; 18147 } 18148 18149 static int 18150 bxe_init_firmware(struct bxe_softc *sc) 18151 { 18152 if (CHIP_IS_E1(sc)) { 18153 ecore_init_e1_firmware(sc); 18154 sc->iro_array = e1_iro_arr; 18155 } else if (CHIP_IS_E1H(sc)) { 18156 ecore_init_e1h_firmware(sc); 18157 sc->iro_array = e1h_iro_arr; 18158 } else if (!CHIP_IS_E1x(sc)) { 18159 ecore_init_e2_firmware(sc); 18160 sc->iro_array = e2_iro_arr; 18161 } else { 18162 BLOGE(sc, "Unsupported chip revision\n"); 18163 return (-1); 18164 } 18165 18166 return (0); 18167 } 18168 18169 static void 18170 bxe_release_firmware(struct bxe_softc *sc) 18171 { 18172 /* Do nothing */ 18173 return; 18174 } 18175 18176 static int 18177 ecore_gunzip(struct bxe_softc *sc, 18178 const uint8_t *zbuf, 18179 int len) 18180 { 18181 /* XXX : Implement... */ 18182 BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); 18183 return (FALSE); 18184 } 18185 18186 static void 18187 ecore_reg_wr_ind(struct bxe_softc *sc, 18188 uint32_t addr, 18189 uint32_t val) 18190 { 18191 bxe_reg_wr_ind(sc, addr, val); 18192 } 18193 18194 static void 18195 ecore_write_dmae_phys_len(struct bxe_softc *sc, 18196 bus_addr_t phys_addr, 18197 uint32_t addr, 18198 uint32_t len) 18199 { 18200 bxe_write_dmae_phys_len(sc, phys_addr, addr, len); 18201 } 18202 18203 void 18204 ecore_storm_memset_struct(struct bxe_softc *sc, 18205 uint32_t addr, 18206 size_t size, 18207 uint32_t *data) 18208 { 18209 uint8_t i; 18210 for (i = 0; i < size/4; i++) { 18211 REG_WR(sc, addr + (i * 4), data[i]); 18212 } 18213 } 18214 18215 18216 /* 18217 * character device - ioctl interface definitions 18218 */ 18219 18220 18221 #include "bxe_dump.h" 18222 #include "bxe_ioctl.h" 18223 #include <sys/conf.h> 18224 18225 static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, 18226 struct thread *td); 18227 18228 static struct cdevsw bxe_cdevsw = { 18229 .d_version = D_VERSION, 18230 .d_ioctl = bxe_eioctl, 18231 .d_name = "bxecnic", 18232 }; 18233 18234 #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 
0 : (sc->pcie_func & 1)) 18235 18236 18237 #define DUMP_ALL_PRESETS 0x1FFF 18238 #define DUMP_MAX_PRESETS 13 18239 #define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) 18240 #define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) 18241 #define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) 18242 #define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) 18243 #define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) 18244 18245 #define IS_REG_IN_PRESET(presets, idx) \ 18246 ((presets & (1 << (idx-1))) == (1 << (idx-1))) 18247 18248 18249 static int 18250 bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset) 18251 { 18252 if (CHIP_IS_E1(sc)) 18253 return dump_num_registers[0][preset-1]; 18254 else if (CHIP_IS_E1H(sc)) 18255 return dump_num_registers[1][preset-1]; 18256 else if (CHIP_IS_E2(sc)) 18257 return dump_num_registers[2][preset-1]; 18258 else if (CHIP_IS_E3A0(sc)) 18259 return dump_num_registers[3][preset-1]; 18260 else if (CHIP_IS_E3B0(sc)) 18261 return dump_num_registers[4][preset-1]; 18262 else 18263 return 0; 18264 } 18265 18266 static int 18267 bxe_get_total_regs_len32(struct bxe_softc *sc) 18268 { 18269 uint32_t preset_idx; 18270 int regdump_len32 = 0; 18271 18272 18273 /* Calculate the total preset regs length */ 18274 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { 18275 regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx); 18276 } 18277 18278 return regdump_len32; 18279 } 18280 18281 static const uint32_t * 18282 __bxe_get_page_addr_ar(struct bxe_softc *sc) 18283 { 18284 if (CHIP_IS_E2(sc)) 18285 return page_vals_e2; 18286 else if (CHIP_IS_E3(sc)) 18287 return page_vals_e3; 18288 else 18289 return NULL; 18290 } 18291 18292 static uint32_t 18293 __bxe_get_page_reg_num(struct bxe_softc *sc) 18294 { 18295 if (CHIP_IS_E2(sc)) 18296 return PAGE_MODE_VALUES_E2; 18297 else if (CHIP_IS_E3(sc)) 18298 return PAGE_MODE_VALUES_E3; 18299 else 18300 return 0; 18301 } 18302 18303 static const uint32_t * 18304 __bxe_get_page_write_ar(struct bxe_softc *sc) 18305 { 18306 if (CHIP_IS_E2(sc)) 18307 return page_write_regs_e2; 18308 else if (CHIP_IS_E3(sc)) 18309 return page_write_regs_e3; 18310 else 18311 return NULL; 18312 } 18313 18314 static uint32_t 18315 __bxe_get_page_write_num(struct bxe_softc *sc) 18316 { 18317 if (CHIP_IS_E2(sc)) 18318 return PAGE_WRITE_REGS_E2; 18319 else if (CHIP_IS_E3(sc)) 18320 return PAGE_WRITE_REGS_E3; 18321 else 18322 return 0; 18323 } 18324 18325 static const struct reg_addr * 18326 __bxe_get_page_read_ar(struct bxe_softc *sc) 18327 { 18328 if (CHIP_IS_E2(sc)) 18329 return page_read_regs_e2; 18330 else if (CHIP_IS_E3(sc)) 18331 return page_read_regs_e3; 18332 else 18333 return NULL; 18334 } 18335 18336 static uint32_t 18337 __bxe_get_page_read_num(struct bxe_softc *sc) 18338 { 18339 if (CHIP_IS_E2(sc)) 18340 return PAGE_READ_REGS_E2; 18341 else if (CHIP_IS_E3(sc)) 18342 return PAGE_READ_REGS_E3; 18343 else 18344 return 0; 18345 } 18346 18347 static bool 18348 bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info) 18349 { 18350 if (CHIP_IS_E1(sc)) 18351 return IS_E1_REG(reg_info->chips); 18352 else if (CHIP_IS_E1H(sc)) 18353 return IS_E1H_REG(reg_info->chips); 18354 else if (CHIP_IS_E2(sc)) 18355 return IS_E2_REG(reg_info->chips); 18356 else if (CHIP_IS_E3A0(sc)) 18357 return IS_E3A0_REG(reg_info->chips); 18358 else if (CHIP_IS_E3B0(sc)) 18359 return IS_E3B0_REG(reg_info->chips); 18360 else 18361 return 0; 18362 } 18363 18364 static bool 18365 
bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info) 18366 { 18367 if (CHIP_IS_E1(sc)) 18368 return IS_E1_REG(wreg_info->chips); 18369 else if (CHIP_IS_E1H(sc)) 18370 return IS_E1H_REG(wreg_info->chips); 18371 else if (CHIP_IS_E2(sc)) 18372 return IS_E2_REG(wreg_info->chips); 18373 else if (CHIP_IS_E3A0(sc)) 18374 return IS_E3A0_REG(wreg_info->chips); 18375 else if (CHIP_IS_E3B0(sc)) 18376 return IS_E3B0_REG(wreg_info->chips); 18377 else 18378 return 0; 18379 } 18380 18381 /** 18382 * bxe_read_pages_regs - read "paged" registers 18383 * 18384 * @sc device handle 18385 * @p output buffer 18386 * 18387 * Reads "paged" memories: memories that may only be read by first writing to a 18388 * specific address ("write address") and then reading from a specific address 18389 * ("read address"). There may be more than one write address per "page" and 18390 * more than one read address per write address. 18391 */ 18392 static void 18393 bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) 18394 { 18395 uint32_t i, j, k, n; 18396 18397 /* addresses of the paged registers */ 18398 const uint32_t *page_addr = __bxe_get_page_addr_ar(sc); 18399 /* number of paged registers */ 18400 int num_pages = __bxe_get_page_reg_num(sc); 18401 /* write addresses */ 18402 const uint32_t *write_addr = __bxe_get_page_write_ar(sc); 18403 /* number of write addresses */ 18404 int write_num = __bxe_get_page_write_num(sc); 18405 /* read addresses info */ 18406 const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc); 18407 /* number of read addresses */ 18408 int read_num = __bxe_get_page_read_num(sc); 18409 uint32_t addr, size; 18410 18411 for (i = 0; i < num_pages; i++) { 18412 for (j = 0; j < write_num; j++) { 18413 REG_WR(sc, write_addr[j], page_addr[i]); 18414 18415 for (k = 0; k < read_num; k++) { 18416 if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) { 18417 size = read_addr[k].size; 18418 for (n = 0; n < size; n++) { 18419 addr = read_addr[k].addr + n*4; 18420 *p++ = REG_RD(sc, addr); 18421 } 18422 } 18423 } 18424 } 18425 } 18426 return; 18427 } 18428 18429 18430 static int 18431 bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) 18432 { 18433 uint32_t i, j, addr; 18434 const struct wreg_addr *wreg_addr_p = NULL; 18435 18436 if (CHIP_IS_E1(sc)) 18437 wreg_addr_p = &wreg_addr_e1; 18438 else if (CHIP_IS_E1H(sc)) 18439 wreg_addr_p = &wreg_addr_e1h; 18440 else if (CHIP_IS_E2(sc)) 18441 wreg_addr_p = &wreg_addr_e2; 18442 else if (CHIP_IS_E3A0(sc)) 18443 wreg_addr_p = &wreg_addr_e3; 18444 else if (CHIP_IS_E3B0(sc)) 18445 wreg_addr_p = &wreg_addr_e3b0; 18446 else 18447 return (-1); 18448 18449 /* Read the idle_chk registers */ 18450 for (i = 0; i < IDLE_REGS_COUNT; i++) { 18451 if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) && 18452 IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { 18453 for (j = 0; j < idle_reg_addrs[i].size; j++) 18454 *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4); 18455 } 18456 } 18457 18458 /* Read the regular registers */ 18459 for (i = 0; i < REGS_COUNT; i++) { 18460 if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) && 18461 IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { 18462 for (j = 0; j < reg_addrs[i].size; j++) 18463 *p++ = REG_RD(sc, reg_addrs[i].addr + j*4); 18464 } 18465 } 18466 18467 /* Read the CAM registers */ 18468 if (bxe_is_wreg_in_chip(sc, wreg_addr_p) && 18469 IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { 18470 for (i = 0; i < wreg_addr_p->size; i++) { 18471 *p++ = REG_RD(sc, wreg_addr_p->addr + i*4); 18472 
18473 /* In case of wreg_addr register, read additional 18474 registers from read_regs array 18475 */ 18476 for (j = 0; j < wreg_addr_p->read_regs_count; j++) { 18477 addr = *(wreg_addr_p->read_regs); 18478 *p++ = REG_RD(sc, addr + j*4); 18479 } 18480 } 18481 } 18482 18483 /* Paged registers are supported in E2 & E3 only */ 18484 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 18485 /* Read "paged" registers */ 18486 bxe_read_pages_regs(sc, p, preset); 18487 } 18488 18489 return 0; 18490 } 18491 18492 int 18493 bxe_grc_dump(struct bxe_softc *sc) 18494 { 18495 int rval = 0; 18496 uint32_t preset_idx; 18497 uint8_t *buf; 18498 uint32_t size; 18499 struct dump_header *d_hdr; 18500 uint32_t i; 18501 uint32_t reg_val; 18502 uint32_t reg_addr; 18503 uint32_t cmd_offset; 18504 int context_size; 18505 int allocated; 18506 struct ecore_ilt *ilt = SC_ILT(sc); 18507 struct bxe_fastpath *fp; 18508 struct ilt_client_info *ilt_cli; 18509 int grc_dump_size; 18510 18511 18512 if (sc->grcdump_done || sc->grcdump_started) 18513 return (rval); 18514 18515 sc->grcdump_started = 1; 18516 BLOGI(sc, "Started collecting grcdump\n"); 18517 18518 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + 18519 sizeof(struct dump_header); 18520 18521 sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); 18522 18523 if (sc->grc_dump == NULL) { 18524 BLOGW(sc, "Unable to allocate memory for grcdump collection\n"); 18525 return (ENOMEM); 18526 } 18527 18528 18529 18530 /* Disable parity attentions, since the following dump may 18531 * cause false alarms by reading registers that were never 18532 * written. We will re-enable parity attentions right after 18533 * the dump. 18534 */ 18535 /* Disable parity on path 0 */ 18536 bxe_pretend_func(sc, 0); 18537 18538 ecore_disable_blocks_parity(sc); 18539 18540 /* Disable parity on path 1 */ 18541 bxe_pretend_func(sc, 1); 18542 ecore_disable_blocks_parity(sc); 18543 18544 /* Return to current function */ 18545 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 18546 18547 buf = sc->grc_dump; 18548 d_hdr = sc->grc_dump; 18549 18550 d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1; 18551 d_hdr->version = BNX2X_DUMP_VERSION; 18552 d_hdr->preset = DUMP_ALL_PRESETS; 18553 18554 if (CHIP_IS_E1(sc)) { 18555 d_hdr->dump_meta_data = DUMP_CHIP_E1; 18556 } else if (CHIP_IS_E1H(sc)) { 18557 d_hdr->dump_meta_data = DUMP_CHIP_E1H; 18558 } else if (CHIP_IS_E2(sc)) { 18559 d_hdr->dump_meta_data = DUMP_CHIP_E2 | 18560 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); 18561 } else if (CHIP_IS_E3A0(sc)) { 18562 d_hdr->dump_meta_data = DUMP_CHIP_E3A0 | 18563 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); 18564 } else if (CHIP_IS_E3B0(sc)) { 18565 d_hdr->dump_meta_data = DUMP_CHIP_E3B0 | 18566 (BXE_PATH(sc) ? 
DUMP_PATH_1 : DUMP_PATH_0); 18567 } 18568 18569 buf += sizeof(struct dump_header); 18570 18571 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { 18572 18573 /* Skip presets with IOR */ 18574 if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) || 18575 (preset_idx == 11)) 18576 continue; 18577 18578 rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx); 18579 18580 if (rval) 18581 break; 18582 18583 size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t)); 18584 18585 buf += size; 18586 } 18587 18588 bxe_pretend_func(sc, 0); 18589 ecore_clear_blocks_parity(sc); 18590 ecore_enable_blocks_parity(sc); 18591 18592 bxe_pretend_func(sc, 1); 18593 ecore_clear_blocks_parity(sc); 18594 ecore_enable_blocks_parity(sc); 18595 18596 /* Return to current function */ 18597 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 18598 18599 18600 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); 18601 for (i = 0, allocated = 0; allocated < context_size; i++) { 18602 18603 BLOGI(sc, "cdu_context i %d paddr %#jx vaddr %p size 0x%zx\n", i, 18604 (uintmax_t)sc->context[i].vcxt_dma.paddr, 18605 sc->context[i].vcxt_dma.vaddr, 18606 sc->context[i].size); 18607 allocated += sc->context[i].size; 18608 } 18609 BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n", 18610 (uintmax_t)sc->fw_stats_req_mapping, 18611 (uintmax_t)sc->fw_stats_data_mapping, 18612 sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size)); 18613 BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n", 18614 (void *)sc->def_sb_dma.paddr, sc->def_sb, 18615 sizeof(struct host_sp_status_block)); 18616 BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n", 18617 (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE); 18618 BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n", 18619 (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr, 18620 sizeof(struct bxe_slowpath)); 18621 BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n", 18622 (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE); 18623 BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n", 18624 (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr, 18625 FW_BUF_SIZE); 18626 for (i = 0; i < sc->num_queues; i++) { 18627 fp = &sc->fp[i]; 18628 BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i, 18629 (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr, 18630 sizeof(union bxe_host_hc_status_block)); 18631 BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, 18632 (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr, 18633 (BCM_PAGE_SIZE * TX_BD_NUM_PAGES)); 18634 BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, 18635 (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr, 18636 (BCM_PAGE_SIZE * RX_BD_NUM_PAGES)); 18637 BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i, 18638 (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr, 18639 (BCM_PAGE_SIZE * RCQ_NUM_PAGES)); 18640 BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, 18641 (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr, 18642 (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES)); 18643 } 18644 18645 ilt_cli = &ilt->clients[1]; 18646 for (i = ilt_cli->start; i <= ilt_cli->end; i++) { 18647 BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n", 18648 (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr), 18649 ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE); 18650 } 18651 18652 18653 cmd_offset = DMAE_REG_CMD_MEM; 18654 for (i = 0; i < 224; i++) { 18655 reg_addr = 
(cmd_offset +(i * 4)); 18656 reg_val = REG_RD(sc, reg_addr); 18657 BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i, 18658 reg_addr, reg_val); 18659 } 18660 18661 18662 BLOGI(sc, "Collection of grcdump done\n"); 18663 sc->grcdump_done = 1; 18664 return(rval); 18665 } 18666 18667 static int 18668 bxe_add_cdev(struct bxe_softc *sc) 18669 { 18670 sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT); 18671 18672 if (sc->eeprom == NULL) { 18673 BLOGW(sc, "Unable to alloc for eeprom size buffer\n"); 18674 return (-1); 18675 } 18676 18677 sc->ioctl_dev = make_dev(&bxe_cdevsw, 18678 sc->ifp->if_dunit, 18679 UID_ROOT, 18680 GID_WHEEL, 18681 0600, 18682 "%s", 18683 if_name(sc->ifp)); 18684 18685 if (sc->ioctl_dev == NULL) { 18686 free(sc->eeprom, M_DEVBUF); 18687 sc->eeprom = NULL; 18688 return (-1); 18689 } 18690 18691 sc->ioctl_dev->si_drv1 = sc; 18692 18693 return (0); 18694 } 18695 18696 static void 18697 bxe_del_cdev(struct bxe_softc *sc) 18698 { 18699 if (sc->ioctl_dev != NULL) 18700 destroy_dev(sc->ioctl_dev); 18701 18702 if (sc->eeprom != NULL) { 18703 free(sc->eeprom, M_DEVBUF); 18704 sc->eeprom = NULL; 18705 } 18706 sc->ioctl_dev = NULL; 18707 18708 return; 18709 } 18710 18711 static bool bxe_is_nvram_accessible(struct bxe_softc *sc) 18712 { 18713 18714 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) 18715 return FALSE; 18716 18717 return TRUE; 18718 } 18719 18720 18721 static int 18722 bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) 18723 { 18724 int rval = 0; 18725 18726 if(!bxe_is_nvram_accessible(sc)) { 18727 BLOGW(sc, "Cannot access eeprom when interface is down\n"); 18728 return (-EAGAIN); 18729 } 18730 rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len); 18731 18732 18733 return (rval); 18734 } 18735 18736 static int 18737 bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) 18738 { 18739 int rval = 0; 18740 18741 if(!bxe_is_nvram_accessible(sc)) { 18742 BLOGW(sc, "Cannot access eeprom when interface is down\n"); 18743 return (-EAGAIN); 18744 } 18745 rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len); 18746 18747 return (rval); 18748 } 18749 18750 static int 18751 bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom) 18752 { 18753 int rval = 0; 18754 18755 switch (eeprom->eeprom_cmd) { 18756 18757 case BXE_EEPROM_CMD_SET_EEPROM: 18758 18759 rval = copyin(eeprom->eeprom_data, sc->eeprom, 18760 eeprom->eeprom_data_len); 18761 18762 if (rval) 18763 break; 18764 18765 rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, 18766 eeprom->eeprom_data_len); 18767 break; 18768 18769 case BXE_EEPROM_CMD_GET_EEPROM: 18770 18771 rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, 18772 eeprom->eeprom_data_len); 18773 18774 if (rval) { 18775 break; 18776 } 18777 18778 rval = copyout(sc->eeprom, eeprom->eeprom_data, 18779 eeprom->eeprom_data_len); 18780 break; 18781 18782 default: 18783 rval = EINVAL; 18784 break; 18785 } 18786 18787 if (rval) { 18788 BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval); 18789 } 18790 18791 return (rval); 18792 } 18793 18794 static int 18795 bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p) 18796 { 18797 uint32_t ext_phy_config; 18798 int port = SC_PORT(sc); 18799 int cfg_idx = bxe_get_link_cfg_idx(sc); 18800 18801 dev_p->supported = sc->port.supported[cfg_idx] | 18802 (sc->port.supported[cfg_idx ^ 1] & 18803 (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE)); 18804 dev_p->advertising = sc->port.advertising[cfg_idx]; 
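/*
 * A 1G SFP module cannot run 10G: the block below masks 10GBase-T out of
 * the supported and advertised sets reported back to the ioctl caller.
 */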
18805 if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type == 18806 ELINK_ETH_PHY_SFP_1G_FIBER) { 18807 dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full); 18808 dev_p->advertising &= ~(ADVERTISED_10000baseT_Full); 18809 } 18810 if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up && 18811 !(sc->flags & BXE_MF_FUNC_DIS)) { 18812 dev_p->duplex = sc->link_vars.duplex; 18813 if (IS_MF(sc) && !BXE_NOMCP(sc)) 18814 dev_p->speed = bxe_get_mf_speed(sc); 18815 else 18816 dev_p->speed = sc->link_vars.line_speed; 18817 } else { 18818 dev_p->duplex = DUPLEX_UNKNOWN; 18819 dev_p->speed = SPEED_UNKNOWN; 18820 } 18821 18822 dev_p->port = bxe_media_detect(sc); 18823 18824 ext_phy_config = SHMEM_RD(sc, 18825 dev_info.port_hw_config[port].external_phy_config); 18826 if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) == 18827 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 18828 dev_p->phy_address = sc->port.phy_addr; 18829 else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != 18830 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 18831 ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != 18832 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 18833 dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config); 18834 else 18835 dev_p->phy_address = 0; 18836 18837 if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) 18838 dev_p->autoneg = AUTONEG_ENABLE; 18839 else 18840 dev_p->autoneg = AUTONEG_DISABLE; 18841 18842 18843 return 0; 18844 } 18845 18846 static int 18847 bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, 18848 struct thread *td) 18849 { 18850 struct bxe_softc *sc; 18851 int rval = 0; 18852 device_t pci_dev; 18853 bxe_grcdump_t *dump = NULL; 18854 int grc_dump_size; 18855 bxe_drvinfo_t *drv_infop = NULL; 18856 bxe_dev_setting_t *dev_p; 18857 bxe_dev_setting_t dev_set; 18858 bxe_get_regs_t *reg_p; 18859 bxe_reg_rdw_t *reg_rdw_p; 18860 bxe_pcicfg_rdw_t *cfg_rdw_p; 18861 bxe_perm_mac_addr_t *mac_addr_p; 18862 18863 18864 if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL) 18865 return ENXIO; 18866 18867 pci_dev = sc->dev; 18868 18869 dump = (bxe_grcdump_t *)data; 18870 18871 switch(cmd) { 18872 18873 case BXE_GRC_DUMP_SIZE: 18874 dump->pci_func = sc->pcie_func; 18875 dump->grcdump_size = 18876 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + 18877 sizeof(struct dump_header); 18878 break; 18879 18880 case BXE_GRC_DUMP: 18881 18882 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + 18883 sizeof(struct dump_header); 18884 if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) || 18885 (dump->grcdump_size < grc_dump_size)) { 18886 rval = EINVAL; 18887 break; 18888 } 18889 18890 if((sc->trigger_grcdump) && (!sc->grcdump_done) && 18891 (!sc->grcdump_started)) { 18892 rval = bxe_grc_dump(sc); 18893 } 18894 18895 if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) && 18896 (sc->grc_dump != NULL)) { 18897 dump->grcdump_dwords = grc_dump_size >> 2; 18898 rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size); 18899 free(sc->grc_dump, M_DEVBUF); 18900 sc->grc_dump = NULL; 18901 sc->grcdump_started = 0; 18902 sc->grcdump_done = 0; 18903 } 18904 18905 break; 18906 18907 case BXE_DRV_INFO: 18908 drv_infop = (bxe_drvinfo_t *)data; 18909 snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe"); 18910 snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s", 18911 BXE_DRIVER_VERSION); 18912 snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s", 18913 sc->devinfo.bc_ver_str); 18914 snprintf(drv_infop->stormfw_version, 
BXE_STORMFW_VERSION_LENGTH, 18915 "%s", sc->fw_ver_str); 18916 drv_infop->eeprom_dump_len = sc->devinfo.flash_size; 18917 drv_infop->reg_dump_len = 18918 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) 18919 + sizeof(struct dump_header); 18920 snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d", 18921 sc->pcie_bus, sc->pcie_device, sc->pcie_func); 18922 break; 18923 18924 case BXE_DEV_SETTING: 18925 dev_p = (bxe_dev_setting_t *)data; 18926 bxe_get_settings(sc, &dev_set); 18927 dev_p->supported = dev_set.supported; 18928 dev_p->advertising = dev_set.advertising; 18929 dev_p->speed = dev_set.speed; 18930 dev_p->duplex = dev_set.duplex; 18931 dev_p->port = dev_set.port; 18932 dev_p->phy_address = dev_set.phy_address; 18933 dev_p->autoneg = dev_set.autoneg; 18934 18935 break; 18936 18937 case BXE_GET_REGS: 18938 18939 reg_p = (bxe_get_regs_t *)data; 18940 grc_dump_size = reg_p->reg_buf_len; 18941 18942 if((!sc->grcdump_done) && (!sc->grcdump_started)) { 18943 bxe_grc_dump(sc); 18944 } 18945 if((sc->grcdump_done) && (sc->grcdump_started) && 18946 (sc->grc_dump != NULL)) { 18947 rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size); 18948 free(sc->grc_dump, M_DEVBUF); 18949 sc->grc_dump = NULL; 18950 sc->grcdump_started = 0; 18951 sc->grcdump_done = 0; 18952 } 18953 18954 break; 18955 18956 case BXE_RDW_REG: 18957 reg_rdw_p = (bxe_reg_rdw_t *)data; 18958 if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) && 18959 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) 18960 reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id); 18961 18962 if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) && 18963 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) 18964 REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val); 18965 18966 break; 18967 18968 case BXE_RDW_PCICFG: 18969 cfg_rdw_p = (bxe_pcicfg_rdw_t *)data; 18970 if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) { 18971 18972 cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id, 18973 cfg_rdw_p->cfg_width); 18974 18975 } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) { 18976 pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val, 18977 cfg_rdw_p->cfg_width); 18978 } else { 18979 BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n"); 18980 } 18981 break; 18982 18983 case BXE_MAC_ADDR: 18984 mac_addr_p = (bxe_perm_mac_addr_t *)data; 18985 snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s", 18986 sc->mac_addr_str); 18987 break; 18988 18989 case BXE_EEPROM: 18990 rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data); 18991 break; 18992 18993 18994 default: 18995 break; 18996 } 18997 18998 return (rval); 18999 } 19000
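/*
 * Usage sketch (hypothetical userland code, not part of the driver): the
 * ioctl node created by bxe_add_cdev() is named after the interface, so a
 * diagnostic tool might fetch a grcdump roughly like this. The device path
 * is an assumption based on if_name() above, error handling is omitted,
 * and BXE_GRC_DUMP only proceeds once the driver's trigger_grcdump flag
 * has been armed (see the EINVAL check in bxe_eioctl() above).
 *
 *   int fd = open("/dev/bxe0", O_RDWR);       // hypothetical device path
 *   bxe_grcdump_t dump = { 0 };
 *   ioctl(fd, BXE_GRC_DUMP_SIZE, &dump);      // driver fills grcdump_size
 *   dump.grcdump = malloc(dump.grcdump_size);
 *   ioctl(fd, BXE_GRC_DUMP, &dump);           // collect and copy out
 *   write(out_fd, dump.grcdump, dump.grcdump_dwords * 4);
 */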