/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define BXE_DRIVER_VERSION "1.78.79"

#include "bxe.h"
#include "ecore_sp.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"

/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#ifndef CTLTYPE_U64
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero (0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif

/*
 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
    int pN;
    uint32_t init_crd;
    uint32_t crd;
    uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
    int pN;
    uint32_t lines_occup;
    uint32_t lines_freed;
};

/*
 * PCI Device ID Table used by bxe_probe().
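 * Entries are matched on vendor/device ID; the PCI_ANY_ID wildcards accept
 * any subvendor/subdevice, and the table is terminated by an all-zero entry.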
94 */ 95 #define BXE_DEVDESC_MAX 64 96 static struct bxe_device_type bxe_devs[] = { 97 { 98 BRCM_VENDORID, 99 CHIP_NUM_57710, 100 PCI_ANY_ID, PCI_ANY_ID, 101 "QLogic NetXtreme II BCM57710 10GbE" 102 }, 103 { 104 BRCM_VENDORID, 105 CHIP_NUM_57711, 106 PCI_ANY_ID, PCI_ANY_ID, 107 "QLogic NetXtreme II BCM57711 10GbE" 108 }, 109 { 110 BRCM_VENDORID, 111 CHIP_NUM_57711E, 112 PCI_ANY_ID, PCI_ANY_ID, 113 "QLogic NetXtreme II BCM57711E 10GbE" 114 }, 115 { 116 BRCM_VENDORID, 117 CHIP_NUM_57712, 118 PCI_ANY_ID, PCI_ANY_ID, 119 "QLogic NetXtreme II BCM57712 10GbE" 120 }, 121 { 122 BRCM_VENDORID, 123 CHIP_NUM_57712_MF, 124 PCI_ANY_ID, PCI_ANY_ID, 125 "QLogic NetXtreme II BCM57712 MF 10GbE" 126 }, 127 #if 0 128 { 129 BRCM_VENDORID, 130 CHIP_NUM_57712_VF, 131 PCI_ANY_ID, PCI_ANY_ID, 132 "QLogic NetXtreme II BCM57712 VF 10GbE" 133 }, 134 #endif 135 { 136 BRCM_VENDORID, 137 CHIP_NUM_57800, 138 PCI_ANY_ID, PCI_ANY_ID, 139 "QLogic NetXtreme II BCM57800 10GbE" 140 }, 141 { 142 BRCM_VENDORID, 143 CHIP_NUM_57800_MF, 144 PCI_ANY_ID, PCI_ANY_ID, 145 "QLogic NetXtreme II BCM57800 MF 10GbE" 146 }, 147 #if 0 148 { 149 BRCM_VENDORID, 150 CHIP_NUM_57800_VF, 151 PCI_ANY_ID, PCI_ANY_ID, 152 "QLogic NetXtreme II BCM57800 VF 10GbE" 153 }, 154 #endif 155 { 156 BRCM_VENDORID, 157 CHIP_NUM_57810, 158 PCI_ANY_ID, PCI_ANY_ID, 159 "QLogic NetXtreme II BCM57810 10GbE" 160 }, 161 { 162 BRCM_VENDORID, 163 CHIP_NUM_57810_MF, 164 PCI_ANY_ID, PCI_ANY_ID, 165 "QLogic NetXtreme II BCM57810 MF 10GbE" 166 }, 167 #if 0 168 { 169 BRCM_VENDORID, 170 CHIP_NUM_57810_VF, 171 PCI_ANY_ID, PCI_ANY_ID, 172 "QLogic NetXtreme II BCM57810 VF 10GbE" 173 }, 174 #endif 175 { 176 BRCM_VENDORID, 177 CHIP_NUM_57811, 178 PCI_ANY_ID, PCI_ANY_ID, 179 "QLogic NetXtreme II BCM57811 10GbE" 180 }, 181 { 182 BRCM_VENDORID, 183 CHIP_NUM_57811_MF, 184 PCI_ANY_ID, PCI_ANY_ID, 185 "QLogic NetXtreme II BCM57811 MF 10GbE" 186 }, 187 #if 0 188 { 189 BRCM_VENDORID, 190 CHIP_NUM_57811_VF, 191 PCI_ANY_ID, PCI_ANY_ID, 192 "QLogic NetXtreme II BCM57811 VF 10GbE" 193 }, 194 #endif 195 { 196 BRCM_VENDORID, 197 CHIP_NUM_57840_4_10, 198 PCI_ANY_ID, PCI_ANY_ID, 199 "QLogic NetXtreme II BCM57840 4x10GbE" 200 }, 201 #if 0 202 { 203 BRCM_VENDORID, 204 CHIP_NUM_57840_2_20, 205 PCI_ANY_ID, PCI_ANY_ID, 206 "QLogic NetXtreme II BCM57840 2x20GbE" 207 }, 208 #endif 209 { 210 BRCM_VENDORID, 211 CHIP_NUM_57840_MF, 212 PCI_ANY_ID, PCI_ANY_ID, 213 "QLogic NetXtreme II BCM57840 MF 10GbE" 214 }, 215 #if 0 216 { 217 BRCM_VENDORID, 218 CHIP_NUM_57840_VF, 219 PCI_ANY_ID, PCI_ANY_ID, 220 "QLogic NetXtreme II BCM57840 VF 10GbE" 221 }, 222 #endif 223 { 224 0, 0, 0, 0, NULL 225 } 226 }; 227 228 MALLOC_DECLARE(M_BXE_ILT); 229 MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer"); 230 231 /* 232 * FreeBSD device entry points. 233 */ 234 static int bxe_probe(device_t); 235 static int bxe_attach(device_t); 236 static int bxe_detach(device_t); 237 static int bxe_shutdown(device_t); 238 239 /* 240 * FreeBSD KLD module/device interface event handler method. 
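 * These map the standard device_if.h probe/attach/detach/shutdown entry
 * points to the bxe_* handlers declared above.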
241 */ 242 static device_method_t bxe_methods[] = { 243 /* Device interface (device_if.h) */ 244 DEVMETHOD(device_probe, bxe_probe), 245 DEVMETHOD(device_attach, bxe_attach), 246 DEVMETHOD(device_detach, bxe_detach), 247 DEVMETHOD(device_shutdown, bxe_shutdown), 248 #if 0 249 DEVMETHOD(device_suspend, bxe_suspend), 250 DEVMETHOD(device_resume, bxe_resume), 251 #endif 252 /* Bus interface (bus_if.h) */ 253 DEVMETHOD(bus_print_child, bus_generic_print_child), 254 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 255 KOBJMETHOD_END 256 }; 257 258 /* 259 * FreeBSD KLD Module data declaration 260 */ 261 static driver_t bxe_driver = { 262 "bxe", /* module name */ 263 bxe_methods, /* event handler */ 264 sizeof(struct bxe_softc) /* extra data */ 265 }; 266 267 /* 268 * FreeBSD dev class is needed to manage dev instances and 269 * to associate with a bus type 270 */ 271 static devclass_t bxe_devclass; 272 273 MODULE_DEPEND(bxe, pci, 1, 1, 1); 274 MODULE_DEPEND(bxe, ether, 1, 1, 1); 275 DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0); 276 277 /* resources needed for unloading a previously loaded device */ 278 279 #define BXE_PREV_WAIT_NEEDED 1 280 struct mtx bxe_prev_mtx; 281 MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF); 282 struct bxe_prev_list_node { 283 LIST_ENTRY(bxe_prev_list_node) node; 284 uint8_t bus; 285 uint8_t slot; 286 uint8_t path; 287 uint8_t aer; /* XXX automatic error recovery */ 288 uint8_t undi; 289 }; 290 static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list); 291 292 static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ 293 294 /* Tunable device values... */ 295 296 SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters"); 297 298 /* Debug */ 299 unsigned long bxe_debug = 0; 300 SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN, 301 &bxe_debug, 0, "Debug logging mode"); 302 303 /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ 304 static int bxe_interrupt_mode = INTR_MODE_MSIX; 305 SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN, 306 &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode"); 307 308 /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */ 309 static int bxe_queue_count = 4; 310 SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, 311 &bxe_queue_count, 0, "Multi-Queue queue count"); 312 313 /* max number of buffers per queue (default RX_BD_USABLE) */ 314 static int bxe_max_rx_bufs = 0; 315 SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN, 316 &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue"); 317 318 /* Host interrupt coalescing RX tick timer (usecs) */ 319 static int bxe_hc_rx_ticks = 25; 320 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN, 321 &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks"); 322 323 /* Host interrupt coalescing TX tick timer (usecs) */ 324 static int bxe_hc_tx_ticks = 50; 325 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN, 326 &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks"); 327 328 /* Maximum number of Rx packets to process at a time */ 329 static int bxe_rx_budget = 0xffffffff; 330 SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN, 331 &bxe_rx_budget, 0, "Rx processing budget"); 332 333 /* Maximum LRO aggregation size */ 334 static int bxe_max_aggregation_size = 0; 335 SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN, 336 &bxe_max_aggregation_size, 0, "max aggregation size"); 337 338 /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 
2 (512B), 3 (1KB) */ 339 static int bxe_mrrs = -1; 340 SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN, 341 &bxe_mrrs, 0, "PCIe maximum read request size"); 342 343 /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */ 344 static int bxe_autogreeen = 0; 345 SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN, 346 &bxe_autogreeen, 0, "AutoGrEEEn support"); 347 348 /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */ 349 static int bxe_udp_rss = 0; 350 SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN, 351 &bxe_udp_rss, 0, "UDP RSS support"); 352 353 354 #define STAT_NAME_LEN 32 /* no stat names below can be longer than this */ 355 356 #define STATS_OFFSET32(stat_name) \ 357 (offsetof(struct bxe_eth_stats, stat_name) / 4) 358 359 #define Q_STATS_OFFSET32(stat_name) \ 360 (offsetof(struct bxe_eth_q_stats, stat_name) / 4) 361 362 static const struct { 363 uint32_t offset; 364 uint32_t size; 365 uint32_t flags; 366 #define STATS_FLAGS_PORT 1 367 #define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */ 368 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) 369 char string[STAT_NAME_LEN]; 370 } bxe_eth_stats_arr[] = { 371 { STATS_OFFSET32(total_bytes_received_hi), 372 8, STATS_FLAGS_BOTH, "rx_bytes" }, 373 { STATS_OFFSET32(error_bytes_received_hi), 374 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, 375 { STATS_OFFSET32(total_unicast_packets_received_hi), 376 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, 377 { STATS_OFFSET32(total_multicast_packets_received_hi), 378 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, 379 { STATS_OFFSET32(total_broadcast_packets_received_hi), 380 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, 381 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 382 8, STATS_FLAGS_PORT, "rx_crc_errors" }, 383 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 384 8, STATS_FLAGS_PORT, "rx_align_errors" }, 385 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 386 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, 387 { STATS_OFFSET32(etherstatsoverrsizepkts_hi), 388 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, 389 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 390 8, STATS_FLAGS_PORT, "rx_fragments" }, 391 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 392 8, STATS_FLAGS_PORT, "rx_jabbers" }, 393 { STATS_OFFSET32(no_buff_discard_hi), 394 8, STATS_FLAGS_BOTH, "rx_discards" }, 395 { STATS_OFFSET32(mac_filter_discard), 396 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, 397 { STATS_OFFSET32(mf_tag_discard), 398 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, 399 { STATS_OFFSET32(pfc_frames_received_hi), 400 8, STATS_FLAGS_PORT, "pfc_frames_received" }, 401 { STATS_OFFSET32(pfc_frames_sent_hi), 402 8, STATS_FLAGS_PORT, "pfc_frames_sent" }, 403 { STATS_OFFSET32(brb_drop_hi), 404 8, STATS_FLAGS_PORT, "rx_brb_discard" }, 405 { STATS_OFFSET32(brb_truncate_hi), 406 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, 407 { STATS_OFFSET32(pause_frames_received_hi), 408 8, STATS_FLAGS_PORT, "rx_pause_frames" }, 409 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 410 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, 411 { STATS_OFFSET32(nig_timer_max), 412 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, 413 { STATS_OFFSET32(total_bytes_transmitted_hi), 414 8, STATS_FLAGS_BOTH, "tx_bytes" }, 415 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 416 8, STATS_FLAGS_PORT, "tx_error_bytes" }, 417 { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 418 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, 419 { STATS_OFFSET32(total_multicast_packets_transmitted_hi), 420 8, 
STATS_FLAGS_BOTH, "tx_mcast_packets" }, 421 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 422 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, 423 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 424 8, STATS_FLAGS_PORT, "tx_mac_errors" }, 425 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 426 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, 427 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 428 8, STATS_FLAGS_PORT, "tx_single_collisions" }, 429 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 430 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, 431 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 432 8, STATS_FLAGS_PORT, "tx_deferred" }, 433 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 434 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, 435 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 436 8, STATS_FLAGS_PORT, "tx_late_collisions" }, 437 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 438 8, STATS_FLAGS_PORT, "tx_total_collisions" }, 439 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 440 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, 441 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 442 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, 443 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 444 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, 445 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 446 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, 447 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 448 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, 449 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 450 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, 451 { STATS_OFFSET32(etherstatspktsover1522octets_hi), 452 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, 453 { STATS_OFFSET32(pause_frames_sent_hi), 454 8, STATS_FLAGS_PORT, "tx_pause_frames" }, 455 { STATS_OFFSET32(total_tpa_aggregations_hi), 456 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, 457 { STATS_OFFSET32(total_tpa_aggregated_frames_hi), 458 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, 459 { STATS_OFFSET32(total_tpa_bytes_hi), 460 8, STATS_FLAGS_FUNC, "tpa_bytes"}, 461 #if 0 462 { STATS_OFFSET32(recoverable_error), 463 4, STATS_FLAGS_FUNC, "recoverable_errors" }, 464 { STATS_OFFSET32(unrecoverable_error), 465 4, STATS_FLAGS_FUNC, "unrecoverable_errors" }, 466 #endif 467 { STATS_OFFSET32(eee_tx_lpi), 468 4, STATS_FLAGS_PORT, "eee_tx_lpi"}, 469 { STATS_OFFSET32(rx_calls), 470 4, STATS_FLAGS_FUNC, "rx_calls"}, 471 { STATS_OFFSET32(rx_pkts), 472 4, STATS_FLAGS_FUNC, "rx_pkts"}, 473 { STATS_OFFSET32(rx_tpa_pkts), 474 4, STATS_FLAGS_FUNC, "rx_tpa_pkts"}, 475 { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 476 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"}, 477 { STATS_OFFSET32(rx_bxe_service_rxsgl), 478 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"}, 479 { STATS_OFFSET32(rx_jumbo_sge_pkts), 480 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"}, 481 { STATS_OFFSET32(rx_soft_errors), 482 4, STATS_FLAGS_FUNC, "rx_soft_errors"}, 483 { STATS_OFFSET32(rx_hw_csum_errors), 484 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"}, 485 { STATS_OFFSET32(rx_ofld_frames_csum_ip), 486 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"}, 487 { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 488 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"}, 489 { STATS_OFFSET32(rx_budget_reached), 490 4, STATS_FLAGS_FUNC, "rx_budget_reached"}, 491 { STATS_OFFSET32(tx_pkts), 492 4, 
STATS_FLAGS_FUNC, "tx_pkts"}, 493 { STATS_OFFSET32(tx_soft_errors), 494 4, STATS_FLAGS_FUNC, "tx_soft_errors"}, 495 { STATS_OFFSET32(tx_ofld_frames_csum_ip), 496 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"}, 497 { STATS_OFFSET32(tx_ofld_frames_csum_tcp), 498 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"}, 499 { STATS_OFFSET32(tx_ofld_frames_csum_udp), 500 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"}, 501 { STATS_OFFSET32(tx_ofld_frames_lso), 502 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"}, 503 { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 504 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"}, 505 { STATS_OFFSET32(tx_encap_failures), 506 4, STATS_FLAGS_FUNC, "tx_encap_failures"}, 507 { STATS_OFFSET32(tx_hw_queue_full), 508 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"}, 509 { STATS_OFFSET32(tx_hw_max_queue_depth), 510 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"}, 511 { STATS_OFFSET32(tx_dma_mapping_failure), 512 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"}, 513 { STATS_OFFSET32(tx_max_drbr_queue_depth), 514 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"}, 515 { STATS_OFFSET32(tx_window_violation_std), 516 4, STATS_FLAGS_FUNC, "tx_window_violation_std"}, 517 { STATS_OFFSET32(tx_window_violation_tso), 518 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"}, 519 #if 0 520 { STATS_OFFSET32(tx_unsupported_tso_request_ipv6), 521 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6"}, 522 { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp), 523 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp"}, 524 #endif 525 { STATS_OFFSET32(tx_chain_lost_mbuf), 526 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"}, 527 { STATS_OFFSET32(tx_frames_deferred), 528 4, STATS_FLAGS_FUNC, "tx_frames_deferred"}, 529 { STATS_OFFSET32(tx_queue_xoff), 530 4, STATS_FLAGS_FUNC, "tx_queue_xoff"}, 531 { STATS_OFFSET32(mbuf_defrag_attempts), 532 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"}, 533 { STATS_OFFSET32(mbuf_defrag_failures), 534 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"}, 535 { STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 536 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"}, 537 { STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 538 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"}, 539 { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 540 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"}, 541 { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 542 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"}, 543 { STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 544 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"}, 545 { STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 546 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"}, 547 { STATS_OFFSET32(mbuf_alloc_tx), 548 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"}, 549 { STATS_OFFSET32(mbuf_alloc_rx), 550 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"}, 551 { STATS_OFFSET32(mbuf_alloc_sge), 552 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"}, 553 { STATS_OFFSET32(mbuf_alloc_tpa), 554 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"} 555 }; 556 557 static const struct { 558 uint32_t offset; 559 uint32_t size; 560 char string[STAT_NAME_LEN]; 561 } bxe_eth_q_stats_arr[] = { 562 { Q_STATS_OFFSET32(total_bytes_received_hi), 563 8, "rx_bytes" }, 564 { Q_STATS_OFFSET32(total_unicast_packets_received_hi), 565 8, "rx_ucast_packets" }, 566 { Q_STATS_OFFSET32(total_multicast_packets_received_hi), 567 8, "rx_mcast_packets" }, 568 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), 569 8, "rx_bcast_packets" }, 570 { Q_STATS_OFFSET32(no_buff_discard_hi), 571 8, "rx_discards" }, 572 { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 573 8, "tx_bytes" }, 574 
{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 575 8, "tx_ucast_packets" }, 576 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), 577 8, "tx_mcast_packets" }, 578 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 579 8, "tx_bcast_packets" }, 580 { Q_STATS_OFFSET32(total_tpa_aggregations_hi), 581 8, "tpa_aggregations" }, 582 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), 583 8, "tpa_aggregated_frames"}, 584 { Q_STATS_OFFSET32(total_tpa_bytes_hi), 585 8, "tpa_bytes"}, 586 { Q_STATS_OFFSET32(rx_calls), 587 4, "rx_calls"}, 588 { Q_STATS_OFFSET32(rx_pkts), 589 4, "rx_pkts"}, 590 { Q_STATS_OFFSET32(rx_tpa_pkts), 591 4, "rx_tpa_pkts"}, 592 { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 593 4, "rx_erroneous_jumbo_sge_pkts"}, 594 { Q_STATS_OFFSET32(rx_bxe_service_rxsgl), 595 4, "rx_bxe_service_rxsgl"}, 596 { Q_STATS_OFFSET32(rx_jumbo_sge_pkts), 597 4, "rx_jumbo_sge_pkts"}, 598 { Q_STATS_OFFSET32(rx_soft_errors), 599 4, "rx_soft_errors"}, 600 { Q_STATS_OFFSET32(rx_hw_csum_errors), 601 4, "rx_hw_csum_errors"}, 602 { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip), 603 4, "rx_ofld_frames_csum_ip"}, 604 { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 605 4, "rx_ofld_frames_csum_tcp_udp"}, 606 { Q_STATS_OFFSET32(rx_budget_reached), 607 4, "rx_budget_reached"}, 608 { Q_STATS_OFFSET32(tx_pkts), 609 4, "tx_pkts"}, 610 { Q_STATS_OFFSET32(tx_soft_errors), 611 4, "tx_soft_errors"}, 612 { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip), 613 4, "tx_ofld_frames_csum_ip"}, 614 { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp), 615 4, "tx_ofld_frames_csum_tcp"}, 616 { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp), 617 4, "tx_ofld_frames_csum_udp"}, 618 { Q_STATS_OFFSET32(tx_ofld_frames_lso), 619 4, "tx_ofld_frames_lso"}, 620 { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 621 4, "tx_ofld_frames_lso_hdr_splits"}, 622 { Q_STATS_OFFSET32(tx_encap_failures), 623 4, "tx_encap_failures"}, 624 { Q_STATS_OFFSET32(tx_hw_queue_full), 625 4, "tx_hw_queue_full"}, 626 { Q_STATS_OFFSET32(tx_hw_max_queue_depth), 627 4, "tx_hw_max_queue_depth"}, 628 { Q_STATS_OFFSET32(tx_dma_mapping_failure), 629 4, "tx_dma_mapping_failure"}, 630 { Q_STATS_OFFSET32(tx_max_drbr_queue_depth), 631 4, "tx_max_drbr_queue_depth"}, 632 { Q_STATS_OFFSET32(tx_window_violation_std), 633 4, "tx_window_violation_std"}, 634 { Q_STATS_OFFSET32(tx_window_violation_tso), 635 4, "tx_window_violation_tso"}, 636 #if 0 637 { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6), 638 4, "tx_unsupported_tso_request_ipv6"}, 639 { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp), 640 4, "tx_unsupported_tso_request_not_tcp"}, 641 #endif 642 { Q_STATS_OFFSET32(tx_chain_lost_mbuf), 643 4, "tx_chain_lost_mbuf"}, 644 { Q_STATS_OFFSET32(tx_frames_deferred), 645 4, "tx_frames_deferred"}, 646 { Q_STATS_OFFSET32(tx_queue_xoff), 647 4, "tx_queue_xoff"}, 648 { Q_STATS_OFFSET32(mbuf_defrag_attempts), 649 4, "mbuf_defrag_attempts"}, 650 { Q_STATS_OFFSET32(mbuf_defrag_failures), 651 4, "mbuf_defrag_failures"}, 652 { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 653 4, "mbuf_rx_bd_alloc_failed"}, 654 { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 655 4, "mbuf_rx_bd_mapping_failed"}, 656 { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 657 4, "mbuf_rx_tpa_alloc_failed"}, 658 { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 659 4, "mbuf_rx_tpa_mapping_failed"}, 660 { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 661 4, "mbuf_rx_sge_alloc_failed"}, 662 { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 663 4, "mbuf_rx_sge_mapping_failed"}, 664 { Q_STATS_OFFSET32(mbuf_alloc_tx), 665 
4, "mbuf_alloc_tx"}, 666 { Q_STATS_OFFSET32(mbuf_alloc_rx), 667 4, "mbuf_alloc_rx"}, 668 { Q_STATS_OFFSET32(mbuf_alloc_sge), 669 4, "mbuf_alloc_sge"}, 670 { Q_STATS_OFFSET32(mbuf_alloc_tpa), 671 4, "mbuf_alloc_tpa"} 672 }; 673 674 #define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr) 675 #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr) 676 677 678 static void bxe_cmng_fns_init(struct bxe_softc *sc, 679 uint8_t read_cfg, 680 uint8_t cmng_type); 681 static int bxe_get_cmng_fns_mode(struct bxe_softc *sc); 682 static void storm_memset_cmng(struct bxe_softc *sc, 683 struct cmng_init *cmng, 684 uint8_t port); 685 static void bxe_set_reset_global(struct bxe_softc *sc); 686 static void bxe_set_reset_in_progress(struct bxe_softc *sc); 687 static uint8_t bxe_reset_is_done(struct bxe_softc *sc, 688 int engine); 689 static uint8_t bxe_clear_pf_load(struct bxe_softc *sc); 690 static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, 691 uint8_t *global, 692 uint8_t print); 693 static void bxe_int_disable(struct bxe_softc *sc); 694 static int bxe_release_leader_lock(struct bxe_softc *sc); 695 static void bxe_pf_disable(struct bxe_softc *sc); 696 static void bxe_free_fp_buffers(struct bxe_softc *sc); 697 static inline void bxe_update_rx_prod(struct bxe_softc *sc, 698 struct bxe_fastpath *fp, 699 uint16_t rx_bd_prod, 700 uint16_t rx_cq_prod, 701 uint16_t rx_sge_prod); 702 static void bxe_link_report_locked(struct bxe_softc *sc); 703 static void bxe_link_report(struct bxe_softc *sc); 704 static void bxe_link_status_update(struct bxe_softc *sc); 705 static void bxe_periodic_callout_func(void *xsc); 706 static void bxe_periodic_start(struct bxe_softc *sc); 707 static void bxe_periodic_stop(struct bxe_softc *sc); 708 static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, 709 uint16_t prev_index, 710 uint16_t index); 711 static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 712 int queue); 713 static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 714 uint16_t index); 715 static uint8_t bxe_txeof(struct bxe_softc *sc, 716 struct bxe_fastpath *fp); 717 static void bxe_task_fp(struct bxe_fastpath *fp); 718 static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, 719 struct mbuf *m, 720 uint8_t contents); 721 static int bxe_alloc_mem(struct bxe_softc *sc); 722 static void bxe_free_mem(struct bxe_softc *sc); 723 static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc); 724 static void bxe_free_fw_stats_mem(struct bxe_softc *sc); 725 static int bxe_interrupt_attach(struct bxe_softc *sc); 726 static void bxe_interrupt_detach(struct bxe_softc *sc); 727 static void bxe_set_rx_mode(struct bxe_softc *sc); 728 static int bxe_init_locked(struct bxe_softc *sc); 729 static int bxe_stop_locked(struct bxe_softc *sc); 730 static __noinline int bxe_nic_load(struct bxe_softc *sc, 731 int load_mode); 732 static __noinline int bxe_nic_unload(struct bxe_softc *sc, 733 uint32_t unload_mode, 734 uint8_t keep_link); 735 736 static void bxe_handle_sp_tq(void *context, int pending); 737 static void bxe_handle_fp_tq(void *context, int pending); 738 739 static int bxe_add_cdev(struct bxe_softc *sc); 740 static void bxe_del_cdev(struct bxe_softc *sc); 741 742 /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */ 743 uint32_t 744 calc_crc32(uint8_t *crc32_packet, 745 uint32_t crc32_length, 746 uint32_t crc32_seed, 747 uint8_t complement) 748 { 749 uint32_t byte = 0; 750 uint32_t bit = 0; 751 uint8_t msb = 0; 752 uint32_t temp = 0; 753 uint32_t shft = 0; 754 uint8_t current_byte = 0; 755 uint32_t 
crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
    {
        return (crc32_result);
    }

    for (byte = 0; byte < crc32_length; byte = byte + 1)
    {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
        {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
            {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }

    /* Last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* Mirror */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
    {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* Swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* Complement */
    if (complement)
    {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}

int
bxe_test_bit(int nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

void
bxe_set_bit(unsigned int nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

void
bxe_clear_bit(int nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

int
bxe_test_and_set_bit(int nr,
                     volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_test_and_clear_bit(int nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_cmpxchg(volatile int *addr,
            int old,
            int new)
{
    int x;
    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}

/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
 * failures back to the caller.
 *
 * Returns:
 *   Nothing.
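 *
 *   On error the saved physical address and segment count are zeroed so a
 *   caller can tell that the mapping was not completed.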
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
#if 0
        BLOGD(dma->sc, DBG_LOAD,
              "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif
    }
}

/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * allowed and release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t       size,
              struct bxe_dma   *dma,
              const char       *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}

void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma   *dma)
{
    if (dma->size > 0) {
#if 0
        BLOGD(sc, DBG_LOAD,
              "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif

        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}

/*
 * These indirect read and write routines are only used during init.
 * The locking is handled by the MCP.
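 * Accesses are tunneled through the PCI config space GRC window
 * (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA) rather than the memory-mapped BAR.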
1020 */ 1021 1022 void 1023 bxe_reg_wr_ind(struct bxe_softc *sc, 1024 uint32_t addr, 1025 uint32_t val) 1026 { 1027 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); 1028 pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4); 1029 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); 1030 } 1031 1032 uint32_t 1033 bxe_reg_rd_ind(struct bxe_softc *sc, 1034 uint32_t addr) 1035 { 1036 uint32_t val; 1037 1038 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); 1039 val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4); 1040 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); 1041 1042 return (val); 1043 } 1044 1045 #if 0 1046 void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl) 1047 { 1048 uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC; 1049 1050 switch (dmae->opcode & DMAE_COMMAND_DST) { 1051 case DMAE_CMD_DST_PCI: 1052 if (src_type == DMAE_CMD_SRC_PCI) 1053 DP(msglvl, "DMAE: opcode 0x%08x\n" 1054 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" 1055 "comp_addr [%x:%08x], comp_val 0x%08x\n", 1056 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, 1057 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, 1058 dmae->comp_addr_hi, dmae->comp_addr_lo, 1059 dmae->comp_val); 1060 else 1061 DP(msglvl, "DMAE: opcode 0x%08x\n" 1062 "src [%08x], len [%d*4], dst [%x:%08x]\n" 1063 "comp_addr [%x:%08x], comp_val 0x%08x\n", 1064 dmae->opcode, dmae->src_addr_lo >> 2, 1065 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, 1066 dmae->comp_addr_hi, dmae->comp_addr_lo, 1067 dmae->comp_val); 1068 break; 1069 case DMAE_CMD_DST_GRC: 1070 if (src_type == DMAE_CMD_SRC_PCI) 1071 DP(msglvl, "DMAE: opcode 0x%08x\n" 1072 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" 1073 "comp_addr [%x:%08x], comp_val 0x%08x\n", 1074 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, 1075 dmae->len, dmae->dst_addr_lo >> 2, 1076 dmae->comp_addr_hi, dmae->comp_addr_lo, 1077 dmae->comp_val); 1078 else 1079 DP(msglvl, "DMAE: opcode 0x%08x\n" 1080 "src [%08x], len [%d*4], dst [%08x]\n" 1081 "comp_addr [%x:%08x], comp_val 0x%08x\n", 1082 dmae->opcode, dmae->src_addr_lo >> 2, 1083 dmae->len, dmae->dst_addr_lo >> 2, 1084 dmae->comp_addr_hi, dmae->comp_addr_lo, 1085 dmae->comp_val); 1086 break; 1087 default: 1088 if (src_type == DMAE_CMD_SRC_PCI) 1089 DP(msglvl, "DMAE: opcode 0x%08x\n" 1090 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n" 1091 "comp_addr [%x:%08x] comp_val 0x%08x\n", 1092 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, 1093 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, 1094 dmae->comp_val); 1095 else 1096 DP(msglvl, "DMAE: opcode 0x%08x\n" 1097 "src_addr [%08x] len [%d * 4] dst_addr [none]\n" 1098 "comp_addr [%x:%08x] comp_val 0x%08x\n", 1099 dmae->opcode, dmae->src_addr_lo >> 2, 1100 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, 1101 dmae->comp_val); 1102 break; 1103 } 1104 1105 } 1106 #endif 1107 1108 static int 1109 bxe_acquire_hw_lock(struct bxe_softc *sc, 1110 uint32_t resource) 1111 { 1112 uint32_t lock_status; 1113 uint32_t resource_bit = (1 << resource); 1114 int func = SC_FUNC(sc); 1115 uint32_t hw_lock_control_reg; 1116 int cnt; 1117 1118 /* validate the resource is within range */ 1119 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1120 BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource); 1121 return (-1); 1122 } 1123 1124 if (func <= 5) { 1125 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); 1126 } else { 1127 hw_lock_control_reg = 1128 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); 1129 } 1130 1131 /* validate the resource is not already 
taken */ 1132 lock_status = REG_RD(sc, hw_lock_control_reg); 1133 if (lock_status & resource_bit) { 1134 BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n", 1135 lock_status, resource_bit); 1136 return (-1); 1137 } 1138 1139 /* try every 5ms for 5 seconds */ 1140 for (cnt = 0; cnt < 1000; cnt++) { 1141 REG_WR(sc, (hw_lock_control_reg + 4), resource_bit); 1142 lock_status = REG_RD(sc, hw_lock_control_reg); 1143 if (lock_status & resource_bit) { 1144 return (0); 1145 } 1146 DELAY(5000); 1147 } 1148 1149 BLOGE(sc, "Resource lock timeout!\n"); 1150 return (-1); 1151 } 1152 1153 static int 1154 bxe_release_hw_lock(struct bxe_softc *sc, 1155 uint32_t resource) 1156 { 1157 uint32_t lock_status; 1158 uint32_t resource_bit = (1 << resource); 1159 int func = SC_FUNC(sc); 1160 uint32_t hw_lock_control_reg; 1161 1162 /* validate the resource is within range */ 1163 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1164 BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource); 1165 return (-1); 1166 } 1167 1168 if (func <= 5) { 1169 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); 1170 } else { 1171 hw_lock_control_reg = 1172 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); 1173 } 1174 1175 /* validate the resource is currently taken */ 1176 lock_status = REG_RD(sc, hw_lock_control_reg); 1177 if (!(lock_status & resource_bit)) { 1178 BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n", 1179 lock_status, resource_bit); 1180 return (-1); 1181 } 1182 1183 REG_WR(sc, hw_lock_control_reg, resource_bit); 1184 return (0); 1185 } 1186 static void bxe_acquire_phy_lock(struct bxe_softc *sc) 1187 { 1188 BXE_PHY_LOCK(sc); 1189 bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO); 1190 } 1191 1192 static void bxe_release_phy_lock(struct bxe_softc *sc) 1193 { 1194 bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO); 1195 BXE_PHY_UNLOCK(sc); 1196 } 1197 /* 1198 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise, 1199 * had we done things the other way around, if two pfs from the same port 1200 * would attempt to access nvram at the same time, we could run into a 1201 * scenario such as: 1202 * pf A takes the port lock. 1203 * pf B succeeds in taking the same lock since they are from the same port. 1204 * pf A takes the per pf misc lock. Performs eeprom access. 1205 * pf A finishes. Unlocks the per pf misc lock. 1206 * Pf B takes the lock and proceeds to perform it's own access. 1207 * pf A unlocks the per port lock, while pf B is still working (!). 
1208 * mcp takes the per port lock and corrupts pf B's access (and/or has it's own 1209 * access corrupted by pf B).* 1210 */ 1211 static int 1212 bxe_acquire_nvram_lock(struct bxe_softc *sc) 1213 { 1214 int port = SC_PORT(sc); 1215 int count, i; 1216 uint32_t val = 0; 1217 1218 /* acquire HW lock: protect against other PFs in PF Direct Assignment */ 1219 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM); 1220 1221 /* adjust timeout for emulation/FPGA */ 1222 count = NVRAM_TIMEOUT_COUNT; 1223 if (CHIP_REV_IS_SLOW(sc)) { 1224 count *= 100; 1225 } 1226 1227 /* request access to nvram interface */ 1228 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 1229 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); 1230 1231 for (i = 0; i < count*10; i++) { 1232 val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB); 1233 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { 1234 break; 1235 } 1236 1237 DELAY(5); 1238 } 1239 1240 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { 1241 BLOGE(sc, "Cannot get access to nvram interface\n"); 1242 return (-1); 1243 } 1244 1245 return (0); 1246 } 1247 1248 static int 1249 bxe_release_nvram_lock(struct bxe_softc *sc) 1250 { 1251 int port = SC_PORT(sc); 1252 int count, i; 1253 uint32_t val = 0; 1254 1255 /* adjust timeout for emulation/FPGA */ 1256 count = NVRAM_TIMEOUT_COUNT; 1257 if (CHIP_REV_IS_SLOW(sc)) { 1258 count *= 100; 1259 } 1260 1261 /* relinquish nvram interface */ 1262 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 1263 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); 1264 1265 for (i = 0; i < count*10; i++) { 1266 val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB); 1267 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { 1268 break; 1269 } 1270 1271 DELAY(5); 1272 } 1273 1274 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { 1275 BLOGE(sc, "Cannot free access to nvram interface\n"); 1276 return (-1); 1277 } 1278 1279 /* release HW lock: protect against other PFs in PF Direct Assignment */ 1280 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM); 1281 1282 return (0); 1283 } 1284 1285 static void 1286 bxe_enable_nvram_access(struct bxe_softc *sc) 1287 { 1288 uint32_t val; 1289 1290 val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE); 1291 1292 /* enable both bits, even on read */ 1293 REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE, 1294 (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN)); 1295 } 1296 1297 static void 1298 bxe_disable_nvram_access(struct bxe_softc *sc) 1299 { 1300 uint32_t val; 1301 1302 val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE); 1303 1304 /* disable both bits, even after read */ 1305 REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE, 1306 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | 1307 MCPR_NVM_ACCESS_ENABLE_WR_EN))); 1308 } 1309 1310 static int 1311 bxe_nvram_read_dword(struct bxe_softc *sc, 1312 uint32_t offset, 1313 uint32_t *ret_val, 1314 uint32_t cmd_flags) 1315 { 1316 int count, i, rc; 1317 uint32_t val; 1318 1319 /* build the command word */ 1320 cmd_flags |= MCPR_NVM_COMMAND_DOIT; 1321 1322 /* need to clear DONE bit separately */ 1323 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); 1324 1325 /* address of the NVRAM to read from */ 1326 REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, 1327 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); 1328 1329 /* issue a read command */ 1330 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); 1331 1332 /* adjust timeout for emulation/FPGA */ 1333 count = NVRAM_TIMEOUT_COUNT; 1334 if (CHIP_REV_IS_SLOW(sc)) { 1335 count *= 100; 1336 } 1337 1338 /* wait for completion */ 1339 *ret_val = 0; 1340 rc = -1; 1341 for (i = 0; i < count; i++) { 1342 DELAY(5); 1343 val = REG_RD(sc, 
MCP_REG_MCPR_NVM_COMMAND); 1344 1345 if (val & MCPR_NVM_COMMAND_DONE) { 1346 val = REG_RD(sc, MCP_REG_MCPR_NVM_READ); 1347 /* we read nvram data in cpu order 1348 * but ethtool sees it as an array of bytes 1349 * converting to big-endian will do the work 1350 */ 1351 *ret_val = htobe32(val); 1352 rc = 0; 1353 break; 1354 } 1355 } 1356 1357 if (rc == -1) { 1358 BLOGE(sc, "nvram read timeout expired\n"); 1359 } 1360 1361 return (rc); 1362 } 1363 1364 static int 1365 bxe_nvram_read(struct bxe_softc *sc, 1366 uint32_t offset, 1367 uint8_t *ret_buf, 1368 int buf_size) 1369 { 1370 uint32_t cmd_flags; 1371 uint32_t val; 1372 int rc; 1373 1374 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { 1375 BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n", 1376 offset, buf_size); 1377 return (-1); 1378 } 1379 1380 if ((offset + buf_size) > sc->devinfo.flash_size) { 1381 BLOGE(sc, "Invalid parameter, " 1382 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", 1383 offset, buf_size, sc->devinfo.flash_size); 1384 return (-1); 1385 } 1386 1387 /* request access to nvram interface */ 1388 rc = bxe_acquire_nvram_lock(sc); 1389 if (rc) { 1390 return (rc); 1391 } 1392 1393 /* enable access to nvram interface */ 1394 bxe_enable_nvram_access(sc); 1395 1396 /* read the first word(s) */ 1397 cmd_flags = MCPR_NVM_COMMAND_FIRST; 1398 while ((buf_size > sizeof(uint32_t)) && (rc == 0)) { 1399 rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); 1400 memcpy(ret_buf, &val, 4); 1401 1402 /* advance to the next dword */ 1403 offset += sizeof(uint32_t); 1404 ret_buf += sizeof(uint32_t); 1405 buf_size -= sizeof(uint32_t); 1406 cmd_flags = 0; 1407 } 1408 1409 if (rc == 0) { 1410 cmd_flags |= MCPR_NVM_COMMAND_LAST; 1411 rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); 1412 memcpy(ret_buf, &val, 4); 1413 } 1414 1415 /* disable access to nvram interface */ 1416 bxe_disable_nvram_access(sc); 1417 bxe_release_nvram_lock(sc); 1418 1419 return (rc); 1420 } 1421 1422 static int 1423 bxe_nvram_write_dword(struct bxe_softc *sc, 1424 uint32_t offset, 1425 uint32_t val, 1426 uint32_t cmd_flags) 1427 { 1428 int count, i, rc; 1429 1430 /* build the command word */ 1431 cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR); 1432 1433 /* need to clear DONE bit separately */ 1434 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); 1435 1436 /* write the data */ 1437 REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val); 1438 1439 /* address of the NVRAM to write to */ 1440 REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, 1441 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); 1442 1443 /* issue the write command */ 1444 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); 1445 1446 /* adjust timeout for emulation/FPGA */ 1447 count = NVRAM_TIMEOUT_COUNT; 1448 if (CHIP_REV_IS_SLOW(sc)) { 1449 count *= 100; 1450 } 1451 1452 /* wait for completion */ 1453 rc = -1; 1454 for (i = 0; i < count; i++) { 1455 DELAY(5); 1456 val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND); 1457 if (val & MCPR_NVM_COMMAND_DONE) { 1458 rc = 0; 1459 break; 1460 } 1461 } 1462 1463 if (rc == -1) { 1464 BLOGE(sc, "nvram write timeout expired\n"); 1465 } 1466 1467 return (rc); 1468 } 1469 1470 #define BYTE_OFFSET(offset) (8 * (offset & 0x03)) 1471 1472 static int 1473 bxe_nvram_write1(struct bxe_softc *sc, 1474 uint32_t offset, 1475 uint8_t *data_buf, 1476 int buf_size) 1477 { 1478 uint32_t cmd_flags; 1479 uint32_t align_offset; 1480 uint32_t val; 1481 int rc; 1482 1483 if ((offset + buf_size) > sc->devinfo.flash_size) { 1484 BLOGE(sc, "Invalid parameter, " 1485 "offset 0x%x + 
buf_size 0x%x > flash_size 0x%x\n", 1486 offset, buf_size, sc->devinfo.flash_size); 1487 return (-1); 1488 } 1489 1490 /* request access to nvram interface */ 1491 rc = bxe_acquire_nvram_lock(sc); 1492 if (rc) { 1493 return (rc); 1494 } 1495 1496 /* enable access to nvram interface */ 1497 bxe_enable_nvram_access(sc); 1498 1499 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); 1500 align_offset = (offset & ~0x03); 1501 rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags); 1502 1503 if (rc == 0) { 1504 val &= ~(0xff << BYTE_OFFSET(offset)); 1505 val |= (*data_buf << BYTE_OFFSET(offset)); 1506 1507 /* nvram data is returned as an array of bytes 1508 * convert it back to cpu order 1509 */ 1510 val = be32toh(val); 1511 1512 rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags); 1513 } 1514 1515 /* disable access to nvram interface */ 1516 bxe_disable_nvram_access(sc); 1517 bxe_release_nvram_lock(sc); 1518 1519 return (rc); 1520 } 1521 1522 static int 1523 bxe_nvram_write(struct bxe_softc *sc, 1524 uint32_t offset, 1525 uint8_t *data_buf, 1526 int buf_size) 1527 { 1528 uint32_t cmd_flags; 1529 uint32_t val; 1530 uint32_t written_so_far; 1531 int rc; 1532 1533 if (buf_size == 1) { 1534 return (bxe_nvram_write1(sc, offset, data_buf, buf_size)); 1535 } 1536 1537 if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) { 1538 BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n", 1539 offset, buf_size); 1540 return (-1); 1541 } 1542 1543 if (buf_size == 0) { 1544 return (0); /* nothing to do */ 1545 } 1546 1547 if ((offset + buf_size) > sc->devinfo.flash_size) { 1548 BLOGE(sc, "Invalid parameter, " 1549 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", 1550 offset, buf_size, sc->devinfo.flash_size); 1551 return (-1); 1552 } 1553 1554 /* request access to nvram interface */ 1555 rc = bxe_acquire_nvram_lock(sc); 1556 if (rc) { 1557 return (rc); 1558 } 1559 1560 /* enable access to nvram interface */ 1561 bxe_enable_nvram_access(sc); 1562 1563 written_so_far = 0; 1564 cmd_flags = MCPR_NVM_COMMAND_FIRST; 1565 while ((written_so_far < buf_size) && (rc == 0)) { 1566 if (written_so_far == (buf_size - sizeof(uint32_t))) { 1567 cmd_flags |= MCPR_NVM_COMMAND_LAST; 1568 } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) { 1569 cmd_flags |= MCPR_NVM_COMMAND_LAST; 1570 } else if ((offset % NVRAM_PAGE_SIZE) == 0) { 1571 cmd_flags |= MCPR_NVM_COMMAND_FIRST; 1572 } 1573 1574 memcpy(&val, data_buf, 4); 1575 1576 rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags); 1577 1578 /* advance to the next dword */ 1579 offset += sizeof(uint32_t); 1580 data_buf += sizeof(uint32_t); 1581 written_so_far += sizeof(uint32_t); 1582 cmd_flags = 0; 1583 } 1584 1585 /* disable access to nvram interface */ 1586 bxe_disable_nvram_access(sc); 1587 bxe_release_nvram_lock(sc); 1588 1589 return (rc); 1590 } 1591 1592 /* copy command into DMAE command memory and set DMAE command Go */ 1593 void 1594 bxe_post_dmae(struct bxe_softc *sc, 1595 struct dmae_command *dmae, 1596 int idx) 1597 { 1598 uint32_t cmd_offset; 1599 int i; 1600 1601 cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx)); 1602 for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) { 1603 REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i)); 1604 } 1605 1606 REG_WR(sc, dmae_reg_go_c[idx], 1); 1607 } 1608 1609 uint32_t 1610 bxe_dmae_opcode_add_comp(uint32_t opcode, 1611 uint8_t comp_type) 1612 { 1613 return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | 1614 DMAE_COMMAND_C_TYPE_ENABLE)); 1615 } 
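/*
 * Illustrative sketch only (kept under #if 0, not compiled): one way a
 * caller might drive the NVRAM access routines defined earlier in this
 * file. The offset, buffer length, and the helper name
 * bxe_nvram_read_example are hypothetical and not part of the driver;
 * they simply show the alignment rules bxe_nvram_read() enforces.
 */
#if 0
static int
bxe_nvram_read_example(struct bxe_softc *sc)
{
    uint8_t buf[64]; /* length must be a non-zero multiple of 4 */

    /* offset must also be dword aligned and inside devinfo.flash_size */
    if (bxe_nvram_read(sc, 0x100 /* hypothetical offset */,
                       buf, sizeof(buf)) != 0) {
        BLOGE(sc, "example nvram read failed\n");
        return (-1);
    }

    return (0);
}
#endif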
1616 1617 uint32_t 1618 bxe_dmae_opcode_clr_src_reset(uint32_t opcode) 1619 { 1620 return (opcode & ~DMAE_COMMAND_SRC_RESET); 1621 } 1622 1623 uint32_t 1624 bxe_dmae_opcode(struct bxe_softc *sc, 1625 uint8_t src_type, 1626 uint8_t dst_type, 1627 uint8_t with_comp, 1628 uint8_t comp_type) 1629 { 1630 uint32_t opcode = 0; 1631 1632 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) | 1633 (dst_type << DMAE_COMMAND_DST_SHIFT)); 1634 1635 opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET); 1636 1637 opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); 1638 1639 opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) | 1640 (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT)); 1641 1642 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); 1643 1644 #ifdef __BIG_ENDIAN 1645 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; 1646 #else 1647 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; 1648 #endif 1649 1650 if (with_comp) { 1651 opcode = bxe_dmae_opcode_add_comp(opcode, comp_type); 1652 } 1653 1654 return (opcode); 1655 } 1656 1657 static void 1658 bxe_prep_dmae_with_comp(struct bxe_softc *sc, 1659 struct dmae_command *dmae, 1660 uint8_t src_type, 1661 uint8_t dst_type) 1662 { 1663 memset(dmae, 0, sizeof(struct dmae_command)); 1664 1665 /* set the opcode */ 1666 dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type, 1667 TRUE, DMAE_COMP_PCI); 1668 1669 /* fill in the completion parameters */ 1670 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp)); 1671 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp)); 1672 dmae->comp_val = DMAE_COMP_VAL; 1673 } 1674 1675 /* issue a DMAE command over the init channel and wait for completion */ 1676 static int 1677 bxe_issue_dmae_with_comp(struct bxe_softc *sc, 1678 struct dmae_command *dmae) 1679 { 1680 uint32_t *wb_comp = BXE_SP(sc, wb_comp); 1681 int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000; 1682 1683 BXE_DMAE_LOCK(sc); 1684 1685 /* reset completion */ 1686 *wb_comp = 0; 1687 1688 /* post the command on the channel used for initializations */ 1689 bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc)); 1690 1691 /* wait for completion */ 1692 DELAY(5); 1693 1694 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { 1695 if (!timeout || 1696 (sc->recovery_state != BXE_RECOVERY_DONE && 1697 sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) { 1698 BLOGE(sc, "DMAE timeout!\n"); 1699 BXE_DMAE_UNLOCK(sc); 1700 return (DMAE_TIMEOUT); 1701 } 1702 1703 timeout--; 1704 DELAY(50); 1705 } 1706 1707 if (*wb_comp & DMAE_PCI_ERR_FLAG) { 1708 BLOGE(sc, "DMAE PCI error!\n"); 1709 BXE_DMAE_UNLOCK(sc); 1710 return (DMAE_PCI_ERROR); 1711 } 1712 1713 BXE_DMAE_UNLOCK(sc); 1714 return (0); 1715 } 1716 1717 void 1718 bxe_read_dmae(struct bxe_softc *sc, 1719 uint32_t src_addr, 1720 uint32_t len32) 1721 { 1722 struct dmae_command dmae; 1723 uint32_t *data; 1724 int i, rc; 1725 1726 DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32)); 1727 1728 if (!sc->dmae_ready) { 1729 data = BXE_SP(sc, wb_data[0]); 1730 1731 for (i = 0; i < len32; i++) { 1732 data[i] = (CHIP_IS_E1(sc)) ? 
1733 bxe_reg_rd_ind(sc, (src_addr + (i * 4))) : 1734 REG_RD(sc, (src_addr + (i * 4))); 1735 } 1736 1737 return; 1738 } 1739 1740 /* set opcode and fixed command fields */ 1741 bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); 1742 1743 /* fill in addresses and len */ 1744 dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ 1745 dmae.src_addr_hi = 0; 1746 dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data)); 1747 dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data)); 1748 dmae.len = len32; 1749 1750 /* issue the command and wait for completion */ 1751 if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { 1752 bxe_panic(sc, ("DMAE failed (%d)\n", rc)); 1753 }; 1754 } 1755 1756 void 1757 bxe_write_dmae(struct bxe_softc *sc, 1758 bus_addr_t dma_addr, 1759 uint32_t dst_addr, 1760 uint32_t len32) 1761 { 1762 struct dmae_command dmae; 1763 int rc; 1764 1765 if (!sc->dmae_ready) { 1766 DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32)); 1767 1768 if (CHIP_IS_E1(sc)) { 1769 ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); 1770 } else { 1771 ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); 1772 } 1773 1774 return; 1775 } 1776 1777 /* set opcode and fixed command fields */ 1778 bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); 1779 1780 /* fill in addresses and len */ 1781 dmae.src_addr_lo = U64_LO(dma_addr); 1782 dmae.src_addr_hi = U64_HI(dma_addr); 1783 dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ 1784 dmae.dst_addr_hi = 0; 1785 dmae.len = len32; 1786 1787 /* issue the command and wait for completion */ 1788 if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { 1789 bxe_panic(sc, ("DMAE failed (%d)\n", rc)); 1790 } 1791 } 1792 1793 void 1794 bxe_write_dmae_phys_len(struct bxe_softc *sc, 1795 bus_addr_t phys_addr, 1796 uint32_t addr, 1797 uint32_t len) 1798 { 1799 int dmae_wr_max = DMAE_LEN32_WR_MAX(sc); 1800 int offset = 0; 1801 1802 while (len > dmae_wr_max) { 1803 bxe_write_dmae(sc, 1804 (phys_addr + offset), /* src DMA address */ 1805 (addr + offset), /* dst GRC address */ 1806 dmae_wr_max); 1807 offset += (dmae_wr_max * 4); 1808 len -= dmae_wr_max; 1809 } 1810 1811 bxe_write_dmae(sc, 1812 (phys_addr + offset), /* src DMA address */ 1813 (addr + offset), /* dst GRC address */ 1814 len); 1815 } 1816 1817 void 1818 bxe_set_ctx_validation(struct bxe_softc *sc, 1819 struct eth_context *cxt, 1820 uint32_t cid) 1821 { 1822 /* ustorm cxt validation */ 1823 cxt->ustorm_ag_context.cdu_usage = 1824 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), 1825 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); 1826 /* xcontext validation */ 1827 cxt->xstorm_ag_context.cdu_reserved = 1828 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), 1829 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); 1830 } 1831 1832 static void 1833 bxe_storm_memset_hc_timeout(struct bxe_softc *sc, 1834 uint8_t port, 1835 uint8_t fw_sb_id, 1836 uint8_t sb_index, 1837 uint8_t ticks) 1838 { 1839 uint32_t addr = 1840 (BAR_CSTRORM_INTMEM + 1841 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); 1842 1843 REG_WR8(sc, addr, ticks); 1844 1845 BLOGD(sc, DBG_LOAD, 1846 "port %d fw_sb_id %d sb_index %d ticks %d\n", 1847 port, fw_sb_id, sb_index, ticks); 1848 } 1849 1850 static void 1851 bxe_storm_memset_hc_disable(struct bxe_softc *sc, 1852 uint8_t port, 1853 uint16_t fw_sb_id, 1854 uint8_t sb_index, 1855 uint8_t disable) 1856 { 1857 uint32_t enable_flag = 1858 (disable) ? 
0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 1859 uint32_t addr = 1860 (BAR_CSTRORM_INTMEM + 1861 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); 1862 uint8_t flags; 1863 1864 /* clear and set */ 1865 flags = REG_RD8(sc, addr); 1866 flags &= ~HC_INDEX_DATA_HC_ENABLED; 1867 flags |= enable_flag; 1868 REG_WR8(sc, addr, flags); 1869 1870 BLOGD(sc, DBG_LOAD, 1871 "port %d fw_sb_id %d sb_index %d disable %d\n", 1872 port, fw_sb_id, sb_index, disable); 1873 } 1874 1875 void 1876 bxe_update_coalesce_sb_index(struct bxe_softc *sc, 1877 uint8_t fw_sb_id, 1878 uint8_t sb_index, 1879 uint8_t disable, 1880 uint16_t usec) 1881 { 1882 int port = SC_PORT(sc); 1883 uint8_t ticks = (usec / 4); /* XXX ??? */ 1884 1885 bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks); 1886 1887 disable = (disable) ? 1 : ((usec) ? 0 : 1); 1888 bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable); 1889 } 1890 1891 void 1892 elink_cb_udelay(struct bxe_softc *sc, 1893 uint32_t usecs) 1894 { 1895 DELAY(usecs); 1896 } 1897 1898 uint32_t 1899 elink_cb_reg_read(struct bxe_softc *sc, 1900 uint32_t reg_addr) 1901 { 1902 return (REG_RD(sc, reg_addr)); 1903 } 1904 1905 void 1906 elink_cb_reg_write(struct bxe_softc *sc, 1907 uint32_t reg_addr, 1908 uint32_t val) 1909 { 1910 REG_WR(sc, reg_addr, val); 1911 } 1912 1913 void 1914 elink_cb_reg_wb_write(struct bxe_softc *sc, 1915 uint32_t offset, 1916 uint32_t *wb_write, 1917 uint16_t len) 1918 { 1919 REG_WR_DMAE(sc, offset, wb_write, len); 1920 } 1921 1922 void 1923 elink_cb_reg_wb_read(struct bxe_softc *sc, 1924 uint32_t offset, 1925 uint32_t *wb_write, 1926 uint16_t len) 1927 { 1928 REG_RD_DMAE(sc, offset, wb_write, len); 1929 } 1930 1931 uint8_t 1932 elink_cb_path_id(struct bxe_softc *sc) 1933 { 1934 return (SC_PATH(sc)); 1935 } 1936 1937 void 1938 elink_cb_event_log(struct bxe_softc *sc, 1939 const elink_log_id_t elink_log_id, 1940 ...) 
1941 { 1942 /* XXX */ 1943 #if 0 1944 //va_list ap; 1945 va_start(ap, elink_log_id); 1946 _XXX_(sc, lm_log_id, ap); 1947 va_end(ap); 1948 #endif 1949 BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); 1950 } 1951 1952 static int 1953 bxe_set_spio(struct bxe_softc *sc, 1954 int spio, 1955 uint32_t mode) 1956 { 1957 uint32_t spio_reg; 1958 1959 /* Only 2 SPIOs are configurable */ 1960 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 1961 BLOGE(sc, "Invalid SPIO 0x%x\n", spio); 1962 return (-1); 1963 } 1964 1965 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1966 1967 /* read SPIO and mask except the float bits */ 1968 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 1969 1970 switch (mode) { 1971 case MISC_SPIO_OUTPUT_LOW: 1972 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); 1973 /* clear FLOAT and set CLR */ 1974 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1975 spio_reg |= (spio << MISC_SPIO_CLR_POS); 1976 break; 1977 1978 case MISC_SPIO_OUTPUT_HIGH: 1979 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); 1980 /* clear FLOAT and set SET */ 1981 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1982 spio_reg |= (spio << MISC_SPIO_SET_POS); 1983 break; 1984 1985 case MISC_SPIO_INPUT_HI_Z: 1986 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); 1987 /* set FLOAT */ 1988 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 1989 break; 1990 1991 default: 1992 break; 1993 } 1994 1995 REG_WR(sc, MISC_REG_SPIO, spio_reg); 1996 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1997 1998 return (0); 1999 } 2000 2001 static int 2002 bxe_gpio_read(struct bxe_softc *sc, 2003 int gpio_num, 2004 uint8_t port) 2005 { 2006 /* The GPIO should be swapped if swap register is set and active */ 2007 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2008 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2009 int gpio_shift = (gpio_num + 2010 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2011 uint32_t gpio_mask = (1 << gpio_shift); 2012 uint32_t gpio_reg; 2013 2014 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2015 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2016 return (-1); 2017 } 2018 2019 /* read GPIO value */ 2020 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2021 2022 /* get the requested pin value */ 2023 return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; 2024 } 2025 2026 static int 2027 bxe_gpio_write(struct bxe_softc *sc, 2028 int gpio_num, 2029 uint32_t mode, 2030 uint8_t port) 2031 { 2032 /* The GPIO should be swapped if swap register is set and active */ 2033 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2034 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2035 int gpio_shift = (gpio_num + 2036 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2037 uint32_t gpio_mask = (1 << gpio_shift); 2038 uint32_t gpio_reg; 2039 2040 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2041 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2042 return (-1); 2043 } 2044 2045 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2046 2047 /* read GPIO and mask except the float bits */ 2048 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 2049 2050 switch (mode) { 2051 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2052 BLOGD(sc, DBG_PHY, 2053 "Set GPIO %d (shift %d) -> output low\n", 2054 gpio_num, gpio_shift); 2055 /* clear FLOAT and set CLR */ 2056 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2057 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 2058 break; 2059 2060 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2061 BLOGD(sc, DBG_PHY, 2062 "Set GPIO %d (shift %d) -> output high\n", 2063 gpio_num, gpio_shift); 2064 /* clear FLOAT and set SET */ 2065 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2066 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 2067 break; 2068 2069 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2070 BLOGD(sc, DBG_PHY, 2071 "Set GPIO %d (shift %d) -> input\n", 2072 gpio_num, gpio_shift); 2073 /* set FLOAT */ 2074 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2075 break; 2076 2077 default: 2078 break; 2079 } 2080 2081 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2082 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2083 2084 return (0); 2085 } 2086 2087 static int 2088 bxe_gpio_mult_write(struct bxe_softc *sc, 2089 uint8_t pins, 2090 uint32_t mode) 2091 { 2092 uint32_t gpio_reg; 2093 2094 /* any port swapping should be handled by caller */ 2095 2096 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2097 2098 /* read GPIO and mask except the float bits */ 2099 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2100 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2101 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2102 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2103 2104 switch (mode) { 2105 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2106 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); 2107 /* set CLR */ 2108 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2109 break; 2110 2111 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2112 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); 2113 /* set SET */ 2114 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2115 break; 2116 2117 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2118 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); 2119 /* set FLOAT */ 2120 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2121 break; 2122 2123 default: 2124 BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode); 2125 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2126 return (-1); 2127 } 2128 2129 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2130 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2131 2132 return (0); 2133 } 2134 2135 static int 2136 bxe_gpio_int_write(struct bxe_softc *sc, 2137 int gpio_num, 2138 uint32_t mode, 2139 uint8_t port) 2140 { 2141 /* The GPIO should be swapped if swap register is set and active */ 2142 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2143 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2144 int gpio_shift = (gpio_num + 2145 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2146 uint32_t gpio_mask = (1 << gpio_shift); 2147 uint32_t gpio_reg; 2148 2149 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2150 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2151 return (-1); 2152 } 2153 2154 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2155 2156 /* read GPIO int */ 2157 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); 2158 2159 switch (mode) { 2160 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 2161 BLOGD(sc, DBG_PHY, 2162 "Clear GPIO INT %d (shift %d) -> output low\n", 2163 gpio_num, gpio_shift); 2164 /* clear SET and set CLR */ 2165 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2166 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2167 break; 2168 2169 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2170 BLOGD(sc, DBG_PHY, 2171 "Set GPIO INT %d (shift %d) -> output high\n", 2172 gpio_num, gpio_shift); 2173 /* clear CLR and set SET */ 2174 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2175 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2176 break; 2177 2178 default: 2179 break; 2180 } 2181 2182 REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); 2183 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2184 2185 return (0); 2186 } 2187 2188 uint32_t 2189 elink_cb_gpio_read(struct bxe_softc *sc, 2190 uint16_t gpio_num, 2191 uint8_t port) 2192 { 2193 return (bxe_gpio_read(sc, gpio_num, port)); 2194 } 2195 2196 uint8_t 2197 elink_cb_gpio_write(struct bxe_softc *sc, 2198 uint16_t gpio_num, 2199 uint8_t mode, /* 0=low 1=high */ 2200 uint8_t port) 2201 { 2202 return (bxe_gpio_write(sc, gpio_num, mode, port)); 2203 } 2204 2205 uint8_t 2206 elink_cb_gpio_mult_write(struct bxe_softc *sc, 2207 uint8_t pins, 2208 uint8_t mode) /* 0=low 1=high */ 2209 { 2210 return (bxe_gpio_mult_write(sc, pins, mode)); 2211 } 2212 2213 uint8_t 2214 elink_cb_gpio_int_write(struct bxe_softc *sc, 2215 uint16_t gpio_num, 2216 uint8_t mode, /* 0=low 1=high */ 2217 uint8_t port) 2218 { 2219 return (bxe_gpio_int_write(sc, gpio_num, mode, port)); 2220 } 2221 2222 void 2223 elink_cb_notify_link_changed(struct bxe_softc *sc) 2224 { 2225 REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + 2226 (SC_FUNC(sc) * sizeof(uint32_t))), 1); 2227 } 2228 2229 /* send the MCP a request, block until there is a reply */ 2230 uint32_t 2231 elink_cb_fw_command(struct bxe_softc *sc, 2232 uint32_t command, 2233 uint32_t param) 2234 { 2235 int mb_idx = SC_FW_MB_IDX(sc); 2236 uint32_t seq; 2237 uint32_t rc = 0; 2238 uint32_t cnt = 1; 2239 uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; 2240 2241 BXE_FWMB_LOCK(sc); 2242 2243 seq = ++sc->fw_seq; 2244 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); 2245 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); 2246 2247 BLOGD(sc, DBG_PHY, 2248 "wrote command 0x%08x to FW MB param 0x%08x\n", 2249 (command | seq), param); 2250 2251 /* Let the FW do it's magic. GIve it up to 5 seconds... */ 2252 do { 2253 DELAY(delay * 1000); 2254 rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); 2255 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 2256 2257 BLOGD(sc, DBG_PHY, 2258 "[after %d ms] read 0x%x seq 0x%x from FW MB\n", 2259 cnt*delay, rc, seq); 2260 2261 /* is this a reply to our command? */ 2262 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 2263 rc &= FW_MSG_CODE_MASK; 2264 } else { 2265 /* Ruh-roh! 
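* The loop above keeps polling until (rc & FW_MSG_SEQ_NUMBER_MASK) matches
* the sequence value written to drv_mb_header; landing here means roughly
* 5 seconds passed without the firmware echoing that sequence number, so
* the request is treated as unanswered and 0 is returned to the caller.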
*/ 2266 BLOGE(sc, "FW failed to respond!\n"); 2267 // XXX bxe_fw_dump(sc); 2268 rc = 0; 2269 } 2270 2271 BXE_FWMB_UNLOCK(sc); 2272 return (rc); 2273 } 2274 2275 static uint32_t 2276 bxe_fw_command(struct bxe_softc *sc, 2277 uint32_t command, 2278 uint32_t param) 2279 { 2280 return (elink_cb_fw_command(sc, command, param)); 2281 } 2282 2283 static void 2284 __storm_memset_dma_mapping(struct bxe_softc *sc, 2285 uint32_t addr, 2286 bus_addr_t mapping) 2287 { 2288 REG_WR(sc, addr, U64_LO(mapping)); 2289 REG_WR(sc, (addr + 4), U64_HI(mapping)); 2290 } 2291 2292 static void 2293 storm_memset_spq_addr(struct bxe_softc *sc, 2294 bus_addr_t mapping, 2295 uint16_t abs_fid) 2296 { 2297 uint32_t addr = (XSEM_REG_FAST_MEMORY + 2298 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); 2299 __storm_memset_dma_mapping(sc, addr, mapping); 2300 } 2301 2302 static void 2303 storm_memset_vf_to_pf(struct bxe_softc *sc, 2304 uint16_t abs_fid, 2305 uint16_t pf_id) 2306 { 2307 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2308 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2309 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2310 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2311 } 2312 2313 static void 2314 storm_memset_func_en(struct bxe_softc *sc, 2315 uint16_t abs_fid, 2316 uint8_t enable) 2317 { 2318 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2319 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2320 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2321 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2322 } 2323 2324 static void 2325 storm_memset_eq_data(struct bxe_softc *sc, 2326 struct event_ring_data *eq_data, 2327 uint16_t pfid) 2328 { 2329 uint32_t addr; 2330 size_t size; 2331 2332 addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); 2333 size = sizeof(struct event_ring_data); 2334 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); 2335 } 2336 2337 static void 2338 storm_memset_eq_prod(struct bxe_softc *sc, 2339 uint16_t eq_prod, 2340 uint16_t pfid) 2341 { 2342 uint32_t addr = (BAR_CSTRORM_INTMEM + 2343 CSTORM_EVENT_RING_PROD_OFFSET(pfid)); 2344 REG_WR16(sc, addr, eq_prod); 2345 } 2346 2347 /* 2348 * Post a slowpath command. 2349 * 2350 * A slowpath command is used to propagate a configuration change through 2351 * the controller in a controlled manner, allowing each STORM processor and 2352 * other H/W blocks to phase in the change. The commands sent on the 2353 * slowpath are referred to as ramrods. Depending on the ramrod used the 2354 * completion of the ramrod will occur in different ways. Here's a 2355 * breakdown of ramrods and how they complete: 2356 * 2357 * RAMROD_CMD_ID_ETH_PORT_SETUP 2358 * Used to setup the leading connection on a port. Completes on the 2359 * Receive Completion Queue (RCQ) of that port (typically fp[0]). 2360 * 2361 * RAMROD_CMD_ID_ETH_CLIENT_SETUP 2362 * Used to setup an additional connection on a port. Completes on the 2363 * RCQ of the multi-queue/RSS connection being initialized. 2364 * 2365 * RAMROD_CMD_ID_ETH_STAT_QUERY 2366 * Used to force the storm processors to update the statistics database 2367 * in host memory. This ramrod is sent on the leading connection CID and 2368 * completes as an index increment of the CSTORM on the default status 2369 * block.
2370 * 2371 * RAMROD_CMD_ID_ETH_UPDATE 2372 * Used to update the state of the leading connection, usually to update 2373 * the RSS indirection table. Completes on the RCQ of the leading 2374 * connection. (Not currently used under FreeBSD until OS support becomes 2375 * available.) 2376 * 2377 * RAMROD_CMD_ID_ETH_HALT 2378 * Used when tearing down a connection prior to driver unload. Completes 2379 * on the RCQ of the multi-queue/RSS connection being torn down. Don't 2380 * use this on the leading connection. 2381 * 2382 * RAMROD_CMD_ID_ETH_SET_MAC 2383 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on 2384 * the RCQ of the leading connection. 2385 * 2386 * RAMROD_CMD_ID_ETH_CFC_DEL 2387 * Used when tearing down a connection prior to driver unload. Completes 2388 * on the RCQ of the leading connection (since the current connection 2389 * has been completely removed from controller memory). 2390 * 2391 * RAMROD_CMD_ID_ETH_PORT_DEL 2392 * Used to tear down the leading connection prior to driver unload, 2393 * typically fp[0]. Completes as an index increment of the CSTORM on the 2394 * default status block. 2395 * 2396 * RAMROD_CMD_ID_ETH_FORWARD_SETUP 2397 * Used for connection offload. Completes on the RCQ of the multi-queue 2398 * RSS connection that is being offloaded. (Not currently used under 2399 * FreeBSD.) 2400 * 2401 * There can only be one command pending per function. 2402 * 2403 * Returns: 2404 * 0 = Success, !0 = Failure. 2405 */ 2406 2407 /* must be called under the spq lock */ 2408 static inline 2409 struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) 2410 { 2411 struct eth_spe *next_spe = sc->spq_prod_bd; 2412 2413 if (sc->spq_prod_bd == sc->spq_last_bd) { 2414 /* wrap back to the first eth_spq */ 2415 sc->spq_prod_bd = sc->spq; 2416 sc->spq_prod_idx = 0; 2417 } else { 2418 sc->spq_prod_bd++; 2419 sc->spq_prod_idx++; 2420 } 2421 2422 return (next_spe); 2423 } 2424 2425 /* must be called under the spq lock */ 2426 static inline 2427 void bxe_sp_prod_update(struct bxe_softc *sc) 2428 { 2429 int func = SC_FUNC(sc); 2430 2431 /* 2432 * Make sure that BD data is updated before writing the producer. 2433 * BD data is written to the memory, the producer is read from the 2434 * memory, thus we need a full memory barrier to ensure the ordering. 2435 */ 2436 mb(); 2437 2438 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), 2439 sc->spq_prod_idx); 2440 2441 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 2442 BUS_SPACE_BARRIER_WRITE); 2443 } 2444 2445 /** 2446 * bxe_is_contextless_ramrod - check if the current command ends on EQ 2447 * 2448 * @cmd: command to check 2449 * @cmd_type: command type 2450 */ 2451 static inline 2452 int bxe_is_contextless_ramrod(int cmd, 2453 int cmd_type) 2454 { 2455 if ((cmd_type == NONE_CONNECTION_TYPE) || 2456 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 2457 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 2458 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 2459 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 2460 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 2461 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { 2462 return (TRUE); 2463 } else { 2464 return (FALSE); 2465 } 2466 } 2467 2468 /** 2469 * bxe_sp_post - place a single command on an SP ring 2470 * 2471 * @sc: driver handle 2472 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
2473 * @cid: SW CID the command is related to 2474 * @data_hi: command private data address (high 32 bits) 2475 * @data_lo: command private data address (low 32 bits) 2476 * @cmd_type: command type (e.g. NONE, ETH) 2477 * 2478 * SP data is handled as if it's always an address pair, thus data fields are 2479 * not swapped to little endian in upper functions. Instead this function swaps 2480 * data as if it's two uint32 fields. 2481 */ 2482 int 2483 bxe_sp_post(struct bxe_softc *sc, 2484 int command, 2485 int cid, 2486 uint32_t data_hi, 2487 uint32_t data_lo, 2488 int cmd_type) 2489 { 2490 struct eth_spe *spe; 2491 uint16_t type; 2492 int common; 2493 2494 common = bxe_is_contextless_ramrod(command, cmd_type); 2495 2496 BXE_SP_LOCK(sc); 2497 2498 if (common) { 2499 if (!atomic_load_acq_long(&sc->eq_spq_left)) { 2500 BLOGE(sc, "EQ ring is full!\n"); 2501 BXE_SP_UNLOCK(sc); 2502 return (-1); 2503 } 2504 } else { 2505 if (!atomic_load_acq_long(&sc->cq_spq_left)) { 2506 BLOGE(sc, "SPQ ring is full!\n"); 2507 BXE_SP_UNLOCK(sc); 2508 return (-1); 2509 } 2510 } 2511 2512 spe = bxe_sp_get_next(sc); 2513 2514 /* CID needs port number to be encoded int it */ 2515 spe->hdr.conn_and_cmd_data = 2516 htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid)); 2517 2518 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 2519 2520 /* TBD: Check if it works for VFs */ 2521 type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) & 2522 SPE_HDR_FUNCTION_ID); 2523 2524 spe->hdr.type = htole16(type); 2525 2526 spe->data.update_data_addr.hi = htole32(data_hi); 2527 spe->data.update_data_addr.lo = htole32(data_lo); 2528 2529 /* 2530 * It's ok if the actual decrement is issued towards the memory 2531 * somewhere between the lock and unlock. Thus no more explict 2532 * memory barrier is needed. 2533 */ 2534 if (common) { 2535 atomic_subtract_acq_long(&sc->eq_spq_left, 1); 2536 } else { 2537 atomic_subtract_acq_long(&sc->cq_spq_left, 1); 2538 } 2539 2540 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); 2541 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", 2542 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata)); 2543 BLOGD(sc, DBG_SP, 2544 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n", 2545 sc->spq_prod_idx, 2546 (uint32_t)U64_HI(sc->spq_dma.paddr), 2547 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), 2548 command, 2549 common, 2550 HW_CID(sc, cid), 2551 data_hi, 2552 data_lo, 2553 type, 2554 atomic_load_acq_long(&sc->cq_spq_left), 2555 atomic_load_acq_long(&sc->eq_spq_left)); 2556 2557 bxe_sp_prod_update(sc); 2558 2559 BXE_SP_UNLOCK(sc); 2560 return (0); 2561 } 2562 2563 /** 2564 * bxe_debug_print_ind_table - prints the indirection table configuration. 
2565 * 2566 * @sc: driver handle 2567 * @p: pointer to rss configuration 2568 */ 2569 #if 0 2570 static void 2571 bxe_debug_print_ind_table(struct bxe_softc *sc, 2572 struct ecore_config_rss_params *p) 2573 { 2574 int i; 2575 2576 BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n"); 2577 BLOGD(sc, DBG_LOAD, " 0x0000: "); 2578 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 2579 BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]); 2580 2581 /* Print 4 bytes in a line */ 2582 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && 2583 (((i + 1) & 0x3) == 0)) { 2584 BLOGD(sc, DBG_LOAD, "\n"); 2585 BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1); 2586 } 2587 } 2588 2589 BLOGD(sc, DBG_LOAD, "\n"); 2590 } 2591 #endif 2592 2593 /* 2594 * FreeBSD Device probe function. 2595 * 2596 * Compares the device found to the driver's list of supported devices and 2597 * reports back to the BSD loader whether this is the right driver for the device. 2598 * This is the driver entry function called from the "kldload" command. 2599 * 2600 * Returns: 2601 * BUS_PROBE_DEFAULT on success, positive value on failure. 2602 */ 2603 static int 2604 bxe_probe(device_t dev) 2605 { 2606 struct bxe_softc *sc; 2607 struct bxe_device_type *t; 2608 char *descbuf; 2609 uint16_t did, sdid, svid, vid; 2610 2611 /* Find our device structure */ 2612 sc = device_get_softc(dev); 2613 sc->dev = dev; 2614 t = bxe_devs; 2615 2616 /* Get the data for the device to be probed. */ 2617 vid = pci_get_vendor(dev); 2618 did = pci_get_device(dev); 2619 svid = pci_get_subvendor(dev); 2620 sdid = pci_get_subdevice(dev); 2621 2622 BLOGD(sc, DBG_LOAD, 2623 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " 2624 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); 2625 2626 /* Look through the list of known devices for a match. */ 2627 while (t->bxe_name != NULL) { 2628 if ((vid == t->bxe_vid) && (did == t->bxe_did) && 2629 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && 2630 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { 2631 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 2632 if (descbuf == NULL) 2633 return (ENOMEM); 2634 2635 /* Print out the device identity.
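* The chip revision is decoded from the PCI revision ID below: the high
* nibble selects a letter ('A' plus the nibble) and the low nibble a
* number, so a REVID of 0x12, for example, is reported as "B2".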
*/ 2636 snprintf(descbuf, BXE_DEVDESC_MAX, 2637 "%s (%c%d) BXE v:%s\n", t->bxe_name, 2638 (((pci_read_config(dev, PCIR_REVID, 4) & 2639 0xf0) >> 4) + 'A'), 2640 (pci_read_config(dev, PCIR_REVID, 4) & 0xf), 2641 BXE_DRIVER_VERSION); 2642 2643 device_set_desc_copy(dev, descbuf); 2644 free(descbuf, M_TEMP); 2645 return (BUS_PROBE_DEFAULT); 2646 } 2647 t++; 2648 } 2649 2650 return (ENXIO); 2651 } 2652 2653 static void 2654 bxe_init_mutexes(struct bxe_softc *sc) 2655 { 2656 #ifdef BXE_CORE_LOCK_SX 2657 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), 2658 "bxe%d_core_lock", sc->unit); 2659 sx_init(&sc->core_sx, sc->core_sx_name); 2660 #else 2661 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), 2662 "bxe%d_core_lock", sc->unit); 2663 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); 2664 #endif 2665 2666 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), 2667 "bxe%d_sp_lock", sc->unit); 2668 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); 2669 2670 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), 2671 "bxe%d_dmae_lock", sc->unit); 2672 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); 2673 2674 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), 2675 "bxe%d_phy_lock", sc->unit); 2676 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); 2677 2678 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), 2679 "bxe%d_fwmb_lock", sc->unit); 2680 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); 2681 2682 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), 2683 "bxe%d_print_lock", sc->unit); 2684 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); 2685 2686 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), 2687 "bxe%d_stats_lock", sc->unit); 2688 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); 2689 2690 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), 2691 "bxe%d_mcast_lock", sc->unit); 2692 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); 2693 } 2694 2695 static void 2696 bxe_release_mutexes(struct bxe_softc *sc) 2697 { 2698 #ifdef BXE_CORE_LOCK_SX 2699 sx_destroy(&sc->core_sx); 2700 #else 2701 if (mtx_initialized(&sc->core_mtx)) { 2702 mtx_destroy(&sc->core_mtx); 2703 } 2704 #endif 2705 2706 if (mtx_initialized(&sc->sp_mtx)) { 2707 mtx_destroy(&sc->sp_mtx); 2708 } 2709 2710 if (mtx_initialized(&sc->dmae_mtx)) { 2711 mtx_destroy(&sc->dmae_mtx); 2712 } 2713 2714 if (mtx_initialized(&sc->port.phy_mtx)) { 2715 mtx_destroy(&sc->port.phy_mtx); 2716 } 2717 2718 if (mtx_initialized(&sc->fwmb_mtx)) { 2719 mtx_destroy(&sc->fwmb_mtx); 2720 } 2721 2722 if (mtx_initialized(&sc->print_mtx)) { 2723 mtx_destroy(&sc->print_mtx); 2724 } 2725 2726 if (mtx_initialized(&sc->stats_mtx)) { 2727 mtx_destroy(&sc->stats_mtx); 2728 } 2729 2730 if (mtx_initialized(&sc->mcast_mtx)) { 2731 mtx_destroy(&sc->mcast_mtx); 2732 } 2733 } 2734 2735 static void 2736 bxe_tx_disable(struct bxe_softc* sc) 2737 { 2738 if_t ifp = sc->ifp; 2739 2740 /* tell the stack the driver is stopped and TX queue is full */ 2741 if (ifp != NULL) { 2742 if_setdrvflags(ifp, 0); 2743 } 2744 } 2745 2746 static void 2747 bxe_drv_pulse(struct bxe_softc *sc) 2748 { 2749 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, 2750 sc->fw_drv_pulse_wr_seq); 2751 } 2752 2753 static inline uint16_t 2754 bxe_tx_avail(struct bxe_softc *sc, 2755 struct bxe_fastpath *fp) 2756 { 2757 int16_t used; 2758 uint16_t prod; 2759 uint16_t cons; 2760 2761 prod = fp->tx_bd_prod; 2762 cons = fp->tx_bd_cons; 2763 2764 used = SUB_S16(prod, cons); 2765 
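/*
 * prod and cons are free-running 16-bit indices, so the signed 16-bit
 * subtraction above (assuming SUB_S16() is the usual (int16_t)(a - b))
 * still yields the number of BDs in use after either index wraps; e.g.
 * prod=0x0005 and cons=0xfffb gives used=10.
 */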
2766 #if 0 2767 KASSERT((used < 0), ("used tx bds < 0")); 2768 KASSERT((used > sc->tx_ring_size), ("used tx bds > tx_ring_size")); 2769 KASSERT(((sc->tx_ring_size - used) > MAX_TX_AVAIL), 2770 ("invalid number of tx bds used")); 2771 #endif 2772 2773 return (int16_t)(sc->tx_ring_size) - used; 2774 } 2775 2776 static inline int 2777 bxe_tx_queue_has_work(struct bxe_fastpath *fp) 2778 { 2779 uint16_t hw_cons; 2780 2781 mb(); /* status block fields can change */ 2782 hw_cons = le16toh(*fp->tx_cons_sb); 2783 return (hw_cons != fp->tx_pkt_cons); 2784 } 2785 2786 static inline uint8_t 2787 bxe_has_tx_work(struct bxe_fastpath *fp) 2788 { 2789 /* expand this for multi-cos if ever supported */ 2790 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE; 2791 } 2792 2793 static inline int 2794 bxe_has_rx_work(struct bxe_fastpath *fp) 2795 { 2796 uint16_t rx_cq_cons_sb; 2797 2798 mb(); /* status block fields can change */ 2799 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); 2800 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) 2801 rx_cq_cons_sb++; 2802 return (fp->rx_cq_cons != rx_cq_cons_sb); 2803 } 2804 2805 static void 2806 bxe_sp_event(struct bxe_softc *sc, 2807 struct bxe_fastpath *fp, 2808 union eth_rx_cqe *rr_cqe) 2809 { 2810 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2811 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2812 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; 2813 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 2814 2815 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", 2816 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); 2817 2818 #if 0 2819 /* 2820 * If cid is within VF range, replace the slowpath object with the 2821 * one corresponding to this VF 2822 */ 2823 if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) { 2824 bxe_iov_set_queue_sp_obj(sc, cid, &q_obj); 2825 } 2826 #endif 2827 2828 switch (command) { 2829 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): 2830 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid); 2831 drv_cmd = ECORE_Q_CMD_UPDATE; 2832 break; 2833 2834 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): 2835 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid); 2836 drv_cmd = ECORE_Q_CMD_SETUP; 2837 break; 2838 2839 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): 2840 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); 2841 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; 2842 break; 2843 2844 case (RAMROD_CMD_ID_ETH_HALT): 2845 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid); 2846 drv_cmd = ECORE_Q_CMD_HALT; 2847 break; 2848 2849 case (RAMROD_CMD_ID_ETH_TERMINATE): 2850 BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid); 2851 drv_cmd = ECORE_Q_CMD_TERMINATE; 2852 break; 2853 2854 case (RAMROD_CMD_ID_ETH_EMPTY): 2855 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid); 2856 drv_cmd = ECORE_Q_CMD_EMPTY; 2857 break; 2858 2859 default: 2860 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n", 2861 command, fp->index); 2862 return; 2863 } 2864 2865 if ((drv_cmd != ECORE_Q_CMD_MAX) && 2866 q_obj->complete_cmd(sc, q_obj, drv_cmd)) { 2867 /* 2868 * q_obj->complete_cmd() failure means that this was 2869 * an unexpected completion. 2870 * 2871 * In this case we don't want to increase the sc->spq_left 2872 * because apparently we haven't sent this command the first 2873 * place. 
2874 */ 2875 // bxe_panic(sc, ("Unexpected SP completion\n")); 2876 return; 2877 } 2878 2879 #if 0 2880 /* SRIOV: reschedule any 'in_progress' operations */ 2881 bxe_iov_sp_event(sc, cid, TRUE); 2882 #endif 2883 2884 atomic_add_acq_long(&sc->cq_spq_left, 1); 2885 2886 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", 2887 atomic_load_acq_long(&sc->cq_spq_left)); 2888 2889 #if 0 2890 if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && 2891 (!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) { 2892 /* 2893 * If Queue update ramrod is completed for last Queue in AFEX VIF set 2894 * flow, then ACK MCP at the end. Mark pending ACK to MCP bit to 2895 * prevent case that both bits are cleared. At the end of load/unload 2896 * driver checks that sp_state is cleared and this order prevents 2897 * races. 2898 */ 2899 bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state); 2900 wmb(); 2901 bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state); 2902 2903 /* schedule the sp task as MCP ack is required */ 2904 bxe_schedule_sp_task(sc); 2905 } 2906 #endif 2907 } 2908 2909 /* 2910 * The current mbuf is part of an aggregation. Move the mbuf into the TPA 2911 * aggregation queue, put an empty mbuf back onto the receive chain, and mark 2912 * the current aggregation queue as in-progress. 2913 */ 2914 static void 2915 bxe_tpa_start(struct bxe_softc *sc, 2916 struct bxe_fastpath *fp, 2917 uint16_t queue, 2918 uint16_t cons, 2919 uint16_t prod, 2920 struct eth_fast_path_rx_cqe *cqe) 2921 { 2922 struct bxe_sw_rx_bd tmp_bd; 2923 struct bxe_sw_rx_bd *rx_buf; 2924 struct eth_rx_bd *rx_bd; 2925 int max_agg_queues; 2926 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 2927 uint16_t index; 2928 2929 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " 2930 "cons=%d prod=%d\n", 2931 fp->index, queue, cons, prod); 2932 2933 max_agg_queues = MAX_AGG_QS(sc); 2934 2935 KASSERT((queue < max_agg_queues), 2936 ("fp[%02d] invalid aggr queue (%d >= %d)!", 2937 fp->index, queue, max_agg_queues)); 2938 2939 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), 2940 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", 2941 fp->index, queue)); 2942 2943 /* copy the existing mbuf and mapping from the TPA pool */ 2944 tmp_bd = tpa_info->bd; 2945 2946 if (tmp_bd.m == NULL) { 2947 BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n", 2948 fp->index, queue); 2949 /* XXX Error handling? */ 2950 return; 2951 } 2952 2953 /* change the TPA queue to the start state */ 2954 tpa_info->state = BXE_TPA_STATE_START; 2955 tpa_info->placement_offset = cqe->placement_offset; 2956 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); 2957 tpa_info->vlan_tag = le16toh(cqe->vlan_tag); 2958 tpa_info->len_on_bd = le16toh(cqe->len_on_bd); 2959 2960 fp->rx_tpa_queue_used |= (1 << queue); 2961 2962 /* 2963 * If all the buffer descriptors are filled with mbufs then fill in 2964 * the current consumer index with a new BD. Else if a maximum Rx 2965 * buffer limit is imposed then fill in the next producer index. 2966 */ 2967 index = (sc->max_rx_bufs != RX_BD_USABLE) ? 
2968 prod : cons; 2969 2970 /* move the received mbuf and mapping to TPA pool */ 2971 tpa_info->bd = fp->rx_mbuf_chain[cons]; 2972 2973 /* release any existing RX BD mbuf mappings */ 2974 if (cons != index) { 2975 rx_buf = &fp->rx_mbuf_chain[cons]; 2976 2977 if (rx_buf->m_map != NULL) { 2978 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 2979 BUS_DMASYNC_POSTREAD); 2980 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 2981 } 2982 2983 /* 2984 * We get here when the maximum number of rx buffers is less than 2985 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL 2986 * it out here without concern of a memory leak. 2987 */ 2988 fp->rx_mbuf_chain[cons].m = NULL; 2989 } 2990 2991 /* update the Rx SW BD with the mbuf info from the TPA pool */ 2992 fp->rx_mbuf_chain[index] = tmp_bd; 2993 2994 /* update the Rx BD with the empty mbuf phys address from the TPA pool */ 2995 rx_bd = &fp->rx_chain[index]; 2996 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); 2997 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); 2998 } 2999 3000 /* 3001 * When a TPA aggregation is completed, loop through the individual mbufs 3002 * of the aggregation, combining them into a single mbuf which will be sent 3003 * up the stack. Refill all freed SGEs with mbufs as we go along. 3004 */ 3005 static int 3006 bxe_fill_frag_mbuf(struct bxe_softc *sc, 3007 struct bxe_fastpath *fp, 3008 struct bxe_sw_tpa_info *tpa_info, 3009 uint16_t queue, 3010 uint16_t pages, 3011 struct mbuf *m, 3012 struct eth_end_agg_rx_cqe *cqe, 3013 uint16_t cqe_idx) 3014 { 3015 struct mbuf *m_frag; 3016 uint32_t frag_len, frag_size, i; 3017 uint16_t sge_idx; 3018 int rc = 0; 3019 int j; 3020 3021 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; 3022 3023 BLOGD(sc, DBG_LRO, 3024 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", 3025 fp->index, queue, tpa_info->len_on_bd, frag_size, pages); 3026 3027 /* make sure the aggregated frame is not too big to handle */ 3028 if (pages > 8 * PAGES_PER_SGE) { 3029 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " 3030 "pkt_len=%d len_on_bd=%d frag_size=%d\n", 3031 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), 3032 tpa_info->len_on_bd, frag_size); 3033 bxe_panic(sc, ("sge page count error\n")); 3034 return (EINVAL); 3035 } 3036 3037 /* 3038 * Scan through the scatter gather list pulling individual mbufs into a 3039 * single mbuf for the host stack. 3040 */ 3041 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 3042 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); 3043 3044 /* 3045 * Firmware gives the indices of the SGE as if the ring is an array 3046 * (meaning that the "next" element will consume 2 indices). 3047 */ 3048 frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); 3049 3050 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " 3051 "sge_idx=%d frag_size=%d frag_len=%d\n", 3052 fp->index, queue, i, j, sge_idx, frag_size, frag_len); 3053 3054 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 3055 3056 /* allocate a new mbuf for the SGE */ 3057 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 3058 if (rc) { 3059 /* Leave all remaining SGEs in the ring! 
*/ 3060 return (rc); 3061 } 3062 3063 /* update the fragment length */ 3064 m_frag->m_len = frag_len; 3065 3066 /* concatenate the fragment to the head mbuf */ 3067 m_cat(m, m_frag); 3068 fp->eth_q_stats.mbuf_alloc_sge--; 3069 3070 /* update the TPA mbuf size and remaining fragment size */ 3071 m->m_pkthdr.len += frag_len; 3072 frag_size -= frag_len; 3073 } 3074 3075 BLOGD(sc, DBG_LRO, 3076 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", 3077 fp->index, queue, frag_size); 3078 3079 return (rc); 3080 } 3081 3082 static inline void 3083 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) 3084 { 3085 int i, j; 3086 3087 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { 3088 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; 3089 3090 for (j = 0; j < 2; j++) { 3091 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); 3092 idx--; 3093 } 3094 } 3095 } 3096 3097 static inline void 3098 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) 3099 { 3100 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ 3101 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); 3102 3103 /* 3104 * Clear the two last indices in the page to 1. These are the indices that 3105 * correspond to the "next" element, hence will never be indicated and 3106 * should be removed from the calculations. 3107 */ 3108 bxe_clear_sge_mask_next_elems(fp); 3109 } 3110 3111 static inline void 3112 bxe_update_last_max_sge(struct bxe_fastpath *fp, 3113 uint16_t idx) 3114 { 3115 uint16_t last_max = fp->last_max_sge; 3116 3117 if (SUB_S16(idx, last_max) > 0) { 3118 fp->last_max_sge = idx; 3119 } 3120 } 3121 3122 static inline void 3123 bxe_update_sge_prod(struct bxe_softc *sc, 3124 struct bxe_fastpath *fp, 3125 uint16_t sge_len, 3126 union eth_sgl_or_raw_data *cqe) 3127 { 3128 uint16_t last_max, last_elem, first_elem; 3129 uint16_t delta = 0; 3130 uint16_t i; 3131 3132 if (!sge_len) { 3133 return; 3134 } 3135 3136 /* first mark all used pages */ 3137 for (i = 0; i < sge_len; i++) { 3138 BIT_VEC64_CLEAR_BIT(fp->sge_mask, 3139 RX_SGE(le16toh(cqe->sgl[i]))); 3140 } 3141 3142 BLOGD(sc, DBG_LRO, 3143 "fp[%02d] fp_cqe->sgl[%d] = %d\n", 3144 fp->index, sge_len - 1, 3145 le16toh(cqe->sgl[sge_len - 1])); 3146 3147 /* assume that the last SGE index is the biggest */ 3148 bxe_update_last_max_sge(fp, 3149 le16toh(cqe->sgl[sge_len - 1])); 3150 3151 last_max = RX_SGE(fp->last_max_sge); 3152 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; 3153 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; 3154 3155 /* if ring is not full */ 3156 if (last_elem + 1 != first_elem) { 3157 last_elem++; 3158 } 3159 3160 /* now update the prod */ 3161 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) { 3162 if (__predict_true(fp->sge_mask[i])) { 3163 break; 3164 } 3165 3166 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; 3167 delta += BIT_VEC64_ELEM_SZ; 3168 } 3169 3170 if (delta > 0) { 3171 fp->rx_sge_prod += delta; 3172 /* clear page-end entries */ 3173 bxe_clear_sge_mask_next_elems(fp); 3174 } 3175 3176 BLOGD(sc, DBG_LRO, 3177 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", 3178 fp->index, fp->last_max_sge, fp->rx_sge_prod); 3179 } 3180 3181 /* 3182 * The aggregation on the current TPA queue has completed. Pull the individual 3183 * mbuf fragments together into a single mbuf, perform all necessary checksum 3184 * calculations, and send the resuting mbuf to the stack. 
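* If a replacement mbuf cannot be allocated, or the SGE fragments cannot
* be collected, the frame is dropped and counted in rx_soft_errors; the
* TPA queue is returned to the STOP state in either case.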
3185 */ 3186 static void 3187 bxe_tpa_stop(struct bxe_softc *sc, 3188 struct bxe_fastpath *fp, 3189 struct bxe_sw_tpa_info *tpa_info, 3190 uint16_t queue, 3191 uint16_t pages, 3192 struct eth_end_agg_rx_cqe *cqe, 3193 uint16_t cqe_idx) 3194 { 3195 if_t ifp = sc->ifp; 3196 struct mbuf *m; 3197 int rc = 0; 3198 3199 BLOGD(sc, DBG_LRO, 3200 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n", 3201 fp->index, queue, tpa_info->placement_offset, 3202 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); 3203 3204 m = tpa_info->bd.m; 3205 3206 /* allocate a replacement before modifying existing mbuf */ 3207 rc = bxe_alloc_rx_tpa_mbuf(fp, queue); 3208 if (rc) { 3209 /* drop the frame and log an error */ 3210 fp->eth_q_stats.rx_soft_errors++; 3211 goto bxe_tpa_stop_exit; 3212 } 3213 3214 /* we have a replacement, fixup the current mbuf */ 3215 m_adj(m, tpa_info->placement_offset); 3216 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; 3217 3218 /* mark the checksums valid (taken care of by the firmware) */ 3219 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3220 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3221 m->m_pkthdr.csum_data = 0xffff; 3222 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | 3223 CSUM_IP_VALID | 3224 CSUM_DATA_VALID | 3225 CSUM_PSEUDO_HDR); 3226 3227 /* aggregate all of the SGEs into a single mbuf */ 3228 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx); 3229 if (rc) { 3230 /* drop the packet and log an error */ 3231 fp->eth_q_stats.rx_soft_errors++; 3232 m_freem(m); 3233 } else { 3234 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) { 3235 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; 3236 m->m_flags |= M_VLANTAG; 3237 } 3238 3239 /* assign packet to this interface interface */ 3240 if_setrcvif(m, ifp); 3241 3242 #if __FreeBSD_version >= 800000 3243 /* specify what RSS queue was used for this flow */ 3244 m->m_pkthdr.flowid = fp->index; 3245 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); 3246 #endif 3247 3248 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3249 fp->eth_q_stats.rx_tpa_pkts++; 3250 3251 /* pass the frame to the stack */ 3252 if_input(ifp, m); 3253 } 3254 3255 /* we passed an mbuf up the stack or dropped the frame */ 3256 fp->eth_q_stats.mbuf_alloc_tpa--; 3257 3258 bxe_tpa_stop_exit: 3259 3260 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; 3261 fp->rx_tpa_queue_used &= ~(1 << queue); 3262 } 3263 3264 static uint8_t 3265 bxe_service_rxsgl( 3266 struct bxe_fastpath *fp, 3267 uint16_t len, 3268 uint16_t lenonbd, 3269 struct mbuf *m, 3270 struct eth_fast_path_rx_cqe *cqe_fp) 3271 { 3272 struct mbuf *m_frag; 3273 uint16_t frags, frag_len; 3274 uint16_t sge_idx = 0; 3275 uint16_t j; 3276 uint8_t i, rc = 0; 3277 uint32_t frag_size; 3278 3279 /* adjust the mbuf */ 3280 m->m_len = lenonbd; 3281 3282 frag_size = len - lenonbd; 3283 frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3284 3285 for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) { 3286 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j])); 3287 3288 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 3289 frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE)); 3290 m_frag->m_len = frag_len; 3291 3292 /* allocate a new mbuf for the SGE */ 3293 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 3294 if (rc) { 3295 /* Leave all remaining SGEs in the ring! 
*/ 3296 return (rc); 3297 } 3298 fp->eth_q_stats.mbuf_alloc_sge--; 3299 3300 /* concatenate the fragment to the head mbuf */ 3301 m_cat(m, m_frag); 3302 3303 frag_size -= frag_len; 3304 } 3305 3306 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); 3307 3308 return rc; 3309 } 3310 3311 static uint8_t 3312 bxe_rxeof(struct bxe_softc *sc, 3313 struct bxe_fastpath *fp) 3314 { 3315 if_t ifp = sc->ifp; 3316 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 3317 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 3318 int rx_pkts = 0; 3319 int rc = 0; 3320 3321 BXE_FP_RX_LOCK(fp); 3322 3323 /* CQ "next element" is of the size of the regular element */ 3324 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 3325 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { 3326 hw_cq_cons++; 3327 } 3328 3329 bd_cons = fp->rx_bd_cons; 3330 bd_prod = fp->rx_bd_prod; 3331 bd_prod_fw = bd_prod; 3332 sw_cq_cons = fp->rx_cq_cons; 3333 sw_cq_prod = fp->rx_cq_prod; 3334 3335 /* 3336 * Memory barrier necessary as speculative reads of the rx 3337 * buffer can be ahead of the index in the status block 3338 */ 3339 rmb(); 3340 3341 BLOGD(sc, DBG_RX, 3342 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", 3343 fp->index, hw_cq_cons, sw_cq_cons); 3344 3345 while (sw_cq_cons != hw_cq_cons) { 3346 struct bxe_sw_rx_bd *rx_buf = NULL; 3347 union eth_rx_cqe *cqe; 3348 struct eth_fast_path_rx_cqe *cqe_fp; 3349 uint8_t cqe_fp_flags; 3350 enum eth_rx_cqe_type cqe_fp_type; 3351 uint16_t len, lenonbd, pad; 3352 struct mbuf *m = NULL; 3353 3354 comp_ring_cons = RCQ(sw_cq_cons); 3355 bd_prod = RX_BD(bd_prod); 3356 bd_cons = RX_BD(bd_cons); 3357 3358 cqe = &fp->rcq_chain[comp_ring_cons]; 3359 cqe_fp = &cqe->fast_path_cqe; 3360 cqe_fp_flags = cqe_fp->type_error_flags; 3361 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 3362 3363 BLOGD(sc, DBG_RX, 3364 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " 3365 "BD prod=%d cons=%d CQE type=0x%x err=0x%x " 3366 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", 3367 fp->index, 3368 hw_cq_cons, 3369 sw_cq_cons, 3370 bd_prod, 3371 bd_cons, 3372 CQE_TYPE(cqe_fp_flags), 3373 cqe_fp_flags, 3374 cqe_fp->status_flags, 3375 le32toh(cqe_fp->rss_hash_result), 3376 le16toh(cqe_fp->vlan_tag), 3377 le16toh(cqe_fp->pkt_len_or_gro_seg_len), 3378 le16toh(cqe_fp->len_on_bd)); 3379 3380 /* is this a slowpath msg? */ 3381 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { 3382 bxe_sp_event(sc, fp, cqe); 3383 goto next_cqe; 3384 } 3385 3386 rx_buf = &fp->rx_mbuf_chain[bd_cons]; 3387 3388 if (!CQE_TYPE_FAST(cqe_fp_type)) { 3389 struct bxe_sw_tpa_info *tpa_info; 3390 uint16_t frag_size, pages; 3391 uint8_t queue; 3392 3393 #if 0 3394 /* sanity check */ 3395 if (!fp->tpa_enable && 3396 (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) { 3397 BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n", 3398 CQE_TYPE(cqe_fp_type)); 3399 } 3400 #endif 3401 3402 if (CQE_TYPE_START(cqe_fp_type)) { 3403 bxe_tpa_start(sc, fp, cqe_fp->queue_index, 3404 bd_cons, bd_prod, cqe_fp); 3405 m = NULL; /* packet not ready yet */ 3406 goto next_rx; 3407 } 3408 3409 KASSERT(CQE_TYPE_STOP(cqe_fp_type), 3410 ("CQE type is not STOP! 
(0x%x)\n", cqe_fp_type)); 3411 3412 queue = cqe->end_agg_cqe.queue_index; 3413 tpa_info = &fp->rx_tpa_info[queue]; 3414 3415 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", 3416 fp->index, queue); 3417 3418 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - 3419 tpa_info->len_on_bd); 3420 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3421 3422 bxe_tpa_stop(sc, fp, tpa_info, queue, pages, 3423 &cqe->end_agg_cqe, comp_ring_cons); 3424 3425 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); 3426 3427 goto next_cqe; 3428 } 3429 3430 /* non TPA */ 3431 3432 /* is this an error packet? */ 3433 if (__predict_false(cqe_fp_flags & 3434 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { 3435 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); 3436 fp->eth_q_stats.rx_soft_errors++; 3437 goto next_rx; 3438 } 3439 3440 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); 3441 lenonbd = le16toh(cqe_fp->len_on_bd); 3442 pad = cqe_fp->placement_offset; 3443 3444 m = rx_buf->m; 3445 3446 if (__predict_false(m == NULL)) { 3447 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", 3448 bd_cons, fp->index); 3449 goto next_rx; 3450 } 3451 3452 /* XXX double copy if packet length under a threshold */ 3453 3454 /* 3455 * If all the buffer descriptors are filled with mbufs then fill in 3456 * the current consumer index with a new BD. Else if a maximum Rx 3457 * buffer limit is imposed then fill in the next producer index. 3458 */ 3459 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons, 3460 (sc->max_rx_bufs != RX_BD_USABLE) ? 3461 bd_prod : bd_cons); 3462 if (rc != 0) { 3463 3464 /* we simply reuse the received mbuf and don't post it to the stack */ 3465 m = NULL; 3466 3467 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 3468 fp->index, rc); 3469 fp->eth_q_stats.rx_soft_errors++; 3470 3471 if (sc->max_rx_bufs != RX_BD_USABLE) { 3472 /* copy this consumer index to the producer index */ 3473 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, 3474 sizeof(struct bxe_sw_rx_bd)); 3475 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd)); 3476 } 3477 3478 goto next_rx; 3479 } 3480 3481 /* current mbuf was detached from the bd */ 3482 fp->eth_q_stats.mbuf_alloc_rx--; 3483 3484 /* we allocated a replacement mbuf, fixup the current one */ 3485 m_adj(m, pad); 3486 m->m_pkthdr.len = m->m_len = len; 3487 3488 if ((len > 60) && (len > lenonbd)) { 3489 fp->eth_q_stats.rx_bxe_service_rxsgl++; 3490 rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp); 3491 if (rc) 3492 break; 3493 fp->eth_q_stats.rx_jumbo_sge_pkts++; 3494 } else if (lenonbd < len) { 3495 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++; 3496 } 3497 3498 /* assign packet to this interface interface */ 3499 if_setrcvif(m, ifp); 3500 3501 /* assume no hardware checksum has complated */ 3502 m->m_pkthdr.csum_flags = 0; 3503 3504 /* validate checksum if offload enabled */ 3505 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 3506 /* check for a valid IP frame */ 3507 if (!(cqe->fast_path_cqe.status_flags & 3508 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) { 3509 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3510 if (__predict_false(cqe_fp_flags & 3511 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) { 3512 fp->eth_q_stats.rx_hw_csum_errors++; 3513 } else { 3514 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3515 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3516 } 3517 } 3518 3519 /* check for a valid TCP/UDP frame */ 3520 if (!(cqe->fast_path_cqe.status_flags & 3521 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) { 3522 if (__predict_false(cqe_fp_flags & 3523 
ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { 3524 fp->eth_q_stats.rx_hw_csum_errors++; 3525 } else { 3526 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3527 m->m_pkthdr.csum_data = 0xFFFF; 3528 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | 3529 CSUM_PSEUDO_HDR); 3530 } 3531 } 3532 } 3533 3534 /* if there is a VLAN tag then flag that info */ 3535 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) { 3536 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; 3537 m->m_flags |= M_VLANTAG; 3538 } 3539 3540 #if __FreeBSD_version >= 800000 3541 /* specify what RSS queue was used for this flow */ 3542 m->m_pkthdr.flowid = fp->index; 3543 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); 3544 #endif 3545 3546 next_rx: 3547 3548 bd_cons = RX_BD_NEXT(bd_cons); 3549 bd_prod = RX_BD_NEXT(bd_prod); 3550 bd_prod_fw = RX_BD_NEXT(bd_prod_fw); 3551 3552 /* pass the frame to the stack */ 3553 if (__predict_true(m != NULL)) { 3554 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3555 rx_pkts++; 3556 if_input(ifp, m); 3557 } 3558 3559 next_cqe: 3560 3561 sw_cq_prod = RCQ_NEXT(sw_cq_prod); 3562 sw_cq_cons = RCQ_NEXT(sw_cq_cons); 3563 3564 /* limit spinning on the queue */ 3565 if (rc != 0) 3566 break; 3567 3568 if (rx_pkts == sc->rx_budget) { 3569 fp->eth_q_stats.rx_budget_reached++; 3570 break; 3571 } 3572 } /* while work to do */ 3573 3574 fp->rx_bd_cons = bd_cons; 3575 fp->rx_bd_prod = bd_prod_fw; 3576 fp->rx_cq_cons = sw_cq_cons; 3577 fp->rx_cq_prod = sw_cq_prod; 3578 3579 /* Update producers */ 3580 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); 3581 3582 fp->eth_q_stats.rx_pkts += rx_pkts; 3583 fp->eth_q_stats.rx_calls++; 3584 3585 BXE_FP_RX_UNLOCK(fp); 3586 3587 return (sw_cq_cons != hw_cq_cons); 3588 } 3589 3590 static uint16_t 3591 bxe_free_tx_pkt(struct bxe_softc *sc, 3592 struct bxe_fastpath *fp, 3593 uint16_t idx) 3594 { 3595 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; 3596 struct eth_tx_start_bd *tx_start_bd; 3597 uint16_t bd_idx = TX_BD(tx_buf->first_bd); 3598 uint16_t new_cons; 3599 int nbd; 3600 3601 /* unmap the mbuf from non-paged memory */ 3602 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 3603 3604 tx_start_bd = &fp->tx_chain[bd_idx].start_bd; 3605 nbd = le16toh(tx_start_bd->nbd) - 1; 3606 3607 #if 0 3608 if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) { 3609 bxe_panic(sc, ("BAD nbd!\n")); 3610 } 3611 #endif 3612 3613 new_cons = (tx_buf->first_bd + nbd); 3614 3615 #if 0 3616 struct eth_tx_bd *tx_data_bd; 3617 3618 /* 3619 * The following code doesn't do anything but is left here 3620 * for clarity on what the new value of new_cons skipped. 
3621 */ 3622 3623 /* get the next bd */ 3624 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3625 3626 /* skip the parse bd */ 3627 --nbd; 3628 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3629 3630 /* skip the TSO split header bd since they have no mapping */ 3631 if (tx_buf->flags & BXE_TSO_SPLIT_BD) { 3632 --nbd; 3633 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3634 } 3635 3636 /* now free frags */ 3637 while (nbd > 0) { 3638 tx_data_bd = &fp->tx_chain[bd_idx].reg_bd; 3639 if (--nbd) { 3640 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3641 } 3642 } 3643 #endif 3644 3645 /* free the mbuf */ 3646 if (__predict_true(tx_buf->m != NULL)) { 3647 m_freem(tx_buf->m); 3648 fp->eth_q_stats.mbuf_alloc_tx--; 3649 } else { 3650 fp->eth_q_stats.tx_chain_lost_mbuf++; 3651 } 3652 3653 tx_buf->m = NULL; 3654 tx_buf->first_bd = 0; 3655 3656 return (new_cons); 3657 } 3658 3659 /* transmit timeout watchdog */ 3660 static int 3661 bxe_watchdog(struct bxe_softc *sc, 3662 struct bxe_fastpath *fp) 3663 { 3664 BXE_FP_TX_LOCK(fp); 3665 3666 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { 3667 BXE_FP_TX_UNLOCK(fp); 3668 return (0); 3669 } 3670 3671 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); 3672 3673 BXE_FP_TX_UNLOCK(fp); 3674 3675 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); 3676 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); 3677 3678 return (-1); 3679 } 3680 3681 /* processes transmit completions */ 3682 static uint8_t 3683 bxe_txeof(struct bxe_softc *sc, 3684 struct bxe_fastpath *fp) 3685 { 3686 if_t ifp = sc->ifp; 3687 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; 3688 uint16_t tx_bd_avail; 3689 3690 BXE_FP_TX_LOCK_ASSERT(fp); 3691 3692 bd_cons = fp->tx_bd_cons; 3693 hw_cons = le16toh(*fp->tx_cons_sb); 3694 sw_cons = fp->tx_pkt_cons; 3695 3696 while (sw_cons != hw_cons) { 3697 pkt_cons = TX_BD(sw_cons); 3698 3699 BLOGD(sc, DBG_TX, 3700 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", 3701 fp->index, hw_cons, sw_cons, pkt_cons); 3702 3703 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); 3704 3705 sw_cons++; 3706 } 3707 3708 fp->tx_pkt_cons = sw_cons; 3709 fp->tx_bd_cons = bd_cons; 3710 3711 BLOGD(sc, DBG_TX, 3712 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", 3713 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); 3714 3715 mb(); 3716 3717 tx_bd_avail = bxe_tx_avail(sc, fp); 3718 3719 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 3720 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 3721 } else { 3722 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 3723 } 3724 3725 if (fp->tx_pkt_prod != fp->tx_pkt_cons) { 3726 /* reset the watchdog timer if there are pending transmits */ 3727 fp->watchdog_timer = BXE_TX_TIMEOUT; 3728 return (TRUE); 3729 } else { 3730 /* clear watchdog when there are no pending transmits */ 3731 fp->watchdog_timer = 0; 3732 return (FALSE); 3733 } 3734 } 3735 3736 static void 3737 bxe_drain_tx_queues(struct bxe_softc *sc) 3738 { 3739 struct bxe_fastpath *fp; 3740 int i, count; 3741 3742 /* wait until all TX fastpath tasks have completed */ 3743 for (i = 0; i < sc->num_queues; i++) { 3744 fp = &sc->fp[i]; 3745 3746 count = 1000; 3747 3748 while (bxe_has_tx_work(fp)) { 3749 3750 BXE_FP_TX_LOCK(fp); 3751 bxe_txeof(sc, fp); 3752 BXE_FP_TX_UNLOCK(fp); 3753 3754 if (count == 0) { 3755 BLOGE(sc, "Timeout waiting for fp[%d] " 3756 "transmits to complete!\n", i); 3757 bxe_panic(sc, ("tx drain failure\n")); 3758 return; 3759 } 3760 3761 count--; 3762 DELAY(1000); 3763 rmb(); 3764 } 3765 } 3766 3767 return; 3768 } 3769 3770 static int 3771 bxe_del_all_macs(struct bxe_softc *sc, 3772 
struct ecore_vlan_mac_obj *mac_obj, 3773 int mac_type, 3774 uint8_t wait_for_comp) 3775 { 3776 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 3777 int rc; 3778 3779 /* wait for completion if requested */ 3780 if (wait_for_comp) { 3781 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3782 } 3783 3784 /* Set the mac type of addresses we want to clear */ 3785 bxe_set_bit(mac_type, &vlan_mac_flags); 3786 3787 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); 3788 if (rc < 0) { 3789 BLOGE(sc, "Failed to delete MACs (%d)\n", rc); 3790 } 3791 3792 return (rc); 3793 } 3794 3795 static int 3796 bxe_fill_accept_flags(struct bxe_softc *sc, 3797 uint32_t rx_mode, 3798 unsigned long *rx_accept_flags, 3799 unsigned long *tx_accept_flags) 3800 { 3801 /* Clear the flags first */ 3802 *rx_accept_flags = 0; 3803 *tx_accept_flags = 0; 3804 3805 switch (rx_mode) { 3806 case BXE_RX_MODE_NONE: 3807 /* 3808 * 'drop all' supersedes any accept flags that may have been 3809 * passed to the function. 3810 */ 3811 break; 3812 3813 case BXE_RX_MODE_NORMAL: 3814 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3815 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); 3816 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3817 3818 /* internal switching mode */ 3819 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3820 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); 3821 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3822 3823 break; 3824 3825 case BXE_RX_MODE_ALLMULTI: 3826 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3827 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3828 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3829 3830 /* internal switching mode */ 3831 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3832 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3833 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3834 3835 break; 3836 3837 case BXE_RX_MODE_PROMISC: 3838 /* 3839 * According to the definition of SI mode, an iface in promisc mode 3840 * should receive matched and unmatched (in resolution of port) 3841 * unicast packets.
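* In practice this means ECORE_ACCEPT_UNMATCHED is set below in addition
* to the usual unicast/multicast/broadcast accept bits on the RX side.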
3842 */ 3843 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); 3844 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3845 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3846 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3847 3848 /* internal switching mode */ 3849 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3850 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3851 3852 if (IS_MF_SI(sc)) { 3853 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); 3854 } else { 3855 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3856 } 3857 3858 break; 3859 3860 default: 3861 BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode); 3862 return (-1); 3863 } 3864 3865 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 3866 if (rx_mode != BXE_RX_MODE_NONE) { 3867 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); 3868 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); 3869 } 3870 3871 return (0); 3872 } 3873 3874 static int 3875 bxe_set_q_rx_mode(struct bxe_softc *sc, 3876 uint8_t cl_id, 3877 unsigned long rx_mode_flags, 3878 unsigned long rx_accept_flags, 3879 unsigned long tx_accept_flags, 3880 unsigned long ramrod_flags) 3881 { 3882 struct ecore_rx_mode_ramrod_params ramrod_param; 3883 int rc; 3884 3885 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3886 3887 /* Prepare ramrod parameters */ 3888 ramrod_param.cid = 0; 3889 ramrod_param.cl_id = cl_id; 3890 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; 3891 ramrod_param.func_id = SC_FUNC(sc); 3892 3893 ramrod_param.pstate = &sc->sp_state; 3894 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; 3895 3896 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); 3897 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); 3898 3899 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 3900 3901 ramrod_param.ramrod_flags = ramrod_flags; 3902 ramrod_param.rx_mode_flags = rx_mode_flags; 3903 3904 ramrod_param.rx_accept_flags = rx_accept_flags; 3905 ramrod_param.tx_accept_flags = tx_accept_flags; 3906 3907 rc = ecore_config_rx_mode(sc, &ramrod_param); 3908 if (rc < 0) { 3909 BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode); 3910 return (rc); 3911 } 3912 3913 return (0); 3914 } 3915 3916 static int 3917 bxe_set_storm_rx_mode(struct bxe_softc *sc) 3918 { 3919 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 3920 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 3921 int rc; 3922 3923 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 3924 &tx_accept_flags); 3925 if (rc) { 3926 return (rc); 3927 } 3928 3929 bxe_set_bit(RAMROD_RX, &ramrod_flags); 3930 bxe_set_bit(RAMROD_TX, &ramrod_flags); 3931 3932 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ 3933 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 3934 rx_accept_flags, tx_accept_flags, 3935 ramrod_flags)); 3936 } 3937 3938 /* returns the "mcp load_code" according to global load_count array */ 3939 static int 3940 bxe_nic_load_no_mcp(struct bxe_softc *sc) 3941 { 3942 int path = SC_PATH(sc); 3943 int port = SC_PORT(sc); 3944 3945 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3946 path, load_count[path][0], load_count[path][1], 3947 load_count[path][2]); 3948 load_count[path][0]++; 3949 load_count[path][1 + port]++; 3950 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3951 path, load_count[path][0], load_count[path][1], 3952 load_count[path][2]); 3953 if (load_count[path][0] == 1) { 3954 return (FW_MSG_CODE_DRV_LOAD_COMMON); 3955 } else if (load_count[path][1 + port] == 1) { 3956 return 
(FW_MSG_CODE_DRV_LOAD_PORT); 3957 } else { 3958 return (FW_MSG_CODE_DRV_LOAD_FUNCTION); 3959 } 3960 } 3961 3962 /* returns the "mcp load_code" according to global load_count array */ 3963 static int 3964 bxe_nic_unload_no_mcp(struct bxe_softc *sc) 3965 { 3966 int port = SC_PORT(sc); 3967 int path = SC_PATH(sc); 3968 3969 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3970 path, load_count[path][0], load_count[path][1], 3971 load_count[path][2]); 3972 load_count[path][0]--; 3973 load_count[path][1 + port]--; 3974 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3975 path, load_count[path][0], load_count[path][1], 3976 load_count[path][2]); 3977 if (load_count[path][0] == 0) { 3978 return (FW_MSG_CODE_DRV_UNLOAD_COMMON); 3979 } else if (load_count[path][1 + port] == 0) { 3980 return (FW_MSG_CODE_DRV_UNLOAD_PORT); 3981 } else { 3982 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); 3983 } 3984 } 3985 3986 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ 3987 static uint32_t 3988 bxe_send_unload_req(struct bxe_softc *sc, 3989 int unload_mode) 3990 { 3991 uint32_t reset_code = 0; 3992 #if 0 3993 int port = SC_PORT(sc); 3994 int path = SC_PATH(sc); 3995 #endif 3996 3997 /* Select the UNLOAD request mode */ 3998 if (unload_mode == UNLOAD_NORMAL) { 3999 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 4000 } 4001 #if 0 4002 else if (sc->flags & BXE_NO_WOL_FLAG) { 4003 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 4004 } else if (sc->wol) { 4005 uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 4006 uint8_t *mac_addr = sc->dev->dev_addr; 4007 uint32_t val; 4008 uint16_t pmc; 4009 4010 /* 4011 * The mac address is written to entries 1-4 to 4012 * preserve entry 0 which is used by the PMF 4013 */ 4014 uint8_t entry = (SC_VN(sc) + 1)*8; 4015 4016 val = (mac_addr[0] << 8) | mac_addr[1]; 4017 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val); 4018 4019 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 4020 (mac_addr[4] << 8) | mac_addr[5]; 4021 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 4022 4023 /* Enable the PME and clear the status */ 4024 pmc = pci_read_config(sc->dev, 4025 (sc->devinfo.pcie_pm_cap_reg + 4026 PCIR_POWER_STATUS), 4027 2); 4028 pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME; 4029 pci_write_config(sc->dev, 4030 (sc->devinfo.pcie_pm_cap_reg + 4031 PCIR_POWER_STATUS), 4032 pmc, 4); 4033 4034 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 4035 } 4036 #endif 4037 else { 4038 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 4039 } 4040 4041 /* Send the request to the MCP */ 4042 if (!BXE_NOMCP(sc)) { 4043 reset_code = bxe_fw_command(sc, reset_code, 0); 4044 } else { 4045 reset_code = bxe_nic_unload_no_mcp(sc); 4046 } 4047 4048 return (reset_code); 4049 } 4050 4051 /* send UNLOAD_DONE command to the MCP */ 4052 static void 4053 bxe_send_unload_done(struct bxe_softc *sc, 4054 uint8_t keep_link) 4055 { 4056 uint32_t reset_param = 4057 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 4058 4059 /* Report UNLOAD_DONE to MCP */ 4060 if (!BXE_NOMCP(sc)) { 4061 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 4062 } 4063 } 4064 4065 static int 4066 bxe_func_wait_started(struct bxe_softc *sc) 4067 { 4068 int tout = 50; 4069 4070 if (!sc->port.pmf) { 4071 return (0); 4072 } 4073 4074 /* 4075 * (assumption: No Attention from MCP at this stage) 4076 * PMF probably in the middle of TX disable/enable transaction 4077 * 1. Sync IRS for default SB 4078 * 2. Sync SP queue - this guarantees us that attention handling started 4079 * 3. 
Wait, that TX disable/enable transaction completes 4080 * 4081 * 1+2 guarantee that if DCBX attention was scheduled it already changed 4082 * pending bit of transaction from STARTED-->TX_STOPPED, if we already 4083 * received completion for the transaction the state is TX_STOPPED. 4084 * State will return to STARTED after completion of TX_STOPPED-->STARTED 4085 * transaction. 4086 */ 4087 4088 /* XXX make sure default SB ISR is done */ 4089 /* need a way to synchronize an irq (intr_mtx?) */ 4090 4091 /* XXX flush any work queues */ 4092 4093 while (ecore_func_get_state(sc, &sc->func_obj) != 4094 ECORE_F_STATE_STARTED && tout--) { 4095 DELAY(20000); 4096 } 4097 4098 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { 4099 /* 4100 * Failed to complete the transaction in a "good way" 4101 * Force both transactions with CLR bit. 4102 */ 4103 struct ecore_func_state_params func_params = { NULL }; 4104 4105 BLOGE(sc, "Unexpected function state! " 4106 "Forcing STARTED-->TX_STOPPED-->STARTED\n"); 4107 4108 func_params.f_obj = &sc->func_obj; 4109 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 4110 4111 /* STARTED-->TX_STOPPED */ 4112 func_params.cmd = ECORE_F_CMD_TX_STOP; 4113 ecore_func_state_change(sc, &func_params); 4114 4115 /* TX_STOPPED-->STARTED */ 4116 func_params.cmd = ECORE_F_CMD_TX_START; 4117 return (ecore_func_state_change(sc, &func_params)); 4118 } 4119 4120 return (0); 4121 } 4122 4123 static int 4124 bxe_stop_queue(struct bxe_softc *sc, 4125 int index) 4126 { 4127 struct bxe_fastpath *fp = &sc->fp[index]; 4128 struct ecore_queue_state_params q_params = { NULL }; 4129 int rc; 4130 4131 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); 4132 4133 q_params.q_obj = &sc->sp_objs[fp->index].q_obj; 4134 /* We want to wait for completion in this context */ 4135 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 4136 4137 /* Stop the primary connection: */ 4138 4139 /* ...halt the connection */ 4140 q_params.cmd = ECORE_Q_CMD_HALT; 4141 rc = ecore_queue_state_change(sc, &q_params); 4142 if (rc) { 4143 return (rc); 4144 } 4145 4146 /* ...terminate the connection */ 4147 q_params.cmd = ECORE_Q_CMD_TERMINATE; 4148 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate)); 4149 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 4150 rc = ecore_queue_state_change(sc, &q_params); 4151 if (rc) { 4152 return (rc); 4153 } 4154 4155 /* ...delete cfc entry */ 4156 q_params.cmd = ECORE_Q_CMD_CFC_DEL; 4157 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); 4158 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 4159 return (ecore_queue_state_change(sc, &q_params)); 4160 } 4161 4162 /* wait for the outstanding SP commands */ 4163 static inline uint8_t 4164 bxe_wait_sp_comp(struct bxe_softc *sc, 4165 unsigned long mask) 4166 { 4167 unsigned long tmp; 4168 int tout = 5000; /* wait for 5 secs tops */ 4169 4170 while (tout--) { 4171 mb(); 4172 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { 4173 return (TRUE); 4174 } 4175 4176 DELAY(1000); 4177 } 4178 4179 mb(); 4180 4181 tmp = atomic_load_acq_long(&sc->sp_state); 4182 if (tmp & mask) { 4183 BLOGE(sc, "Filtering completion timed out: " 4184 "sp_state 0x%lx, mask 0x%lx\n", 4185 tmp, mask); 4186 return (FALSE); 4187 } 4188 4189 return (FALSE); 4190 } 4191 4192 static int 4193 bxe_func_stop(struct bxe_softc *sc) 4194 { 4195 struct ecore_func_state_params func_params = { NULL }; 4196 int rc; 4197 4198 /* prepare parameters for function state transitions */ 
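    /*
     * RAMROD_COMP_WAIT makes the state change below block until the
     * FUNC_STOP ramrod completes rather than returning immediately.
     */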
4199 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4200 func_params.f_obj = &sc->func_obj; 4201 func_params.cmd = ECORE_F_CMD_STOP; 4202 4203 /* 4204 * Try to stop the function the 'good way'. If it fails (in case 4205 * of a parity error during bxe_chip_cleanup()) and we are 4206 * not in a debug mode, perform a state transaction in order to 4207 * enable further HW_RESET transaction. 4208 */ 4209 rc = ecore_func_state_change(sc, &func_params); 4210 if (rc) { 4211 BLOGE(sc, "FUNC_STOP ramrod failed. " 4212 "Running a dry transaction\n"); 4213 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 4214 return (ecore_func_state_change(sc, &func_params)); 4215 } 4216 4217 return (0); 4218 } 4219 4220 static int 4221 bxe_reset_hw(struct bxe_softc *sc, 4222 uint32_t load_code) 4223 { 4224 struct ecore_func_state_params func_params = { NULL }; 4225 4226 /* Prepare parameters for function state transitions */ 4227 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4228 4229 func_params.f_obj = &sc->func_obj; 4230 func_params.cmd = ECORE_F_CMD_HW_RESET; 4231 4232 func_params.params.hw_init.load_phase = load_code; 4233 4234 return (ecore_func_state_change(sc, &func_params)); 4235 } 4236 4237 static void 4238 bxe_int_disable_sync(struct bxe_softc *sc, 4239 int disable_hw) 4240 { 4241 if (disable_hw) { 4242 /* prevent the HW from sending interrupts */ 4243 bxe_int_disable(sc); 4244 } 4245 4246 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */ 4247 /* make sure all ISRs are done */ 4248 4249 /* XXX make sure sp_task is not running */ 4250 /* cancel and flush work queues */ 4251 } 4252 4253 static void 4254 bxe_chip_cleanup(struct bxe_softc *sc, 4255 uint32_t unload_mode, 4256 uint8_t keep_link) 4257 { 4258 int port = SC_PORT(sc); 4259 struct ecore_mcast_ramrod_params rparam = { NULL }; 4260 uint32_t reset_code; 4261 int i, rc = 0; 4262 4263 bxe_drain_tx_queues(sc); 4264 4265 /* give HW time to discard old tx messages */ 4266 DELAY(1000); 4267 4268 /* Clean all ETH MACs */ 4269 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); 4270 if (rc < 0) { 4271 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc); 4272 } 4273 4274 /* Clean up UC list */ 4275 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); 4276 if (rc < 0) { 4277 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc); 4278 } 4279 4280 /* Disable LLH */ 4281 if (!CHIP_IS_E1(sc)) { 4282 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 4283 } 4284 4285 /* Set "drop all" to stop Rx */ 4286 4287 /* 4288 * We need to take the BXE_MCAST_LOCK() here in order to prevent 4289 * a race between the completion code and this code. 4290 */ 4291 BXE_MCAST_LOCK(sc); 4292 4293 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 4294 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 4295 } else { 4296 bxe_set_storm_rx_mode(sc); 4297 } 4298 4299 /* Clean up multicast configuration */ 4300 rparam.mcast_obj = &sc->mcast_obj; 4301 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 4302 if (rc < 0) { 4303 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); 4304 } 4305 4306 BXE_MCAST_UNLOCK(sc); 4307 4308 // XXX bxe_iov_chip_cleanup(sc); 4309 4310 /* 4311 * Send the UNLOAD_REQUEST to the MCP. This will return if 4312 * this function should perform FUNCTION, PORT, or COMMON HW 4313 * reset. 
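     * When the MCP is not present, the same decision is derived from the
     * driver-global load_count array (see bxe_nic_unload_no_mcp()).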
4314 */ 4315 reset_code = bxe_send_unload_req(sc, unload_mode); 4316 4317 /* 4318 * (assumption: No Attention from MCP at this stage) 4319 * PMF probably in the middle of TX disable/enable transaction 4320 */ 4321 rc = bxe_func_wait_started(sc); 4322 if (rc) { 4323 BLOGE(sc, "bxe_func_wait_started failed\n"); 4324 } 4325 4326 /* 4327 * Close multi and leading connections 4328 * Completions for ramrods are collected in a synchronous way 4329 */ 4330 for (i = 0; i < sc->num_queues; i++) { 4331 if (bxe_stop_queue(sc, i)) { 4332 goto unload_error; 4333 } 4334 } 4335 4336 /* 4337 * If SP settings didn't get completed so far - something 4338 * very wrong has happen. 4339 */ 4340 if (!bxe_wait_sp_comp(sc, ~0x0UL)) { 4341 BLOGE(sc, "Common slow path ramrods got stuck!\n"); 4342 } 4343 4344 unload_error: 4345 4346 rc = bxe_func_stop(sc); 4347 if (rc) { 4348 BLOGE(sc, "Function stop failed!\n"); 4349 } 4350 4351 /* disable HW interrupts */ 4352 bxe_int_disable_sync(sc, TRUE); 4353 4354 /* detach interrupts */ 4355 bxe_interrupt_detach(sc); 4356 4357 /* Reset the chip */ 4358 rc = bxe_reset_hw(sc, reset_code); 4359 if (rc) { 4360 BLOGE(sc, "Hardware reset failed\n"); 4361 } 4362 4363 /* Report UNLOAD_DONE to MCP */ 4364 bxe_send_unload_done(sc, keep_link); 4365 } 4366 4367 static void 4368 bxe_disable_close_the_gate(struct bxe_softc *sc) 4369 { 4370 uint32_t val; 4371 int port = SC_PORT(sc); 4372 4373 BLOGD(sc, DBG_LOAD, 4374 "Disabling 'close the gates'\n"); 4375 4376 if (CHIP_IS_E1(sc)) { 4377 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4378 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4379 val = REG_RD(sc, addr); 4380 val &= ~(0x300); 4381 REG_WR(sc, addr, val); 4382 } else { 4383 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); 4384 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 4385 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 4386 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); 4387 } 4388 } 4389 4390 /* 4391 * Cleans the object that have internal lists without sending 4392 * ramrods. Should be run when interrutps are disabled. 4393 */ 4394 static void 4395 bxe_squeeze_objects(struct bxe_softc *sc) 4396 { 4397 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 4398 struct ecore_mcast_ramrod_params rparam = { NULL }; 4399 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 4400 int rc; 4401 4402 /* Cleanup MACs' object first... */ 4403 4404 /* Wait for completion of requested */ 4405 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 4406 /* Perform a dry cleanup */ 4407 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); 4408 4409 /* Clean ETH primary MAC */ 4410 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags); 4411 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, 4412 &ramrod_flags); 4413 if (rc != 0) { 4414 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc); 4415 } 4416 4417 /* Cleanup UC list */ 4418 vlan_mac_flags = 0; 4419 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags); 4420 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, 4421 &ramrod_flags); 4422 if (rc != 0) { 4423 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc); 4424 } 4425 4426 /* Now clean mcast object... */ 4427 4428 rparam.mcast_obj = &sc->mcast_obj; 4429 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); 4430 4431 /* Add a DEL command... 
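     * Since RAMROD_DRV_CLR_ONLY is set above, this only cleans up the
     * driver's pending multicast list and does not send a ramrod to
     * the chip.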
     */
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
    if (rc < 0) {
        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
    }

    /* now wait until all pending commands are cleared */

    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
    while (rc != 0) {
        if (rc < 0) {
            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
            return;
        }

        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
    }
}

/* stop the controller */
static __noinline int
bxe_nic_unload(struct bxe_softc *sc,
               uint32_t         unload_mode,
               uint8_t          keep_link)
{
    uint8_t global = FALSE;
    uint32_t val;

    BXE_CORE_LOCK_ASSERT(sc);

    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");

    /* mark driver as unloaded in shmem2 */
    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
    }

    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
        /*
         * We can get here if the driver has been unloaded
         * during parity error recovery and is either waiting for a
         * leader to complete or for other functions to unload and
         * then ifconfig down has been issued. In this case we want to
         * unload and let other functions complete the recovery
         * process.
         */
        sc->recovery_state = BXE_RECOVERY_DONE;
        sc->is_leader = 0;
        bxe_release_leader_lock(sc);
        mb();

        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
        BLOGE(sc, "Can't unload in closed or error state\n");
        return (-1);
    }

    /*
     * Nothing to do during unload if the previous bxe_nic_load()
     * did not complete successfully - all resources are released.
     */
    if ((sc->state == BXE_STATE_CLOSED) ||
        (sc->state == BXE_STATE_ERROR)) {
        return (0);
    }

    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
    mb();

    /* stop tx */
    bxe_tx_disable(sc);

    sc->rx_mode = BXE_RX_MODE_NONE;
    /* XXX set rx mode ??? */

    if (IS_PF(sc) && !sc->grcdump_done) {
        /* set ALWAYS_ALIVE bit in shmem */
        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

        bxe_drv_pulse(sc);

        bxe_stats_handle(sc, STATS_EVENT_STOP);
        bxe_save_statistics(sc);
    }

    /* wait till consumers catch up with producers in all queues */
    bxe_drain_tx_queues(sc);

    /*
     * If this is a VF, indicate to the PF that this function is going down
     * (the PF will delete the sp elements and clear the initializations).
     */
    if (IS_VF(sc)) {
        ; /* bxe_vfpf_close_vf(sc); */
    } else if (unload_mode != UNLOAD_RECOVERY) {
        /* if this is a normal/close unload need to clean up the chip */
        if (!sc->grcdump_done)
            bxe_chip_cleanup(sc, unload_mode, keep_link);
    } else {
        /* Send the UNLOAD_REQUEST to the MCP */
        bxe_send_unload_req(sc, unload_mode);

        /*
         * Prevent transactions to the host from the functions on the
         * engine that doesn't reset global blocks in case of a global
         * attention once global blocks are reset and gates are opened
         * (the engine whose leader will perform the recovery
         * last).
4540 */ 4541 if (!CHIP_IS_E1x(sc)) { 4542 bxe_pf_disable(sc); 4543 } 4544 4545 /* disable HW interrupts */ 4546 bxe_int_disable_sync(sc, TRUE); 4547 4548 /* detach interrupts */ 4549 bxe_interrupt_detach(sc); 4550 4551 /* Report UNLOAD_DONE to MCP */ 4552 bxe_send_unload_done(sc, FALSE); 4553 } 4554 4555 /* 4556 * At this stage no more interrupts will arrive so we may safely clean 4557 * the queue'able objects here in case they failed to get cleaned so far. 4558 */ 4559 if (IS_PF(sc)) { 4560 bxe_squeeze_objects(sc); 4561 } 4562 4563 /* There should be no more pending SP commands at this stage */ 4564 sc->sp_state = 0; 4565 4566 sc->port.pmf = 0; 4567 4568 bxe_free_fp_buffers(sc); 4569 4570 if (IS_PF(sc)) { 4571 bxe_free_mem(sc); 4572 } 4573 4574 bxe_free_fw_stats_mem(sc); 4575 4576 sc->state = BXE_STATE_CLOSED; 4577 4578 /* 4579 * Check if there are pending parity attentions. If there are - set 4580 * RECOVERY_IN_PROGRESS. 4581 */ 4582 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) { 4583 bxe_set_reset_in_progress(sc); 4584 4585 /* Set RESET_IS_GLOBAL if needed */ 4586 if (global) { 4587 bxe_set_reset_global(sc); 4588 } 4589 } 4590 4591 /* 4592 * The last driver must disable a "close the gate" if there is no 4593 * parity attention or "process kill" pending. 4594 */ 4595 if (IS_PF(sc) && !bxe_clear_pf_load(sc) && 4596 bxe_reset_is_done(sc, SC_PATH(sc))) { 4597 bxe_disable_close_the_gate(sc); 4598 } 4599 4600 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n"); 4601 4602 return (0); 4603 } 4604 4605 /* 4606 * Called by the OS to set various media options (i.e. link, speed, etc.) when 4607 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...". 4608 */ 4609 static int 4610 bxe_ifmedia_update(struct ifnet *ifp) 4611 { 4612 struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp); 4613 struct ifmedia *ifm; 4614 4615 ifm = &sc->ifmedia; 4616 4617 /* We only support Ethernet media type. */ 4618 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 4619 return (EINVAL); 4620 } 4621 4622 switch (IFM_SUBTYPE(ifm->ifm_media)) { 4623 case IFM_AUTO: 4624 break; 4625 case IFM_10G_CX4: 4626 case IFM_10G_SR: 4627 case IFM_10G_T: 4628 case IFM_10G_TWINAX: 4629 default: 4630 /* We don't support changing the media type. */ 4631 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", 4632 IFM_SUBTYPE(ifm->ifm_media)); 4633 return (EINVAL); 4634 } 4635 4636 return (0); 4637 } 4638 4639 /* 4640 * Called by the OS to get the current media status (i.e. link, speed, etc.). 4641 */ 4642 static void 4643 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) 4644 { 4645 struct bxe_softc *sc = if_getsoftc(ifp); 4646 4647 /* Report link down if the driver isn't running. */ 4648 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 4649 ifmr->ifm_active |= IFM_NONE; 4650 return; 4651 } 4652 4653 /* Setup the default interface info. 
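     * IFM_AVALID indicates that the link status (IFM_ACTIVE) reported in
     * ifm_status is valid; the actual link state is filled in just below.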
*/ 4654 ifmr->ifm_status = IFM_AVALID; 4655 ifmr->ifm_active = IFM_ETHER; 4656 4657 if (sc->link_vars.link_up) { 4658 ifmr->ifm_status |= IFM_ACTIVE; 4659 } else { 4660 ifmr->ifm_active |= IFM_NONE; 4661 return; 4662 } 4663 4664 ifmr->ifm_active |= sc->media; 4665 4666 if (sc->link_vars.duplex == DUPLEX_FULL) { 4667 ifmr->ifm_active |= IFM_FDX; 4668 } else { 4669 ifmr->ifm_active |= IFM_HDX; 4670 } 4671 } 4672 4673 static int 4674 bxe_ioctl_nvram(struct bxe_softc *sc, 4675 uint32_t priv_op, 4676 struct ifreq *ifr) 4677 { 4678 struct bxe_nvram_data nvdata_base; 4679 struct bxe_nvram_data *nvdata; 4680 int len; 4681 int error = 0; 4682 4683 copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base)); 4684 4685 len = (sizeof(struct bxe_nvram_data) + 4686 nvdata_base.len - 4687 sizeof(uint32_t)); 4688 4689 if (len > sizeof(struct bxe_nvram_data)) { 4690 if ((nvdata = (struct bxe_nvram_data *) 4691 malloc(len, M_DEVBUF, 4692 (M_NOWAIT | M_ZERO))) == NULL) { 4693 BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n"); 4694 return (1); 4695 } 4696 memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data)); 4697 } else { 4698 nvdata = &nvdata_base; 4699 } 4700 4701 if (priv_op == BXE_IOC_RD_NVRAM) { 4702 BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n", 4703 nvdata->offset, nvdata->len); 4704 error = bxe_nvram_read(sc, 4705 nvdata->offset, 4706 (uint8_t *)nvdata->value, 4707 nvdata->len); 4708 copyout(nvdata, ifr->ifr_data, len); 4709 } else { /* BXE_IOC_WR_NVRAM */ 4710 BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n", 4711 nvdata->offset, nvdata->len); 4712 copyin(ifr->ifr_data, nvdata, len); 4713 error = bxe_nvram_write(sc, 4714 nvdata->offset, 4715 (uint8_t *)nvdata->value, 4716 nvdata->len); 4717 } 4718 4719 if (len > sizeof(struct bxe_nvram_data)) { 4720 free(nvdata, M_DEVBUF); 4721 } 4722 4723 return (error); 4724 } 4725 4726 static int 4727 bxe_ioctl_stats_show(struct bxe_softc *sc, 4728 uint32_t priv_op, 4729 struct ifreq *ifr) 4730 { 4731 const size_t str_size = (BXE_NUM_ETH_STATS * STAT_NAME_LEN); 4732 const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t)); 4733 caddr_t p_tmp; 4734 uint32_t *offset; 4735 int i; 4736 4737 switch (priv_op) 4738 { 4739 case BXE_IOC_STATS_SHOW_NUM: 4740 memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data)); 4741 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num = 4742 BXE_NUM_ETH_STATS; 4743 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len = 4744 STAT_NAME_LEN; 4745 return (0); 4746 4747 case BXE_IOC_STATS_SHOW_STR: 4748 memset(ifr->ifr_data, 0, str_size); 4749 p_tmp = ifr->ifr_data; 4750 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4751 strcpy(p_tmp, bxe_eth_stats_arr[i].string); 4752 p_tmp += STAT_NAME_LEN; 4753 } 4754 return (0); 4755 4756 case BXE_IOC_STATS_SHOW_CNT: 4757 memset(ifr->ifr_data, 0, stats_size); 4758 p_tmp = ifr->ifr_data; 4759 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4760 offset = ((uint32_t *)&sc->eth_stats + 4761 bxe_eth_stats_arr[i].offset); 4762 switch (bxe_eth_stats_arr[i].size) { 4763 case 4: 4764 *((uint64_t *)p_tmp) = (uint64_t)*offset; 4765 break; 4766 case 8: 4767 *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1)); 4768 break; 4769 default: 4770 *((uint64_t *)p_tmp) = 0; 4771 } 4772 p_tmp += sizeof(uint64_t); 4773 } 4774 return (0); 4775 4776 default: 4777 return (-1); 4778 } 4779 } 4780 4781 static void 4782 bxe_handle_chip_tq(void *context, 4783 int pending) 4784 { 4785 struct bxe_softc *sc = (struct bxe_softc *)context; 4786 long work = atomic_load_acq_long(&sc->chip_tq_flags); 4787 4788 switch (work) 4789 { 4790 
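    /*
     * CHIP_TQ_REINIT is requested, for example, by the TX watchdog
     * (bxe_watchdog()) when a queue stops making progress.
     */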
4791 case CHIP_TQ_REINIT: 4792 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 4793 /* restart the interface */ 4794 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); 4795 bxe_periodic_stop(sc); 4796 BXE_CORE_LOCK(sc); 4797 bxe_stop_locked(sc); 4798 bxe_init_locked(sc); 4799 BXE_CORE_UNLOCK(sc); 4800 } 4801 break; 4802 4803 default: 4804 break; 4805 } 4806 } 4807 4808 /* 4809 * Handles any IOCTL calls from the operating system. 4810 * 4811 * Returns: 4812 * 0 = Success, >0 Failure 4813 */ 4814 static int 4815 bxe_ioctl(if_t ifp, 4816 u_long command, 4817 caddr_t data) 4818 { 4819 struct bxe_softc *sc = if_getsoftc(ifp); 4820 struct ifreq *ifr = (struct ifreq *)data; 4821 struct bxe_nvram_data *nvdata; 4822 uint32_t priv_op; 4823 int mask = 0; 4824 int reinit = 0; 4825 int error = 0; 4826 4827 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); 4828 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); 4829 4830 switch (command) 4831 { 4832 case SIOCSIFMTU: 4833 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", 4834 ifr->ifr_mtu); 4835 4836 if (sc->mtu == ifr->ifr_mtu) { 4837 /* nothing to change */ 4838 break; 4839 } 4840 4841 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { 4842 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", 4843 ifr->ifr_mtu, mtu_min, mtu_max); 4844 error = EINVAL; 4845 break; 4846 } 4847 4848 atomic_store_rel_int((volatile unsigned int *)&sc->mtu, 4849 (unsigned long)ifr->ifr_mtu); 4850 /* 4851 atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp), 4852 (unsigned long)ifr->ifr_mtu); 4853 XXX - Not sure why it needs to be atomic 4854 */ 4855 if_setmtu(ifp, ifr->ifr_mtu); 4856 reinit = 1; 4857 break; 4858 4859 case SIOCSIFFLAGS: 4860 /* toggle the interface state up or down */ 4861 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); 4862 4863 BXE_CORE_LOCK(sc); 4864 /* check if the interface is up */ 4865 if (if_getflags(ifp) & IFF_UP) { 4866 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4867 /* set the receive mode flags */ 4868 bxe_set_rx_mode(sc); 4869 } else { 4870 bxe_init_locked(sc); 4871 } 4872 } else { 4873 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4874 bxe_periodic_stop(sc); 4875 bxe_stop_locked(sc); 4876 } 4877 } 4878 BXE_CORE_UNLOCK(sc); 4879 4880 break; 4881 4882 case SIOCADDMULTI: 4883 case SIOCDELMULTI: 4884 /* add/delete multicast addresses */ 4885 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n"); 4886 4887 /* check if the interface is up */ 4888 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4889 /* set the receive mode flags */ 4890 BXE_CORE_LOCK(sc); 4891 bxe_set_rx_mode(sc); 4892 BXE_CORE_UNLOCK(sc); 4893 } 4894 4895 break; 4896 4897 case SIOCSIFCAP: 4898 /* find out which capabilities have changed */ 4899 mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp)); 4900 4901 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n", 4902 mask); 4903 4904 /* toggle the LRO capabilites enable flag */ 4905 if (mask & IFCAP_LRO) { 4906 if_togglecapenable(ifp, IFCAP_LRO); 4907 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n", 4908 (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF"); 4909 reinit = 1; 4910 } 4911 4912 /* toggle the TXCSUM checksum capabilites enable flag */ 4913 if (mask & IFCAP_TXCSUM) { 4914 if_togglecapenable(ifp, IFCAP_TXCSUM); 4915 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n", 4916 (if_getcapenable(ifp) & IFCAP_TXCSUM) ? 
"ON" : "OFF"); 4917 if (if_getcapenable(ifp) & IFCAP_TXCSUM) { 4918 if_sethwassistbits(ifp, (CSUM_IP | 4919 CSUM_TCP | 4920 CSUM_UDP | 4921 CSUM_TSO | 4922 CSUM_TCP_IPV6 | 4923 CSUM_UDP_IPV6), 0); 4924 } else { 4925 if_clearhwassist(ifp); /* XXX */ 4926 } 4927 } 4928 4929 /* toggle the RXCSUM checksum capabilities enable flag */ 4930 if (mask & IFCAP_RXCSUM) { 4931 if_togglecapenable(ifp, IFCAP_RXCSUM); 4932 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n", 4933 (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF"); 4934 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 4935 if_sethwassistbits(ifp, (CSUM_IP | 4936 CSUM_TCP | 4937 CSUM_UDP | 4938 CSUM_TSO | 4939 CSUM_TCP_IPV6 | 4940 CSUM_UDP_IPV6), 0); 4941 } else { 4942 if_clearhwassist(ifp); /* XXX */ 4943 } 4944 } 4945 4946 /* toggle TSO4 capabilities enabled flag */ 4947 if (mask & IFCAP_TSO4) { 4948 if_togglecapenable(ifp, IFCAP_TSO4); 4949 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n", 4950 (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF"); 4951 } 4952 4953 /* toggle TSO6 capabilities enabled flag */ 4954 if (mask & IFCAP_TSO6) { 4955 if_togglecapenable(ifp, IFCAP_TSO6); 4956 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n", 4957 (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF"); 4958 } 4959 4960 /* toggle VLAN_HWTSO capabilities enabled flag */ 4961 if (mask & IFCAP_VLAN_HWTSO) { 4962 4963 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 4964 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", 4965 (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); 4966 } 4967 4968 /* toggle VLAN_HWCSUM capabilities enabled flag */ 4969 if (mask & IFCAP_VLAN_HWCSUM) { 4970 /* XXX investigate this... */ 4971 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); 4972 error = EINVAL; 4973 } 4974 4975 /* toggle VLAN_MTU capabilities enable flag */ 4976 if (mask & IFCAP_VLAN_MTU) { 4977 /* XXX investigate this... */ 4978 BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); 4979 error = EINVAL; 4980 } 4981 4982 /* toggle VLAN_HWTAGGING capabilities enabled flag */ 4983 if (mask & IFCAP_VLAN_HWTAGGING) { 4984 /* XXX investigate this... */ 4985 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); 4986 error = EINVAL; 4987 } 4988 4989 /* toggle VLAN_HWFILTER capabilities enabled flag */ 4990 if (mask & IFCAP_VLAN_HWFILTER) { 4991 /* XXX investigate this... */ 4992 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); 4993 error = EINVAL; 4994 } 4995 4996 /* XXX not yet... 
4997 * IFCAP_WOL_MAGIC 4998 */ 4999 5000 break; 5001 5002 case SIOCSIFMEDIA: 5003 case SIOCGIFMEDIA: 5004 /* set/get interface media */ 5005 BLOGD(sc, DBG_IOCTL, 5006 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", 5007 (command & 0xff)); 5008 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 5009 break; 5010 5011 case SIOCGPRIVATE_0: 5012 copyin(ifr->ifr_data, &priv_op, sizeof(priv_op)); 5013 5014 switch (priv_op) 5015 { 5016 case BXE_IOC_RD_NVRAM: 5017 case BXE_IOC_WR_NVRAM: 5018 nvdata = (struct bxe_nvram_data *)ifr->ifr_data; 5019 BLOGD(sc, DBG_IOCTL, 5020 "Received Private NVRAM ioctl addr=0x%x size=%u\n", 5021 nvdata->offset, nvdata->len); 5022 error = bxe_ioctl_nvram(sc, priv_op, ifr); 5023 break; 5024 5025 case BXE_IOC_STATS_SHOW_NUM: 5026 case BXE_IOC_STATS_SHOW_STR: 5027 case BXE_IOC_STATS_SHOW_CNT: 5028 BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n", 5029 priv_op); 5030 error = bxe_ioctl_stats_show(sc, priv_op, ifr); 5031 break; 5032 5033 default: 5034 BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op); 5035 error = EINVAL; 5036 break; 5037 } 5038 5039 break; 5040 5041 default: 5042 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", 5043 (command & 0xff)); 5044 error = ether_ioctl(ifp, command, data); 5045 break; 5046 } 5047 5048 if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { 5049 BLOGD(sc, DBG_LOAD | DBG_IOCTL, 5050 "Re-initializing hardware from IOCTL change\n"); 5051 bxe_periodic_stop(sc); 5052 BXE_CORE_LOCK(sc); 5053 bxe_stop_locked(sc); 5054 bxe_init_locked(sc); 5055 BXE_CORE_UNLOCK(sc); 5056 } 5057 5058 return (error); 5059 } 5060 5061 static __noinline void 5062 bxe_dump_mbuf(struct bxe_softc *sc, 5063 struct mbuf *m, 5064 uint8_t contents) 5065 { 5066 char * type; 5067 int i = 0; 5068 5069 if (!(sc->debug & DBG_MBUF)) { 5070 return; 5071 } 5072 5073 if (m == NULL) { 5074 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); 5075 return; 5076 } 5077 5078 while (m) { 5079 BLOGD(sc, DBG_MBUF, 5080 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", 5081 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); 5082 5083 if (m->m_flags & M_PKTHDR) { 5084 BLOGD(sc, DBG_MBUF, 5085 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", 5086 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, 5087 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 5088 } 5089 5090 if (m->m_flags & M_EXT) { 5091 switch (m->m_ext.ext_type) { 5092 case EXT_CLUSTER: type = "EXT_CLUSTER"; break; 5093 case EXT_SFBUF: type = "EXT_SFBUF"; break; 5094 case EXT_JUMBOP: type = "EXT_JUMBOP"; break; 5095 case EXT_JUMBO9: type = "EXT_JUMBO9"; break; 5096 case EXT_JUMBO16: type = "EXT_JUMBO16"; break; 5097 case EXT_PACKET: type = "EXT_PACKET"; break; 5098 case EXT_MBUF: type = "EXT_MBUF"; break; 5099 case EXT_NET_DRV: type = "EXT_NET_DRV"; break; 5100 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break; 5101 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break; 5102 case EXT_EXTREF: type = "EXT_EXTREF"; break; 5103 default: type = "UNKNOWN"; break; 5104 } 5105 5106 BLOGD(sc, DBG_MBUF, 5107 "%02d: - m_ext: %p ext_size=%d type=%s\n", 5108 i, m->m_ext.ext_buf, m->m_ext.ext_size, type); 5109 } 5110 5111 if (contents) { 5112 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE); 5113 } 5114 5115 m = m->m_next; 5116 i++; 5117 } 5118 } 5119 5120 /* 5121 * Checks to ensure the 13 bd sliding window is >= MSS for TSO. 5122 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS. 
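 * For example, with an MSS of 1448 bytes, every 10 consecutive data BDs
 * (frags) must together carry at least 1448 bytes of payload; if any such
 * window falls short, the mbuf is reported as needing defragmentation.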
5123 * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD 5124 * The headers comes in a seperate bd in FreeBSD so 13-3=10. 5125 * Returns: 0 if OK to send, 1 if packet needs further defragmentation 5126 */ 5127 static int 5128 bxe_chktso_window(struct bxe_softc *sc, 5129 int nsegs, 5130 bus_dma_segment_t *segs, 5131 struct mbuf *m) 5132 { 5133 uint32_t num_wnds, wnd_size, wnd_sum; 5134 int32_t frag_idx, wnd_idx; 5135 unsigned short lso_mss; 5136 int defrag; 5137 5138 defrag = 0; 5139 wnd_sum = 0; 5140 wnd_size = 10; 5141 num_wnds = nsegs - wnd_size; 5142 lso_mss = htole16(m->m_pkthdr.tso_segsz); 5143 5144 /* 5145 * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the 5146 * first window sum of data while skipping the first assuming it is the 5147 * header in FreeBSD. 5148 */ 5149 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) { 5150 wnd_sum += htole16(segs[frag_idx].ds_len); 5151 } 5152 5153 /* check the first 10 bd window size */ 5154 if (wnd_sum < lso_mss) { 5155 return (1); 5156 } 5157 5158 /* run through the windows */ 5159 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) { 5160 /* subtract the first mbuf->m_len of the last wndw(-header) */ 5161 wnd_sum -= htole16(segs[wnd_idx+1].ds_len); 5162 /* add the next mbuf len to the len of our new window */ 5163 wnd_sum += htole16(segs[frag_idx].ds_len); 5164 if (wnd_sum < lso_mss) { 5165 return (1); 5166 } 5167 } 5168 5169 return (0); 5170 } 5171 5172 static uint8_t 5173 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp, 5174 struct mbuf *m, 5175 uint32_t *parsing_data) 5176 { 5177 struct ether_vlan_header *eh = NULL; 5178 struct ip *ip4 = NULL; 5179 struct ip6_hdr *ip6 = NULL; 5180 caddr_t ip = NULL; 5181 struct tcphdr *th = NULL; 5182 int e_hlen, ip_hlen, l4_off; 5183 uint16_t proto; 5184 5185 if (m->m_pkthdr.csum_flags == CSUM_IP) { 5186 /* no L4 checksum offload needed */ 5187 return (0); 5188 } 5189 5190 /* get the Ethernet header */ 5191 eh = mtod(m, struct ether_vlan_header *); 5192 5193 /* handle VLAN encapsulation if present */ 5194 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 5195 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 5196 proto = ntohs(eh->evl_proto); 5197 } else { 5198 e_hlen = ETHER_HDR_LEN; 5199 proto = ntohs(eh->evl_encap_proto); 5200 } 5201 5202 switch (proto) { 5203 case ETHERTYPE_IP: 5204 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 5205 ip4 = (m->m_len < sizeof(struct ip)) ? 5206 (struct ip *)m->m_next->m_data : 5207 (struct ip *)(m->m_data + e_hlen); 5208 /* ip_hl is number of 32-bit words */ 5209 ip_hlen = (ip4->ip_hl << 2); 5210 ip = (caddr_t)ip4; 5211 break; 5212 case ETHERTYPE_IPV6: 5213 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 5214 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 5215 (struct ip6_hdr *)m->m_next->m_data : 5216 (struct ip6_hdr *)(m->m_data + e_hlen); 5217 /* XXX cannot support offload with IPv6 extensions */ 5218 ip_hlen = sizeof(struct ip6_hdr); 5219 ip = (caddr_t)ip6; 5220 break; 5221 default: 5222 /* We can't offload in this case... */ 5223 /* XXX error stat ??? 
*/ 5224 return (0); 5225 } 5226 5227 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5228 l4_off = (e_hlen + ip_hlen); 5229 5230 *parsing_data |= 5231 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 5232 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); 5233 5234 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5235 CSUM_TSO | 5236 CSUM_TCP_IPV6)) { 5237 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5238 th = (struct tcphdr *)(ip + ip_hlen); 5239 /* th_off is number of 32-bit words */ 5240 *parsing_data |= ((th->th_off << 5241 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 5242 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); 5243 return (l4_off + (th->th_off << 2)); /* entire header length */ 5244 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5245 CSUM_UDP_IPV6)) { 5246 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5247 return (l4_off + sizeof(struct udphdr)); /* entire header length */ 5248 } else { 5249 /* XXX error stat ??? */ 5250 return (0); 5251 } 5252 } 5253 5254 static uint8_t 5255 bxe_set_pbd_csum(struct bxe_fastpath *fp, 5256 struct mbuf *m, 5257 struct eth_tx_parse_bd_e1x *pbd) 5258 { 5259 struct ether_vlan_header *eh = NULL; 5260 struct ip *ip4 = NULL; 5261 struct ip6_hdr *ip6 = NULL; 5262 caddr_t ip = NULL; 5263 struct tcphdr *th = NULL; 5264 struct udphdr *uh = NULL; 5265 int e_hlen, ip_hlen; 5266 uint16_t proto; 5267 uint8_t hlen; 5268 uint16_t tmp_csum; 5269 uint32_t *tmp_uh; 5270 5271 /* get the Ethernet header */ 5272 eh = mtod(m, struct ether_vlan_header *); 5273 5274 /* handle VLAN encapsulation if present */ 5275 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 5276 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 5277 proto = ntohs(eh->evl_proto); 5278 } else { 5279 e_hlen = ETHER_HDR_LEN; 5280 proto = ntohs(eh->evl_encap_proto); 5281 } 5282 5283 switch (proto) { 5284 case ETHERTYPE_IP: 5285 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 5286 ip4 = (m->m_len < sizeof(struct ip)) ? 5287 (struct ip *)m->m_next->m_data : 5288 (struct ip *)(m->m_data + e_hlen); 5289 /* ip_hl is number of 32-bit words */ 5290 ip_hlen = (ip4->ip_hl << 1); 5291 ip = (caddr_t)ip4; 5292 break; 5293 case ETHERTYPE_IPV6: 5294 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 5295 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 5296 (struct ip6_hdr *)m->m_next->m_data : 5297 (struct ip6_hdr *)(m->m_data + e_hlen); 5298 /* XXX cannot support offload with IPv6 extensions */ 5299 ip_hlen = (sizeof(struct ip6_hdr) >> 1); 5300 ip = (caddr_t)ip6; 5301 break; 5302 default: 5303 /* We can't offload in this case... */ 5304 /* XXX error stat ??? 
*/ 5305 return (0); 5306 } 5307 5308 hlen = (e_hlen >> 1); 5309 5310 /* note that rest of global_data is indirectly zeroed here */ 5311 if (m->m_flags & M_VLANTAG) { 5312 pbd->global_data = 5313 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 5314 } else { 5315 pbd->global_data = htole16(hlen); 5316 } 5317 5318 pbd->ip_hlen_w = ip_hlen; 5319 5320 hlen += pbd->ip_hlen_w; 5321 5322 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5323 5324 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5325 CSUM_TSO | 5326 CSUM_TCP_IPV6)) { 5327 th = (struct tcphdr *)(ip + (ip_hlen << 1)); 5328 /* th_off is number of 32-bit words */ 5329 hlen += (uint16_t)(th->th_off << 1); 5330 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5331 CSUM_UDP_IPV6)) { 5332 uh = (struct udphdr *)(ip + (ip_hlen << 1)); 5333 hlen += (sizeof(struct udphdr) / 2); 5334 } else { 5335 /* valid case as only CSUM_IP was set */ 5336 return (0); 5337 } 5338 5339 pbd->total_hlen_w = htole16(hlen); 5340 5341 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5342 CSUM_TSO | 5343 CSUM_TCP_IPV6)) { 5344 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5345 pbd->tcp_pseudo_csum = ntohs(th->th_sum); 5346 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5347 CSUM_UDP_IPV6)) { 5348 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5349 5350 /* 5351 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP 5352 * checksums and does not know anything about the UDP header and where 5353 * the checksum field is located. It only knows about TCP. Therefore 5354 * we "lie" to the hardware for outgoing UDP packets w/ checksum 5355 * offload. Since the checksum field offset for TCP is 16 bytes and 5356 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 5357 * bytes less than the start of the UDP header. This allows the 5358 * hardware to write the checksum in the correct spot. But the 5359 * hardware will compute a checksum which includes the last 10 bytes 5360 * of the IP header. To correct this we tweak the stack computed 5361 * pseudo checksum by folding in the calculation of the inverse 5362 * checksum for those final 10 bytes of the IP header. This allows 5363 * the correct checksum to be computed by the hardware. 5364 */ 5365 5366 /* set pointer 10 bytes before UDP header */ 5367 tmp_uh = (uint32_t *)((uint8_t *)uh - 10); 5368 5369 /* calculate a pseudo header checksum over the first 10 bytes */ 5370 tmp_csum = in_pseudo(*tmp_uh, 5371 *(tmp_uh + 1), 5372 *(uint16_t *)(tmp_uh + 2)); 5373 5374 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); 5375 } 5376 5377 return (hlen * 2); /* entire header length, number of bytes */ 5378 } 5379 5380 static void 5381 bxe_set_pbd_lso_e2(struct mbuf *m, 5382 uint32_t *parsing_data) 5383 { 5384 *parsing_data |= ((m->m_pkthdr.tso_segsz << 5385 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 5386 ETH_TX_PARSE_BD_E2_LSO_MSS); 5387 5388 /* XXX test for IPv6 with extension header... */ 5389 #if 0 5390 struct ip6_hdr *ip6; 5391 if (ip6 && ip6->ip6_nxt == 'some ipv6 extension header') 5392 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 5393 #endif 5394 } 5395 5396 static void 5397 bxe_set_pbd_lso(struct mbuf *m, 5398 struct eth_tx_parse_bd_e1x *pbd) 5399 { 5400 struct ether_vlan_header *eh = NULL; 5401 struct ip *ip = NULL; 5402 struct tcphdr *th = NULL; 5403 int e_hlen; 5404 5405 /* get the Ethernet header */ 5406 eh = mtod(m, struct ether_vlan_header *); 5407 5408 /* handle VLAN encapsulation if present */ 5409 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? 
5410 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN; 5411 5412 /* get the IP and TCP header, with LSO entire header in first mbuf */ 5413 /* XXX assuming IPv4 */ 5414 ip = (struct ip *)(m->m_data + e_hlen); 5415 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 5416 5417 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz); 5418 pbd->tcp_send_seq = ntohl(th->th_seq); 5419 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff); 5420 5421 #if 1 5422 /* XXX IPv4 */ 5423 pbd->ip_id = ntohs(ip->ip_id); 5424 pbd->tcp_pseudo_csum = 5425 ntohs(in_pseudo(ip->ip_src.s_addr, 5426 ip->ip_dst.s_addr, 5427 htons(IPPROTO_TCP))); 5428 #else 5429 /* XXX IPv6 */ 5430 pbd->tcp_pseudo_csum = 5431 ntohs(in_pseudo(&ip6->ip6_src, 5432 &ip6->ip6_dst, 5433 htons(IPPROTO_TCP))); 5434 #endif 5435 5436 pbd->global_data |= 5437 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); 5438 } 5439 5440 /* 5441 * Encapsulte an mbuf cluster into the tx bd chain and makes the memory 5442 * visible to the controller. 5443 * 5444 * If an mbuf is submitted to this routine and cannot be given to the 5445 * controller (e.g. it has too many fragments) then the function may free 5446 * the mbuf and return to the caller. 5447 * 5448 * Returns: 5449 * 0 = Success, !0 = Failure 5450 * Note the side effect that an mbuf may be freed if it causes a problem. 5451 */ 5452 static int 5453 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head) 5454 { 5455 bus_dma_segment_t segs[32]; 5456 struct mbuf *m0; 5457 struct bxe_sw_tx_bd *tx_buf; 5458 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 5459 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 5460 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */ 5461 struct eth_tx_bd *tx_data_bd; 5462 struct eth_tx_bd *tx_total_pkt_size_bd; 5463 struct eth_tx_start_bd *tx_start_bd; 5464 uint16_t bd_prod, pkt_prod, total_pkt_size; 5465 uint8_t mac_type; 5466 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan; 5467 struct bxe_softc *sc; 5468 uint16_t tx_bd_avail; 5469 struct ether_vlan_header *eh; 5470 uint32_t pbd_e2_parsing_data = 0; 5471 uint8_t hlen = 0; 5472 int tmp_bd; 5473 int i; 5474 5475 sc = fp->sc; 5476 5477 M_ASSERTPKTHDR(*m_head); 5478 5479 m0 = *m_head; 5480 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0; 5481 tx_start_bd = NULL; 5482 tx_data_bd = NULL; 5483 tx_total_pkt_size_bd = NULL; 5484 5485 /* get the H/W pointer for packets and BDs */ 5486 pkt_prod = fp->tx_pkt_prod; 5487 bd_prod = fp->tx_bd_prod; 5488 5489 mac_type = UNICAST_ADDRESS; 5490 5491 /* map the mbuf into the next open DMAable memory */ 5492 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)]; 5493 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5494 tx_buf->m_map, m0, 5495 segs, &nsegs, BUS_DMA_NOWAIT); 5496 5497 /* mapping errors */ 5498 if(__predict_false(error != 0)) { 5499 fp->eth_q_stats.tx_dma_mapping_failure++; 5500 if (error == ENOMEM) { 5501 /* resource issue, try again later */ 5502 rc = ENOMEM; 5503 } else if (error == EFBIG) { 5504 /* possibly recoverable with defragmentation */ 5505 fp->eth_q_stats.mbuf_defrag_attempts++; 5506 m0 = m_defrag(*m_head, M_NOWAIT); 5507 if (m0 == NULL) { 5508 fp->eth_q_stats.mbuf_defrag_failures++; 5509 rc = ENOBUFS; 5510 } else { 5511 /* defrag successful, try mapping again */ 5512 *m_head = m0; 5513 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5514 tx_buf->m_map, m0, 5515 segs, &nsegs, BUS_DMA_NOWAIT); 5516 if (error) { 5517 fp->eth_q_stats.tx_dma_mapping_failure++; 5518 rc = error; 5519 } 5520 } 5521 } else { 5522 /* unknown, unrecoverable mapping error */ 5523 
BLOGE(sc, "Unknown TX mapping error rc=%d\n", error); 5524 bxe_dump_mbuf(sc, m0, FALSE); 5525 rc = error; 5526 } 5527 5528 goto bxe_tx_encap_continue; 5529 } 5530 5531 tx_bd_avail = bxe_tx_avail(sc, fp); 5532 5533 /* make sure there is enough room in the send queue */ 5534 if (__predict_false(tx_bd_avail < (nsegs + 2))) { 5535 /* Recoverable, try again later. */ 5536 fp->eth_q_stats.tx_hw_queue_full++; 5537 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5538 rc = ENOMEM; 5539 goto bxe_tx_encap_continue; 5540 } 5541 5542 /* capture the current H/W TX chain high watermark */ 5543 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth < 5544 (TX_BD_USABLE - tx_bd_avail))) { 5545 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail); 5546 } 5547 5548 /* make sure it fits in the packet window */ 5549 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5550 /* 5551 * The mbuf may be to big for the controller to handle. If the frame 5552 * is a TSO frame we'll need to do an additional check. 5553 */ 5554 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5555 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) { 5556 goto bxe_tx_encap_continue; /* OK to send */ 5557 } else { 5558 fp->eth_q_stats.tx_window_violation_tso++; 5559 } 5560 } else { 5561 fp->eth_q_stats.tx_window_violation_std++; 5562 } 5563 5564 /* lets try to defragment this mbuf and remap it */ 5565 fp->eth_q_stats.mbuf_defrag_attempts++; 5566 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5567 5568 m0 = m_defrag(*m_head, M_NOWAIT); 5569 if (m0 == NULL) { 5570 fp->eth_q_stats.mbuf_defrag_failures++; 5571 /* Ugh, just drop the frame... :( */ 5572 rc = ENOBUFS; 5573 } else { 5574 /* defrag successful, try mapping again */ 5575 *m_head = m0; 5576 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5577 tx_buf->m_map, m0, 5578 segs, &nsegs, BUS_DMA_NOWAIT); 5579 if (error) { 5580 fp->eth_q_stats.tx_dma_mapping_failure++; 5581 /* No sense in trying to defrag/copy chain, drop it. 
:( */ 5582 rc = error; 5583 } 5584 else { 5585 /* if the chain is still too long then drop it */ 5586 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5587 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5588 rc = ENODEV; 5589 } 5590 } 5591 } 5592 } 5593 5594 bxe_tx_encap_continue: 5595 5596 /* Check for errors */ 5597 if (rc) { 5598 if (rc == ENOMEM) { 5599 /* recoverable try again later */ 5600 } else { 5601 fp->eth_q_stats.tx_soft_errors++; 5602 fp->eth_q_stats.mbuf_alloc_tx--; 5603 m_freem(*m_head); 5604 *m_head = NULL; 5605 } 5606 5607 return (rc); 5608 } 5609 5610 /* set flag according to packet type (UNICAST_ADDRESS is default) */ 5611 if (m0->m_flags & M_BCAST) { 5612 mac_type = BROADCAST_ADDRESS; 5613 } else if (m0->m_flags & M_MCAST) { 5614 mac_type = MULTICAST_ADDRESS; 5615 } 5616 5617 /* store the mbuf into the mbuf ring */ 5618 tx_buf->m = m0; 5619 tx_buf->first_bd = fp->tx_bd_prod; 5620 tx_buf->flags = 0; 5621 5622 /* prepare the first transmit (start) BD for the mbuf */ 5623 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; 5624 5625 BLOGD(sc, DBG_TX, 5626 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", 5627 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 5628 5629 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 5630 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 5631 tx_start_bd->nbytes = htole16(segs[0].ds_len); 5632 total_pkt_size += tx_start_bd->nbytes; 5633 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 5634 5635 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 5636 5637 /* all frames have at least Start BD + Parsing BD */ 5638 nbds = nsegs + 1; 5639 tx_start_bd->nbd = htole16(nbds); 5640 5641 if (m0->m_flags & M_VLANTAG) { 5642 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); 5643 tx_start_bd->bd_flags.as_bitfield |= 5644 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 5645 } else { 5646 /* vf tx, start bd must hold the ethertype for fw to enforce it */ 5647 if (IS_VF(sc)) { 5648 /* map ethernet header to find type and header length */ 5649 eh = mtod(m0, struct ether_vlan_header *); 5650 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; 5651 } else { 5652 /* used by FW for packet accounting */ 5653 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); 5654 #if 0 5655 /* 5656 * If NPAR-SD is active then FW should do the tagging regardless 5657 * of value of priority. Otherwise, if priority indicates this is 5658 * a control packet we need to indicate to FW to avoid tagging. 5659 */ 5660 if (!IS_MF_AFEX(sc) && (mbuf priority == PRIO_CONTROL)) { 5661 SET_FLAG(tx_start_bd->general_data, 5662 ETH_TX_START_BD_FORCE_VLAN_MODE, 1); 5663 } 5664 #endif 5665 } 5666 } 5667 5668 /* 5669 * add a parsing BD from the chain. 
The parsing BD is always added 5670 * though it is only used for TSO and chksum 5671 */ 5672 bd_prod = TX_BD_NEXT(bd_prod); 5673 5674 if (m0->m_pkthdr.csum_flags) { 5675 if (m0->m_pkthdr.csum_flags & CSUM_IP) { 5676 fp->eth_q_stats.tx_ofld_frames_csum_ip++; 5677 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; 5678 } 5679 5680 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { 5681 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5682 ETH_TX_BD_FLAGS_L4_CSUM); 5683 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { 5684 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5685 ETH_TX_BD_FLAGS_IS_UDP | 5686 ETH_TX_BD_FLAGS_L4_CSUM); 5687 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || 5688 (m0->m_pkthdr.csum_flags & CSUM_TSO)) { 5689 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 5690 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { 5691 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | 5692 ETH_TX_BD_FLAGS_IS_UDP); 5693 } 5694 } 5695 5696 if (!CHIP_IS_E1x(sc)) { 5697 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; 5698 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 5699 5700 if (m0->m_pkthdr.csum_flags) { 5701 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); 5702 } 5703 5704 #if 0 5705 /* 5706 * Add the MACs to the parsing BD if the module param was 5707 * explicitly set, if this is a vf, or in switch independent 5708 * mode. 5709 */ 5710 if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) { 5711 eh = mtod(m0, struct ether_vlan_header *); 5712 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, 5713 &pbd_e2->data.mac_addr.src_mid, 5714 &pbd_e2->data.mac_addr.src_lo, 5715 eh->evl_shost); 5716 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, 5717 &pbd_e2->data.mac_addr.dst_mid, 5718 &pbd_e2->data.mac_addr.dst_lo, 5719 eh->evl_dhost); 5720 } 5721 #endif 5722 5723 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, 5724 mac_type); 5725 } else { 5726 uint16_t global_data = 0; 5727 5728 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; 5729 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 5730 5731 if (m0->m_pkthdr.csum_flags) { 5732 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); 5733 } 5734 5735 SET_FLAG(global_data, 5736 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); 5737 pbd_e1x->global_data |= htole16(global_data); 5738 } 5739 5740 /* setup the parsing BD with TSO specific info */ 5741 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5742 fp->eth_q_stats.tx_ofld_frames_lso++; 5743 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 5744 5745 if (__predict_false(tx_start_bd->nbytes > hlen)) { 5746 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; 5747 5748 /* split the first BD into header/data making the fw job easy */ 5749 nbds++; 5750 tx_start_bd->nbd = htole16(nbds); 5751 tx_start_bd->nbytes = htole16(hlen); 5752 5753 bd_prod = TX_BD_NEXT(bd_prod); 5754 5755 /* new transmit BD after the tx_parse_bd */ 5756 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5757 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); 5758 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); 5759 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); 5760 if (tx_total_pkt_size_bd == NULL) { 5761 tx_total_pkt_size_bd = tx_data_bd; 5762 } 5763 5764 BLOGD(sc, DBG_TX, 5765 "TSO split header size is %d (%x:%x) nbds %d\n", 5766 le16toh(tx_start_bd->nbytes), 5767 le32toh(tx_start_bd->addr_hi), 5768 le32toh(tx_start_bd->addr_lo), 5769 nbds); 5770 } 5771 5772 if (!CHIP_IS_E1x(sc)) { 5773 
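            /* E2 and newer devices take the MSS in the e2 parse BD's parsing_data */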
bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); 5774 } else { 5775 bxe_set_pbd_lso(m0, pbd_e1x); 5776 } 5777 } 5778 5779 if (pbd_e2_parsing_data) { 5780 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); 5781 } 5782 5783 /* prepare remaining BDs, start tx bd contains first seg/frag */ 5784 for (i = 1; i < nsegs ; i++) { 5785 bd_prod = TX_BD_NEXT(bd_prod); 5786 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5787 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); 5788 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); 5789 tx_data_bd->nbytes = htole16(segs[i].ds_len); 5790 if (tx_total_pkt_size_bd == NULL) { 5791 tx_total_pkt_size_bd = tx_data_bd; 5792 } 5793 total_pkt_size += tx_data_bd->nbytes; 5794 } 5795 5796 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); 5797 5798 if (tx_total_pkt_size_bd != NULL) { 5799 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; 5800 } 5801 5802 if (__predict_false(sc->debug & DBG_TX)) { 5803 tmp_bd = tx_buf->first_bd; 5804 for (i = 0; i < nbds; i++) 5805 { 5806 if (i == 0) { 5807 BLOGD(sc, DBG_TX, 5808 "TX Strt: %p bd=%d nbd=%d vlan=0x%x " 5809 "bd_flags=0x%x hdr_nbds=%d\n", 5810 tx_start_bd, 5811 tmp_bd, 5812 le16toh(tx_start_bd->nbd), 5813 le16toh(tx_start_bd->vlan_or_ethertype), 5814 tx_start_bd->bd_flags.as_bitfield, 5815 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); 5816 } else if (i == 1) { 5817 if (pbd_e1x) { 5818 BLOGD(sc, DBG_TX, 5819 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " 5820 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " 5821 "tcp_seq=%u total_hlen_w=%u\n", 5822 pbd_e1x, 5823 tmp_bd, 5824 pbd_e1x->global_data, 5825 pbd_e1x->ip_hlen_w, 5826 pbd_e1x->ip_id, 5827 pbd_e1x->lso_mss, 5828 pbd_e1x->tcp_flags, 5829 pbd_e1x->tcp_pseudo_csum, 5830 pbd_e1x->tcp_send_seq, 5831 le16toh(pbd_e1x->total_hlen_w)); 5832 } else { /* if (pbd_e2) */ 5833 BLOGD(sc, DBG_TX, 5834 "-> Parse: %p bd=%d dst=%02x:%02x:%02x " 5835 "src=%02x:%02x:%02x parsing_data=0x%x\n", 5836 pbd_e2, 5837 tmp_bd, 5838 pbd_e2->data.mac_addr.dst_hi, 5839 pbd_e2->data.mac_addr.dst_mid, 5840 pbd_e2->data.mac_addr.dst_lo, 5841 pbd_e2->data.mac_addr.src_hi, 5842 pbd_e2->data.mac_addr.src_mid, 5843 pbd_e2->data.mac_addr.src_lo, 5844 pbd_e2->parsing_data); 5845 } 5846 } 5847 5848 if (i != 1) { /* skip parse db as it doesn't hold data */ 5849 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; 5850 BLOGD(sc, DBG_TX, 5851 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n", 5852 tx_data_bd, 5853 tmp_bd, 5854 le16toh(tx_data_bd->nbytes), 5855 le32toh(tx_data_bd->addr_hi), 5856 le32toh(tx_data_bd->addr_lo)); 5857 } 5858 5859 tmp_bd = TX_BD_NEXT(tmp_bd); 5860 } 5861 } 5862 5863 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); 5864 5865 /* update TX BD producer index value for next TX */ 5866 bd_prod = TX_BD_NEXT(bd_prod); 5867 5868 /* 5869 * If the chain of tx_bd's describing this frame is adjacent to or spans 5870 * an eth_tx_next_bd element then we need to increment the nbds value. 5871 */ 5872 if (TX_BD_IDX(bd_prod) < nbds) { 5873 nbds++; 5874 } 5875 5876 /* don't allow reordering of writes for nbd and packets */ 5877 mb(); 5878 5879 fp->tx_db.data.prod += nbds; 5880 5881 /* producer points to the next free tx_bd at this point */ 5882 fp->tx_pkt_prod++; 5883 fp->tx_bd_prod = bd_prod; 5884 5885 DOORBELL(sc, fp->index, fp->tx_db.raw); 5886 5887 fp->eth_q_stats.tx_pkts++; 5888 5889 /* Prevent speculative reads from getting ahead of the status block. 
*/ 5890 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 5891 0, 0, BUS_SPACE_BARRIER_READ); 5892 5893 /* Prevent speculative reads from getting ahead of the doorbell. */ 5894 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 5895 0, 0, BUS_SPACE_BARRIER_READ); 5896 5897 return (0); 5898 } 5899 5900 static void 5901 bxe_tx_start_locked(struct bxe_softc *sc, 5902 if_t ifp, 5903 struct bxe_fastpath *fp) 5904 { 5905 struct mbuf *m = NULL; 5906 int tx_count = 0; 5907 uint16_t tx_bd_avail; 5908 5909 BXE_FP_TX_LOCK_ASSERT(fp); 5910 5911 /* keep adding entries while there are frames to send */ 5912 while (!if_sendq_empty(ifp)) { 5913 5914 /* 5915 * check for any frames to send 5916 * dequeue can still be NULL even if queue is not empty 5917 */ 5918 m = if_dequeue(ifp); 5919 if (__predict_false(m == NULL)) { 5920 break; 5921 } 5922 5923 /* the mbuf now belongs to us */ 5924 fp->eth_q_stats.mbuf_alloc_tx++; 5925 5926 /* 5927 * Put the frame into the transmit ring. If we don't have room, 5928 * place the mbuf back at the head of the TX queue, set the 5929 * OACTIVE flag, and wait for the NIC to drain the chain. 5930 */ 5931 if (__predict_false(bxe_tx_encap(fp, &m))) { 5932 fp->eth_q_stats.tx_encap_failures++; 5933 if (m != NULL) { 5934 /* mark the TX queue as full and return the frame */ 5935 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 5936 if_sendq_prepend(ifp, m); 5937 fp->eth_q_stats.mbuf_alloc_tx--; 5938 fp->eth_q_stats.tx_queue_xoff++; 5939 } 5940 5941 /* stop looking for more work */ 5942 break; 5943 } 5944 5945 /* the frame was enqueued successfully */ 5946 tx_count++; 5947 5948 /* send a copy of the frame to any BPF listeners. */ 5949 if_etherbpfmtap(ifp, m); 5950 5951 tx_bd_avail = bxe_tx_avail(sc, fp); 5952 5953 /* handle any completions if we're running low */ 5954 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5955 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5956 bxe_txeof(sc, fp); 5957 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 5958 break; 5959 } 5960 } 5961 } 5962 5963 /* all TX packets were dequeued and/or the tx ring is full */ 5964 if (tx_count > 0) { 5965 /* reset the TX watchdog timeout timer */ 5966 fp->watchdog_timer = BXE_TX_TIMEOUT; 5967 } 5968 } 5969 5970 /* Legacy (non-RSS) dispatch routine */ 5971 static void 5972 bxe_tx_start(if_t ifp) 5973 { 5974 struct bxe_softc *sc; 5975 struct bxe_fastpath *fp; 5976 5977 sc = if_getsoftc(ifp); 5978 5979 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 5980 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 5981 return; 5982 } 5983 5984 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 5985 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); 5986 return; 5987 } 5988 5989 if (!sc->link_vars.link_up) { 5990 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 5991 return; 5992 } 5993 5994 fp = &sc->fp[0]; 5995 5996 BXE_FP_TX_LOCK(fp); 5997 bxe_tx_start_locked(sc, ifp, fp); 5998 BXE_FP_TX_UNLOCK(fp); 5999 } 6000 6001 #if __FreeBSD_version >= 800000 6002 6003 static int 6004 bxe_tx_mq_start_locked(struct bxe_softc *sc, 6005 if_t ifp, 6006 struct bxe_fastpath *fp, 6007 struct mbuf *m) 6008 { 6009 struct buf_ring *tx_br = fp->tx_br; 6010 struct mbuf *next; 6011 int depth, rc, tx_count; 6012 uint16_t tx_bd_avail; 6013 6014 rc = tx_count = 0; 6015 6016 BXE_FP_TX_LOCK_ASSERT(fp); 6017 6018 if (!tx_br) { 6019 BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); 6020 return (EINVAL); 6021 } 6022 6023 if (!sc->link_vars.link_up || 6024 (ifp->if_drv_flags & 6025 (IFF_DRV_RUNNING | 
IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { 6026 rc = drbr_enqueue_drv(ifp, tx_br, m); 6027 goto bxe_tx_mq_start_locked_exit; 6028 } 6029 6030 /* fetch the depth of the driver queue */ 6031 depth = drbr_inuse_drv(ifp, tx_br); 6032 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { 6033 fp->eth_q_stats.tx_max_drbr_queue_depth = depth; 6034 } 6035 6036 if (m == NULL) { 6037 /* no new work, check for pending frames */ 6038 next = drbr_dequeue_drv(ifp, tx_br); 6039 } else if (drbr_needs_enqueue_drv(ifp, tx_br)) { 6040 /* have both new and pending work, maintain packet order */ 6041 rc = drbr_enqueue_drv(ifp, tx_br, m); 6042 if (rc != 0) { 6043 fp->eth_q_stats.tx_soft_errors++; 6044 goto bxe_tx_mq_start_locked_exit; 6045 } 6046 next = drbr_dequeue_drv(ifp, tx_br); 6047 } else { 6048 /* new work only and nothing pending */ 6049 next = m; 6050 } 6051 6052 /* keep adding entries while there are frames to send */ 6053 while (next != NULL) { 6054 6055 /* the mbuf now belongs to us */ 6056 fp->eth_q_stats.mbuf_alloc_tx++; 6057 6058 /* 6059 * Put the frame into the transmit ring. If we don't have room, 6060 * place the mbuf back at the head of the TX queue, set the 6061 * OACTIVE flag, and wait for the NIC to drain the chain. 6062 */ 6063 rc = bxe_tx_encap(fp, &next); 6064 if (__predict_false(rc != 0)) { 6065 fp->eth_q_stats.tx_encap_failures++; 6066 if (next != NULL) { 6067 /* mark the TX queue as full and save the frame */ 6068 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 6069 /* XXX this may reorder the frame */ 6070 rc = drbr_enqueue_drv(ifp, tx_br, next); 6071 fp->eth_q_stats.mbuf_alloc_tx--; 6072 fp->eth_q_stats.tx_frames_deferred++; 6073 } 6074 6075 /* stop looking for more work */ 6076 break; 6077 } 6078 6079 /* the transmit frame was enqueued successfully */ 6080 tx_count++; 6081 6082 /* send a copy of the frame to any BPF listeners */ 6083 if_etherbpfmtap(ifp, next); 6084 6085 tx_bd_avail = bxe_tx_avail(sc, fp); 6086 6087 /* handle any completions if we're running low */ 6088 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 6089 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 6090 bxe_txeof(sc, fp); 6091 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 6092 break; 6093 } 6094 } 6095 6096 next = drbr_dequeue_drv(ifp, tx_br); 6097 } 6098 6099 /* all TX packets were dequeued and/or the tx ring is full */ 6100 if (tx_count > 0) { 6101 /* reset the TX watchdog timeout timer */ 6102 fp->watchdog_timer = BXE_TX_TIMEOUT; 6103 } 6104 6105 bxe_tx_mq_start_locked_exit: 6106 6107 return (rc); 6108 } 6109 6110 /* Multiqueue (TSS) dispatch routine. 
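 * A TX queue is selected from the mbuf's flowid when an RSS hash type is
 * present (fp_index = flowid % num_queues); otherwise queue 0 is used. If
 * the chosen queue's lock cannot be taken immediately, the frame is
 * deferred to that queue's buf_ring instead.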
*/ 6111 static int 6112 bxe_tx_mq_start(struct ifnet *ifp, 6113 struct mbuf *m) 6114 { 6115 struct bxe_softc *sc = if_getsoftc(ifp); 6116 struct bxe_fastpath *fp; 6117 int fp_index, rc; 6118 6119 fp_index = 0; /* default is the first queue */ 6120 6121 /* check if flowid is set */ 6122 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 6123 fp_index = (m->m_pkthdr.flowid % sc->num_queues); 6124 6125 fp = &sc->fp[fp_index]; 6126 6127 if (BXE_FP_TX_TRYLOCK(fp)) { 6128 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); 6129 BXE_FP_TX_UNLOCK(fp); 6130 } else 6131 rc = drbr_enqueue_drv(ifp, fp->tx_br, m); 6132 6133 return (rc); 6134 } 6135 6136 static void 6137 bxe_mq_flush(struct ifnet *ifp) 6138 { 6139 struct bxe_softc *sc = if_getsoftc(ifp); 6140 struct bxe_fastpath *fp; 6141 struct mbuf *m; 6142 int i; 6143 6144 for (i = 0; i < sc->num_queues; i++) { 6145 fp = &sc->fp[i]; 6146 6147 if (fp->state != BXE_FP_STATE_OPEN) { 6148 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", 6149 fp->index, fp->state); 6150 continue; 6151 } 6152 6153 if (fp->tx_br != NULL) { 6154 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); 6155 BXE_FP_TX_LOCK(fp); 6156 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { 6157 m_freem(m); 6158 } 6159 BXE_FP_TX_UNLOCK(fp); 6160 } 6161 } 6162 6163 if_qflush(ifp); 6164 } 6165 6166 #endif /* FreeBSD_version >= 800000 */ 6167 6168 static uint16_t 6169 bxe_cid_ilt_lines(struct bxe_softc *sc) 6170 { 6171 if (IS_SRIOV(sc)) { 6172 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); 6173 } 6174 return (L2_ILT_LINES(sc)); 6175 } 6176 6177 static void 6178 bxe_ilt_set_info(struct bxe_softc *sc) 6179 { 6180 struct ilt_client_info *ilt_client; 6181 struct ecore_ilt *ilt = sc->ilt; 6182 uint16_t line = 0; 6183 6184 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 6185 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); 6186 6187 /* CDU */ 6188 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 6189 ilt_client->client_num = ILT_CLIENT_CDU; 6190 ilt_client->page_size = CDU_ILT_PAGE_SZ; 6191 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 6192 ilt_client->start = line; 6193 line += bxe_cid_ilt_lines(sc); 6194 6195 if (CNIC_SUPPORT(sc)) { 6196 line += CNIC_ILT_LINES; 6197 } 6198 6199 ilt_client->end = (line - 1); 6200 6201 BLOGD(sc, DBG_LOAD, 6202 "ilt client[CDU]: start %d, end %d, " 6203 "psz 0x%x, flags 0x%x, hw psz %d\n", 6204 ilt_client->start, ilt_client->end, 6205 ilt_client->page_size, 6206 ilt_client->flags, 6207 ilog2(ilt_client->page_size >> 12)); 6208 6209 /* QM */ 6210 if (QM_INIT(sc->qm_cid_count)) { 6211 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 6212 ilt_client->client_num = ILT_CLIENT_QM; 6213 ilt_client->page_size = QM_ILT_PAGE_SZ; 6214 ilt_client->flags = 0; 6215 ilt_client->start = line; 6216 6217 /* 4 bytes for each cid */ 6218 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 6219 QM_ILT_PAGE_SZ); 6220 6221 ilt_client->end = (line - 1); 6222 6223 BLOGD(sc, DBG_LOAD, 6224 "ilt client[QM]: start %d, end %d, " 6225 "psz 0x%x, flags 0x%x, hw psz %d\n", 6226 ilt_client->start, ilt_client->end, 6227 ilt_client->page_size, ilt_client->flags, 6228 ilog2(ilt_client->page_size >> 12)); 6229 } 6230 6231 if (CNIC_SUPPORT(sc)) { 6232 /* SRC */ 6233 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 6234 ilt_client->client_num = ILT_CLIENT_SRC; 6235 ilt_client->page_size = SRC_ILT_PAGE_SZ; 6236 ilt_client->flags = 0; 6237 ilt_client->start = line; 6238 line += SRC_ILT_LINES; 6239 ilt_client->end = (line - 1); 6240 6241 BLOGD(sc, DBG_LOAD, 6242 "ilt 
client[SRC]: start %d, end %d, " 6243 "psz 0x%x, flags 0x%x, hw psz %d\n", 6244 ilt_client->start, ilt_client->end, 6245 ilt_client->page_size, ilt_client->flags, 6246 ilog2(ilt_client->page_size >> 12)); 6247 6248 /* TM */ 6249 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 6250 ilt_client->client_num = ILT_CLIENT_TM; 6251 ilt_client->page_size = TM_ILT_PAGE_SZ; 6252 ilt_client->flags = 0; 6253 ilt_client->start = line; 6254 line += TM_ILT_LINES; 6255 ilt_client->end = (line - 1); 6256 6257 BLOGD(sc, DBG_LOAD, 6258 "ilt client[TM]: start %d, end %d, " 6259 "psz 0x%x, flags 0x%x, hw psz %d\n", 6260 ilt_client->start, ilt_client->end, 6261 ilt_client->page_size, ilt_client->flags, 6262 ilog2(ilt_client->page_size >> 12)); 6263 } 6264 6265 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); 6266 } 6267 6268 static void 6269 bxe_set_fp_rx_buf_size(struct bxe_softc *sc) 6270 { 6271 int i; 6272 uint32_t rx_buf_size; 6273 6274 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); 6275 6276 for (i = 0; i < sc->num_queues; i++) { 6277 if(rx_buf_size <= MCLBYTES){ 6278 sc->fp[i].rx_buf_size = rx_buf_size; 6279 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6280 }else if (rx_buf_size <= MJUMPAGESIZE){ 6281 sc->fp[i].rx_buf_size = rx_buf_size; 6282 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 6283 }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){ 6284 sc->fp[i].rx_buf_size = MCLBYTES; 6285 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6286 }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){ 6287 sc->fp[i].rx_buf_size = MJUMPAGESIZE; 6288 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 6289 }else { 6290 sc->fp[i].rx_buf_size = MCLBYTES; 6291 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6292 } 6293 } 6294 } 6295 6296 static int 6297 bxe_alloc_ilt_mem(struct bxe_softc *sc) 6298 { 6299 int rc = 0; 6300 6301 if ((sc->ilt = 6302 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), 6303 M_BXE_ILT, 6304 (M_NOWAIT | M_ZERO))) == NULL) { 6305 rc = 1; 6306 } 6307 6308 return (rc); 6309 } 6310 6311 static int 6312 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) 6313 { 6314 int rc = 0; 6315 6316 if ((sc->ilt->lines = 6317 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), 6318 M_BXE_ILT, 6319 (M_NOWAIT | M_ZERO))) == NULL) { 6320 rc = 1; 6321 } 6322 6323 return (rc); 6324 } 6325 6326 static void 6327 bxe_free_ilt_mem(struct bxe_softc *sc) 6328 { 6329 if (sc->ilt != NULL) { 6330 free(sc->ilt, M_BXE_ILT); 6331 sc->ilt = NULL; 6332 } 6333 } 6334 6335 static void 6336 bxe_free_ilt_lines_mem(struct bxe_softc *sc) 6337 { 6338 if (sc->ilt->lines != NULL) { 6339 free(sc->ilt->lines, M_BXE_ILT); 6340 sc->ilt->lines = NULL; 6341 } 6342 } 6343 6344 static void 6345 bxe_free_mem(struct bxe_softc *sc) 6346 { 6347 int i; 6348 6349 #if 0 6350 if (!CONFIGURE_NIC_MODE(sc)) { 6351 /* free searcher T2 table */ 6352 bxe_dma_free(sc, &sc->t2); 6353 } 6354 #endif 6355 6356 for (i = 0; i < L2_ILT_LINES(sc); i++) { 6357 bxe_dma_free(sc, &sc->context[i].vcxt_dma); 6358 sc->context[i].vcxt = NULL; 6359 sc->context[i].size = 0; 6360 } 6361 6362 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); 6363 6364 bxe_free_ilt_lines_mem(sc); 6365 6366 #if 0 6367 bxe_iov_free_mem(sc); 6368 #endif 6369 } 6370 6371 static int 6372 bxe_alloc_mem(struct bxe_softc *sc) 6373 { 6374 int context_size; 6375 int allocated; 6376 int i; 6377 6378 #if 0 6379 if (!CONFIGURE_NIC_MODE(sc)) { 6380 /* allocate searcher T2 table */ 6381 if (bxe_dma_alloc(sc, SRC_T2_SZ, 6382 &sc->t2, "searcher t2 table") != 0) { 6383 return (-1); 6384 } 6385 } 6386 #endif 6387 6388 /* 6389 * 
Allocate memory for CDU context: 6390 * This memory is allocated separately and not in the generic ILT 6391 * functions because CDU differs in few aspects: 6392 * 1. There can be multiple entities allocating memory for context - 6393 * regular L2, CNIC, and SRIOV drivers. Each separately controls 6394 * its own ILT lines. 6395 * 2. Since CDU page-size is not a single 4KB page (which is the case 6396 * for the other ILT clients), to be efficient we want to support 6397 * allocation of sub-page-size in the last entry. 6398 * 3. Context pointers are used by the driver to pass to FW / update 6399 * the context (for the other ILT clients the pointers are used just to 6400 * free the memory during unload). 6401 */ 6402 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); 6403 for (i = 0, allocated = 0; allocated < context_size; i++) { 6404 sc->context[i].size = min(CDU_ILT_PAGE_SZ, 6405 (context_size - allocated)); 6406 6407 if (bxe_dma_alloc(sc, sc->context[i].size, 6408 &sc->context[i].vcxt_dma, 6409 "cdu context") != 0) { 6410 bxe_free_mem(sc); 6411 return (-1); 6412 } 6413 6414 sc->context[i].vcxt = 6415 (union cdu_context *)sc->context[i].vcxt_dma.vaddr; 6416 6417 allocated += sc->context[i].size; 6418 } 6419 6420 bxe_alloc_ilt_lines_mem(sc); 6421 6422 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n", 6423 sc->ilt, sc->ilt->start_line, sc->ilt->lines); 6424 { 6425 for (i = 0; i < 4; i++) { 6426 BLOGD(sc, DBG_LOAD, 6427 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n", 6428 i, 6429 sc->ilt->clients[i].page_size, 6430 sc->ilt->clients[i].start, 6431 sc->ilt->clients[i].end, 6432 sc->ilt->clients[i].client_num, 6433 sc->ilt->clients[i].flags); 6434 } 6435 } 6436 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { 6437 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n"); 6438 bxe_free_mem(sc); 6439 return (-1); 6440 } 6441 6442 #if 0 6443 if (bxe_iov_alloc_mem(sc)) { 6444 BLOGE(sc, "Failed to allocate memory for SRIOV\n"); 6445 bxe_free_mem(sc); 6446 return (-1); 6447 } 6448 #endif 6449 6450 return (0); 6451 } 6452 6453 static void 6454 bxe_free_rx_bd_chain(struct bxe_fastpath *fp) 6455 { 6456 struct bxe_softc *sc; 6457 int i; 6458 6459 sc = fp->sc; 6460 6461 if (fp->rx_mbuf_tag == NULL) { 6462 return; 6463 } 6464 6465 /* free all mbufs and unload all maps */ 6466 for (i = 0; i < RX_BD_TOTAL; i++) { 6467 if (fp->rx_mbuf_chain[i].m_map != NULL) { 6468 bus_dmamap_sync(fp->rx_mbuf_tag, 6469 fp->rx_mbuf_chain[i].m_map, 6470 BUS_DMASYNC_POSTREAD); 6471 bus_dmamap_unload(fp->rx_mbuf_tag, 6472 fp->rx_mbuf_chain[i].m_map); 6473 } 6474 6475 if (fp->rx_mbuf_chain[i].m != NULL) { 6476 m_freem(fp->rx_mbuf_chain[i].m); 6477 fp->rx_mbuf_chain[i].m = NULL; 6478 fp->eth_q_stats.mbuf_alloc_rx--; 6479 } 6480 } 6481 } 6482 6483 static void 6484 bxe_free_tpa_pool(struct bxe_fastpath *fp) 6485 { 6486 struct bxe_softc *sc; 6487 int i, max_agg_queues; 6488 6489 sc = fp->sc; 6490 6491 if (fp->rx_mbuf_tag == NULL) { 6492 return; 6493 } 6494 6495 max_agg_queues = MAX_AGG_QS(sc); 6496 6497 /* release all mbufs and unload all DMA maps in the TPA pool */ 6498 for (i = 0; i < max_agg_queues; i++) { 6499 if (fp->rx_tpa_info[i].bd.m_map != NULL) { 6500 bus_dmamap_sync(fp->rx_mbuf_tag, 6501 fp->rx_tpa_info[i].bd.m_map, 6502 BUS_DMASYNC_POSTREAD); 6503 bus_dmamap_unload(fp->rx_mbuf_tag, 6504 fp->rx_tpa_info[i].bd.m_map); 6505 } 6506 6507 if (fp->rx_tpa_info[i].bd.m != NULL) { 6508 m_freem(fp->rx_tpa_info[i].bd.m); 6509 fp->rx_tpa_info[i].bd.m = NULL; 6510 fp->eth_q_stats.mbuf_alloc_tpa--; 6511 } 6512 } 6513 
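    /*
     * All TPA pool mbufs for this fastpath have now been released; the
     * per-queue TPA state is re-initialized when bxe_alloc_fp_buffers()
     * refills the pool.
     */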
} 6514 6515 static void 6516 bxe_free_sge_chain(struct bxe_fastpath *fp) 6517 { 6518 struct bxe_softc *sc; 6519 int i; 6520 6521 sc = fp->sc; 6522 6523 if (fp->rx_sge_mbuf_tag == NULL) { 6524 return; 6525 } 6526 6527 /* rree all mbufs and unload all maps */ 6528 for (i = 0; i < RX_SGE_TOTAL; i++) { 6529 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { 6530 bus_dmamap_sync(fp->rx_sge_mbuf_tag, 6531 fp->rx_sge_mbuf_chain[i].m_map, 6532 BUS_DMASYNC_POSTREAD); 6533 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 6534 fp->rx_sge_mbuf_chain[i].m_map); 6535 } 6536 6537 if (fp->rx_sge_mbuf_chain[i].m != NULL) { 6538 m_freem(fp->rx_sge_mbuf_chain[i].m); 6539 fp->rx_sge_mbuf_chain[i].m = NULL; 6540 fp->eth_q_stats.mbuf_alloc_sge--; 6541 } 6542 } 6543 } 6544 6545 static void 6546 bxe_free_fp_buffers(struct bxe_softc *sc) 6547 { 6548 struct bxe_fastpath *fp; 6549 int i; 6550 6551 for (i = 0; i < sc->num_queues; i++) { 6552 fp = &sc->fp[i]; 6553 6554 #if __FreeBSD_version >= 800000 6555 if (fp->tx_br != NULL) { 6556 /* just in case bxe_mq_flush() wasn't called */ 6557 if (mtx_initialized(&fp->tx_mtx)) { 6558 struct mbuf *m; 6559 6560 BXE_FP_TX_LOCK(fp); 6561 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) 6562 m_freem(m); 6563 BXE_FP_TX_UNLOCK(fp); 6564 } 6565 buf_ring_free(fp->tx_br, M_DEVBUF); 6566 fp->tx_br = NULL; 6567 } 6568 #endif 6569 6570 /* free all RX buffers */ 6571 bxe_free_rx_bd_chain(fp); 6572 bxe_free_tpa_pool(fp); 6573 bxe_free_sge_chain(fp); 6574 6575 if (fp->eth_q_stats.mbuf_alloc_rx != 0) { 6576 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n", 6577 fp->eth_q_stats.mbuf_alloc_rx); 6578 } 6579 6580 if (fp->eth_q_stats.mbuf_alloc_sge != 0) { 6581 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", 6582 fp->eth_q_stats.mbuf_alloc_sge); 6583 } 6584 6585 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { 6586 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", 6587 fp->eth_q_stats.mbuf_alloc_tpa); 6588 } 6589 6590 if (fp->eth_q_stats.mbuf_alloc_tx != 0) { 6591 BLOGE(sc, "failed to release tx mbufs (%d left)\n", 6592 fp->eth_q_stats.mbuf_alloc_tx); 6593 } 6594 6595 /* XXX verify all mbufs were reclaimed */ 6596 6597 if (mtx_initialized(&fp->tx_mtx)) { 6598 mtx_destroy(&fp->tx_mtx); 6599 } 6600 6601 if (mtx_initialized(&fp->rx_mtx)) { 6602 mtx_destroy(&fp->rx_mtx); 6603 } 6604 } 6605 } 6606 6607 static int 6608 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, 6609 uint16_t prev_index, 6610 uint16_t index) 6611 { 6612 struct bxe_sw_rx_bd *rx_buf; 6613 struct eth_rx_bd *rx_bd; 6614 bus_dma_segment_t segs[1]; 6615 bus_dmamap_t map; 6616 struct mbuf *m; 6617 int nsegs, rc; 6618 6619 rc = 0; 6620 6621 /* allocate the new RX BD mbuf */ 6622 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6623 if (__predict_false(m == NULL)) { 6624 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++; 6625 return (ENOBUFS); 6626 } 6627 6628 fp->eth_q_stats.mbuf_alloc_rx++; 6629 6630 /* initialize the mbuf buffer length */ 6631 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6632 6633 /* map the mbuf into non-paged pool */ 6634 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6635 fp->rx_mbuf_spare_map, 6636 m, segs, &nsegs, BUS_DMA_NOWAIT); 6637 if (__predict_false(rc != 0)) { 6638 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; 6639 m_freem(m); 6640 fp->eth_q_stats.mbuf_alloc_rx--; 6641 return (rc); 6642 } 6643 6644 /* all mbufs must map to a single segment */ 6645 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6646 6647 /* release any existing RX BD mbuf mappings */ 6648 6649 if (prev_index != index) { 
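        /*
         * Release the mapping still loaded at prev_index; that map is
         * recycled as the new spare map further below.
         */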
6650 rx_buf = &fp->rx_mbuf_chain[prev_index]; 6651 6652 if (rx_buf->m_map != NULL) { 6653 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6654 BUS_DMASYNC_POSTREAD); 6655 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6656 } 6657 6658 /* 6659 * We only get here from bxe_rxeof() when the maximum number 6660 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already 6661 * holds the mbuf in the prev_index so it's OK to NULL it out 6662 * here without concern of a memory leak. 6663 */ 6664 fp->rx_mbuf_chain[prev_index].m = NULL; 6665 } 6666 6667 rx_buf = &fp->rx_mbuf_chain[index]; 6668 6669 if (rx_buf->m_map != NULL) { 6670 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6671 BUS_DMASYNC_POSTREAD); 6672 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6673 } 6674 6675 /* save the mbuf and mapping info for a future packet */ 6676 map = (prev_index != index) ? 6677 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; 6678 rx_buf->m_map = fp->rx_mbuf_spare_map; 6679 fp->rx_mbuf_spare_map = map; 6680 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6681 BUS_DMASYNC_PREREAD); 6682 rx_buf->m = m; 6683 6684 rx_bd = &fp->rx_chain[index]; 6685 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6686 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6687 6688 return (rc); 6689 } 6690 6691 static int 6692 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 6693 int queue) 6694 { 6695 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 6696 bus_dma_segment_t segs[1]; 6697 bus_dmamap_t map; 6698 struct mbuf *m; 6699 int nsegs; 6700 int rc = 0; 6701 6702 /* allocate the new TPA mbuf */ 6703 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6704 if (__predict_false(m == NULL)) { 6705 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; 6706 return (ENOBUFS); 6707 } 6708 6709 fp->eth_q_stats.mbuf_alloc_tpa++; 6710 6711 /* initialize the mbuf buffer length */ 6712 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6713 6714 /* map the mbuf into non-paged pool */ 6715 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6716 fp->rx_tpa_info_mbuf_spare_map, 6717 m, segs, &nsegs, BUS_DMA_NOWAIT); 6718 if (__predict_false(rc != 0)) { 6719 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; 6720 m_free(m); 6721 fp->eth_q_stats.mbuf_alloc_tpa--; 6722 return (rc); 6723 } 6724 6725 /* all mbufs must map to a single segment */ 6726 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6727 6728 /* release any existing TPA mbuf mapping */ 6729 if (tpa_info->bd.m_map != NULL) { 6730 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6731 BUS_DMASYNC_POSTREAD); 6732 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); 6733 } 6734 6735 /* save the mbuf and mapping info for the TPA mbuf */ 6736 map = tpa_info->bd.m_map; 6737 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; 6738 fp->rx_tpa_info_mbuf_spare_map = map; 6739 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6740 BUS_DMASYNC_PREREAD); 6741 tpa_info->bd.m = m; 6742 tpa_info->seg = segs[0]; 6743 6744 return (rc); 6745 } 6746 6747 /* 6748 * Allocate an mbuf and assign it to the receive scatter gather chain. The 6749 * caller must take care to save a copy of the existing mbuf in the SG mbuf 6750 * chain. 
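 * On success the new mbuf is loaded through the spare SGE DMA map, the
 * spare and per-index maps are swapped, and the SGE descriptor at 'index'
 * is updated with the new buffer's bus address.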
6751 */ 6752 static int 6753 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 6754 uint16_t index) 6755 { 6756 struct bxe_sw_rx_bd *sge_buf; 6757 struct eth_rx_sge *sge; 6758 bus_dma_segment_t segs[1]; 6759 bus_dmamap_t map; 6760 struct mbuf *m; 6761 int nsegs; 6762 int rc = 0; 6763 6764 /* allocate a new SGE mbuf */ 6765 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); 6766 if (__predict_false(m == NULL)) { 6767 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; 6768 return (ENOMEM); 6769 } 6770 6771 fp->eth_q_stats.mbuf_alloc_sge++; 6772 6773 /* initialize the mbuf buffer length */ 6774 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; 6775 6776 /* map the SGE mbuf into non-paged pool */ 6777 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, 6778 fp->rx_sge_mbuf_spare_map, 6779 m, segs, &nsegs, BUS_DMA_NOWAIT); 6780 if (__predict_false(rc != 0)) { 6781 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; 6782 m_freem(m); 6783 fp->eth_q_stats.mbuf_alloc_sge--; 6784 return (rc); 6785 } 6786 6787 /* all mbufs must map to a single segment */ 6788 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6789 6790 sge_buf = &fp->rx_sge_mbuf_chain[index]; 6791 6792 /* release any existing SGE mbuf mapping */ 6793 if (sge_buf->m_map != NULL) { 6794 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6795 BUS_DMASYNC_POSTREAD); 6796 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); 6797 } 6798 6799 /* save the mbuf and mapping info for a future packet */ 6800 map = sge_buf->m_map; 6801 sge_buf->m_map = fp->rx_sge_mbuf_spare_map; 6802 fp->rx_sge_mbuf_spare_map = map; 6803 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6804 BUS_DMASYNC_PREREAD); 6805 sge_buf->m = m; 6806 6807 sge = &fp->rx_sge_chain[index]; 6808 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6809 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6810 6811 return (rc); 6812 } 6813 6814 static __noinline int 6815 bxe_alloc_fp_buffers(struct bxe_softc *sc) 6816 { 6817 struct bxe_fastpath *fp; 6818 int i, j, rc = 0; 6819 int ring_prod, cqe_ring_prod; 6820 int max_agg_queues; 6821 6822 for (i = 0; i < sc->num_queues; i++) { 6823 fp = &sc->fp[i]; 6824 6825 #if __FreeBSD_version >= 800000 6826 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, 6827 M_NOWAIT, &fp->tx_mtx); 6828 if (fp->tx_br == NULL) { 6829 BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i); 6830 goto bxe_alloc_fp_buffers_error; 6831 } 6832 #endif 6833 6834 ring_prod = cqe_ring_prod = 0; 6835 fp->rx_bd_cons = 0; 6836 fp->rx_cq_cons = 0; 6837 6838 /* allocate buffers for the RX BDs in RX BD chain */ 6839 for (j = 0; j < sc->max_rx_bufs; j++) { 6840 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); 6841 if (rc != 0) { 6842 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 6843 i, rc); 6844 goto bxe_alloc_fp_buffers_error; 6845 } 6846 6847 ring_prod = RX_BD_NEXT(ring_prod); 6848 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); 6849 } 6850 6851 fp->rx_bd_prod = ring_prod; 6852 fp->rx_cq_prod = cqe_ring_prod; 6853 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; 6854 6855 max_agg_queues = MAX_AGG_QS(sc); 6856 6857 fp->tpa_enable = TRUE; 6858 6859 /* fill the TPA pool */ 6860 for (j = 0; j < max_agg_queues; j++) { 6861 rc = bxe_alloc_rx_tpa_mbuf(fp, j); 6862 if (rc != 0) { 6863 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", 6864 i, j); 6865 fp->tpa_enable = FALSE; 6866 goto bxe_alloc_fp_buffers_error; 6867 } 6868 6869 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; 6870 } 6871 6872 if (fp->tpa_enable) { 6873 /* fill the RX SGE chain */ 6874 ring_prod 
= 0; 6875 for (j = 0; j < RX_SGE_USABLE; j++) { 6876 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); 6877 if (rc != 0) { 6878 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", 6879 i, ring_prod); 6880 fp->tpa_enable = FALSE; 6881 ring_prod = 0; 6882 goto bxe_alloc_fp_buffers_error; 6883 } 6884 6885 ring_prod = RX_SGE_NEXT(ring_prod); 6886 } 6887 6888 fp->rx_sge_prod = ring_prod; 6889 } 6890 } 6891 6892 return (0); 6893 6894 bxe_alloc_fp_buffers_error: 6895 6896 /* unwind what was already allocated */ 6897 bxe_free_rx_bd_chain(fp); 6898 bxe_free_tpa_pool(fp); 6899 bxe_free_sge_chain(fp); 6900 6901 return (ENOBUFS); 6902 } 6903 6904 static void 6905 bxe_free_fw_stats_mem(struct bxe_softc *sc) 6906 { 6907 bxe_dma_free(sc, &sc->fw_stats_dma); 6908 6909 sc->fw_stats_num = 0; 6910 6911 sc->fw_stats_req_size = 0; 6912 sc->fw_stats_req = NULL; 6913 sc->fw_stats_req_mapping = 0; 6914 6915 sc->fw_stats_data_size = 0; 6916 sc->fw_stats_data = NULL; 6917 sc->fw_stats_data_mapping = 0; 6918 } 6919 6920 static int 6921 bxe_alloc_fw_stats_mem(struct bxe_softc *sc) 6922 { 6923 uint8_t num_queue_stats; 6924 int num_groups; 6925 6926 /* number of queues for statistics is number of eth queues */ 6927 num_queue_stats = BXE_NUM_ETH_QUEUES(sc); 6928 6929 /* 6930 * Total number of FW statistics requests = 6931 * 1 for port stats + 1 for PF stats + num of queues 6932 */ 6933 sc->fw_stats_num = (2 + num_queue_stats); 6934 6935 /* 6936 * Request is built from stats_query_header and an array of 6937 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT 6938 * rules. The real number or requests is configured in the 6939 * stats_query_header. 6940 */ 6941 num_groups = 6942 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + 6943 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0)); 6944 6945 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n", 6946 sc->fw_stats_num, num_groups); 6947 6948 sc->fw_stats_req_size = 6949 (sizeof(struct stats_query_header) + 6950 (num_groups * sizeof(struct stats_query_cmd_group))); 6951 6952 /* 6953 * Data for statistics requests + stats_counter. 6954 * stats_counter holds per-STORM counters that are incremented when 6955 * STORM has finished with the current request. Memory for FCoE 6956 * offloaded statistics are counted anyway, even if they will not be sent. 6957 * VF stats are not accounted for here as the data of VF stats is stored 6958 * in memory allocated by the VF, not here. 
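     * The data area sized below is laid out as the stats_counter block,
     * followed by the per-port stats, the per-PF stats, and one
     * per_queue_stats entry for each ETH queue.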
6959 */ 6960 sc->fw_stats_data_size = 6961 (sizeof(struct stats_counter) + 6962 sizeof(struct per_port_stats) + 6963 sizeof(struct per_pf_stats) + 6964 /* sizeof(struct fcoe_statistics_params) + */ 6965 (sizeof(struct per_queue_stats) * num_queue_stats)); 6966 6967 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), 6968 &sc->fw_stats_dma, "fw stats") != 0) { 6969 bxe_free_fw_stats_mem(sc); 6970 return (-1); 6971 } 6972 6973 /* set up the shortcuts */ 6974 6975 sc->fw_stats_req = 6976 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; 6977 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; 6978 6979 sc->fw_stats_data = 6980 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + 6981 sc->fw_stats_req_size); 6982 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + 6983 sc->fw_stats_req_size); 6984 6985 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n", 6986 (uintmax_t)sc->fw_stats_req_mapping); 6987 6988 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n", 6989 (uintmax_t)sc->fw_stats_data_mapping); 6990 6991 return (0); 6992 } 6993 6994 /* 6995 * Bits map: 6996 * 0-7 - Engine0 load counter. 6997 * 8-15 - Engine1 load counter. 6998 * 16 - Engine0 RESET_IN_PROGRESS bit. 6999 * 17 - Engine1 RESET_IN_PROGRESS bit. 7000 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active 7001 * function on the engine 7002 * 19 - Engine1 ONE_IS_LOADED. 7003 * 20 - Chip reset flow bit. When set none-leader must wait for both engines 7004 * leader to complete (check for both RESET_IN_PROGRESS bits and not 7005 * for just the one belonging to its engine). 7006 */ 7007 #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 7008 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff 7009 #define BXE_PATH0_LOAD_CNT_SHIFT 0 7010 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00 7011 #define BXE_PATH1_LOAD_CNT_SHIFT 8 7012 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000 7013 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000 7014 #define BXE_GLOBAL_RESET_BIT 0x00040000 7015 7016 /* set the GLOBAL_RESET bit, should be run under rtnl lock */ 7017 static void 7018 bxe_set_reset_global(struct bxe_softc *sc) 7019 { 7020 uint32_t val; 7021 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7022 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7023 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT); 7024 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7025 } 7026 7027 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */ 7028 static void 7029 bxe_clear_reset_global(struct bxe_softc *sc) 7030 { 7031 uint32_t val; 7032 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7033 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7034 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT)); 7035 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7036 } 7037 7038 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */ 7039 static uint8_t 7040 bxe_reset_is_global(struct bxe_softc *sc) 7041 { 7042 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7043 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); 7044 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; 7045 } 7046 7047 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ 7048 static void 7049 bxe_set_reset_done(struct bxe_softc *sc) 7050 { 7051 uint32_t val; 7052 uint32_t bit = SC_PATH(sc) ? 
BXE_PATH1_RST_IN_PROG_BIT : 7053 BXE_PATH0_RST_IN_PROG_BIT; 7054 7055 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7056 7057 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7058 /* Clear the bit */ 7059 val &= ~bit; 7060 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7061 7062 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7063 } 7064 7065 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 7066 static void 7067 bxe_set_reset_in_progress(struct bxe_softc *sc) 7068 { 7069 uint32_t val; 7070 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 7071 BXE_PATH0_RST_IN_PROG_BIT; 7072 7073 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7074 7075 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7076 /* Set the bit */ 7077 val |= bit; 7078 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7079 7080 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7081 } 7082 7083 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 7084 static uint8_t 7085 bxe_reset_is_done(struct bxe_softc *sc, 7086 int engine) 7087 { 7088 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7089 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : 7090 BXE_PATH0_RST_IN_PROG_BIT; 7091 7092 /* return false if bit is set */ 7093 return (val & bit) ? FALSE : TRUE; 7094 } 7095 7096 /* get the load status for an engine, should be run under rtnl lock */ 7097 static uint8_t 7098 bxe_get_load_status(struct bxe_softc *sc, 7099 int engine) 7100 { 7101 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK : 7102 BXE_PATH0_LOAD_CNT_MASK; 7103 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : 7104 BXE_PATH0_LOAD_CNT_SHIFT; 7105 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7106 7107 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7108 7109 val = ((val & mask) >> shift); 7110 7111 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); 7112 7113 return (val != 0); 7114 } 7115 7116 /* set pf load mark */ 7117 /* XXX needs to be under rtnl lock */ 7118 static void 7119 bxe_set_pf_load(struct bxe_softc *sc) 7120 { 7121 uint32_t val; 7122 uint32_t val1; 7123 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7124 BXE_PATH0_LOAD_CNT_MASK; 7125 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 7126 BXE_PATH0_LOAD_CNT_SHIFT; 7127 7128 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7129 7130 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7131 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7132 7133 /* get the current counter value */ 7134 val1 = ((val & mask) >> shift); 7135 7136 /* set bit of this PF */ 7137 val1 |= (1 << SC_ABS_FUNC(sc)); 7138 7139 /* clear the old value */ 7140 val &= ~mask; 7141 7142 /* set the new one */ 7143 val |= ((val1 << shift) & mask); 7144 7145 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7146 7147 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7148 } 7149 7150 /* clear pf load mark */ 7151 /* XXX needs to be under rtnl lock */ 7152 static uint8_t 7153 bxe_clear_pf_load(struct bxe_softc *sc) 7154 { 7155 uint32_t val1, val; 7156 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7157 BXE_PATH0_LOAD_CNT_MASK; 7158 uint32_t shift = SC_PATH(sc) ? 
BXE_PATH1_LOAD_CNT_SHIFT : 7159 BXE_PATH0_LOAD_CNT_SHIFT; 7160 7161 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7162 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7163 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val); 7164 7165 /* get the current counter value */ 7166 val1 = (val & mask) >> shift; 7167 7168 /* clear bit of that PF */ 7169 val1 &= ~(1 << SC_ABS_FUNC(sc)); 7170 7171 /* clear the old value */ 7172 val &= ~mask; 7173 7174 /* set the new one */ 7175 val |= ((val1 << shift) & mask); 7176 7177 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7178 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7179 return (val1 != 0); 7180 } 7181 7182 /* send load requrest to mcp and analyze response */ 7183 static int 7184 bxe_nic_load_request(struct bxe_softc *sc, 7185 uint32_t *load_code) 7186 { 7187 /* init fw_seq */ 7188 sc->fw_seq = 7189 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 7190 DRV_MSG_SEQ_NUMBER_MASK); 7191 7192 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq); 7193 7194 /* get the current FW pulse sequence */ 7195 sc->fw_drv_pulse_wr_seq = 7196 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & 7197 DRV_PULSE_SEQ_MASK); 7198 7199 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n", 7200 sc->fw_drv_pulse_wr_seq); 7201 7202 /* load request */ 7203 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 7204 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 7205 7206 /* if the MCP fails to respond we must abort */ 7207 if (!(*load_code)) { 7208 BLOGE(sc, "MCP response failure!\n"); 7209 return (-1); 7210 } 7211 7212 /* if MCP refused then must abort */ 7213 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 7214 BLOGE(sc, "MCP refused load request\n"); 7215 return (-1); 7216 } 7217 7218 return (0); 7219 } 7220 7221 /* 7222 * Check whether another PF has already loaded FW to chip. In virtualized 7223 * environments a pf from anoth VM may have already initialized the device 7224 * including loading FW. 7225 */ 7226 static int 7227 bxe_nic_load_analyze_req(struct bxe_softc *sc, 7228 uint32_t load_code) 7229 { 7230 uint32_t my_fw, loaded_fw; 7231 7232 /* is another pf loaded on this engine? */ 7233 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 7234 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 7235 /* build my FW version dword */ 7236 my_fw = (BCM_5710_FW_MAJOR_VERSION + 7237 (BCM_5710_FW_MINOR_VERSION << 8 ) + 7238 (BCM_5710_FW_REVISION_VERSION << 16) + 7239 (BCM_5710_FW_ENGINEERING_VERSION << 24)); 7240 7241 /* read loaded FW from chip */ 7242 loaded_fw = REG_RD(sc, XSEM_REG_PRAM); 7243 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n", 7244 loaded_fw, my_fw); 7245 7246 /* abort nic load if version mismatch */ 7247 if (my_fw != loaded_fw) { 7248 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)", 7249 loaded_fw, my_fw); 7250 return (-1); 7251 } 7252 } 7253 7254 return (0); 7255 } 7256 7257 /* mark PMF if applicable */ 7258 static void 7259 bxe_nic_load_pmf(struct bxe_softc *sc, 7260 uint32_t load_code) 7261 { 7262 uint32_t ncsi_oem_data_addr; 7263 7264 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 7265 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 7266 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 7267 /* 7268 * Barrier here for ordering between the writing to sc->port.pmf here 7269 * and reading it from the periodic task. 7270 */ 7271 sc->port.pmf = 1; 7272 mb(); 7273 } else { 7274 sc->port.pmf = 0; 7275 } 7276 7277 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf); 7278 7279 /* XXX needed? 
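       On the function performing the first load of the chip
       (LOAD_COMMON_CHIP) this zeroes the driver_version field of the
       NC-SI OEM data block in shmem2, when the management firmware
       exposes one.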
*/ 7280 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { 7281 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { 7282 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); 7283 if (ncsi_oem_data_addr) { 7284 REG_WR(sc, 7285 (ncsi_oem_data_addr + 7286 offsetof(struct glob_ncsi_oem_data, driver_version)), 7287 0); 7288 } 7289 } 7290 } 7291 } 7292 7293 static void 7294 bxe_read_mf_cfg(struct bxe_softc *sc) 7295 { 7296 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); 7297 int abs_func; 7298 int vn; 7299 7300 if (BXE_NOMCP(sc)) { 7301 return; /* what should be the default bvalue in this case */ 7302 } 7303 7304 /* 7305 * The formula for computing the absolute function number is... 7306 * For 2 port configuration (4 functions per port): 7307 * abs_func = 2 * vn + SC_PORT + SC_PATH 7308 * For 4 port configuration (2 functions per port): 7309 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH 7310 */ 7311 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 7312 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); 7313 if (abs_func >= E1H_FUNC_MAX) { 7314 break; 7315 } 7316 sc->devinfo.mf_info.mf_config[vn] = 7317 MFCFG_RD(sc, func_mf_config[abs_func].config); 7318 } 7319 7320 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & 7321 FUNC_MF_CFG_FUNC_DISABLED) { 7322 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); 7323 sc->flags |= BXE_MF_FUNC_DIS; 7324 } else { 7325 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); 7326 sc->flags &= ~BXE_MF_FUNC_DIS; 7327 } 7328 } 7329 7330 /* acquire split MCP access lock register */ 7331 static int bxe_acquire_alr(struct bxe_softc *sc) 7332 { 7333 uint32_t j, val; 7334 7335 for (j = 0; j < 1000; j++) { 7336 val = (1UL << 31); 7337 REG_WR(sc, GRCBASE_MCP + 0x9c, val); 7338 val = REG_RD(sc, GRCBASE_MCP + 0x9c); 7339 if (val & (1L << 31)) 7340 break; 7341 7342 DELAY(5000); 7343 } 7344 7345 if (!(val & (1L << 31))) { 7346 BLOGE(sc, "Cannot acquire MCP access lock register\n"); 7347 return (-1); 7348 } 7349 7350 return (0); 7351 } 7352 7353 /* release split MCP access lock register */ 7354 static void bxe_release_alr(struct bxe_softc *sc) 7355 { 7356 REG_WR(sc, GRCBASE_MCP + 0x9c, 0); 7357 } 7358 7359 static void 7360 bxe_fan_failure(struct bxe_softc *sc) 7361 { 7362 int port = SC_PORT(sc); 7363 uint32_t ext_phy_config; 7364 7365 /* mark the failure */ 7366 ext_phy_config = 7367 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 7368 7369 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 7370 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 7371 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, 7372 ext_phy_config); 7373 7374 /* log the failure */ 7375 BLOGW(sc, "Fan Failure has caused the driver to shutdown " 7376 "the card to prevent permanent damage. " 7377 "Please contact OEM Support for assistance\n"); 7378 7379 /* XXX */ 7380 #if 1 7381 bxe_panic(sc, ("Schedule task to handle fan failure\n")); 7382 #else 7383 /* 7384 * Schedule device reset (unload) 7385 * This is due to some boards consuming sufficient power when driver is 7386 * up to overheat if fan fails. 
7387 */ 7388 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); 7389 schedule_delayed_work(&sc->sp_rtnl_task, 0); 7390 #endif 7391 } 7392 7393 /* this function is called upon a link interrupt */ 7394 static void 7395 bxe_link_attn(struct bxe_softc *sc) 7396 { 7397 uint32_t pause_enabled = 0; 7398 struct host_port_stats *pstats; 7399 int cmng_fns; 7400 7401 /* Make sure that we are synced with the current statistics */ 7402 bxe_stats_handle(sc, STATS_EVENT_STOP); 7403 7404 elink_link_update(&sc->link_params, &sc->link_vars); 7405 7406 if (sc->link_vars.link_up) { 7407 7408 /* dropless flow control */ 7409 if (!CHIP_IS_E1(sc) && sc->dropless_fc) { 7410 pause_enabled = 0; 7411 7412 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 7413 pause_enabled = 1; 7414 } 7415 7416 REG_WR(sc, 7417 (BAR_USTRORM_INTMEM + 7418 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 7419 pause_enabled); 7420 } 7421 7422 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 7423 pstats = BXE_SP(sc, port_stats); 7424 /* reset old mac stats */ 7425 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); 7426 } 7427 7428 if (sc->state == BXE_STATE_OPEN) { 7429 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 7430 } 7431 } 7432 7433 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 7434 cmng_fns = bxe_get_cmng_fns_mode(sc); 7435 7436 if (cmng_fns != CMNG_FNS_NONE) { 7437 bxe_cmng_fns_init(sc, FALSE, cmng_fns); 7438 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7439 } else { 7440 /* rate shaping and fairness are disabled */ 7441 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); 7442 } 7443 } 7444 7445 bxe_link_report_locked(sc); 7446 7447 if (IS_MF(sc)) { 7448 ; // XXX bxe_link_sync_notify(sc); 7449 } 7450 } 7451 7452 static void 7453 bxe_attn_int_asserted(struct bxe_softc *sc, 7454 uint32_t asserted) 7455 { 7456 int port = SC_PORT(sc); 7457 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 7458 MISC_REG_AEU_MASK_ATTN_FUNC_0; 7459 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 7460 NIG_REG_MASK_INTERRUPT_PORT0; 7461 uint32_t aeu_mask; 7462 uint32_t nig_mask = 0; 7463 uint32_t reg_addr; 7464 uint32_t igu_acked; 7465 uint32_t cnt; 7466 7467 if (sc->attn_state & asserted) { 7468 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); 7469 } 7470 7471 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7472 7473 aeu_mask = REG_RD(sc, aeu_addr); 7474 7475 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", 7476 aeu_mask, asserted); 7477 7478 aeu_mask &= ~(asserted & 0x3ff); 7479 7480 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 7481 7482 REG_WR(sc, aeu_addr, aeu_mask); 7483 7484 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7485 7486 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 7487 sc->attn_state |= asserted; 7488 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 7489 7490 if (asserted & ATTN_HARD_WIRED_MASK) { 7491 if (asserted & ATTN_NIG_FOR_FUNC) { 7492 7493 bxe_acquire_phy_lock(sc); 7494 /* save nig interrupt mask */ 7495 nig_mask = REG_RD(sc, nig_int_mask_addr); 7496 7497 /* If nig_mask is not set, no need to call the update function */ 7498 if (nig_mask) { 7499 REG_WR(sc, nig_int_mask_addr, 0); 7500 7501 bxe_link_attn(sc); 7502 } 7503 7504 /* handle unicore attn? 
*/ 7505 } 7506 7507 if (asserted & ATTN_SW_TIMER_4_FUNC) { 7508 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); 7509 } 7510 7511 if (asserted & GPIO_2_FUNC) { 7512 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); 7513 } 7514 7515 if (asserted & GPIO_3_FUNC) { 7516 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); 7517 } 7518 7519 if (asserted & GPIO_4_FUNC) { 7520 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); 7521 } 7522 7523 if (port == 0) { 7524 if (asserted & ATTN_GENERAL_ATTN_1) { 7525 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); 7526 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 7527 } 7528 if (asserted & ATTN_GENERAL_ATTN_2) { 7529 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); 7530 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 7531 } 7532 if (asserted & ATTN_GENERAL_ATTN_3) { 7533 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); 7534 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 7535 } 7536 } else { 7537 if (asserted & ATTN_GENERAL_ATTN_4) { 7538 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); 7539 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 7540 } 7541 if (asserted & ATTN_GENERAL_ATTN_5) { 7542 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); 7543 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 7544 } 7545 if (asserted & ATTN_GENERAL_ATTN_6) { 7546 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); 7547 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 7548 } 7549 } 7550 } /* hardwired */ 7551 7552 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7553 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); 7554 } else { 7555 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 7556 } 7557 7558 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", 7559 asserted, 7560 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 7561 REG_WR(sc, reg_addr, asserted); 7562 7563 /* now set back the mask */ 7564 if (asserted & ATTN_NIG_FOR_FUNC) { 7565 /* 7566 * Verify that IGU ack through BAR was written before restoring 7567 * NIG mask. This loop should exit after 2-3 iterations max. 7568 */ 7569 if (sc->devinfo.int_block != INT_BLOCK_HC) { 7570 cnt = 0; 7571 7572 do { 7573 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 7574 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 7575 (++cnt < MAX_IGU_ATTN_ACK_TO)); 7576 7577 if (!igu_acked) { 7578 BLOGE(sc, "Failed to verify IGU ack on time\n"); 7579 } 7580 7581 mb(); 7582 } 7583 7584 REG_WR(sc, nig_int_mask_addr, nig_mask); 7585 7586 bxe_release_phy_lock(sc); 7587 } 7588 } 7589 7590 static void 7591 bxe_print_next_block(struct bxe_softc *sc, 7592 int idx, 7593 const char *blk) 7594 { 7595 BLOGI(sc, "%s%s", idx ? 
", " : "", blk); 7596 } 7597 7598 static int 7599 bxe_check_blocks_with_parity0(struct bxe_softc *sc, 7600 uint32_t sig, 7601 int par_num, 7602 uint8_t print) 7603 { 7604 uint32_t cur_bit = 0; 7605 int i = 0; 7606 7607 for (i = 0; sig; i++) { 7608 cur_bit = ((uint32_t)0x1 << i); 7609 if (sig & cur_bit) { 7610 switch (cur_bit) { 7611 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 7612 if (print) 7613 bxe_print_next_block(sc, par_num++, "BRB"); 7614 break; 7615 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 7616 if (print) 7617 bxe_print_next_block(sc, par_num++, "PARSER"); 7618 break; 7619 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 7620 if (print) 7621 bxe_print_next_block(sc, par_num++, "TSDM"); 7622 break; 7623 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 7624 if (print) 7625 bxe_print_next_block(sc, par_num++, "SEARCHER"); 7626 break; 7627 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 7628 if (print) 7629 bxe_print_next_block(sc, par_num++, "TCM"); 7630 break; 7631 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 7632 if (print) 7633 bxe_print_next_block(sc, par_num++, "TSEMI"); 7634 break; 7635 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 7636 if (print) 7637 bxe_print_next_block(sc, par_num++, "XPB"); 7638 break; 7639 } 7640 7641 /* Clear the bit */ 7642 sig &= ~cur_bit; 7643 } 7644 } 7645 7646 return (par_num); 7647 } 7648 7649 static int 7650 bxe_check_blocks_with_parity1(struct bxe_softc *sc, 7651 uint32_t sig, 7652 int par_num, 7653 uint8_t *global, 7654 uint8_t print) 7655 { 7656 int i = 0; 7657 uint32_t cur_bit = 0; 7658 for (i = 0; sig; i++) { 7659 cur_bit = ((uint32_t)0x1 << i); 7660 if (sig & cur_bit) { 7661 switch (cur_bit) { 7662 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 7663 if (print) 7664 bxe_print_next_block(sc, par_num++, "PBF"); 7665 break; 7666 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 7667 if (print) 7668 bxe_print_next_block(sc, par_num++, "QM"); 7669 break; 7670 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 7671 if (print) 7672 bxe_print_next_block(sc, par_num++, "TM"); 7673 break; 7674 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 7675 if (print) 7676 bxe_print_next_block(sc, par_num++, "XSDM"); 7677 break; 7678 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 7679 if (print) 7680 bxe_print_next_block(sc, par_num++, "XCM"); 7681 break; 7682 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 7683 if (print) 7684 bxe_print_next_block(sc, par_num++, "XSEMI"); 7685 break; 7686 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 7687 if (print) 7688 bxe_print_next_block(sc, par_num++, "DOORBELLQ"); 7689 break; 7690 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 7691 if (print) 7692 bxe_print_next_block(sc, par_num++, "NIG"); 7693 break; 7694 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 7695 if (print) 7696 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); 7697 *global = TRUE; 7698 break; 7699 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 7700 if (print) 7701 bxe_print_next_block(sc, par_num++, "DEBUG"); 7702 break; 7703 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 7704 if (print) 7705 bxe_print_next_block(sc, par_num++, "USDM"); 7706 break; 7707 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 7708 if (print) 7709 bxe_print_next_block(sc, par_num++, "UCM"); 7710 break; 7711 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 7712 if (print) 7713 bxe_print_next_block(sc, par_num++, "USEMI"); 7714 break; 7715 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 7716 if (print) 7717 bxe_print_next_block(sc, par_num++, "UPB"); 7718 break; 7719 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 
7720 if (print) 7721 bxe_print_next_block(sc, par_num++, "CSDM"); 7722 break; 7723 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 7724 if (print) 7725 bxe_print_next_block(sc, par_num++, "CCM"); 7726 break; 7727 } 7728 7729 /* Clear the bit */ 7730 sig &= ~cur_bit; 7731 } 7732 } 7733 7734 return (par_num); 7735 } 7736 7737 static int 7738 bxe_check_blocks_with_parity2(struct bxe_softc *sc, 7739 uint32_t sig, 7740 int par_num, 7741 uint8_t print) 7742 { 7743 uint32_t cur_bit = 0; 7744 int i = 0; 7745 7746 for (i = 0; sig; i++) { 7747 cur_bit = ((uint32_t)0x1 << i); 7748 if (sig & cur_bit) { 7749 switch (cur_bit) { 7750 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 7751 if (print) 7752 bxe_print_next_block(sc, par_num++, "CSEMI"); 7753 break; 7754 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 7755 if (print) 7756 bxe_print_next_block(sc, par_num++, "PXP"); 7757 break; 7758 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 7759 if (print) 7760 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); 7761 break; 7762 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 7763 if (print) 7764 bxe_print_next_block(sc, par_num++, "CFC"); 7765 break; 7766 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 7767 if (print) 7768 bxe_print_next_block(sc, par_num++, "CDU"); 7769 break; 7770 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 7771 if (print) 7772 bxe_print_next_block(sc, par_num++, "DMAE"); 7773 break; 7774 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 7775 if (print) 7776 bxe_print_next_block(sc, par_num++, "IGU"); 7777 break; 7778 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 7779 if (print) 7780 bxe_print_next_block(sc, par_num++, "MISC"); 7781 break; 7782 } 7783 7784 /* Clear the bit */ 7785 sig &= ~cur_bit; 7786 } 7787 } 7788 7789 return (par_num); 7790 } 7791 7792 static int 7793 bxe_check_blocks_with_parity3(struct bxe_softc *sc, 7794 uint32_t sig, 7795 int par_num, 7796 uint8_t *global, 7797 uint8_t print) 7798 { 7799 uint32_t cur_bit = 0; 7800 int i = 0; 7801 7802 for (i = 0; sig; i++) { 7803 cur_bit = ((uint32_t)0x1 << i); 7804 if (sig & cur_bit) { 7805 switch (cur_bit) { 7806 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 7807 if (print) 7808 bxe_print_next_block(sc, par_num++, "MCP ROM"); 7809 *global = TRUE; 7810 break; 7811 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 7812 if (print) 7813 bxe_print_next_block(sc, par_num++, 7814 "MCP UMP RX"); 7815 *global = TRUE; 7816 break; 7817 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 7818 if (print) 7819 bxe_print_next_block(sc, par_num++, 7820 "MCP UMP TX"); 7821 *global = TRUE; 7822 break; 7823 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 7824 if (print) 7825 bxe_print_next_block(sc, par_num++, 7826 "MCP SCPAD"); 7827 *global = TRUE; 7828 break; 7829 } 7830 7831 /* Clear the bit */ 7832 sig &= ~cur_bit; 7833 } 7834 } 7835 7836 return (par_num); 7837 } 7838 7839 static int 7840 bxe_check_blocks_with_parity4(struct bxe_softc *sc, 7841 uint32_t sig, 7842 int par_num, 7843 uint8_t print) 7844 { 7845 uint32_t cur_bit = 0; 7846 int i = 0; 7847 7848 for (i = 0; sig; i++) { 7849 cur_bit = ((uint32_t)0x1 << i); 7850 if (sig & cur_bit) { 7851 switch (cur_bit) { 7852 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 7853 if (print) 7854 bxe_print_next_block(sc, par_num++, "PGLUE_B"); 7855 break; 7856 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 7857 if (print) 7858 bxe_print_next_block(sc, par_num++, "ATC"); 7859 break; 7860 } 7861 7862 /* Clear the bit */ 7863 sig &= ~cur_bit; 7864 } 7865 } 7866 7867 return (par_num); 7868 } 7869 7870 static 
uint8_t 7871 bxe_parity_attn(struct bxe_softc *sc, 7872 uint8_t *global, 7873 uint8_t print, 7874 uint32_t *sig) 7875 { 7876 int par_num = 0; 7877 7878 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 7879 (sig[1] & HW_PRTY_ASSERT_SET_1) || 7880 (sig[2] & HW_PRTY_ASSERT_SET_2) || 7881 (sig[3] & HW_PRTY_ASSERT_SET_3) || 7882 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 7883 BLOGE(sc, "Parity error: HW block parity attention:\n" 7884 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 7885 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), 7886 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), 7887 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), 7888 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), 7889 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); 7890 7891 if (print) 7892 BLOGI(sc, "Parity errors detected in blocks: "); 7893 7894 par_num = 7895 bxe_check_blocks_with_parity0(sc, sig[0] & 7896 HW_PRTY_ASSERT_SET_0, 7897 par_num, print); 7898 par_num = 7899 bxe_check_blocks_with_parity1(sc, sig[1] & 7900 HW_PRTY_ASSERT_SET_1, 7901 par_num, global, print); 7902 par_num = 7903 bxe_check_blocks_with_parity2(sc, sig[2] & 7904 HW_PRTY_ASSERT_SET_2, 7905 par_num, print); 7906 par_num = 7907 bxe_check_blocks_with_parity3(sc, sig[3] & 7908 HW_PRTY_ASSERT_SET_3, 7909 par_num, global, print); 7910 par_num = 7911 bxe_check_blocks_with_parity4(sc, sig[4] & 7912 HW_PRTY_ASSERT_SET_4, 7913 par_num, print); 7914 7915 if (print) 7916 BLOGI(sc, "\n"); 7917 7918 return (TRUE); 7919 } 7920 7921 return (FALSE); 7922 } 7923 7924 static uint8_t 7925 bxe_chk_parity_attn(struct bxe_softc *sc, 7926 uint8_t *global, 7927 uint8_t print) 7928 { 7929 struct attn_route attn = { {0} }; 7930 int port = SC_PORT(sc); 7931 7932 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 7933 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 7934 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 7935 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 7936 7937 if (!CHIP_IS_E1x(sc)) 7938 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 7939 7940 return (bxe_parity_attn(sc, global, print, attn.sig)); 7941 } 7942 7943 static void 7944 bxe_attn_int_deasserted4(struct bxe_softc *sc, 7945 uint32_t attn) 7946 { 7947 uint32_t val; 7948 7949 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 7950 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 7951 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); 7952 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 7953 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 7954 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 7955 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 7956 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 7957 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 7958 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 7959 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 7960 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 7961 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 7962 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 7963 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 7964 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 7965 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 7966 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 7967 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 7968 if (val & 
PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 7969 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 7970 } 7971 7972 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 7973 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); 7974 BLOGE(sc, "ATC hw attention 0x%08x\n", val); 7975 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 7976 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 7977 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 7978 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 7979 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 7980 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 7981 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 7982 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 7983 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 7984 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 7985 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 7986 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 7987 } 7988 7989 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7990 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 7991 BLOGE(sc, "FATAL parity attention set4 0x%08x\n", 7992 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7993 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 7994 } 7995 } 7996 7997 static void 7998 bxe_e1h_disable(struct bxe_softc *sc) 7999 { 8000 int port = SC_PORT(sc); 8001 8002 bxe_tx_disable(sc); 8003 8004 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 8005 } 8006 8007 static void 8008 bxe_e1h_enable(struct bxe_softc *sc) 8009 { 8010 int port = SC_PORT(sc); 8011 8012 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 8013 8014 // XXX bxe_tx_enable(sc); 8015 } 8016 8017 /* 8018 * called due to MCP event (on pmf): 8019 * reread new bandwidth configuration 8020 * configure FW 8021 * notify others function about the change 8022 */ 8023 static void 8024 bxe_config_mf_bw(struct bxe_softc *sc) 8025 { 8026 if (sc->link_vars.link_up) { 8027 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); 8028 // XXX bxe_link_sync_notify(sc); 8029 } 8030 8031 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 8032 } 8033 8034 static void 8035 bxe_set_mf_bw(struct bxe_softc *sc) 8036 { 8037 bxe_config_mf_bw(sc); 8038 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 8039 } 8040 8041 static void 8042 bxe_handle_eee_event(struct bxe_softc *sc) 8043 { 8044 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); 8045 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 8046 } 8047 8048 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 8049 8050 static void 8051 bxe_drv_info_ether_stat(struct bxe_softc *sc) 8052 { 8053 struct eth_stats_info *ether_stat = 8054 &sc->sp->drv_info_to_mcp.ether_stat; 8055 8056 strlcpy(ether_stat->version, BXE_DRIVER_VERSION, 8057 ETH_STAT_INFO_VERSION_LEN); 8058 8059 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ 8060 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, 8061 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 8062 ether_stat->mac_local + MAC_PAD, 8063 MAC_PAD, ETH_ALEN); 8064 8065 ether_stat->mtu_size = sc->mtu; 8066 8067 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 8068 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { 8069 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 8070 } 8071 8072 // XXX ether_stat->feature_flags |= ???; 8073 8074 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 
1 : 0; 8075 8076 ether_stat->txq_size = sc->tx_ring_size; 8077 ether_stat->rxq_size = sc->rx_ring_size; 8078 } 8079 8080 static void 8081 bxe_handle_drv_info_req(struct bxe_softc *sc) 8082 { 8083 enum drv_info_opcode op_code; 8084 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 8085 8086 /* if drv_info version supported by MFW doesn't match - send NACK */ 8087 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 8088 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8089 return; 8090 } 8091 8092 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 8093 DRV_INFO_CONTROL_OP_CODE_SHIFT); 8094 8095 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 8096 8097 switch (op_code) { 8098 case ETH_STATS_OPCODE: 8099 bxe_drv_info_ether_stat(sc); 8100 break; 8101 case FCOE_STATS_OPCODE: 8102 case ISCSI_STATS_OPCODE: 8103 default: 8104 /* if op code isn't supported - send NACK */ 8105 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8106 return; 8107 } 8108 8109 /* 8110 * If we got drv_info attn from MFW then these fields are defined in 8111 * shmem2 for sure 8112 */ 8113 SHMEM2_WR(sc, drv_info_host_addr_lo, 8114 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8115 SHMEM2_WR(sc, drv_info_host_addr_hi, 8116 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8117 8118 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 8119 } 8120 8121 static void 8122 bxe_dcc_event(struct bxe_softc *sc, 8123 uint32_t dcc_event) 8124 { 8125 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); 8126 8127 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 8128 /* 8129 * This is the only place besides the function initialization 8130 * where the sc->flags can change so it is done without any 8131 * locks 8132 */ 8133 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 8134 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); 8135 sc->flags |= BXE_MF_FUNC_DIS; 8136 bxe_e1h_disable(sc); 8137 } else { 8138 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); 8139 sc->flags &= ~BXE_MF_FUNC_DIS; 8140 bxe_e1h_enable(sc); 8141 } 8142 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 8143 } 8144 8145 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 8146 bxe_config_mf_bw(sc); 8147 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 8148 } 8149 8150 /* Report results to MCP */ 8151 if (dcc_event) 8152 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 8153 else 8154 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 8155 } 8156 8157 static void 8158 bxe_pmf_update(struct bxe_softc *sc) 8159 { 8160 int port = SC_PORT(sc); 8161 uint32_t val; 8162 8163 sc->port.pmf = 1; 8164 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); 8165 8166 /* 8167 * We need the mb() to ensure the ordering between the writing to 8168 * sc->port.pmf here and reading it from the bxe_periodic_task(). 8169 */ 8170 mb(); 8171 8172 /* queue a periodic task */ 8173 // XXX schedule task... 
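    /*
     * A minimal sketch of what the scheduling above could look like,
     * assuming a per-softc callout; 'periodic_callout' and
     * 'bxe_periodic_callout_func' are illustrative names only and are not
     * defined in this file.
     */
#if 0
    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
#endif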
8174 8175 // XXX bxe_dcbx_pmf_update(sc); 8176 8177 /* enable nig attention */ 8178 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 8179 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8180 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); 8181 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); 8182 } else if (!CHIP_IS_E1x(sc)) { 8183 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 8184 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 8185 } 8186 8187 bxe_stats_handle(sc, STATS_EVENT_PMF); 8188 } 8189 8190 static int 8191 bxe_mc_assert(struct bxe_softc *sc) 8192 { 8193 char last_idx; 8194 int i, rc = 0; 8195 uint32_t row0, row1, row2, row3; 8196 8197 /* XSTORM */ 8198 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 8199 if (last_idx) 8200 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8201 8202 /* print the asserts */ 8203 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8204 8205 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 8206 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); 8207 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); 8208 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); 8209 8210 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8211 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8212 i, row3, row2, row1, row0); 8213 rc++; 8214 } else { 8215 break; 8216 } 8217 } 8218 8219 /* TSTORM */ 8220 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 8221 if (last_idx) { 8222 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8223 } 8224 8225 /* print the asserts */ 8226 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8227 8228 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 8229 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); 8230 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); 8231 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); 8232 8233 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8234 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8235 i, row3, row2, row1, row0); 8236 rc++; 8237 } else { 8238 break; 8239 } 8240 } 8241 8242 /* CSTORM */ 8243 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 8244 if (last_idx) { 8245 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8246 } 8247 8248 /* print the asserts */ 8249 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8250 8251 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 8252 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); 8253 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); 8254 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); 8255 8256 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8257 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8258 i, row3, row2, row1, row0); 8259 rc++; 8260 } else { 8261 break; 8262 } 8263 } 8264 8265 /* USTORM */ 8266 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 8267 if (last_idx) { 8268 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8269 } 8270 8271 /* print the asserts */ 8272 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8273 8274 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 8275 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); 8276 row2 = REG_RD(sc, 
BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8); 8277 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); 8278 8279 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8280 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8281 i, row3, row2, row1, row0); 8282 rc++; 8283 } else { 8284 break; 8285 } 8286 } 8287 8288 return (rc); 8289 } 8290 8291 static void 8292 bxe_attn_int_deasserted3(struct bxe_softc *sc, 8293 uint32_t attn) 8294 { 8295 int func = SC_FUNC(sc); 8296 uint32_t val; 8297 8298 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 8299 8300 if (attn & BXE_PMF_LINK_ASSERT(sc)) { 8301 8302 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 8303 bxe_read_mf_cfg(sc); 8304 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = 8305 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 8306 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); 8307 8308 if (val & DRV_STATUS_DCC_EVENT_MASK) 8309 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); 8310 8311 if (val & DRV_STATUS_SET_MF_BW) 8312 bxe_set_mf_bw(sc); 8313 8314 if (val & DRV_STATUS_DRV_INFO_REQ) 8315 bxe_handle_drv_info_req(sc); 8316 8317 #if 0 8318 if (val & DRV_STATUS_VF_DISABLED) 8319 bxe_vf_handle_flr_event(sc); 8320 #endif 8321 8322 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) 8323 bxe_pmf_update(sc); 8324 8325 #if 0 8326 if (sc->port.pmf && 8327 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 8328 (sc->dcbx_enabled > 0)) 8329 /* start dcbx state machine */ 8330 bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED); 8331 #endif 8332 8333 #if 0 8334 if (val & DRV_STATUS_AFEX_EVENT_MASK) 8335 bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK); 8336 #endif 8337 8338 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 8339 bxe_handle_eee_event(sc); 8340 8341 if (sc->link_vars.periodic_flags & 8342 ELINK_PERIODIC_FLAGS_LINK_EVENT) { 8343 /* sync with link */ 8344 bxe_acquire_phy_lock(sc); 8345 sc->link_vars.periodic_flags &= 8346 ~ELINK_PERIODIC_FLAGS_LINK_EVENT; 8347 bxe_release_phy_lock(sc); 8348 if (IS_MF(sc)) 8349 ; // XXX bxe_link_sync_notify(sc); 8350 bxe_link_report(sc); 8351 } 8352 8353 /* 8354 * Always call it here: bxe_link_report() will 8355 * prevent the link indication duplication. 8356 */ 8357 bxe_link_status_update(sc); 8358 8359 } else if (attn & BXE_MC_ASSERT_BITS) { 8360 8361 BLOGE(sc, "MC assert!\n"); 8362 bxe_mc_assert(sc); 8363 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); 8364 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); 8365 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); 8366 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); 8367 bxe_panic(sc, ("MC assert!\n")); 8368 8369 } else if (attn & BXE_MCP_ASSERT) { 8370 8371 BLOGE(sc, "MCP assert!\n"); 8372 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); 8373 // XXX bxe_fw_dump(sc); 8374 8375 } else { 8376 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); 8377 } 8378 } 8379 8380 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 8381 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); 8382 if (attn & BXE_GRC_TIMEOUT) { 8383 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); 8384 BLOGE(sc, "GRC time-out 0x%08x\n", val); 8385 } 8386 if (attn & BXE_GRC_RSV) { 8387 val = CHIP_IS_E1(sc) ? 
0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN); 8388 BLOGE(sc, "GRC reserved 0x%08x\n", val); 8389 } 8390 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 8391 } 8392 } 8393 8394 static void 8395 bxe_attn_int_deasserted2(struct bxe_softc *sc, 8396 uint32_t attn) 8397 { 8398 int port = SC_PORT(sc); 8399 int reg_offset; 8400 uint32_t val0, mask0, val1, mask1; 8401 uint32_t val; 8402 8403 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 8404 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); 8405 BLOGE(sc, "CFC hw attention 0x%08x\n", val); 8406 /* CFC error attention */ 8407 if (val & 0x2) { 8408 BLOGE(sc, "FATAL error from CFC\n"); 8409 } 8410 } 8411 8412 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 8413 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); 8414 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val); 8415 /* RQ_USDMDP_FIFO_OVERFLOW */ 8416 if (val & 0x18000) { 8417 BLOGE(sc, "FATAL error from PXP\n"); 8418 } 8419 8420 if (!CHIP_IS_E1x(sc)) { 8421 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); 8422 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val); 8423 } 8424 } 8425 8426 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR 8427 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT 8428 8429 if (attn & AEU_PXP2_HW_INT_BIT) { 8430 /* CQ47854 workaround do not panic on 8431 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8432 */ 8433 if (!CHIP_IS_E1x(sc)) { 8434 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); 8435 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); 8436 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); 8437 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); 8438 /* 8439 * If the olny PXP2_EOP_ERROR_BIT is set in 8440 * STS0 and STS1 - clear it 8441 * 8442 * probably we lose additional attentions between 8443 * STS0 and STS_CLR0, in this case user will not 8444 * be notified about them 8445 */ 8446 if (val0 & mask0 & PXP2_EOP_ERROR_BIT && 8447 !(val1 & mask1)) 8448 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 8449 8450 /* print the register, since no one can restore it */ 8451 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0); 8452 8453 /* 8454 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8455 * then notify 8456 */ 8457 if (val0 & PXP2_EOP_ERROR_BIT) { 8458 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n"); 8459 8460 /* 8461 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is 8462 * set then clear attention from PXP2 block without panic 8463 */ 8464 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && 8465 ((val1 & mask1) == 0)) 8466 attn &= ~AEU_PXP2_HW_INT_BIT; 8467 } 8468 } 8469 } 8470 8471 if (attn & HW_INTERRUT_ASSERT_SET_2) { 8472 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 8473 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 8474 8475 val = REG_RD(sc, reg_offset); 8476 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 8477 REG_WR(sc, reg_offset, val); 8478 8479 BLOGE(sc, "FATAL HW block attention set2 0x%x\n", 8480 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2)); 8481 bxe_panic(sc, ("HW block attention set2\n")); 8482 } 8483 } 8484 8485 static void 8486 bxe_attn_int_deasserted1(struct bxe_softc *sc, 8487 uint32_t attn) 8488 { 8489 int port = SC_PORT(sc); 8490 int reg_offset; 8491 uint32_t val; 8492 8493 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 8494 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); 8495 BLOGE(sc, "DB hw attention 0x%08x\n", val); 8496 /* DORQ discard attention */ 8497 if (val & 0x2) { 8498 BLOGE(sc, "FATAL error from DORQ\n"); 8499 } 8500 } 8501 8502 if (attn & HW_INTERRUT_ASSERT_SET_1) { 8503 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 8504 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 8505 8506 val = REG_RD(sc, reg_offset); 8507 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 8508 REG_WR(sc, reg_offset, val); 8509 8510 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", 8511 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); 8512 bxe_panic(sc, ("HW block attention set1\n")); 8513 } 8514 } 8515 8516 static void 8517 bxe_attn_int_deasserted0(struct bxe_softc *sc, 8518 uint32_t attn) 8519 { 8520 int port = SC_PORT(sc); 8521 int reg_offset; 8522 uint32_t val; 8523 8524 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 8525 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 8526 8527 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 8528 val = REG_RD(sc, reg_offset); 8529 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 8530 REG_WR(sc, reg_offset, val); 8531 8532 BLOGW(sc, "SPIO5 hw attention\n"); 8533 8534 /* Fan failure attention */ 8535 elink_hw_reset_phy(&sc->link_params); 8536 bxe_fan_failure(sc); 8537 } 8538 8539 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { 8540 bxe_acquire_phy_lock(sc); 8541 elink_handle_module_detect_int(&sc->link_params); 8542 bxe_release_phy_lock(sc); 8543 } 8544 8545 if (attn & HW_INTERRUT_ASSERT_SET_0) { 8546 val = REG_RD(sc, reg_offset); 8547 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 8548 REG_WR(sc, reg_offset, val); 8549 8550 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", 8551 (attn & HW_INTERRUT_ASSERT_SET_0))); 8552 } 8553 } 8554 8555 static void 8556 bxe_attn_int_deasserted(struct bxe_softc *sc, 8557 uint32_t deasserted) 8558 { 8559 struct attn_route attn; 8560 struct attn_route *group_mask; 8561 int port = SC_PORT(sc); 8562 int index; 8563 uint32_t reg_addr; 8564 uint32_t val; 8565 uint32_t aeu_mask; 8566 uint8_t global = FALSE; 8567 8568 /* 8569 * Need to take HW lock because MCP or other port might also 8570 * try to handle this event. 8571 */ 8572 bxe_acquire_alr(sc); 8573 8574 if (bxe_chk_parity_attn(sc, &global, TRUE)) { 8575 /* XXX 8576 * In case of parity errors don't handle attentions so that 8577 * other function would "see" parity errors. 8578 */ 8579 sc->recovery_state = BXE_RECOVERY_INIT; 8580 // XXX schedule a recovery task... 
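        /*
         * Beyond this point the parity path only disables HW interrupts and
         * releases the ALR; the deasserted attention bits are neither
         * cleared nor re-enabled in the AEU mask, unlike the normal path
         * below.
         */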
8581 /* disable HW interrupts */ 8582 bxe_int_disable(sc); 8583 bxe_release_alr(sc); 8584 return; 8585 } 8586 8587 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 8588 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 8589 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 8590 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 8591 if (!CHIP_IS_E1x(sc)) { 8592 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 8593 } else { 8594 attn.sig[4] = 0; 8595 } 8596 8597 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 8598 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 8599 8600 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 8601 if (deasserted & (1 << index)) { 8602 group_mask = &sc->attn_group[index]; 8603 8604 BLOGD(sc, DBG_INTR, 8605 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, 8606 group_mask->sig[0], group_mask->sig[1], 8607 group_mask->sig[2], group_mask->sig[3], 8608 group_mask->sig[4]); 8609 8610 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); 8611 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); 8612 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); 8613 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); 8614 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); 8615 } 8616 } 8617 8618 bxe_release_alr(sc); 8619 8620 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8621 reg_addr = (HC_REG_COMMAND_REG + port*32 + 8622 COMMAND_REG_ATTN_BITS_CLR); 8623 } else { 8624 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 8625 } 8626 8627 val = ~deasserted; 8628 BLOGD(sc, DBG_INTR, 8629 "about to mask 0x%08x at %s addr 0x%08x\n", val, 8630 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 8631 REG_WR(sc, reg_addr, val); 8632 8633 if (~sc->attn_state & deasserted) { 8634 BLOGE(sc, "IGU error\n"); 8635 } 8636 8637 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 8638 MISC_REG_AEU_MASK_ATTN_FUNC_0; 8639 8640 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8641 8642 aeu_mask = REG_RD(sc, reg_addr); 8643 8644 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", 8645 aeu_mask, deasserted); 8646 aeu_mask |= (deasserted & 0x3ff); 8647 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 8648 8649 REG_WR(sc, reg_addr, aeu_mask); 8650 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8651 8652 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 8653 sc->attn_state &= ~deasserted; 8654 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 8655 } 8656 8657 static void 8658 bxe_attn_int(struct bxe_softc *sc) 8659 { 8660 /* read local copy of bits */ 8661 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 8662 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 8663 uint32_t attn_state = sc->attn_state; 8664 8665 /* look for changed bits */ 8666 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 8667 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 8668 8669 BLOGD(sc, DBG_INTR, 8670 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", 8671 attn_bits, attn_ack, asserted, deasserted); 8672 8673 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 8674 BLOGE(sc, "BAD attention state\n"); 8675 } 8676 8677 /* handle bits that were raised */ 8678 if (asserted) { 8679 bxe_attn_int_asserted(sc, asserted); 8680 } 8681 8682 if (deasserted) { 8683 bxe_attn_int_deasserted(sc, deasserted); 8684 } 8685 } 8686 8687 static uint16_t 8688 bxe_update_dsb_idx(struct bxe_softc *sc) 8689 { 8690 struct host_sp_status_block *def_sb = sc->def_sb; 8691 uint16_t rc = 0; 8692 8693 mb(); /* status block is written to by the chip */ 8694 8695 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 8696 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 8697 rc |= BXE_DEF_SB_ATT_IDX; 8698 } 8699 8700 if (sc->def_idx != def_sb->sp_sb.running_index) { 8701 sc->def_idx = def_sb->sp_sb.running_index; 8702 rc |= BXE_DEF_SB_IDX; 8703 } 8704 8705 mb(); 8706 8707 return (rc); 8708 } 8709 8710 static inline struct ecore_queue_sp_obj * 8711 bxe_cid_to_q_obj(struct bxe_softc *sc, 8712 uint32_t cid) 8713 { 8714 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); 8715 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); 8716 } 8717 8718 static void 8719 bxe_handle_mcast_eqe(struct bxe_softc *sc) 8720 { 8721 struct ecore_mcast_ramrod_params rparam; 8722 int rc; 8723 8724 memset(&rparam, 0, sizeof(rparam)); 8725 8726 rparam.mcast_obj = &sc->mcast_obj; 8727 8728 BXE_MCAST_LOCK(sc); 8729 8730 /* clear pending state for the last command */ 8731 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 8732 8733 /* if there are pending mcast commands - send them */ 8734 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 8735 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 8736 if (rc < 0) { 8737 BLOGD(sc, DBG_SP, 8738 "ERROR: Failed to send pending mcast commands (%d)\n", 8739 rc); 8740 } 8741 } 8742 8743 BXE_MCAST_UNLOCK(sc); 8744 } 8745 8746 static void 8747 bxe_handle_classification_eqe(struct bxe_softc *sc, 8748 union event_ring_elem *elem) 8749 { 8750 unsigned long ramrod_flags = 0; 8751 int rc = 0; 8752 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8753 struct ecore_vlan_mac_obj *vlan_mac_obj; 8754 8755 /* always push next commands out, don't wait here */ 8756 bit_set(&ramrod_flags, 
RAMROD_CONT); 8757 8758 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { 8759 case ECORE_FILTER_MAC_PENDING: 8760 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n"); 8761 vlan_mac_obj = &sc->sp_objs[cid].mac_obj; 8762 break; 8763 8764 case ECORE_FILTER_MCAST_PENDING: 8765 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n"); 8766 /* 8767 * This is only relevant for 57710 where multicast MACs are 8768 * configured as unicast MACs using the same ramrod. 8769 */ 8770 bxe_handle_mcast_eqe(sc); 8771 return; 8772 8773 default: 8774 BLOGE(sc, "Unsupported classification command: %d\n", 8775 elem->message.data.eth_event.echo); 8776 return; 8777 } 8778 8779 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); 8780 8781 if (rc < 0) { 8782 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); 8783 } else if (rc > 0) { 8784 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); 8785 } 8786 } 8787 8788 static void 8789 bxe_handle_rx_mode_eqe(struct bxe_softc *sc, 8790 union event_ring_elem *elem) 8791 { 8792 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 8793 8794 /* send rx_mode command again if was requested */ 8795 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, 8796 &sc->sp_state)) { 8797 bxe_set_storm_rx_mode(sc); 8798 } 8799 #if 0 8800 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED, 8801 &sc->sp_state)) { 8802 bxe_set_iscsi_eth_rx_mode(sc, TRUE); 8803 } 8804 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED, 8805 &sc->sp_state)) { 8806 bxe_set_iscsi_eth_rx_mode(sc, FALSE); 8807 } 8808 #endif 8809 } 8810 8811 static void 8812 bxe_update_eq_prod(struct bxe_softc *sc, 8813 uint16_t prod) 8814 { 8815 storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); 8816 wmb(); /* keep prod updates ordered */ 8817 } 8818 8819 static void 8820 bxe_eq_int(struct bxe_softc *sc) 8821 { 8822 uint16_t hw_cons, sw_cons, sw_prod; 8823 union event_ring_elem *elem; 8824 uint8_t echo; 8825 uint32_t cid; 8826 uint8_t opcode; 8827 int spqe_cnt = 0; 8828 struct ecore_queue_sp_obj *q_obj; 8829 struct ecore_func_sp_obj *f_obj = &sc->func_obj; 8830 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; 8831 8832 hw_cons = le16toh(*sc->eq_cons_sb); 8833 8834 /* 8835 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. 8836 * when we get to the next-page we need to adjust so the loop 8837 * condition below will be met. The next element is the size of a 8838 * regular element and hence incrementing by 1 8839 */ 8840 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { 8841 hw_cons++; 8842 } 8843 8844 /* 8845 * This function may never run in parallel with itself for a 8846 * specific sc and no need for a read memory barrier here. 
8847 */ 8848 sw_cons = sc->eq_cons; 8849 sw_prod = sc->eq_prod; 8850 8851 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", 8852 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); 8853 8854 for (; 8855 sw_cons != hw_cons; 8856 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 8857 8858 elem = &sc->eq[EQ_DESC(sw_cons)]; 8859 8860 #if 0 8861 int rc; 8862 rc = bxe_iov_eq_sp_event(sc, elem); 8863 if (!rc) { 8864 BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc); 8865 goto next_spqe; 8866 } 8867 #endif 8868 8869 /* elem CID originates from FW, actually LE */ 8870 cid = SW_CID(elem->message.data.cfc_del_event.cid); 8871 opcode = elem->message.opcode; 8872 8873 /* handle eq element */ 8874 switch (opcode) { 8875 #if 0 8876 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 8877 BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n"); 8878 bxe_vf_mbx(sc, &elem->message.data.vf_pf_event); 8879 continue; 8880 #endif 8881 8882 case EVENT_RING_OPCODE_STAT_QUERY: 8883 BLOGD(sc, DBG_SP, "got statistics completion event %d\n", 8884 sc->stats_comp++); 8885 /* nothing to do with stats comp */ 8886 goto next_spqe; 8887 8888 case EVENT_RING_OPCODE_CFC_DEL: 8889 /* handle according to cid range */ 8890 /* we may want to verify here that the sc state is HALTING */ 8891 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); 8892 q_obj = bxe_cid_to_q_obj(sc, cid); 8893 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 8894 break; 8895 } 8896 goto next_spqe; 8897 8898 case EVENT_RING_OPCODE_STOP_TRAFFIC: 8899 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); 8900 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 8901 break; 8902 } 8903 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); 8904 goto next_spqe; 8905 8906 case EVENT_RING_OPCODE_START_TRAFFIC: 8907 BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); 8908 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { 8909 break; 8910 } 8911 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); 8912 goto next_spqe; 8913 8914 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 8915 echo = elem->message.data.function_update_event.echo; 8916 if (echo == SWITCH_UPDATE) { 8917 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); 8918 if (f_obj->complete_cmd(sc, f_obj, 8919 ECORE_F_CMD_SWITCH_UPDATE)) { 8920 break; 8921 } 8922 } 8923 else { 8924 BLOGD(sc, DBG_SP, 8925 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 8926 #if 0 8927 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE); 8928 /* 8929 * We will perform the queues update from the sp_core_task as 8930 * all queue SP operations should run with CORE_LOCK. 
8931 */ 8932 bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state); 8933 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); 8934 #endif 8935 } 8936 goto next_spqe; 8937 8938 #if 0 8939 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 8940 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS); 8941 bxe_after_afex_vif_lists(sc, elem); 8942 goto next_spqe; 8943 #endif 8944 8945 case EVENT_RING_OPCODE_FORWARD_SETUP: 8946 q_obj = &bxe_fwd_sp_obj(sc, q_obj); 8947 if (q_obj->complete_cmd(sc, q_obj, 8948 ECORE_Q_CMD_SETUP_TX_ONLY)) { 8949 break; 8950 } 8951 goto next_spqe; 8952 8953 case EVENT_RING_OPCODE_FUNCTION_START: 8954 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); 8955 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 8956 break; 8957 } 8958 goto next_spqe; 8959 8960 case EVENT_RING_OPCODE_FUNCTION_STOP: 8961 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); 8962 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 8963 break; 8964 } 8965 goto next_spqe; 8966 } 8967 8968 switch (opcode | sc->state) { 8969 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): 8970 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): 8971 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8972 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid); 8973 rss_raw->clear_pending(rss_raw); 8974 break; 8975 8976 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): 8977 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): 8978 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): 8979 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): 8980 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): 8981 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8982 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); 8983 bxe_handle_classification_eqe(sc, elem); 8984 break; 8985 8986 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): 8987 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): 8988 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8989 BLOGD(sc, DBG_SP, "got mcast ramrod\n"); 8990 bxe_handle_mcast_eqe(sc); 8991 break; 8992 8993 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): 8994 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): 8995 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8996 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); 8997 bxe_handle_rx_mode_eqe(sc, elem); 8998 break; 8999 9000 default: 9001 /* unknown event log error and continue */ 9002 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", 9003 elem->message.opcode, sc->state); 9004 } 9005 9006 next_spqe: 9007 spqe_cnt++; 9008 } /* for */ 9009 9010 mb(); 9011 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); 9012 9013 sc->eq_cons = sw_cons; 9014 sc->eq_prod = sw_prod; 9015 9016 /* make sure that above mem writes were issued towards the memory */ 9017 wmb(); 9018 9019 /* update producer */ 9020 bxe_update_eq_prod(sc, sc->eq_prod); 9021 } 9022 9023 static void 9024 bxe_handle_sp_tq(void *context, 9025 int pending) 9026 { 9027 struct bxe_softc *sc = (struct bxe_softc *)context; 9028 uint16_t status; 9029 9030 BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); 9031 9032 /* what work needs to be performed? 
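     * (the bxe_update_dsb_idx() call that follows returns a mask of
     * BXE_DEF_SB_ATT_IDX and/or BXE_DEF_SB_IDX, based on which default
     * status block indices have advanced)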
*/ 9033 status = bxe_update_dsb_idx(sc); 9034 9035 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); 9036 9037 /* HW attentions */ 9038 if (status & BXE_DEF_SB_ATT_IDX) { 9039 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); 9040 bxe_attn_int(sc); 9041 status &= ~BXE_DEF_SB_ATT_IDX; 9042 } 9043 9044 /* SP events: STAT_QUERY and others */ 9045 if (status & BXE_DEF_SB_IDX) { 9046 /* handle EQ completions */ 9047 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); 9048 bxe_eq_int(sc); 9049 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 9050 le16toh(sc->def_idx), IGU_INT_NOP, 1); 9051 status &= ~BXE_DEF_SB_IDX; 9052 } 9053 9054 /* if status is non zero then something went wrong */ 9055 if (__predict_false(status)) { 9056 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); 9057 } 9058 9059 /* ack status block only if something was actually handled */ 9060 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 9061 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); 9062 9063 /* 9064 * Must be called after the EQ processing (since eq leads to sriov 9065 * ramrod completion flows). 9066 * This flow may have been scheduled by the arrival of a ramrod 9067 * completion, or by the sriov code rescheduling itself. 9068 */ 9069 // XXX bxe_iov_sp_task(sc); 9070 9071 #if 0 9072 /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */ 9073 if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, 9074 &sc->sp_state)) { 9075 bxe_link_report(sc); 9076 bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 9077 } 9078 #endif 9079 } 9080 9081 static void 9082 bxe_handle_fp_tq(void *context, 9083 int pending) 9084 { 9085 struct bxe_fastpath *fp = (struct bxe_fastpath *)context; 9086 struct bxe_softc *sc = fp->sc; 9087 uint8_t more_tx = FALSE; 9088 uint8_t more_rx = FALSE; 9089 9090 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); 9091 9092 /* XXX 9093 * IFF_DRV_RUNNING state can't be checked here since we process 9094 * slowpath events on a client queue during setup. Instead 9095 * we need to add a "process/continue" flag here that the driver 9096 * can use to tell the task here not to do anything. 
9097 */ 9098 #if 0 9099 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { 9100 return; 9101 } 9102 #endif 9103 9104 /* update the fastpath index */ 9105 bxe_update_fp_sb_idx(fp); 9106 9107 /* XXX add loop here if ever support multiple tx CoS */ 9108 /* fp->txdata[cos] */ 9109 if (bxe_has_tx_work(fp)) { 9110 BXE_FP_TX_LOCK(fp); 9111 more_tx = bxe_txeof(sc, fp); 9112 BXE_FP_TX_UNLOCK(fp); 9113 } 9114 9115 if (bxe_has_rx_work(fp)) { 9116 more_rx = bxe_rxeof(sc, fp); 9117 } 9118 9119 if (more_rx /*|| more_tx*/) { 9120 /* still more work to do */ 9121 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 9122 return; 9123 } 9124 9125 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 9126 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 9127 } 9128 9129 static void 9130 bxe_task_fp(struct bxe_fastpath *fp) 9131 { 9132 struct bxe_softc *sc = fp->sc; 9133 uint8_t more_tx = FALSE; 9134 uint8_t more_rx = FALSE; 9135 9136 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); 9137 9138 /* update the fastpath index */ 9139 bxe_update_fp_sb_idx(fp); 9140 9141 /* XXX add loop here if ever support multiple tx CoS */ 9142 /* fp->txdata[cos] */ 9143 if (bxe_has_tx_work(fp)) { 9144 BXE_FP_TX_LOCK(fp); 9145 more_tx = bxe_txeof(sc, fp); 9146 BXE_FP_TX_UNLOCK(fp); 9147 } 9148 9149 if (bxe_has_rx_work(fp)) { 9150 more_rx = bxe_rxeof(sc, fp); 9151 } 9152 9153 if (more_rx /*|| more_tx*/) { 9154 /* still more work to do, bail out if this ISR and process later */ 9155 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 9156 return; 9157 } 9158 9159 /* 9160 * Here we write the fastpath index taken before doing any tx or rx work. 9161 * It is very well possible other hw events occurred up to this point and 9162 * they were actually processed accordingly above. Since we're going to 9163 * write an older fastpath index, an interrupt is coming which we might 9164 * not do any work in. 9165 */ 9166 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 9167 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 9168 } 9169 9170 /* 9171 * Legacy interrupt entry point. 9172 * 9173 * Verifies that the controller generated the interrupt and 9174 * then calls a separate routine to handle the various 9175 * interrupt causes: link, RX, and TX. 9176 */ 9177 static void 9178 bxe_intr_legacy(void *xsc) 9179 { 9180 struct bxe_softc *sc = (struct bxe_softc *)xsc; 9181 struct bxe_fastpath *fp; 9182 uint16_t status, mask; 9183 int i; 9184 9185 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); 9186 9187 #if 0 9188 /* Don't handle any interrupts if we're not ready. */ 9189 if (__predict_false(sc->intr_sem != 0)) { 9190 return; 9191 } 9192 #endif 9193 9194 /* 9195 * 0 for ustorm, 1 for cstorm 9196 * the bits returned from ack_int() are 0-15 9197 * bit 0 = attention status block 9198 * bit 1 = fast path status block 9199 * a mask of 0x2 or more = tx/rx event 9200 * a mask of 1 = slow path event 9201 */ 9202 9203 status = bxe_ack_int(sc); 9204 9205 /* the interrupt is not for us */ 9206 if (__predict_false(status == 0)) { 9207 BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); 9208 return; 9209 } 9210 9211 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); 9212 9213 FOR_EACH_ETH_QUEUE(sc, i) { 9214 fp = &sc->fp[i]; 9215 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); 9216 if (status & mask) { 9217 /* acknowledge and disable further fastpath interrupts */ 9218 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9219 bxe_task_fp(fp); 9220 status &= ~mask; 9221 } 9222 } 9223 9224 #if 0 9225 if (CNIC_SUPPORT(sc)) { 9226 mask = 0x2; 9227 if (status & (mask | 0x1)) { 9228 ... 
9229 status &= ~mask; 9230 } 9231 } 9232 #endif 9233 9234 if (__predict_false(status & 0x1)) { 9235 /* acknowledge and disable further slowpath interrupts */ 9236 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9237 9238 /* schedule slowpath handler */ 9239 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); 9240 9241 status &= ~0x1; 9242 } 9243 9244 if (__predict_false(status)) { 9245 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); 9246 } 9247 } 9248 9249 /* slowpath interrupt entry point */ 9250 static void 9251 bxe_intr_sp(void *xsc) 9252 { 9253 struct bxe_softc *sc = (struct bxe_softc *)xsc; 9254 9255 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); 9256 9257 /* acknowledge and disable further slowpath interrupts */ 9258 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9259 9260 /* schedule slowpath handler */ 9261 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); 9262 } 9263 9264 /* fastpath interrupt entry point */ 9265 static void 9266 bxe_intr_fp(void *xfp) 9267 { 9268 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; 9269 struct bxe_softc *sc = fp->sc; 9270 9271 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); 9272 9273 BLOGD(sc, DBG_INTR, 9274 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", 9275 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); 9276 9277 #if 0 9278 /* Don't handle any interrupts if we're not ready. */ 9279 if (__predict_false(sc->intr_sem != 0)) { 9280 return; 9281 } 9282 #endif 9283 9284 /* acknowledge and disable further fastpath interrupts */ 9285 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9286 9287 bxe_task_fp(fp); 9288 } 9289 9290 /* Release all interrupts allocated by the driver. */ 9291 static void 9292 bxe_interrupt_free(struct bxe_softc *sc) 9293 { 9294 int i; 9295 9296 switch (sc->interrupt_mode) { 9297 case INTR_MODE_INTX: 9298 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); 9299 if (sc->intr[0].resource != NULL) { 9300 bus_release_resource(sc->dev, 9301 SYS_RES_IRQ, 9302 sc->intr[0].rid, 9303 sc->intr[0].resource); 9304 } 9305 break; 9306 case INTR_MODE_MSI: 9307 for (i = 0; i < sc->intr_count; i++) { 9308 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); 9309 if (sc->intr[i].resource && sc->intr[i].rid) { 9310 bus_release_resource(sc->dev, 9311 SYS_RES_IRQ, 9312 sc->intr[i].rid, 9313 sc->intr[i].resource); 9314 } 9315 } 9316 pci_release_msi(sc->dev); 9317 break; 9318 case INTR_MODE_MSIX: 9319 for (i = 0; i < sc->intr_count; i++) { 9320 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); 9321 if (sc->intr[i].resource && sc->intr[i].rid) { 9322 bus_release_resource(sc->dev, 9323 SYS_RES_IRQ, 9324 sc->intr[i].rid, 9325 sc->intr[i].resource); 9326 } 9327 } 9328 pci_release_msi(sc->dev); 9329 break; 9330 default: 9331 /* nothing to do as initial allocation failed */ 9332 break; 9333 } 9334 } 9335 9336 /* 9337 * This function determines and allocates the appropriate 9338 * interrupt based on system capabilites and user request. 9339 * 9340 * The user may force a particular interrupt mode, specify 9341 * the number of receive queues, specify the method for 9342 * distribuitng received frames to receive queues, or use 9343 * the default settings which will automatically select the 9344 * best supported combination. In addition, the OS may or 9345 * may not support certain combinations of these settings. 
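 * The allocation falls back in order from MSI-X to MSI to legacy INTx
 * when a mode is unavailable or its resources cannot be allocated.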
9346 * This routine attempts to reconcile the settings requested 9347 * by the user with the capabilites available from the system 9348 * to select the optimal combination of features. 9349 * 9350 * Returns: 9351 * 0 = Success, !0 = Failure. 9352 */ 9353 static int 9354 bxe_interrupt_alloc(struct bxe_softc *sc) 9355 { 9356 int msix_count = 0; 9357 int msi_count = 0; 9358 int num_requested = 0; 9359 int num_allocated = 0; 9360 int rid, i, j; 9361 int rc; 9362 9363 /* get the number of available MSI/MSI-X interrupts from the OS */ 9364 if (sc->interrupt_mode > 0) { 9365 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { 9366 msix_count = pci_msix_count(sc->dev); 9367 } 9368 9369 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { 9370 msi_count = pci_msi_count(sc->dev); 9371 } 9372 9373 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", 9374 msi_count, msix_count); 9375 } 9376 9377 do { /* try allocating MSI-X interrupt resources (at least 2) */ 9378 if (sc->interrupt_mode != INTR_MODE_MSIX) { 9379 break; 9380 } 9381 9382 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || 9383 (msix_count < 2)) { 9384 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9385 break; 9386 } 9387 9388 /* ask for the necessary number of MSI-X vectors */ 9389 num_requested = min((sc->num_queues + 1), msix_count); 9390 9391 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); 9392 9393 num_allocated = num_requested; 9394 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { 9395 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); 9396 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9397 break; 9398 } 9399 9400 if (num_allocated < 2) { /* possible? */ 9401 BLOGE(sc, "MSI-X allocation less than 2!\n"); 9402 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9403 pci_release_msi(sc->dev); 9404 break; 9405 } 9406 9407 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", 9408 num_requested, num_allocated); 9409 9410 /* best effort so use the number of vectors allocated to us */ 9411 sc->intr_count = num_allocated; 9412 sc->num_queues = num_allocated - 1; 9413 9414 rid = 1; /* initial resource identifier */ 9415 9416 /* allocate the MSI-X vectors */ 9417 for (i = 0; i < num_allocated; i++) { 9418 sc->intr[i].rid = (rid + i); 9419 9420 if ((sc->intr[i].resource = 9421 bus_alloc_resource_any(sc->dev, 9422 SYS_RES_IRQ, 9423 &sc->intr[i].rid, 9424 RF_ACTIVE)) == NULL) { 9425 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", 9426 i, (rid + i)); 9427 9428 for (j = (i - 1); j >= 0; j--) { 9429 bus_release_resource(sc->dev, 9430 SYS_RES_IRQ, 9431 sc->intr[j].rid, 9432 sc->intr[j].resource); 9433 } 9434 9435 sc->intr_count = 0; 9436 sc->num_queues = 0; 9437 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9438 pci_release_msi(sc->dev); 9439 break; 9440 } 9441 9442 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); 9443 } 9444 } while (0); 9445 9446 do { /* try allocating MSI vector resources (at least 2) */ 9447 if (sc->interrupt_mode != INTR_MODE_MSI) { 9448 break; 9449 } 9450 9451 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || 9452 (msi_count < 1)) { 9453 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9454 break; 9455 } 9456 9457 /* ask for a single MSI vector */ 9458 num_requested = 1; 9459 9460 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); 9461 9462 num_allocated = num_requested; 9463 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { 9464 BLOGE(sc, "MSI alloc failed (%d)!\n", 
rc); 9465 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9466 break; 9467 } 9468 9469 if (num_allocated != 1) { /* possible? */ 9470 BLOGE(sc, "MSI allocation is not 1!\n"); 9471 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9472 pci_release_msi(sc->dev); 9473 break; 9474 } 9475 9476 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", 9477 num_requested, num_allocated); 9478 9479 /* best effort so use the number of vectors allocated to us */ 9480 sc->intr_count = num_allocated; 9481 sc->num_queues = num_allocated; 9482 9483 rid = 1; /* initial resource identifier */ 9484 9485 sc->intr[0].rid = rid; 9486 9487 if ((sc->intr[0].resource = 9488 bus_alloc_resource_any(sc->dev, 9489 SYS_RES_IRQ, 9490 &sc->intr[0].rid, 9491 RF_ACTIVE)) == NULL) { 9492 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); 9493 sc->intr_count = 0; 9494 sc->num_queues = 0; 9495 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9496 pci_release_msi(sc->dev); 9497 break; 9498 } 9499 9500 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); 9501 } while (0); 9502 9503 do { /* try allocating INTx vector resources */ 9504 if (sc->interrupt_mode != INTR_MODE_INTX) { 9505 break; 9506 } 9507 9508 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); 9509 9510 /* only one vector for INTx */ 9511 sc->intr_count = 1; 9512 sc->num_queues = 1; 9513 9514 rid = 0; /* initial resource identifier */ 9515 9516 sc->intr[0].rid = rid; 9517 9518 if ((sc->intr[0].resource = 9519 bus_alloc_resource_any(sc->dev, 9520 SYS_RES_IRQ, 9521 &sc->intr[0].rid, 9522 (RF_ACTIVE | RF_SHAREABLE))) == NULL) { 9523 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); 9524 sc->intr_count = 0; 9525 sc->num_queues = 0; 9526 sc->interrupt_mode = -1; /* Failed! */ 9527 break; 9528 } 9529 9530 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); 9531 } while (0); 9532 9533 if (sc->interrupt_mode == -1) { 9534 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); 9535 rc = 1; 9536 } else { 9537 BLOGD(sc, DBG_LOAD, 9538 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", 9539 sc->interrupt_mode, sc->num_queues); 9540 rc = 0; 9541 } 9542 9543 return (rc); 9544 } 9545 9546 static void 9547 bxe_interrupt_detach(struct bxe_softc *sc) 9548 { 9549 struct bxe_fastpath *fp; 9550 int i; 9551 9552 /* release interrupt resources */ 9553 for (i = 0; i < sc->intr_count; i++) { 9554 if (sc->intr[i].resource && sc->intr[i].tag) { 9555 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); 9556 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); 9557 } 9558 } 9559 9560 for (i = 0; i < sc->num_queues; i++) { 9561 fp = &sc->fp[i]; 9562 if (fp->tq) { 9563 taskqueue_drain(fp->tq, &fp->tq_task); 9564 taskqueue_free(fp->tq); 9565 fp->tq = NULL; 9566 } 9567 } 9568 9569 9570 if (sc->sp_tq) { 9571 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); 9572 taskqueue_free(sc->sp_tq); 9573 sc->sp_tq = NULL; 9574 } 9575 } 9576 9577 /* 9578 * Enables interrupts and attach to the ISR. 9579 * 9580 * When using multiple MSI/MSI-X vectors the first vector 9581 * is used for slowpath operations while all remaining 9582 * vectors are used for fastpath operations. If only a 9583 * single MSI/MSI-X vector is used (SINGLE_ISR) then the 9584 * ISR must look for both slowpath and fastpath completions. 
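 * The slowpath ISR defers its work to the sc->sp_tq taskqueue, while the
 * fastpath ISRs process completions directly and only re-queue to the
 * per-queue fp->tq taskqueue when more work remains.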
9585 */ 9586 static int 9587 bxe_interrupt_attach(struct bxe_softc *sc) 9588 { 9589 struct bxe_fastpath *fp; 9590 int rc = 0; 9591 int i; 9592 9593 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), 9594 "bxe%d_sp_tq", sc->unit); 9595 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); 9596 sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT, 9597 taskqueue_thread_enqueue, 9598 &sc->sp_tq); 9599 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ 9600 "%s", sc->sp_tq_name); 9601 9602 9603 for (i = 0; i < sc->num_queues; i++) { 9604 fp = &sc->fp[i]; 9605 snprintf(fp->tq_name, sizeof(fp->tq_name), 9606 "bxe%d_fp%d_tq", sc->unit, i); 9607 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); 9608 fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT, 9609 taskqueue_thread_enqueue, 9610 &fp->tq); 9611 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ 9612 "%s", fp->tq_name); 9613 } 9614 9615 /* setup interrupt handlers */ 9616 if (sc->interrupt_mode == INTR_MODE_MSIX) { 9617 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); 9618 9619 /* 9620 * Setup the interrupt handler. Note that we pass the driver instance 9621 * to the interrupt handler for the slowpath. 9622 */ 9623 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9624 (INTR_TYPE_NET | INTR_MPSAFE), 9625 NULL, bxe_intr_sp, sc, 9626 &sc->intr[0].tag)) != 0) { 9627 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); 9628 goto bxe_interrupt_attach_exit; 9629 } 9630 9631 bus_describe_intr(sc->dev, sc->intr[0].resource, 9632 sc->intr[0].tag, "sp"); 9633 9634 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ 9635 9636 /* initialize the fastpath vectors (note the first was used for sp) */ 9637 for (i = 0; i < sc->num_queues; i++) { 9638 fp = &sc->fp[i]; 9639 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); 9640 9641 /* 9642 * Setup the interrupt handler. Note that we pass the 9643 * fastpath context to the interrupt handler in this 9644 * case. 9645 */ 9646 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, 9647 (INTR_TYPE_NET | INTR_MPSAFE), 9648 NULL, bxe_intr_fp, fp, 9649 &sc->intr[i + 1].tag)) != 0) { 9650 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", 9651 (i + 1), rc); 9652 goto bxe_interrupt_attach_exit; 9653 } 9654 9655 bus_describe_intr(sc->dev, sc->intr[i + 1].resource, 9656 sc->intr[i + 1].tag, "fp%02d", i); 9657 9658 /* bind the fastpath instance to a cpu */ 9659 if (sc->num_queues > 1) { 9660 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); 9661 } 9662 9663 fp->state = BXE_FP_STATE_IRQ; 9664 } 9665 } else if (sc->interrupt_mode == INTR_MODE_MSI) { 9666 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); 9667 9668 /* 9669 * Setup the interrupt handler. Note that we pass the 9670 * driver instance to the interrupt handler which 9671 * will handle both the slowpath and fastpath. 9672 */ 9673 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9674 (INTR_TYPE_NET | INTR_MPSAFE), 9675 NULL, bxe_intr_legacy, sc, 9676 &sc->intr[0].tag)) != 0) { 9677 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); 9678 goto bxe_interrupt_attach_exit; 9679 } 9680 9681 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ 9682 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); 9683 9684 /* 9685 * Setup the interrupt handler. Note that we pass the 9686 * driver instance to the interrupt handler which 9687 * will handle both the slowpath and fastpath. 
9688 */ 9689 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9690 (INTR_TYPE_NET | INTR_MPSAFE), 9691 NULL, bxe_intr_legacy, sc, 9692 &sc->intr[0].tag)) != 0) { 9693 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); 9694 goto bxe_interrupt_attach_exit; 9695 } 9696 } 9697 9698 bxe_interrupt_attach_exit: 9699 9700 return (rc); 9701 } 9702 9703 static int bxe_init_hw_common_chip(struct bxe_softc *sc); 9704 static int bxe_init_hw_common(struct bxe_softc *sc); 9705 static int bxe_init_hw_port(struct bxe_softc *sc); 9706 static int bxe_init_hw_func(struct bxe_softc *sc); 9707 static void bxe_reset_common(struct bxe_softc *sc); 9708 static void bxe_reset_port(struct bxe_softc *sc); 9709 static void bxe_reset_func(struct bxe_softc *sc); 9710 static int bxe_gunzip_init(struct bxe_softc *sc); 9711 static void bxe_gunzip_end(struct bxe_softc *sc); 9712 static int bxe_init_firmware(struct bxe_softc *sc); 9713 static void bxe_release_firmware(struct bxe_softc *sc); 9714 9715 static struct 9716 ecore_func_sp_drv_ops bxe_func_sp_drv = { 9717 .init_hw_cmn_chip = bxe_init_hw_common_chip, 9718 .init_hw_cmn = bxe_init_hw_common, 9719 .init_hw_port = bxe_init_hw_port, 9720 .init_hw_func = bxe_init_hw_func, 9721 9722 .reset_hw_cmn = bxe_reset_common, 9723 .reset_hw_port = bxe_reset_port, 9724 .reset_hw_func = bxe_reset_func, 9725 9726 .gunzip_init = bxe_gunzip_init, 9727 .gunzip_end = bxe_gunzip_end, 9728 9729 .init_fw = bxe_init_firmware, 9730 .release_fw = bxe_release_firmware, 9731 }; 9732 9733 static void 9734 bxe_init_func_obj(struct bxe_softc *sc) 9735 { 9736 sc->dmae_ready = 0; 9737 9738 ecore_init_func_obj(sc, 9739 &sc->func_obj, 9740 BXE_SP(sc, func_rdata), 9741 BXE_SP_MAPPING(sc, func_rdata), 9742 BXE_SP(sc, func_afex_rdata), 9743 BXE_SP_MAPPING(sc, func_afex_rdata), 9744 &bxe_func_sp_drv); 9745 } 9746 9747 static int 9748 bxe_init_hw(struct bxe_softc *sc, 9749 uint32_t load_code) 9750 { 9751 struct ecore_func_state_params func_params = { NULL }; 9752 int rc; 9753 9754 /* prepare the parameters for function state transitions */ 9755 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 9756 9757 func_params.f_obj = &sc->func_obj; 9758 func_params.cmd = ECORE_F_CMD_HW_INIT; 9759 9760 func_params.params.hw_init.load_phase = load_code; 9761 9762 /* 9763 * Via a plethora of function pointers, we will eventually reach 9764 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
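 * (the dispatch runs through the ecore function state machine using the
 * bxe_func_sp_drv callbacks registered by bxe_init_func_obj() above)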
9765 */ 9766 rc = ecore_func_state_change(sc, &func_params); 9767 9768 return (rc); 9769 } 9770 9771 static void 9772 bxe_fill(struct bxe_softc *sc, 9773 uint32_t addr, 9774 int fill, 9775 uint32_t len) 9776 { 9777 uint32_t i; 9778 9779 if (!(len % 4) && !(addr % 4)) { 9780 for (i = 0; i < len; i += 4) { 9781 REG_WR(sc, (addr + i), fill); 9782 } 9783 } else { 9784 for (i = 0; i < len; i++) { 9785 REG_WR8(sc, (addr + i), fill); 9786 } 9787 } 9788 } 9789 9790 /* writes FP SP data to FW - data_size in dwords */ 9791 static void 9792 bxe_wr_fp_sb_data(struct bxe_softc *sc, 9793 int fw_sb_id, 9794 uint32_t *sb_data_p, 9795 uint32_t data_size) 9796 { 9797 int index; 9798 9799 for (index = 0; index < data_size; index++) { 9800 REG_WR(sc, 9801 (BAR_CSTRORM_INTMEM + 9802 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 9803 (sizeof(uint32_t) * index)), 9804 *(sb_data_p + index)); 9805 } 9806 } 9807 9808 static void 9809 bxe_zero_fp_sb(struct bxe_softc *sc, 9810 int fw_sb_id) 9811 { 9812 struct hc_status_block_data_e2 sb_data_e2; 9813 struct hc_status_block_data_e1x sb_data_e1x; 9814 uint32_t *sb_data_p; 9815 uint32_t data_size = 0; 9816 9817 if (!CHIP_IS_E1x(sc)) { 9818 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9819 sb_data_e2.common.state = SB_DISABLED; 9820 sb_data_e2.common.p_func.vf_valid = FALSE; 9821 sb_data_p = (uint32_t *)&sb_data_e2; 9822 data_size = (sizeof(struct hc_status_block_data_e2) / 9823 sizeof(uint32_t)); 9824 } else { 9825 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9826 sb_data_e1x.common.state = SB_DISABLED; 9827 sb_data_e1x.common.p_func.vf_valid = FALSE; 9828 sb_data_p = (uint32_t *)&sb_data_e1x; 9829 data_size = (sizeof(struct hc_status_block_data_e1x) / 9830 sizeof(uint32_t)); 9831 } 9832 9833 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9834 9835 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 9836 0, CSTORM_STATUS_BLOCK_SIZE); 9837 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 9838 0, CSTORM_SYNC_BLOCK_SIZE); 9839 } 9840 9841 static void 9842 bxe_wr_sp_sb_data(struct bxe_softc *sc, 9843 struct hc_sp_status_block_data *sp_sb_data) 9844 { 9845 int i; 9846 9847 for (i = 0; 9848 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 9849 i++) { 9850 REG_WR(sc, 9851 (BAR_CSTRORM_INTMEM + 9852 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 9853 (i * sizeof(uint32_t))), 9854 *((uint32_t *)sp_sb_data + i)); 9855 } 9856 } 9857 9858 static void 9859 bxe_zero_sp_sb(struct bxe_softc *sc) 9860 { 9861 struct hc_sp_status_block_data sp_sb_data; 9862 9863 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 9864 9865 sp_sb_data.state = SB_DISABLED; 9866 sp_sb_data.p_func.vf_valid = FALSE; 9867 9868 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9869 9870 bxe_fill(sc, 9871 (BAR_CSTRORM_INTMEM + 9872 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 9873 0, CSTORM_SP_STATUS_BLOCK_SIZE); 9874 bxe_fill(sc, 9875 (BAR_CSTRORM_INTMEM + 9876 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 9877 0, CSTORM_SP_SYNC_BLOCK_SIZE); 9878 } 9879 9880 static void 9881 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 9882 int igu_sb_id, 9883 int igu_seg_id) 9884 { 9885 hc_sm->igu_sb_id = igu_sb_id; 9886 hc_sm->igu_seg_id = igu_seg_id; 9887 hc_sm->timer_value = 0xFF; 9888 hc_sm->time_to_expire = 0xFFFFFFFF; 9889 } 9890 9891 static void 9892 bxe_map_sb_state_machines(struct hc_index_data *index_data) 9893 { 9894 /* zero out state machine indices */ 9895 9896 /* rx indices */ 9897 
index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 9898 9899 /* tx indices */ 9900 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 9901 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 9902 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 9903 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 9904 9905 /* map indices */ 9906 9907 /* rx indices */ 9908 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 9909 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9910 9911 /* tx indices */ 9912 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 9913 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9914 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 9915 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9916 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 9917 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9918 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 9919 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9920 } 9921 9922 static void 9923 bxe_init_sb(struct bxe_softc *sc, 9924 bus_addr_t busaddr, 9925 int vfid, 9926 uint8_t vf_valid, 9927 int fw_sb_id, 9928 int igu_sb_id) 9929 { 9930 struct hc_status_block_data_e2 sb_data_e2; 9931 struct hc_status_block_data_e1x sb_data_e1x; 9932 struct hc_status_block_sm *hc_sm_p; 9933 uint32_t *sb_data_p; 9934 int igu_seg_id; 9935 int data_size; 9936 9937 if (CHIP_INT_MODE_IS_BC(sc)) { 9938 igu_seg_id = HC_SEG_ACCESS_NORM; 9939 } else { 9940 igu_seg_id = IGU_SEG_ACCESS_NORM; 9941 } 9942 9943 bxe_zero_fp_sb(sc, fw_sb_id); 9944 9945 if (!CHIP_IS_E1x(sc)) { 9946 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9947 sb_data_e2.common.state = SB_ENABLED; 9948 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); 9949 sb_data_e2.common.p_func.vf_id = vfid; 9950 sb_data_e2.common.p_func.vf_valid = vf_valid; 9951 sb_data_e2.common.p_func.vnic_id = SC_VN(sc); 9952 sb_data_e2.common.same_igu_sb_1b = TRUE; 9953 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); 9954 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); 9955 hc_sm_p = sb_data_e2.common.state_machine; 9956 sb_data_p = (uint32_t *)&sb_data_e2; 9957 data_size = (sizeof(struct hc_status_block_data_e2) / 9958 sizeof(uint32_t)); 9959 bxe_map_sb_state_machines(sb_data_e2.index_data); 9960 } else { 9961 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9962 sb_data_e1x.common.state = SB_ENABLED; 9963 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); 9964 sb_data_e1x.common.p_func.vf_id = 0xff; 9965 sb_data_e1x.common.p_func.vf_valid = FALSE; 9966 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); 9967 sb_data_e1x.common.same_igu_sb_1b = TRUE; 9968 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); 9969 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); 9970 hc_sm_p = sb_data_e1x.common.state_machine; 9971 sb_data_p = (uint32_t *)&sb_data_e1x; 9972 data_size = (sizeof(struct hc_status_block_data_e1x) / 9973 sizeof(uint32_t)); 9974 bxe_map_sb_state_machines(sb_data_e1x.index_data); 9975 } 9976 9977 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); 9978 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); 9979 9980 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); 9981 9982 /* write indices to HW - PCI guarantees endianity of regpairs */ 9983 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9984 } 9985 9986 static inline uint8_t 9987 bxe_fp_qzone_id(struct bxe_fastpath *fp) 9988 { 9989 if (CHIP_IS_E1x(fp->sc)) { 9990 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); 9991 } 
else { 9992 return (fp->cl_id); 9993 } 9994 } 9995 9996 static inline uint32_t 9997 bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, 9998 struct bxe_fastpath *fp) 9999 { 10000 uint32_t offset = BAR_USTRORM_INTMEM; 10001 10002 #if 0 10003 if (IS_VF(sc)) { 10004 return (PXP_VF_ADDR_USDM_QUEUES_START + 10005 (sc->acquire_resp.resc.hw_qid[fp->index] * 10006 sizeof(struct ustorm_queue_zone_data))); 10007 } else 10008 #endif 10009 if (!CHIP_IS_E1x(sc)) { 10010 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 10011 } else { 10012 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); 10013 } 10014 10015 return (offset); 10016 } 10017 10018 static void 10019 bxe_init_eth_fp(struct bxe_softc *sc, 10020 int idx) 10021 { 10022 struct bxe_fastpath *fp = &sc->fp[idx]; 10023 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; 10024 unsigned long q_type = 0; 10025 int cos; 10026 10027 fp->sc = sc; 10028 fp->index = idx; 10029 10030 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 10031 "bxe%d_fp%d_tx_lock", sc->unit, idx); 10032 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 10033 10034 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), 10035 "bxe%d_fp%d_rx_lock", sc->unit, idx); 10036 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); 10037 10038 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); 10039 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); 10040 10041 fp->cl_id = (CHIP_IS_E1x(sc)) ? 10042 (SC_L_ID(sc) + idx) : 10043 /* want client ID same as IGU SB ID for non-E1 */ 10044 fp->igu_sb_id; 10045 fp->cl_qzone_id = bxe_fp_qzone_id(fp); 10046 10047 /* setup sb indices */ 10048 if (!CHIP_IS_E1x(sc)) { 10049 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; 10050 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; 10051 } else { 10052 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; 10053 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; 10054 } 10055 10056 /* init shortcut */ 10057 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); 10058 10059 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; 10060 10061 /* 10062 * XXX If multiple CoS is ever supported then each fastpath structure 10063 * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 
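     * As of now every entry of cids[] below is simply set to the fastpath
     * index and only the COS0 TX consumer index is tracked.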
     */
    for (cos = 0; cos < sc->max_cos; cos++) {
        cids[cos] = idx;
    }
    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];

    /* nothing more for a VF to do */
    if (IS_VF(sc)) {
        return;
    }

    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
                fp->fw_sb_id, fp->igu_sb_id);

    bxe_update_fp_sb_idx(fp);

    /* Configure Queue State object */
    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);

    ecore_init_queue_obj(sc,
                         &sc->sp_objs[idx].q_obj,
                         fp->cl_id,
                         cids,
                         sc->max_cos,
                         SC_FUNC(sc),
                         BXE_SP(sc, q_rdata),
                         BXE_SP_MAPPING(sc, q_rdata),
                         q_type);

    /* configure classification DBs */
    ecore_init_mac_obj(sc,
                       &sc->sp_objs[idx].mac_obj,
                       fp->cl_id,
                       idx,
                       SC_FUNC(sc),
                       BXE_SP(sc, mac_rdata),
                       BXE_SP_MAPPING(sc, mac_rdata),
                       ECORE_FILTER_MAC_PENDING,
                       &sc->sp_state,
                       ECORE_OBJ_TYPE_RX_TX,
                       &sc->macs_pool);

    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
}

static inline void
bxe_update_rx_prod(struct bxe_softc *sc,
                   struct bxe_fastpath *fp,
                   uint16_t rx_bd_prod,
                   uint16_t rx_cq_prod,
                   uint16_t rx_sge_prod)
{
    struct ustorm_eth_rx_producers rx_prods = { 0 };
    uint32_t i;

    /* update producers */
    rx_prods.bd_prod = rx_bd_prod;
    rx_prods.cqe_prod = rx_cq_prod;
    rx_prods.sge_prod = rx_sge_prod;

    /*
     * Make sure that the BD and SGE data is updated before updating the
     * producers since the FW might read the BD/SGE right after the producer
     * is updated.
     * This is only applicable for weak-ordered memory model archs such
     * as IA-64. The following barrier is also mandatory since the FW
     * assumes that BDs always have buffers.
     */
    wmb();

    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
        REG_WR(sc,
               (fp->ustorm_rx_prods_offset + (i * 4)),
               ((uint32_t *)&rx_prods)[i]);
    }

    wmb(); /* keep prod updates ordered */

    BLOGD(sc, DBG_RX,
          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
}

static void
bxe_init_rx_rings(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        fp->rx_bd_cons = 0;

        /*
         * Activate the BD ring...
10162 * Warning, this will generate an interrupt (to the TSTORM) 10163 * so this can only be done after the chip is initialized 10164 */ 10165 bxe_update_rx_prod(sc, fp, 10166 fp->rx_bd_prod, 10167 fp->rx_cq_prod, 10168 fp->rx_sge_prod); 10169 10170 if (i != 0) { 10171 continue; 10172 } 10173 10174 if (CHIP_IS_E1(sc)) { 10175 REG_WR(sc, 10176 (BAR_USTRORM_INTMEM + 10177 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))), 10178 U64_LO(fp->rcq_dma.paddr)); 10179 REG_WR(sc, 10180 (BAR_USTRORM_INTMEM + 10181 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4), 10182 U64_HI(fp->rcq_dma.paddr)); 10183 } 10184 } 10185 } 10186 10187 static void 10188 bxe_init_tx_ring_one(struct bxe_fastpath *fp) 10189 { 10190 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 10191 fp->tx_db.data.zero_fill1 = 0; 10192 fp->tx_db.data.prod = 0; 10193 10194 fp->tx_pkt_prod = 0; 10195 fp->tx_pkt_cons = 0; 10196 fp->tx_bd_prod = 0; 10197 fp->tx_bd_cons = 0; 10198 fp->eth_q_stats.tx_pkts = 0; 10199 } 10200 10201 static inline void 10202 bxe_init_tx_rings(struct bxe_softc *sc) 10203 { 10204 int i; 10205 10206 for (i = 0; i < sc->num_queues; i++) { 10207 #if 0 10208 uint8_t cos; 10209 for (cos = 0; cos < sc->max_cos; cos++) { 10210 bxe_init_tx_ring_one(&sc->fp[i].txdata[cos]); 10211 } 10212 #else 10213 bxe_init_tx_ring_one(&sc->fp[i]); 10214 #endif 10215 } 10216 } 10217 10218 static void 10219 bxe_init_def_sb(struct bxe_softc *sc) 10220 { 10221 struct host_sp_status_block *def_sb = sc->def_sb; 10222 bus_addr_t mapping = sc->def_sb_dma.paddr; 10223 int igu_sp_sb_index; 10224 int igu_seg_id; 10225 int port = SC_PORT(sc); 10226 int func = SC_FUNC(sc); 10227 int reg_offset, reg_offset_en5; 10228 uint64_t section; 10229 int index, sindex; 10230 struct hc_sp_status_block_data sp_sb_data; 10231 10232 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 10233 10234 if (CHIP_INT_MODE_IS_BC(sc)) { 10235 igu_sp_sb_index = DEF_SB_IGU_ID; 10236 igu_seg_id = HC_SEG_ACCESS_DEF; 10237 } else { 10238 igu_sp_sb_index = sc->igu_dsb_id; 10239 igu_seg_id = IGU_SEG_ACCESS_DEF; 10240 } 10241 10242 /* attentions */ 10243 section = ((uint64_t)mapping + 10244 offsetof(struct host_sp_status_block, atten_status_block)); 10245 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 10246 sc->attn_state = 0; 10247 10248 reg_offset = (port) ? 10249 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 10250 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 10251 reg_offset_en5 = (port) ? 10252 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 10253 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; 10254 10255 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 10256 /* take care of sig[0]..sig[4] */ 10257 for (sindex = 0; sindex < 4; sindex++) { 10258 sc->attn_group[index].sig[sindex] = 10259 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); 10260 } 10261 10262 if (!CHIP_IS_E1x(sc)) { 10263 /* 10264 * enable5 is separate from the rest of the registers, 10265 * and the address skip is 4 and not 16 between the 10266 * different groups 10267 */ 10268 sc->attn_group[index].sig[4] = 10269 REG_RD(sc, (reg_offset_en5 + (0x4 * index))); 10270 } else { 10271 sc->attn_group[index].sig[4] = 0; 10272 } 10273 } 10274 10275 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10276 reg_offset = (port) ? 
10277 HC_REG_ATTN_MSG1_ADDR_L : 10278 HC_REG_ATTN_MSG0_ADDR_L; 10279 REG_WR(sc, reg_offset, U64_LO(section)); 10280 REG_WR(sc, (reg_offset + 4), U64_HI(section)); 10281 } else if (!CHIP_IS_E1x(sc)) { 10282 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 10283 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 10284 } 10285 10286 section = ((uint64_t)mapping + 10287 offsetof(struct host_sp_status_block, sp_sb)); 10288 10289 bxe_zero_sp_sb(sc); 10290 10291 /* PCI guarantees endianity of regpair */ 10292 sp_sb_data.state = SB_ENABLED; 10293 sp_sb_data.host_sb_addr.lo = U64_LO(section); 10294 sp_sb_data.host_sb_addr.hi = U64_HI(section); 10295 sp_sb_data.igu_sb_id = igu_sp_sb_index; 10296 sp_sb_data.igu_seg_id = igu_seg_id; 10297 sp_sb_data.p_func.pf_id = func; 10298 sp_sb_data.p_func.vnic_id = SC_VN(sc); 10299 sp_sb_data.p_func.vf_id = 0xff; 10300 10301 bxe_wr_sp_sb_data(sc, &sp_sb_data); 10302 10303 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 10304 } 10305 10306 static void 10307 bxe_init_sp_ring(struct bxe_softc *sc) 10308 { 10309 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); 10310 sc->spq_prod_idx = 0; 10311 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; 10312 sc->spq_prod_bd = sc->spq; 10313 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); 10314 } 10315 10316 static void 10317 bxe_init_eq_ring(struct bxe_softc *sc) 10318 { 10319 union event_ring_elem *elem; 10320 int i; 10321 10322 for (i = 1; i <= NUM_EQ_PAGES; i++) { 10323 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; 10324 10325 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + 10326 BCM_PAGE_SIZE * 10327 (i % NUM_EQ_PAGES))); 10328 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + 10329 BCM_PAGE_SIZE * 10330 (i % NUM_EQ_PAGES))); 10331 } 10332 10333 sc->eq_cons = 0; 10334 sc->eq_prod = NUM_EQ_DESC; 10335 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; 10336 10337 atomic_store_rel_long(&sc->eq_spq_left, 10338 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), 10339 NUM_EQ_DESC) - 1)); 10340 } 10341 10342 static void 10343 bxe_init_internal_common(struct bxe_softc *sc) 10344 { 10345 int i; 10346 10347 if (IS_MF_SI(sc)) { 10348 /* 10349 * In switch independent mode, the TSTORM needs to accept 10350 * packets that failed classification, since approximate match 10351 * mac addresses aren't written to NIG LLH. 10352 */ 10353 REG_WR8(sc, 10354 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 10355 2); 10356 } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */ 10357 REG_WR8(sc, 10358 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 10359 0); 10360 } 10361 10362 /* 10363 * Zero this manually as its initialization is currently missing 10364 * in the initTool. 10365 */ 10366 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { 10367 REG_WR(sc, 10368 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 10369 0); 10370 } 10371 10372 if (!CHIP_IS_E1x(sc)) { 10373 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), 10374 CHIP_INT_MODE_IS_BC(sc) ? 
                HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
    }
}

static void
bxe_init_internal(struct bxe_softc *sc,
                  uint32_t load_code)
{
    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
        bxe_init_internal_common(sc);
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_PORT:
        /* nothing to do */
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
        /* internal memory per function is initialized inside bxe_pf_init */
        break;

    default:
        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
        break;
    }
}

static void
storm_memset_func_cfg(struct bxe_softc *sc,
                      struct tstorm_eth_function_common_config *tcfg,
                      uint16_t abs_fid)
{
    uint32_t addr;
    size_t size;

    addr = (BAR_TSTRORM_INTMEM +
            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
    size = sizeof(struct tstorm_eth_function_common_config);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
}

static void
bxe_func_init(struct bxe_softc *sc,
              struct bxe_func_init_params *p)
{
    struct tstorm_eth_function_common_config tcfg = { 0 };

    if (CHIP_IS_E1x(sc)) {
        storm_memset_func_cfg(sc, &tcfg, p->func_id);
    }

    /* Enable the function in the FW */
    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
    storm_memset_func_en(sc, p->func_id, 1);

    /* spq */
    if (p->func_flgs & FUNC_FLG_SPQ) {
        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
        REG_WR(sc,
               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
               p->spq_prod);
    }
}

/*
 * Calculates the per-VN min rates, which are needed for further normalizing
 * of the min rates, and stores them in input->vnic_min_rate[].
 * If all the min rates are 0 the fairness algorithm should be deactivated.
 * If not all min rates are zero, then those that are zero are set to
 * DEF_MIN_RATE.
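 *
 * Illustrative example (values assumed, not taken from real hardware): if
 * three VNs have FUNC_MF_CFG_MIN_BW fields of 25, 10 and 0, the resulting
 * vnic_min_rate[] entries are 2500, 1000 and DEF_MIN_RATE and fairness
 * stays enabled; if all three fields were 0, the
 * CMNG_FLAGS_PER_PORT_FAIRNESS_VN flag would be cleared instead.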
10448 */ 10449 static void 10450 bxe_calc_vn_min(struct bxe_softc *sc, 10451 struct cmng_init_input *input) 10452 { 10453 uint32_t vn_cfg; 10454 uint32_t vn_min_rate; 10455 int all_zero = 1; 10456 int vn; 10457 10458 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10459 vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 10460 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 10461 FUNC_MF_CFG_MIN_BW_SHIFT) * 100); 10462 10463 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 10464 /* skip hidden VNs */ 10465 vn_min_rate = 0; 10466 } else if (!vn_min_rate) { 10467 /* If min rate is zero - set it to 100 */ 10468 vn_min_rate = DEF_MIN_RATE; 10469 } else { 10470 all_zero = 0; 10471 } 10472 10473 input->vnic_min_rate[vn] = vn_min_rate; 10474 } 10475 10476 /* if ETS or all min rates are zeros - disable fairness */ 10477 if (BXE_IS_ETS_ENABLED(sc)) { 10478 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10479 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n"); 10480 } else if (all_zero) { 10481 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10482 BLOGD(sc, DBG_LOAD, 10483 "Fariness disabled (all MIN values are zeroes)\n"); 10484 } else { 10485 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10486 } 10487 } 10488 10489 static inline uint16_t 10490 bxe_extract_max_cfg(struct bxe_softc *sc, 10491 uint32_t mf_cfg) 10492 { 10493 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 10494 FUNC_MF_CFG_MAX_BW_SHIFT); 10495 10496 if (!max_cfg) { 10497 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n"); 10498 max_cfg = 100; 10499 } 10500 10501 return (max_cfg); 10502 } 10503 10504 static void 10505 bxe_calc_vn_max(struct bxe_softc *sc, 10506 int vn, 10507 struct cmng_init_input *input) 10508 { 10509 uint16_t vn_max_rate; 10510 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 10511 uint32_t max_cfg; 10512 10513 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 10514 vn_max_rate = 0; 10515 } else { 10516 max_cfg = bxe_extract_max_cfg(sc, vn_cfg); 10517 10518 if (IS_MF_SI(sc)) { 10519 /* max_cfg in percents of linkspeed */ 10520 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100); 10521 } else { /* SD modes */ 10522 /* max_cfg is absolute in 100Mb units */ 10523 vn_max_rate = (max_cfg * 100); 10524 } 10525 } 10526 10527 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); 10528 10529 input->vnic_max_rate[vn] = vn_max_rate; 10530 } 10531 10532 static void 10533 bxe_cmng_fns_init(struct bxe_softc *sc, 10534 uint8_t read_cfg, 10535 uint8_t cmng_type) 10536 { 10537 struct cmng_init_input input; 10538 int vn; 10539 10540 memset(&input, 0, sizeof(struct cmng_init_input)); 10541 10542 input.port_rate = sc->link_vars.line_speed; 10543 10544 if (cmng_type == CMNG_FNS_MINMAX) { 10545 /* read mf conf from shmem */ 10546 if (read_cfg) { 10547 bxe_read_mf_cfg(sc); 10548 } 10549 10550 /* get VN min rate and enable fairness if not 0 */ 10551 bxe_calc_vn_min(sc, &input); 10552 10553 /* get VN max rate */ 10554 if (sc->port.pmf) { 10555 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10556 bxe_calc_vn_max(sc, vn, &input); 10557 } 10558 } 10559 10560 /* always enable rate shaping and fairness */ 10561 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 10562 10563 ecore_init_cmng(&input, &sc->cmng); 10564 return; 10565 } 10566 10567 /* rate shaping and fairness are disabled */ 10568 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n"); 10569 } 10570 10571 static int 10572 bxe_get_cmng_fns_mode(struct bxe_softc *sc) 10573 { 10574 if 
(CHIP_REV_IS_SLOW(sc)) { 10575 return (CMNG_FNS_NONE); 10576 } 10577 10578 if (IS_MF(sc)) { 10579 return (CMNG_FNS_MINMAX); 10580 } 10581 10582 return (CMNG_FNS_NONE); 10583 } 10584 10585 static void 10586 storm_memset_cmng(struct bxe_softc *sc, 10587 struct cmng_init *cmng, 10588 uint8_t port) 10589 { 10590 int vn; 10591 int func; 10592 uint32_t addr; 10593 size_t size; 10594 10595 addr = (BAR_XSTRORM_INTMEM + 10596 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); 10597 size = sizeof(struct cmng_struct_per_port); 10598 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port); 10599 10600 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10601 func = func_by_vn(sc, vn); 10602 10603 addr = (BAR_XSTRORM_INTMEM + 10604 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); 10605 size = sizeof(struct rate_shaping_vars_per_vn); 10606 ecore_storm_memset_struct(sc, addr, size, 10607 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]); 10608 10609 addr = (BAR_XSTRORM_INTMEM + 10610 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); 10611 size = sizeof(struct fairness_vars_per_vn); 10612 ecore_storm_memset_struct(sc, addr, size, 10613 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]); 10614 } 10615 } 10616 10617 static void 10618 bxe_pf_init(struct bxe_softc *sc) 10619 { 10620 struct bxe_func_init_params func_init = { 0 }; 10621 struct event_ring_data eq_data = { { 0 } }; 10622 uint16_t flags; 10623 10624 if (!CHIP_IS_E1x(sc)) { 10625 /* reset IGU PF statistics: MSIX + ATTN */ 10626 /* PF */ 10627 REG_WR(sc, 10628 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 10629 (BXE_IGU_STAS_MSG_VF_CNT * 4) + 10630 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 10631 0); 10632 /* ATTN */ 10633 REG_WR(sc, 10634 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 10635 (BXE_IGU_STAS_MSG_VF_CNT * 4) + 10636 (BXE_IGU_STAS_MSG_PF_CNT * 4) + 10637 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 10638 0); 10639 } 10640 10641 /* function setup flags */ 10642 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); 10643 10644 /* 10645 * This flag is relevant for E1x only. 10646 * E2 doesn't have a TPA configuration in a function level. 10647 */ 10648 flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0; 10649 10650 func_init.func_flgs = flags; 10651 func_init.pf_id = SC_FUNC(sc); 10652 func_init.func_id = SC_FUNC(sc); 10653 func_init.spq_map = sc->spq_dma.paddr; 10654 func_init.spq_prod = sc->spq_prod_idx; 10655 10656 bxe_func_init(sc, &func_init); 10657 10658 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); 10659 10660 /* 10661 * Congestion management values depend on the link rate. 10662 * There is no active link so initial link rate is set to 10Gbps. 10663 * When the link comes up the congestion management values are 10664 * re-calculated according to the actual link rate. 10665 */ 10666 sc->link_vars.line_speed = SPEED_10000; 10667 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc)); 10668 10669 /* Only the PMF sets the HW */ 10670 if (sc->port.pmf) { 10671 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 10672 } 10673 10674 /* init Event Queue - PCI bus guarantees correct endainity */ 10675 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); 10676 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); 10677 eq_data.producer = sc->eq_prod; 10678 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 10679 eq_data.sb_id = DEF_SB_ID; 10680 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); 10681 } 10682 10683 static void 10684 bxe_hc_int_enable(struct bxe_softc *sc) 10685 { 10686 int port = SC_PORT(sc); 10687 uint32_t addr = (port) ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10688 uint32_t val = REG_RD(sc, addr); 10689 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10690 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10691 (sc->intr_count == 1)) ? TRUE : FALSE; 10692 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10693 10694 if (msix) { 10695 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10696 HC_CONFIG_0_REG_INT_LINE_EN_0); 10697 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10698 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10699 if (single_msix) { 10700 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 10701 } 10702 } else if (msi) { 10703 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 10704 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10705 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10706 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10707 } else { 10708 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10709 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10710 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10711 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10712 10713 if (!CHIP_IS_E1(sc)) { 10714 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", 10715 val, port, addr); 10716 10717 REG_WR(sc, addr, val); 10718 10719 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 10720 } 10721 } 10722 10723 if (CHIP_IS_E1(sc)) { 10724 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF); 10725 } 10726 10727 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", 10728 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); 10729 10730 REG_WR(sc, addr, val); 10731 10732 /* ensure that HC_CONFIG is written before leading/trailing edge config */ 10733 mb(); 10734 10735 if (!CHIP_IS_E1(sc)) { 10736 /* init leading/trailing edge */ 10737 if (IS_MF(sc)) { 10738 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10739 if (sc->port.pmf) { 10740 /* enable nig and gpio3 attention */ 10741 val |= 0x1100; 10742 } 10743 } else { 10744 val = 0xffff; 10745 } 10746 10747 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val); 10748 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val); 10749 } 10750 10751 /* make sure that interrupts are indeed enabled from here on */ 10752 mb(); 10753 } 10754 10755 static void 10756 bxe_igu_int_enable(struct bxe_softc *sc) 10757 { 10758 uint32_t val; 10759 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10760 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10761 (sc->intr_count == 1)) ? TRUE : FALSE; 10762 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10763 10764 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10765 10766 if (msix) { 10767 val &= ~(IGU_PF_CONF_INT_LINE_EN | 10768 IGU_PF_CONF_SINGLE_ISR_EN); 10769 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10770 IGU_PF_CONF_ATTN_BIT_EN); 10771 if (single_msix) { 10772 val |= IGU_PF_CONF_SINGLE_ISR_EN; 10773 } 10774 } else if (msi) { 10775 val &= ~IGU_PF_CONF_INT_LINE_EN; 10776 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10777 IGU_PF_CONF_ATTN_BIT_EN | 10778 IGU_PF_CONF_SINGLE_ISR_EN); 10779 } else { 10780 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 10781 val |= (IGU_PF_CONF_INT_LINE_EN | 10782 IGU_PF_CONF_ATTN_BIT_EN | 10783 IGU_PF_CONF_SINGLE_ISR_EN); 10784 } 10785 10786 /* clean previous status - need to configure igu prior to ack*/ 10787 if ((!msix) || single_msix) { 10788 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10789 bxe_ack_int(sc); 10790 } 10791 10792 val |= IGU_PF_CONF_FUNC_EN; 10793 10794 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n", 10795 val, ((msix) ? "MSI-X" : ((msi) ? 
"MSI" : "INTx"))); 10796 10797 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10798 10799 mb(); 10800 10801 /* init leading/trailing edge */ 10802 if (IS_MF(sc)) { 10803 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10804 if (sc->port.pmf) { 10805 /* enable nig and gpio3 attention */ 10806 val |= 0x1100; 10807 } 10808 } else { 10809 val = 0xffff; 10810 } 10811 10812 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 10813 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 10814 10815 /* make sure that interrupts are indeed enabled from here on */ 10816 mb(); 10817 } 10818 10819 static void 10820 bxe_int_enable(struct bxe_softc *sc) 10821 { 10822 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10823 bxe_hc_int_enable(sc); 10824 } else { 10825 bxe_igu_int_enable(sc); 10826 } 10827 } 10828 10829 static void 10830 bxe_hc_int_disable(struct bxe_softc *sc) 10831 { 10832 int port = SC_PORT(sc); 10833 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10834 uint32_t val = REG_RD(sc, addr); 10835 10836 /* 10837 * In E1 we must use only PCI configuration space to disable MSI/MSIX 10838 * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC 10839 * block 10840 */ 10841 if (CHIP_IS_E1(sc)) { 10842 /* 10843 * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register 10844 * to prevent from HC sending interrupts after we exit the function 10845 */ 10846 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0); 10847 10848 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10849 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10850 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10851 } else { 10852 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10853 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10854 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10855 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10856 } 10857 10858 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); 10859 10860 /* flush all outstanding writes */ 10861 mb(); 10862 10863 REG_WR(sc, addr, val); 10864 if (REG_RD(sc, addr) != val) { 10865 BLOGE(sc, "proper val not read from HC IGU!\n"); 10866 } 10867 } 10868 10869 static void 10870 bxe_igu_int_disable(struct bxe_softc *sc) 10871 { 10872 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10873 10874 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 10875 IGU_PF_CONF_INT_LINE_EN | 10876 IGU_PF_CONF_ATTN_BIT_EN); 10877 10878 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val); 10879 10880 /* flush all outstanding writes */ 10881 mb(); 10882 10883 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10884 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { 10885 BLOGE(sc, "proper val not read from IGU!\n"); 10886 } 10887 } 10888 10889 static void 10890 bxe_int_disable(struct bxe_softc *sc) 10891 { 10892 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10893 bxe_hc_int_disable(sc); 10894 } else { 10895 bxe_igu_int_disable(sc); 10896 } 10897 } 10898 10899 static void 10900 bxe_nic_init(struct bxe_softc *sc, 10901 int load_code) 10902 { 10903 int i; 10904 10905 for (i = 0; i < sc->num_queues; i++) { 10906 bxe_init_eth_fp(sc, i); 10907 } 10908 10909 rmb(); /* ensure status block indices were read */ 10910 10911 bxe_init_rx_rings(sc); 10912 bxe_init_tx_rings(sc); 10913 10914 if (IS_VF(sc)) { 10915 return; 10916 } 10917 10918 /* initialize MOD_ABS interrupts */ 10919 elink_init_mod_abs_int(sc, &sc->link_vars, 10920 sc->devinfo.chip_id, 10921 sc->devinfo.shmem_base, 10922 sc->devinfo.shmem2_base, 10923 SC_PORT(sc)); 10924 10925 bxe_init_def_sb(sc); 10926 bxe_update_dsb_idx(sc); 10927 bxe_init_sp_ring(sc); 10928 bxe_init_eq_ring(sc); 10929 bxe_init_internal(sc, load_code); 10930 bxe_pf_init(sc); 10931 
bxe_stats_init(sc); 10932 10933 /* flush all before enabling interrupts */ 10934 mb(); 10935 10936 bxe_int_enable(sc); 10937 10938 /* check for SPIO5 */ 10939 bxe_attn_int_deasserted0(sc, 10940 REG_RD(sc, 10941 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 10942 SC_PORT(sc)*4)) & 10943 AEU_INPUTS_ATTN_BITS_SPIO5); 10944 } 10945 10946 static inline void 10947 bxe_init_objs(struct bxe_softc *sc) 10948 { 10949 /* mcast rules must be added to tx if tx switching is enabled */ 10950 ecore_obj_type o_type = 10951 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX : 10952 ECORE_OBJ_TYPE_RX; 10953 10954 /* RX_MODE controlling object */ 10955 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); 10956 10957 /* multicast configuration controlling object */ 10958 ecore_init_mcast_obj(sc, 10959 &sc->mcast_obj, 10960 sc->fp[0].cl_id, 10961 sc->fp[0].index, 10962 SC_FUNC(sc), 10963 SC_FUNC(sc), 10964 BXE_SP(sc, mcast_rdata), 10965 BXE_SP_MAPPING(sc, mcast_rdata), 10966 ECORE_FILTER_MCAST_PENDING, 10967 &sc->sp_state, 10968 o_type); 10969 10970 /* Setup CAM credit pools */ 10971 ecore_init_mac_credit_pool(sc, 10972 &sc->macs_pool, 10973 SC_FUNC(sc), 10974 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10975 VNICS_PER_PATH(sc)); 10976 10977 ecore_init_vlan_credit_pool(sc, 10978 &sc->vlans_pool, 10979 SC_ABS_FUNC(sc) >> 1, 10980 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10981 VNICS_PER_PATH(sc)); 10982 10983 /* RSS configuration object */ 10984 ecore_init_rss_config_obj(sc, 10985 &sc->rss_conf_obj, 10986 sc->fp[0].cl_id, 10987 sc->fp[0].index, 10988 SC_FUNC(sc), 10989 SC_FUNC(sc), 10990 BXE_SP(sc, rss_rdata), 10991 BXE_SP_MAPPING(sc, rss_rdata), 10992 ECORE_FILTER_RSS_CONF_PENDING, 10993 &sc->sp_state, ECORE_OBJ_TYPE_RX); 10994 } 10995 10996 /* 10997 * Initialize the function. This must be called before sending CLIENT_SETUP 10998 * for the first client. 
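 * The FUNC_START ramrod prepared below carries the multi-function mode,
 * the SD outer VLAN tag and the network CoS mode that the firmware uses
 * from this point on (a brief summary of the fields set in
 * bxe_func_start()).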
10999 */ 11000 static inline int 11001 bxe_func_start(struct bxe_softc *sc) 11002 { 11003 struct ecore_func_state_params func_params = { NULL }; 11004 struct ecore_func_start_params *start_params = &func_params.params.start; 11005 11006 /* Prepare parameters for function state transitions */ 11007 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 11008 11009 func_params.f_obj = &sc->func_obj; 11010 func_params.cmd = ECORE_F_CMD_START; 11011 11012 /* Function parameters */ 11013 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; 11014 start_params->sd_vlan_tag = OVLAN(sc); 11015 11016 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 11017 start_params->network_cos_mode = STATIC_COS; 11018 } else { /* CHIP_IS_E1X */ 11019 start_params->network_cos_mode = FW_WRR; 11020 } 11021 11022 start_params->gre_tunnel_mode = 0; 11023 start_params->gre_tunnel_rss = 0; 11024 11025 return (ecore_func_state_change(sc, &func_params)); 11026 } 11027 11028 static int 11029 bxe_set_power_state(struct bxe_softc *sc, 11030 uint8_t state) 11031 { 11032 uint16_t pmcsr; 11033 11034 /* If there is no power capability, silently succeed */ 11035 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { 11036 BLOGW(sc, "No power capability\n"); 11037 return (0); 11038 } 11039 11040 pmcsr = pci_read_config(sc->dev, 11041 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11042 2); 11043 11044 switch (state) { 11045 case PCI_PM_D0: 11046 pci_write_config(sc->dev, 11047 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11048 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); 11049 11050 if (pmcsr & PCIM_PSTAT_DMASK) { 11051 /* delay required during transition out of D3hot */ 11052 DELAY(20000); 11053 } 11054 11055 break; 11056 11057 case PCI_PM_D3hot: 11058 /* XXX if there are other clients above don't shut down the power */ 11059 11060 /* don't shut down the power for emulation and FPGA */ 11061 if (CHIP_REV_IS_SLOW(sc)) { 11062 return (0); 11063 } 11064 11065 pmcsr &= ~PCIM_PSTAT_DMASK; 11066 pmcsr |= PCIM_PSTAT_D3; 11067 11068 if (sc->wol) { 11069 pmcsr |= PCIM_PSTAT_PMEENABLE; 11070 } 11071 11072 pci_write_config(sc->dev, 11073 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11074 pmcsr, 4); 11075 11076 /* 11077 * No more memory access after this point until device is brought back 11078 * to D0 state. 
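         * (If WoL is enabled, PME generation stays armed through
         * PCIM_PSTAT_PMEENABLE above so the device can still wake the
         * system from D3hot.)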
         */
        break;

    default:
        BLOGE(sc, "Can't support PCI power state = %d\n", state);
        return (-1);
    }

    return (0);
}


/* returns TRUE if the lock was acquired successfully */
static uint8_t
bxe_trylock_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);

    /* Validating that the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGD(sc, DBG_LOAD,
              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
              resource, HW_LOCK_MAX_RESOURCE_VALUE);
        return (FALSE);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
    } else {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
    }

    /* try to acquire the lock */
    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        return (TRUE);
    }

    BLOGE(sc, "Failed to get a resource lock 0x%x\n", resource);

    return (FALSE);
}

/*
 * Get the recovery leader resource id according to the engine this function
 * belongs to. Currently only 2 engines are supported.
 */
static int
bxe_get_leader_lock_resource(struct bxe_softc *sc)
{
    if (SC_PATH(sc)) {
        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
    } else {
        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
    }
}

/* try to acquire a leader lock for the current engine */
static uint8_t
bxe_trylock_leader_lock(struct bxe_softc *sc)
{
    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
}

static int
bxe_release_leader_lock(struct bxe_softc *sc)
{
    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
}

/* close gates #2, #3 and #4 */
static void
bxe_set_234_gates(struct bxe_softc *sc,
                  uint8_t close)
{
    uint32_t val;

    /* gates #2 and #4a are closed/opened for "not E1" only */
    if (!CHIP_IS_E1(sc)) {
        /* #4 */
        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
        /* #2 */
        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
    }

    /* #3 */
    if (CHIP_IS_E1x(sc)) {
        /* prevent interrupts from HC on both ports */
        val = REG_RD(sc, HC_REG_CONFIG_1);
        REG_WR(sc, HC_REG_CONFIG_1,
               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
                          (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));

        val = REG_RD(sc, HC_REG_CONFIG_0);
        REG_WR(sc, HC_REG_CONFIG_0,
               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
                          (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
    } else {
        /* Prevent incoming interrupts in IGU */
        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);

        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
               (!close) ?
               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
    }

    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
          close ?
"closing" : "opening"); 11195 11196 wmb(); 11197 } 11198 11199 /* poll for pending writes bit, it should get cleared in no more than 1s */ 11200 static int 11201 bxe_er_poll_igu_vq(struct bxe_softc *sc) 11202 { 11203 uint32_t cnt = 1000; 11204 uint32_t pend_bits = 0; 11205 11206 do { 11207 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); 11208 11209 if (pend_bits == 0) { 11210 break; 11211 } 11212 11213 DELAY(1000); 11214 } while (--cnt > 0); 11215 11216 if (cnt == 0) { 11217 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); 11218 return (-1); 11219 } 11220 11221 return (0); 11222 } 11223 11224 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ 11225 11226 static void 11227 bxe_clp_reset_prep(struct bxe_softc *sc, 11228 uint32_t *magic_val) 11229 { 11230 /* Do some magic... */ 11231 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 11232 *magic_val = val & SHARED_MF_CLP_MAGIC; 11233 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 11234 } 11235 11236 /* restore the value of the 'magic' bit */ 11237 static void 11238 bxe_clp_reset_done(struct bxe_softc *sc, 11239 uint32_t magic_val) 11240 { 11241 /* Restore the 'magic' bit value... */ 11242 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 11243 MFCFG_WR(sc, shared_mf_config.clp_mb, 11244 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 11245 } 11246 11247 /* prepare for MCP reset, takes care of CLP configurations */ 11248 static void 11249 bxe_reset_mcp_prep(struct bxe_softc *sc, 11250 uint32_t *magic_val) 11251 { 11252 uint32_t shmem; 11253 uint32_t validity_offset; 11254 11255 /* set `magic' bit in order to save MF config */ 11256 if (!CHIP_IS_E1(sc)) { 11257 bxe_clp_reset_prep(sc, magic_val); 11258 } 11259 11260 /* get shmem offset */ 11261 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 11262 validity_offset = 11263 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); 11264 11265 /* Clear validity map flags */ 11266 if (shmem > 0) { 11267 REG_WR(sc, shmem + validity_offset, 0); 11268 } 11269 } 11270 11271 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 11272 #define MCP_ONE_TIMEOUT 100 /* 100 ms */ 11273 11274 static void 11275 bxe_mcp_wait_one(struct bxe_softc *sc) 11276 { 11277 /* special handling for emulation and FPGA (10 times longer) */ 11278 if (CHIP_REV_IS_SLOW(sc)) { 11279 DELAY((MCP_ONE_TIMEOUT*10) * 1000); 11280 } else { 11281 DELAY((MCP_ONE_TIMEOUT) * 1000); 11282 } 11283 } 11284 11285 /* initialize shmem_base and waits for validity signature to appear */ 11286 static int 11287 bxe_init_shmem(struct bxe_softc *sc) 11288 { 11289 int cnt = 0; 11290 uint32_t val = 0; 11291 11292 do { 11293 sc->devinfo.shmem_base = 11294 sc->link_params.shmem_base = 11295 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 11296 11297 if (sc->devinfo.shmem_base) { 11298 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 11299 if (val & SHR_MEM_VALIDITY_MB) 11300 return (0); 11301 } 11302 11303 bxe_mcp_wait_one(sc); 11304 11305 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 11306 11307 BLOGE(sc, "BAD MCP validity signature\n"); 11308 11309 return (-1); 11310 } 11311 11312 static int 11313 bxe_reset_mcp_comp(struct bxe_softc *sc, 11314 uint32_t magic_val) 11315 { 11316 int rc = bxe_init_shmem(sc); 11317 11318 /* Restore the `magic' bit value */ 11319 if (!CHIP_IS_E1(sc)) { 11320 bxe_clp_reset_done(sc, magic_val); 11321 } 11322 11323 return (rc); 11324 } 11325 11326 static void 11327 bxe_pxp_prep(struct bxe_softc *sc) 11328 { 11329 if (!CHIP_IS_E1(sc)) { 11330 REG_WR(sc, PXP2_REG_RD_START_INIT, 0); 11331 
        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
        wmb();
    }
}

/*
 * Reset the whole chip except for:
 * - PCIE core
 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
 * - IGU
 * - MISC (including AEU)
 * - GRC
 * - RBCN, RBCP
 */
static void
bxe_process_kill_chip_reset(struct bxe_softc *sc,
                            uint8_t global)
{
    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
    uint32_t global_bits2, stay_reset2;

    /*
     * Bits that have to be set in reset_mask2 if we want to reset 'global'
     * (per chip) blocks.
     */
    global_bits2 =
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;

    /*
     * Don't reset the following blocks.
     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
     *            reset, as in a 4-port device they might still be owned
     *            by the MCP (there is only one leader per path).
     */
    not_reset_mask1 =
        MISC_REGISTERS_RESET_REG_1_RST_HC |
        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
        MISC_REGISTERS_RESET_REG_1_RST_PXP;

    not_reset_mask2 =
        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
        MISC_REGISTERS_RESET_REG_2_RST_GRC |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
        MISC_REGISTERS_RESET_REG_2_RST_ATC |
        MISC_REGISTERS_RESET_REG_2_PGLC |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
        MISC_REGISTERS_RESET_REG_2_UMAC0 |
        MISC_REGISTERS_RESET_REG_2_UMAC1;

    /*
     * Keep the following blocks in reset:
     * - all xxMACs are handled by the elink code.
     */
    stay_reset2 =
        MISC_REGISTERS_RESET_REG_2_XMAC |
        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;

    /* Full reset masks according to the chip */
    reset_mask1 = 0xffffffff;

    if (CHIP_IS_E1(sc))
        reset_mask2 = 0xffff;
    else if (CHIP_IS_E1H(sc))
        reset_mask2 = 0x1ffff;
    else if (CHIP_IS_E2(sc))
        reset_mask2 = 0xfffff;
    else /* CHIP_IS_E3 */
        reset_mask2 = 0x3ffffff;

    /* Don't reset global blocks unless we need to */
    if (!global)
        reset_mask2 &= ~global_bits2;

    /*
     * In case of attention in the QM, we need to reset PXP
     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
     * because otherwise QM reset would release 'close the gates' shortly
     * before resetting the PXP, then the PSWRQ would send a write
     * request to PGLUE. Then when PXP is reset, PGLUE would try to
     * read the payload data from PSWWR, but PSWWR would not
     * respond. The write queue in PGLUE would get stuck and dmae
     * commands would not return. Therefore it's important to reset the
     * second reset register (containing the
     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
     * bit).
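     * Hence, in the register writes below, the second reset register
     * (REG_2) is always written before the first one (REG_1), both when
     * the blocks are put into reset and when they are released again.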
11426 */ 11427 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 11428 reset_mask2 & (~not_reset_mask2)); 11429 11430 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 11431 reset_mask1 & (~not_reset_mask1)); 11432 11433 mb(); 11434 wmb(); 11435 11436 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 11437 reset_mask2 & (~stay_reset2)); 11438 11439 mb(); 11440 wmb(); 11441 11442 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 11443 wmb(); 11444 } 11445 11446 static int 11447 bxe_process_kill(struct bxe_softc *sc, 11448 uint8_t global) 11449 { 11450 int cnt = 1000; 11451 uint32_t val = 0; 11452 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 11453 uint32_t tags_63_32 = 0; 11454 11455 /* Empty the Tetris buffer, wait for 1s */ 11456 do { 11457 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); 11458 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); 11459 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); 11460 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); 11461 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); 11462 if (CHIP_IS_E3(sc)) { 11463 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); 11464 } 11465 11466 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 11467 ((port_is_idle_0 & 0x1) == 0x1) && 11468 ((port_is_idle_1 & 0x1) == 0x1) && 11469 (pgl_exp_rom2 == 0xffffffff) && 11470 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) 11471 break; 11472 DELAY(1000); 11473 } while (cnt-- > 0); 11474 11475 if (cnt <= 0) { 11476 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there " 11477 "are still outstanding read requests after 1s! " 11478 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " 11479 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 11480 sr_cnt, blk_cnt, port_is_idle_0, 11481 port_is_idle_1, pgl_exp_rom2); 11482 return (-1); 11483 } 11484 11485 mb(); 11486 11487 /* Close gates #2, #3 and #4 */ 11488 bxe_set_234_gates(sc, TRUE); 11489 11490 /* Poll for IGU VQs for 57712 and newer chips */ 11491 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { 11492 return (-1); 11493 } 11494 11495 /* XXX indicate that "process kill" is in progress to MCP */ 11496 11497 /* clear "unprepared" bit */ 11498 REG_WR(sc, MISC_REG_UNPREPARED, 0); 11499 mb(); 11500 11501 /* Make sure all is written to the chip before the reset */ 11502 wmb(); 11503 11504 /* 11505 * Wait for 1ms to empty GLUE and PCI-E core queues, 11506 * PSWHST, GRC and PSWRD Tetris buffer. 
11507 */ 11508 DELAY(1000); 11509 11510 /* Prepare to chip reset: */ 11511 /* MCP */ 11512 if (global) { 11513 bxe_reset_mcp_prep(sc, &val); 11514 } 11515 11516 /* PXP */ 11517 bxe_pxp_prep(sc); 11518 mb(); 11519 11520 /* reset the chip */ 11521 bxe_process_kill_chip_reset(sc, global); 11522 mb(); 11523 11524 /* clear errors in PGB */ 11525 if (!CHIP_IS_E1(sc)) 11526 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); 11527 11528 /* Recover after reset: */ 11529 /* MCP */ 11530 if (global && bxe_reset_mcp_comp(sc, val)) { 11531 return (-1); 11532 } 11533 11534 /* XXX add resetting the NO_MCP mode DB here */ 11535 11536 /* Open the gates #2, #3 and #4 */ 11537 bxe_set_234_gates(sc, FALSE); 11538 11539 /* XXX 11540 * IGU/AEU preparation bring back the AEU/IGU to a reset state 11541 * re-enable attentions 11542 */ 11543 11544 return (0); 11545 } 11546 11547 static int 11548 bxe_leader_reset(struct bxe_softc *sc) 11549 { 11550 int rc = 0; 11551 uint8_t global = bxe_reset_is_global(sc); 11552 uint32_t load_code; 11553 11554 /* 11555 * If not going to reset MCP, load "fake" driver to reset HW while 11556 * driver is owner of the HW. 11557 */ 11558 if (!global && !BXE_NOMCP(sc)) { 11559 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 11560 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 11561 if (!load_code) { 11562 BLOGE(sc, "MCP response failure, aborting\n"); 11563 rc = -1; 11564 goto exit_leader_reset; 11565 } 11566 11567 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 11568 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 11569 BLOGE(sc, "MCP unexpected response, aborting\n"); 11570 rc = -1; 11571 goto exit_leader_reset2; 11572 } 11573 11574 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 11575 if (!load_code) { 11576 BLOGE(sc, "MCP response failure, aborting\n"); 11577 rc = -1; 11578 goto exit_leader_reset2; 11579 } 11580 } 11581 11582 /* try to recover after the failure */ 11583 if (bxe_process_kill(sc, global)) { 11584 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc)); 11585 rc = -1; 11586 goto exit_leader_reset2; 11587 } 11588 11589 /* 11590 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver 11591 * state. 11592 */ 11593 bxe_set_reset_done(sc); 11594 if (global) { 11595 bxe_clear_reset_global(sc); 11596 } 11597 11598 exit_leader_reset2: 11599 11600 /* unload "fake driver" if it was loaded */ 11601 if (!global && !BXE_NOMCP(sc)) { 11602 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 11603 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 11604 } 11605 11606 exit_leader_reset: 11607 11608 sc->is_leader = 0; 11609 bxe_release_leader_lock(sc); 11610 11611 mb(); 11612 return (rc); 11613 } 11614 11615 /* 11616 * prepare INIT transition, parameters configured: 11617 * - HC configuration 11618 * - Queue's CDU context 11619 */ 11620 static void 11621 bxe_pf_q_prep_init(struct bxe_softc *sc, 11622 struct bxe_fastpath *fp, 11623 struct ecore_queue_init_params *init_params) 11624 { 11625 uint8_t cos; 11626 int cxt_index, cxt_offset; 11627 11628 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); 11629 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); 11630 11631 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); 11632 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); 11633 11634 /* HC rate */ 11635 init_params->rx.hc_rate = 11636 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; 11637 init_params->tx.hc_rate = 11638 sc->hc_tx_ticks ? 
(1000000 / sc->hc_tx_ticks) : 0; 11639 11640 /* FW SB ID */ 11641 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; 11642 11643 /* CQ index among the SB indices */ 11644 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11645 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 11646 11647 /* set maximum number of COSs supported by this queue */ 11648 init_params->max_cos = sc->max_cos; 11649 11650 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", 11651 fp->index, init_params->max_cos); 11652 11653 /* set the context pointers queue object */ 11654 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 11655 /* XXX change index/cid here if ever support multiple tx CoS */ 11656 /* fp->txdata[cos]->cid */ 11657 cxt_index = fp->index / ILT_PAGE_CIDS; 11658 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); 11659 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; 11660 } 11661 } 11662 11663 /* set flags that are common for the Tx-only and not normal connections */ 11664 static unsigned long 11665 bxe_get_common_flags(struct bxe_softc *sc, 11666 struct bxe_fastpath *fp, 11667 uint8_t zero_stats) 11668 { 11669 unsigned long flags = 0; 11670 11671 /* PF driver will always initialize the Queue to an ACTIVE state */ 11672 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); 11673 11674 /* 11675 * tx only connections collect statistics (on the same index as the 11676 * parent connection). The statistics are zeroed when the parent 11677 * connection is initialized. 11678 */ 11679 11680 bxe_set_bit(ECORE_Q_FLG_STATS, &flags); 11681 if (zero_stats) { 11682 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); 11683 } 11684 11685 /* 11686 * tx only connections can support tx-switching, though their 11687 * CoS-ness doesn't survive the loopback 11688 */ 11689 if (sc->flags & BXE_TX_SWITCHING) { 11690 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); 11691 } 11692 11693 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); 11694 11695 return (flags); 11696 } 11697 11698 static unsigned long 11699 bxe_get_q_flags(struct bxe_softc *sc, 11700 struct bxe_fastpath *fp, 11701 uint8_t leading) 11702 { 11703 unsigned long flags = 0; 11704 11705 if (IS_MF_SD(sc)) { 11706 bxe_set_bit(ECORE_Q_FLG_OV, &flags); 11707 } 11708 11709 if (if_getcapenable(sc->ifp) & IFCAP_LRO) { 11710 bxe_set_bit(ECORE_Q_FLG_TPA, &flags); 11711 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); 11712 #if 0 11713 if (fp->mode == TPA_MODE_GRO) 11714 __set_bit(ECORE_Q_FLG_TPA_GRO, &flags); 11715 #endif 11716 } 11717 11718 if (leading) { 11719 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); 11720 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); 11721 } 11722 11723 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); 11724 11725 #if 0 11726 /* configure silent vlan removal */ 11727 if (IS_MF_AFEX(sc)) { 11728 bxe_set_bit(ECORE_Q_FLG_SILENT_VLAN_REM, &flags); 11729 } 11730 #endif 11731 11732 /* merge with common flags */ 11733 return (flags | bxe_get_common_flags(sc, fp, TRUE)); 11734 } 11735 11736 static void 11737 bxe_pf_q_prep_general(struct bxe_softc *sc, 11738 struct bxe_fastpath *fp, 11739 struct ecore_general_setup_params *gen_init, 11740 uint8_t cos) 11741 { 11742 gen_init->stat_id = bxe_stats_id(fp); 11743 gen_init->spcl_id = fp->cl_id; 11744 gen_init->mtu = sc->mtu; 11745 gen_init->cos = cos; 11746 } 11747 11748 static void 11749 bxe_pf_rx_q_prep(struct bxe_softc *sc, 11750 struct bxe_fastpath *fp, 11751 struct rxq_pause_params *pause, 11752 struct ecore_rxq_setup_params *rxq_init) 11753 { 11754 uint8_t max_sge = 0; 
11755 uint16_t sge_sz = 0; 11756 uint16_t tpa_agg_size = 0; 11757 11758 pause->sge_th_lo = SGE_TH_LO(sc); 11759 pause->sge_th_hi = SGE_TH_HI(sc); 11760 11761 /* validate SGE ring has enough to cross high threshold */ 11762 if (sc->dropless_fc && 11763 (pause->sge_th_hi + FW_PREFETCH_CNT) > 11764 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) { 11765 BLOGW(sc, "sge ring threshold limit\n"); 11766 } 11767 11768 /* minimum max_aggregation_size is 2*MTU (two full buffers) */ 11769 tpa_agg_size = (2 * sc->mtu); 11770 if (tpa_agg_size < sc->max_aggregation_size) { 11771 tpa_agg_size = sc->max_aggregation_size; 11772 } 11773 11774 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT; 11775 max_sge = ((max_sge + PAGES_PER_SGE - 1) & 11776 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; 11777 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff); 11778 11779 /* pause - not for e1 */ 11780 if (!CHIP_IS_E1(sc)) { 11781 pause->bd_th_lo = BD_TH_LO(sc); 11782 pause->bd_th_hi = BD_TH_HI(sc); 11783 11784 pause->rcq_th_lo = RCQ_TH_LO(sc); 11785 pause->rcq_th_hi = RCQ_TH_HI(sc); 11786 11787 /* validate rings have enough entries to cross high thresholds */ 11788 if (sc->dropless_fc && 11789 pause->bd_th_hi + FW_PREFETCH_CNT > 11790 sc->rx_ring_size) { 11791 BLOGW(sc, "rx bd ring threshold limit\n"); 11792 } 11793 11794 if (sc->dropless_fc && 11795 pause->rcq_th_hi + FW_PREFETCH_CNT > 11796 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) { 11797 BLOGW(sc, "rcq ring threshold limit\n"); 11798 } 11799 11800 pause->pri_map = 1; 11801 } 11802 11803 /* rxq setup */ 11804 rxq_init->dscr_map = fp->rx_dma.paddr; 11805 rxq_init->sge_map = fp->rx_sge_dma.paddr; 11806 rxq_init->rcq_map = fp->rcq_dma.paddr; 11807 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE); 11808 11809 /* 11810 * This should be a maximum number of data bytes that may be 11811 * placed on the BD (not including paddings). 11812 */ 11813 rxq_init->buf_sz = (fp->rx_buf_size - 11814 IP_HEADER_ALIGNMENT_PADDING); 11815 11816 rxq_init->cl_qzone_id = fp->cl_qzone_id; 11817 rxq_init->tpa_agg_sz = tpa_agg_size; 11818 rxq_init->sge_buf_sz = sge_sz; 11819 rxq_init->max_sges_pkt = max_sge; 11820 rxq_init->rss_engine_id = SC_FUNC(sc); 11821 rxq_init->mcast_engine_id = SC_FUNC(sc); 11822 11823 /* 11824 * Maximum number or simultaneous TPA aggregation for this Queue. 11825 * For PF Clients it should be the maximum available number. 11826 * VF driver(s) may want to define it to a smaller value. 11827 */ 11828 rxq_init->max_tpa_queues = MAX_AGG_QS(sc); 11829 11830 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT; 11831 rxq_init->fw_sb_id = fp->fw_sb_id; 11832 11833 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11834 11835 /* 11836 * configure silent vlan removal 11837 * if multi function mode is afex, then mask default vlan 11838 */ 11839 if (IS_MF_AFEX(sc)) { 11840 rxq_init->silent_removal_value = 11841 sc->devinfo.mf_info.afex_def_vlan_tag; 11842 rxq_init->silent_removal_mask = EVL_VLID_MASK; 11843 } 11844 } 11845 11846 static void 11847 bxe_pf_tx_q_prep(struct bxe_softc *sc, 11848 struct bxe_fastpath *fp, 11849 struct ecore_txq_setup_params *txq_init, 11850 uint8_t cos) 11851 { 11852 /* 11853 * XXX If multiple CoS is ever supported then each fastpath structure 11854 * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 
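 * As of this version only FIRST_TX_COS_INDEX is ever passed in (the
 * tx-only CoS setup loop in bxe_setup_queue() is compiled out), so the
 * single per-queue fp->tx_dma ring assigned below suffices; a multi-CoS
 * layout would instead index something like: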
11855 * fp->txdata[cos]->tx_dma.paddr; 11856 */ 11857 txq_init->dscr_map = fp->tx_dma.paddr; 11858 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 11859 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 11860 txq_init->fw_sb_id = fp->fw_sb_id; 11861 11862 /* 11863 * set the TSS leading client id for TX classfication to the 11864 * leading RSS client id 11865 */ 11866 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); 11867 } 11868 11869 /* 11870 * This function performs 2 steps in a queue state machine: 11871 * 1) RESET->INIT 11872 * 2) INIT->SETUP 11873 */ 11874 static int 11875 bxe_setup_queue(struct bxe_softc *sc, 11876 struct bxe_fastpath *fp, 11877 uint8_t leading) 11878 { 11879 struct ecore_queue_state_params q_params = { NULL }; 11880 struct ecore_queue_setup_params *setup_params = 11881 &q_params.params.setup; 11882 #if 0 11883 struct ecore_queue_setup_tx_only_params *tx_only_params = 11884 &q_params.params.tx_only; 11885 uint8_t tx_index; 11886 #endif 11887 int rc; 11888 11889 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); 11890 11891 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 11892 11893 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 11894 11895 /* we want to wait for completion in this context */ 11896 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 11897 11898 /* prepare the INIT parameters */ 11899 bxe_pf_q_prep_init(sc, fp, &q_params.params.init); 11900 11901 /* Set the command */ 11902 q_params.cmd = ECORE_Q_CMD_INIT; 11903 11904 /* Change the state to INIT */ 11905 rc = ecore_queue_state_change(sc, &q_params); 11906 if (rc) { 11907 BLOGE(sc, "Queue(%d) INIT failed\n", fp->index); 11908 return (rc); 11909 } 11910 11911 BLOGD(sc, DBG_LOAD, "init complete\n"); 11912 11913 /* now move the Queue to the SETUP state */ 11914 memset(setup_params, 0, sizeof(*setup_params)); 11915 11916 /* set Queue flags */ 11917 setup_params->flags = bxe_get_q_flags(sc, fp, leading); 11918 11919 /* set general SETUP parameters */ 11920 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, 11921 FIRST_TX_COS_INDEX); 11922 11923 bxe_pf_rx_q_prep(sc, fp, 11924 &setup_params->pause_params, 11925 &setup_params->rxq_params); 11926 11927 bxe_pf_tx_q_prep(sc, fp, 11928 &setup_params->txq_params, 11929 FIRST_TX_COS_INDEX); 11930 11931 /* Set the command */ 11932 q_params.cmd = ECORE_Q_CMD_SETUP; 11933 11934 /* change the state to SETUP */ 11935 rc = ecore_queue_state_change(sc, &q_params); 11936 if (rc) { 11937 BLOGE(sc, "Queue(%d) SETUP failed\n", fp->index); 11938 return (rc); 11939 } 11940 11941 #if 0 11942 /* loop through the relevant tx-only indices */ 11943 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 11944 tx_index < sc->max_cos; 11945 tx_index++) { 11946 /* prepare and send tx-only ramrod*/ 11947 rc = bxe_setup_tx_only(sc, fp, &q_params, 11948 tx_only_params, tx_index, leading); 11949 if (rc) { 11950 BLOGE(sc, "Queue(%d.%d) TX_ONLY_SETUP failed\n", 11951 fp->index, tx_index); 11952 return (rc); 11953 } 11954 } 11955 #endif 11956 11957 return (rc); 11958 } 11959 11960 static int 11961 bxe_setup_leading(struct bxe_softc *sc) 11962 { 11963 return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); 11964 } 11965 11966 static int 11967 bxe_config_rss_pf(struct bxe_softc *sc, 11968 struct ecore_rss_config_obj *rss_obj, 11969 uint8_t config_hash) 11970 { 11971 struct ecore_config_rss_params params = { NULL }; 11972 int i; 11973 11974 /* 11975 * Although RSS is meaningless when there is a single HW queue we 11976 * still need it enabled in order to have HW Rx hash 
generated. 11977 */ 11978 11979 params.rss_obj = rss_obj; 11980 11981 bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 11982 11983 bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags); 11984 11985 /* RSS configuration */ 11986 bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags); 11987 bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags); 11988 bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags); 11989 bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags); 11990 if (rss_obj->udp_rss_v4) { 11991 bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags); 11992 } 11993 if (rss_obj->udp_rss_v6) { 11994 bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags); 11995 } 11996 11997 /* Hash bits */ 11998 params.rss_result_mask = MULTI_MASK; 11999 12000 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); 12001 12002 if (config_hash) { 12003 /* RSS keys */ 12004 for (i = 0; i < sizeof(params.rss_key) / 4; i++) { 12005 params.rss_key[i] = arc4random(); 12006 } 12007 12008 bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags); 12009 } 12010 12011 return (ecore_config_rss(sc, &params)); 12012 } 12013 12014 static int 12015 bxe_config_rss_eth(struct bxe_softc *sc, 12016 uint8_t config_hash) 12017 { 12018 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); 12019 } 12020 12021 static int 12022 bxe_init_rss_pf(struct bxe_softc *sc) 12023 { 12024 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); 12025 int i; 12026 12027 /* 12028 * Prepare the initial contents of the indirection table if 12029 * RSS is enabled 12030 */ 12031 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { 12032 sc->rss_conf_obj.ind_table[i] = 12033 (sc->fp->cl_id + (i % num_eth_queues)); 12034 } 12035 12036 if (sc->udp_rss) { 12037 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; 12038 } 12039 12040 /* 12041 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 12042 * per-port, so if explicit configuration is needed, do it only 12043 * for a PMF. 12044 * 12045 * For 57712 and newer it's a per-function configuration. 12046 */ 12047 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); 12048 } 12049 12050 static int 12051 bxe_set_mac_one(struct bxe_softc *sc, 12052 uint8_t *mac, 12053 struct ecore_vlan_mac_obj *obj, 12054 uint8_t set, 12055 int mac_type, 12056 unsigned long *ramrod_flags) 12057 { 12058 struct ecore_vlan_mac_ramrod_params ramrod_param; 12059 int rc; 12060 12061 memset(&ramrod_param, 0, sizeof(ramrod_param)); 12062 12063 /* fill in general parameters */ 12064 ramrod_param.vlan_mac_obj = obj; 12065 ramrod_param.ramrod_flags = *ramrod_flags; 12066 12067 /* fill a user request section if needed */ 12068 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) { 12069 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 12070 12071 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 12072 12073 /* Set the command: ADD or DEL */ 12074 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : 12075 ECORE_VLAN_MAC_DEL; 12076 } 12077 12078 rc = ecore_config_vlan_mac(sc, &ramrod_param); 12079 12080 if (rc == ECORE_EXISTS) { 12081 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12082 /* do not treat adding same MAC as error */ 12083 rc = 0; 12084 } else if (rc < 0) { 12085 BLOGE(sc, "%s MAC failed (%d)\n", (set ?
"Set" : "Delete"), rc); 12086 } 12087 12088 return (rc); 12089 } 12090 12091 static int 12092 bxe_set_eth_mac(struct bxe_softc *sc, 12093 uint8_t set) 12094 { 12095 unsigned long ramrod_flags = 0; 12096 12097 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); 12098 12099 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 12100 12101 /* Eth MAC is set on RSS leading client (fp[0]) */ 12102 return (bxe_set_mac_one(sc, sc->link_params.mac_addr, 12103 &sc->sp_objs->mac_obj, 12104 set, ECORE_ETH_MAC, &ramrod_flags)); 12105 } 12106 12107 #if 0 12108 static void 12109 bxe_update_max_mf_config(struct bxe_softc *sc, 12110 uint32_t value) 12111 { 12112 /* load old values */ 12113 uint32_t mf_cfg = sc->devinfo.mf_info.mf_config[SC_VN(sc)]; 12114 12115 if (value != bxe_extract_max_cfg(sc, mf_cfg)) { 12116 /* leave all but MAX value */ 12117 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; 12118 12119 /* set new MAX value */ 12120 mf_cfg |= ((value << FUNC_MF_CFG_MAX_BW_SHIFT) & 12121 FUNC_MF_CFG_MAX_BW_MASK); 12122 12123 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW, mf_cfg); 12124 } 12125 } 12126 #endif 12127 12128 static int 12129 bxe_get_cur_phy_idx(struct bxe_softc *sc) 12130 { 12131 uint32_t sel_phy_idx = 0; 12132 12133 if (sc->link_params.num_phys <= 1) { 12134 return (ELINK_INT_PHY); 12135 } 12136 12137 if (sc->link_vars.link_up) { 12138 sel_phy_idx = ELINK_EXT_PHY1; 12139 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 12140 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 12141 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 12142 ELINK_SUPPORTED_FIBRE)) 12143 sel_phy_idx = ELINK_EXT_PHY2; 12144 } else { 12145 switch (elink_phy_selection(&sc->link_params)) { 12146 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 12147 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 12148 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 12149 sel_phy_idx = ELINK_EXT_PHY1; 12150 break; 12151 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 12152 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 12153 sel_phy_idx = ELINK_EXT_PHY2; 12154 break; 12155 } 12156 } 12157 12158 return (sel_phy_idx); 12159 } 12160 12161 static int 12162 bxe_get_link_cfg_idx(struct bxe_softc *sc) 12163 { 12164 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); 12165 12166 /* 12167 * The selected activated PHY is always after swapping (in case PHY 12168 * swapping is enabled). 
So when swapping is enabled, we need to reverse 12169 * the configuration 12170 */ 12171 12172 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 12173 if (sel_phy_idx == ELINK_EXT_PHY1) 12174 sel_phy_idx = ELINK_EXT_PHY2; 12175 else if (sel_phy_idx == ELINK_EXT_PHY2) 12176 sel_phy_idx = ELINK_EXT_PHY1; 12177 } 12178 12179 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); 12180 } 12181 12182 static void 12183 bxe_set_requested_fc(struct bxe_softc *sc) 12184 { 12185 /* 12186 * Initialize link parameters structure variables 12187 * It is recommended to turn off RX FC for jumbo frames 12188 * for better performance 12189 */ 12190 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { 12191 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; 12192 } else { 12193 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; 12194 } 12195 } 12196 12197 static void 12198 bxe_calc_fc_adv(struct bxe_softc *sc) 12199 { 12200 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); 12201 switch (sc->link_vars.ieee_fc & 12202 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 12203 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 12204 default: 12205 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 12206 ADVERTISED_Pause); 12207 break; 12208 12209 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 12210 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 12211 ADVERTISED_Pause); 12212 break; 12213 12214 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 12215 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 12216 break; 12217 } 12218 } 12219 12220 static uint16_t 12221 bxe_get_mf_speed(struct bxe_softc *sc) 12222 { 12223 uint16_t line_speed = sc->link_vars.line_speed; 12224 if (IS_MF(sc)) { 12225 uint16_t maxCfg = 12226 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); 12227 12228 /* calculate the current MAX line speed limit for the MF devices */ 12229 if (IS_MF_SI(sc)) { 12230 line_speed = (line_speed * maxCfg) / 100; 12231 } else { /* SD mode */ 12232 uint16_t vn_max_rate = maxCfg * 100; 12233 12234 if (vn_max_rate < line_speed) { 12235 line_speed = vn_max_rate; 12236 } 12237 } 12238 } 12239 12240 return (line_speed); 12241 } 12242 12243 static void 12244 bxe_fill_report_data(struct bxe_softc *sc, 12245 struct bxe_link_report_data *data) 12246 { 12247 uint16_t line_speed = bxe_get_mf_speed(sc); 12248 12249 memset(data, 0, sizeof(*data)); 12250 12251 /* fill the report data with the effective line speed */ 12252 data->line_speed = line_speed; 12253 12254 /* Link is down */ 12255 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { 12256 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); 12257 } 12258 12259 /* Full DUPLEX */ 12260 if (sc->link_vars.duplex == DUPLEX_FULL) { 12261 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); 12262 } 12263 12264 /* Rx Flow Control is ON */ 12265 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { 12266 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); 12267 } 12268 12269 /* Tx Flow Control is ON */ 12270 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 12271 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); 12272 } 12273 } 12274 12275 /* report link status to OS, should be called under phy_lock */ 12276 static void 12277 bxe_link_report_locked(struct bxe_softc *sc) 12278 { 12279 struct bxe_link_report_data cur_data; 12280 12281 /* reread mf_cfg */ 12282 if (IS_PF(sc) && !CHIP_IS_E1(sc)) { 12283 bxe_read_mf_cfg(sc); 12284 } 12285 12286 /* Read the current link report info 
*/ 12287 bxe_fill_report_data(sc, &cur_data); 12288 12289 /* Don't report link down or exactly the same link status twice */ 12290 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || 12291 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12292 &sc->last_reported_link.link_report_flags) && 12293 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12294 &cur_data.link_report_flags))) { 12295 return; 12296 } 12297 12298 sc->link_cnt++; 12299 12300 /* report new link params and remember the state for the next time */ 12301 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); 12302 12303 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12304 &cur_data.link_report_flags)) { 12305 if_link_state_change(sc->ifp, LINK_STATE_DOWN); 12306 BLOGI(sc, "NIC Link is Down\n"); 12307 } else { 12308 const char *duplex; 12309 const char *flow; 12310 12311 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, 12312 &cur_data.link_report_flags)) { 12313 duplex = "full"; 12314 } else { 12315 duplex = "half"; 12316 } 12317 12318 /* 12319 * Handle the FC at the end so that only these flags would be 12320 * possibly set. This way we may easily check if there is no FC 12321 * enabled. 12322 */ 12323 if (cur_data.link_report_flags) { 12324 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12325 &cur_data.link_report_flags) && 12326 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12327 &cur_data.link_report_flags)) { 12328 flow = "ON - receive & transmit"; 12329 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12330 &cur_data.link_report_flags) && 12331 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12332 &cur_data.link_report_flags)) { 12333 flow = "ON - receive"; 12334 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12335 &cur_data.link_report_flags) && 12336 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12337 &cur_data.link_report_flags)) { 12338 flow = "ON - transmit"; 12339 } else { 12340 flow = "none"; /* possible? 
*/ 12341 } 12342 } else { 12343 flow = "none"; 12344 } 12345 12346 if_link_state_change(sc->ifp, LINK_STATE_UP); 12347 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 12348 cur_data.line_speed, duplex, flow); 12349 } 12350 } 12351 12352 static void 12353 bxe_link_report(struct bxe_softc *sc) 12354 { 12355 bxe_acquire_phy_lock(sc); 12356 bxe_link_report_locked(sc); 12357 bxe_release_phy_lock(sc); 12358 } 12359 12360 static void 12361 bxe_link_status_update(struct bxe_softc *sc) 12362 { 12363 if (sc->state != BXE_STATE_OPEN) { 12364 return; 12365 } 12366 12367 #if 0 12368 /* read updated dcb configuration */ 12369 if (IS_PF(sc)) 12370 bxe_dcbx_pmf_update(sc); 12371 #endif 12372 12373 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 12374 elink_link_status_update(&sc->link_params, &sc->link_vars); 12375 } else { 12376 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 12377 ELINK_SUPPORTED_10baseT_Full | 12378 ELINK_SUPPORTED_100baseT_Half | 12379 ELINK_SUPPORTED_100baseT_Full | 12380 ELINK_SUPPORTED_1000baseT_Full | 12381 ELINK_SUPPORTED_2500baseX_Full | 12382 ELINK_SUPPORTED_10000baseT_Full | 12383 ELINK_SUPPORTED_TP | 12384 ELINK_SUPPORTED_FIBRE | 12385 ELINK_SUPPORTED_Autoneg | 12386 ELINK_SUPPORTED_Pause | 12387 ELINK_SUPPORTED_Asym_Pause); 12388 sc->port.advertising[0] = sc->port.supported[0]; 12389 12390 sc->link_params.sc = sc; 12391 sc->link_params.port = SC_PORT(sc); 12392 sc->link_params.req_duplex[0] = DUPLEX_FULL; 12393 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 12394 sc->link_params.req_line_speed[0] = SPEED_10000; 12395 sc->link_params.speed_cap_mask[0] = 0x7f0000; 12396 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 12397 12398 if (CHIP_REV_IS_FPGA(sc)) { 12399 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 12400 sc->link_vars.line_speed = ELINK_SPEED_1000; 12401 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12402 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 12403 } else { 12404 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 12405 sc->link_vars.line_speed = ELINK_SPEED_10000; 12406 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12407 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 12408 } 12409 12410 sc->link_vars.link_up = 1; 12411 12412 sc->link_vars.duplex = DUPLEX_FULL; 12413 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 12414 12415 if (IS_PF(sc)) { 12416 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); 12417 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12418 bxe_link_report(sc); 12419 } 12420 } 12421 12422 if (IS_PF(sc)) { 12423 if (sc->link_vars.link_up) { 12424 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12425 } else { 12426 bxe_stats_handle(sc, STATS_EVENT_STOP); 12427 } 12428 bxe_link_report(sc); 12429 } else { 12430 bxe_link_report(sc); 12431 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12432 } 12433 } 12434 12435 static int 12436 bxe_initial_phy_init(struct bxe_softc *sc, 12437 int load_mode) 12438 { 12439 int rc, cfg_idx = bxe_get_link_cfg_idx(sc); 12440 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 12441 struct elink_params *lp = &sc->link_params; 12442 12443 bxe_set_requested_fc(sc); 12444 12445 if (CHIP_REV_IS_SLOW(sc)) { 12446 uint32_t bond = CHIP_BOND_ID(sc); 12447 uint32_t feat = 0; 12448 12449 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { 12450 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12451 } else if (bond & 0x4) { 12452 if (CHIP_IS_E3(sc)) { 12453 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; 12454 } else { 12455 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12456 } 12457 } else 
if (bond & 0x8) { 12458 if (CHIP_IS_E3(sc)) { 12459 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; 12460 } else { 12461 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12462 } 12463 } 12464 12465 /* disable EMAC for E3 and above */ 12466 if (bond & 0x2) { 12467 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12468 } 12469 12470 sc->link_params.feature_config_flags |= feat; 12471 } 12472 12473 bxe_acquire_phy_lock(sc); 12474 12475 if (load_mode == LOAD_DIAG) { 12476 lp->loopback_mode = ELINK_LOOPBACK_XGXS; 12477 /* Prefer doing PHY loopback at 10G speed, if possible */ 12478 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { 12479 if (lp->speed_cap_mask[cfg_idx] & 12480 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 12481 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; 12482 } else { 12483 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; 12484 } 12485 } 12486 } 12487 12488 if (load_mode == LOAD_LOOPBACK_EXT) { 12489 lp->loopback_mode = ELINK_LOOPBACK_EXT; 12490 } 12491 12492 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 12493 12494 bxe_release_phy_lock(sc); 12495 12496 bxe_calc_fc_adv(sc); 12497 12498 if (sc->link_vars.link_up) { 12499 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12500 bxe_link_report(sc); 12501 } 12502 12503 if (!CHIP_REV_IS_SLOW(sc)) { 12504 bxe_periodic_start(sc); 12505 } 12506 12507 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; 12508 return (rc); 12509 } 12510 12511 /* must be called under IF_ADDR_LOCK */ 12512 12513 static int 12514 bxe_set_mc_list(struct bxe_softc *sc) 12515 { 12516 struct ecore_mcast_ramrod_params rparam = { NULL }; 12517 int rc = 0; 12518 int mc_count = 0; 12519 int mcnt, i; 12520 struct ecore_mcast_list_elem *mc_mac, *mc_mac_start; 12521 unsigned char *mta; 12522 if_t ifp = sc->ifp; 12523 12524 mc_count = if_multiaddr_count(ifp, -1);/* XXX they don't have a limit */ 12525 if (!mc_count) 12526 return (0); 12527 12528 mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * 12529 mc_count, M_DEVBUF, M_NOWAIT); 12530 12531 if(mta == NULL) { 12532 BLOGE(sc, "Failed to allocate temp mcast list\n"); 12533 return (-1); 12534 } 12535 bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count)); 12536 12537 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO)); 12538 mc_mac_start = mc_mac; 12539 12540 if (!mc_mac) { 12541 free(mta, M_DEVBUF); 12542 BLOGE(sc, "Failed to allocate temp mcast list\n"); 12543 return (-1); 12544 } 12545 bzero(mc_mac, (sizeof(*mc_mac) * mc_count)); 12546 12547 /* mta and mcnt not expected to be different */ 12548 if_multiaddr_array(ifp, mta, &mcnt, mc_count); 12549 12550 12551 rparam.mcast_obj = &sc->mcast_obj; 12552 ECORE_LIST_INIT(&rparam.mcast_list); 12553 12554 for(i=0; i< mcnt; i++) { 12555 12556 mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN)); 12557 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list); 12558 12559 BLOGD(sc, DBG_LOAD, 12560 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n", 12561 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2], 12562 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]); 12563 12564 mc_mac++; 12565 } 12566 rparam.mcast_list_len = mc_count; 12567 12568 BXE_MCAST_LOCK(sc); 12569 12570 /* first, clear all configured multicast MACs */ 12571 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 12572 if (rc < 0) { 12573 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc); 12574 BXE_MCAST_UNLOCK(sc); 12575 free(mc_mac_start, M_DEVBUF); 12576 free(mta, M_DEVBUF); 12577 return (rc); 12578 } 12579 12580 /* Now add the new MACs */ 12581 rc = 
ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD); 12582 if (rc < 0) { 12583 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); 12584 } 12585 12586 BXE_MCAST_UNLOCK(sc); 12587 12588 free(mc_mac_start, M_DEVBUF); 12589 free(mta, M_DEVBUF); 12590 12591 return (rc); 12592 } 12593 12594 static int 12595 bxe_set_uc_list(struct bxe_softc *sc) 12596 { 12597 if_t ifp = sc->ifp; 12598 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 12599 struct ifaddr *ifa; 12600 unsigned long ramrod_flags = 0; 12601 int rc; 12602 12603 #if __FreeBSD_version < 800000 12604 IF_ADDR_LOCK(ifp); 12605 #else 12606 if_addr_rlock(ifp); 12607 #endif 12608 12609 /* first schedule a cleanup up of old configuration */ 12610 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); 12611 if (rc < 0) { 12612 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); 12613 #if __FreeBSD_version < 800000 12614 IF_ADDR_UNLOCK(ifp); 12615 #else 12616 if_addr_runlock(ifp); 12617 #endif 12618 return (rc); 12619 } 12620 12621 ifa = if_getifaddr(ifp); /* XXX Is this structure */ 12622 while (ifa) { 12623 if (ifa->ifa_addr->sa_family != AF_LINK) { 12624 ifa = TAILQ_NEXT(ifa, ifa_link); 12625 continue; 12626 } 12627 12628 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 12629 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags); 12630 if (rc == -EEXIST) { 12631 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12632 /* do not treat adding same MAC as an error */ 12633 rc = 0; 12634 } else if (rc < 0) { 12635 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc); 12636 #if __FreeBSD_version < 800000 12637 IF_ADDR_UNLOCK(ifp); 12638 #else 12639 if_addr_runlock(ifp); 12640 #endif 12641 return (rc); 12642 } 12643 12644 ifa = TAILQ_NEXT(ifa, ifa_link); 12645 } 12646 12647 #if __FreeBSD_version < 800000 12648 IF_ADDR_UNLOCK(ifp); 12649 #else 12650 if_addr_runlock(ifp); 12651 #endif 12652 12653 /* Execute the pending commands */ 12654 bit_set(&ramrod_flags, RAMROD_CONT); 12655 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, 12656 ECORE_UC_LIST_MAC, &ramrod_flags)); 12657 } 12658 12659 static void 12660 bxe_set_rx_mode(struct bxe_softc *sc) 12661 { 12662 if_t ifp = sc->ifp; 12663 uint32_t rx_mode = BXE_RX_MODE_NORMAL; 12664 12665 if (sc->state != BXE_STATE_OPEN) { 12666 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); 12667 return; 12668 } 12669 12670 BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp)); 12671 12672 if (if_getflags(ifp) & IFF_PROMISC) { 12673 rx_mode = BXE_RX_MODE_PROMISC; 12674 } else if ((if_getflags(ifp) & IFF_ALLMULTI) || 12675 ((if_getamcount(ifp) > BXE_MAX_MULTICAST) && 12676 CHIP_IS_E1(sc))) { 12677 rx_mode = BXE_RX_MODE_ALLMULTI; 12678 } else { 12679 if (IS_PF(sc)) { 12680 /* some multicasts */ 12681 if (bxe_set_mc_list(sc) < 0) { 12682 rx_mode = BXE_RX_MODE_ALLMULTI; 12683 } 12684 if (bxe_set_uc_list(sc) < 0) { 12685 rx_mode = BXE_RX_MODE_PROMISC; 12686 } 12687 } 12688 #if 0 12689 else { 12690 /* 12691 * Configuring mcast to a VF involves sleeping (when we 12692 * wait for the PF's response). 
Since this function is 12693 * called from a non sleepable context we must schedule 12694 * a work item for this purpose 12695 */ 12696 bxe_set_bit(BXE_SP_RTNL_VFPF_MCAST, &sc->sp_rtnl_state); 12697 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12698 } 12699 #endif 12700 } 12701 12702 sc->rx_mode = rx_mode; 12703 12704 /* schedule the rx_mode command */ 12705 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 12706 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n"); 12707 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 12708 return; 12709 } 12710 12711 if (IS_PF(sc)) { 12712 bxe_set_storm_rx_mode(sc); 12713 } 12714 #if 0 12715 else { 12716 /* 12717 * Configuring mcast to a VF involves sleeping (when we 12718 * wait for the PF's response). Since this function is 12719 * called from a non sleepable context we must schedule 12720 * a work item for this purpose 12721 */ 12722 bxe_set_bit(BXE_SP_RTNL_VFPF_STORM_RX_MODE, &sc->sp_rtnl_state); 12723 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12724 } 12725 #endif 12726 12727 } 12728 12729 12730 /* update flags in shmem */ 12731 static void 12732 bxe_update_drv_flags(struct bxe_softc *sc, 12733 uint32_t flags, 12734 uint32_t set) 12735 { 12736 uint32_t drv_flags; 12737 12738 if (SHMEM2_HAS(sc, drv_flags)) { 12739 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12740 drv_flags = SHMEM2_RD(sc, drv_flags); 12741 12742 if (set) { 12743 SET_FLAGS(drv_flags, flags); 12744 } else { 12745 RESET_FLAGS(drv_flags, flags); 12746 } 12747 12748 SHMEM2_WR(sc, drv_flags, drv_flags); 12749 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); 12750 12751 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12752 } 12753 } 12754 12755 /* periodic timer callout routine, only runs when the interface is up */ 12756 12757 static void 12758 bxe_periodic_callout_func(void *xsc) 12759 { 12760 struct bxe_softc *sc = (struct bxe_softc *)xsc; 12761 int i; 12762 12763 if (!BXE_CORE_TRYLOCK(sc)) { 12764 /* just bail and try again next time */ 12765 12766 if ((sc->state == BXE_STATE_OPEN) && 12767 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12768 /* schedule the next periodic callout */ 12769 callout_reset(&sc->periodic_callout, hz, 12770 bxe_periodic_callout_func, sc); 12771 } 12772 12773 return; 12774 } 12775 12776 if ((sc->state != BXE_STATE_OPEN) || 12777 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { 12778 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); 12779 BXE_CORE_UNLOCK(sc); 12780 return; 12781 } 12782 12783 /* Check for TX timeouts on any fastpath. */ 12784 FOR_EACH_QUEUE(sc, i) { 12785 if (bxe_watchdog(sc, &sc->fp[i]) != 0) { 12786 /* Ruh-Roh, chip was reset! */ 12787 break; 12788 } 12789 } 12790 12791 if (!CHIP_REV_IS_SLOW(sc)) { 12792 /* 12793 * This barrier is needed to ensure the ordering between the writing 12794 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and 12795 * the reading here. 
12796 */ 12797 mb(); 12798 if (sc->port.pmf) { 12799 bxe_acquire_phy_lock(sc); 12800 elink_period_func(&sc->link_params, &sc->link_vars); 12801 bxe_release_phy_lock(sc); 12802 } 12803 } 12804 12805 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) { 12806 int mb_idx = SC_FW_MB_IDX(sc); 12807 uint32_t drv_pulse; 12808 uint32_t mcp_pulse; 12809 12810 ++sc->fw_drv_pulse_wr_seq; 12811 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 12812 12813 drv_pulse = sc->fw_drv_pulse_wr_seq; 12814 bxe_drv_pulse(sc); 12815 12816 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & 12817 MCP_PULSE_SEQ_MASK); 12818 12819 /* 12820 * The delta between driver pulse and mcp response should 12821 * be 1 (before mcp response) or 0 (after mcp response). 12822 */ 12823 if ((drv_pulse != mcp_pulse) && 12824 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 12825 /* someone lost a heartbeat... */ 12826 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 12827 drv_pulse, mcp_pulse); 12828 } 12829 } 12830 12831 /* state is BXE_STATE_OPEN */ 12832 bxe_stats_handle(sc, STATS_EVENT_UPDATE); 12833 12834 #if 0 12835 /* sample VF bulletin board for new posts from PF */ 12836 if (IS_VF(sc)) { 12837 bxe_sample_bulletin(sc); 12838 } 12839 #endif 12840 12841 BXE_CORE_UNLOCK(sc); 12842 12843 if ((sc->state == BXE_STATE_OPEN) && 12844 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12845 /* schedule the next periodic callout */ 12846 callout_reset(&sc->periodic_callout, hz, 12847 bxe_periodic_callout_func, sc); 12848 } 12849 } 12850 12851 static void 12852 bxe_periodic_start(struct bxe_softc *sc) 12853 { 12854 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); 12855 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); 12856 } 12857 12858 static void 12859 bxe_periodic_stop(struct bxe_softc *sc) 12860 { 12861 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); 12862 callout_drain(&sc->periodic_callout); 12863 } 12864 12865 /* start the controller */ 12866 static __noinline int 12867 bxe_nic_load(struct bxe_softc *sc, 12868 int load_mode) 12869 { 12870 uint32_t val; 12871 int load_code = 0; 12872 int i, rc = 0; 12873 12874 BXE_CORE_LOCK_ASSERT(sc); 12875 12876 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n"); 12877 12878 sc->state = BXE_STATE_OPENING_WAITING_LOAD; 12879 12880 if (IS_PF(sc)) { 12881 /* must be called before memory allocation and HW init */ 12882 bxe_ilt_set_info(sc); 12883 } 12884 12885 sc->last_reported_link_state = LINK_STATE_UNKNOWN; 12886 12887 bxe_set_fp_rx_buf_size(sc); 12888 12889 if (bxe_alloc_fp_buffers(sc) != 0) { 12890 BLOGE(sc, "Failed to allocate fastpath memory\n"); 12891 sc->state = BXE_STATE_CLOSED; 12892 rc = ENOMEM; 12893 goto bxe_nic_load_error0; 12894 } 12895 12896 if (bxe_alloc_mem(sc) != 0) { 12897 sc->state = BXE_STATE_CLOSED; 12898 rc = ENOMEM; 12899 goto bxe_nic_load_error0; 12900 } 12901 12902 if (bxe_alloc_fw_stats_mem(sc) != 0) { 12903 sc->state = BXE_STATE_CLOSED; 12904 rc = ENOMEM; 12905 goto bxe_nic_load_error0; 12906 } 12907 12908 if (IS_PF(sc)) { 12909 /* set pf load just before approaching the MCP */ 12910 bxe_set_pf_load(sc); 12911 12912 /* if MCP exists send load request and analyze response */ 12913 if (!BXE_NOMCP(sc)) { 12914 /* attempt to load pf */ 12915 if (bxe_nic_load_request(sc, &load_code) != 0) { 12916 sc->state = BXE_STATE_CLOSED; 12917 rc = ENXIO; 12918 goto bxe_nic_load_error1; 12919 } 12920 12921 /* what did the MCP say? 
*/ 12922 if (bxe_nic_load_analyze_req(sc, load_code) != 0) { 12923 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12924 sc->state = BXE_STATE_CLOSED; 12925 rc = ENXIO; 12926 goto bxe_nic_load_error2; 12927 } 12928 } else { 12929 BLOGI(sc, "Device has no MCP!\n"); 12930 load_code = bxe_nic_load_no_mcp(sc); 12931 } 12932 12933 /* mark PMF if applicable */ 12934 bxe_nic_load_pmf(sc, load_code); 12935 12936 /* Init Function state controlling object */ 12937 bxe_init_func_obj(sc); 12938 12939 /* Initialize HW */ 12940 if (bxe_init_hw(sc, load_code) != 0) { 12941 BLOGE(sc, "HW init failed\n"); 12942 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12943 sc->state = BXE_STATE_CLOSED; 12944 rc = ENXIO; 12945 goto bxe_nic_load_error2; 12946 } 12947 } 12948 12949 /* set ALWAYS_ALIVE bit in shmem */ 12950 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 12951 bxe_drv_pulse(sc); 12952 sc->flags |= BXE_NO_PULSE; 12953 12954 /* attach interrupts */ 12955 if (bxe_interrupt_attach(sc) != 0) { 12956 sc->state = BXE_STATE_CLOSED; 12957 rc = ENXIO; 12958 goto bxe_nic_load_error2; 12959 } 12960 12961 bxe_nic_init(sc, load_code); 12962 12963 /* Init per-function objects */ 12964 if (IS_PF(sc)) { 12965 bxe_init_objs(sc); 12966 // XXX bxe_iov_nic_init(sc); 12967 12968 /* set AFEX default VLAN tag to an invalid value */ 12969 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 12970 // XXX bxe_nic_load_afex_dcc(sc, load_code); 12971 12972 sc->state = BXE_STATE_OPENING_WAITING_PORT; 12973 rc = bxe_func_start(sc); 12974 if (rc) { 12975 BLOGE(sc, "Function start failed!\n"); 12976 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12977 sc->state = BXE_STATE_ERROR; 12978 goto bxe_nic_load_error3; 12979 } 12980 12981 /* send LOAD_DONE command to MCP */ 12982 if (!BXE_NOMCP(sc)) { 12983 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12984 if (!load_code) { 12985 BLOGE(sc, "MCP response failure, aborting\n"); 12986 sc->state = BXE_STATE_ERROR; 12987 rc = ENXIO; 12988 goto bxe_nic_load_error3; 12989 } 12990 } 12991 12992 rc = bxe_setup_leading(sc); 12993 if (rc) { 12994 BLOGE(sc, "Setup leading failed!\n"); 12995 sc->state = BXE_STATE_ERROR; 12996 goto bxe_nic_load_error3; 12997 } 12998 12999 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { 13000 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); 13001 if (rc) { 13002 BLOGE(sc, "Queue(%d) setup failed\n", i); 13003 sc->state = BXE_STATE_ERROR; 13004 goto bxe_nic_load_error3; 13005 } 13006 } 13007 13008 rc = bxe_init_rss_pf(sc); 13009 if (rc) { 13010 BLOGE(sc, "PF RSS init failed\n"); 13011 sc->state = BXE_STATE_ERROR; 13012 goto bxe_nic_load_error3; 13013 } 13014 } 13015 /* XXX VF */ 13016 #if 0 13017 else { /* VF */ 13018 FOR_EACH_ETH_QUEUE(sc, i) { 13019 rc = bxe_vfpf_setup_q(sc, i); 13020 if (rc) { 13021 BLOGE(sc, "Queue(%d) setup failed\n", i); 13022 sc->state = BXE_STATE_ERROR; 13023 goto bxe_nic_load_error3; 13024 } 13025 } 13026 } 13027 #endif 13028 13029 /* now when Clients are configured we are ready to work */ 13030 sc->state = BXE_STATE_OPEN; 13031 13032 /* Configure a ucast MAC */ 13033 if (IS_PF(sc)) { 13034 rc = bxe_set_eth_mac(sc, TRUE); 13035 } 13036 #if 0 13037 else { /* IS_VF(sc) */ 13038 rc = bxe_vfpf_set_mac(sc); 13039 } 13040 #endif 13041 if (rc) { 13042 BLOGE(sc, "Setting Ethernet MAC failed\n"); 13043 sc->state = BXE_STATE_ERROR; 13044 goto bxe_nic_load_error3; 13045 } 13046 13047 #if 0 13048 if (IS_PF(sc) && sc->pending_max) { 13049 /* for AFEX */ 13050 bxe_update_max_mf_config(sc, sc->pending_max); 13051 sc->pending_max = 0; 13052 } 13053 #endif 13054 13055 if 
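/*
 * Only the PMF (port management function) owns the port's PHY, so the
 * initial PHY/link bring-up below runs on that function alone; the other
 * functions sharing the port pick up the link state later via
 * bxe_link_status_update()/bxe_link_report().
 */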
(sc->port.pmf) { 13056 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); 13057 if (rc) { 13058 sc->state = BXE_STATE_ERROR; 13059 goto bxe_nic_load_error3; 13060 } 13061 } 13062 13063 sc->link_params.feature_config_flags &= 13064 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; 13065 13066 /* start fast path */ 13067 13068 /* Initialize Rx filter */ 13069 bxe_set_rx_mode(sc); 13070 13071 /* start the Tx */ 13072 switch (/* XXX load_mode */LOAD_OPEN) { 13073 case LOAD_NORMAL: 13074 case LOAD_OPEN: 13075 break; 13076 13077 case LOAD_DIAG: 13078 case LOAD_LOOPBACK_EXT: 13079 sc->state = BXE_STATE_DIAG; 13080 break; 13081 13082 default: 13083 break; 13084 } 13085 13086 if (sc->port.pmf) { 13087 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); 13088 } else { 13089 bxe_link_status_update(sc); 13090 } 13091 13092 /* start the periodic timer callout */ 13093 bxe_periodic_start(sc); 13094 13095 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 13096 /* mark driver is loaded in shmem2 */ 13097 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 13098 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 13099 (val | 13100 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 13101 DRV_FLAGS_CAPABILITIES_LOADED_L2)); 13102 } 13103 13104 /* wait for all pending SP commands to complete */ 13105 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { 13106 BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); 13107 bxe_periodic_stop(sc); 13108 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); 13109 return (ENXIO); 13110 } 13111 13112 #if 0 13113 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ 13114 if (sc->port.pmf && (sc->state != BXE_STATE_DIAG)) { 13115 bxe_dcbx_init(sc, FALSE); 13116 } 13117 #endif 13118 13119 /* Tell the stack the driver is running! */ 13120 if_setdrvflags(sc->ifp, IFF_DRV_RUNNING); 13121 13122 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); 13123 13124 return (0); 13125 13126 bxe_nic_load_error3: 13127 13128 if (IS_PF(sc)) { 13129 bxe_int_disable_sync(sc, 1); 13130 13131 /* clean out queued objects */ 13132 bxe_squeeze_objects(sc); 13133 } 13134 13135 bxe_interrupt_detach(sc); 13136 13137 bxe_nic_load_error2: 13138 13139 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 13140 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 13141 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 13142 } 13143 13144 sc->port.pmf = 0; 13145 13146 bxe_nic_load_error1: 13147 13148 /* clear pf_load status, as it was already set */ 13149 if (IS_PF(sc)) { 13150 bxe_clear_pf_load(sc); 13151 } 13152 13153 bxe_nic_load_error0: 13154 13155 bxe_free_fw_stats_mem(sc); 13156 bxe_free_fp_buffers(sc); 13157 bxe_free_mem(sc); 13158 13159 return (rc); 13160 } 13161 13162 static int 13163 bxe_init_locked(struct bxe_softc *sc) 13164 { 13165 int other_engine = SC_PATH(sc) ? 0 : 1; 13166 uint8_t other_load_status, load_status; 13167 uint8_t global = FALSE; 13168 int rc; 13169 13170 BXE_CORE_LOCK_ASSERT(sc); 13171 13172 /* check if the driver is already running */ 13173 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 13174 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n"); 13175 return (0); 13176 } 13177 13178 bxe_set_power_state(sc, PCI_PM_D0); 13179 13180 /* 13181 * If parity occurred during the unload, then attentions and/or 13182 * RECOVERY_IN_PROGRES may still be set. If so we want the first function 13183 * loaded on the current engine to complete the recovery. Parity recovery 13184 * is only relevant for PF driver. 
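 * The function that wins the leader lock below runs bxe_leader_reset()
 * on behalf of the others; if that attempt fails the device is placed in
 * D3hot and left in BXE_RECOVERY_FAILED until a later retry (or, per the
 * message below, a power cycle).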
13185 */ 13186 if (IS_PF(sc)) { 13187 other_load_status = bxe_get_load_status(sc, other_engine); 13188 load_status = bxe_get_load_status(sc, SC_PATH(sc)); 13189 13190 if (!bxe_reset_is_done(sc, SC_PATH(sc)) || 13191 bxe_chk_parity_attn(sc, &global, TRUE)) { 13192 do { 13193 /* 13194 * If there are attentions and they are in global blocks, set 13195 * the GLOBAL_RESET bit regardless whether it will be this 13196 * function that will complete the recovery or not. 13197 */ 13198 if (global) { 13199 bxe_set_reset_global(sc); 13200 } 13201 13202 /* 13203 * Only the first function on the current engine should try 13204 * to recover in open. In case of attentions in global blocks 13205 * only the first in the chip should try to recover. 13206 */ 13207 if ((!load_status && (!global || !other_load_status)) && 13208 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) { 13209 BLOGI(sc, "Recovered during init\n"); 13210 break; 13211 } 13212 13213 /* recovery has failed... */ 13214 bxe_set_power_state(sc, PCI_PM_D3hot); 13215 sc->recovery_state = BXE_RECOVERY_FAILED; 13216 13217 BLOGE(sc, "Recovery flow hasn't properly " 13218 "completed yet, try again later. " 13219 "If you still see this message after a " 13220 "few retries then power cycle is required.\n"); 13221 13222 rc = ENXIO; 13223 goto bxe_init_locked_done; 13224 } while (0); 13225 } 13226 } 13227 13228 sc->recovery_state = BXE_RECOVERY_DONE; 13229 13230 rc = bxe_nic_load(sc, LOAD_OPEN); 13231 13232 bxe_init_locked_done: 13233 13234 if (rc) { 13235 /* Tell the stack the driver is NOT running! */ 13236 BLOGE(sc, "Initialization failed, " 13237 "stack notified driver is NOT running!\n"); 13238 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); 13239 } 13240 13241 return (rc); 13242 } 13243 13244 static int 13245 bxe_stop_locked(struct bxe_softc *sc) 13246 { 13247 BXE_CORE_LOCK_ASSERT(sc); 13248 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE)); 13249 } 13250 13251 /* 13252 * Handles controller initialization when called from an unlocked routine. 13253 * ifconfig calls this function. 13254 * 13255 * Returns: 13256 * void 13257 */ 13258 static void 13259 bxe_init(void *xsc) 13260 { 13261 struct bxe_softc *sc = (struct bxe_softc *)xsc; 13262 13263 BXE_CORE_LOCK(sc); 13264 bxe_init_locked(sc); 13265 BXE_CORE_UNLOCK(sc); 13266 } 13267 13268 static int 13269 bxe_init_ifnet(struct bxe_softc *sc) 13270 { 13271 if_t ifp; 13272 int capabilities; 13273 13274 /* ifconfig entrypoint for media type/status reporting */ 13275 ifmedia_init(&sc->ifmedia, IFM_IMASK, 13276 bxe_ifmedia_update, 13277 bxe_ifmedia_status); 13278 13279 /* set the default interface values */ 13280 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); 13281 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); 13282 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); 13283 13284 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? 
*/ 13285 13286 /* allocate the ifnet structure */ 13287 if ((ifp = if_gethandle(IFT_ETHER)) == NULL) { 13288 BLOGE(sc, "Interface allocation failed!\n"); 13289 return (ENXIO); 13290 } 13291 13292 if_setsoftc(ifp, sc); 13293 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); 13294 if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); 13295 if_setioctlfn(ifp, bxe_ioctl); 13296 if_setstartfn(ifp, bxe_tx_start); 13297 if_setgetcounterfn(ifp, bxe_get_counter); 13298 #if __FreeBSD_version >= 800000 13299 if_settransmitfn(ifp, bxe_tx_mq_start); 13300 if_setqflushfn(ifp, bxe_mq_flush); 13301 #endif 13302 #ifdef FreeBSD8_0 13303 if_settimer(ifp, 0); 13304 #endif 13305 if_setinitfn(ifp, bxe_init); 13306 if_setmtu(ifp, sc->mtu); 13307 if_sethwassist(ifp, (CSUM_IP | 13308 CSUM_TCP | 13309 CSUM_UDP | 13310 CSUM_TSO | 13311 CSUM_TCP_IPV6 | 13312 CSUM_UDP_IPV6)); 13313 13314 capabilities = 13315 #if __FreeBSD_version < 700000 13316 (IFCAP_VLAN_MTU | 13317 IFCAP_VLAN_HWTAGGING | 13318 IFCAP_HWCSUM | 13319 IFCAP_JUMBO_MTU | 13320 IFCAP_LRO); 13321 #else 13322 (IFCAP_VLAN_MTU | 13323 IFCAP_VLAN_HWTAGGING | 13324 IFCAP_VLAN_HWTSO | 13325 IFCAP_VLAN_HWFILTER | 13326 IFCAP_VLAN_HWCSUM | 13327 IFCAP_HWCSUM | 13328 IFCAP_JUMBO_MTU | 13329 IFCAP_LRO | 13330 IFCAP_TSO4 | 13331 IFCAP_TSO6 | 13332 IFCAP_WOL_MAGIC); 13333 #endif 13334 if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */ 13335 if_setbaudrate(ifp, IF_Gbps(10)); 13336 /* XXX */ 13337 if_setsendqlen(ifp, sc->tx_ring_size); 13338 if_setsendqready(ifp); 13339 /* XXX */ 13340 13341 sc->ifp = ifp; 13342 13343 /* attach to the Ethernet interface list */ 13344 ether_ifattach(ifp, sc->link_params.mac_addr); 13345 13346 return (0); 13347 } 13348 13349 static void 13350 bxe_deallocate_bars(struct bxe_softc *sc) 13351 { 13352 int i; 13353 13354 for (i = 0; i < MAX_BARS; i++) { 13355 if (sc->bar[i].resource != NULL) { 13356 bus_release_resource(sc->dev, 13357 SYS_RES_MEMORY, 13358 sc->bar[i].rid, 13359 sc->bar[i].resource); 13360 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", 13361 i, PCIR_BAR(i)); 13362 } 13363 } 13364 } 13365 13366 static int 13367 bxe_allocate_bars(struct bxe_softc *sc) 13368 { 13369 u_int flags; 13370 int i; 13371 13372 memset(sc->bar, 0, sizeof(sc->bar)); 13373 13374 for (i = 0; i < MAX_BARS; i++) { 13375 13376 /* memory resources reside at BARs 0, 2, 4 */ 13377 /* Run `pciconf -lb` to see mappings */ 13378 if ((i != 0) && (i != 2) && (i != 4)) { 13379 continue; 13380 } 13381 13382 sc->bar[i].rid = PCIR_BAR(i); 13383 13384 flags = RF_ACTIVE; 13385 if (i == 0) { 13386 flags |= RF_SHAREABLE; 13387 } 13388 13389 if ((sc->bar[i].resource = 13390 bus_alloc_resource_any(sc->dev, 13391 SYS_RES_MEMORY, 13392 &sc->bar[i].rid, 13393 flags)) == NULL) { 13394 #if 0 13395 /* BAR4 doesn't exist for E1 */ 13396 BLOGE(sc, "PCI BAR%d [%02x] memory allocation failed\n", 13397 i, PCIR_BAR(i)); 13398 #endif 13399 return (0); 13400 } 13401 13402 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource); 13403 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource); 13404 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource); 13405 13406 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n", 13407 i, PCIR_BAR(i), 13408 (void *)rman_get_start(sc->bar[i].resource), 13409 (void *)rman_get_end(sc->bar[i].resource), 13410 rman_get_size(sc->bar[i].resource), 13411 (void *)sc->bar[i].kva); 13412 } 13413 13414 return (0); 13415 } 13416 13417 static void 13418 bxe_get_function_num(struct bxe_softc *sc) 
13419 { 13420 uint32_t val = 0; 13421 13422 /* 13423 * Read the ME register to get the function number. The ME register 13424 * holds the relative-function number and absolute-function number. The 13425 * absolute-function number appears only in E2 and above. Before that 13426 * these bits always contained zero, therefore we cannot blindly use them. 13427 */ 13428 13429 val = REG_RD(sc, BAR_ME_REGISTER); 13430 13431 sc->pfunc_rel = 13432 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); 13433 sc->path_id = 13434 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; 13435 13436 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13437 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); 13438 } else { 13439 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); 13440 } 13441 13442 BLOGD(sc, DBG_LOAD, 13443 "Relative function %d, Absolute function %d, Path %d\n", 13444 sc->pfunc_rel, sc->pfunc_abs, sc->path_id); 13445 } 13446 13447 static uint32_t 13448 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc) 13449 { 13450 uint32_t shmem2_size; 13451 uint32_t offset; 13452 uint32_t mf_cfg_offset_value; 13453 13454 /* Non 57712 */ 13455 offset = (SHMEM_RD(sc, func_mb) + 13456 (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); 13457 13458 /* 57712 plus */ 13459 if (sc->devinfo.shmem2_base != 0) { 13460 shmem2_size = SHMEM2_RD(sc, size); 13461 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { 13462 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); 13463 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { 13464 offset = mf_cfg_offset_value; 13465 } 13466 } 13467 } 13468 13469 return (offset); 13470 } 13471 13472 static uint32_t 13473 bxe_pcie_capability_read(struct bxe_softc *sc, 13474 int reg, 13475 int width) 13476 { 13477 int pcie_reg; 13478 13479 /* ensure PCIe capability is enabled */ 13480 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) { 13481 if (pcie_reg != 0) { 13482 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg); 13483 return (pci_read_config(sc->dev, (pcie_reg + reg), width)); 13484 } 13485 } 13486 13487 BLOGE(sc, "PCIe capability NOT FOUND!!!\n"); 13488 13489 return (0); 13490 } 13491 13492 static uint8_t 13493 bxe_is_pcie_pending(struct bxe_softc *sc) 13494 { 13495 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) & 13496 PCIM_EXP_STA_TRANSACTION_PND); 13497 } 13498 13499 /* 13500 * Walk the PCI capabilities list for the device to find what features are 13501 * supported. These capabilities may be enabled/disabled by firmware so it's 13502 * best to walk the list rather than make assumptions. 13503 */ 13504 static void 13505 bxe_probe_pci_caps(struct bxe_softc *sc) 13506 { 13507 uint16_t link_status; 13508 int reg; 13509 13510 /* check if PCI Power Management is enabled */ 13511 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) { 13512 if (reg != 0) { 13513 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg); 13514 13515 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG; 13516 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg; 13517 } 13518 } 13519 13520 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2); 13521 13522 /* handle PCIe 2.0 workarounds for 57710 */ 13523 if (CHIP_IS_E1(sc)) { 13524 /* workaround for 57710 errata E4_57710_27462 */ 13525 sc->devinfo.pcie_link_speed = 13526 (REG_RD(sc, 0x3d04) & (1 << 24)) ?
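/*
 * The resulting values mirror the PCIe Link Status speed encoding used in
 * the else branch below: 1 = 2.5GT/s (Gen1), 2 = 5.0GT/s (Gen2); here the
 * chip-specific bit 24 read above stands in for that field per the errata.
 */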
2 : 1; 13527 13528 /* workaround for 57710 errata E4_57710_27488 */ 13529 sc->devinfo.pcie_link_width = 13530 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13531 if (sc->devinfo.pcie_link_speed > 1) { 13532 sc->devinfo.pcie_link_width = 13533 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1; 13534 } 13535 } else { 13536 sc->devinfo.pcie_link_speed = 13537 (link_status & PCIM_LINK_STA_SPEED); 13538 sc->devinfo.pcie_link_width = 13539 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13540 } 13541 13542 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n", 13543 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); 13544 13545 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG; 13546 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg; 13547 13548 /* check if MSI capability is enabled */ 13549 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) { 13550 if (reg != 0) { 13551 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg); 13552 13553 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG; 13554 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg; 13555 } 13556 } 13557 13558 /* check if MSI-X capability is enabled */ 13559 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) { 13560 if (reg != 0) { 13561 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg); 13562 13563 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG; 13564 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg; 13565 } 13566 } 13567 } 13568 13569 static int 13570 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc) 13571 { 13572 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13573 uint32_t val; 13574 13575 /* get the outer vlan if we're in switch-dependent mode */ 13576 13577 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13578 mf_info->ext_id = (uint16_t)val; 13579 13580 mf_info->multi_vnics_mode = 1; 13581 13582 if (!VALID_OVLAN(mf_info->ext_id)) { 13583 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id); 13584 return (1); 13585 } 13586 13587 /* get the capabilities */ 13588 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13589 FUNC_MF_CFG_PROTOCOL_ISCSI) { 13590 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; 13591 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13592 FUNC_MF_CFG_PROTOCOL_FCOE) { 13593 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; 13594 } else { 13595 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; 13596 } 13597 13598 mf_info->vnics_per_port = 13599 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13600 13601 return (0); 13602 } 13603 13604 static uint32_t 13605 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) 13606 { 13607 uint32_t retval = 0; 13608 uint32_t val; 13609 13610 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13611 13612 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { 13613 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { 13614 retval |= MF_PROTO_SUPPORT_ETHERNET; 13615 } 13616 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 13617 retval |= MF_PROTO_SUPPORT_ISCSI; 13618 } 13619 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 13620 retval |= MF_PROTO_SUPPORT_FCOE; 13621 } 13622 } 13623 13624 return (retval); 13625 } 13626 13627 static int 13628 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) 13629 { 13630 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13631 uint32_t val; 13632 13633 /* 13634 * There is no outer vlan if we're in switch-independent mode. 13635 * If the mac is valid then assume multi-function.
13636 */ 13637 13638 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13639 13640 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 13641 13642 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13643 13644 mf_info->vnics_per_port = 13645 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13646 13647 return (0); 13648 } 13649 13650 static int 13651 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) 13652 { 13653 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13654 uint32_t e1hov_tag; 13655 uint32_t func_config; 13656 uint32_t niv_config; 13657 13658 mf_info->multi_vnics_mode = 1; 13659 13660 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13661 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13662 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 13663 13664 mf_info->ext_id = 13665 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 13666 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 13667 13668 mf_info->default_vlan = 13669 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 13670 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 13671 13672 mf_info->niv_allowed_priorities = 13673 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 13674 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 13675 13676 mf_info->niv_default_cos = 13677 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 13678 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 13679 13680 mf_info->afex_vlan_mode = 13681 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 13682 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 13683 13684 mf_info->niv_mba_enabled = 13685 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 13686 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 13687 13688 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13689 13690 mf_info->vnics_per_port = 13691 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13692 13693 return (0); 13694 } 13695 13696 static int 13697 bxe_check_valid_mf_cfg(struct bxe_softc *sc) 13698 { 13699 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13700 uint32_t mf_cfg1; 13701 uint32_t mf_cfg2; 13702 uint32_t ovlan1; 13703 uint32_t ovlan2; 13704 uint8_t i, j; 13705 13706 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", 13707 SC_PORT(sc)); 13708 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", 13709 mf_info->mf_config[SC_VN(sc)]); 13710 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", 13711 mf_info->multi_vnics_mode); 13712 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", 13713 mf_info->vnics_per_port); 13714 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", 13715 mf_info->ext_id); 13716 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", 13717 mf_info->min_bw[0], mf_info->min_bw[1], 13718 mf_info->min_bw[2], mf_info->min_bw[3]); 13719 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", 13720 mf_info->max_bw[0], mf_info->max_bw[1], 13721 mf_info->max_bw[2], mf_info->max_bw[3]); 13722 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", 13723 sc->mac_addr_str); 13724 13725 /* various MF mode sanity checks... 
*/
13726
13727 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13728 BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13729 SC_PORT(sc));
13730 return (1);
13731 }
13732
13733 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13734 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13735 mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13736 return (1);
13737 }
13738
13739 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13740 /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13741 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13742 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13743 SC_VN(sc), OVLAN(sc));
13744 return (1);
13745 }
13746
13747 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13748 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13749 mf_info->multi_vnics_mode, OVLAN(sc));
13750 return (1);
13751 }
13752
13753 /*
13754 * Verify all functions are either in MF or SF mode. If MF, make
13755 * sure that all non-hidden functions have a valid ovlan. If SF,
13756 * make sure that all non-hidden functions have an invalid ovlan.
13757 */
13758 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13759 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13760 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13761 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13762 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13763 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13764 BLOGE(sc, "mf_mode=SD function %d MF config "
13765 "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13766 i, mf_info->multi_vnics_mode, ovlan1);
13767 return (1);
13768 }
13769 }
13770
13771 /* Verify all funcs on the same port each have a different ovlan. */
13772 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13773 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13774 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13775 /* iterate from the next function on the port to the max func */
13776 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13777 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13778 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13779 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13780 VALID_OVLAN(ovlan1) &&
13781 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13782 VALID_OVLAN(ovlan2) &&
13783 (ovlan1 == ovlan2)) {
13784 BLOGE(sc, "mf_mode=SD functions %d and %d "
13785 "have the same ovlan (%d)\n",
13786 i, j, ovlan1);
13787 return (1);
13788 }
13789 }
13790 }
13791 } /* MULTI_FUNCTION_SD */
13792
13793 return (0);
13794 }
13795
13796 static int
13797 bxe_get_mf_cfg_info(struct bxe_softc *sc)
13798 {
13799 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13800 uint32_t val, mac_upper;
13801 uint8_t i, vnic;
13802
13803 /* initialize mf_info defaults */
13804 mf_info->vnics_per_port = 1;
13805 mf_info->multi_vnics_mode = FALSE;
13806 mf_info->path_has_ovlan = FALSE;
13807 mf_info->mf_mode = SINGLE_FUNCTION;
13808
13809 if (!CHIP_IS_MF_CAP(sc)) {
13810 return (0);
13811 }
13812
13813 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13814 BLOGE(sc, "Invalid mf_cfg_base!\n");
13815 return (1);
13816 }
13817
13818 /* get the MF mode (switch dependent / independent / single-function) */
13819
13820 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13821
13822 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13823 {
13824 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13825
13826 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13827
13828 /* check for legal upper mac bytes */
13829 if (mac_upper !=
FUNC_MF_CFG_UPPERMAC_DEFAULT) { 13830 mf_info->mf_mode = MULTI_FUNCTION_SI; 13831 } else { 13832 BLOGE(sc, "Invalid config for Switch Independent mode\n"); 13833 } 13834 13835 break; 13836 13837 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 13838 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 13839 13840 /* get outer vlan configuration */ 13841 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13842 13843 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != 13844 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 13845 mf_info->mf_mode = MULTI_FUNCTION_SD; 13846 } else { 13847 BLOGE(sc, "Invalid config for Switch Dependent mode\n"); 13848 } 13849 13850 break; 13851 13852 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 13853 13854 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ 13855 return (0); 13856 13857 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 13858 13859 /* 13860 * Mark MF mode as NIV if MCP version includes NPAR-SD support 13861 * and the MAC address is valid. 13862 */ 13863 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13864 13865 if ((SHMEM2_HAS(sc, afex_driver_support)) && 13866 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { 13867 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 13868 } else { 13869 BLOGE(sc, "Invalid config for AFEX mode\n"); 13870 } 13871 13872 break; 13873 13874 default: 13875 13876 BLOGE(sc, "Unknown MF mode (0x%08x)\n", 13877 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); 13878 13879 return (1); 13880 } 13881 13882 /* set path mf_mode (which could be different than function mf_mode) */ 13883 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13884 mf_info->path_has_ovlan = TRUE; 13885 } else if (mf_info->mf_mode == SINGLE_FUNCTION) { 13886 /* 13887 * Decide on path multi vnics mode. If we're not in MF mode and in 13888 * 4-port mode, this is good enough to check vnic-0 of the other port 13889 * on the same path 13890 */ 13891 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13892 uint8_t other_port = !(PORT_ID(sc) & 1); 13893 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); 13894 13895 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); 13896 13897 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 
1 : 0; 13898 } 13899 } 13900 13901 if (mf_info->mf_mode == SINGLE_FUNCTION) { 13902 /* invalid MF config */ 13903 if (SC_VN(sc) >= 1) { 13904 BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); 13905 return (1); 13906 } 13907 13908 return (0); 13909 } 13910 13911 /* get the MF configuration */ 13912 mf_info->mf_config[SC_VN(sc)] = 13913 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13914 13915 switch(mf_info->mf_mode) 13916 { 13917 case MULTI_FUNCTION_SD: 13918 13919 bxe_get_shmem_mf_cfg_info_sd(sc); 13920 break; 13921 13922 case MULTI_FUNCTION_SI: 13923 13924 bxe_get_shmem_mf_cfg_info_si(sc); 13925 break; 13926 13927 case MULTI_FUNCTION_AFEX: 13928 13929 bxe_get_shmem_mf_cfg_info_niv(sc); 13930 break; 13931 13932 default: 13933 13934 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", 13935 mf_info->mf_mode); 13936 return (1); 13937 } 13938 13939 /* get the congestion management parameters */ 13940 13941 vnic = 0; 13942 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13943 /* get min/max bw */ 13944 val = MFCFG_RD(sc, func_mf_config[i].config); 13945 mf_info->min_bw[vnic] = 13946 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); 13947 mf_info->max_bw[vnic] = 13948 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); 13949 vnic++; 13950 } 13951 13952 return (bxe_check_valid_mf_cfg(sc)); 13953 } 13954 13955 static int 13956 bxe_get_shmem_info(struct bxe_softc *sc) 13957 { 13958 int port; 13959 uint32_t mac_hi, mac_lo, val; 13960 13961 port = SC_PORT(sc); 13962 mac_hi = mac_lo = 0; 13963 13964 sc->link_params.sc = sc; 13965 sc->link_params.port = port; 13966 13967 /* get the hardware config info */ 13968 sc->devinfo.hw_config = 13969 SHMEM_RD(sc, dev_info.shared_hw_config.config); 13970 sc->devinfo.hw_config2 = 13971 SHMEM_RD(sc, dev_info.shared_hw_config.config2); 13972 13973 sc->link_params.hw_led_mode = 13974 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> 13975 SHARED_HW_CFG_LED_MODE_SHIFT); 13976 13977 /* get the port feature config */ 13978 sc->port.config = 13979 SHMEM_RD(sc, dev_info.port_feature_config[port].config), 13980 13981 /* get the link params */ 13982 sc->link_params.speed_cap_mask[0] = 13983 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); 13984 sc->link_params.speed_cap_mask[1] = 13985 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2); 13986 13987 /* get the lane config */ 13988 sc->link_params.lane_config = 13989 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); 13990 13991 /* get the link config */ 13992 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); 13993 sc->port.link_config[ELINK_INT_PHY] = val; 13994 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); 13995 sc->port.link_config[ELINK_EXT_PHY1] = 13996 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); 13997 13998 /* get the override preemphasis flag and enable it or turn it off */ 13999 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 14000 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { 14001 sc->link_params.feature_config_flags |= 14002 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 14003 } else { 14004 sc->link_params.feature_config_flags &= 14005 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 14006 } 14007 14008 /* get the initial value of the link params */ 14009 sc->link_params.multi_phy_config = 14010 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); 14011 14012 /* get external phy info */ 14013 sc->port.ext_phy_config = 14014 SHMEM_RD(sc, 
dev_info.port_hw_config[port].external_phy_config); 14015 14016 /* get the multifunction configuration */ 14017 bxe_get_mf_cfg_info(sc); 14018 14019 /* get the mac address */ 14020 if (IS_MF(sc)) { 14021 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 14022 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 14023 } else { 14024 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 14025 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 14026 } 14027 14028 if ((mac_lo == 0) && (mac_hi == 0)) { 14029 *sc->mac_addr_str = 0; 14030 BLOGE(sc, "No Ethernet address programmed!\n"); 14031 } else { 14032 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); 14033 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); 14034 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); 14035 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); 14036 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); 14037 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); 14038 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 14039 "%02x:%02x:%02x:%02x:%02x:%02x", 14040 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], 14041 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], 14042 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); 14043 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); 14044 } 14045 14046 #if 0 14047 if (!IS_MF(sc) && 14048 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14049 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE)) { 14050 sc->flags |= BXE_NO_ISCSI; 14051 } 14052 if (!IS_MF(sc) && 14053 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14054 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI)) { 14055 sc->flags |= BXE_NO_FCOE_FLAG; 14056 } 14057 #endif 14058 14059 return (0); 14060 } 14061 14062 static void 14063 bxe_get_tunable_params(struct bxe_softc *sc) 14064 { 14065 /* sanity checks */ 14066 14067 if ((bxe_interrupt_mode != INTR_MODE_INTX) && 14068 (bxe_interrupt_mode != INTR_MODE_MSI) && 14069 (bxe_interrupt_mode != INTR_MODE_MSIX)) { 14070 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); 14071 bxe_interrupt_mode = INTR_MODE_MSIX; 14072 } 14073 14074 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { 14075 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); 14076 bxe_queue_count = 0; 14077 } 14078 14079 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { 14080 if (bxe_max_rx_bufs == 0) { 14081 bxe_max_rx_bufs = RX_BD_USABLE; 14082 } else { 14083 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); 14084 bxe_max_rx_bufs = 2048; 14085 } 14086 } 14087 14088 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) { 14089 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); 14090 bxe_hc_rx_ticks = 25; 14091 } 14092 14093 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { 14094 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); 14095 bxe_hc_tx_ticks = 50; 14096 } 14097 14098 if (bxe_max_aggregation_size == 0) { 14099 bxe_max_aggregation_size = TPA_AGG_SIZE; 14100 } 14101 14102 if (bxe_max_aggregation_size > 0xffff) { 14103 BLOGW(sc, "invalid max_aggregation_size (%d)\n", 14104 bxe_max_aggregation_size); 14105 bxe_max_aggregation_size = TPA_AGG_SIZE; 14106 } 14107 14108 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { 14109 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); 14110 bxe_mrrs = -1; 14111 } 14112 14113 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { 14114 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); 
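/* fall back to 0, i.e. defer AutoGrEEEn to the NVRAM port config (see bxe_get_phy_info()) */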
14115 bxe_autogreeen = 0; 14116 } 14117 14118 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { 14119 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); 14120 bxe_udp_rss = 0; 14121 } 14122 14123 /* pull in user settings */ 14124 14125 sc->interrupt_mode = bxe_interrupt_mode; 14126 sc->max_rx_bufs = bxe_max_rx_bufs; 14127 sc->hc_rx_ticks = bxe_hc_rx_ticks; 14128 sc->hc_tx_ticks = bxe_hc_tx_ticks; 14129 sc->max_aggregation_size = bxe_max_aggregation_size; 14130 sc->mrrs = bxe_mrrs; 14131 sc->autogreeen = bxe_autogreeen; 14132 sc->udp_rss = bxe_udp_rss; 14133 14134 if (bxe_interrupt_mode == INTR_MODE_INTX) { 14135 sc->num_queues = 1; 14136 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ 14137 sc->num_queues = 14138 min((bxe_queue_count ? bxe_queue_count : mp_ncpus), 14139 MAX_RSS_CHAINS); 14140 if (sc->num_queues > mp_ncpus) { 14141 sc->num_queues = mp_ncpus; 14142 } 14143 } 14144 14145 BLOGD(sc, DBG_LOAD, 14146 "User Config: " 14147 "debug=0x%lx " 14148 "interrupt_mode=%d " 14149 "queue_count=%d " 14150 "hc_rx_ticks=%d " 14151 "hc_tx_ticks=%d " 14152 "rx_budget=%d " 14153 "max_aggregation_size=%d " 14154 "mrrs=%d " 14155 "autogreeen=%d " 14156 "udp_rss=%d\n", 14157 bxe_debug, 14158 sc->interrupt_mode, 14159 sc->num_queues, 14160 sc->hc_rx_ticks, 14161 sc->hc_tx_ticks, 14162 bxe_rx_budget, 14163 sc->max_aggregation_size, 14164 sc->mrrs, 14165 sc->autogreeen, 14166 sc->udp_rss); 14167 } 14168 14169 static void 14170 bxe_media_detect(struct bxe_softc *sc) 14171 { 14172 uint32_t phy_idx = bxe_get_cur_phy_idx(sc); 14173 switch (sc->link_params.phy[phy_idx].media_type) { 14174 case ELINK_ETH_PHY_SFPP_10G_FIBER: 14175 case ELINK_ETH_PHY_XFP_FIBER: 14176 BLOGI(sc, "Found 10Gb Fiber media.\n"); 14177 sc->media = IFM_10G_SR; 14178 break; 14179 case ELINK_ETH_PHY_SFP_1G_FIBER: 14180 BLOGI(sc, "Found 1Gb Fiber media.\n"); 14181 sc->media = IFM_1000_SX; 14182 break; 14183 case ELINK_ETH_PHY_KR: 14184 case ELINK_ETH_PHY_CX4: 14185 BLOGI(sc, "Found 10GBase-CX4 media.\n"); 14186 sc->media = IFM_10G_CX4; 14187 break; 14188 case ELINK_ETH_PHY_DA_TWINAX: 14189 BLOGI(sc, "Found 10Gb Twinax media.\n"); 14190 sc->media = IFM_10G_TWINAX; 14191 break; 14192 case ELINK_ETH_PHY_BASE_T: 14193 if (sc->link_params.speed_cap_mask[0] & 14194 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 14195 BLOGI(sc, "Found 10GBase-T media.\n"); 14196 sc->media = IFM_10G_T; 14197 } else { 14198 BLOGI(sc, "Found 1000Base-T media.\n"); 14199 sc->media = IFM_1000_T; 14200 } 14201 break; 14202 case ELINK_ETH_PHY_NOT_PRESENT: 14203 BLOGI(sc, "Media not present.\n"); 14204 sc->media = 0; 14205 break; 14206 case ELINK_ETH_PHY_UNSPECIFIED: 14207 default: 14208 BLOGI(sc, "Unknown media!\n"); 14209 sc->media = 0; 14210 break; 14211 } 14212 } 14213 14214 #define GET_FIELD(value, fname) \ 14215 (((value) & (fname##_MASK)) >> (fname##_SHIFT)) 14216 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 14217 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 14218 14219 static int 14220 bxe_get_igu_cam_info(struct bxe_softc *sc) 14221 { 14222 int pfid = SC_FUNC(sc); 14223 int igu_sb_id; 14224 uint32_t val; 14225 uint8_t fid, igu_sb_cnt = 0; 14226 14227 sc->igu_base_sb = 0xff; 14228 14229 if (CHIP_INT_MODE_IS_BC(sc)) { 14230 int vn = SC_VN(sc); 14231 igu_sb_cnt = sc->igu_sb_cnt; 14232 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * 14233 FP_SB_MAX_E1x); 14234 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + 14235 (CHIP_IS_MODE_4_PORT(sc) ? 
pfid : vn));
14236 return (0);
14237 }
14238
14239 /* IGU in normal mode - read CAM */
14240 for (igu_sb_id = 0;
14241 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
14242 igu_sb_id++) {
14243 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
14244 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
14245 continue;
14246 }
14247 fid = IGU_FID(val);
14248 if ((fid & IGU_FID_ENCODE_IS_PF)) {
14249 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
14250 continue;
14251 }
14252 if (IGU_VEC(val) == 0) {
14253 /* default status block */
14254 sc->igu_dsb_id = igu_sb_id;
14255 } else {
14256 if (sc->igu_base_sb == 0xff) {
14257 sc->igu_base_sb = igu_sb_id;
14258 }
14259 igu_sb_cnt++;
14260 }
14261 }
14262 }
14263
14264 /*
14265 * Due to new PF resource allocation by MFW T7.4 and above, it's possible
14266 * that the number of CAM entries will not be equal to the value advertised
14267 * in PCI. The driver should use the minimum of the two as the actual
14268 * status block count.
14269 */
14270 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
14271
14272 if (igu_sb_cnt == 0) {
14273 BLOGE(sc, "CAM configuration error\n");
14274 return (-1);
14275 }
14276
14277 return (0);
14278 }
14279
14280 /*
14281 * Gather various information from the device config space, the device itself,
14282 * shmem, and the user input.
14283 */
14284 static int
14285 bxe_get_device_info(struct bxe_softc *sc)
14286 {
14287 uint32_t val;
14288 int rc;
14289
14290 /* Get the data for the device */
14291 sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
14292 sc->devinfo.device_id = pci_get_device(sc->dev);
14293 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
14294 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
14295
14296 /* get the chip revision (chip metal comes from pci config space) */
14297 sc->devinfo.chip_id =
14298 sc->link_params.chip_id =
14299 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
14300 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
14301 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
14302 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));
14303
14304 /* force 57811 according to MISC register */
14305 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
14306 if (CHIP_IS_57810(sc)) {
14307 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
14308 (sc->devinfo.chip_id & 0x0000ffff));
14309 } else if (CHIP_IS_57810_MF(sc)) {
14310 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
14311 (sc->devinfo.chip_id & 0x0000ffff));
14312 }
14313 sc->devinfo.chip_id |= 0x1;
14314 }
14315
14316 BLOGD(sc, DBG_LOAD,
14317 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
14318 sc->devinfo.chip_id,
14319 ((sc->devinfo.chip_id >> 16) & 0xffff),
14320 ((sc->devinfo.chip_id >> 12) & 0xf),
14321 ((sc->devinfo.chip_id >> 4) & 0xff),
14322 ((sc->devinfo.chip_id >> 0) & 0xf));
14323
14324 val = (REG_RD(sc, 0x2874) & 0x55);
14325 if ((sc->devinfo.chip_id & 0x1) ||
14326 (CHIP_IS_E1(sc) && val) ||
14327 (CHIP_IS_E1H(sc) && (val == 0x55))) {
14328 sc->flags |= BXE_ONE_PORT_FLAG;
14329 BLOGD(sc, DBG_LOAD, "single port device\n");
14330 }
14331
14332 /* set the doorbell size */
14333 sc->doorbell_size = (1 << BXE_DB_SHIFT);
14334
14335 /* determine whether the device is in 2 port or 4 port mode */
14336 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1H */
14337 if (CHIP_IS_E2E3(sc)) {
14338 /*
14339 * Read port4mode_en_ovwr[0]:
14340 * If 1, four port mode is in port4mode_en_ovwr[1].
14341 * If 0, four port mode is in port4mode_en[0].
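 *
 * A compact restatement of the decode performed just below:
 *
 *   val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
 *   if (val & 0x1)
 *       val = (val >> 1) & 0x1;                    (override is active)
 *   else
 *       val = REG_RD(sc, MISC_REG_PORT4MODE_EN);   (no override)
 *   chip_port_mode = val ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;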
14342 */ 14343 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); 14344 if (val & 1) { 14345 val = ((val >> 1) & 1); 14346 } else { 14347 val = REG_RD(sc, MISC_REG_PORT4MODE_EN); 14348 } 14349 14350 sc->devinfo.chip_port_mode = 14351 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; 14352 14353 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); 14354 } 14355 14356 /* get the function and path info for the device */ 14357 bxe_get_function_num(sc); 14358 14359 /* get the shared memory base address */ 14360 sc->devinfo.shmem_base = 14361 sc->link_params.shmem_base = 14362 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 14363 sc->devinfo.shmem2_base = 14364 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 : 14365 MISC_REG_GENERIC_CR_0)); 14366 14367 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n", 14368 sc->devinfo.shmem_base, sc->devinfo.shmem2_base); 14369 14370 if (!sc->devinfo.shmem_base) { 14371 /* this should ONLY prevent upcoming shmem reads */ 14372 BLOGI(sc, "MCP not active\n"); 14373 sc->flags |= BXE_NO_MCP_FLAG; 14374 return (0); 14375 } 14376 14377 /* make sure the shared memory contents are valid */ 14378 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 14379 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != 14380 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { 14381 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val); 14382 return (0); 14383 } 14384 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val); 14385 14386 /* get the bootcode version */ 14387 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); 14388 snprintf(sc->devinfo.bc_ver_str, 14389 sizeof(sc->devinfo.bc_ver_str), 14390 "%d.%d.%d", 14391 ((sc->devinfo.bc_ver >> 24) & 0xff), 14392 ((sc->devinfo.bc_ver >> 16) & 0xff), 14393 ((sc->devinfo.bc_ver >> 8) & 0xff)); 14394 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str); 14395 14396 /* get the bootcode shmem address */ 14397 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc); 14398 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base); 14399 14400 /* clean indirect addresses as they're not used */ 14401 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); 14402 if (IS_PF(sc)) { 14403 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); 14404 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); 14405 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); 14406 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); 14407 if (CHIP_IS_E1x(sc)) { 14408 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); 14409 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); 14410 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); 14411 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); 14412 } 14413 14414 /* 14415 * Enable internal target-read (in case we are probed after PF 14416 * FLR). Must be done prior to any BAR read access. 
Only for
14417 * 57712 and up
14418 */
14419 if (!CHIP_IS_E1x(sc)) {
14420 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14421 }
14422 }
14423
14424 /* get the nvram size */
14425 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14426 sc->devinfo.flash_size =
14427 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14428 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
14429
14430 /* get PCI capabilities */
14431 bxe_probe_pci_caps(sc);
14432
14433 bxe_set_power_state(sc, PCI_PM_D0);
14434
14435 /* get various configuration parameters from shmem */
14436 bxe_get_shmem_info(sc);
14437
14438 if (sc->devinfo.pcie_msix_cap_reg != 0) {
14439 val = pci_read_config(sc->dev,
14440 (sc->devinfo.pcie_msix_cap_reg +
14441 PCIR_MSIX_CTRL),
14442 2);
14443 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14444 } else {
14445 sc->igu_sb_cnt = 1;
14446 }
14447
14448 sc->igu_base_addr = BAR_IGU_INTMEM;
14449
14450 /* initialize IGU parameters */
14451 if (CHIP_IS_E1x(sc)) {
14452 sc->devinfo.int_block = INT_BLOCK_HC;
14453 sc->igu_dsb_id = DEF_SB_IGU_ID;
14454 sc->igu_base_sb = 0;
14455 } else {
14456 sc->devinfo.int_block = INT_BLOCK_IGU;
14457
14458 /* do not allow device reset during IGU info processing */
14459 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14460
14461 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14462
14463 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14464 int tout = 5000;
14465
14466 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14467
14468 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14469 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14470 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14471
14472 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14473 tout--;
14474 DELAY(1000);
14475 }
14476
14477 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14478 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14479 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14480 return (-1);
14481 }
14482 }
14483
14484 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14485 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14486 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14487 } else {
14488 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14489 }
14490
14491 rc = bxe_get_igu_cam_info(sc);
14492
14493 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14494
14495 if (rc) {
14496 return (rc);
14497 }
14498 }
14499
14500 /*
14501 * Get base FW non-default (fast path) status block ID. This value is
14502 * used to initialize the fw_sb_id saved on the fp/queue structure to
14503 * determine the id used by the FW.
14504 */
14505 if (CHIP_IS_E1x(sc)) {
14506 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14507 } else {
14508 /*
14509 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14510 * the same queue are indicated on the same IGU SB). So we prefer
14511 * FW and IGU SBs to be the same value.
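 *
 * Hence base_fw_ndsb is seeded directly from igu_base_sb below, whereas
 * the E1x branch above derives it from the port number and the logical
 * function id.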
14512 */ 14513 sc->base_fw_ndsb = sc->igu_base_sb; 14514 } 14515 14516 BLOGD(sc, DBG_LOAD, 14517 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", 14518 sc->igu_dsb_id, sc->igu_base_sb, 14519 sc->igu_sb_cnt, sc->base_fw_ndsb); 14520 14521 elink_phy_probe(&sc->link_params); 14522 14523 return (0); 14524 } 14525 14526 static void 14527 bxe_link_settings_supported(struct bxe_softc *sc, 14528 uint32_t switch_cfg) 14529 { 14530 uint32_t cfg_size = 0; 14531 uint32_t idx; 14532 uint8_t port = SC_PORT(sc); 14533 14534 /* aggregation of supported attributes of all external phys */ 14535 sc->port.supported[0] = 0; 14536 sc->port.supported[1] = 0; 14537 14538 switch (sc->link_params.num_phys) { 14539 case 1: 14540 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; 14541 cfg_size = 1; 14542 break; 14543 case 2: 14544 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; 14545 cfg_size = 1; 14546 break; 14547 case 3: 14548 if (sc->link_params.multi_phy_config & 14549 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 14550 sc->port.supported[1] = 14551 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14552 sc->port.supported[0] = 14553 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14554 } else { 14555 sc->port.supported[0] = 14556 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14557 sc->port.supported[1] = 14558 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14559 } 14560 cfg_size = 2; 14561 break; 14562 } 14563 14564 if (!(sc->port.supported[0] || sc->port.supported[1])) { 14565 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", 14566 SHMEM_RD(sc, 14567 dev_info.port_hw_config[port].external_phy_config), 14568 SHMEM_RD(sc, 14569 dev_info.port_hw_config[port].external_phy_config2)); 14570 return; 14571 } 14572 14573 if (CHIP_IS_E3(sc)) 14574 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 14575 else { 14576 switch (switch_cfg) { 14577 case ELINK_SWITCH_CFG_1G: 14578 sc->port.phy_addr = 14579 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 14580 break; 14581 case ELINK_SWITCH_CFG_10G: 14582 sc->port.phy_addr = 14583 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 14584 break; 14585 default: 14586 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", 14587 sc->port.link_config[0]); 14588 return; 14589 } 14590 } 14591 14592 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); 14593 14594 /* mask what we support according to speed_cap_mask per configuration */ 14595 for (idx = 0; idx < cfg_size; idx++) { 14596 if (!(sc->link_params.speed_cap_mask[idx] & 14597 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 14598 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; 14599 } 14600 14601 if (!(sc->link_params.speed_cap_mask[idx] & 14602 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 14603 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; 14604 } 14605 14606 if (!(sc->link_params.speed_cap_mask[idx] & 14607 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 14608 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; 14609 } 14610 14611 if (!(sc->link_params.speed_cap_mask[idx] & 14612 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 14613 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; 14614 } 14615 14616 if (!(sc->link_params.speed_cap_mask[idx] & 14617 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 14618 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; 14619 } 14620 14621 if (!(sc->link_params.speed_cap_mask[idx] & 14622 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 14623 sc->port.supported[idx] &= 
~ELINK_SUPPORTED_2500baseX_Full; 14624 } 14625 14626 if (!(sc->link_params.speed_cap_mask[idx] & 14627 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 14628 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; 14629 } 14630 14631 if (!(sc->link_params.speed_cap_mask[idx] & 14632 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 14633 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; 14634 } 14635 } 14636 14637 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", 14638 sc->port.supported[0], sc->port.supported[1]); 14639 } 14640 14641 static void 14642 bxe_link_settings_requested(struct bxe_softc *sc) 14643 { 14644 uint32_t link_config; 14645 uint32_t idx; 14646 uint32_t cfg_size = 0; 14647 14648 sc->port.advertising[0] = 0; 14649 sc->port.advertising[1] = 0; 14650 14651 switch (sc->link_params.num_phys) { 14652 case 1: 14653 case 2: 14654 cfg_size = 1; 14655 break; 14656 case 3: 14657 cfg_size = 2; 14658 break; 14659 } 14660 14661 for (idx = 0; idx < cfg_size; idx++) { 14662 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 14663 link_config = sc->port.link_config[idx]; 14664 14665 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 14666 case PORT_FEATURE_LINK_SPEED_AUTO: 14667 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 14668 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14669 sc->port.advertising[idx] |= sc->port.supported[idx]; 14670 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 14671 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 14672 sc->port.advertising[idx] |= 14673 (ELINK_SUPPORTED_100baseT_Half | 14674 ELINK_SUPPORTED_100baseT_Full); 14675 } else { 14676 /* force 10G, no AN */ 14677 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14678 sc->port.advertising[idx] |= 14679 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 14680 continue; 14681 } 14682 break; 14683 14684 case PORT_FEATURE_LINK_SPEED_10M_FULL: 14685 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { 14686 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14687 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | 14688 ADVERTISED_TP); 14689 } else { 14690 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14691 "speed_cap_mask=0x%08x\n", 14692 link_config, sc->link_params.speed_cap_mask[idx]); 14693 return; 14694 } 14695 break; 14696 14697 case PORT_FEATURE_LINK_SPEED_10M_HALF: 14698 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { 14699 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14700 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14701 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | 14702 ADVERTISED_TP); 14703 } else { 14704 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14705 "speed_cap_mask=0x%08x\n", 14706 link_config, sc->link_params.speed_cap_mask[idx]); 14707 return; 14708 } 14709 break; 14710 14711 case PORT_FEATURE_LINK_SPEED_100M_FULL: 14712 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { 14713 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14714 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | 14715 ADVERTISED_TP); 14716 } else { 14717 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14718 "speed_cap_mask=0x%08x\n", 14719 link_config, sc->link_params.speed_cap_mask[idx]); 14720 return; 14721 } 14722 break; 14723 14724 case PORT_FEATURE_LINK_SPEED_100M_HALF: 14725 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { 14726 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14727 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14728 sc->port.advertising[idx] |= 
(ADVERTISED_100baseT_Half | 14729 ADVERTISED_TP); 14730 } else { 14731 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14732 "speed_cap_mask=0x%08x\n", 14733 link_config, sc->link_params.speed_cap_mask[idx]); 14734 return; 14735 } 14736 break; 14737 14738 case PORT_FEATURE_LINK_SPEED_1G: 14739 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { 14740 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; 14741 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | 14742 ADVERTISED_TP); 14743 } else { 14744 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14745 "speed_cap_mask=0x%08x\n", 14746 link_config, sc->link_params.speed_cap_mask[idx]); 14747 return; 14748 } 14749 break; 14750 14751 case PORT_FEATURE_LINK_SPEED_2_5G: 14752 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { 14753 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; 14754 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | 14755 ADVERTISED_TP); 14756 } else { 14757 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14758 "speed_cap_mask=0x%08x\n", 14759 link_config, sc->link_params.speed_cap_mask[idx]); 14760 return; 14761 } 14762 break; 14763 14764 case PORT_FEATURE_LINK_SPEED_10G_CX4: 14765 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { 14766 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14767 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | 14768 ADVERTISED_FIBRE); 14769 } else { 14770 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14771 "speed_cap_mask=0x%08x\n", 14772 link_config, sc->link_params.speed_cap_mask[idx]); 14773 return; 14774 } 14775 break; 14776 14777 case PORT_FEATURE_LINK_SPEED_20G: 14778 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 14779 break; 14780 14781 default: 14782 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14783 "speed_cap_mask=0x%08x\n", 14784 link_config, sc->link_params.speed_cap_mask[idx]); 14785 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14786 sc->port.advertising[idx] = sc->port.supported[idx]; 14787 break; 14788 } 14789 14790 sc->link_params.req_flow_ctrl[idx] = 14791 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 14792 14793 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 14794 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 14795 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; 14796 } else { 14797 bxe_set_requested_fc(sc); 14798 } 14799 } 14800 14801 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " 14802 "req_flow_ctrl=0x%x advertising=0x%x\n", 14803 sc->link_params.req_line_speed[idx], 14804 sc->link_params.req_duplex[idx], 14805 sc->link_params.req_flow_ctrl[idx], 14806 sc->port.advertising[idx]); 14807 } 14808 } 14809 14810 static void 14811 bxe_get_phy_info(struct bxe_softc *sc) 14812 { 14813 uint8_t port = SC_PORT(sc); 14814 uint32_t config = sc->port.config; 14815 uint32_t eee_mode; 14816 14817 /* shmem data already read in bxe_get_shmem_info() */ 14818 14819 BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x " 14820 "link_config0=0x%08x\n", 14821 sc->link_params.lane_config, 14822 sc->link_params.speed_cap_mask[0], 14823 sc->port.link_config[0]); 14824 14825 bxe_link_settings_supported(sc, sc->link_params.switch_cfg); 14826 bxe_link_settings_requested(sc); 14827 14828 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { 14829 sc->link_params.feature_config_flags |= 14830 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14831 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { 14832 
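        /* force off: clear the AutoGrEEEn flag regardless of the NVRAM port config */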
sc->link_params.feature_config_flags &= 14833 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14834 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { 14835 sc->link_params.feature_config_flags |= 14836 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14837 } 14838 14839 /* configure link feature according to nvram value */ 14840 eee_mode = 14841 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) & 14842 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 14843 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 14844 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 14845 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | 14846 ELINK_EEE_MODE_ENABLE_LPI | 14847 ELINK_EEE_MODE_OUTPUT_TIME); 14848 } else { 14849 sc->link_params.eee_mode = 0; 14850 } 14851 14852 /* get the media type */ 14853 bxe_media_detect(sc); 14854 } 14855 14856 static void 14857 bxe_get_params(struct bxe_softc *sc) 14858 { 14859 /* get user tunable params */ 14860 bxe_get_tunable_params(sc); 14861 14862 /* select the RX and TX ring sizes */ 14863 sc->tx_ring_size = TX_BD_USABLE; 14864 sc->rx_ring_size = RX_BD_USABLE; 14865 14866 /* XXX disable WoL */ 14867 sc->wol = 0; 14868 } 14869 14870 static void 14871 bxe_set_modes_bitmap(struct bxe_softc *sc) 14872 { 14873 uint32_t flags = 0; 14874 14875 if (CHIP_REV_IS_FPGA(sc)) { 14876 SET_FLAGS(flags, MODE_FPGA); 14877 } else if (CHIP_REV_IS_EMUL(sc)) { 14878 SET_FLAGS(flags, MODE_EMUL); 14879 } else { 14880 SET_FLAGS(flags, MODE_ASIC); 14881 } 14882 14883 if (CHIP_IS_MODE_4_PORT(sc)) { 14884 SET_FLAGS(flags, MODE_PORT4); 14885 } else { 14886 SET_FLAGS(flags, MODE_PORT2); 14887 } 14888 14889 if (CHIP_IS_E2(sc)) { 14890 SET_FLAGS(flags, MODE_E2); 14891 } else if (CHIP_IS_E3(sc)) { 14892 SET_FLAGS(flags, MODE_E3); 14893 if (CHIP_REV(sc) == CHIP_REV_Ax) { 14894 SET_FLAGS(flags, MODE_E3_A0); 14895 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ { 14896 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 14897 } 14898 } 14899 14900 if (IS_MF(sc)) { 14901 SET_FLAGS(flags, MODE_MF); 14902 switch (sc->devinfo.mf_info.mf_mode) { 14903 case MULTI_FUNCTION_SD: 14904 SET_FLAGS(flags, MODE_MF_SD); 14905 break; 14906 case MULTI_FUNCTION_SI: 14907 SET_FLAGS(flags, MODE_MF_SI); 14908 break; 14909 case MULTI_FUNCTION_AFEX: 14910 SET_FLAGS(flags, MODE_MF_AFEX); 14911 break; 14912 } 14913 } else { 14914 SET_FLAGS(flags, MODE_SF); 14915 } 14916 14917 #if defined(__LITTLE_ENDIAN) 14918 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 14919 #else /* __BIG_ENDIAN */ 14920 SET_FLAGS(flags, MODE_BIG_ENDIAN); 14921 #endif 14922 14923 INIT_MODE_FLAGS(sc) = flags; 14924 } 14925 14926 static int 14927 bxe_alloc_hsi_mem(struct bxe_softc *sc) 14928 { 14929 struct bxe_fastpath *fp; 14930 bus_addr_t busaddr; 14931 int max_agg_queues; 14932 int max_segments; 14933 bus_size_t max_size; 14934 bus_size_t max_seg_size; 14935 char buf[32]; 14936 int rc; 14937 int i, j; 14938 14939 /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */ 14940 14941 /* allocate the parent bus DMA tag */ 14942 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */ 14943 1, /* alignment */ 14944 0, /* boundary limit */ 14945 BUS_SPACE_MAXADDR, /* restricted low */ 14946 BUS_SPACE_MAXADDR, /* restricted hi */ 14947 NULL, /* addr filter() */ 14948 NULL, /* addr filter() arg */ 14949 BUS_SPACE_MAXSIZE_32BIT, /* max map size */ 14950 BUS_SPACE_UNRESTRICTED, /* num discontinuous */ 14951 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ 14952 0, /* flags */ 14953 NULL, /* lock() */ 14954 NULL, /* lock() arg */ 14955 &sc->parent_dma_tag); /* returned dma tag 
*/ 14956 if (rc != 0) { 14957 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); 14958 return (1); 14959 } 14960 14961 /************************/ 14962 /* DEFAULT STATUS BLOCK */ 14963 /************************/ 14964 14965 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), 14966 &sc->def_sb_dma, "default status block") != 0) { 14967 /* XXX */ 14968 bus_dma_tag_destroy(sc->parent_dma_tag); 14969 return (1); 14970 } 14971 14972 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 14973 14974 /***************/ 14975 /* EVENT QUEUE */ 14976 /***************/ 14977 14978 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14979 &sc->eq_dma, "event queue") != 0) { 14980 /* XXX */ 14981 bxe_dma_free(sc, &sc->def_sb_dma); 14982 sc->def_sb = NULL; 14983 bus_dma_tag_destroy(sc->parent_dma_tag); 14984 return (1); 14985 } 14986 14987 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; 14988 14989 /*************/ 14990 /* SLOW PATH */ 14991 /*************/ 14992 14993 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), 14994 &sc->sp_dma, "slow path") != 0) { 14995 /* XXX */ 14996 bxe_dma_free(sc, &sc->eq_dma); 14997 sc->eq = NULL; 14998 bxe_dma_free(sc, &sc->def_sb_dma); 14999 sc->def_sb = NULL; 15000 bus_dma_tag_destroy(sc->parent_dma_tag); 15001 return (1); 15002 } 15003 15004 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; 15005 15006 /*******************/ 15007 /* SLOW PATH QUEUE */ 15008 /*******************/ 15009 15010 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 15011 &sc->spq_dma, "slow path queue") != 0) { 15012 /* XXX */ 15013 bxe_dma_free(sc, &sc->sp_dma); 15014 sc->sp = NULL; 15015 bxe_dma_free(sc, &sc->eq_dma); 15016 sc->eq = NULL; 15017 bxe_dma_free(sc, &sc->def_sb_dma); 15018 sc->def_sb = NULL; 15019 bus_dma_tag_destroy(sc->parent_dma_tag); 15020 return (1); 15021 } 15022 15023 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 15024 15025 /***************************/ 15026 /* FW DECOMPRESSION BUFFER */ 15027 /***************************/ 15028 15029 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 15030 "fw decompression buffer") != 0) { 15031 /* XXX */ 15032 bxe_dma_free(sc, &sc->spq_dma); 15033 sc->spq = NULL; 15034 bxe_dma_free(sc, &sc->sp_dma); 15035 sc->sp = NULL; 15036 bxe_dma_free(sc, &sc->eq_dma); 15037 sc->eq = NULL; 15038 bxe_dma_free(sc, &sc->def_sb_dma); 15039 sc->def_sb = NULL; 15040 bus_dma_tag_destroy(sc->parent_dma_tag); 15041 return (1); 15042 } 15043 15044 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 15045 15046 if ((sc->gz_strm = 15047 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { 15048 /* XXX */ 15049 bxe_dma_free(sc, &sc->gz_buf_dma); 15050 sc->gz_buf = NULL; 15051 bxe_dma_free(sc, &sc->spq_dma); 15052 sc->spq = NULL; 15053 bxe_dma_free(sc, &sc->sp_dma); 15054 sc->sp = NULL; 15055 bxe_dma_free(sc, &sc->eq_dma); 15056 sc->eq = NULL; 15057 bxe_dma_free(sc, &sc->def_sb_dma); 15058 sc->def_sb = NULL; 15059 bus_dma_tag_destroy(sc->parent_dma_tag); 15060 return (1); 15061 } 15062 15063 /*************/ 15064 /* FASTPATHS */ 15065 /*************/ 15066 15067 /* allocate DMA memory for each fastpath structure */ 15068 for (i = 0; i < sc->num_queues; i++) { 15069 fp = &sc->fp[i]; 15070 fp->sc = sc; 15071 fp->index = i; 15072 15073 /*******************/ 15074 /* FP STATUS BLOCK */ 15075 /*******************/ 15076 15077 snprintf(buf, sizeof(buf), "fp %d status block", i); 15078 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), 15079 &fp->sb_dma, buf) != 0) { 15080 /* XXX unwind and free previous fastpath allocations */ 15081 BLOGE(sc, "Failed 
to alloc %s\n", buf); 15082 return (1); 15083 } else { 15084 if (CHIP_IS_E2E3(sc)) { 15085 fp->status_block.e2_sb = 15086 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; 15087 } else { 15088 fp->status_block.e1x_sb = 15089 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; 15090 } 15091 } 15092 15093 /******************/ 15094 /* FP TX BD CHAIN */ 15095 /******************/ 15096 15097 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); 15098 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), 15099 &fp->tx_dma, buf) != 0) { 15100 /* XXX unwind and free previous fastpath allocations */ 15101 BLOGE(sc, "Failed to alloc %s\n", buf); 15102 return (1); 15103 } else { 15104 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; 15105 } 15106 15107 /* link together the tx bd chain pages */ 15108 for (j = 1; j <= TX_BD_NUM_PAGES; j++) { 15109 /* index into the tx bd chain array to last entry per page */ 15110 struct eth_tx_next_bd *tx_next_bd = 15111 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; 15112 /* point to the next page and wrap from last page */ 15113 busaddr = (fp->tx_dma.paddr + 15114 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); 15115 tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); 15116 tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); 15117 } 15118 15119 /******************/ 15120 /* FP RX BD CHAIN */ 15121 /******************/ 15122 15123 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); 15124 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), 15125 &fp->rx_dma, buf) != 0) { 15126 /* XXX unwind and free previous fastpath allocations */ 15127 BLOGE(sc, "Failed to alloc %s\n", buf); 15128 return (1); 15129 } else { 15130 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; 15131 } 15132 15133 /* link together the rx bd chain pages */ 15134 for (j = 1; j <= RX_BD_NUM_PAGES; j++) { 15135 /* index into the rx bd chain array to last entry per page */ 15136 struct eth_rx_bd *rx_bd = 15137 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; 15138 /* point to the next page and wrap from last page */ 15139 busaddr = (fp->rx_dma.paddr + 15140 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); 15141 rx_bd->addr_hi = htole32(U64_HI(busaddr)); 15142 rx_bd->addr_lo = htole32(U64_LO(busaddr)); 15143 } 15144 15145 /*******************/ 15146 /* FP RX RCQ CHAIN */ 15147 /*******************/ 15148 15149 snprintf(buf, sizeof(buf), "fp %d rcq chain", i); 15150 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), 15151 &fp->rcq_dma, buf) != 0) { 15152 /* XXX unwind and free previous fastpath allocations */ 15153 BLOGE(sc, "Failed to alloc %s\n", buf); 15154 return (1); 15155 } else { 15156 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; 15157 } 15158 15159 /* link together the rcq chain pages */ 15160 for (j = 1; j <= RCQ_NUM_PAGES; j++) { 15161 /* index into the rcq chain array to last entry per page */ 15162 struct eth_rx_cqe_next_page *rx_cqe_next = 15163 (struct eth_rx_cqe_next_page *) 15164 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; 15165 /* point to the next page and wrap from last page */ 15166 busaddr = (fp->rcq_dma.paddr + 15167 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); 15168 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); 15169 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); 15170 } 15171 15172 /*******************/ 15173 /* FP RX SGE CHAIN */ 15174 /*******************/ 15175 15176 snprintf(buf, sizeof(buf), "fp %d sge chain", i); 15177 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), 15178 &fp->rx_sge_dma, buf) != 0) { 15179 /* XXX unwind and free previous fastpath 
allocations */ 15180 BLOGE(sc, "Failed to alloc %s\n", buf); 15181 return (1); 15182 } else { 15183 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; 15184 } 15185 15186 /* link together the sge chain pages */ 15187 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { 15188 /* index into the rcq chain array to last entry per page */ 15189 struct eth_rx_sge *rx_sge = 15190 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; 15191 /* point to the next page and wrap from last page */ 15192 busaddr = (fp->rx_sge_dma.paddr + 15193 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); 15194 rx_sge->addr_hi = htole32(U64_HI(busaddr)); 15195 rx_sge->addr_lo = htole32(U64_LO(busaddr)); 15196 } 15197 15198 /***********************/ 15199 /* FP TX MBUF DMA MAPS */ 15200 /***********************/ 15201 15202 /* set required sizes before mapping to conserve resources */ 15203 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { 15204 max_size = BXE_TSO_MAX_SIZE; 15205 max_segments = BXE_TSO_MAX_SEGMENTS; 15206 max_seg_size = BXE_TSO_MAX_SEG_SIZE; 15207 } else { 15208 max_size = (MCLBYTES * BXE_MAX_SEGMENTS); 15209 max_segments = BXE_MAX_SEGMENTS; 15210 max_seg_size = MCLBYTES; 15211 } 15212 15213 /* create a dma tag for the tx mbufs */ 15214 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15215 1, /* alignment */ 15216 0, /* boundary limit */ 15217 BUS_SPACE_MAXADDR, /* restricted low */ 15218 BUS_SPACE_MAXADDR, /* restricted hi */ 15219 NULL, /* addr filter() */ 15220 NULL, /* addr filter() arg */ 15221 max_size, /* max map size */ 15222 max_segments, /* num discontinuous */ 15223 max_seg_size, /* max seg size */ 15224 0, /* flags */ 15225 NULL, /* lock() */ 15226 NULL, /* lock() arg */ 15227 &fp->tx_mbuf_tag); /* returned dma tag */ 15228 if (rc != 0) { 15229 /* XXX unwind and free previous fastpath allocations */ 15230 BLOGE(sc, "Failed to create dma tag for " 15231 "'fp %d tx mbufs' (%d)\n", 15232 i, rc); 15233 return (1); 15234 } 15235 15236 /* create dma maps for each of the tx mbuf clusters */ 15237 for (j = 0; j < TX_BD_TOTAL; j++) { 15238 if (bus_dmamap_create(fp->tx_mbuf_tag, 15239 BUS_DMA_NOWAIT, 15240 &fp->tx_mbuf_chain[j].m_map)) { 15241 /* XXX unwind and free previous fastpath allocations */ 15242 BLOGE(sc, "Failed to create dma map for " 15243 "'fp %d tx mbuf %d' (%d)\n", 15244 i, j, rc); 15245 return (1); 15246 } 15247 } 15248 15249 /***********************/ 15250 /* FP RX MBUF DMA MAPS */ 15251 /***********************/ 15252 15253 /* create a dma tag for the rx mbufs */ 15254 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15255 1, /* alignment */ 15256 0, /* boundary limit */ 15257 BUS_SPACE_MAXADDR, /* restricted low */ 15258 BUS_SPACE_MAXADDR, /* restricted hi */ 15259 NULL, /* addr filter() */ 15260 NULL, /* addr filter() arg */ 15261 MJUM9BYTES, /* max map size */ 15262 1, /* num discontinuous */ 15263 MJUM9BYTES, /* max seg size */ 15264 0, /* flags */ 15265 NULL, /* lock() */ 15266 NULL, /* lock() arg */ 15267 &fp->rx_mbuf_tag); /* returned dma tag */ 15268 if (rc != 0) { 15269 /* XXX unwind and free previous fastpath allocations */ 15270 BLOGE(sc, "Failed to create dma tag for " 15271 "'fp %d rx mbufs' (%d)\n", 15272 i, rc); 15273 return (1); 15274 } 15275 15276 /* create dma maps for each of the rx mbuf clusters */ 15277 for (j = 0; j < RX_BD_TOTAL; j++) { 15278 if (bus_dmamap_create(fp->rx_mbuf_tag, 15279 BUS_DMA_NOWAIT, 15280 &fp->rx_mbuf_chain[j].m_map)) { 15281 /* XXX unwind and free previous fastpath allocations */ 15282 BLOGE(sc, "Failed to create 
dma map for " 15283 "'fp %d rx mbuf %d' (%d)\n", 15284 i, j, rc); 15285 return (1); 15286 } 15287 } 15288 15289 /* create dma map for the spare rx mbuf cluster */ 15290 if (bus_dmamap_create(fp->rx_mbuf_tag, 15291 BUS_DMA_NOWAIT, 15292 &fp->rx_mbuf_spare_map)) { 15293 /* XXX unwind and free previous fastpath allocations */ 15294 BLOGE(sc, "Failed to create dma map for " 15295 "'fp %d spare rx mbuf' (%d)\n", 15296 i, rc); 15297 return (1); 15298 } 15299 15300 /***************************/ 15301 /* FP RX SGE MBUF DMA MAPS */ 15302 /***************************/ 15303 15304 /* create a dma tag for the rx sge mbufs */ 15305 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15306 1, /* alignment */ 15307 0, /* boundary limit */ 15308 BUS_SPACE_MAXADDR, /* restricted low */ 15309 BUS_SPACE_MAXADDR, /* restricted hi */ 15310 NULL, /* addr filter() */ 15311 NULL, /* addr filter() arg */ 15312 BCM_PAGE_SIZE, /* max map size */ 15313 1, /* num discontinuous */ 15314 BCM_PAGE_SIZE, /* max seg size */ 15315 0, /* flags */ 15316 NULL, /* lock() */ 15317 NULL, /* lock() arg */ 15318 &fp->rx_sge_mbuf_tag); /* returned dma tag */ 15319 if (rc != 0) { 15320 /* XXX unwind and free previous fastpath allocations */ 15321 BLOGE(sc, "Failed to create dma tag for " 15322 "'fp %d rx sge mbufs' (%d)\n", 15323 i, rc); 15324 return (1); 15325 } 15326 15327 /* create dma maps for the rx sge mbuf clusters */ 15328 for (j = 0; j < RX_SGE_TOTAL; j++) { 15329 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15330 BUS_DMA_NOWAIT, 15331 &fp->rx_sge_mbuf_chain[j].m_map)) { 15332 /* XXX unwind and free previous fastpath allocations */ 15333 BLOGE(sc, "Failed to create dma map for " 15334 "'fp %d rx sge mbuf %d' (%d)\n", 15335 i, j, rc); 15336 return (1); 15337 } 15338 } 15339 15340 /* create dma map for the spare rx sge mbuf cluster */ 15341 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15342 BUS_DMA_NOWAIT, 15343 &fp->rx_sge_mbuf_spare_map)) { 15344 /* XXX unwind and free previous fastpath allocations */ 15345 BLOGE(sc, "Failed to create dma map for " 15346 "'fp %d spare rx sge mbuf' (%d)\n", 15347 i, rc); 15348 return (1); 15349 } 15350 15351 /***************************/ 15352 /* FP RX TPA MBUF DMA MAPS */ 15353 /***************************/ 15354 15355 /* create dma maps for the rx tpa mbuf clusters */ 15356 max_agg_queues = MAX_AGG_QS(sc); 15357 15358 for (j = 0; j < max_agg_queues; j++) { 15359 if (bus_dmamap_create(fp->rx_mbuf_tag, 15360 BUS_DMA_NOWAIT, 15361 &fp->rx_tpa_info[j].bd.m_map)) { 15362 /* XXX unwind and free previous fastpath allocations */ 15363 BLOGE(sc, "Failed to create dma map for " 15364 "'fp %d rx tpa mbuf %d' (%d)\n", 15365 i, j, rc); 15366 return (1); 15367 } 15368 } 15369 15370 /* create dma map for the spare rx tpa mbuf cluster */ 15371 if (bus_dmamap_create(fp->rx_mbuf_tag, 15372 BUS_DMA_NOWAIT, 15373 &fp->rx_tpa_info_mbuf_spare_map)) { 15374 /* XXX unwind and free previous fastpath allocations */ 15375 BLOGE(sc, "Failed to create dma map for " 15376 "'fp %d spare rx tpa mbuf' (%d)\n", 15377 i, rc); 15378 return (1); 15379 } 15380 15381 bxe_init_sge_ring_bit_mask(fp); 15382 } 15383 15384 return (0); 15385 } 15386 15387 static void 15388 bxe_free_hsi_mem(struct bxe_softc *sc) 15389 { 15390 struct bxe_fastpath *fp; 15391 int max_agg_queues; 15392 int i, j; 15393 15394 if (sc->parent_dma_tag == NULL) { 15395 return; /* assume nothing was allocated */ 15396 } 15397 15398 for (i = 0; i < sc->num_queues; i++) { 15399 fp = &sc->fp[i]; 15400 15401 /*******************/ 15402 /* FP STATUS BLOCK */ 
15403 /*******************/ 15404 15405 bxe_dma_free(sc, &fp->sb_dma); 15406 memset(&fp->status_block, 0, sizeof(fp->status_block)); 15407 15408 /******************/ 15409 /* FP TX BD CHAIN */ 15410 /******************/ 15411 15412 bxe_dma_free(sc, &fp->tx_dma); 15413 fp->tx_chain = NULL; 15414 15415 /******************/ 15416 /* FP RX BD CHAIN */ 15417 /******************/ 15418 15419 bxe_dma_free(sc, &fp->rx_dma); 15420 fp->rx_chain = NULL; 15421 15422 /*******************/ 15423 /* FP RX RCQ CHAIN */ 15424 /*******************/ 15425 15426 bxe_dma_free(sc, &fp->rcq_dma); 15427 fp->rcq_chain = NULL; 15428 15429 /*******************/ 15430 /* FP RX SGE CHAIN */ 15431 /*******************/ 15432 15433 bxe_dma_free(sc, &fp->rx_sge_dma); 15434 fp->rx_sge_chain = NULL; 15435 15436 /***********************/ 15437 /* FP TX MBUF DMA MAPS */ 15438 /***********************/ 15439 15440 if (fp->tx_mbuf_tag != NULL) { 15441 for (j = 0; j < TX_BD_TOTAL; j++) { 15442 if (fp->tx_mbuf_chain[j].m_map != NULL) { 15443 bus_dmamap_unload(fp->tx_mbuf_tag, 15444 fp->tx_mbuf_chain[j].m_map); 15445 bus_dmamap_destroy(fp->tx_mbuf_tag, 15446 fp->tx_mbuf_chain[j].m_map); 15447 } 15448 } 15449 15450 bus_dma_tag_destroy(fp->tx_mbuf_tag); 15451 fp->tx_mbuf_tag = NULL; 15452 } 15453 15454 /***********************/ 15455 /* FP RX MBUF DMA MAPS */ 15456 /***********************/ 15457 15458 if (fp->rx_mbuf_tag != NULL) { 15459 for (j = 0; j < RX_BD_TOTAL; j++) { 15460 if (fp->rx_mbuf_chain[j].m_map != NULL) { 15461 bus_dmamap_unload(fp->rx_mbuf_tag, 15462 fp->rx_mbuf_chain[j].m_map); 15463 bus_dmamap_destroy(fp->rx_mbuf_tag, 15464 fp->rx_mbuf_chain[j].m_map); 15465 } 15466 } 15467 15468 if (fp->rx_mbuf_spare_map != NULL) { 15469 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15470 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15471 } 15472 15473 /***************************/ 15474 /* FP RX TPA MBUF DMA MAPS */ 15475 /***************************/ 15476 15477 max_agg_queues = MAX_AGG_QS(sc); 15478 15479 for (j = 0; j < max_agg_queues; j++) { 15480 if (fp->rx_tpa_info[j].bd.m_map != NULL) { 15481 bus_dmamap_unload(fp->rx_mbuf_tag, 15482 fp->rx_tpa_info[j].bd.m_map); 15483 bus_dmamap_destroy(fp->rx_mbuf_tag, 15484 fp->rx_tpa_info[j].bd.m_map); 15485 } 15486 } 15487 15488 if (fp->rx_tpa_info_mbuf_spare_map != NULL) { 15489 bus_dmamap_unload(fp->rx_mbuf_tag, 15490 fp->rx_tpa_info_mbuf_spare_map); 15491 bus_dmamap_destroy(fp->rx_mbuf_tag, 15492 fp->rx_tpa_info_mbuf_spare_map); 15493 } 15494 15495 bus_dma_tag_destroy(fp->rx_mbuf_tag); 15496 fp->rx_mbuf_tag = NULL; 15497 } 15498 15499 /***************************/ 15500 /* FP RX SGE MBUF DMA MAPS */ 15501 /***************************/ 15502 15503 if (fp->rx_sge_mbuf_tag != NULL) { 15504 for (j = 0; j < RX_SGE_TOTAL; j++) { 15505 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { 15506 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15507 fp->rx_sge_mbuf_chain[j].m_map); 15508 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15509 fp->rx_sge_mbuf_chain[j].m_map); 15510 } 15511 } 15512 15513 if (fp->rx_sge_mbuf_spare_map != NULL) { 15514 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15515 fp->rx_sge_mbuf_spare_map); 15516 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15517 fp->rx_sge_mbuf_spare_map); 15518 } 15519 15520 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); 15521 fp->rx_sge_mbuf_tag = NULL; 15522 } 15523 } 15524 15525 /***************************/ 15526 /* FW DECOMPRESSION BUFFER */ 15527 /***************************/ 15528 15529 bxe_dma_free(sc, &sc->gz_buf_dma); 15530 sc->gz_buf = 
NULL; 15531 free(sc->gz_strm, M_DEVBUF); 15532 sc->gz_strm = NULL; 15533 15534 /*******************/ 15535 /* SLOW PATH QUEUE */ 15536 /*******************/ 15537 15538 bxe_dma_free(sc, &sc->spq_dma); 15539 sc->spq = NULL; 15540 15541 /*************/ 15542 /* SLOW PATH */ 15543 /*************/ 15544 15545 bxe_dma_free(sc, &sc->sp_dma); 15546 sc->sp = NULL; 15547 15548 /***************/ 15549 /* EVENT QUEUE */ 15550 /***************/ 15551 15552 bxe_dma_free(sc, &sc->eq_dma); 15553 sc->eq = NULL; 15554 15555 /************************/ 15556 /* DEFAULT STATUS BLOCK */ 15557 /************************/ 15558 15559 bxe_dma_free(sc, &sc->def_sb_dma); 15560 sc->def_sb = NULL; 15561 15562 bus_dma_tag_destroy(sc->parent_dma_tag); 15563 sc->parent_dma_tag = NULL; 15564 } 15565 15566 /* 15567 * Previous driver DMAE transaction may have occurred when pre-boot stage 15568 * ended and boot began. This would invalidate the addresses of the 15569 * transaction, resulting in was-error bit set in the PCI causing all 15570 * hw-to-host PCIe transactions to timeout. If this happened we want to clear 15571 * the interrupt which detected this from the pglueb and the was-done bit 15572 */ 15573 static void 15574 bxe_prev_interrupted_dmae(struct bxe_softc *sc) 15575 { 15576 uint32_t val; 15577 15578 if (!CHIP_IS_E1x(sc)) { 15579 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); 15580 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 15581 BLOGD(sc, DBG_LOAD, 15582 "Clearing 'was-error' bit that was set in pglueb"); 15583 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); 15584 } 15585 } 15586 } 15587 15588 static int 15589 bxe_prev_mcp_done(struct bxe_softc *sc) 15590 { 15591 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 15592 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 15593 if (!rc) { 15594 BLOGE(sc, "MCP response failure, aborting\n"); 15595 return (-1); 15596 } 15597 15598 return (0); 15599 } 15600 15601 static struct bxe_prev_list_node * 15602 bxe_prev_path_get_entry(struct bxe_softc *sc) 15603 { 15604 struct bxe_prev_list_node *tmp; 15605 15606 LIST_FOREACH(tmp, &bxe_prev_list, node) { 15607 if ((sc->pcie_bus == tmp->bus) && 15608 (sc->pcie_device == tmp->slot) && 15609 (SC_PATH(sc) == tmp->path)) { 15610 return (tmp); 15611 } 15612 } 15613 15614 return (NULL); 15615 } 15616 15617 static uint8_t 15618 bxe_prev_is_path_marked(struct bxe_softc *sc) 15619 { 15620 struct bxe_prev_list_node *tmp; 15621 int rc = FALSE; 15622 15623 mtx_lock(&bxe_prev_mtx); 15624 15625 tmp = bxe_prev_path_get_entry(sc); 15626 if (tmp) { 15627 if (tmp->aer) { 15628 BLOGD(sc, DBG_LOAD, 15629 "Path %d/%d/%d was marked by AER\n", 15630 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15631 } else { 15632 rc = TRUE; 15633 BLOGD(sc, DBG_LOAD, 15634 "Path %d/%d/%d was already cleaned from previous drivers\n", 15635 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15636 } 15637 } 15638 15639 mtx_unlock(&bxe_prev_mtx); 15640 15641 return (rc); 15642 } 15643 15644 static int 15645 bxe_prev_mark_path(struct bxe_softc *sc, 15646 uint8_t after_undi) 15647 { 15648 struct bxe_prev_list_node *tmp; 15649 15650 mtx_lock(&bxe_prev_mtx); 15651 15652 /* Check whether the entry for this path already exists */ 15653 tmp = bxe_prev_path_get_entry(sc); 15654 if (tmp) { 15655 if (!tmp->aer) { 15656 BLOGD(sc, DBG_LOAD, 15657 "Re-marking AER in path %d/%d/%d\n", 15658 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15659 } else { 15660 BLOGD(sc, DBG_LOAD, 15661 "Removing AER indication from path %d/%d/%d\n", 15662 sc->pcie_bus, sc->pcie_device, 
SC_PATH(sc)); 15663 tmp->aer = 0; 15664 } 15665 15666 mtx_unlock(&bxe_prev_mtx); 15667 return (0); 15668 } 15669 15670 mtx_unlock(&bxe_prev_mtx); 15671 15672 /* Create an entry for this path and add it */ 15673 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, 15674 (M_NOWAIT | M_ZERO)); 15675 if (!tmp) { 15676 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); 15677 return (-1); 15678 } 15679 15680 tmp->bus = sc->pcie_bus; 15681 tmp->slot = sc->pcie_device; 15682 tmp->path = SC_PATH(sc); 15683 tmp->aer = 0; 15684 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0; 15685 15686 mtx_lock(&bxe_prev_mtx); 15687 15688 BLOGD(sc, DBG_LOAD, 15689 "Marked path %d/%d/%d - finished previous unload\n", 15690 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15691 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); 15692 15693 mtx_unlock(&bxe_prev_mtx); 15694 15695 return (0); 15696 } 15697 15698 static int 15699 bxe_do_flr(struct bxe_softc *sc) 15700 { 15701 int i; 15702 15703 /* only E2 and onwards support FLR */ 15704 if (CHIP_IS_E1x(sc)) { 15705 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); 15706 return (-1); 15707 } 15708 15709 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 15710 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 15711 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", 15712 sc->devinfo.bc_ver); 15713 return (-1); 15714 } 15715 15716 /* Wait for Transaction Pending bit clean */ 15717 for (i = 0; i < 4; i++) { 15718 if (i) { 15719 DELAY(((1 << (i - 1)) * 100) * 1000); 15720 } 15721 15722 if (!bxe_is_pcie_pending(sc)) { 15723 goto clear; 15724 } 15725 } 15726 15727 BLOGE(sc, "PCIE transaction is not cleared, " 15728 "proceeding with reset anyway\n"); 15729 15730 clear: 15731 15732 BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); 15733 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 15734 15735 return (0); 15736 } 15737 15738 struct bxe_mac_vals { 15739 uint32_t xmac_addr; 15740 uint32_t xmac_val; 15741 uint32_t emac_addr; 15742 uint32_t emac_val; 15743 uint32_t umac_addr; 15744 uint32_t umac_val; 15745 uint32_t bmac_addr; 15746 uint32_t bmac_val[2]; 15747 }; 15748 15749 static void 15750 bxe_prev_unload_close_mac(struct bxe_softc *sc, 15751 struct bxe_mac_vals *vals) 15752 { 15753 uint32_t val, base_addr, offset, mask, reset_reg; 15754 uint8_t mac_stopped = FALSE; 15755 uint8_t port = SC_PORT(sc); 15756 uint32_t wb_data[2]; 15757 15758 /* reset addresses as they also mark which values were changed */ 15759 vals->bmac_addr = 0; 15760 vals->umac_addr = 0; 15761 vals->xmac_addr = 0; 15762 vals->emac_addr = 0; 15763 15764 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 15765 15766 if (!CHIP_IS_E3(sc)) { 15767 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 15768 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 15769 if ((mask & reset_reg) && val) { 15770 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); 15771 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 15772 : NIG_REG_INGRESS_BMAC0_MEM; 15773 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL 15774 : BIGMAC_REGISTER_BMAC_CONTROL; 15775 15776 /* 15777 * use rd/wr since we cannot use dmae. This is safe 15778 * since MCP won't access the bus due to the request 15779 * to unload, and no function on the path can be 15780 * loaded at this time. 
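             * The original control words are saved in vals->bmac_addr and
             * vals->bmac_val[] before the RX enable bit is cleared, so that
             * bxe_prev_unload_common() can restore them once the common
             * reset has completed.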
15781 */ 15782 wb_data[0] = REG_RD(sc, base_addr + offset); 15783 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); 15784 vals->bmac_addr = base_addr + offset; 15785 vals->bmac_val[0] = wb_data[0]; 15786 vals->bmac_val[1] = wb_data[1]; 15787 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; 15788 REG_WR(sc, vals->bmac_addr, wb_data[0]); 15789 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); 15790 } 15791 15792 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); 15793 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; 15794 vals->emac_val = REG_RD(sc, vals->emac_addr); 15795 REG_WR(sc, vals->emac_addr, 0); 15796 mac_stopped = TRUE; 15797 } else { 15798 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 15799 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); 15800 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 15801 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); 15802 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); 15803 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); 15804 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 15805 vals->xmac_val = REG_RD(sc, vals->xmac_addr); 15806 REG_WR(sc, vals->xmac_addr, 0); 15807 mac_stopped = TRUE; 15808 } 15809 15810 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 15811 if (mask & reset_reg) { 15812 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); 15813 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 15814 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 15815 vals->umac_val = REG_RD(sc, vals->umac_addr); 15816 REG_WR(sc, vals->umac_addr, 0); 15817 mac_stopped = TRUE; 15818 } 15819 } 15820 15821 if (mac_stopped) { 15822 DELAY(20000); 15823 } 15824 } 15825 15826 #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 15827 #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) 15828 #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 15829 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 15830 15831 static void 15832 bxe_prev_unload_undi_inc(struct bxe_softc *sc, 15833 uint8_t port, 15834 uint8_t inc) 15835 { 15836 uint16_t rcq, bd; 15837 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); 15838 15839 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; 15840 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; 15841 15842 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); 15843 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); 15844 15845 BLOGD(sc, DBG_LOAD, 15846 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 15847 port, bd, rcq); 15848 } 15849 15850 static int 15851 bxe_prev_unload_common(struct bxe_softc *sc) 15852 { 15853 uint32_t reset_reg, tmp_reg = 0, rc; 15854 uint8_t prev_undi = FALSE; 15855 struct bxe_mac_vals mac_vals; 15856 uint32_t timer_count = 1000; 15857 uint32_t prev_brb; 15858 15859 /* 15860 * It is possible a previous function received 'common' answer, 15861 * but hasn't loaded yet, therefore creating a scenario of 15862 * multiple functions receiving 'common' on the same path. 
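     * When the BRB block is out of reset, the flow below closes the MAC Rx
     * paths and the LLH filters, waits for the BRB to drain (manually
     * advancing the UNDI producers if a pre-boot/UNDI driver is detected),
     * issues the common reset, restores the saved MAC register values,
     * marks the path as cleaned and finally reports UNLOAD_DONE to the MCP.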
15863 */ 15864 BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); 15865 15866 memset(&mac_vals, 0, sizeof(mac_vals)); 15867 15868 if (bxe_prev_is_path_marked(sc)) { 15869 return (bxe_prev_mcp_done(sc)); 15870 } 15871 15872 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); 15873 15874 /* Reset should be performed after BRB is emptied */ 15875 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 15876 /* Close the MAC Rx to prevent BRB from filling up */ 15877 bxe_prev_unload_close_mac(sc, &mac_vals); 15878 15879 /* close LLH filters towards the BRB */ 15880 elink_set_rx_filter(&sc->link_params, 0); 15881 15882 /* 15883 * Check if the UNDI driver was previously loaded. 15884 * UNDI driver initializes CID offset for normal bell to 0x7 15885 */ 15886 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 15887 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); 15888 if (tmp_reg == 0x7) { 15889 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); 15890 prev_undi = TRUE; 15891 /* clear the UNDI indication */ 15892 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); 15893 /* clear possible idle check errors */ 15894 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); 15895 } 15896 } 15897 15898 /* wait until BRB is empty */ 15899 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15900 while (timer_count) { 15901 prev_brb = tmp_reg; 15902 15903 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15904 if (!tmp_reg) { 15905 break; 15906 } 15907 15908 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); 15909 15910 /* reset timer as long as BRB actually gets emptied */ 15911 if (prev_brb > tmp_reg) { 15912 timer_count = 1000; 15913 } else { 15914 timer_count--; 15915 } 15916 15917 /* If UNDI resides in memory, manually increment it */ 15918 if (prev_undi) { 15919 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); 15920 } 15921 15922 DELAY(10); 15923 } 15924 15925 if (!timer_count) { 15926 BLOGE(sc, "Failed to empty BRB\n"); 15927 } 15928 } 15929 15930 /* No packets are in the pipeline, path is ready for reset */ 15931 bxe_reset_common(sc); 15932 15933 if (mac_vals.xmac_addr) { 15934 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); 15935 } 15936 if (mac_vals.umac_addr) { 15937 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); 15938 } 15939 if (mac_vals.emac_addr) { 15940 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); 15941 } 15942 if (mac_vals.bmac_addr) { 15943 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 15944 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 15945 } 15946 15947 rc = bxe_prev_mark_path(sc, prev_undi); 15948 if (rc) { 15949 bxe_prev_mcp_done(sc); 15950 return (rc); 15951 } 15952 15953 return (bxe_prev_mcp_done(sc)); 15954 } 15955 15956 static int 15957 bxe_prev_unload_uncommon(struct bxe_softc *sc) 15958 { 15959 int rc; 15960 15961 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); 15962 15963 /* Test if previous unload process was already finished for this path */ 15964 if (bxe_prev_is_path_marked(sc)) { 15965 return (bxe_prev_mcp_done(sc)); 15966 } 15967 15968 BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); 15969 15970 /* 15971 * If function has FLR capabilities, and existing FW version matches 15972 * the one required, then FLR will be sufficient to clean any residue 15973 * left by previous driver 15974 */ 15975 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); 15976 if (!rc) { 15977 /* fw version is good */ 15978 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); 15979 rc = bxe_do_flr(sc); 15980 } 15981 15982 if (!rc) { 15983 /* FLR was performed */ 15984 BLOGD(sc, 
DBG_LOAD, "FLR successful\n"); 15985 return (0); 15986 } 15987 15988 BLOGD(sc, DBG_LOAD, "Could not FLR\n"); 15989 15990 /* Close the MCP request, return failure*/ 15991 rc = bxe_prev_mcp_done(sc); 15992 if (!rc) { 15993 rc = BXE_PREV_WAIT_NEEDED; 15994 } 15995 15996 return (rc); 15997 } 15998 15999 static int 16000 bxe_prev_unload(struct bxe_softc *sc) 16001 { 16002 int time_counter = 10; 16003 uint32_t fw, hw_lock_reg, hw_lock_val; 16004 uint32_t rc = 0; 16005 16006 /* 16007 * Clear HW from errors which may have resulted from an interrupted 16008 * DMAE transaction. 16009 */ 16010 bxe_prev_interrupted_dmae(sc); 16011 16012 /* Release previously held locks */ 16013 hw_lock_reg = 16014 (SC_FUNC(sc) <= 5) ? 16015 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : 16016 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); 16017 16018 hw_lock_val = (REG_RD(sc, hw_lock_reg)); 16019 if (hw_lock_val) { 16020 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 16021 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); 16022 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 16023 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); 16024 } 16025 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); 16026 REG_WR(sc, hw_lock_reg, 0xffffffff); 16027 } else { 16028 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); 16029 } 16030 16031 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { 16032 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); 16033 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); 16034 } 16035 16036 do { 16037 /* Lock MCP using an unload request */ 16038 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 16039 if (!fw) { 16040 BLOGE(sc, "MCP response failure, aborting\n"); 16041 rc = -1; 16042 break; 16043 } 16044 16045 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 16046 rc = bxe_prev_unload_common(sc); 16047 break; 16048 } 16049 16050 /* non-common reply from MCP night require looping */ 16051 rc = bxe_prev_unload_uncommon(sc); 16052 if (rc != BXE_PREV_WAIT_NEEDED) { 16053 break; 16054 } 16055 16056 DELAY(20000); 16057 } while (--time_counter); 16058 16059 if (!time_counter || rc) { 16060 BLOGE(sc, "Failed to unload previous driver!\n"); 16061 rc = -1; 16062 } 16063 16064 return (rc); 16065 } 16066 16067 void 16068 bxe_dcbx_set_state(struct bxe_softc *sc, 16069 uint8_t dcb_on, 16070 uint32_t dcbx_enabled) 16071 { 16072 if (!CHIP_IS_E1x(sc)) { 16073 sc->dcb_state = dcb_on; 16074 sc->dcbx_enabled = dcbx_enabled; 16075 } else { 16076 sc->dcb_state = FALSE; 16077 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; 16078 } 16079 BLOGD(sc, DBG_LOAD, 16080 "DCB state [%s:%s]\n", 16081 dcb_on ? "ON" : "OFF", 16082 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : 16083 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : 16084 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? 
16085 "on-chip with negotiation" : "invalid"); 16086 } 16087 16088 /* must be called after sriov-enable */ 16089 static int 16090 bxe_set_qm_cid_count(struct bxe_softc *sc) 16091 { 16092 int cid_count = BXE_L2_MAX_CID(sc); 16093 16094 if (IS_SRIOV(sc)) { 16095 cid_count += BXE_VF_CIDS; 16096 } 16097 16098 if (CNIC_SUPPORT(sc)) { 16099 cid_count += CNIC_CID_MAX; 16100 } 16101 16102 return (roundup(cid_count, QM_CID_ROUND)); 16103 } 16104 16105 static void 16106 bxe_init_multi_cos(struct bxe_softc *sc) 16107 { 16108 int pri, cos; 16109 16110 uint32_t pri_map = 0; /* XXX change to user config */ 16111 16112 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { 16113 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 16114 if (cos < sc->max_cos) { 16115 sc->prio_to_cos[pri] = cos; 16116 } else { 16117 BLOGW(sc, "Invalid COS %d for priority %d " 16118 "(max COS is %d), setting to 0\n", 16119 cos, pri, (sc->max_cos - 1)); 16120 sc->prio_to_cos[pri] = 0; 16121 } 16122 } 16123 } 16124 16125 static int 16126 bxe_sysctl_state(SYSCTL_HANDLER_ARGS) 16127 { 16128 struct bxe_softc *sc; 16129 int error, result; 16130 16131 result = 0; 16132 error = sysctl_handle_int(oidp, &result, 0, req); 16133 16134 if (error || !req->newptr) { 16135 return (error); 16136 } 16137 16138 if (result == 1) { 16139 uint32_t temp; 16140 sc = (struct bxe_softc *)arg1; 16141 16142 BLOGI(sc, "... dumping driver state ...\n"); 16143 temp = SHMEM2_RD(sc, temperature_in_half_celsius); 16144 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2)); 16145 } 16146 16147 return (error); 16148 } 16149 16150 static int 16151 bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) 16152 { 16153 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16154 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; 16155 uint32_t *offset; 16156 uint64_t value = 0; 16157 int index = (int)arg2; 16158 16159 if (index >= BXE_NUM_ETH_STATS) { 16160 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); 16161 return (-1); 16162 } 16163 16164 offset = (eth_stats + bxe_eth_stats_arr[index].offset); 16165 16166 switch (bxe_eth_stats_arr[index].size) { 16167 case 4: 16168 value = (uint64_t)*offset; 16169 break; 16170 case 8: 16171 value = HILO_U64(*offset, *(offset + 1)); 16172 break; 16173 default: 16174 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", 16175 index, bxe_eth_stats_arr[index].size); 16176 return (-1); 16177 } 16178 16179 return (sysctl_handle_64(oidp, &value, 0, req)); 16180 } 16181 16182 static int 16183 bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) 16184 { 16185 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16186 uint32_t *eth_stats; 16187 uint32_t *offset; 16188 uint64_t value = 0; 16189 uint32_t q_stat = (uint32_t)arg2; 16190 uint32_t fp_index = ((q_stat >> 16) & 0xffff); 16191 uint32_t index = (q_stat & 0xffff); 16192 16193 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; 16194 16195 if (index >= BXE_NUM_ETH_Q_STATS) { 16196 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); 16197 return (-1); 16198 } 16199 16200 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); 16201 16202 switch (bxe_eth_q_stats_arr[index].size) { 16203 case 4: 16204 value = (uint64_t)*offset; 16205 break; 16206 case 8: 16207 value = HILO_U64(*offset, *(offset + 1)); 16208 break; 16209 default: 16210 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", 16211 index, bxe_eth_q_stats_arr[index].size); 16212 return (-1); 16213 } 16214 16215 return (sysctl_handle_64(oidp, &value, 0, req)); 16216 } 16217 16218 static void 16219 
bxe_add_sysctls(struct bxe_softc *sc) 16220 { 16221 struct sysctl_ctx_list *ctx; 16222 struct sysctl_oid_list *children; 16223 struct sysctl_oid *queue_top, *queue; 16224 struct sysctl_oid_list *queue_top_children, *queue_children; 16225 char queue_num_buf[32]; 16226 uint32_t q_stat; 16227 int i, j; 16228 16229 ctx = device_get_sysctl_ctx(sc->dev); 16230 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 16231 16232 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", 16233 CTLFLAG_RD, BXE_DRIVER_VERSION, 0, 16234 "version"); 16235 16236 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 16237 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, 16238 "bootcode version"); 16239 16240 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", 16241 BCM_5710_FW_MAJOR_VERSION, 16242 BCM_5710_FW_MINOR_VERSION, 16243 BCM_5710_FW_REVISION_VERSION, 16244 BCM_5710_FW_ENGINEERING_VERSION); 16245 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 16246 CTLFLAG_RD, sc->fw_ver_str, 0, 16247 "firmware version"); 16248 16249 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", 16250 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : 16251 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : 16252 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : 16253 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : 16254 "Unknown")); 16255 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 16256 CTLFLAG_RD, sc->mf_mode_str, 0, 16257 "multifunction mode"); 16258 16259 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", 16260 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, 16261 "multifunction vnics per port"); 16262 16263 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 16264 CTLFLAG_RD, sc->mac_addr_str, 0, 16265 "mac address"); 16266 16267 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", 16268 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : 16269 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : 16270 (sc->devinfo.pcie_link_speed == 4) ? 
"8.0GT/s" : 16271 "???GT/s"), 16272 sc->devinfo.pcie_link_width); 16273 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 16274 CTLFLAG_RD, sc->pci_link_str, 0, 16275 "pci link status"); 16276 16277 sc->debug = bxe_debug; 16278 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", 16279 CTLFLAG_RW, &sc->debug, 16280 "debug logging mode"); 16281 16282 sc->trigger_grcdump = 0; 16283 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump", 16284 CTLFLAG_RW, &sc->trigger_grcdump, 0, 16285 "set by driver when a grcdump is needed"); 16286 16287 16288 sc->rx_budget = bxe_rx_budget; 16289 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", 16290 CTLFLAG_RW, &sc->rx_budget, 0, 16291 "rx processing budget"); 16292 16293 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", 16294 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 16295 bxe_sysctl_state, "IU", "dump driver state"); 16296 16297 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 16298 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 16299 bxe_eth_stats_arr[i].string, 16300 CTLTYPE_U64 | CTLFLAG_RD, sc, i, 16301 bxe_sysctl_eth_stat, "LU", 16302 bxe_eth_stats_arr[i].string); 16303 } 16304 16305 /* add a new parent node for all queues "dev.bxe.#.queue" */ 16306 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", 16307 CTLFLAG_RD, NULL, "queue"); 16308 queue_top_children = SYSCTL_CHILDREN(queue_top); 16309 16310 for (i = 0; i < sc->num_queues; i++) { 16311 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ 16312 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); 16313 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, 16314 queue_num_buf, CTLFLAG_RD, NULL, 16315 "single queue"); 16316 queue_children = SYSCTL_CHILDREN(queue); 16317 16318 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { 16319 q_stat = ((i << 16) | j); 16320 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, 16321 bxe_eth_q_stats_arr[j].string, 16322 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, 16323 bxe_sysctl_eth_q_stat, "LU", 16324 bxe_eth_q_stats_arr[j].string); 16325 } 16326 } 16327 } 16328 16329 /* 16330 * Device attach function. 16331 * 16332 * Allocates device resources, performs secondary chip identification, and 16333 * initializes driver instance variables. This function is called from driver 16334 * load after a successful probe. 
16335 * 16336 * Returns: 16337 * 0 = Success, >0 = Failure 16338 */ 16339 static int 16340 bxe_attach(device_t dev) 16341 { 16342 struct bxe_softc *sc; 16343 16344 sc = device_get_softc(dev); 16345 16346 BLOGD(sc, DBG_LOAD, "Starting attach...\n"); 16347 16348 sc->state = BXE_STATE_CLOSED; 16349 16350 sc->dev = dev; 16351 sc->unit = device_get_unit(dev); 16352 16353 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); 16354 16355 sc->pcie_bus = pci_get_bus(dev); 16356 sc->pcie_device = pci_get_slot(dev); 16357 sc->pcie_func = pci_get_function(dev); 16358 16359 /* enable bus master capability */ 16360 pci_enable_busmaster(dev); 16361 16362 /* get the BARs */ 16363 if (bxe_allocate_bars(sc) != 0) { 16364 return (ENXIO); 16365 } 16366 16367 /* initialize the mutexes */ 16368 bxe_init_mutexes(sc); 16369 16370 /* prepare the periodic callout */ 16371 callout_init(&sc->periodic_callout, 0); 16372 16373 /* prepare the chip taskqueue */ 16374 sc->chip_tq_flags = CHIP_TQ_NONE; 16375 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), 16376 "bxe%d_chip_tq", sc->unit); 16377 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); 16378 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, 16379 taskqueue_thread_enqueue, 16380 &sc->chip_tq); 16381 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ 16382 "%s", sc->chip_tq_name); 16383 16384 /* get device info and set params */ 16385 if (bxe_get_device_info(sc) != 0) { 16386 BLOGE(sc, "getting device info\n"); 16387 bxe_deallocate_bars(sc); 16388 pci_disable_busmaster(dev); 16389 return (ENXIO); 16390 } 16391 16392 /* get final misc params */ 16393 bxe_get_params(sc); 16394 16395 /* set the default MTU (changed via ifconfig) */ 16396 sc->mtu = ETHERMTU; 16397 16398 bxe_set_modes_bitmap(sc); 16399 16400 /* XXX 16401 * If in AFEX mode and the function is configured for FCoE 16402 * then bail... no L2 allowed. 
16403 */ 16404 16405 /* get phy settings from shmem and 'and' against admin settings */ 16406 bxe_get_phy_info(sc); 16407 16408 /* initialize the FreeBSD ifnet interface */ 16409 if (bxe_init_ifnet(sc) != 0) { 16410 bxe_release_mutexes(sc); 16411 bxe_deallocate_bars(sc); 16412 pci_disable_busmaster(dev); 16413 return (ENXIO); 16414 } 16415 16416 if (bxe_add_cdev(sc) != 0) { 16417 if (sc->ifp != NULL) { 16418 ether_ifdetach(sc->ifp); 16419 } 16420 ifmedia_removeall(&sc->ifmedia); 16421 bxe_release_mutexes(sc); 16422 bxe_deallocate_bars(sc); 16423 pci_disable_busmaster(dev); 16424 return (ENXIO); 16425 } 16426 16427 /* allocate device interrupts */ 16428 if (bxe_interrupt_alloc(sc) != 0) { 16429 bxe_del_cdev(sc); 16430 if (sc->ifp != NULL) { 16431 ether_ifdetach(sc->ifp); 16432 } 16433 ifmedia_removeall(&sc->ifmedia); 16434 bxe_release_mutexes(sc); 16435 bxe_deallocate_bars(sc); 16436 pci_disable_busmaster(dev); 16437 return (ENXIO); 16438 } 16439 16440 /* allocate ilt */ 16441 if (bxe_alloc_ilt_mem(sc) != 0) { 16442 bxe_interrupt_free(sc); 16443 bxe_del_cdev(sc); 16444 if (sc->ifp != NULL) { 16445 ether_ifdetach(sc->ifp); 16446 } 16447 ifmedia_removeall(&sc->ifmedia); 16448 bxe_release_mutexes(sc); 16449 bxe_deallocate_bars(sc); 16450 pci_disable_busmaster(dev); 16451 return (ENXIO); 16452 } 16453 16454 /* allocate the host hardware/software hsi structures */ 16455 if (bxe_alloc_hsi_mem(sc) != 0) { 16456 bxe_free_ilt_mem(sc); 16457 bxe_interrupt_free(sc); 16458 bxe_del_cdev(sc); 16459 if (sc->ifp != NULL) { 16460 ether_ifdetach(sc->ifp); 16461 } 16462 ifmedia_removeall(&sc->ifmedia); 16463 bxe_release_mutexes(sc); 16464 bxe_deallocate_bars(sc); 16465 pci_disable_busmaster(dev); 16466 return (ENXIO); 16467 } 16468 16469 /* need to reset chip if UNDI was active */ 16470 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 16471 /* init fw_seq */ 16472 sc->fw_seq = 16473 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 16474 DRV_MSG_SEQ_NUMBER_MASK); 16475 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); 16476 bxe_prev_unload(sc); 16477 } 16478 16479 #if 1 16480 /* XXX */ 16481 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16482 #else 16483 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && 16484 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && 16485 SHMEM2_RD(sc, dcbx_lldp_params_offset) && 16486 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { 16487 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); 16488 bxe_dcbx_init_params(sc); 16489 } else { 16490 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16491 } 16492 #endif 16493 16494 /* calculate qm_cid_count */ 16495 sc->qm_cid_count = bxe_set_qm_cid_count(sc); 16496 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); 16497 16498 sc->max_cos = 1; 16499 bxe_init_multi_cos(sc); 16500 16501 bxe_add_sysctls(sc); 16502 16503 return (0); 16504 } 16505 16506 /* 16507 * Device detach function. 16508 * 16509 * Stops the controller, resets the controller, and releases resources. 
16510 * 16511 * Returns: 16512 * 0 = Success, >0 = Failure 16513 */ 16514 static int 16515 bxe_detach(device_t dev) 16516 { 16517 struct bxe_softc *sc; 16518 if_t ifp; 16519 16520 sc = device_get_softc(dev); 16521 16522 BLOGD(sc, DBG_LOAD, "Starting detach...\n"); 16523 16524 ifp = sc->ifp; 16525 if (ifp != NULL && if_vlantrunkinuse(ifp)) { 16526 BLOGE(sc, "Cannot detach while VLANs are in use.\n"); 16527 return(EBUSY); 16528 } 16529 16530 bxe_del_cdev(sc); 16531 16532 /* stop the periodic callout */ 16533 bxe_periodic_stop(sc); 16534 16535 /* stop the chip taskqueue */ 16536 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); 16537 if (sc->chip_tq) { 16538 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); 16539 taskqueue_free(sc->chip_tq); 16540 sc->chip_tq = NULL; 16541 } 16542 16543 /* stop and reset the controller if it was open */ 16544 if (sc->state != BXE_STATE_CLOSED) { 16545 BXE_CORE_LOCK(sc); 16546 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); 16547 BXE_CORE_UNLOCK(sc); 16548 } 16549 16550 /* release the network interface */ 16551 if (ifp != NULL) { 16552 ether_ifdetach(ifp); 16553 } 16554 ifmedia_removeall(&sc->ifmedia); 16555 16556 /* XXX do the following based on driver state... */ 16557 16558 /* free the host hardware/software hsi structures */ 16559 bxe_free_hsi_mem(sc); 16560 16561 /* free ilt */ 16562 bxe_free_ilt_mem(sc); 16563 16564 /* release the interrupts */ 16565 bxe_interrupt_free(sc); 16566 16567 /* Release the mutexes*/ 16568 bxe_release_mutexes(sc); 16569 16570 /* Release the PCIe BAR mapped memory */ 16571 bxe_deallocate_bars(sc); 16572 16573 /* Release the FreeBSD interface. */ 16574 if (sc->ifp != NULL) { 16575 if_free(sc->ifp); 16576 } 16577 16578 pci_disable_busmaster(dev); 16579 16580 return (0); 16581 } 16582 16583 /* 16584 * Device shutdown function. 16585 * 16586 * Stops and resets the controller. 16587 * 16588 * Returns: 16589 * Nothing 16590 */ 16591 static int 16592 bxe_shutdown(device_t dev) 16593 { 16594 struct bxe_softc *sc; 16595 16596 sc = device_get_softc(dev); 16597 16598 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); 16599 16600 /* stop the periodic callout */ 16601 bxe_periodic_stop(sc); 16602 16603 BXE_CORE_LOCK(sc); 16604 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); 16605 BXE_CORE_UNLOCK(sc); 16606 16607 return (0); 16608 } 16609 16610 void 16611 bxe_igu_ack_sb(struct bxe_softc *sc, 16612 uint8_t igu_sb_id, 16613 uint8_t segment, 16614 uint16_t index, 16615 uint8_t op, 16616 uint8_t update) 16617 { 16618 uint32_t igu_addr = sc->igu_base_addr; 16619 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 16620 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); 16621 } 16622 16623 static void 16624 bxe_igu_clear_sb_gen(struct bxe_softc *sc, 16625 uint8_t func, 16626 uint8_t idu_sb_id, 16627 uint8_t is_pf) 16628 { 16629 uint32_t data, ctl, cnt = 100; 16630 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 16631 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 16632 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 16633 uint32_t sb_bit = 1 << (idu_sb_id%32); 16634 uint32_t func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 16635 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 16636 16637 /* Not supported in BC mode */ 16638 if (CHIP_INT_MODE_IS_BC(sc)) { 16639 return; 16640 } 16641 16642 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 16643 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 16644 IGU_REGULAR_CLEANUP_SET | 16645 IGU_REGULAR_BCLEANUP); 16646 16647 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 16648 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 16649 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 16650 16651 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16652 data, igu_addr_data); 16653 REG_WR(sc, igu_addr_data, data); 16654 16655 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16656 BUS_SPACE_BARRIER_WRITE); 16657 mb(); 16658 16659 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16660 ctl, igu_addr_ctl); 16661 REG_WR(sc, igu_addr_ctl, ctl); 16662 16663 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16664 BUS_SPACE_BARRIER_WRITE); 16665 mb(); 16666 16667 /* wait for clean up to finish */ 16668 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 16669 DELAY(20000); 16670 } 16671 16672 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 16673 BLOGD(sc, DBG_LOAD, 16674 "Unable to finish IGU cleanup: " 16675 "idu_sb_id %d offset %d bit %d (cnt %d)\n", 16676 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 16677 } 16678 } 16679 16680 static void 16681 bxe_igu_clear_sb(struct bxe_softc *sc, 16682 uint8_t idu_sb_id) 16683 { 16684 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 16685 } 16686 16687 16688 16689 16690 16691 16692 16693 /*******************/ 16694 /* ECORE CALLBACKS */ 16695 /*******************/ 16696 16697 static void 16698 bxe_reset_common(struct bxe_softc *sc) 16699 { 16700 uint32_t val = 0x1400; 16701 16702 /* reset_common */ 16703 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); 16704 16705 if (CHIP_IS_E3(sc)) { 16706 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 16707 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 16708 } 16709 16710 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 16711 } 16712 16713 static void 16714 bxe_common_init_phy(struct bxe_softc *sc) 16715 { 16716 uint32_t shmem_base[2]; 16717 uint32_t shmem2_base[2]; 16718 16719 /* Avoid common init in case MFW supports LFA */ 16720 if (SHMEM2_RD(sc, size) > 16721 (uint32_t)offsetof(struct shmem2_region, 16722 lfa_host_addr[SC_PORT(sc)])) { 16723 return; 16724 } 16725 16726 shmem_base[0] = sc->devinfo.shmem_base; 16727 shmem2_base[0] = sc->devinfo.shmem2_base; 16728 16729 if (!CHIP_IS_E1x(sc)) { 16730 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 16731 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 16732 } 16733 16734 bxe_acquire_phy_lock(sc); 16735 elink_common_init_phy(sc, shmem_base, shmem2_base, 16736 sc->devinfo.chip_id, 0); 16737 bxe_release_phy_lock(sc); 16738 } 16739 16740 static void 16741 bxe_pf_disable(struct bxe_softc *sc) 16742 { 16743 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 16744 16745 val &= ~IGU_PF_CONF_FUNC_EN; 16746 16747 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 16748 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 16749 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 16750 } 16751 16752 static void 16753 bxe_init_pxp(struct bxe_softc *sc) 16754 { 16755 uint16_t devctl; 16756 int r_order, w_order; 16757 16758 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); 16759 16760 BLOGD(sc, DBG_LOAD, "read 
0x%08x from devctl\n", devctl); 16761 16762 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); 16763 16764 if (sc->mrrs == -1) { 16765 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); 16766 } else { 16767 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); 16768 r_order = sc->mrrs; 16769 } 16770 16771 ecore_init_pxp_arb(sc, r_order, w_order); 16772 } 16773 16774 static uint32_t 16775 bxe_get_pretend_reg(struct bxe_softc *sc) 16776 { 16777 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 16778 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 16779 return (base + (SC_ABS_FUNC(sc)) * stride); 16780 } 16781 16782 /* 16783 * Called only on E1H or E2. 16784 * When pretending to be PF, the pretend value is the function number 0..7. 16785 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 16786 * combination. 16787 */ 16788 static int 16789 bxe_pretend_func(struct bxe_softc *sc, 16790 uint16_t pretend_func_val) 16791 { 16792 uint32_t pretend_reg; 16793 16794 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { 16795 return (-1); 16796 } 16797 16798 /* get my own pretend register */ 16799 pretend_reg = bxe_get_pretend_reg(sc); 16800 REG_WR(sc, pretend_reg, pretend_func_val); 16801 REG_RD(sc, pretend_reg); 16802 return (0); 16803 } 16804 16805 static void 16806 bxe_iov_init_dmae(struct bxe_softc *sc) 16807 { 16808 return; 16809 #if 0 16810 BLOGD(sc, DBG_LOAD, "SRIOV is %s\n", IS_SRIOV(sc) ? "ON" : "OFF"); 16811 16812 if (!IS_SRIOV(sc)) { 16813 return; 16814 } 16815 16816 REG_WR(sc, DMAE_REG_BACKWARD_COMP_EN, 0); 16817 #endif 16818 } 16819 16820 #if 0 16821 static int 16822 bxe_iov_init_ilt(struct bxe_softc *sc, 16823 uint16_t line) 16824 { 16825 return (line); 16826 #if 0 16827 int i; 16828 struct ecore_ilt* ilt = sc->ilt; 16829 16830 if (!IS_SRIOV(sc)) { 16831 return (line); 16832 } 16833 16834 /* set vfs ilt lines */ 16835 for (i = 0; i < BXE_VF_CIDS/ILT_PAGE_CIDS ; i++) { 16836 struct hw_dma *hw_cxt = SC_VF_CXT_PAGE(sc,i); 16837 ilt->lines[line+i].page = hw_cxt->addr; 16838 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 16839 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 16840 } 16841 return (line+i); 16842 #endif 16843 } 16844 #endif 16845 16846 static void 16847 bxe_iov_init_dq(struct bxe_softc *sc) 16848 { 16849 return; 16850 #if 0 16851 if (!IS_SRIOV(sc)) { 16852 return; 16853 } 16854 16855 /* Set the DQ such that the CID reflect the abs_vfid */ 16856 REG_WR(sc, DORQ_REG_VF_NORM_VF_BASE, 0); 16857 REG_WR(sc, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); 16858 16859 /* 16860 * Set VFs starting CID. If its > 0 the preceding CIDs are belong to 16861 * the PF L2 queues 16862 */ 16863 REG_WR(sc, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); 16864 16865 /* The VF window size is the log2 of the max number of CIDs per VF */ 16866 REG_WR(sc, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); 16867 16868 /* 16869 * The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 16870 * the Pf doorbell size although the 2 are independent. 
16871 */ 16872 REG_WR(sc, DORQ_REG_VF_NORM_CID_OFST, 16873 BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT); 16874 16875 /* 16876 * No security checks for now - 16877 * configure single rule (out of 16) mask = 0x1, value = 0x0, 16878 * CID range 0 - 0x1ffff 16879 */ 16880 REG_WR(sc, DORQ_REG_VF_TYPE_MASK_0, 1); 16881 REG_WR(sc, DORQ_REG_VF_TYPE_VALUE_0, 0); 16882 REG_WR(sc, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 16883 REG_WR(sc, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 16884 16885 /* set the number of VF alllowed doorbells to the full DQ range */ 16886 REG_WR(sc, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); 16887 16888 /* set the VF doorbell threshold */ 16889 REG_WR(sc, DORQ_REG_VF_USAGE_CT_LIMIT, 4); 16890 #endif 16891 } 16892 16893 /* send a NIG loopback debug packet */ 16894 static void 16895 bxe_lb_pckt(struct bxe_softc *sc) 16896 { 16897 uint32_t wb_write[3]; 16898 16899 /* Ethernet source and destination addresses */ 16900 wb_write[0] = 0x55555555; 16901 wb_write[1] = 0x55555555; 16902 wb_write[2] = 0x20; /* SOP */ 16903 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16904 16905 /* NON-IP protocol */ 16906 wb_write[0] = 0x09000000; 16907 wb_write[1] = 0x55555555; 16908 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 16909 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16910 } 16911 16912 /* 16913 * Some of the internal memories are not directly readable from the driver. 16914 * To test them we send debug packets. 16915 */ 16916 static int 16917 bxe_int_mem_test(struct bxe_softc *sc) 16918 { 16919 int factor; 16920 int count, i; 16921 uint32_t val = 0; 16922 16923 if (CHIP_REV_IS_FPGA(sc)) { 16924 factor = 120; 16925 } else if (CHIP_REV_IS_EMUL(sc)) { 16926 factor = 200; 16927 } else { 16928 factor = 1; 16929 } 16930 16931 /* disable inputs of parser neighbor blocks */ 16932 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16933 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16934 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16935 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16936 16937 /* write 0 to parser credits for CFC search request */ 16938 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16939 16940 /* send Ethernet packet */ 16941 bxe_lb_pckt(sc); 16942 16943 /* TODO do i reset NIG statistic? 
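     * (The checks below assume the counters start at zero: one 16-byte
     * debug packet should leave NIG_REG_STAT2_BRB_OCTET at 0x10 and
     * PRS_REG_NUM_OF_PACKETS at 1.)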
*/ 16944 /* Wait until NIG register shows 1 packet of size 0x10 */ 16945 count = 1000 * factor; 16946 while (count) { 16947 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 16948 val = *BXE_SP(sc, wb_data[0]); 16949 if (val == 0x10) { 16950 break; 16951 } 16952 16953 DELAY(10000); 16954 count--; 16955 } 16956 16957 if (val != 0x10) { 16958 BLOGE(sc, "NIG timeout val=0x%x\n", val); 16959 return (-1); 16960 } 16961 16962 /* wait until PRS register shows 1 packet */ 16963 count = (1000 * factor); 16964 while (count) { 16965 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16966 if (val == 1) { 16967 break; 16968 } 16969 16970 DELAY(10000); 16971 count--; 16972 } 16973 16974 if (val != 0x1) { 16975 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16976 return (-2); 16977 } 16978 16979 /* Reset and init BRB, PRS */ 16980 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 16981 DELAY(50000); 16982 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 16983 DELAY(50000); 16984 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 16985 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 16986 16987 /* Disable inputs of parser neighbor blocks */ 16988 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16989 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16990 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16991 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16992 16993 /* Write 0 to parser credits for CFC search request */ 16994 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16995 16996 /* send 10 Ethernet packets */ 16997 for (i = 0; i < 10; i++) { 16998 bxe_lb_pckt(sc); 16999 } 17000 17001 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */ 17002 count = (1000 * factor); 17003 while (count) { 17004 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 17005 val = *BXE_SP(sc, wb_data[0]); 17006 if (val == 0xb0) { 17007 break; 17008 } 17009 17010 DELAY(10000); 17011 count--; 17012 } 17013 17014 if (val != 0xb0) { 17015 BLOGE(sc, "NIG timeout val=0x%x\n", val); 17016 return (-3); 17017 } 17018 17019 /* Wait until PRS register shows 2 packets */ 17020 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 17021 if (val != 2) { 17022 BLOGE(sc, "PRS timeout val=0x%x\n", val); 17023 } 17024 17025 /* Write 1 to parser credits for CFC search request */ 17026 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 17027 17028 /* Wait until PRS register shows 3 packets */ 17029 DELAY(10000 * factor); 17030 17031 /* Wait until NIG register shows 1 packet of size 0x10 */ 17032 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 17033 if (val != 3) { 17034 BLOGE(sc, "PRS timeout val=0x%x\n", val); 17035 } 17036 17037 /* clear NIG EOP FIFO */ 17038 for (i = 0; i < 11; i++) { 17039 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO); 17040 } 17041 17042 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); 17043 if (val != 1) { 17044 BLOGE(sc, "clear of NIG failed\n"); 17045 return (-4); 17046 } 17047 17048 /* Reset and init BRB, PRS, NIG */ 17049 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 17050 DELAY(50000); 17051 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 17052 DELAY(50000); 17053 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17054 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17055 if (!CNIC_SUPPORT(sc)) { 17056 /* set NIC mode */ 17057 REG_WR(sc, PRS_REG_NIC_MODE, 1); 17058 } 17059 17060 /* Enable inputs of parser neighbor blocks */ 17061 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff); 17062 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1); 17063 REG_WR(sc, CFC_REG_DEBUG0, 0x0); 17064 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1); 17065 17066 return (0); 
17067 } 17068 17069 static void 17070 bxe_setup_fan_failure_detection(struct bxe_softc *sc) 17071 { 17072 int is_required; 17073 uint32_t val; 17074 int port; 17075 17076 is_required = 0; 17077 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 17078 SHARED_HW_CFG_FAN_FAILURE_MASK); 17079 17080 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 17081 is_required = 1; 17082 } 17083 /* 17084 * The fan failure mechanism is usually related to the PHY type since 17085 * the power consumption of the board is affected by the PHY. Currently, 17086 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 17087 */ 17088 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 17089 for (port = PORT_0; port < PORT_MAX; port++) { 17090 is_required |= elink_fan_failure_det_req(sc, 17091 sc->devinfo.shmem_base, 17092 sc->devinfo.shmem2_base, 17093 port); 17094 } 17095 } 17096 17097 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); 17098 17099 if (is_required == 0) { 17100 return; 17101 } 17102 17103 /* Fan failure is indicated by SPIO 5 */ 17104 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 17105 17106 /* set to active low mode */ 17107 val = REG_RD(sc, MISC_REG_SPIO_INT); 17108 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 17109 REG_WR(sc, MISC_REG_SPIO_INT, val); 17110 17111 /* enable interrupt to signal the IGU */ 17112 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17113 val |= MISC_SPIO_SPIO5; 17114 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 17115 } 17116 17117 static void 17118 bxe_enable_blocks_attention(struct bxe_softc *sc) 17119 { 17120 uint32_t val; 17121 17122 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17123 if (!CHIP_IS_E1x(sc)) { 17124 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 17125 } else { 17126 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 17127 } 17128 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17129 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17130 /* 17131 * mask read length error interrupts in brb for parser 17132 * (parsing unit and 'checksum and crc' unit) 17133 * these errors are legal (PU reads fixed length and CAC can cause 17134 * read length error on truncated packets) 17135 */ 17136 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 17137 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 17138 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 17139 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 17140 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 17141 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 17142 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 17143 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 17144 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 17145 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 17146 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 17147 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 17148 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 17149 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 17150 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 17151 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 17152 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 17153 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ 17154 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 17155 17156 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 17157 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 17158 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 17159 if (!CHIP_IS_E1x(sc)) { 17160 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 17161 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 17162 } 17163 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 17164 17165 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 17166 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 17167 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); 
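    /*
     * A mask value of 0 leaves every attention bit of a block unmasked.
     * The non-zero values written above (BRB 0xFC00) and below (PBF 0x18)
     * deliberately keep a few benign error bits masked.
     */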
17168 /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ 17169 17170 if (!CHIP_IS_E1x(sc)) { 17171 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 17172 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 17173 } 17174 17175 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); 17176 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); 17177 /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ 17178 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 17179 } 17180 17181 /** 17182 * bxe_init_hw_common - initialize the HW at the COMMON phase. 17183 * 17184 * @sc: driver handle 17185 */ 17186 static int 17187 bxe_init_hw_common(struct bxe_softc *sc) 17188 { 17189 uint8_t abs_func_id; 17190 uint32_t val; 17191 17192 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n", 17193 SC_ABS_FUNC(sc)); 17194 17195 /* 17196 * take the RESET lock to protect undi_unload flow from accessing 17197 * registers while we are resetting the chip 17198 */ 17199 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17200 17201 bxe_reset_common(sc); 17202 17203 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); 17204 17205 val = 0xfffc; 17206 if (CHIP_IS_E3(sc)) { 17207 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 17208 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 17209 } 17210 17211 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); 17212 17213 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17214 17215 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); 17216 BLOGD(sc, DBG_LOAD, "after misc block init\n"); 17217 17218 if (!CHIP_IS_E1x(sc)) { 17219 /* 17220 * 4-port mode or 2-port mode we need to turn off master-enable for 17221 * everyone. After that we turn it back on for self. So, we disregard 17222 * multi-function, and always disable all functions on the given path, 17223 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 17224 */ 17225 for (abs_func_id = SC_PATH(sc); 17226 abs_func_id < (E2_FUNC_MAX * 2); 17227 abs_func_id += 2) { 17228 if (abs_func_id == SC_ABS_FUNC(sc)) { 17229 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17230 continue; 17231 } 17232 17233 bxe_pretend_func(sc, abs_func_id); 17234 17235 /* clear pf enable */ 17236 bxe_pf_disable(sc); 17237 17238 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17239 } 17240 } 17241 17242 BLOGD(sc, DBG_LOAD, "after pf disable\n"); 17243 17244 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); 17245 17246 if (CHIP_IS_E1(sc)) { 17247 /* 17248 * enable HW interrupt from PXP on USDM overflow 17249 * bit 16 on INT_MASK_0 17250 */ 17251 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17252 } 17253 17254 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); 17255 bxe_init_pxp(sc); 17256 17257 #ifdef __BIG_ENDIAN 17258 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); 17259 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); 17260 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 17261 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 17262 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 17263 /* make sure this value is 0 */ 17264 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); 17265 17266 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); 17267 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); 17268 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); 17269 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); 17270 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 17271 #endif 17272 17273 ecore_ilt_init_page_size(sc, INITOP_SET); 17274 17275 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { 17276 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 17277 } 17278 17279 /* let the HW do it's magic... 
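     * A fixed 100 msec delay is used here; PXP2_REG_RQ_CFG_DONE and
     * PXP2_REG_RD_INIT_DONE are then checked and common init fails if
     * either has not reached 1.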
*/ 17280 DELAY(100000); 17281 17282 /* finish PXP init */ 17283 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); 17284 if (val != 1) { 17285 BLOGE(sc, "PXP2 CFG failed\n"); 17286 return (-1); 17287 } 17288 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); 17289 if (val != 1) { 17290 BLOGE(sc, "PXP2 RD_INIT failed\n"); 17291 return (-1); 17292 } 17293 17294 BLOGD(sc, DBG_LOAD, "after pxp init\n"); 17295 17296 /* 17297 * Timer bug workaround for E2 only. We need to set the entire ILT to have 17298 * entries with value "0" and valid bit on. This needs to be done by the 17299 * first PF that is loaded in a path (i.e. common phase) 17300 */ 17301 if (!CHIP_IS_E1x(sc)) { 17302 /* 17303 * In E2 there is a bug in the timers block that can cause function 6 / 7 17304 * (i.e. vnic3) to start even if it is marked as "scan-off". 17305 * This occurs when a different function (func2,3) is being marked 17306 * as "scan-off". Real-life scenario for example: if a driver is being 17307 * load-unloaded while func6,7 are down. This will cause the timer to access 17308 * the ilt, translate to a logical address and send a request to read/write. 17309 * Since the ilt for the function that is down is not valid, this will cause 17310 * a translation error which is unrecoverable. 17311 * The Workaround is intended to make sure that when this happens nothing 17312 * fatal will occur. The workaround: 17313 * 1. First PF driver which loads on a path will: 17314 * a. After taking the chip out of reset, by using pretend, 17315 * it will write "0" to the following registers of 17316 * the other vnics. 17317 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 17318 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 17319 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 17320 * And for itself it will write '1' to 17321 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 17322 * dmae-operations (writing to pram for example.) 17323 * note: can be done for only function 6,7 but cleaner this 17324 * way. 17325 * b. Write zero+valid to the entire ILT. 17326 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of 17327 * VNIC3 (of that port). The range allocated will be the 17328 * entire ILT. This is needed to prevent ILT range error. 17329 * 2. Any PF driver load flow: 17330 * a. ILT update with the physical addresses of the allocated 17331 * logical pages. 17332 * b. Wait 20msec. - note that this timeout is needed to make 17333 * sure there are no requests in one of the PXP internal 17334 * queues with "old" ILT addresses. 17335 * c. PF enable in the PGLC. 17336 * d. Clear the was_error of the PF in the PGLC. (could have 17337 * occurred while driver was down) 17338 * e. PF enable in the CFC (WEAK + STRONG) 17339 * f. Timers scan enable 17340 * 3. PF driver unload flow: 17341 * a. Clear the Timers scan_en. 17342 * b. Polling for scan_on=0 for that PF. 17343 * c. Clear the PF enable bit in the PXP. 17344 * d. Clear the PF enable in the CFC (WEAK + STRONG) 17345 * e. Write zero+valid to all ILT entries (The valid bit must 17346 * stay set) 17347 * f. If this is VNIC 3 of a port then also init 17348 * first_timers_ilt_entry to zero and last_timers_ilt_entry 17349 * to the last enrty in the ILT. 17350 * 17351 * Notes: 17352 * Currently the PF error in the PGLC is non recoverable. 17353 * In the future the there will be a recovery routine for this error. 17354 * Currently attention is masked. 17355 * Having an MCP lock on the load/unload process does not guarantee that 17356 * there is no Timer disable during Func6/7 enable. 
This is because the 17357 * Timers scan is currently being cleared by the MCP on FLR. 17358 * Step 2.d can be done only for PF6/7 and the driver can also check if 17359 * there is error before clearing it. But the flow above is simpler and 17360 * more general. 17361 * All ILT entries are written by zero+valid and not just PF6/7 17362 * ILT entries since in the future the ILT entries allocation for 17363 * PF-s might be dynamic. 17364 */ 17365 struct ilt_client_info ilt_cli; 17366 struct ecore_ilt ilt; 17367 17368 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 17369 memset(&ilt, 0, sizeof(struct ecore_ilt)); 17370 17371 /* initialize dummy TM client */ 17372 ilt_cli.start = 0; 17373 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 17374 ilt_cli.client_num = ILT_CLIENT_TM; 17375 17376 /* 17377 * Step 1: set zeroes to all ilt page entries with valid bit on 17378 * Step 2: set the timers first/last ilt entry to point 17379 * to the entire range to prevent ILT range error for 3rd/4th 17380 * vnic (this code assumes existence of the vnic) 17381 * 17382 * both steps performed by call to ecore_ilt_client_init_op() 17383 * with dummy TM client 17384 * 17385 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 17386 * and his brother are split registers 17387 */ 17388 17389 bxe_pretend_func(sc, (SC_PATH(sc) + 6)); 17390 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); 17391 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17392 17393 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN); 17394 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN); 17395 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 17396 } 17397 17398 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); 17399 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); 17400 17401 if (!CHIP_IS_E1x(sc)) { 17402 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : 17403 (CHIP_REV_IS_FPGA(sc) ? 400 : 0); 17404 17405 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); 17406 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); 17407 17408 /* let the HW do it's magic... 
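         * ATC_REG_ATC_INIT_DONE is polled below in 200 msec steps, with a
         * larger iteration budget on emulation/FPGA platforms (factor
         * 1000/400) than on real silicon (factor 0, i.e. a single check).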
*/ 17409 do { 17410 DELAY(200000); 17411 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 17412 } while (factor-- && (val != 1)); 17413 17414 if (val != 1) { 17415 BLOGE(sc, "ATC_INIT failed\n"); 17416 return (-1); 17417 } 17418 } 17419 17420 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); 17421 17422 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 17423 17424 bxe_iov_init_dmae(sc); 17425 17426 /* clean the DMAE memory */ 17427 sc->dmae_ready = 1; 17428 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); 17429 17430 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 17431 17432 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 17433 17434 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 17435 17436 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 17437 17438 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 17439 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 17440 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 17441 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 17442 17443 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 17444 17445 /* QM queues pointers table */ 17446 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 17447 17448 /* soft reset pulse */ 17449 REG_WR(sc, QM_REG_SOFT_RESET, 1); 17450 REG_WR(sc, QM_REG_SOFT_RESET, 0); 17451 17452 if (CNIC_SUPPORT(sc)) 17453 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 17454 17455 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 17456 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); 17457 if (!CHIP_REV_IS_SLOW(sc)) { 17458 /* enable hw interrupt from doorbell Q */ 17459 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17460 } 17461 17462 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17463 17464 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17465 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 17466 17467 if (!CHIP_IS_E1(sc)) { 17468 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 17469 } 17470 17471 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 17472 if (IS_MF_AFEX(sc)) { 17473 /* 17474 * configure that AFEX and VLAN headers must be 17475 * received in AFEX mode 17476 */ 17477 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 17478 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 17479 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 17480 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 17481 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 17482 } else { 17483 /* 17484 * Bit-map indicating which L2 hdrs may appear 17485 * after the basic Ethernet header 17486 */ 17487 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 17488 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 17489 } 17490 } 17491 17492 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 17493 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 17494 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 17495 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 17496 17497 if (!CHIP_IS_E1x(sc)) { 17498 /* reset VFC memories */ 17499 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17500 VFC_MEMORIES_RST_REG_CAM_RST | 17501 VFC_MEMORIES_RST_REG_RAM_RST); 17502 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17503 VFC_MEMORIES_RST_REG_CAM_RST | 17504 VFC_MEMORIES_RST_REG_RAM_RST); 17505 17506 DELAY(20000); 17507 } 17508 17509 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 17510 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 17511 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 17512 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 17513 17514 /* sync semi rtc */ 17515 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 17516 0x80000000); 17517 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 17518 0x80000000); 17519 17520 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 17521 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 17522 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 17523 17524 if (!CHIP_IS_E1x(sc)) { 17525 if (IS_MF_AFEX(sc)) { 17526 /* 17527 * configure that AFEX and VLAN headers must be 17528 * sent in AFEX mode 17529 */ 17530 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 17531 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 17532 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 17533 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 17534 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 17535 } else { 17536 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 17537 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 17538 } 17539 } 17540 17541 REG_WR(sc, SRC_REG_SOFT_RST, 1); 17542 17543 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 17544 17545 if (CNIC_SUPPORT(sc)) { 17546 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 17547 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 17548 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 17549 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 17550 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 17551 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 17552 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 17553 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 17554 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 17555 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 17556 } 17557 REG_WR(sc, SRC_REG_SOFT_RST, 0); 17558 17559 if (sizeof(union cdu_context) != 1024) { 17560 /* we currently assume that a context is 1024 bytes */ 17561 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", 17562 (long)sizeof(union cdu_context)); 17563 } 17564 17565 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 17566 val = (4 << 24) + (0 << 12) + 1024; 17567 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 17568 17569 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 17570 17571 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 17572 /* enable context validation interrupt from CFC */ 17573 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17574 17575 /* set the thresholds to prevent CFC/CDU race */ 17576 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 17577 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 17578 17579 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { 17580 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 17581 } 17582 17583 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 17584 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 17585 17586 /* Reset PCIE errors for debug */ 17587 REG_WR(sc, 0x2814, 0xffffffff); 17588 REG_WR(sc, 0x3820, 0xffffffff); 17589 17590 if (!CHIP_IS_E1x(sc)) { 
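/*
 * On E2 and newer parts the config-space writes below appear to clear the
 * per-function "unsupported request" error status bits in the PCIe
 * transaction layer (a hedged reading of the PXPCS_TL_*_ERR_UNSPPORT bit
 * names), complementing the raw PCIe error resets just above.
 */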
17591 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 17592 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 17593 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 17594 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 17595 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 17596 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 17597 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 17598 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 17599 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 17600 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 17601 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 17602 } 17603 17604 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); 17605 17606 if (!CHIP_IS_E1(sc)) { 17607 /* in E3 this done in per-port section */ 17608 if (!CHIP_IS_E3(sc)) 17609 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17610 } 17611 17612 if (CHIP_IS_E1H(sc)) { 17613 /* not applicable for E2 (and above ...) */ 17614 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); 17615 } 17616 17617 if (CHIP_REV_IS_SLOW(sc)) { 17618 DELAY(200000); 17619 } 17620 17621 /* finish CFC init */ 17622 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); 17623 if (val != 1) { 17624 BLOGE(sc, "CFC LL_INIT failed\n"); 17625 return (-1); 17626 } 17627 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); 17628 if (val != 1) { 17629 BLOGE(sc, "CFC AC_INIT failed\n"); 17630 return (-1); 17631 } 17632 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 17633 if (val != 1) { 17634 BLOGE(sc, "CFC CAM_INIT failed\n"); 17635 return (-1); 17636 } 17637 REG_WR(sc, CFC_REG_DEBUG0, 0); 17638 17639 if (CHIP_IS_E1(sc)) { 17640 /* read NIG statistic to see if this is our first up since powerup */ 17641 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 17642 val = *BXE_SP(sc, wb_data[0]); 17643 17644 /* do internal memory self test */ 17645 if ((val == 0) && bxe_int_mem_test(sc)) { 17646 BLOGE(sc, "internal mem self test failed\n"); 17647 return (-1); 17648 } 17649 } 17650 17651 bxe_setup_fan_failure_detection(sc); 17652 17653 /* clear PXP2 attentions */ 17654 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 17655 17656 bxe_enable_blocks_attention(sc); 17657 17658 if (!CHIP_REV_IS_SLOW(sc)) { 17659 ecore_enable_blocks_parity(sc); 17660 } 17661 17662 if (!BXE_NOMCP(sc)) { 17663 if (CHIP_IS_E1x(sc)) { 17664 bxe_common_init_phy(sc); 17665 } 17666 } 17667 17668 return (0); 17669 } 17670 17671 /** 17672 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase. 17673 * 17674 * @sc: driver handle 17675 */ 17676 static int 17677 bxe_init_hw_common_chip(struct bxe_softc *sc) 17678 { 17679 int rc = bxe_init_hw_common(sc); 17680 17681 if (rc) { 17682 return (rc); 17683 } 17684 17685 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 17686 if (!BXE_NOMCP(sc)) { 17687 bxe_common_init_phy(sc); 17688 } 17689 17690 return (0); 17691 } 17692 17693 static int 17694 bxe_init_hw_port(struct bxe_softc *sc) 17695 { 17696 int port = SC_PORT(sc); 17697 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 17698 uint32_t low, high; 17699 uint32_t val; 17700 17701 BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port); 17702 17703 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 17704 17705 ecore_init_block(sc, BLOCK_MISC, init_phase); 17706 ecore_init_block(sc, BLOCK_PXP, init_phase); 17707 ecore_init_block(sc, BLOCK_PXP2, init_phase); 17708 17709 /* 17710 * Timers bug workaround: disables the pf_master bit in pglue at 17711 * common phase, we need to enable it here before any dmae access are 17712 * attempted. 
Therefore we manually added the enable-master to the 17713 * port phase (it also happens in the function phase) 17714 */ 17715 if (!CHIP_IS_E1x(sc)) { 17716 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17717 } 17718 17719 ecore_init_block(sc, BLOCK_ATC, init_phase); 17720 ecore_init_block(sc, BLOCK_DMAE, init_phase); 17721 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 17722 ecore_init_block(sc, BLOCK_QM, init_phase); 17723 17724 ecore_init_block(sc, BLOCK_TCM, init_phase); 17725 ecore_init_block(sc, BLOCK_UCM, init_phase); 17726 ecore_init_block(sc, BLOCK_CCM, init_phase); 17727 ecore_init_block(sc, BLOCK_XCM, init_phase); 17728 17729 /* QM cid (connection) count */ 17730 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); 17731 17732 if (CNIC_SUPPORT(sc)) { 17733 ecore_init_block(sc, BLOCK_TM, init_phase); 17734 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20); 17735 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 17736 } 17737 17738 ecore_init_block(sc, BLOCK_DORQ, init_phase); 17739 17740 ecore_init_block(sc, BLOCK_BRB1, init_phase); 17741 17742 if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) { 17743 if (IS_MF(sc)) { 17744 low = (BXE_ONE_PORT(sc) ? 160 : 246); 17745 } else if (sc->mtu > 4096) { 17746 if (BXE_ONE_PORT(sc)) { 17747 low = 160; 17748 } else { 17749 val = sc->mtu; 17750 /* (24*1024 + val*4)/256 */ 17751 low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); 17752 } 17753 } else { 17754 low = (BXE_ONE_PORT(sc) ? 80 : 160); 17755 } 17756 high = (low + 56); /* 14*1024/256 */ 17757 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 17758 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 17759 } 17760 17761 if (CHIP_IS_MODE_4_PORT(sc)) { 17762 REG_WR(sc, SC_PORT(sc) ? 17763 BRB1_REG_MAC_GUARANTIED_1 : 17764 BRB1_REG_MAC_GUARANTIED_0, 40); 17765 } 17766 17767 ecore_init_block(sc, BLOCK_PRS, init_phase); 17768 if (CHIP_IS_E3B0(sc)) { 17769 if (IS_MF_AFEX(sc)) { 17770 /* configure headers for AFEX mode */ 17771 REG_WR(sc, SC_PORT(sc) ? 17772 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17773 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 17774 REG_WR(sc, SC_PORT(sc) ? 17775 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 17776 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 17777 REG_WR(sc, SC_PORT(sc) ? 17778 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 17779 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 17780 } else { 17781 /* Ovlan exists only if we are in multi-function + 17782 * switch-dependent mode, in switch-independent there 17783 * is no ovlan headers 17784 */ 17785 REG_WR(sc, SC_PORT(sc) ? 17786 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17787 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 17788 (sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6)); 17789 } 17790 } 17791 17792 ecore_init_block(sc, BLOCK_TSDM, init_phase); 17793 ecore_init_block(sc, BLOCK_CSDM, init_phase); 17794 ecore_init_block(sc, BLOCK_USDM, init_phase); 17795 ecore_init_block(sc, BLOCK_XSDM, init_phase); 17796 17797 ecore_init_block(sc, BLOCK_TSEM, init_phase); 17798 ecore_init_block(sc, BLOCK_USEM, init_phase); 17799 ecore_init_block(sc, BLOCK_CSEM, init_phase); 17800 ecore_init_block(sc, BLOCK_XSEM, init_phase); 17801 17802 ecore_init_block(sc, BLOCK_UPB, init_phase); 17803 ecore_init_block(sc, BLOCK_XPB, init_phase); 17804 17805 ecore_init_block(sc, BLOCK_PBF, init_phase); 17806 17807 if (CHIP_IS_E1x(sc)) { 17808 /* configure PBF to work without PAUSE mtu 9000 */ 17809 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 17810 17811 /* update threshold */ 17812 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 17813 /* update init credit */ 17814 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 17815 17816 /* probe changes */ 17817 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1); 17818 DELAY(50); 17819 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0); 17820 } 17821 17822 if (CNIC_SUPPORT(sc)) { 17823 ecore_init_block(sc, BLOCK_SRC, init_phase); 17824 } 17825 17826 ecore_init_block(sc, BLOCK_CDU, init_phase); 17827 ecore_init_block(sc, BLOCK_CFC, init_phase); 17828 17829 if (CHIP_IS_E1(sc)) { 17830 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 17831 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 17832 } 17833 ecore_init_block(sc, BLOCK_HC, init_phase); 17834 17835 ecore_init_block(sc, BLOCK_IGU, init_phase); 17836 17837 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 17838 /* init aeu_mask_attn_func_0/1: 17839 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 17840 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 17841 * bits 4-7 are used for "per vn group attention" */ 17842 val = IS_MF(sc) ? 0xF7 : 0x7; 17843 /* Enable DCBX attention for all but E1 */ 17844 val |= CHIP_IS_E1(sc) ? 0 : 0x10; 17845 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 17846 17847 ecore_init_block(sc, BLOCK_NIG, init_phase); 17848 17849 if (!CHIP_IS_E1x(sc)) { 17850 /* Bit-map indicating which L2 hdrs may appear after the 17851 * basic Ethernet header 17852 */ 17853 if (IS_MF_AFEX(sc)) { 17854 REG_WR(sc, SC_PORT(sc) ? 17855 NIG_REG_P1_HDRS_AFTER_BASIC : 17856 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 17857 } else { 17858 REG_WR(sc, SC_PORT(sc) ? 17859 NIG_REG_P1_HDRS_AFTER_BASIC : 17860 NIG_REG_P0_HDRS_AFTER_BASIC, 17861 IS_MF_SD(sc) ? 7 : 6); 17862 } 17863 17864 if (CHIP_IS_E3(sc)) { 17865 REG_WR(sc, SC_PORT(sc) ? 17866 NIG_REG_LLH1_MF_MODE : 17867 NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17868 } 17869 } 17870 if (!CHIP_IS_E3(sc)) { 17871 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 17872 } 17873 17874 if (!CHIP_IS_E1(sc)) { 17875 /* 0x2 disable mf_ov, 0x1 enable */ 17876 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 17877 (IS_MF_SD(sc) ? 0x1 : 0x2)); 17878 17879 if (!CHIP_IS_E1x(sc)) { 17880 val = 0; 17881 switch (sc->devinfo.mf_info.mf_mode) { 17882 case MULTI_FUNCTION_SD: 17883 val = 1; 17884 break; 17885 case MULTI_FUNCTION_SI: 17886 case MULTI_FUNCTION_AFEX: 17887 val = 2; 17888 break; 17889 } 17890 17891 REG_WR(sc, (SC_PORT(sc) ? 
NIG_REG_LLH1_CLS_TYPE :
17892 NIG_REG_LLH0_CLS_TYPE), val);
17893 }
17894 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17895 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17896 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17897 }
17898
17899 /* If SPIO5 is set to generate interrupts, enable it for this port */
17900 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17901 if (val & MISC_SPIO_SPIO5) {
17902 uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17903 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17904 val = REG_RD(sc, reg_addr);
17905 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17906 REG_WR(sc, reg_addr, val);
17907 }
17908
17909 return (0);
17910 }
17911
17912 static uint32_t
17913 bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17914 uint32_t reg,
17915 uint32_t expected,
17916 uint32_t poll_count)
17917 {
17918 uint32_t cur_cnt = poll_count;
17919 uint32_t val;
17920
17921 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17922 DELAY(FLR_WAIT_INTERVAL);
17923 }
17924
17925 return (val);
17926 }
17927
17928 static int
17929 bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17930 uint32_t reg,
17931 char *msg,
17932 uint32_t poll_cnt)
17933 {
17934 uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17935
17936 if (val != 0) {
17937 BLOGE(sc, "%s usage count=%d\n", msg, val);
17938 return (1);
17939 }
17940
17941 return (0);
17942 }
17943
17944 /* Routines shared with VF FLR cleanup */
17945 static uint32_t
17946 bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17947 {
17948 /* adjust polling timeout */
17949 if (CHIP_REV_IS_EMUL(sc)) {
17950 return (FLR_POLL_CNT * 2000);
17951 }
17952
17953 if (CHIP_REV_IS_FPGA(sc)) {
17954 return (FLR_POLL_CNT * 120);
17955 }
17956
17957 return (FLR_POLL_CNT);
17958 }
17959
17960 static int
17961 bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17962 uint32_t poll_cnt)
17963 {
17964 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17965 if (bxe_flr_clnup_poll_hw_counter(sc,
17966 CFC_REG_NUM_LCIDS_INSIDE_PF,
17967 "CFC PF usage counter timed out",
17968 poll_cnt)) {
17969 return (1);
17970 }
17971
17972 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17973 if (bxe_flr_clnup_poll_hw_counter(sc,
17974 DORQ_REG_PF_USAGE_CNT,
17975 "DQ PF usage counter timed out",
17976 poll_cnt)) {
17977 return (1);
17978 }
17979
17980 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17981 if (bxe_flr_clnup_poll_hw_counter(sc,
17982 QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17983 "QM PF usage counter timed out",
17984 poll_cnt)) {
17985 return (1);
17986 }
17987
17988 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17989 if (bxe_flr_clnup_poll_hw_counter(sc,
17990 TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17991 "Timers VNIC usage counter timed out",
17992 poll_cnt)) {
17993 return (1);
17994 }
17995
17996 if (bxe_flr_clnup_poll_hw_counter(sc,
17997 TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17998 "Timers NUM_SCANS usage counter timed out",
17999 poll_cnt)) {
18000 return (1);
18001 }
18002
18003 /* Wait for DMAE PF usage counter to zero */
18004 if (bxe_flr_clnup_poll_hw_counter(sc,
18005 dmae_reg_go_c[INIT_DMAE_C(sc)],
18006 "DMAE command register timed out",
18007 poll_cnt)) {
18008 return (1);
18009 }
18010
18011 return (0);
18012 }
18013
18014 #define OP_GEN_PARAM(param) \
18015 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
18016 #define OP_GEN_TYPE(type) \
18017 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
18018 #define OP_GEN_AGG_VECT(index) \
18019 (((index) <<
SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 18020 18021 static int 18022 bxe_send_final_clnup(struct bxe_softc *sc, 18023 uint8_t clnup_func, 18024 uint32_t poll_cnt) 18025 { 18026 uint32_t op_gen_command = 0; 18027 uint32_t comp_addr = (BAR_CSTRORM_INTMEM + 18028 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); 18029 int ret = 0; 18030 18031 if (REG_RD(sc, comp_addr)) { 18032 BLOGE(sc, "Cleanup complete was not 0 before sending\n"); 18033 return (1); 18034 } 18035 18036 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); 18037 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); 18038 op_gen_command |= OP_GEN_AGG_VECT(clnup_func); 18039 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 18040 18041 BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); 18042 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); 18043 18044 if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { 18045 BLOGE(sc, "FW final cleanup did not succeed\n"); 18046 BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", 18047 (REG_RD(sc, comp_addr))); 18048 bxe_panic(sc, ("FLR cleanup failed\n")); 18049 return (1); 18050 } 18051 18052 /* Zero completion for nxt FLR */ 18053 REG_WR(sc, comp_addr, 0); 18054 18055 return (ret); 18056 } 18057 18058 static void 18059 bxe_pbf_pN_buf_flushed(struct bxe_softc *sc, 18060 struct pbf_pN_buf_regs *regs, 18061 uint32_t poll_count) 18062 { 18063 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; 18064 uint32_t cur_cnt = poll_count; 18065 18066 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); 18067 crd = crd_start = REG_RD(sc, regs->crd); 18068 init_crd = REG_RD(sc, regs->init_crd); 18069 18070 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); 18071 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); 18072 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); 18073 18074 while ((crd != init_crd) && 18075 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < 18076 (init_crd - crd_start))) { 18077 if (cur_cnt--) { 18078 DELAY(FLR_WAIT_INTERVAL); 18079 crd = REG_RD(sc, regs->crd); 18080 crd_freed = REG_RD(sc, regs->crd_freed); 18081 } else { 18082 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); 18083 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); 18084 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); 18085 break; 18086 } 18087 } 18088 18089 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n", 18090 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18091 } 18092 18093 static void 18094 bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc, 18095 struct pbf_pN_cmd_regs *regs, 18096 uint32_t poll_count) 18097 { 18098 uint32_t occup, to_free, freed, freed_start; 18099 uint32_t cur_cnt = poll_count; 18100 18101 occup = to_free = REG_RD(sc, regs->lines_occup); 18102 freed = freed_start = REG_RD(sc, regs->lines_freed); 18103 18104 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 18105 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); 18106 18107 while (occup && 18108 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { 18109 if (cur_cnt--) { 18110 DELAY(FLR_WAIT_INTERVAL); 18111 occup = REG_RD(sc, regs->lines_occup); 18112 freed = REG_RD(sc, regs->lines_freed); 18113 } else { 18114 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); 18115 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 18116 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : 
s:%x\n", regs->pN, freed); 18117 break; 18118 } 18119 } 18120 18121 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", 18122 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18123 } 18124 18125 static void 18126 bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) 18127 { 18128 struct pbf_pN_cmd_regs cmd_regs[] = { 18129 {0, (CHIP_IS_E3B0(sc)) ? 18130 PBF_REG_TQ_OCCUPANCY_Q0 : 18131 PBF_REG_P0_TQ_OCCUPANCY, 18132 (CHIP_IS_E3B0(sc)) ? 18133 PBF_REG_TQ_LINES_FREED_CNT_Q0 : 18134 PBF_REG_P0_TQ_LINES_FREED_CNT}, 18135 {1, (CHIP_IS_E3B0(sc)) ? 18136 PBF_REG_TQ_OCCUPANCY_Q1 : 18137 PBF_REG_P1_TQ_OCCUPANCY, 18138 (CHIP_IS_E3B0(sc)) ? 18139 PBF_REG_TQ_LINES_FREED_CNT_Q1 : 18140 PBF_REG_P1_TQ_LINES_FREED_CNT}, 18141 {4, (CHIP_IS_E3B0(sc)) ? 18142 PBF_REG_TQ_OCCUPANCY_LB_Q : 18143 PBF_REG_P4_TQ_OCCUPANCY, 18144 (CHIP_IS_E3B0(sc)) ? 18145 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 18146 PBF_REG_P4_TQ_LINES_FREED_CNT} 18147 }; 18148 18149 struct pbf_pN_buf_regs buf_regs[] = { 18150 {0, (CHIP_IS_E3B0(sc)) ? 18151 PBF_REG_INIT_CRD_Q0 : 18152 PBF_REG_P0_INIT_CRD , 18153 (CHIP_IS_E3B0(sc)) ? 18154 PBF_REG_CREDIT_Q0 : 18155 PBF_REG_P0_CREDIT, 18156 (CHIP_IS_E3B0(sc)) ? 18157 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 18158 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 18159 {1, (CHIP_IS_E3B0(sc)) ? 18160 PBF_REG_INIT_CRD_Q1 : 18161 PBF_REG_P1_INIT_CRD, 18162 (CHIP_IS_E3B0(sc)) ? 18163 PBF_REG_CREDIT_Q1 : 18164 PBF_REG_P1_CREDIT, 18165 (CHIP_IS_E3B0(sc)) ? 18166 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 18167 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 18168 {4, (CHIP_IS_E3B0(sc)) ? 18169 PBF_REG_INIT_CRD_LB_Q : 18170 PBF_REG_P4_INIT_CRD, 18171 (CHIP_IS_E3B0(sc)) ? 18172 PBF_REG_CREDIT_LB_Q : 18173 PBF_REG_P4_CREDIT, 18174 (CHIP_IS_E3B0(sc)) ? 18175 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 18176 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 18177 }; 18178 18179 int i; 18180 18181 /* Verify the command queues are flushed P0, P1, P4 */ 18182 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { 18183 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); 18184 } 18185 18186 /* Verify the transmission buffers are flushed P0, P1, P4 */ 18187 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { 18188 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); 18189 } 18190 } 18191 18192 static void 18193 bxe_hw_enable_status(struct bxe_softc *sc) 18194 { 18195 uint32_t val; 18196 18197 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); 18198 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); 18199 18200 val = REG_RD(sc, PBF_REG_DISABLE_PF); 18201 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); 18202 18203 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); 18204 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); 18205 18206 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); 18207 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); 18208 18209 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 18210 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); 18211 18212 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 18213 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); 18214 18215 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 18216 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); 18217 18218 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 18219 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); 18220 } 18221 18222 static int 18223 bxe_pf_flr_clnup(struct bxe_softc *sc) 18224 { 18225 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); 18226 18227 
BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); 18228 18229 /* Re-enable PF target read access */ 18230 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 18231 18232 /* Poll HW usage counters */ 18233 BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); 18234 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { 18235 return (-1); 18236 } 18237 18238 /* Zero the igu 'trailing edge' and 'leading edge' */ 18239 18240 /* Send the FW cleanup command */ 18241 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { 18242 return (-1); 18243 } 18244 18245 /* ATC cleanup */ 18246 18247 /* Verify TX hw is flushed */ 18248 bxe_tx_hw_flushed(sc, poll_cnt); 18249 18250 /* Wait 100ms (not adjusted according to platform) */ 18251 DELAY(100000); 18252 18253 /* Verify no pending pci transactions */ 18254 if (bxe_is_pcie_pending(sc)) { 18255 BLOGE(sc, "PCIE Transactions still pending\n"); 18256 } 18257 18258 /* Debug */ 18259 bxe_hw_enable_status(sc); 18260 18261 /* 18262 * Master enable - Due to WB DMAE writes performed before this 18263 * register is re-initialized as part of the regular function init 18264 */ 18265 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18266 18267 return (0); 18268 } 18269 18270 #if 0 18271 static void 18272 bxe_init_searcher(struct bxe_softc *sc) 18273 { 18274 int port = SC_PORT(sc); 18275 ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM); 18276 /* T1 hash bits value determines the T1 number of entries */ 18277 REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 18278 } 18279 #endif 18280 18281 static int 18282 bxe_init_hw_func(struct bxe_softc *sc) 18283 { 18284 int port = SC_PORT(sc); 18285 int func = SC_FUNC(sc); 18286 int init_phase = PHASE_PF0 + func; 18287 struct ecore_ilt *ilt = sc->ilt; 18288 uint16_t cdu_ilt_start; 18289 uint32_t addr, val; 18290 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; 18291 int i, main_mem_width, rc; 18292 18293 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); 18294 18295 /* FLR cleanup */ 18296 if (!CHIP_IS_E1x(sc)) { 18297 rc = bxe_pf_flr_clnup(sc); 18298 if (rc) { 18299 BLOGE(sc, "FLR cleanup failed!\n"); 18300 // XXX bxe_fw_dump(sc); 18301 // XXX bxe_idle_chk(sc); 18302 return (rc); 18303 } 18304 } 18305 18306 /* set MSI reconfigure capability */ 18307 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18308 addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 18309 val = REG_RD(sc, addr); 18310 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 18311 REG_WR(sc, addr, val); 18312 } 18313 18314 ecore_init_block(sc, BLOCK_PXP, init_phase); 18315 ecore_init_block(sc, BLOCK_PXP2, init_phase); 18316 18317 ilt = sc->ilt; 18318 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 18319 18320 #if 0 18321 if (IS_SRIOV(sc)) { 18322 cdu_ilt_start += BXE_FIRST_VF_CID/ILT_PAGE_CIDS; 18323 } 18324 cdu_ilt_start = bxe_iov_init_ilt(sc, cdu_ilt_start); 18325 18326 #if (BXE_FIRST_VF_CID > 0) 18327 /* 18328 * If BXE_FIRST_VF_CID > 0 then the PF L2 cids precedes 18329 * those of the VFs, so start line should be reset 18330 */ 18331 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 18332 #endif 18333 #endif 18334 18335 for (i = 0; i < L2_ILT_LINES(sc); i++) { 18336 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; 18337 ilt->lines[cdu_ilt_start + i].page_mapping = 18338 sc->context[i].vcxt_dma.paddr; 18339 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; 18340 } 18341 ecore_ilt_init_op(sc, INITOP_SET); 18342 18343 #if 0 18344 if (!CONFIGURE_NIC_MODE(sc)) { 18345 bxe_init_searcher(sc); 18346 REG_WR(sc, PRS_REG_NIC_MODE, 0); 18347 BLOGD(sc, DBG_LOAD, "NIC MODE disabled\n"); 18348 } else 18349 #endif 18350 { 18351 /* Set NIC mode */ 18352 REG_WR(sc, PRS_REG_NIC_MODE, 1); 18353 BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); 18354 } 18355 18356 if (!CHIP_IS_E1x(sc)) { 18357 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; 18358 18359 /* Turn on a single ISR mode in IGU if driver is going to use 18360 * INT#x or MSI 18361 */ 18362 if (sc->interrupt_mode != INTR_MODE_MSIX) { 18363 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 18364 } 18365 18366 /* 18367 * Timers workaround bug: function init part. 18368 * Need to wait 20msec after initializing ILT, 18369 * needed to make sure there are no requests in 18370 * one of the PXP internal queues with "old" ILT addresses 18371 */ 18372 DELAY(20000); 18373 18374 /* 18375 * Master enable - Due to WB DMAE writes performed before this 18376 * register is re-initialized as part of the regular function 18377 * init 18378 */ 18379 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18380 /* Enable the function in IGU */ 18381 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); 18382 } 18383 18384 sc->dmae_ready = 1; 18385 18386 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 18387 18388 if (!CHIP_IS_E1x(sc)) 18389 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 18390 18391 ecore_init_block(sc, BLOCK_ATC, init_phase); 18392 ecore_init_block(sc, BLOCK_DMAE, init_phase); 18393 ecore_init_block(sc, BLOCK_NIG, init_phase); 18394 ecore_init_block(sc, BLOCK_SRC, init_phase); 18395 ecore_init_block(sc, BLOCK_MISC, init_phase); 18396 ecore_init_block(sc, BLOCK_TCM, init_phase); 18397 ecore_init_block(sc, BLOCK_UCM, init_phase); 18398 ecore_init_block(sc, BLOCK_CCM, init_phase); 18399 ecore_init_block(sc, BLOCK_XCM, init_phase); 18400 ecore_init_block(sc, BLOCK_TSEM, init_phase); 18401 ecore_init_block(sc, BLOCK_USEM, init_phase); 18402 ecore_init_block(sc, BLOCK_CSEM, init_phase); 18403 ecore_init_block(sc, BLOCK_XSEM, init_phase); 18404 18405 if (!CHIP_IS_E1x(sc)) 18406 REG_WR(sc, QM_REG_PF_EN, 1); 18407 18408 if (!CHIP_IS_E1x(sc)) { 18409 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18410 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18411 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18412 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18413 } 18414 
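/*
 * The remaining function-phase init runs through the ecore init tool block
 * by block (QM, TM, DORQ, BRB1, PRS, the SDM/PB blocks, PBF, CDU, CFC,
 * MISC_AEU) before programming either the HC or the IGU, depending on which
 * interrupt block this device uses.
 */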
ecore_init_block(sc, BLOCK_QM, init_phase); 18415 18416 ecore_init_block(sc, BLOCK_TM, init_phase); 18417 ecore_init_block(sc, BLOCK_DORQ, init_phase); 18418 18419 bxe_iov_init_dq(sc); 18420 18421 ecore_init_block(sc, BLOCK_BRB1, init_phase); 18422 ecore_init_block(sc, BLOCK_PRS, init_phase); 18423 ecore_init_block(sc, BLOCK_TSDM, init_phase); 18424 ecore_init_block(sc, BLOCK_CSDM, init_phase); 18425 ecore_init_block(sc, BLOCK_USDM, init_phase); 18426 ecore_init_block(sc, BLOCK_XSDM, init_phase); 18427 ecore_init_block(sc, BLOCK_UPB, init_phase); 18428 ecore_init_block(sc, BLOCK_XPB, init_phase); 18429 ecore_init_block(sc, BLOCK_PBF, init_phase); 18430 if (!CHIP_IS_E1x(sc)) 18431 REG_WR(sc, PBF_REG_DISABLE_PF, 0); 18432 18433 ecore_init_block(sc, BLOCK_CDU, init_phase); 18434 18435 ecore_init_block(sc, BLOCK_CFC, init_phase); 18436 18437 if (!CHIP_IS_E1x(sc)) 18438 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); 18439 18440 if (IS_MF(sc)) { 18441 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 18442 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); 18443 } 18444 18445 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 18446 18447 /* HC init per function */ 18448 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18449 if (CHIP_IS_E1H(sc)) { 18450 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18451 18452 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18453 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18454 } 18455 ecore_init_block(sc, BLOCK_HC, init_phase); 18456 18457 } else { 18458 int num_segs, sb_idx, prod_offset; 18459 18460 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18461 18462 if (!CHIP_IS_E1x(sc)) { 18463 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18464 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18465 } 18466 18467 ecore_init_block(sc, BLOCK_IGU, init_phase); 18468 18469 if (!CHIP_IS_E1x(sc)) { 18470 int dsb_idx = 0; 18471 /** 18472 * Producer memory: 18473 * E2 mode: address 0-135 match to the mapping memory; 18474 * 136 - PF0 default prod; 137 - PF1 default prod; 18475 * 138 - PF2 default prod; 139 - PF3 default prod; 18476 * 140 - PF0 attn prod; 141 - PF1 attn prod; 18477 * 142 - PF2 attn prod; 143 - PF3 attn prod; 18478 * 144-147 reserved. 18479 * 18480 * E1.5 mode - In backward compatible mode; 18481 * for non default SB; each even line in the memory 18482 * holds the U producer and each odd line hold 18483 * the C producer. The first 128 producers are for 18484 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 18485 * producers are for the DSB for each PF. 18486 * Each PF has five segments: (the order inside each 18487 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 18488 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 18489 * 144-147 attn prods; 18490 */ 18491 /* non-default-status-blocks */ 18492 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 18493 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 18494 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { 18495 prod_offset = (sc->igu_base_sb + sb_idx) * 18496 num_segs; 18497 18498 for (i = 0; i < num_segs; i++) { 18499 addr = IGU_REG_PROD_CONS_MEMORY + 18500 (prod_offset + i) * 4; 18501 REG_WR(sc, addr, 0); 18502 } 18503 /* send consumer update with value 0 */ 18504 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, 18505 USTORM_ID, 0, IGU_INT_NOP, 1); 18506 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); 18507 } 18508 18509 /* default-status-blocks */ 18510 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 
18511 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 18512 18513 if (CHIP_IS_MODE_4_PORT(sc)) 18514 dsb_idx = SC_FUNC(sc); 18515 else 18516 dsb_idx = SC_VN(sc); 18517 18518 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? 18519 IGU_BC_BASE_DSB_PROD + dsb_idx : 18520 IGU_NORM_BASE_DSB_PROD + dsb_idx); 18521 18522 /* 18523 * igu prods come in chunks of E1HVN_MAX (4) - 18524 * does not matters what is the current chip mode 18525 */ 18526 for (i = 0; i < (num_segs * E1HVN_MAX); 18527 i += E1HVN_MAX) { 18528 addr = IGU_REG_PROD_CONS_MEMORY + 18529 (prod_offset + i)*4; 18530 REG_WR(sc, addr, 0); 18531 } 18532 /* send consumer update with 0 */ 18533 if (CHIP_INT_MODE_IS_BC(sc)) { 18534 bxe_ack_sb(sc, sc->igu_dsb_id, 18535 USTORM_ID, 0, IGU_INT_NOP, 1); 18536 bxe_ack_sb(sc, sc->igu_dsb_id, 18537 CSTORM_ID, 0, IGU_INT_NOP, 1); 18538 bxe_ack_sb(sc, sc->igu_dsb_id, 18539 XSTORM_ID, 0, IGU_INT_NOP, 1); 18540 bxe_ack_sb(sc, sc->igu_dsb_id, 18541 TSTORM_ID, 0, IGU_INT_NOP, 1); 18542 bxe_ack_sb(sc, sc->igu_dsb_id, 18543 ATTENTION_ID, 0, IGU_INT_NOP, 1); 18544 } else { 18545 bxe_ack_sb(sc, sc->igu_dsb_id, 18546 USTORM_ID, 0, IGU_INT_NOP, 1); 18547 bxe_ack_sb(sc, sc->igu_dsb_id, 18548 ATTENTION_ID, 0, IGU_INT_NOP, 1); 18549 } 18550 bxe_igu_clear_sb(sc, sc->igu_dsb_id); 18551 18552 /* !!! these should become driver const once 18553 rf-tool supports split-68 const */ 18554 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 18555 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 18556 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); 18557 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); 18558 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); 18559 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); 18560 } 18561 } 18562 18563 /* Reset PCIE errors for debug */ 18564 REG_WR(sc, 0x2114, 0xffffffff); 18565 REG_WR(sc, 0x2120, 0xffffffff); 18566 18567 if (CHIP_IS_E1x(sc)) { 18568 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ 18569 main_mem_base = HC_REG_MAIN_MEMORY + 18570 SC_PORT(sc) * (main_mem_size * 4); 18571 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 18572 main_mem_width = 8; 18573 18574 val = REG_RD(sc, main_mem_prty_clr); 18575 if (val) { 18576 BLOGD(sc, DBG_LOAD, 18577 "Parity errors in HC block during function init (0x%x)!\n", 18578 val); 18579 } 18580 18581 /* Clear "false" parity errors in MSI-X table */ 18582 for (i = main_mem_base; 18583 i < main_mem_base + main_mem_size * 4; 18584 i += main_mem_width) { 18585 bxe_read_dmae(sc, i, main_mem_width / 4); 18586 bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), 18587 i, main_mem_width / 4); 18588 } 18589 /* Clear HC parity attention */ 18590 REG_RD(sc, main_mem_prty_clr); 18591 } 18592 18593 #if 1 18594 /* Enable STORMs SP logging */ 18595 REG_WR8(sc, BAR_USTRORM_INTMEM + 18596 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18597 REG_WR8(sc, BAR_TSTRORM_INTMEM + 18598 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18599 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18600 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18601 REG_WR8(sc, BAR_XSTRORM_INTMEM + 18602 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18603 #endif 18604 18605 elink_phy_probe(&sc->link_params); 18606 18607 return (0); 18608 } 18609 18610 static void 18611 bxe_link_reset(struct bxe_softc *sc) 18612 { 18613 if (!BXE_NOMCP(sc)) { 18614 bxe_acquire_phy_lock(sc); 18615 elink_lfa_reset(&sc->link_params, &sc->link_vars); 18616 bxe_release_phy_lock(sc); 18617 } else { 18618 if (!CHIP_REV_IS_SLOW(sc)) { 18619 BLOGW(sc, "Bootcode is missing - cannot reset link\n"); 18620 } 18621 } 18622 } 18623 18624 static void 18625 bxe_reset_port(struct bxe_softc *sc) 
18626 { 18627 int port = SC_PORT(sc); 18628 uint32_t val; 18629 18630 /* reset physical Link */ 18631 bxe_link_reset(sc); 18632 18633 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 18634 18635 /* Do not rcv packets to BRB */ 18636 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 18637 /* Do not direct rcv packets that are not for MCP to the BRB */ 18638 REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 18639 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 18640 18641 /* Configure AEU */ 18642 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 18643 18644 DELAY(100000); 18645 18646 /* Check for BRB port occupancy */ 18647 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 18648 if (val) { 18649 BLOGD(sc, DBG_LOAD, 18650 "BRB1 is not empty, %d blocks are occupied\n", val); 18651 } 18652 18653 /* TODO: Close Doorbell port? */ 18654 } 18655 18656 static void 18657 bxe_ilt_wr(struct bxe_softc *sc, 18658 uint32_t index, 18659 bus_addr_t addr) 18660 { 18661 int reg; 18662 uint32_t wb_write[2]; 18663 18664 if (CHIP_IS_E1(sc)) { 18665 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 18666 } else { 18667 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 18668 } 18669 18670 wb_write[0] = ONCHIP_ADDR1(addr); 18671 wb_write[1] = ONCHIP_ADDR2(addr); 18672 REG_WR_DMAE(sc, reg, wb_write, 2); 18673 } 18674 18675 static void 18676 bxe_clear_func_ilt(struct bxe_softc *sc, 18677 uint32_t func) 18678 { 18679 uint32_t i, base = FUNC_ILT_BASE(func); 18680 for (i = base; i < base + ILT_PER_FUNC; i++) { 18681 bxe_ilt_wr(sc, i, 0); 18682 } 18683 } 18684 18685 static void 18686 bxe_reset_func(struct bxe_softc *sc) 18687 { 18688 struct bxe_fastpath *fp; 18689 int port = SC_PORT(sc); 18690 int func = SC_FUNC(sc); 18691 int i; 18692 18693 /* Disable the function in the FW */ 18694 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 18695 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 18696 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 18697 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 18698 18699 /* FP SBs */ 18700 FOR_EACH_ETH_QUEUE(sc, i) { 18701 fp = &sc->fp[i]; 18702 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18703 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 18704 SB_DISABLED); 18705 } 18706 18707 #if 0 18708 if (CNIC_LOADED(sc)) { 18709 /* CNIC SB */ 18710 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18711 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET 18712 (bxe_cnic_fw_sb_id(sc)), SB_DISABLED); 18713 } 18714 #endif 18715 18716 /* SP SB */ 18717 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18718 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 18719 SB_DISABLED); 18720 18721 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { 18722 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0); 18723 } 18724 18725 /* Configure IGU */ 18726 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18727 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18728 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18729 } else { 18730 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18731 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18732 } 18733 18734 if (CNIC_LOADED(sc)) { 18735 /* Disable Timer scan */ 18736 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 18737 /* 18738 * Wait for at least 10ms and up to 2 second for the timers 18739 * scan to complete 18740 */ 18741 for (i = 0; i < 200; i++) { 18742 DELAY(10000); 18743 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4)) 18744 break; 18745 } 18746 } 18747 18748 /* Clear ILT */ 18749 bxe_clear_func_ilt(sc, func); 18750 18751 /* 18752 * Timers workaround bug for E2: if 
this is vnic-3, 18753 * we need to set the entire ilt range for this timers. 18754 */ 18755 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { 18756 struct ilt_client_info ilt_cli; 18757 /* use dummy TM client */ 18758 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 18759 ilt_cli.start = 0; 18760 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 18761 ilt_cli.client_num = ILT_CLIENT_TM; 18762 18763 ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); 18764 } 18765 18766 /* this assumes that reset_port() called before reset_func()*/ 18767 if (!CHIP_IS_E1x(sc)) { 18768 bxe_pf_disable(sc); 18769 } 18770 18771 sc->dmae_ready = 0; 18772 } 18773 18774 static int 18775 bxe_gunzip_init(struct bxe_softc *sc) 18776 { 18777 return (0); 18778 } 18779 18780 static void 18781 bxe_gunzip_end(struct bxe_softc *sc) 18782 { 18783 return; 18784 } 18785 18786 static int 18787 bxe_init_firmware(struct bxe_softc *sc) 18788 { 18789 if (CHIP_IS_E1(sc)) { 18790 ecore_init_e1_firmware(sc); 18791 sc->iro_array = e1_iro_arr; 18792 } else if (CHIP_IS_E1H(sc)) { 18793 ecore_init_e1h_firmware(sc); 18794 sc->iro_array = e1h_iro_arr; 18795 } else if (!CHIP_IS_E1x(sc)) { 18796 ecore_init_e2_firmware(sc); 18797 sc->iro_array = e2_iro_arr; 18798 } else { 18799 BLOGE(sc, "Unsupported chip revision\n"); 18800 return (-1); 18801 } 18802 18803 return (0); 18804 } 18805 18806 static void 18807 bxe_release_firmware(struct bxe_softc *sc) 18808 { 18809 /* Do nothing */ 18810 return; 18811 } 18812 18813 static int 18814 ecore_gunzip(struct bxe_softc *sc, 18815 const uint8_t *zbuf, 18816 int len) 18817 { 18818 /* XXX : Implement... */ 18819 BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); 18820 return (FALSE); 18821 } 18822 18823 static void 18824 ecore_reg_wr_ind(struct bxe_softc *sc, 18825 uint32_t addr, 18826 uint32_t val) 18827 { 18828 bxe_reg_wr_ind(sc, addr, val); 18829 } 18830 18831 static void 18832 ecore_write_dmae_phys_len(struct bxe_softc *sc, 18833 bus_addr_t phys_addr, 18834 uint32_t addr, 18835 uint32_t len) 18836 { 18837 bxe_write_dmae_phys_len(sc, phys_addr, addr, len); 18838 } 18839 18840 void 18841 ecore_storm_memset_struct(struct bxe_softc *sc, 18842 uint32_t addr, 18843 size_t size, 18844 uint32_t *data) 18845 { 18846 uint8_t i; 18847 for (i = 0; i < size/4; i++) { 18848 REG_WR(sc, addr + (i * 4), data[i]); 18849 } 18850 } 18851 18852 18853 /* 18854 * character device - ioctl interface definitions 18855 */ 18856 18857 18858 #include "bxe_dump.h" 18859 #include "bxe_ioctl.h" 18860 #include <sys/conf.h> 18861 18862 static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, 18863 struct thread *td); 18864 18865 static struct cdevsw bxe_cdevsw = { 18866 .d_version = D_VERSION, 18867 .d_ioctl = bxe_eioctl, 18868 .d_name = "bxecnic", 18869 }; 18870 18871 #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 
0 : (sc->pcie_func & 1)) 18872 18873 18874 #define DUMP_ALL_PRESETS 0x1FFF 18875 #define DUMP_MAX_PRESETS 13 18876 #define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) 18877 #define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) 18878 #define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) 18879 #define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) 18880 #define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) 18881 18882 #define IS_REG_IN_PRESET(presets, idx) \ 18883 ((presets & (1 << (idx-1))) == (1 << (idx-1))) 18884 18885 18886 static int 18887 bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset) 18888 { 18889 if (CHIP_IS_E1(sc)) 18890 return dump_num_registers[0][preset-1]; 18891 else if (CHIP_IS_E1H(sc)) 18892 return dump_num_registers[1][preset-1]; 18893 else if (CHIP_IS_E2(sc)) 18894 return dump_num_registers[2][preset-1]; 18895 else if (CHIP_IS_E3A0(sc)) 18896 return dump_num_registers[3][preset-1]; 18897 else if (CHIP_IS_E3B0(sc)) 18898 return dump_num_registers[4][preset-1]; 18899 else 18900 return 0; 18901 } 18902 18903 static int 18904 bxe_get_max_regs_len(struct bxe_softc *sc) 18905 { 18906 uint32_t preset_idx; 18907 int regdump_len32, len32; 18908 18909 regdump_len32 = bxe_get_preset_regs_len(sc, 1); 18910 18911 /* Calculate the total preset regs length */ 18912 for (preset_idx = 2; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { 18913 18914 len32 = bxe_get_preset_regs_len(sc, preset_idx); 18915 18916 if (regdump_len32 < len32) 18917 regdump_len32 = len32; 18918 } 18919 18920 return regdump_len32; 18921 } 18922 18923 static int 18924 bxe_get_total_regs_len32(struct bxe_softc *sc) 18925 { 18926 uint32_t preset_idx; 18927 int regdump_len32 = 0; 18928 18929 18930 /* Calculate the total preset regs length */ 18931 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { 18932 regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx); 18933 } 18934 18935 return regdump_len32; 18936 } 18937 18938 static const uint32_t * 18939 __bxe_get_page_addr_ar(struct bxe_softc *sc) 18940 { 18941 if (CHIP_IS_E2(sc)) 18942 return page_vals_e2; 18943 else if (CHIP_IS_E3(sc)) 18944 return page_vals_e3; 18945 else 18946 return NULL; 18947 } 18948 18949 static uint32_t 18950 __bxe_get_page_reg_num(struct bxe_softc *sc) 18951 { 18952 if (CHIP_IS_E2(sc)) 18953 return PAGE_MODE_VALUES_E2; 18954 else if (CHIP_IS_E3(sc)) 18955 return PAGE_MODE_VALUES_E3; 18956 else 18957 return 0; 18958 } 18959 18960 static const uint32_t * 18961 __bxe_get_page_write_ar(struct bxe_softc *sc) 18962 { 18963 if (CHIP_IS_E2(sc)) 18964 return page_write_regs_e2; 18965 else if (CHIP_IS_E3(sc)) 18966 return page_write_regs_e3; 18967 else 18968 return NULL; 18969 } 18970 18971 static uint32_t 18972 __bxe_get_page_write_num(struct bxe_softc *sc) 18973 { 18974 if (CHIP_IS_E2(sc)) 18975 return PAGE_WRITE_REGS_E2; 18976 else if (CHIP_IS_E3(sc)) 18977 return PAGE_WRITE_REGS_E3; 18978 else 18979 return 0; 18980 } 18981 18982 static const struct reg_addr * 18983 __bxe_get_page_read_ar(struct bxe_softc *sc) 18984 { 18985 if (CHIP_IS_E2(sc)) 18986 return page_read_regs_e2; 18987 else if (CHIP_IS_E3(sc)) 18988 return page_read_regs_e3; 18989 else 18990 return NULL; 18991 } 18992 18993 static uint32_t 18994 __bxe_get_page_read_num(struct bxe_softc *sc) 18995 { 18996 if (CHIP_IS_E2(sc)) 18997 return PAGE_READ_REGS_E2; 18998 else if (CHIP_IS_E3(sc)) 18999 return PAGE_READ_REGS_E3; 19000 else 19001 return 0; 19002 } 19003 19004 static bool 19005 
bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info) 19006 { 19007 if (CHIP_IS_E1(sc)) 19008 return IS_E1_REG(reg_info->chips); 19009 else if (CHIP_IS_E1H(sc)) 19010 return IS_E1H_REG(reg_info->chips); 19011 else if (CHIP_IS_E2(sc)) 19012 return IS_E2_REG(reg_info->chips); 19013 else if (CHIP_IS_E3A0(sc)) 19014 return IS_E3A0_REG(reg_info->chips); 19015 else if (CHIP_IS_E3B0(sc)) 19016 return IS_E3B0_REG(reg_info->chips); 19017 else 19018 return 0; 19019 } 19020 19021 static bool 19022 bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info) 19023 { 19024 if (CHIP_IS_E1(sc)) 19025 return IS_E1_REG(wreg_info->chips); 19026 else if (CHIP_IS_E1H(sc)) 19027 return IS_E1H_REG(wreg_info->chips); 19028 else if (CHIP_IS_E2(sc)) 19029 return IS_E2_REG(wreg_info->chips); 19030 else if (CHIP_IS_E3A0(sc)) 19031 return IS_E3A0_REG(wreg_info->chips); 19032 else if (CHIP_IS_E3B0(sc)) 19033 return IS_E3B0_REG(wreg_info->chips); 19034 else 19035 return 0; 19036 } 19037 19038 /** 19039 * bxe_read_pages_regs - read "paged" registers 19040 * 19041 * @bp device handle 19042 * @p output buffer 19043 * 19044 * Reads "paged" memories: memories that may only be read by first writing to a 19045 * specific address ("write address") and then reading from a specific address 19046 * ("read address"). There may be more than one write address per "page" and 19047 * more than one read address per write address. 19048 */ 19049 static void 19050 bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) 19051 { 19052 uint32_t i, j, k, n; 19053 19054 /* addresses of the paged registers */ 19055 const uint32_t *page_addr = __bxe_get_page_addr_ar(sc); 19056 /* number of paged registers */ 19057 int num_pages = __bxe_get_page_reg_num(sc); 19058 /* write addresses */ 19059 const uint32_t *write_addr = __bxe_get_page_write_ar(sc); 19060 /* number of write addresses */ 19061 int write_num = __bxe_get_page_write_num(sc); 19062 /* read addresses info */ 19063 const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc); 19064 /* number of read addresses */ 19065 int read_num = __bxe_get_page_read_num(sc); 19066 uint32_t addr, size; 19067 19068 for (i = 0; i < num_pages; i++) { 19069 for (j = 0; j < write_num; j++) { 19070 REG_WR(sc, write_addr[j], page_addr[i]); 19071 19072 for (k = 0; k < read_num; k++) { 19073 if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) { 19074 size = read_addr[k].size; 19075 for (n = 0; n < size; n++) { 19076 addr = read_addr[k].addr + n*4; 19077 *p++ = REG_RD(sc, addr); 19078 } 19079 } 19080 } 19081 } 19082 } 19083 return; 19084 } 19085 19086 19087 static int 19088 bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) 19089 { 19090 uint32_t i, j, addr; 19091 const struct wreg_addr *wreg_addr_p = NULL; 19092 19093 if (CHIP_IS_E1(sc)) 19094 wreg_addr_p = &wreg_addr_e1; 19095 else if (CHIP_IS_E1H(sc)) 19096 wreg_addr_p = &wreg_addr_e1h; 19097 else if (CHIP_IS_E2(sc)) 19098 wreg_addr_p = &wreg_addr_e2; 19099 else if (CHIP_IS_E3A0(sc)) 19100 wreg_addr_p = &wreg_addr_e3; 19101 else if (CHIP_IS_E3B0(sc)) 19102 wreg_addr_p = &wreg_addr_e3b0; 19103 else 19104 return (-1); 19105 19106 /* Read the idle_chk registers */ 19107 for (i = 0; i < IDLE_REGS_COUNT; i++) { 19108 if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) && 19109 IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { 19110 for (j = 0; j < idle_reg_addrs[i].size; j++) 19111 *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4); 19112 } 19113 } 19114 19115 /* Read the regular registers */ 
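/*
 * (Each reg_addrs[] entry is included only if it exists on this chip
 * revision and belongs to the requested preset; an entry may describe a
 * block of several consecutive dwords, read one dword at a time.)
 */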
19116 for (i = 0; i < REGS_COUNT; i++) {
19117 if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
19118 IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
19119 for (j = 0; j < reg_addrs[i].size; j++)
19120 *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
19121 }
19122 }
19123
19124 /* Read the CAM registers */
19125 if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
19126 IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
19127 for (i = 0; i < wreg_addr_p->size; i++) {
19128 *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
19129
19130 /* In case of wreg_addr register, read additional
19131 registers from read_regs array
19132 */
19133 for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
19134 addr = *(wreg_addr_p->read_regs);
19135 *p++ = REG_RD(sc, addr + j*4);
19136 }
19137 }
19138 }
19139
19140 /* Paged registers are supported in E2 & E3 only */
19141 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
19142 /* Read "paged" registers */
19143 bxe_read_pages_regs(sc, p, preset);
19144 }
19145
19146 return 0;
19147 }
19148
19149 static int
19150 bxe_grc_dump(struct bxe_softc *sc, bxe_grcdump_t *dump)
19151 {
19152 int rval = 0;
19153 uint32_t preset_idx;
19154 uint8_t *buf;
19155 uint32_t size;
19156 struct dump_header *d_hdr;
19157
19158 ecore_disable_blocks_parity(sc);
19159
19160 buf = dump->grcdump;
19161 d_hdr = dump->grcdump;
19162
19163 d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1;
19164 d_hdr->version = BNX2X_DUMP_VERSION;
19165 d_hdr->preset = DUMP_ALL_PRESETS;
19166
19167 if (CHIP_IS_E1(sc)) {
19168 d_hdr->dump_meta_data = DUMP_CHIP_E1;
19169 } else if (CHIP_IS_E1H(sc)) {
19170 d_hdr->dump_meta_data = DUMP_CHIP_E1H;
19171 } else if (CHIP_IS_E2(sc)) {
19172 d_hdr->dump_meta_data = DUMP_CHIP_E2 |
19173 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
19174 } else if (CHIP_IS_E3A0(sc)) {
19175 d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
19176 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
19177 } else if (CHIP_IS_E3B0(sc)) {
19178 d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
19179 (BXE_PATH(sc) ?
DUMP_PATH_1 : DUMP_PATH_0);
19180 }
19181
19182 dump->grcdump_dwords = sizeof(struct dump_header) >> 2;
19183 buf += sizeof(struct dump_header);
19184
19185 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
19186
19187 /* Skip presets with IOR */
19188 if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
19189 (preset_idx == 11))
19190 continue;
19191
19192 rval = bxe_get_preset_regs(sc, sc->grc_dump, preset_idx);
19193
19194 if (rval)
19195 break;
19196
19197 size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
19198
19199 rval = copyout(sc->grc_dump, buf, size);
19200
19201 if (rval)
19202 break;
19203
19204 dump->grcdump_dwords += (size / (sizeof (uint32_t)));
19205
19206 buf += size;
19207 }
19208
19209 ecore_clear_blocks_parity(sc);
19210 ecore_enable_blocks_parity(sc);
19211
19212 sc->grcdump_done = 1;
19213 return (rval);
19214 }
19215
19216 static int
19217 bxe_add_cdev(struct bxe_softc *sc)
19218 {
19219 int max_preset_size;
19220
19221 max_preset_size = bxe_get_max_regs_len(sc) * (sizeof (uint32_t));
19222
19223 sc->grc_dump = malloc(max_preset_size, M_DEVBUF, M_NOWAIT);
19224
19225 if (sc->grc_dump == NULL)
19226 return (-1);
19227
19228 sc->ioctl_dev = make_dev(&bxe_cdevsw,
19229 sc->ifp->if_dunit,
19230 UID_ROOT,
19231 GID_WHEEL,
19232 0600,
19233 "%s",
19234 if_name(sc->ifp));
19235
19236 if (sc->ioctl_dev == NULL) {
19237
19238 free(sc->grc_dump, M_DEVBUF);
19239
19240 return (-1);
19241 }
19242
19243 sc->ioctl_dev->si_drv1 = sc;
19244
19245 return (0);
19246 }
19247
19248 static void
19249 bxe_del_cdev(struct bxe_softc *sc)
19250 {
19251 if (sc->ioctl_dev != NULL)
19252 destroy_dev(sc->ioctl_dev);
19253
19254 if (sc->grc_dump != NULL)
19255 free(sc->grc_dump, M_DEVBUF);
19256
19257 return;
19258 }
19259
19260 static int
19261 bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19262 struct thread *td)
19263 {
19264 struct bxe_softc *sc;
19265 int rval = 0;
19266 device_t pci_dev;
19267 bxe_grcdump_t *dump = NULL;
19268 int grc_dump_size;
19269
19270 if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19271 return (ENXIO);
19272
19273 pci_dev = sc->dev;
19274
19275 dump = (bxe_grcdump_t *)data;
19276
19277 switch(cmd) {
19278
19279 case BXE_GRC_DUMP_SIZE:
19280 dump->pci_func = sc->pcie_func;
19281 dump->grcdump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19282 sizeof(struct dump_header);
19283 break;
19284
19285 case BXE_GRC_DUMP:
19286
19287 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19288 sizeof(struct dump_header);
19289
19290 if ((sc->grc_dump == NULL) || (dump->grcdump == NULL) ||
19291 (dump->grcdump_size < grc_dump_size)) {
19292 rval = EINVAL;
19293 break;
19294 }
19295
19296 rval = bxe_grc_dump(sc, dump);
19297
19298 break;
19299
19300 default:
19301 break;
19302 }
19303
19304 return (rval);
19305 }
19306
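/*
 * Usage sketch for the ioctl interface above (illustrative only and kept
 * compiled out; the helper name, device path handling and error handling are
 * assumptions, not part of the driver). A userland consumer would first
 * issue BXE_GRC_DUMP_SIZE to learn how large a buffer is needed, allocate
 * it, point grcdump at it and then issue BXE_GRC_DUMP.
 */
#if 0
static int
bxe_grcdump_example(const char *devpath)
{
    bxe_grcdump_t dump;
    int fd, rc;

    memset(&dump, 0, sizeof(dump));

    if ((fd = open(devpath, O_RDWR)) < 0)
        return (errno);

    /* query the required buffer size (dump header plus all presets) */
    rc = ioctl(fd, BXE_GRC_DUMP_SIZE, &dump);
    if (rc == 0) {
        dump.grcdump = malloc(dump.grcdump_size);
        if (dump.grcdump != NULL) {
            /* collect the dump; grcdump_dwords reports how much was copied */
            rc = ioctl(fd, BXE_GRC_DUMP, &dump);
            free(dump.grcdump);
        } else {
            rc = ENOMEM;
        }
    }

    close(fd);
    return (rc);
}
#endif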