/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define BXE_DRIVER_VERSION "1.78.79"

#include "bxe.h"
#include "ecore_sp.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"

/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#ifndef CTLTYPE_U64
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero(0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif

/*
 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
    int pN;
    uint32_t init_crd;
    uint32_t crd;
    uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
    int pN;
    uint32_t lines_occup;
    uint32_t lines_freed;
};
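/*
 * Illustrative only: the FLR constants above are intended for polling loops
 * of the general shape below. This is a sketch, not the driver's actual
 * helper; the function name and register arguments are hypothetical.
 */
#if 0
static uint32_t
bxe_flr_poll_sketch(struct bxe_softc *sc, uint32_t reg, uint32_t expected)
{
    uint32_t val = 0;
    int cnt;

    for (cnt = 0; cnt < FLR_POLL_CNT; cnt++) { /* 200 * 50us = 10ms total */
        val = REG_RD(sc, reg);
        if (val == expected) {
            break;
        }
        DELAY(FLR_WAIT_INTERVAL);
    }

    return (val); /* a caller treats a lingering mismatch as a timeout */
}
#endif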
/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
    {
        BRCM_VENDORID,
        CHIP_NUM_57710,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57710 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711E,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711E 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57712_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57800,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57800_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57810,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57810_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57811,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57811_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 VF 10GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_4_10,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_2_20,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 2x20GbE"
    },
#endif
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 MF 10GbE"
    },
#if 0
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_VF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 VF 10GbE"
    },
#endif
    {
        0, 0, 0, 0, NULL
    }
};
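/*
 * Illustrative sketch (not the driver's actual bxe_probe()) of how the
 * table above is expected to be walked. The bxe_device_type field names
 * are assumed from bxe.h and the match logic is simplified (no
 * subvendor/subdevice handling):
 */
#if 0
static int
bxe_probe_sketch(device_t dev)
{
    struct bxe_device_type *t = bxe_devs;
    uint16_t vid = pci_get_vendor(dev);
    uint16_t did = pci_get_device(dev);

    while (t->bxe_name != NULL) { /* the all-zero entry terminates the table */
        if ((vid == t->bxe_vid) && (did == t->bxe_did)) {
            device_set_desc(dev, t->bxe_name);
            return (BUS_PROBE_DEFAULT);
        }
        t++;
    }

    return (ENXIO);
}
#endif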
MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);

/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
#if 0
    DEVMETHOD(device_suspend,   bxe_suspend),
    DEVMETHOD(device_resume,    bxe_resume),
#endif
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer;  /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list =
    LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");
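/*
 * Note: the hw.bxe.* values above and below are boot-time tunables
 * (CTLFLAG_RDTUN/CTLFLAG_TUN) and are normally set from /boot/loader.conf
 * before the module is loaded, e.g.:
 *
 *   hw.bxe.interrupt_mode=2
 *   hw.bxe.queue_count=8
 */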
/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");


#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)

static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
      8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
      8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
      8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
      8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
      8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
      4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
      4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
      8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
      8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
      8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
      8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
      4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
      8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
      8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
      8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
      8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
      8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
      8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
      8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
      8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
      8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
      8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
      8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregated_frames" },
    { STATS_OFFSET32(total_tpa_bytes_hi),
      8, STATS_FLAGS_FUNC, "tpa_bytes" },
#if 0
    { STATS_OFFSET32(recoverable_error),
      4, STATS_FLAGS_FUNC, "recoverable_errors" },
    { STATS_OFFSET32(unrecoverable_error),
      4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
#endif
    { STATS_OFFSET32(eee_tx_lpi),
      4, STATS_FLAGS_PORT, "eee_tx_lpi" },
    { STATS_OFFSET32(rx_calls),
      4, STATS_FLAGS_FUNC, "rx_calls" },
    { STATS_OFFSET32(rx_pkts),
      4, STATS_FLAGS_FUNC, "rx_pkts" },
    { STATS_OFFSET32(rx_tpa_pkts),
      4, STATS_FLAGS_FUNC, "rx_tpa_pkts" },
    { STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts" },
    { STATS_OFFSET32(rx_soft_errors),
      4, STATS_FLAGS_FUNC, "rx_soft_errors" },
    { STATS_OFFSET32(rx_hw_csum_errors),
      4, STATS_FLAGS_FUNC, "rx_hw_csum_errors" },
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip" },
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp" },
    { STATS_OFFSET32(rx_budget_reached),
      4, STATS_FLAGS_FUNC, "rx_budget_reached" },
    { STATS_OFFSET32(tx_pkts),
      4, STATS_FLAGS_FUNC, "tx_pkts" },
    { STATS_OFFSET32(tx_soft_errors),
      4, STATS_FLAGS_FUNC, "tx_soft_errors" },
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip" },
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp" },
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp" },
    { STATS_OFFSET32(tx_ofld_frames_lso),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso" },
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits" },
    { STATS_OFFSET32(tx_encap_failures),
      4, STATS_FLAGS_FUNC, "tx_encap_failures" },
    { STATS_OFFSET32(tx_hw_queue_full),
      4, STATS_FLAGS_FUNC, "tx_hw_queue_full" },
    { STATS_OFFSET32(tx_hw_max_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth" },
    { STATS_OFFSET32(tx_dma_mapping_failure),
      4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure" },
    { STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth" },
    { STATS_OFFSET32(tx_window_violation_std),
      4, STATS_FLAGS_FUNC, "tx_window_violation_std" },
    { STATS_OFFSET32(tx_window_violation_tso),
      4, STATS_FLAGS_FUNC, "tx_window_violation_tso" },
#if 0
    { STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
      4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6" },
    { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
      4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp" },
#endif
    { STATS_OFFSET32(tx_chain_lost_mbuf),
      4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf" },
    { STATS_OFFSET32(tx_frames_deferred),
      4, STATS_FLAGS_FUNC, "tx_frames_deferred" },
    { STATS_OFFSET32(tx_queue_xoff),
      4, STATS_FLAGS_FUNC, "tx_queue_xoff" },
    { STATS_OFFSET32(mbuf_defrag_attempts),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts" },
    { STATS_OFFSET32(mbuf_defrag_failures),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_failures" },
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed" },
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed" },
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed" },
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed" },
    { STATS_OFFSET32(mbuf_alloc_tx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tx" },
    { STATS_OFFSET32(mbuf_alloc_rx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_rx" },
    { STATS_OFFSET32(mbuf_alloc_sge),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_sge" },
    { STATS_OFFSET32(mbuf_alloc_tpa),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa" }
};

static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi),
      8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
      8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
      8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi),
      8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
      8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
      8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, "tpa_aggregated_frames" },
    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
      8, "tpa_bytes" },
    { Q_STATS_OFFSET32(rx_calls),
      4, "rx_calls" },
    { Q_STATS_OFFSET32(rx_pkts),
      4, "rx_pkts" },
    { Q_STATS_OFFSET32(rx_tpa_pkts),
      4, "rx_tpa_pkts" },
    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, "rx_jumbo_sge_pkts" },
    { Q_STATS_OFFSET32(rx_soft_errors),
      4, "rx_soft_errors" },
    { Q_STATS_OFFSET32(rx_hw_csum_errors),
      4, "rx_hw_csum_errors" },
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, "rx_ofld_frames_csum_ip" },
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, "rx_ofld_frames_csum_tcp_udp" },
    { Q_STATS_OFFSET32(rx_budget_reached),
      4, "rx_budget_reached" },
    { Q_STATS_OFFSET32(tx_pkts),
      4, "tx_pkts" },
    { Q_STATS_OFFSET32(tx_soft_errors),
      4, "tx_soft_errors" },
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, "tx_ofld_frames_csum_ip" },
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, "tx_ofld_frames_csum_tcp" },
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, "tx_ofld_frames_csum_udp" },
    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
      4, "tx_ofld_frames_lso" },
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, "tx_ofld_frames_lso_hdr_splits" },
    { Q_STATS_OFFSET32(tx_encap_failures),
      4, "tx_encap_failures" },
    { Q_STATS_OFFSET32(tx_hw_queue_full),
      4, "tx_hw_queue_full" },
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
      4, "tx_hw_max_queue_depth" },
    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
      4, "tx_dma_mapping_failure" },
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, "tx_max_drbr_queue_depth" },
    { Q_STATS_OFFSET32(tx_window_violation_std),
      4, "tx_window_violation_std" },
    { Q_STATS_OFFSET32(tx_window_violation_tso),
      4, "tx_window_violation_tso" },
#if 0
    { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
      4, "tx_unsupported_tso_request_ipv6" },
    { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
      4, "tx_unsupported_tso_request_not_tcp" },
#endif
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
      4, "tx_chain_lost_mbuf" },
    { Q_STATS_OFFSET32(tx_frames_deferred),
      4, "tx_frames_deferred" },
    { Q_STATS_OFFSET32(tx_queue_xoff),
      4, "tx_queue_xoff" },
    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
      4, "mbuf_defrag_attempts" },
    { Q_STATS_OFFSET32(mbuf_defrag_failures),
      4, "mbuf_defrag_failures" },
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, "mbuf_rx_bd_alloc_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, "mbuf_rx_bd_mapping_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, "mbuf_rx_tpa_alloc_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, "mbuf_rx_tpa_mapping_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, "mbuf_rx_sge_alloc_failed" },
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, "mbuf_rx_sge_mapping_failed" },
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
      4, "mbuf_alloc_tx" },
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
      4, "mbuf_alloc_rx" },
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
      4, "mbuf_alloc_sge" },
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
      4, "mbuf_alloc_tpa" }
};

#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
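/*
 * Illustrative only: entries with size 8 are 64-bit counters stored as two
 * consecutive dwords, where the "_hi" field named in the table is assumed
 * to be immediately followed by its "_lo" half. A sketch of reading one
 * such stat from a dword-addressable stats block:
 */
#if 0
static uint64_t
bxe_read_stat64_sketch(uint32_t *stats_base, int i)
{
    uint32_t hi = stats_base[bxe_eth_stats_arr[i].offset];
    uint32_t lo = stats_base[bxe_eth_stats_arr[i].offset + 1];

    return (((uint64_t)hi << 32) | lo);
}
#endif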
static void bxe_cmng_fns_init(struct bxe_softc *sc,
                              uint8_t read_cfg,
                              uint8_t cmng_type);
static int bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void storm_memset_cmng(struct bxe_softc *sc,
                              struct cmng_init *cmng,
                              uint8_t port);
static void bxe_set_reset_global(struct bxe_softc *sc);
static void bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
                                 int engine);
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
                                   uint8_t *global,
                                   uint8_t print);
static void bxe_int_disable(struct bxe_softc *sc);
static int bxe_release_leader_lock(struct bxe_softc *sc);
static void bxe_pf_disable(struct bxe_softc *sc);
static void bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t rx_bd_prod,
                                      uint16_t rx_cq_prod,
                                      uint16_t rx_sge_prod);
static void bxe_link_report_locked(struct bxe_softc *sc);
static void bxe_link_report(struct bxe_softc *sc);
static void bxe_link_status_update(struct bxe_softc *sc);
static void bxe_periodic_callout_func(void *xsc);
static void bxe_periodic_start(struct bxe_softc *sc);
static void bxe_periodic_stop(struct bxe_softc *sc);
static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                                uint16_t prev_index,
                                uint16_t index);
static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
                                 int queue);
static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
                                 uint16_t index);
static uint8_t bxe_txeof(struct bxe_softc *sc,
                         struct bxe_fastpath *fp);
static void bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
                                     struct mbuf *m,
                                     uint8_t contents);
static int bxe_alloc_mem(struct bxe_softc *sc);
static void bxe_free_mem(struct bxe_softc *sc);
static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int bxe_interrupt_attach(struct bxe_softc *sc);
static void bxe_interrupt_detach(struct bxe_softc *sc);
static void bxe_set_rx_mode(struct bxe_softc *sc);
static int bxe_init_locked(struct bxe_softc *sc);
static int bxe_stop_locked(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc,
                                   int load_mode);
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t unload_mode,
                                     uint8_t keep_link);

static void bxe_handle_sp_tq(void *context, int pending);
static void bxe_handle_fp_tq(void *context, int pending);


/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
uint32_t
calc_crc32(uint8_t  *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t  complement)
{
    uint32_t byte         = 0;
    uint32_t bit          = 0;
    uint8_t  msb          = 0;
    uint32_t temp         = 0;
    uint32_t shft         = 0;
    uint8_t  current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
    {
        return (crc32_result);
    }

    for (byte = 0; byte < crc32_length; byte = byte + 1)
    {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
        {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
            {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }

    /* Last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* Mirror */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
    {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* Swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* Complement */
    if (complement)
    {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}
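/*
 * Usage note (illustrative): a typical invocation of the CRC32 above would
 * look like
 *
 *   crc = calc_crc32((uint8_t *)buf, 32, 0xffffffff, 1);
 *
 * where the buffer length is a multiple of 8. A length that is not a
 * multiple of 8 (or a NULL buffer) is rejected and the seed is returned
 * unchanged.
 */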
int
bxe_test_bit(int                    nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

void
bxe_set_bit(unsigned int           nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

void
bxe_clear_bit(int                    nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

int
bxe_test_and_set_bit(int                    nr,
                     volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_test_and_clear_bit(int                    nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_cmpxchg(volatile int *addr,
            int          old,
            int          new)
{
    int x;
    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}
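/*
 * Illustrative only: the test-and-modify helpers above return the bit's
 * state *before* the update, so (with a hypothetical flags word):
 *
 *   if (bxe_test_and_set_bit(4, &flags)) {
 *       // bit 4 was already set, nothing changed
 *   }
 */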
/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load), so the error argument passed in here is how any
 * failures are reported back to the caller (paddr/nseg are zeroed on
 * error).
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
#if 0
        BLOGD(dma->sc, DBG_LOAD,
              "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif
    }
}

/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * allowed and release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t       size,
              struct bxe_dma   *dma,
              const char       *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}

void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma   *dma)
{
    if (dma->size > 0) {
#if 0
        BLOGD(sc, DBG_LOAD,
              "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif

        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}
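/*
 * Illustrative pairing of the DMA helpers above (a sketch; the function,
 * block name, and size are made up for the example):
 */
#if 0
static void
bxe_dma_usage_sketch(struct bxe_softc *sc)
{
    struct bxe_dma scratch;

    memset(&scratch, 0, sizeof(scratch)); /* dma->size must start out 0 */

    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &scratch, "scratch example") != 0) {
        return; /* bxe_dma_alloc already released anything it acquired */
    }

    /* ... use scratch.vaddr (KVA) and scratch.paddr (bus address) ... */

    bxe_dma_free(sc, &scratch);
}
#endif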
/*
 * These indirect read and write routines are only used during init.
 * The locking is handled by the MCP.
 */

void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t         addr,
               uint32_t         val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t         addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}

#if 0
void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl)
{
    uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC;

    switch (dmae->opcode & DMAE_COMMAND_DST) {
    case DMAE_CMD_DST_PCI:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    case DMAE_CMD_DST_GRC:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    default:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    }

}
#endif
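/*
 * Note: bxe_read_dmae()/bxe_write_dmae() further below fall back to these
 * indirect routines (or to the ecore string/indirect writes) for E1 chips
 * while sc->dmae_ready is not yet set.
 */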
static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource lock timeout!\n");
    return (-1);
}

static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}

static void
bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    BXE_PHY_LOCK(sc);
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}

static void
bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
    BXE_PHY_UNLOCK(sc);
}
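/*
 * Note on the layout used by the helpers above: each PF owns an 8-byte
 * lock control register pair; functions 0-5 index off
 * MISC_REG_DRIVER_CONTROL_1 and functions 6-7 off MISC_REG_DRIVER_CONTROL_7.
 * Writing the resource bit at (reg + 4) requests the lock, reading it back
 * set confirms ownership, and writing the bit to the base register
 * releases it.
 */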
/*
 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * would attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its
 * own access corrupted by pf B).
 */
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }

        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface\n");
        return (-1);
    }

    return (0);
}

static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }

        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface\n");
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}

static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
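/*
 * The NVRAM helpers below all follow the same access sequence: take the
 * per-port SW arbitration lock, enable the NVM interface, issue one or
 * more dword commands flagged FIRST/LAST as appropriate, then disable the
 * interface and drop the lock.
 */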
static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t         offset,
                     uint32_t         *ret_val,
                     uint32_t         cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order,
             * but callers expect an array of bytes;
             * converting to big-endian does the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired\n");
    }

    return (rc);
}

static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t         offset,
               uint8_t          *ret_buf,
               int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset   += sizeof(uint32_t);
        ret_buf  += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t         offset,
                      uint32_t         val,
                      uint32_t         cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired\n");
    }

    return (rc);
}

#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
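/* e.g. BYTE_OFFSET(0x7) == 24: the bit shift of byte 3 within its dword */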
static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t         offset,
                 uint8_t          *data_buf,
                 int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes;
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t         offset,
                uint8_t          *data_buf,
                int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset         += sizeof(uint32_t);
        data_buf       += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc    *sc,
              struct dmae_command *dmae,
              int                 idx)
{
    uint32_t cmd_offset;
    int i;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
    for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    REG_WR(sc, dmae_reg_go_c[idx], 1);
}

uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t  comp_type)
{
    return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                      DMAE_COMMAND_C_TYPE_ENABLE));
}
uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_COMMAND_SRC_RESET);
}

uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t          src_type,
                uint8_t          dst_type,
                uint8_t          with_comp,
                uint8_t          comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
               (dst_type << DMAE_COMMAND_DST_SHIFT));

    opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}

static void
bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
                        struct dmae_command *dmae,
                        uint8_t             src_type,
                        uint8_t             dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_command));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
                         struct dmae_command *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    DELAY(5);

    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout!\n");
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error!\n");
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);
    return (0);
}
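/*
 * Note: the completion poll above waits up to roughly 4000 * 50us = 200ms
 * (100x that on slow emulation/FPGA platforms) before declaring a DMAE
 * timeout.
 */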
void
bxe_read_dmae(struct bxe_softc *sc,
              uint32_t         src_addr,
              uint32_t         len32)
{
    struct dmae_command dmae;
    uint32_t *data;
    int i, rc;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                          REG_RD(sc, (src_addr + (i * 4)));
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t       dma_addr,
               uint32_t         dst_addr,
               uint32_t         len32)
{
    struct dmae_command dmae;
    int rc;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        } else {
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t       phys_addr,
                        uint32_t         addr,
                        uint32_t         len)
{
    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
    int offset = 0;

    while (len > dmae_wr_max) {
        bxe_write_dmae(sc,
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
                       dmae_wr_max);
        offset += (dmae_wr_max * 4);
        len    -= dmae_wr_max;
    }

    bxe_write_dmae(sc,
                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
                   len);
}

void
bxe_set_ctx_validation(struct bxe_softc   *sc,
                       struct eth_context *cxt,
                       uint32_t           cid)
{
    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void
bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
                            uint8_t          port,
                            uint8_t          fw_sb_id,
                            uint8_t          sb_index,
                            uint8_t          ticks)
{
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, addr, ticks);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
}
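/*
 * The ticks value programmed above is supplied by
 * bxe_update_coalesce_sb_index() below as (usec / 4), i.e. the hardware
 * timeout appears to be in 4-usec units; the hc_rx_ticks/hc_tx_ticks
 * tunables are presumably the usual source of that usec value.
 */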
static void
bxe_storm_memset_hc_disable(struct bxe_softc *sc,
                            uint8_t          port,
                            uint16_t         fw_sb_id,
                            uint8_t          sb_index,
                            uint8_t          disable)
{
    uint32_t enable_flag =
        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
    uint8_t flags;

    /* clear and set */
    flags = REG_RD8(sc, addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    flags |= enable_flag;
    REG_WR8(sc, addr, flags);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);
}

void
bxe_update_coalesce_sb_index(struct bxe_softc *sc,
                             uint8_t          fw_sb_id,
                             uint8_t          sb_index,
                             uint8_t          disable,
                             uint16_t         usec)
{
    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
}

void
elink_cb_udelay(struct bxe_softc *sc,
                uint32_t         usecs)
{
    DELAY(usecs);
}

uint32_t
elink_cb_reg_read(struct bxe_softc *sc,
                  uint32_t         reg_addr)
{
    return (REG_RD(sc, reg_addr));
}

void
elink_cb_reg_write(struct bxe_softc *sc,
                   uint32_t         reg_addr,
                   uint32_t         val)
{
    REG_WR(sc, reg_addr, val);
}

void
elink_cb_reg_wb_write(struct bxe_softc *sc,
                      uint32_t         offset,
                      uint32_t         *wb_write,
                      uint16_t         len)
{
    REG_WR_DMAE(sc, offset, wb_write, len);
}

void
elink_cb_reg_wb_read(struct bxe_softc *sc,
                     uint32_t         offset,
                     uint32_t         *wb_write,
                     uint16_t         len)
{
    REG_RD_DMAE(sc, offset, wb_write, len);
}

uint8_t
elink_cb_path_id(struct bxe_softc *sc)
{
    return (SC_PATH(sc));
}
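/*
 * The elink_cb_* routines above and below are thin callbacks through which
 * the shared elink PHY/link-management code performs delays, register and
 * wide-bus I/O, and event logging using this driver's primitives.
 */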
1931 { 1932 /* XXX */ 1933 #if 0 1934 //va_list ap; 1935 va_start(ap, elink_log_id); 1936 _XXX_(sc, lm_log_id, ap); 1937 va_end(ap); 1938 #endif 1939 BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); 1940 } 1941 1942 static int 1943 bxe_set_spio(struct bxe_softc *sc, 1944 int spio, 1945 uint32_t mode) 1946 { 1947 uint32_t spio_reg; 1948 1949 /* Only 2 SPIOs are configurable */ 1950 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 1951 BLOGE(sc, "Invalid SPIO 0x%x\n", spio); 1952 return (-1); 1953 } 1954 1955 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1956 1957 /* read SPIO and mask except the float bits */ 1958 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 1959 1960 switch (mode) { 1961 case MISC_SPIO_OUTPUT_LOW: 1962 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); 1963 /* clear FLOAT and set CLR */ 1964 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1965 spio_reg |= (spio << MISC_SPIO_CLR_POS); 1966 break; 1967 1968 case MISC_SPIO_OUTPUT_HIGH: 1969 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); 1970 /* clear FLOAT and set SET */ 1971 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1972 spio_reg |= (spio << MISC_SPIO_SET_POS); 1973 break; 1974 1975 case MISC_SPIO_INPUT_HI_Z: 1976 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); 1977 /* set FLOAT */ 1978 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 1979 break; 1980 1981 default: 1982 break; 1983 } 1984 1985 REG_WR(sc, MISC_REG_SPIO, spio_reg); 1986 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1987 1988 return (0); 1989 } 1990 1991 static int 1992 bxe_gpio_read(struct bxe_softc *sc, 1993 int gpio_num, 1994 uint8_t port) 1995 { 1996 /* The GPIO should be swapped if swap register is set and active */ 1997 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 1998 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 1999 int gpio_shift = (gpio_num + 2000 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2001 uint32_t gpio_mask = (1 << gpio_shift); 2002 uint32_t gpio_reg; 2003 2004 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2005 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2006 return (-1); 2007 } 2008 2009 /* read GPIO value */ 2010 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2011 2012 /* get the requested pin value */ 2013 return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; 2014 } 2015 2016 static int 2017 bxe_gpio_write(struct bxe_softc *sc, 2018 int gpio_num, 2019 uint32_t mode, 2020 uint8_t port) 2021 { 2022 /* The GPIO should be swapped if swap register is set and active */ 2023 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2024 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2025 int gpio_shift = (gpio_num + 2026 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2027 uint32_t gpio_mask = (1 << gpio_shift); 2028 uint32_t gpio_reg; 2029 2030 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2031 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2032 return (-1); 2033 } 2034 2035 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2036 2037 /* read GPIO and mask except the float bits */ 2038 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 2039 2040 switch (mode) { 2041 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2042 BLOGD(sc, DBG_PHY, 2043 "Set GPIO %d (shift %d) -> output low\n", 2044 gpio_num, gpio_shift); 2045 /* clear FLOAT and set CLR */ 2046 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2047 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 2048 break; 2049 2050 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2051 BLOGD(sc, DBG_PHY, 2052 "Set GPIO %d (shift %d) -> output high\n", 2053 gpio_num, gpio_shift); 2054 /* clear FLOAT and set SET */ 2055 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2056 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 2057 break; 2058 2059 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2060 BLOGD(sc, DBG_PHY, 2061 "Set GPIO %d (shift %d) -> input\n", 2062 gpio_num, gpio_shift); 2063 /* set FLOAT */ 2064 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2065 break; 2066 2067 default: 2068 break; 2069 } 2070 2071 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2072 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2073 2074 return (0); 2075 } 2076 2077 static int 2078 bxe_gpio_mult_write(struct bxe_softc *sc, 2079 uint8_t pins, 2080 uint32_t mode) 2081 { 2082 uint32_t gpio_reg; 2083 2084 /* any port swapping should be handled by caller */ 2085 2086 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2087 2088 /* read GPIO and mask except the float bits */ 2089 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2090 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2091 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2092 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2093 2094 switch (mode) { 2095 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2096 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); 2097 /* set CLR */ 2098 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2099 break; 2100 2101 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2102 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); 2103 /* set SET */ 2104 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2105 break; 2106 2107 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2108 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); 2109 /* set FLOAT */ 2110 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2111 break; 2112 2113 default: 2114 BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode); 2115 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2116 return (-1); 2117 } 2118 2119 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2120 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2121 2122 return (0); 2123 } 2124 2125 static int 2126 bxe_gpio_int_write(struct bxe_softc *sc, 2127 int gpio_num, 2128 uint32_t mode, 2129 uint8_t port) 2130 { 2131 /* The GPIO should be swapped if swap register is set and active */ 2132 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2133 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2134 int gpio_shift = (gpio_num + 2135 (gpio_port ? 
                                    MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO int */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);

    switch (mode) {
    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
        BLOGD(sc, DBG_PHY,
              "Clear GPIO INT %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear SET and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
        BLOGD(sc, DBG_PHY,
              "Set GPIO INT %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear CLR and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}

uint32_t
elink_cb_gpio_read(struct bxe_softc *sc,
                   uint16_t gpio_num,
                   uint8_t port)
{
    return (bxe_gpio_read(sc, gpio_num, port));
}

uint8_t
elink_cb_gpio_write(struct bxe_softc *sc,
                    uint16_t gpio_num,
                    uint8_t mode, /* 0=low 1=high */
                    uint8_t port)
{
    return (bxe_gpio_write(sc, gpio_num, mode, port));
}

uint8_t
elink_cb_gpio_mult_write(struct bxe_softc *sc,
                         uint8_t pins,
                         uint8_t mode) /* 0=low 1=high */
{
    return (bxe_gpio_mult_write(sc, pins, mode));
}

uint8_t
elink_cb_gpio_int_write(struct bxe_softc *sc,
                        uint16_t gpio_num,
                        uint8_t mode, /* 0=low 1=high */
                        uint8_t port)
{
    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
}

void
elink_cb_notify_link_changed(struct bxe_softc *sc)
{
    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
}

/* send the MCP a request, block until there is a reply */
uint32_t
elink_cb_fw_command(struct bxe_softc *sc,
                    uint32_t command,
                    uint32_t param)
{
    int mb_idx = SC_FW_MB_IDX(sc);
    uint32_t seq;
    uint32_t rc = 0;
    uint32_t cnt = 1;
    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;

    BXE_FWMB_LOCK(sc);

    seq = ++sc->fw_seq;
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));

    BLOGD(sc, DBG_PHY,
          "wrote command 0x%08x to FW MB param 0x%08x\n",
          (command | seq), param);

    /* Let the FW do its magic. Give it up to 5 seconds... */
    do {
        DELAY(delay * 1000);
        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

    BLOGD(sc, DBG_PHY,
          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
          cnt*delay, rc, seq);

    /* is this a reply to our command? */
    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
        rc &= FW_MSG_CODE_MASK;
    } else {
        /* Ruh-roh!
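         * The MCP never echoed our sequence number back. An illustrative
         * sketch of the handshake implemented above (names taken from
         * this function, not new code):
         *
         *   seq = ++sc->fw_seq;
         *   SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
         *   poll SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header) until its
         *   FW_MSG_SEQ_NUMBER_MASK bits equal seq (up to ~5 seconds)
         *
         * Returning 0 below tells the caller the command was never
         * accepted.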
         */
        BLOGE(sc, "FW failed to respond!\n");
        // XXX bxe_fw_dump(sc);
        rc = 0;
    }

    BXE_FWMB_UNLOCK(sc);
    return (rc);
}

static uint32_t
bxe_fw_command(struct bxe_softc *sc,
               uint32_t command,
               uint32_t param)
{
    return (elink_cb_fw_command(sc, command, param));
}

static void
__storm_memset_dma_mapping(struct bxe_softc *sc,
                           uint32_t addr,
                           bus_addr_t mapping)
{
    REG_WR(sc, addr, U64_LO(mapping));
    REG_WR(sc, (addr + 4), U64_HI(mapping));
}

static void
storm_memset_spq_addr(struct bxe_softc *sc,
                      bus_addr_t mapping,
                      uint16_t abs_fid)
{
    uint32_t addr = (XSEM_REG_FAST_MEMORY +
                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
    __storm_memset_dma_mapping(sc, addr, mapping);
}

static void
storm_memset_vf_to_pf(struct bxe_softc *sc,
                      uint16_t abs_fid,
                      uint16_t pf_id)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
}

static void
storm_memset_func_en(struct bxe_softc *sc,
                     uint16_t abs_fid,
                     uint8_t enable)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
}

static void
storm_memset_eq_data(struct bxe_softc *sc,
                     struct event_ring_data *eq_data,
                     uint16_t pfid)
{
    uint32_t addr;
    size_t size;

    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
    size = sizeof(struct event_ring_data);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
}

static void
storm_memset_eq_prod(struct bxe_softc *sc,
                     uint16_t eq_prod,
                     uint16_t pfid)
{
    uint32_t addr = (BAR_CSTRORM_INTMEM +
                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
    REG_WR16(sc, addr, eq_prod);
}

/*
 * Post a slowpath command.
 *
 * A slowpath command is used to propagate a configuration change through
 * the controller in a controlled manner, allowing each STORM processor and
 * other H/W blocks to phase in the change. The commands sent on the
 * slowpath are referred to as ramrods. Depending on the ramrod used the
 * completion of the ramrod will occur in different ways. Here's a
 * breakdown of ramrods and how they complete:
 *
 * RAMROD_CMD_ID_ETH_PORT_SETUP
 * Used to setup the leading connection on a port. Completes on the
 * Receive Completion Queue (RCQ) of that port (typically fp[0]).
 *
 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
 * Used to setup an additional connection on a port. Completes on the
 * RCQ of the multi-queue/RSS connection being initialized.
 *
 * RAMROD_CMD_ID_ETH_STAT_QUERY
 * Used to force the storm processors to update the statistics database
 * in host memory. This ramrod is sent on the leading connection CID and
 * completes as an index increment of the CSTORM on the default status
 * block.
 *
 * RAMROD_CMD_ID_ETH_UPDATE
 * Used to update the state of the leading connection, usually to update
 * the RSS indirection table. Completes on the RCQ of the leading
 * connection. (Not currently used under FreeBSD until OS support becomes
 * available.)
 *
 * RAMROD_CMD_ID_ETH_HALT
 * Used when tearing down a connection prior to driver unload. Completes
 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
 * use this on the leading connection.
 *
 * RAMROD_CMD_ID_ETH_SET_MAC
 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
 * the RCQ of the leading connection.
 *
 * RAMROD_CMD_ID_ETH_CFC_DEL
 * Used when tearing down a connection prior to driver unload. Completes
 * on the RCQ of the leading connection (since the current connection
 * has been completely removed from controller memory).
 *
 * RAMROD_CMD_ID_ETH_PORT_DEL
 * Used to tear down the leading connection prior to driver unload,
 * typically fp[0]. Completes as an index increment of the CSTORM on the
 * default status block.
 *
 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
 * Used for connection offload. Completes on the RCQ of the multi-queue
 * RSS connection that is being offloaded. (Not currently used under
 * FreeBSD.)
 *
 * There can only be one command pending per function.
 *
 * Returns:
 * 0 = Success, !0 = Failure.
 */

/* must be called under the spq lock */
static inline
struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
{
    struct eth_spe *next_spe = sc->spq_prod_bd;

    if (sc->spq_prod_bd == sc->spq_last_bd) {
        /* wrap back to the first eth_spq */
        sc->spq_prod_bd = sc->spq;
        sc->spq_prod_idx = 0;
    } else {
        sc->spq_prod_bd++;
        sc->spq_prod_idx++;
    }

    return (next_spe);
}

/* must be called under the spq lock */
static inline
void bxe_sp_prod_update(struct bxe_softc *sc)
{
    int func = SC_FUNC(sc);

    /*
     * Make sure that BD data is updated before writing the producer.
     * BD data is written to the memory, the producer is read from the
     * memory, thus we need a full memory barrier to ensure the ordering.
     */
    mb();

    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
             sc->spq_prod_idx);

    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
}

/**
 * bxe_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd: command to check
 * @cmd_type: command type
 */
static inline
int bxe_is_contextless_ramrod(int cmd,
                              int cmd_type)
{
    if ((cmd_type == NONE_CONNECTION_TYPE) ||
        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
        return (TRUE);
    } else {
        return (FALSE);
    }
}

/**
 * bxe_sp_post - place a single command on an SP ring
 *
 * @sc: driver handle
 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid: SW CID the command is related to
 * @data_hi: command private data address (high 32 bits)
 * @data_lo: command private data address (low 32 bits)
 * @cmd_type: command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two uint32 fields.
 */
int
bxe_sp_post(struct bxe_softc *sc,
            int command,
            int cid,
            uint32_t data_hi,
            uint32_t data_lo,
            int cmd_type)
{
    struct eth_spe *spe;
    uint16_t type;
    int common;

    common = bxe_is_contextless_ramrod(command, cmd_type);

    BXE_SP_LOCK(sc);

    if (common) {
        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
            BLOGE(sc, "EQ ring is full!\n");
            BXE_SP_UNLOCK(sc);
            return (-1);
        }
    } else {
        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
            BLOGE(sc, "SPQ ring is full!\n");
            BXE_SP_UNLOCK(sc);
            return (-1);
        }
    }

    spe = bxe_sp_get_next(sc);

    /* CID needs the port number to be encoded in it */
    spe->hdr.conn_and_cmd_data =
        htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid));

    type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;

    /* TBD: Check if it works for VFs */
    type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) &
             SPE_HDR_FUNCTION_ID);

    spe->hdr.type = htole16(type);

    spe->data.update_data_addr.hi = htole32(data_hi);
    spe->data.update_data_addr.lo = htole32(data_lo);

    /*
     * It's ok if the actual decrement is issued towards the memory
     * somewhere between the lock and unlock. Thus no more explicit
     * memory barrier is needed.
     */
    if (common) {
        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
    } else {
        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
    }

    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
    BLOGD(sc, DBG_SP,
          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
          sc->spq_prod_idx,
          (uint32_t)U64_HI(sc->spq_dma.paddr),
          (uint32_t)(U64_LO(sc->spq_dma.paddr) +
                     (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
          command,
          common,
          HW_CID(sc, cid),
          data_hi,
          data_lo,
          type,
          atomic_load_acq_long(&sc->cq_spq_left),
          atomic_load_acq_long(&sc->eq_spq_left));

    bxe_sp_prod_update(sc);

    BXE_SP_UNLOCK(sc);
    return (0);
}

/**
 * bxe_debug_print_ind_table - prints the indirection table configuration.
 *
 * @sc: driver handle
 * @p: pointer to rss configuration
 */
#if 0
static void
bxe_debug_print_ind_table(struct bxe_softc *sc,
                          struct ecore_config_rss_params *p)
{
    int i;

    BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n");
    BLOGD(sc, DBG_LOAD, " 0x0000: ");
    for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
        BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]);

        /* Print 4 bytes in a line */
        if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
            (((i + 1) & 0x3) == 0)) {
            BLOGD(sc, DBG_LOAD, "\n");
            BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1);
        }
    }

    BLOGD(sc, DBG_LOAD, "\n");
}
#endif

/*
 * FreeBSD device probe function.
 *
 * Compares the device found to the driver's list of supported devices and
 * reports back to the BSD loader whether this is the right driver for the
 * device. This is the driver entry function called from the "kldload"
 * command.
 *
 * Returns:
 * BUS_PROBE_DEFAULT on success, positive value on failure.
 */
static int
bxe_probe(device_t dev)
{
    struct bxe_softc *sc;
    struct bxe_device_type *t;
    char *descbuf;
    uint16_t did, sdid, svid, vid;

    /* Find our device structure */
    sc = device_get_softc(dev);
    sc->dev = dev;
    t = bxe_devs;

    /* Get the data for the device to be probed. */
    vid  = pci_get_vendor(dev);
    did  = pci_get_device(dev);
    svid = pci_get_subvendor(dev);
    sdid = pci_get_subdevice(dev);

    BLOGD(sc, DBG_LOAD,
          "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
          "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);

    /* Look through the list of known devices for a match. */
    while (t->bxe_name != NULL) {
        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
            if (descbuf == NULL)
                return (ENOMEM);

            /* Print out the device identity.
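             * For example (derived from the expressions below), a PCI
             * revision ID of 0x11 is reported as "B1": the upper nibble
             * selects the stepping letter ('A' + 1) and the lower nibble
             * is printed as-is.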
*/ 2626 snprintf(descbuf, BXE_DEVDESC_MAX, 2627 "%s (%c%d) BXE v:%s\n", t->bxe_name, 2628 (((pci_read_config(dev, PCIR_REVID, 4) & 2629 0xf0) >> 4) + 'A'), 2630 (pci_read_config(dev, PCIR_REVID, 4) & 0xf), 2631 BXE_DRIVER_VERSION); 2632 2633 device_set_desc_copy(dev, descbuf); 2634 free(descbuf, M_TEMP); 2635 return (BUS_PROBE_DEFAULT); 2636 } 2637 t++; 2638 } 2639 2640 return (ENXIO); 2641 } 2642 2643 static void 2644 bxe_init_mutexes(struct bxe_softc *sc) 2645 { 2646 #ifdef BXE_CORE_LOCK_SX 2647 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), 2648 "bxe%d_core_lock", sc->unit); 2649 sx_init(&sc->core_sx, sc->core_sx_name); 2650 #else 2651 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), 2652 "bxe%d_core_lock", sc->unit); 2653 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); 2654 #endif 2655 2656 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), 2657 "bxe%d_sp_lock", sc->unit); 2658 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); 2659 2660 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), 2661 "bxe%d_dmae_lock", sc->unit); 2662 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); 2663 2664 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), 2665 "bxe%d_phy_lock", sc->unit); 2666 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); 2667 2668 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), 2669 "bxe%d_fwmb_lock", sc->unit); 2670 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); 2671 2672 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), 2673 "bxe%d_print_lock", sc->unit); 2674 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); 2675 2676 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), 2677 "bxe%d_stats_lock", sc->unit); 2678 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); 2679 2680 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), 2681 "bxe%d_mcast_lock", sc->unit); 2682 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); 2683 } 2684 2685 static void 2686 bxe_release_mutexes(struct bxe_softc *sc) 2687 { 2688 #ifdef BXE_CORE_LOCK_SX 2689 sx_destroy(&sc->core_sx); 2690 #else 2691 if (mtx_initialized(&sc->core_mtx)) { 2692 mtx_destroy(&sc->core_mtx); 2693 } 2694 #endif 2695 2696 if (mtx_initialized(&sc->sp_mtx)) { 2697 mtx_destroy(&sc->sp_mtx); 2698 } 2699 2700 if (mtx_initialized(&sc->dmae_mtx)) { 2701 mtx_destroy(&sc->dmae_mtx); 2702 } 2703 2704 if (mtx_initialized(&sc->port.phy_mtx)) { 2705 mtx_destroy(&sc->port.phy_mtx); 2706 } 2707 2708 if (mtx_initialized(&sc->fwmb_mtx)) { 2709 mtx_destroy(&sc->fwmb_mtx); 2710 } 2711 2712 if (mtx_initialized(&sc->print_mtx)) { 2713 mtx_destroy(&sc->print_mtx); 2714 } 2715 2716 if (mtx_initialized(&sc->stats_mtx)) { 2717 mtx_destroy(&sc->stats_mtx); 2718 } 2719 2720 if (mtx_initialized(&sc->mcast_mtx)) { 2721 mtx_destroy(&sc->mcast_mtx); 2722 } 2723 } 2724 2725 static void 2726 bxe_tx_disable(struct bxe_softc* sc) 2727 { 2728 if_t ifp = sc->ifp; 2729 2730 /* tell the stack the driver is stopped and TX queue is full */ 2731 if (ifp != NULL) { 2732 if_setdrvflags(ifp, 0); 2733 } 2734 } 2735 2736 static void 2737 bxe_drv_pulse(struct bxe_softc *sc) 2738 { 2739 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, 2740 sc->fw_drv_pulse_wr_seq); 2741 } 2742 2743 static inline uint16_t 2744 bxe_tx_avail(struct bxe_softc *sc, 2745 struct bxe_fastpath *fp) 2746 { 2747 int16_t used; 2748 uint16_t prod; 2749 uint16_t cons; 2750 2751 prod = fp->tx_bd_prod; 2752 cons = fp->tx_bd_cons; 2753 2754 used = SUB_S16(prod, cons); 2755 
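    /*
     * Illustrative note (not in the original source): prod and cons are
     * free-running 16-bit counters, so the SUB_S16() result above stays
     * correct across wrap. For example, prod = 5 (after wrapping past
     * 65535) and cons = 65531 gives (int16_t)(5 - 65531) = 10 BDs in use.
     */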
#if 0
    KASSERT((used >= 0), ("used tx bds < 0"));
    KASSERT((used <= sc->tx_ring_size), ("used tx bds > tx_ring_size"));
    KASSERT(((sc->tx_ring_size - used) <= MAX_TX_AVAIL),
            ("invalid number of tx bds used"));
#endif

    return ((int16_t)(sc->tx_ring_size) - used);
}

static inline int
bxe_tx_queue_has_work(struct bxe_fastpath *fp)
{
    uint16_t hw_cons;

    mb(); /* status block fields can change */
    hw_cons = le16toh(*fp->tx_cons_sb);
    return (hw_cons != fp->tx_pkt_cons);
}

static inline uint8_t
bxe_has_tx_work(struct bxe_fastpath *fp)
{
    /* expand this for multi-cos if ever supported */
    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
}

static inline int
bxe_has_rx_work(struct bxe_fastpath *fp)
{
    uint16_t rx_cq_cons_sb;

    mb(); /* status block fields can change */
    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
        rx_cq_cons_sb++;
    return (fp->rx_cq_cons != rx_cq_cons_sb);
}

static void
bxe_sp_event(struct bxe_softc *sc,
             struct bxe_fastpath *fp,
             union eth_rx_cqe *rr_cqe)
{
    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;

    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);

#if 0
    /*
     * If cid is within VF range, replace the slowpath object with the
     * one corresponding to this VF
     */
    if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) {
        bxe_iov_set_queue_sp_obj(sc, cid, &q_obj);
    }
#endif

    switch (command) {
    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
        drv_cmd = ECORE_Q_CMD_UPDATE;
        break;

    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_SETUP;
        break;

    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
        break;

    case (RAMROD_CMD_ID_ETH_HALT):
        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_HALT;
        break;

    case (RAMROD_CMD_ID_ETH_TERMINATE):
        BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_TERMINATE;
        break;

    case (RAMROD_CMD_ID_ETH_EMPTY):
        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_EMPTY;
        break;

    default:
        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
              command, fp->index);
        return;
    }

    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
        /*
         * q_obj->complete_cmd() failure means that this was
         * an unexpected completion.
         *
         * In this case we don't want to increase the sc->spq_left
         * because apparently we haven't sent this command in the first
         * place.
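         *
         * (In other words the ECORE queue object rejected the state
         * transition, so no SPQ credit was ever consumed for this command
         * and there is no credit to return here.)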
2864 */ 2865 // bxe_panic(sc, ("Unexpected SP completion\n")); 2866 return; 2867 } 2868 2869 #if 0 2870 /* SRIOV: reschedule any 'in_progress' operations */ 2871 bxe_iov_sp_event(sc, cid, TRUE); 2872 #endif 2873 2874 atomic_add_acq_long(&sc->cq_spq_left, 1); 2875 2876 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", 2877 atomic_load_acq_long(&sc->cq_spq_left)); 2878 2879 #if 0 2880 if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && 2881 (!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) { 2882 /* 2883 * If Queue update ramrod is completed for last Queue in AFEX VIF set 2884 * flow, then ACK MCP at the end. Mark pending ACK to MCP bit to 2885 * prevent case that both bits are cleared. At the end of load/unload 2886 * driver checks that sp_state is cleared and this order prevents 2887 * races. 2888 */ 2889 bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state); 2890 wmb(); 2891 bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state); 2892 2893 /* schedule the sp task as MCP ack is required */ 2894 bxe_schedule_sp_task(sc); 2895 } 2896 #endif 2897 } 2898 2899 /* 2900 * The current mbuf is part of an aggregation. Move the mbuf into the TPA 2901 * aggregation queue, put an empty mbuf back onto the receive chain, and mark 2902 * the current aggregation queue as in-progress. 2903 */ 2904 static void 2905 bxe_tpa_start(struct bxe_softc *sc, 2906 struct bxe_fastpath *fp, 2907 uint16_t queue, 2908 uint16_t cons, 2909 uint16_t prod, 2910 struct eth_fast_path_rx_cqe *cqe) 2911 { 2912 struct bxe_sw_rx_bd tmp_bd; 2913 struct bxe_sw_rx_bd *rx_buf; 2914 struct eth_rx_bd *rx_bd; 2915 int max_agg_queues; 2916 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 2917 uint16_t index; 2918 2919 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " 2920 "cons=%d prod=%d\n", 2921 fp->index, queue, cons, prod); 2922 2923 max_agg_queues = MAX_AGG_QS(sc); 2924 2925 KASSERT((queue < max_agg_queues), 2926 ("fp[%02d] invalid aggr queue (%d >= %d)!", 2927 fp->index, queue, max_agg_queues)); 2928 2929 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), 2930 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", 2931 fp->index, queue)); 2932 2933 /* copy the existing mbuf and mapping from the TPA pool */ 2934 tmp_bd = tpa_info->bd; 2935 2936 if (tmp_bd.m == NULL) { 2937 BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n", 2938 fp->index, queue); 2939 /* XXX Error handling? */ 2940 return; 2941 } 2942 2943 /* change the TPA queue to the start state */ 2944 tpa_info->state = BXE_TPA_STATE_START; 2945 tpa_info->placement_offset = cqe->placement_offset; 2946 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); 2947 tpa_info->vlan_tag = le16toh(cqe->vlan_tag); 2948 tpa_info->len_on_bd = le16toh(cqe->len_on_bd); 2949 2950 fp->rx_tpa_queue_used |= (1 << queue); 2951 2952 /* 2953 * If all the buffer descriptors are filled with mbufs then fill in 2954 * the current consumer index with a new BD. Else if a maximum Rx 2955 * buffer limit is imposed then fill in the next producer index. 2956 */ 2957 index = (sc->max_rx_bufs != RX_BD_USABLE) ? 
2958 prod : cons; 2959 2960 /* move the received mbuf and mapping to TPA pool */ 2961 tpa_info->bd = fp->rx_mbuf_chain[cons]; 2962 2963 /* release any existing RX BD mbuf mappings */ 2964 if (cons != index) { 2965 rx_buf = &fp->rx_mbuf_chain[cons]; 2966 2967 if (rx_buf->m_map != NULL) { 2968 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 2969 BUS_DMASYNC_POSTREAD); 2970 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 2971 } 2972 2973 /* 2974 * We get here when the maximum number of rx buffers is less than 2975 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL 2976 * it out here without concern of a memory leak. 2977 */ 2978 fp->rx_mbuf_chain[cons].m = NULL; 2979 } 2980 2981 /* update the Rx SW BD with the mbuf info from the TPA pool */ 2982 fp->rx_mbuf_chain[index] = tmp_bd; 2983 2984 /* update the Rx BD with the empty mbuf phys address from the TPA pool */ 2985 rx_bd = &fp->rx_chain[index]; 2986 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); 2987 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); 2988 } 2989 2990 /* 2991 * When a TPA aggregation is completed, loop through the individual mbufs 2992 * of the aggregation, combining them into a single mbuf which will be sent 2993 * up the stack. Refill all freed SGEs with mbufs as we go along. 2994 */ 2995 static int 2996 bxe_fill_frag_mbuf(struct bxe_softc *sc, 2997 struct bxe_fastpath *fp, 2998 struct bxe_sw_tpa_info *tpa_info, 2999 uint16_t queue, 3000 uint16_t pages, 3001 struct mbuf *m, 3002 struct eth_end_agg_rx_cqe *cqe, 3003 uint16_t cqe_idx) 3004 { 3005 struct mbuf *m_frag; 3006 uint32_t frag_len, frag_size, i; 3007 uint16_t sge_idx; 3008 int rc = 0; 3009 int j; 3010 3011 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; 3012 3013 BLOGD(sc, DBG_LRO, 3014 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", 3015 fp->index, queue, tpa_info->len_on_bd, frag_size, pages); 3016 3017 /* make sure the aggregated frame is not too big to handle */ 3018 if (pages > 8 * PAGES_PER_SGE) { 3019 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " 3020 "pkt_len=%d len_on_bd=%d frag_size=%d\n", 3021 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), 3022 tpa_info->len_on_bd, frag_size); 3023 bxe_panic(sc, ("sge page count error\n")); 3024 return (EINVAL); 3025 } 3026 3027 /* 3028 * Scan through the scatter gather list pulling individual mbufs into a 3029 * single mbuf for the host stack. 3030 */ 3031 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 3032 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); 3033 3034 /* 3035 * Firmware gives the indices of the SGE as if the ring is an array 3036 * (meaning that the "next" element will consume 2 indices). 3037 */ 3038 frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); 3039 3040 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " 3041 "sge_idx=%d frag_size=%d frag_len=%d\n", 3042 fp->index, queue, i, j, sge_idx, frag_size, frag_len); 3043 3044 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 3045 3046 /* allocate a new mbuf for the SGE */ 3047 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 3048 if (rc) { 3049 /* Leave all remaining SGEs in the ring! 
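             * They still own their mbufs and DMA mappings, so nothing is
             * leaked; the caller simply drops this aggregation.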
             */
            return (rc);
        }

        /* update the fragment length */
        m_frag->m_len = frag_len;

        /* concatenate the fragment to the head mbuf */
        m_cat(m, m_frag);
        fp->eth_q_stats.mbuf_alloc_sge--;

        /* update the TPA mbuf size and remaining fragment size */
        m->m_pkthdr.len += frag_len;
        frag_size -= frag_len;
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
          fp->index, queue, frag_size);

    return (rc);
}

static inline void
bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
{
    int i, j;

    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;

        for (j = 0; j < 2; j++) {
            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
            idx--;
        }
    }
}

static inline void
bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
{
    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

    /*
     * Clear the two last indices in the page to 1. These are the indices that
     * correspond to the "next" element, hence will never be indicated and
     * should be removed from the calculations.
     */
    bxe_clear_sge_mask_next_elems(fp);
}

static inline void
bxe_update_last_max_sge(struct bxe_fastpath *fp,
                        uint16_t idx)
{
    uint16_t last_max = fp->last_max_sge;

    if (SUB_S16(idx, last_max) > 0) {
        fp->last_max_sge = idx;
    }
}

static inline void
bxe_update_sge_prod(struct bxe_softc *sc,
                    struct bxe_fastpath *fp,
                    uint16_t sge_len,
                    union eth_sgl_or_raw_data *cqe)
{
    uint16_t last_max, last_elem, first_elem;
    uint16_t delta = 0;
    uint16_t i;

    if (!sge_len) {
        return;
    }

    /* first mark all used pages */
    for (i = 0; i < sge_len; i++) {
        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
                            RX_SGE(le16toh(cqe->sgl[i])));
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
          fp->index, sge_len - 1,
          le16toh(cqe->sgl[sge_len - 1]));

    /* assume that the last SGE index is the biggest */
    bxe_update_last_max_sge(fp,
                            le16toh(cqe->sgl[sge_len - 1]));

    last_max = RX_SGE(fp->last_max_sge);
    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

    /* if ring is not full */
    if (last_elem + 1 != first_elem) {
        last_elem++;
    }

    /* now update the prod */
    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
        if (__predict_true(fp->sge_mask[i])) {
            break;
        }

        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
        delta += BIT_VEC64_ELEM_SZ;
    }

    if (delta > 0) {
        fp->rx_sge_prod += delta;
        /* clear page-end entries */
        bxe_clear_sge_mask_next_elems(fp);
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
          fp->index, fp->last_max_sge, fp->rx_sge_prod);
}

/*
 * The aggregation on the current TPA queue has completed. Pull the individual
 * mbuf fragments together into a single mbuf, perform all necessary checksum
 * calculations, and send the resulting mbuf to the stack.
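 *
 * In outline (an illustrative summary of the code below, not new logic):
 *
 *   m = tpa_info->bd.m;                  aggregation head mbuf
 *   bxe_alloc_rx_tpa_mbuf(fp, queue);    replace the pool mbuf first
 *   m_adj(m, placement_offset);          strip the HW placement pad
 *   bxe_fill_frag_mbuf(...);             chain on the SGE fragments
 *   if_input(ifp, m);                    hand the frame to the stack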
 */
static void
bxe_tpa_stop(struct bxe_softc *sc,
             struct bxe_fastpath *fp,
             struct bxe_sw_tpa_info *tpa_info,
             uint16_t queue,
             uint16_t pages,
             struct eth_end_agg_rx_cqe *cqe,
             uint16_t cqe_idx)
{
    if_t ifp = sc->ifp;
    struct mbuf *m;
    int rc = 0;

    BLOGD(sc, DBG_LRO,
          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
          fp->index, queue, tpa_info->placement_offset,
          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);

    m = tpa_info->bd.m;

    /* allocate a replacement before modifying existing mbuf */
    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
    if (rc) {
        /* drop the frame and log an error */
        fp->eth_q_stats.rx_soft_errors++;
        goto bxe_tpa_stop_exit;
    }

    /* we have a replacement, fixup the current mbuf */
    m_adj(m, tpa_info->placement_offset);
    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;

    /* mark the checksums valid (taken care of by the firmware) */
    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
    m->m_pkthdr.csum_data = 0xffff;
    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
                               CSUM_IP_VALID |
                               CSUM_DATA_VALID |
                               CSUM_PSEUDO_HDR);

    /* aggregate all of the SGEs into a single mbuf */
    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
    if (rc) {
        /* drop the packet and log an error */
        fp->eth_q_stats.rx_soft_errors++;
        m_freem(m);
    } else {
        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) {
            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
            m->m_flags |= M_VLANTAG;
        }

        /* assign the packet to this interface */
        if_setrcvif(m, ifp);

#if __FreeBSD_version >= 800000
        /* specify what RSS queue was used for this flow */
        m->m_pkthdr.flowid = fp->index;
        M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
#endif

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
        fp->eth_q_stats.rx_tpa_pkts++;

        /* pass the frame to the stack */
        if_input(ifp, m);
    }

    /* we passed an mbuf up the stack or dropped the frame */
    fp->eth_q_stats.mbuf_alloc_tpa--;

bxe_tpa_stop_exit:

    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
    fp->rx_tpa_queue_used &= ~(1 << queue);
}

static uint8_t
bxe_service_rxsgl(
                 struct bxe_fastpath *fp,
                 uint16_t len,
                 uint16_t lenonbd,
                 struct mbuf *m,
                 struct eth_fast_path_rx_cqe *cqe_fp)
{
    struct mbuf *m_frag;
    uint16_t frags, frag_len;
    uint16_t sge_idx = 0;
    uint16_t j;
    uint8_t i, rc = 0;
    uint32_t frag_size;

    /* adjust the mbuf */
    m->m_len = lenonbd;

    frag_size = len - lenonbd;
    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));

        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
        m_frag->m_len = frag_len;

        /* allocate a new mbuf for the SGE */
        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
        if (rc) {
            /* Leave all remaining SGEs in the ring!
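             * (bxe_rxeof() sees the non-zero return below and breaks out
             * of its receive loop for this pass.)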
*/ 3286 return (rc); 3287 } 3288 fp->eth_q_stats.mbuf_alloc_sge--; 3289 3290 /* concatenate the fragment to the head mbuf */ 3291 m_cat(m, m_frag); 3292 3293 frag_size -= frag_len; 3294 } 3295 3296 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); 3297 3298 return rc; 3299 } 3300 3301 static uint8_t 3302 bxe_rxeof(struct bxe_softc *sc, 3303 struct bxe_fastpath *fp) 3304 { 3305 if_t ifp = sc->ifp; 3306 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 3307 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 3308 int rx_pkts = 0; 3309 int rc = 0; 3310 3311 BXE_FP_RX_LOCK(fp); 3312 3313 /* CQ "next element" is of the size of the regular element */ 3314 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 3315 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { 3316 hw_cq_cons++; 3317 } 3318 3319 bd_cons = fp->rx_bd_cons; 3320 bd_prod = fp->rx_bd_prod; 3321 bd_prod_fw = bd_prod; 3322 sw_cq_cons = fp->rx_cq_cons; 3323 sw_cq_prod = fp->rx_cq_prod; 3324 3325 /* 3326 * Memory barrier necessary as speculative reads of the rx 3327 * buffer can be ahead of the index in the status block 3328 */ 3329 rmb(); 3330 3331 BLOGD(sc, DBG_RX, 3332 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", 3333 fp->index, hw_cq_cons, sw_cq_cons); 3334 3335 while (sw_cq_cons != hw_cq_cons) { 3336 struct bxe_sw_rx_bd *rx_buf = NULL; 3337 union eth_rx_cqe *cqe; 3338 struct eth_fast_path_rx_cqe *cqe_fp; 3339 uint8_t cqe_fp_flags; 3340 enum eth_rx_cqe_type cqe_fp_type; 3341 uint16_t len, lenonbd, pad; 3342 struct mbuf *m = NULL; 3343 3344 comp_ring_cons = RCQ(sw_cq_cons); 3345 bd_prod = RX_BD(bd_prod); 3346 bd_cons = RX_BD(bd_cons); 3347 3348 cqe = &fp->rcq_chain[comp_ring_cons]; 3349 cqe_fp = &cqe->fast_path_cqe; 3350 cqe_fp_flags = cqe_fp->type_error_flags; 3351 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 3352 3353 BLOGD(sc, DBG_RX, 3354 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " 3355 "BD prod=%d cons=%d CQE type=0x%x err=0x%x " 3356 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", 3357 fp->index, 3358 hw_cq_cons, 3359 sw_cq_cons, 3360 bd_prod, 3361 bd_cons, 3362 CQE_TYPE(cqe_fp_flags), 3363 cqe_fp_flags, 3364 cqe_fp->status_flags, 3365 le32toh(cqe_fp->rss_hash_result), 3366 le16toh(cqe_fp->vlan_tag), 3367 le16toh(cqe_fp->pkt_len_or_gro_seg_len), 3368 le16toh(cqe_fp->len_on_bd)); 3369 3370 /* is this a slowpath msg? */ 3371 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { 3372 bxe_sp_event(sc, fp, cqe); 3373 goto next_cqe; 3374 } 3375 3376 rx_buf = &fp->rx_mbuf_chain[bd_cons]; 3377 3378 if (!CQE_TYPE_FAST(cqe_fp_type)) { 3379 struct bxe_sw_tpa_info *tpa_info; 3380 uint16_t frag_size, pages; 3381 uint8_t queue; 3382 3383 #if 0 3384 /* sanity check */ 3385 if (!fp->tpa_enable && 3386 (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) { 3387 BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n", 3388 CQE_TYPE(cqe_fp_type)); 3389 } 3390 #endif 3391 3392 if (CQE_TYPE_START(cqe_fp_type)) { 3393 bxe_tpa_start(sc, fp, cqe_fp->queue_index, 3394 bd_cons, bd_prod, cqe_fp); 3395 m = NULL; /* packet not ready yet */ 3396 goto next_rx; 3397 } 3398 3399 KASSERT(CQE_TYPE_STOP(cqe_fp_type), 3400 ("CQE type is not STOP! 
(0x%x)\n", cqe_fp_type)); 3401 3402 queue = cqe->end_agg_cqe.queue_index; 3403 tpa_info = &fp->rx_tpa_info[queue]; 3404 3405 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", 3406 fp->index, queue); 3407 3408 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - 3409 tpa_info->len_on_bd); 3410 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3411 3412 bxe_tpa_stop(sc, fp, tpa_info, queue, pages, 3413 &cqe->end_agg_cqe, comp_ring_cons); 3414 3415 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); 3416 3417 goto next_cqe; 3418 } 3419 3420 /* non TPA */ 3421 3422 /* is this an error packet? */ 3423 if (__predict_false(cqe_fp_flags & 3424 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { 3425 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); 3426 fp->eth_q_stats.rx_soft_errors++; 3427 goto next_rx; 3428 } 3429 3430 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); 3431 lenonbd = le16toh(cqe_fp->len_on_bd); 3432 pad = cqe_fp->placement_offset; 3433 3434 m = rx_buf->m; 3435 3436 if (__predict_false(m == NULL)) { 3437 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", 3438 bd_cons, fp->index); 3439 goto next_rx; 3440 } 3441 3442 /* XXX double copy if packet length under a threshold */ 3443 3444 /* 3445 * If all the buffer descriptors are filled with mbufs then fill in 3446 * the current consumer index with a new BD. Else if a maximum Rx 3447 * buffer limit is imposed then fill in the next producer index. 3448 */ 3449 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons, 3450 (sc->max_rx_bufs != RX_BD_USABLE) ? 3451 bd_prod : bd_cons); 3452 if (rc != 0) { 3453 3454 /* we simply reuse the received mbuf and don't post it to the stack */ 3455 m = NULL; 3456 3457 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 3458 fp->index, rc); 3459 fp->eth_q_stats.rx_soft_errors++; 3460 3461 if (sc->max_rx_bufs != RX_BD_USABLE) { 3462 /* copy this consumer index to the producer index */ 3463 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, 3464 sizeof(struct bxe_sw_rx_bd)); 3465 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd)); 3466 } 3467 3468 goto next_rx; 3469 } 3470 3471 /* current mbuf was detached from the bd */ 3472 fp->eth_q_stats.mbuf_alloc_rx--; 3473 3474 /* we allocated a replacement mbuf, fixup the current one */ 3475 m_adj(m, pad); 3476 m->m_pkthdr.len = m->m_len = len; 3477 3478 if (len != lenonbd){ 3479 rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp); 3480 if (rc) 3481 break; 3482 fp->eth_q_stats.rx_jumbo_sge_pkts++; 3483 } 3484 3485 /* assign packet to this interface interface */ 3486 if_setrcvif(m, ifp); 3487 3488 /* assume no hardware checksum has complated */ 3489 m->m_pkthdr.csum_flags = 0; 3490 3491 /* validate checksum if offload enabled */ 3492 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 3493 /* check for a valid IP frame */ 3494 if (!(cqe->fast_path_cqe.status_flags & 3495 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) { 3496 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3497 if (__predict_false(cqe_fp_flags & 3498 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) { 3499 fp->eth_q_stats.rx_hw_csum_errors++; 3500 } else { 3501 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3502 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3503 } 3504 } 3505 3506 /* check for a valid TCP/UDP frame */ 3507 if (!(cqe->fast_path_cqe.status_flags & 3508 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) { 3509 if (__predict_false(cqe_fp_flags & 3510 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { 3511 fp->eth_q_stats.rx_hw_csum_errors++; 3512 } else { 3513 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3514 
m->m_pkthdr.csum_data = 0xFFFF; 3515 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | 3516 CSUM_PSEUDO_HDR); 3517 } 3518 } 3519 } 3520 3521 /* if there is a VLAN tag then flag that info */ 3522 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) { 3523 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; 3524 m->m_flags |= M_VLANTAG; 3525 } 3526 3527 #if __FreeBSD_version >= 800000 3528 /* specify what RSS queue was used for this flow */ 3529 m->m_pkthdr.flowid = fp->index; 3530 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); 3531 #endif 3532 3533 next_rx: 3534 3535 bd_cons = RX_BD_NEXT(bd_cons); 3536 bd_prod = RX_BD_NEXT(bd_prod); 3537 bd_prod_fw = RX_BD_NEXT(bd_prod_fw); 3538 3539 /* pass the frame to the stack */ 3540 if (__predict_true(m != NULL)) { 3541 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3542 rx_pkts++; 3543 if_input(ifp, m); 3544 } 3545 3546 next_cqe: 3547 3548 sw_cq_prod = RCQ_NEXT(sw_cq_prod); 3549 sw_cq_cons = RCQ_NEXT(sw_cq_cons); 3550 3551 /* limit spinning on the queue */ 3552 if (rc != 0) 3553 break; 3554 3555 if (rx_pkts == sc->rx_budget) { 3556 fp->eth_q_stats.rx_budget_reached++; 3557 break; 3558 } 3559 } /* while work to do */ 3560 3561 fp->rx_bd_cons = bd_cons; 3562 fp->rx_bd_prod = bd_prod_fw; 3563 fp->rx_cq_cons = sw_cq_cons; 3564 fp->rx_cq_prod = sw_cq_prod; 3565 3566 /* Update producers */ 3567 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); 3568 3569 fp->eth_q_stats.rx_pkts += rx_pkts; 3570 fp->eth_q_stats.rx_calls++; 3571 3572 BXE_FP_RX_UNLOCK(fp); 3573 3574 return (sw_cq_cons != hw_cq_cons); 3575 } 3576 3577 static uint16_t 3578 bxe_free_tx_pkt(struct bxe_softc *sc, 3579 struct bxe_fastpath *fp, 3580 uint16_t idx) 3581 { 3582 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; 3583 struct eth_tx_start_bd *tx_start_bd; 3584 uint16_t bd_idx = TX_BD(tx_buf->first_bd); 3585 uint16_t new_cons; 3586 int nbd; 3587 3588 /* unmap the mbuf from non-paged memory */ 3589 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 3590 3591 tx_start_bd = &fp->tx_chain[bd_idx].start_bd; 3592 nbd = le16toh(tx_start_bd->nbd) - 1; 3593 3594 #if 0 3595 if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) { 3596 bxe_panic(sc, ("BAD nbd!\n")); 3597 } 3598 #endif 3599 3600 new_cons = (tx_buf->first_bd + nbd); 3601 3602 #if 0 3603 struct eth_tx_bd *tx_data_bd; 3604 3605 /* 3606 * The following code doesn't do anything but is left here 3607 * for clarity on what the new value of new_cons skipped. 
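     * (The BDs being skipped are the start BD, the parse BD, any TSO
     * split header BD, and the per-fragment data BDs, as the disabled
     * walk below enumerates.)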
3608 */ 3609 3610 /* get the next bd */ 3611 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3612 3613 /* skip the parse bd */ 3614 --nbd; 3615 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3616 3617 /* skip the TSO split header bd since they have no mapping */ 3618 if (tx_buf->flags & BXE_TSO_SPLIT_BD) { 3619 --nbd; 3620 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3621 } 3622 3623 /* now free frags */ 3624 while (nbd > 0) { 3625 tx_data_bd = &fp->tx_chain[bd_idx].reg_bd; 3626 if (--nbd) { 3627 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3628 } 3629 } 3630 #endif 3631 3632 /* free the mbuf */ 3633 if (__predict_true(tx_buf->m != NULL)) { 3634 m_freem(tx_buf->m); 3635 fp->eth_q_stats.mbuf_alloc_tx--; 3636 } else { 3637 fp->eth_q_stats.tx_chain_lost_mbuf++; 3638 } 3639 3640 tx_buf->m = NULL; 3641 tx_buf->first_bd = 0; 3642 3643 return (new_cons); 3644 } 3645 3646 /* transmit timeout watchdog */ 3647 static int 3648 bxe_watchdog(struct bxe_softc *sc, 3649 struct bxe_fastpath *fp) 3650 { 3651 BXE_FP_TX_LOCK(fp); 3652 3653 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { 3654 BXE_FP_TX_UNLOCK(fp); 3655 return (0); 3656 } 3657 3658 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); 3659 3660 BXE_FP_TX_UNLOCK(fp); 3661 3662 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); 3663 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); 3664 3665 return (-1); 3666 } 3667 3668 /* processes transmit completions */ 3669 static uint8_t 3670 bxe_txeof(struct bxe_softc *sc, 3671 struct bxe_fastpath *fp) 3672 { 3673 if_t ifp = sc->ifp; 3674 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; 3675 uint16_t tx_bd_avail; 3676 3677 BXE_FP_TX_LOCK_ASSERT(fp); 3678 3679 bd_cons = fp->tx_bd_cons; 3680 hw_cons = le16toh(*fp->tx_cons_sb); 3681 sw_cons = fp->tx_pkt_cons; 3682 3683 while (sw_cons != hw_cons) { 3684 pkt_cons = TX_BD(sw_cons); 3685 3686 BLOGD(sc, DBG_TX, 3687 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", 3688 fp->index, hw_cons, sw_cons, pkt_cons); 3689 3690 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); 3691 3692 sw_cons++; 3693 } 3694 3695 fp->tx_pkt_cons = sw_cons; 3696 fp->tx_bd_cons = bd_cons; 3697 3698 BLOGD(sc, DBG_TX, 3699 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", 3700 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); 3701 3702 mb(); 3703 3704 tx_bd_avail = bxe_tx_avail(sc, fp); 3705 3706 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 3707 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 3708 } else { 3709 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 3710 } 3711 3712 if (fp->tx_pkt_prod != fp->tx_pkt_cons) { 3713 /* reset the watchdog timer if there are pending transmits */ 3714 fp->watchdog_timer = BXE_TX_TIMEOUT; 3715 return (TRUE); 3716 } else { 3717 /* clear watchdog when there are no pending transmits */ 3718 fp->watchdog_timer = 0; 3719 return (FALSE); 3720 } 3721 } 3722 3723 static void 3724 bxe_drain_tx_queues(struct bxe_softc *sc) 3725 { 3726 struct bxe_fastpath *fp; 3727 int i, count; 3728 3729 /* wait until all TX fastpath tasks have completed */ 3730 for (i = 0; i < sc->num_queues; i++) { 3731 fp = &sc->fp[i]; 3732 3733 count = 1000; 3734 3735 while (bxe_has_tx_work(fp)) { 3736 3737 BXE_FP_TX_LOCK(fp); 3738 bxe_txeof(sc, fp); 3739 BXE_FP_TX_UNLOCK(fp); 3740 3741 if (count == 0) { 3742 BLOGE(sc, "Timeout waiting for fp[%d] " 3743 "transmits to complete!\n", i); 3744 bxe_panic(sc, ("tx drain failure\n")); 3745 return; 3746 } 3747 3748 count--; 3749 DELAY(1000); 3750 rmb(); 3751 } 3752 } 3753 3754 return; 3755 } 3756 3757 static int 3758 bxe_del_all_macs(struct bxe_softc *sc, 3759 
                 struct ecore_vlan_mac_obj *mac_obj,
                 int mac_type,
                 uint8_t wait_for_comp)
{
    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
    int rc;

    /* wait for completion of the requested command */
    if (wait_for_comp) {
        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
    }

    /* Set the mac type of addresses we want to clear */
    bxe_set_bit(mac_type, &vlan_mac_flags);

    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete MACs (%d)\n", rc);
    }

    return (rc);
}

static int
bxe_fill_accept_flags(struct bxe_softc *sc,
                      uint32_t rx_mode,
                      unsigned long *rx_accept_flags,
                      unsigned long *tx_accept_flags)
{
    /* Clear the flags first */
    *rx_accept_flags = 0;
    *tx_accept_flags = 0;

    switch (rx_mode) {
    case BXE_RX_MODE_NONE:
        /*
         * 'drop all' supersedes any accept flags that may have been
         * passed to the function.
         */
        break;

    case BXE_RX_MODE_NORMAL:
        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

        /* internal switching mode */
        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

        break;

    case BXE_RX_MODE_ALLMULTI:
        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

        /* internal switching mode */
        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

        break;

    case BXE_RX_MODE_PROMISC:
        /*
         * According to the definition of SI mode, an interface in promisc
         * mode should receive matched and unmatched (in resolution of port)
         * unicast packets.
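         * (ECORE_ACCEPT_UNMATCHED below is the extra flag that separates
         * promiscuous mode from BXE_RX_MODE_NORMAL on the receive side.)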
3829 */ 3830 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); 3831 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3832 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3833 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3834 3835 /* internal switching mode */ 3836 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3837 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3838 3839 if (IS_MF_SI(sc)) { 3840 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); 3841 } else { 3842 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3843 } 3844 3845 break; 3846 3847 default: 3848 BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode); 3849 return (-1); 3850 } 3851 3852 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 3853 if (rx_mode != BXE_RX_MODE_NONE) { 3854 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); 3855 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); 3856 } 3857 3858 return (0); 3859 } 3860 3861 static int 3862 bxe_set_q_rx_mode(struct bxe_softc *sc, 3863 uint8_t cl_id, 3864 unsigned long rx_mode_flags, 3865 unsigned long rx_accept_flags, 3866 unsigned long tx_accept_flags, 3867 unsigned long ramrod_flags) 3868 { 3869 struct ecore_rx_mode_ramrod_params ramrod_param; 3870 int rc; 3871 3872 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3873 3874 /* Prepare ramrod parameters */ 3875 ramrod_param.cid = 0; 3876 ramrod_param.cl_id = cl_id; 3877 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; 3878 ramrod_param.func_id = SC_FUNC(sc); 3879 3880 ramrod_param.pstate = &sc->sp_state; 3881 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; 3882 3883 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); 3884 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); 3885 3886 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 3887 3888 ramrod_param.ramrod_flags = ramrod_flags; 3889 ramrod_param.rx_mode_flags = rx_mode_flags; 3890 3891 ramrod_param.rx_accept_flags = rx_accept_flags; 3892 ramrod_param.tx_accept_flags = tx_accept_flags; 3893 3894 rc = ecore_config_rx_mode(sc, &ramrod_param); 3895 if (rc < 0) { 3896 BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode); 3897 return (rc); 3898 } 3899 3900 return (0); 3901 } 3902 3903 static int 3904 bxe_set_storm_rx_mode(struct bxe_softc *sc) 3905 { 3906 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 3907 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 3908 int rc; 3909 3910 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 3911 &tx_accept_flags); 3912 if (rc) { 3913 return (rc); 3914 } 3915 3916 bxe_set_bit(RAMROD_RX, &ramrod_flags); 3917 bxe_set_bit(RAMROD_TX, &ramrod_flags); 3918 3919 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ 3920 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 3921 rx_accept_flags, tx_accept_flags, 3922 ramrod_flags)); 3923 } 3924 3925 /* returns the "mcp load_code" according to global load_count array */ 3926 static int 3927 bxe_nic_load_no_mcp(struct bxe_softc *sc) 3928 { 3929 int path = SC_PATH(sc); 3930 int port = SC_PORT(sc); 3931 3932 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3933 path, load_count[path][0], load_count[path][1], 3934 load_count[path][2]); 3935 load_count[path][0]++; 3936 load_count[path][1 + port]++; 3937 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3938 path, load_count[path][0], load_count[path][1], 3939 load_count[path][2]); 3940 if (load_count[path][0] == 1) { 3941 return (FW_MSG_CODE_DRV_LOAD_COMMON); 3942 } else if (load_count[path][1 + port] == 1) { 3943 return 
(FW_MSG_CODE_DRV_LOAD_PORT); 3944 } else { 3945 return (FW_MSG_CODE_DRV_LOAD_FUNCTION); 3946 } 3947 } 3948 3949 /* returns the "mcp unload_code" according to global load_count array */ 3950 static int 3951 bxe_nic_unload_no_mcp(struct bxe_softc *sc) 3952 { 3953 int port = SC_PORT(sc); 3954 int path = SC_PATH(sc); 3955 3956 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3957 path, load_count[path][0], load_count[path][1], 3958 load_count[path][2]); 3959 load_count[path][0]--; 3960 load_count[path][1 + port]--; 3961 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3962 path, load_count[path][0], load_count[path][1], 3963 load_count[path][2]); 3964 if (load_count[path][0] == 0) { 3965 return (FW_MSG_CODE_DRV_UNLOAD_COMMON); 3966 } else if (load_count[path][1 + port] == 0) { 3967 return (FW_MSG_CODE_DRV_UNLOAD_PORT); 3968 } else { 3969 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); 3970 } 3971 } 3972 3973 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ 3974 static uint32_t 3975 bxe_send_unload_req(struct bxe_softc *sc, 3976 int unload_mode) 3977 { 3978 uint32_t reset_code = 0; 3979 #if 0 3980 int port = SC_PORT(sc); 3981 int path = SC_PATH(sc); 3982 #endif 3983 3984 /* Select the UNLOAD request mode */ 3985 if (unload_mode == UNLOAD_NORMAL) { 3986 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 3987 } 3988 #if 0 3989 else if (sc->flags & BXE_NO_WOL_FLAG) { 3990 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 3991 } else if (sc->wol) { 3992 uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 3993 uint8_t *mac_addr = sc->dev->dev_addr; 3994 uint32_t val; 3995 uint16_t pmc; 3996 3997 /* 3998 * The mac address is written to entries 1-4 to 3999 * preserve entry 0 which is used by the PMF 4000 */ 4001 uint8_t entry = (SC_VN(sc) + 1)*8; 4002 4003 val = (mac_addr[0] << 8) | mac_addr[1]; 4004 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val); 4005 4006 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 4007 (mac_addr[4] << 8) | mac_addr[5]; 4008 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 4009 4010 /* Enable the PME and clear the status */ 4011 pmc = pci_read_config(sc->dev, 4012 (sc->devinfo.pcie_pm_cap_reg + 4013 PCIR_POWER_STATUS), 4014 2); 4015 pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME; 4016 pci_write_config(sc->dev, 4017 (sc->devinfo.pcie_pm_cap_reg + 4018 PCIR_POWER_STATUS), 4019 pmc, 2); 4020 4021 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 4022 } 4023 #endif 4024 else { 4025 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 4026 } 4027 4028 /* Send the request to the MCP */ 4029 if (!BXE_NOMCP(sc)) { 4030 reset_code = bxe_fw_command(sc, reset_code, 0); 4031 } else { 4032 reset_code = bxe_nic_unload_no_mcp(sc); 4033 } 4034 4035 return (reset_code); 4036 } 4037 4038 /* send UNLOAD_DONE command to the MCP */ 4039 static void 4040 bxe_send_unload_done(struct bxe_softc *sc, 4041 uint8_t keep_link) 4042 { 4043 uint32_t reset_param = 4044 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 4045 4046 /* Report UNLOAD_DONE to MCP */ 4047 if (!BXE_NOMCP(sc)) { 4048 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 4049 } 4050 } 4051 4052 static int 4053 bxe_func_wait_started(struct bxe_softc *sc) 4054 { 4055 int tout = 50; 4056 4057 if (!sc->port.pmf) { 4058 return (0); 4059 } 4060 4061 /* 4062 * (assumption: No Attention from MCP at this stage) 4063 * PMF probably in the middle of TX disable/enable transaction 4064 * 1. Sync ISR for default SB 4065 * 2. Sync SP queue - this guarantees us that attention handling started 4066 * 3.
Wait until the TX disable/enable transaction completes 4067 * 4068 * 1+2 guarantee that if a DCBX attention was scheduled it has already changed 4069 * the pending bit of the transaction from STARTED-->TX_STOPPED; if we already 4070 * received the completion for the transaction, the state is TX_STOPPED. 4071 * The state will return to STARTED after completion of the TX_STOPPED-->STARTED 4072 * transaction. 4073 */ 4074 4075 /* XXX make sure default SB ISR is done */ 4076 /* need a way to synchronize an irq (intr_mtx?) */ 4077 4078 /* XXX flush any work queues */ 4079 4080 while (ecore_func_get_state(sc, &sc->func_obj) != 4081 ECORE_F_STATE_STARTED && tout--) { 4082 DELAY(20000); 4083 } 4084 4085 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { 4086 /* 4087 * Failed to complete the transaction in a "good way". 4088 * Force both transactions with the CLR bit. 4089 */ 4090 struct ecore_func_state_params func_params = { NULL }; 4091 4092 BLOGE(sc, "Unexpected function state! " 4093 "Forcing STARTED-->TX_STOPPED-->STARTED\n"); 4094 4095 func_params.f_obj = &sc->func_obj; 4096 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 4097 4098 /* STARTED-->TX_STOPPED */ 4099 func_params.cmd = ECORE_F_CMD_TX_STOP; 4100 ecore_func_state_change(sc, &func_params); 4101 4102 /* TX_STOPPED-->STARTED */ 4103 func_params.cmd = ECORE_F_CMD_TX_START; 4104 return (ecore_func_state_change(sc, &func_params)); 4105 } 4106 4107 return (0); 4108 } 4109 4110 static int 4111 bxe_stop_queue(struct bxe_softc *sc, 4112 int index) 4113 { 4114 struct bxe_fastpath *fp = &sc->fp[index]; 4115 struct ecore_queue_state_params q_params = { NULL }; 4116 int rc; 4117 4118 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); 4119 4120 q_params.q_obj = &sc->sp_objs[fp->index].q_obj; 4121 /* We want to wait for completion in this context */ 4122 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 4123 4124 /* Stop the primary connection: */ 4125 4126 /* ...halt the connection */ 4127 q_params.cmd = ECORE_Q_CMD_HALT; 4128 rc = ecore_queue_state_change(sc, &q_params); 4129 if (rc) { 4130 return (rc); 4131 } 4132 4133 /* ...terminate the connection */ 4134 q_params.cmd = ECORE_Q_CMD_TERMINATE; 4135 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate)); 4136 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 4137 rc = ecore_queue_state_change(sc, &q_params); 4138 if (rc) { 4139 return (rc); 4140 } 4141 4142 /* ...delete cfc entry */ 4143 q_params.cmd = ECORE_Q_CMD_CFC_DEL; 4144 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); 4145 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 4146 return (ecore_queue_state_change(sc, &q_params)); 4147 } 4148 4149 /* wait for the outstanding SP commands */ 4150 static inline uint8_t 4151 bxe_wait_sp_comp(struct bxe_softc *sc, 4152 unsigned long mask) 4153 { 4154 unsigned long tmp; 4155 int tout = 5000; /* wait for 5 secs tops */ 4156 4157 while (tout--) { 4158 mb(); 4159 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { 4160 return (TRUE); 4161 } 4162 4163 DELAY(1000); 4164 } 4165 4166 mb(); 4167 4168 tmp = atomic_load_acq_long(&sc->sp_state); 4169 if (tmp & mask) { 4170 BLOGE(sc, "Filtering completion timed out: " 4171 "sp_state 0x%lx, mask 0x%lx\n", 4172 tmp, mask); 4173 return (FALSE); 4174 } 4175 4176 return (TRUE); /* completed just as the wait loop expired */ 4177 } 4178 4179 static int 4180 bxe_func_stop(struct bxe_softc *sc) 4181 { 4182 struct ecore_func_state_params func_params = { NULL }; 4183 int rc; 4184 4185 /* prepare parameters for function state transitions */
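/* RAMROD_COMP_WAIT makes the ecore_func_state_change() calls below synchronous */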
4186 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4187 func_params.f_obj = &sc->func_obj; 4188 func_params.cmd = ECORE_F_CMD_STOP; 4189 4190 /* 4191 * Try to stop the function the 'good way'. If it fails (in case 4192 * of a parity error during bxe_chip_cleanup()) and we are 4193 * not in a debug mode, perform a state transaction in order to 4194 * enable a further HW_RESET transaction. 4195 */ 4196 rc = ecore_func_state_change(sc, &func_params); 4197 if (rc) { 4198 BLOGE(sc, "FUNC_STOP ramrod failed. " 4199 "Running a dry transaction\n"); 4200 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 4201 return (ecore_func_state_change(sc, &func_params)); 4202 } 4203 4204 return (0); 4205 } 4206 4207 static int 4208 bxe_reset_hw(struct bxe_softc *sc, 4209 uint32_t load_code) 4210 { 4211 struct ecore_func_state_params func_params = { NULL }; 4212 4213 /* Prepare parameters for function state transitions */ 4214 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 4215 4216 func_params.f_obj = &sc->func_obj; 4217 func_params.cmd = ECORE_F_CMD_HW_RESET; 4218 4219 func_params.params.hw_init.load_phase = load_code; 4220 4221 return (ecore_func_state_change(sc, &func_params)); 4222 } 4223 4224 static void 4225 bxe_int_disable_sync(struct bxe_softc *sc, 4226 int disable_hw) 4227 { 4228 if (disable_hw) { 4229 /* prevent the HW from sending interrupts */ 4230 bxe_int_disable(sc); 4231 } 4232 4233 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */ 4234 /* make sure all ISRs are done */ 4235 4236 /* XXX make sure sp_task is not running */ 4237 /* cancel and flush work queues */ 4238 } 4239 4240 static void 4241 bxe_chip_cleanup(struct bxe_softc *sc, 4242 uint32_t unload_mode, 4243 uint8_t keep_link) 4244 { 4245 int port = SC_PORT(sc); 4246 struct ecore_mcast_ramrod_params rparam = { NULL }; 4247 uint32_t reset_code; 4248 int i, rc = 0; 4249 4250 bxe_drain_tx_queues(sc); 4251 4252 /* give HW time to discard old tx messages */ 4253 DELAY(1000); 4254 4255 /* Clean all ETH MACs */ 4256 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); 4257 if (rc < 0) { 4258 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc); 4259 } 4260 4261 /* Clean up UC list */ 4262 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); 4263 if (rc < 0) { 4264 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc); 4265 } 4266 4267 /* Disable LLH */ 4268 if (!CHIP_IS_E1(sc)) { 4269 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 4270 } 4271 4272 /* Set "drop all" to stop Rx */ 4273 4274 /* 4275 * We need to take the BXE_MCAST_LOCK() here in order to prevent 4276 * a race between the completion code and this code. 4277 */ 4278 BXE_MCAST_LOCK(sc); 4279 4280 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 4281 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 4282 } else { 4283 bxe_set_storm_rx_mode(sc); 4284 } 4285 4286 /* Clean up multicast configuration */ 4287 rparam.mcast_obj = &sc->mcast_obj; 4288 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 4289 if (rc < 0) { 4290 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); 4291 } 4292 4293 BXE_MCAST_UNLOCK(sc); 4294 4295 // XXX bxe_iov_chip_cleanup(sc); 4296 4297 /* 4298 * Send the UNLOAD_REQUEST to the MCP. This will return whether 4299 * this function should perform a FUNCTION, PORT, or COMMON HW 4300 * reset.
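 * The reply (FW_MSG_CODE_DRV_UNLOAD_{FUNCTION,PORT,COMMON} in the MCP-less
 * case) is handed to bxe_reset_hw() below as the load_phase.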
4301 */ 4302 reset_code = bxe_send_unload_req(sc, unload_mode); 4303 4304 /* 4305 * (assumption: No Attention from MCP at this stage) 4306 * PMF probably in the middle of TX disable/enable transaction 4307 */ 4308 rc = bxe_func_wait_started(sc); 4309 if (rc) { 4310 BLOGE(sc, "bxe_func_wait_started failed\n"); 4311 } 4312 4313 /* 4314 * Close multi and leading connections 4315 * Completions for ramrods are collected in a synchronous way 4316 */ 4317 for (i = 0; i < sc->num_queues; i++) { 4318 if (bxe_stop_queue(sc, i)) { 4319 goto unload_error; 4320 } 4321 } 4322 4323 /* 4324 * If SP settings didn't get completed so far then something 4325 * very wrong has happened. 4326 */ 4327 if (!bxe_wait_sp_comp(sc, ~0x0UL)) { 4328 BLOGE(sc, "Common slow path ramrods got stuck!\n"); 4329 } 4330 4331 unload_error: 4332 4333 rc = bxe_func_stop(sc); 4334 if (rc) { 4335 BLOGE(sc, "Function stop failed!\n"); 4336 } 4337 4338 /* disable HW interrupts */ 4339 bxe_int_disable_sync(sc, TRUE); 4340 4341 /* detach interrupts */ 4342 bxe_interrupt_detach(sc); 4343 4344 /* Reset the chip */ 4345 rc = bxe_reset_hw(sc, reset_code); 4346 if (rc) { 4347 BLOGE(sc, "Hardware reset failed\n"); 4348 } 4349 4350 /* Report UNLOAD_DONE to MCP */ 4351 bxe_send_unload_done(sc, keep_link); 4352 } 4353 4354 static void 4355 bxe_disable_close_the_gate(struct bxe_softc *sc) 4356 { 4357 uint32_t val; 4358 int port = SC_PORT(sc); 4359 4360 BLOGD(sc, DBG_LOAD, 4361 "Disabling 'close the gates'\n"); 4362 4363 if (CHIP_IS_E1(sc)) { 4364 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4365 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4366 val = REG_RD(sc, addr); 4367 val &= ~(0x300); 4368 REG_WR(sc, addr, val); 4369 } else { 4370 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); 4371 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 4372 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 4373 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); 4374 } 4375 } 4376 4377 /* 4378 * Cleans the objects that have internal lists without sending 4379 * ramrods. Should be run when interrupts are disabled. 4380 */ 4381 static void 4382 bxe_squeeze_objects(struct bxe_softc *sc) 4383 { 4384 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 4385 struct ecore_mcast_ramrod_params rparam = { NULL }; 4386 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 4387 int rc; 4388 4389 /* Clean up the MAC object first... */ 4390 4391 /* Wait for completion of the requested commands */ 4392 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 4393 /* Perform a dry cleanup */ 4394 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); 4395 4396 /* Clean ETH primary MAC */ 4397 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags); 4398 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, 4399 &ramrod_flags); 4400 if (rc != 0) { 4401 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc); 4402 } 4403 4404 /* Clean up UC list */ 4405 vlan_mac_flags = 0; 4406 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags); 4407 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, 4408 &ramrod_flags); 4409 if (rc != 0) { 4410 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc); 4411 } 4412 4413 /* Now clean mcast object... */ 4414 4415 rparam.mcast_obj = &sc->mcast_obj; 4416 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); 4417 4418 /* Add a DEL command...
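 * (with RAMROD_DRV_CLR_ONLY set above this only purges the object's
 * pending list; no ramrod is actually sent)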
*/ 4419 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 4420 if (rc < 0) { 4421 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); 4422 } 4423 4424 /* now wait until all pending commands are cleared */ 4425 4426 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 4427 while (rc != 0) { 4428 if (rc < 0) { 4429 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc); 4430 return; 4431 } 4432 4433 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 4434 } 4435 } 4436 4437 /* stop the controller */ 4438 static __noinline int 4439 bxe_nic_unload(struct bxe_softc *sc, 4440 uint32_t unload_mode, 4441 uint8_t keep_link) 4442 { 4443 uint8_t global = FALSE; 4444 uint32_t val; 4445 4446 BXE_CORE_LOCK_ASSERT(sc); 4447 4448 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n"); 4449 4450 /* mark driver as unloaded in shmem2 */ 4451 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 4452 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 4453 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 4454 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); 4455 } 4456 4457 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE && 4458 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) { 4459 /* 4460 * We can get here if the driver has been unloaded 4461 * during parity error recovery and is either waiting for a 4462 * leader to complete or for other functions to unload and 4463 * then ifconfig down has been issued. In this case we want to 4464 * unload and let other functions complete the recovery 4465 * process. 4466 */ 4467 sc->recovery_state = BXE_RECOVERY_DONE; 4468 sc->is_leader = 0; 4469 bxe_release_leader_lock(sc); 4470 mb(); 4471 4472 BLOGD(sc, DBG_LOAD, "Releasing leadership...\n"); 4473 BLOGE(sc, "Can't unload in closed or error state\n"); 4474 return (-1); 4475 } 4476 4477 /* 4478 * Nothing to do during unload if the previous bxe_nic_load() 4479 * did not complete successfully - all resources are released. 4480 */ 4481 if ((sc->state == BXE_STATE_CLOSED) || 4482 (sc->state == BXE_STATE_ERROR)) { 4483 return (0); 4484 } 4485 4486 sc->state = BXE_STATE_CLOSING_WAITING_HALT; 4487 mb(); 4488 4489 /* stop tx */ 4490 bxe_tx_disable(sc); 4491 4492 sc->rx_mode = BXE_RX_MODE_NONE; 4493 /* XXX set rx mode ??? */ 4494 4495 if (IS_PF(sc)) { 4496 /* set ALWAYS_ALIVE bit in shmem */ 4497 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 4498 4499 bxe_drv_pulse(sc); 4500 4501 bxe_stats_handle(sc, STATS_EVENT_STOP); 4502 bxe_save_statistics(sc); 4503 } 4504 4505 /* wait till consumers catch up with producers in all queues */ 4506 bxe_drain_tx_queues(sc); 4507 4508 /* if VF, indicate to the PF that this function is going down (the PF will 4509 * delete sp elements and clear initializations) 4510 */ 4511 if (IS_VF(sc)) { 4512 ; /* bxe_vfpf_close_vf(sc); */ 4513 } else if (unload_mode != UNLOAD_RECOVERY) { 4514 /* if this is a normal/close unload we need to clean up the chip */ 4515 bxe_chip_cleanup(sc, unload_mode, keep_link); 4516 } else { 4517 /* Send the UNLOAD_REQUEST to the MCP */ 4518 bxe_send_unload_req(sc, unload_mode); 4519 4520 /* 4521 * Prevent transactions to the host from the functions on the 4522 * engine that doesn't reset global blocks in case of a global 4523 * attention once global blocks are reset and gates are opened 4524 * (the engine on which the leader will perform the recovery 4525 * last).
4526 */ 4527 if (!CHIP_IS_E1x(sc)) { 4528 bxe_pf_disable(sc); 4529 } 4530 4531 /* disable HW interrupts */ 4532 bxe_int_disable_sync(sc, TRUE); 4533 4534 /* detach interrupts */ 4535 bxe_interrupt_detach(sc); 4536 4537 /* Report UNLOAD_DONE to MCP */ 4538 bxe_send_unload_done(sc, FALSE); 4539 } 4540 4541 /* 4542 * At this stage no more interrupts will arrive so we may safely clean 4543 * the queue'able objects here in case they failed to get cleaned so far. 4544 */ 4545 if (IS_PF(sc)) { 4546 bxe_squeeze_objects(sc); 4547 } 4548 4549 /* There should be no more pending SP commands at this stage */ 4550 sc->sp_state = 0; 4551 4552 sc->port.pmf = 0; 4553 4554 bxe_free_fp_buffers(sc); 4555 4556 if (IS_PF(sc)) { 4557 bxe_free_mem(sc); 4558 } 4559 4560 bxe_free_fw_stats_mem(sc); 4561 4562 sc->state = BXE_STATE_CLOSED; 4563 4564 /* 4565 * Check if there are pending parity attentions. If there are - set 4566 * RECOVERY_IN_PROGRESS. 4567 */ 4568 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) { 4569 bxe_set_reset_in_progress(sc); 4570 4571 /* Set RESET_IS_GLOBAL if needed */ 4572 if (global) { 4573 bxe_set_reset_global(sc); 4574 } 4575 } 4576 4577 /* 4578 * The last driver must disable a "close the gate" if there is no 4579 * parity attention or "process kill" pending. 4580 */ 4581 if (IS_PF(sc) && !bxe_clear_pf_load(sc) && 4582 bxe_reset_is_done(sc, SC_PATH(sc))) { 4583 bxe_disable_close_the_gate(sc); 4584 } 4585 4586 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n"); 4587 4588 return (0); 4589 } 4590 4591 /* 4592 * Called by the OS to set various media options (i.e. link, speed, etc.) when 4593 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...". 4594 */ 4595 static int 4596 bxe_ifmedia_update(struct ifnet *ifp) 4597 { 4598 struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp); 4599 struct ifmedia *ifm; 4600 4601 ifm = &sc->ifmedia; 4602 4603 /* We only support Ethernet media type. */ 4604 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 4605 return (EINVAL); 4606 } 4607 4608 switch (IFM_SUBTYPE(ifm->ifm_media)) { 4609 case IFM_AUTO: 4610 break; 4611 case IFM_10G_CX4: 4612 case IFM_10G_SR: 4613 case IFM_10G_T: 4614 case IFM_10G_TWINAX: 4615 default: 4616 /* We don't support changing the media type. */ 4617 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", 4618 IFM_SUBTYPE(ifm->ifm_media)); 4619 return (EINVAL); 4620 } 4621 4622 return (0); 4623 } 4624 4625 /* 4626 * Called by the OS to get the current media status (i.e. link, speed, etc.). 4627 */ 4628 static void 4629 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) 4630 { 4631 struct bxe_softc *sc = if_getsoftc(ifp); 4632 4633 /* Report link down if the driver isn't running. */ 4634 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 4635 ifmr->ifm_active |= IFM_NONE; 4636 return; 4637 } 4638 4639 /* Setup the default interface info. 
*/ 4640 ifmr->ifm_status = IFM_AVALID; 4641 ifmr->ifm_active = IFM_ETHER; 4642 4643 if (sc->link_vars.link_up) { 4644 ifmr->ifm_status |= IFM_ACTIVE; 4645 } else { 4646 ifmr->ifm_active |= IFM_NONE; 4647 return; 4648 } 4649 4650 ifmr->ifm_active |= sc->media; 4651 4652 if (sc->link_vars.duplex == DUPLEX_FULL) { 4653 ifmr->ifm_active |= IFM_FDX; 4654 } else { 4655 ifmr->ifm_active |= IFM_HDX; 4656 } 4657 } 4658 4659 static int 4660 bxe_ioctl_nvram(struct bxe_softc *sc, 4661 uint32_t priv_op, 4662 struct ifreq *ifr) 4663 { 4664 struct bxe_nvram_data nvdata_base; 4665 struct bxe_nvram_data *nvdata; 4666 int len; 4667 int error = 0; 4668 4669 copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base)); 4670 4671 len = (sizeof(struct bxe_nvram_data) + 4672 nvdata_base.len - 4673 sizeof(uint32_t)); 4674 4675 if (len > sizeof(struct bxe_nvram_data)) { 4676 if ((nvdata = (struct bxe_nvram_data *) 4677 malloc(len, M_DEVBUF, 4678 (M_NOWAIT | M_ZERO))) == NULL) { 4679 BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n"); 4680 return (1); 4681 } 4682 memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data)); 4683 } else { 4684 nvdata = &nvdata_base; 4685 } 4686 4687 if (priv_op == BXE_IOC_RD_NVRAM) { 4688 BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n", 4689 nvdata->offset, nvdata->len); 4690 error = bxe_nvram_read(sc, 4691 nvdata->offset, 4692 (uint8_t *)nvdata->value, 4693 nvdata->len); 4694 copyout(nvdata, ifr->ifr_data, len); 4695 } else { /* BXE_IOC_WR_NVRAM */ 4696 BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n", 4697 nvdata->offset, nvdata->len); 4698 copyin(ifr->ifr_data, nvdata, len); 4699 error = bxe_nvram_write(sc, 4700 nvdata->offset, 4701 (uint8_t *)nvdata->value, 4702 nvdata->len); 4703 } 4704 4705 if (len > sizeof(struct bxe_nvram_data)) { 4706 free(nvdata, M_DEVBUF); 4707 } 4708 4709 return (error); 4710 } 4711 4712 static int 4713 bxe_ioctl_stats_show(struct bxe_softc *sc, 4714 uint32_t priv_op, 4715 struct ifreq *ifr) 4716 { 4717 const size_t str_size = (BXE_NUM_ETH_STATS * STAT_NAME_LEN); 4718 const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t)); 4719 caddr_t p_tmp; 4720 uint32_t *offset; 4721 int i; 4722 4723 switch (priv_op) 4724 { 4725 case BXE_IOC_STATS_SHOW_NUM: 4726 memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data)); 4727 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num = 4728 BXE_NUM_ETH_STATS; 4729 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len = 4730 STAT_NAME_LEN; 4731 return (0); 4732 4733 case BXE_IOC_STATS_SHOW_STR: 4734 memset(ifr->ifr_data, 0, str_size); 4735 p_tmp = ifr->ifr_data; 4736 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4737 strcpy(p_tmp, bxe_eth_stats_arr[i].string); 4738 p_tmp += STAT_NAME_LEN; 4739 } 4740 return (0); 4741 4742 case BXE_IOC_STATS_SHOW_CNT: 4743 memset(ifr->ifr_data, 0, stats_size); 4744 p_tmp = ifr->ifr_data; 4745 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4746 offset = ((uint32_t *)&sc->eth_stats + 4747 bxe_eth_stats_arr[i].offset); 4748 switch (bxe_eth_stats_arr[i].size) { 4749 case 4: 4750 *((uint64_t *)p_tmp) = (uint64_t)*offset; 4751 break; 4752 case 8: 4753 *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1)); 4754 break; 4755 default: 4756 *((uint64_t *)p_tmp) = 0; 4757 } 4758 p_tmp += sizeof(uint64_t); 4759 } 4760 return (0); 4761 4762 default: 4763 return (-1); 4764 } 4765 } 4766 4767 static void 4768 bxe_handle_chip_tq(void *context, 4769 int pending) 4770 { 4771 struct bxe_softc *sc = (struct bxe_softc *)context; 4772 long work = atomic_load_acq_long(&sc->chip_tq_flags); 4773 4774 switch (work) 4775 { 4776 
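/*
 * CHIP_TQ_REINIT is scheduled from other contexts in this file. An
 * illustrative (inactive) sketch of such a scheduling site, assuming the
 * sc->chip_tq/chip_tq_task pair this handler is registered on:
 */
#if 0
    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
#endif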
4777 case CHIP_TQ_REINIT: 4778 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 4779 /* restart the interface */ 4780 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); 4781 bxe_periodic_stop(sc); 4782 BXE_CORE_LOCK(sc); 4783 bxe_stop_locked(sc); 4784 bxe_init_locked(sc); 4785 BXE_CORE_UNLOCK(sc); 4786 } 4787 break; 4788 4789 default: 4790 break; 4791 } 4792 } 4793 4794 /* 4795 * Handles any IOCTL calls from the operating system. 4796 * 4797 * Returns: 4798 * 0 = Success, >0 Failure 4799 */ 4800 static int 4801 bxe_ioctl(if_t ifp, 4802 u_long command, 4803 caddr_t data) 4804 { 4805 struct bxe_softc *sc = if_getsoftc(ifp); 4806 struct ifreq *ifr = (struct ifreq *)data; 4807 struct bxe_nvram_data *nvdata; 4808 uint32_t priv_op; 4809 int mask = 0; 4810 int reinit = 0; 4811 int error = 0; 4812 4813 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); 4814 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); 4815 4816 switch (command) 4817 { 4818 case SIOCSIFMTU: 4819 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", 4820 ifr->ifr_mtu); 4821 4822 if (sc->mtu == ifr->ifr_mtu) { 4823 /* nothing to change */ 4824 break; 4825 } 4826 4827 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { 4828 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", 4829 ifr->ifr_mtu, mtu_min, mtu_max); 4830 error = EINVAL; 4831 break; 4832 } 4833 4834 atomic_store_rel_int((volatile unsigned int *)&sc->mtu, 4835 (unsigned long)ifr->ifr_mtu); 4836 /* 4837 atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp), 4838 (unsigned long)ifr->ifr_mtu); 4839 XXX - Not sure why it needs to be atomic 4840 */ 4841 if_setmtu(ifp, ifr->ifr_mtu); 4842 reinit = 1; 4843 break; 4844 4845 case SIOCSIFFLAGS: 4846 /* toggle the interface state up or down */ 4847 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); 4848 4849 BXE_CORE_LOCK(sc); 4850 /* check if the interface is up */ 4851 if (if_getflags(ifp) & IFF_UP) { 4852 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4853 /* set the receive mode flags */ 4854 bxe_set_rx_mode(sc); 4855 } else { 4856 bxe_init_locked(sc); 4857 } 4858 } else { 4859 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4860 bxe_periodic_stop(sc); 4861 bxe_stop_locked(sc); 4862 } 4863 } 4864 BXE_CORE_UNLOCK(sc); 4865 4866 break; 4867 4868 case SIOCADDMULTI: 4869 case SIOCDELMULTI: 4870 /* add/delete multicast addresses */ 4871 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n"); 4872 4873 /* check if the interface is up */ 4874 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4875 /* set the receive mode flags */ 4876 BXE_CORE_LOCK(sc); 4877 bxe_set_rx_mode(sc); 4878 BXE_CORE_UNLOCK(sc); 4879 } 4880 4881 break; 4882 4883 case SIOCSIFCAP: 4884 /* find out which capabilities have changed */ 4885 mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp)); 4886 4887 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n", 4888 mask); 4889 4890 /* toggle the LRO capabilities enable flag */ 4891 if (mask & IFCAP_LRO) { 4892 if_togglecapenable(ifp, IFCAP_LRO); 4893 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n", 4894 (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF"); 4895 reinit = 1; 4896 } 4897 4898 /* toggle the TXCSUM checksum capabilities enable flag */ 4899 if (mask & IFCAP_TXCSUM) { 4900 if_togglecapenable(ifp, IFCAP_TXCSUM); 4901 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n", 4902 (if_getcapenable(ifp) & IFCAP_TXCSUM) ?
"ON" : "OFF"); 4903 if (if_getcapenable(ifp) & IFCAP_TXCSUM) { 4904 if_sethwassistbits(ifp, (CSUM_IP | 4905 CSUM_TCP | 4906 CSUM_UDP | 4907 CSUM_TSO | 4908 CSUM_TCP_IPV6 | 4909 CSUM_UDP_IPV6), 0); 4910 } else { 4911 if_clearhwassist(ifp); /* XXX */ 4912 } 4913 } 4914 4915 /* toggle the RXCSUM checksum capabilities enable flag */ 4916 if (mask & IFCAP_RXCSUM) { 4917 if_togglecapenable(ifp, IFCAP_RXCSUM); 4918 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n", 4919 (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF"); 4920 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 4921 if_sethwassistbits(ifp, (CSUM_IP | 4922 CSUM_TCP | 4923 CSUM_UDP | 4924 CSUM_TSO | 4925 CSUM_TCP_IPV6 | 4926 CSUM_UDP_IPV6), 0); 4927 } else { 4928 if_clearhwassist(ifp); /* XXX */ 4929 } 4930 } 4931 4932 /* toggle TSO4 capabilities enabled flag */ 4933 if (mask & IFCAP_TSO4) { 4934 if_togglecapenable(ifp, IFCAP_TSO4); 4935 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n", 4936 (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF"); 4937 } 4938 4939 /* toggle TSO6 capabilities enabled flag */ 4940 if (mask & IFCAP_TSO6) { 4941 if_togglecapenable(ifp, IFCAP_TSO6); 4942 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n", 4943 (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF"); 4944 } 4945 4946 /* toggle VLAN_HWTSO capabilities enabled flag */ 4947 if (mask & IFCAP_VLAN_HWTSO) { 4948 4949 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 4950 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", 4951 (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); 4952 } 4953 4954 /* toggle VLAN_HWCSUM capabilities enabled flag */ 4955 if (mask & IFCAP_VLAN_HWCSUM) { 4956 /* XXX investigate this... */ 4957 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); 4958 error = EINVAL; 4959 } 4960 4961 /* toggle VLAN_MTU capabilities enable flag */ 4962 if (mask & IFCAP_VLAN_MTU) { 4963 /* XXX investigate this... */ 4964 BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); 4965 error = EINVAL; 4966 } 4967 4968 /* toggle VLAN_HWTAGGING capabilities enabled flag */ 4969 if (mask & IFCAP_VLAN_HWTAGGING) { 4970 /* XXX investigate this... */ 4971 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); 4972 error = EINVAL; 4973 } 4974 4975 /* toggle VLAN_HWFILTER capabilities enabled flag */ 4976 if (mask & IFCAP_VLAN_HWFILTER) { 4977 /* XXX investigate this... */ 4978 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); 4979 error = EINVAL; 4980 } 4981 4982 /* XXX not yet... 
4983 * IFCAP_WOL_MAGIC 4984 */ 4985 4986 break; 4987 4988 case SIOCSIFMEDIA: 4989 case SIOCGIFMEDIA: 4990 /* set/get interface media */ 4991 BLOGD(sc, DBG_IOCTL, 4992 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", 4993 (command & 0xff)); 4994 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 4995 break; 4996 4997 case SIOCGPRIVATE_0: 4998 copyin(ifr->ifr_data, &priv_op, sizeof(priv_op)); 4999 5000 switch (priv_op) 5001 { 5002 case BXE_IOC_RD_NVRAM: 5003 case BXE_IOC_WR_NVRAM: 5004 nvdata = (struct bxe_nvram_data *)ifr->ifr_data; 5005 BLOGD(sc, DBG_IOCTL, 5006 "Received Private NVRAM ioctl addr=0x%x size=%u\n", 5007 nvdata->offset, nvdata->len); 5008 error = bxe_ioctl_nvram(sc, priv_op, ifr); 5009 break; 5010 5011 case BXE_IOC_STATS_SHOW_NUM: 5012 case BXE_IOC_STATS_SHOW_STR: 5013 case BXE_IOC_STATS_SHOW_CNT: 5014 BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n", 5015 priv_op); 5016 error = bxe_ioctl_stats_show(sc, priv_op, ifr); 5017 break; 5018 5019 default: 5020 BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op); 5021 error = EINVAL; 5022 break; 5023 } 5024 5025 break; 5026 5027 default: 5028 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", 5029 (command & 0xff)); 5030 error = ether_ioctl(ifp, command, data); 5031 break; 5032 } 5033 5034 if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { 5035 BLOGD(sc, DBG_LOAD | DBG_IOCTL, 5036 "Re-initializing hardware from IOCTL change\n"); 5037 bxe_periodic_stop(sc); 5038 BXE_CORE_LOCK(sc); 5039 bxe_stop_locked(sc); 5040 bxe_init_locked(sc); 5041 BXE_CORE_UNLOCK(sc); 5042 } 5043 5044 return (error); 5045 } 5046 5047 static __noinline void 5048 bxe_dump_mbuf(struct bxe_softc *sc, 5049 struct mbuf *m, 5050 uint8_t contents) 5051 { 5052 char * type; 5053 int i = 0; 5054 5055 if (!(sc->debug & DBG_MBUF)) { 5056 return; 5057 } 5058 5059 if (m == NULL) { 5060 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); 5061 return; 5062 } 5063 5064 while (m) { 5065 BLOGD(sc, DBG_MBUF, 5066 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", 5067 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); 5068 5069 if (m->m_flags & M_PKTHDR) { 5070 BLOGD(sc, DBG_MBUF, 5071 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", 5072 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, 5073 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 5074 } 5075 5076 if (m->m_flags & M_EXT) { 5077 switch (m->m_ext.ext_type) { 5078 case EXT_CLUSTER: type = "EXT_CLUSTER"; break; 5079 case EXT_SFBUF: type = "EXT_SFBUF"; break; 5080 case EXT_JUMBOP: type = "EXT_JUMBOP"; break; 5081 case EXT_JUMBO9: type = "EXT_JUMBO9"; break; 5082 case EXT_JUMBO16: type = "EXT_JUMBO16"; break; 5083 case EXT_PACKET: type = "EXT_PACKET"; break; 5084 case EXT_MBUF: type = "EXT_MBUF"; break; 5085 case EXT_NET_DRV: type = "EXT_NET_DRV"; break; 5086 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break; 5087 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break; 5088 case EXT_EXTREF: type = "EXT_EXTREF"; break; 5089 default: type = "UNKNOWN"; break; 5090 } 5091 5092 BLOGD(sc, DBG_MBUF, 5093 "%02d: - m_ext: %p ext_size=%d type=%s\n", 5094 i, m->m_ext.ext_buf, m->m_ext.ext_size, type); 5095 } 5096 5097 if (contents) { 5098 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE); 5099 } 5100 5101 m = m->m_next; 5102 i++; 5103 } 5104 } 5105 5106 /* 5107 * Checks to ensure the 13 bd sliding window is >= MSS for TSO. 5108 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS. 
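 * For example, with an MSS of 1460 every 10-segment window of payload must
 * sum to at least 1460 bytes, otherwise the caller defragments the mbuf.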
5109 * The window: 3 bds are reserved = 1 for the headers BD + 2 for the parse BD 5110 * and the last BD. The headers come in a separate bd in FreeBSD so 13-3=10. 5111 * Returns: 0 if OK to send, 1 if the packet needs further defragmentation 5112 */ 5113 static int 5114 bxe_chktso_window(struct bxe_softc *sc, 5115 int nsegs, 5116 bus_dma_segment_t *segs, 5117 struct mbuf *m) 5118 { 5119 uint32_t num_wnds, wnd_size, wnd_sum; 5120 int32_t frag_idx, wnd_idx; 5121 unsigned short lso_mss; 5122 int defrag; 5123 5124 defrag = 0; 5125 wnd_sum = 0; 5126 wnd_size = 10; 5127 num_wnds = nsegs - wnd_size; 5128 lso_mss = htole16(m->m_pkthdr.tso_segsz); 5129 5130 /* 5131 * The total header lengths (Eth+IP+TCP) are in the first FreeBSD mbuf, so 5132 * calculate the first window sum of data while skipping the first segment, 5133 * assuming it is the header. 5134 */ 5135 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) { 5136 wnd_sum += htole16(segs[frag_idx].ds_len); 5137 } 5138 5139 /* check the first 10 bd window size */ 5140 if (wnd_sum < lso_mss) { 5141 return (1); 5142 } 5143 5144 /* run through the windows */ 5145 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) { 5146 /* subtract the first mbuf->m_len of the last wndw(-header) */ 5147 wnd_sum -= htole16(segs[wnd_idx+1].ds_len); 5148 /* add the next mbuf len to the len of our new window */ 5149 wnd_sum += htole16(segs[frag_idx].ds_len); 5150 if (wnd_sum < lso_mss) { 5151 return (1); 5152 } 5153 } 5154 5155 return (0); 5156 } 5157 5158 static uint8_t 5159 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp, 5160 struct mbuf *m, 5161 uint32_t *parsing_data) 5162 { 5163 struct ether_vlan_header *eh = NULL; 5164 struct ip *ip4 = NULL; 5165 struct ip6_hdr *ip6 = NULL; 5166 caddr_t ip = NULL; 5167 struct tcphdr *th = NULL; 5168 int e_hlen, ip_hlen, l4_off; 5169 uint16_t proto; 5170 5171 if (m->m_pkthdr.csum_flags == CSUM_IP) { 5172 /* no L4 checksum offload needed */ 5173 return (0); 5174 } 5175 5176 /* get the Ethernet header */ 5177 eh = mtod(m, struct ether_vlan_header *); 5178 5179 /* handle VLAN encapsulation if present */ 5180 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 5181 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 5182 proto = ntohs(eh->evl_proto); 5183 } else { 5184 e_hlen = ETHER_HDR_LEN; 5185 proto = ntohs(eh->evl_encap_proto); 5186 } 5187 5188 switch (proto) { 5189 case ETHERTYPE_IP: 5190 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 5191 ip4 = (m->m_len < sizeof(struct ip)) ? 5192 (struct ip *)m->m_next->m_data : 5193 (struct ip *)(m->m_data + e_hlen); 5194 /* ip_hl is number of 32-bit words */ 5195 ip_hlen = (ip4->ip_hl << 2); 5196 ip = (caddr_t)ip4; 5197 break; 5198 case ETHERTYPE_IPV6: 5199 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 5200 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 5201 (struct ip6_hdr *)m->m_next->m_data : 5202 (struct ip6_hdr *)(m->m_data + e_hlen); 5203 /* XXX cannot support offload with IPv6 extensions */ 5204 ip_hlen = sizeof(struct ip6_hdr); 5205 ip = (caddr_t)ip6; 5206 break; 5207 default: 5208 /* We can't offload in this case... */ 5209 /* XXX error stat ???
*/ 5210 return (0); 5211 } 5212 5213 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5214 l4_off = (e_hlen + ip_hlen); 5215 5216 *parsing_data |= 5217 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 5218 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); 5219 5220 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5221 CSUM_TSO | 5222 CSUM_TCP_IPV6)) { 5223 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5224 th = (struct tcphdr *)(ip + ip_hlen); 5225 /* th_off is number of 32-bit words */ 5226 *parsing_data |= ((th->th_off << 5227 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 5228 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); 5229 return (l4_off + (th->th_off << 2)); /* entire header length */ 5230 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5231 CSUM_UDP_IPV6)) { 5232 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5233 return (l4_off + sizeof(struct udphdr)); /* entire header length */ 5234 } else { 5235 /* XXX error stat ??? */ 5236 return (0); 5237 } 5238 } 5239 5240 static uint8_t 5241 bxe_set_pbd_csum(struct bxe_fastpath *fp, 5242 struct mbuf *m, 5243 struct eth_tx_parse_bd_e1x *pbd) 5244 { 5245 struct ether_vlan_header *eh = NULL; 5246 struct ip *ip4 = NULL; 5247 struct ip6_hdr *ip6 = NULL; 5248 caddr_t ip = NULL; 5249 struct tcphdr *th = NULL; 5250 struct udphdr *uh = NULL; 5251 int e_hlen, ip_hlen; 5252 uint16_t proto; 5253 uint8_t hlen; 5254 uint16_t tmp_csum; 5255 uint32_t *tmp_uh; 5256 5257 /* get the Ethernet header */ 5258 eh = mtod(m, struct ether_vlan_header *); 5259 5260 /* handle VLAN encapsulation if present */ 5261 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 5262 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 5263 proto = ntohs(eh->evl_proto); 5264 } else { 5265 e_hlen = ETHER_HDR_LEN; 5266 proto = ntohs(eh->evl_encap_proto); 5267 } 5268 5269 switch (proto) { 5270 case ETHERTYPE_IP: 5271 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 5272 ip4 = (m->m_len < sizeof(struct ip)) ? 5273 (struct ip *)m->m_next->m_data : 5274 (struct ip *)(m->m_data + e_hlen); 5275 /* ip_hl is number of 32-bit words */ 5276 ip_hlen = (ip4->ip_hl << 1); 5277 ip = (caddr_t)ip4; 5278 break; 5279 case ETHERTYPE_IPV6: 5280 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 5281 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 5282 (struct ip6_hdr *)m->m_next->m_data : 5283 (struct ip6_hdr *)(m->m_data + e_hlen); 5284 /* XXX cannot support offload with IPv6 extensions */ 5285 ip_hlen = (sizeof(struct ip6_hdr) >> 1); 5286 ip = (caddr_t)ip6; 5287 break; 5288 default: 5289 /* We can't offload in this case... */ 5290 /* XXX error stat ??? 
*/ 5291 return (0); 5292 } 5293 5294 hlen = (e_hlen >> 1); 5295 5296 /* note that rest of global_data is indirectly zeroed here */ 5297 if (m->m_flags & M_VLANTAG) { 5298 pbd->global_data = 5299 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 5300 } else { 5301 pbd->global_data = htole16(hlen); 5302 } 5303 5304 pbd->ip_hlen_w = ip_hlen; 5305 5306 hlen += pbd->ip_hlen_w; 5307 5308 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5309 5310 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5311 CSUM_TSO | 5312 CSUM_TCP_IPV6)) { 5313 th = (struct tcphdr *)(ip + (ip_hlen << 1)); 5314 /* th_off is number of 32-bit words */ 5315 hlen += (uint16_t)(th->th_off << 1); 5316 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5317 CSUM_UDP_IPV6)) { 5318 uh = (struct udphdr *)(ip + (ip_hlen << 1)); 5319 hlen += (sizeof(struct udphdr) / 2); 5320 } else { 5321 /* valid case as only CSUM_IP was set */ 5322 return (0); 5323 } 5324 5325 pbd->total_hlen_w = htole16(hlen); 5326 5327 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5328 CSUM_TSO | 5329 CSUM_TCP_IPV6)) { 5330 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5331 pbd->tcp_pseudo_csum = ntohs(th->th_sum); 5332 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5333 CSUM_UDP_IPV6)) { 5334 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5335 5336 /* 5337 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP 5338 * checksums and does not know anything about the UDP header and where 5339 * the checksum field is located. It only knows about TCP. Therefore 5340 * we "lie" to the hardware for outgoing UDP packets w/ checksum 5341 * offload. Since the checksum field offset for TCP is 16 bytes and 5342 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 5343 * bytes less than the start of the UDP header. This allows the 5344 * hardware to write the checksum in the correct spot. But the 5345 * hardware will compute a checksum which includes the last 10 bytes 5346 * of the IP header. To correct this we tweak the stack computed 5347 * pseudo checksum by folding in the calculation of the inverse 5348 * checksum for those final 10 bytes of the IP header. This allows 5349 * the correct checksum to be computed by the hardware. 5350 */ 5351 5352 /* set pointer 10 bytes before UDP header */ 5353 tmp_uh = (uint32_t *)((uint8_t *)uh - 10); 5354 5355 /* calculate a pseudo header checksum over the first 10 bytes */ 5356 tmp_csum = in_pseudo(*tmp_uh, 5357 *(tmp_uh + 1), 5358 *(uint16_t *)(tmp_uh + 2)); 5359 5360 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); 5361 } 5362 5363 return (hlen * 2); /* entire header length, number of bytes */ 5364 } 5365 5366 static void 5367 bxe_set_pbd_lso_e2(struct mbuf *m, 5368 uint32_t *parsing_data) 5369 { 5370 *parsing_data |= ((m->m_pkthdr.tso_segsz << 5371 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 5372 ETH_TX_PARSE_BD_E2_LSO_MSS); 5373 5374 /* XXX test for IPv6 with extension header... */ 5375 #if 0 5376 struct ip6_hdr *ip6; 5377 if (ip6 && ip6->ip6_nxt == 'some ipv6 extension header') 5378 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 5379 #endif 5380 } 5381 5382 static void 5383 bxe_set_pbd_lso(struct mbuf *m, 5384 struct eth_tx_parse_bd_e1x *pbd) 5385 { 5386 struct ether_vlan_header *eh = NULL; 5387 struct ip *ip = NULL; 5388 struct tcphdr *th = NULL; 5389 int e_hlen; 5390 5391 /* get the Ethernet header */ 5392 eh = mtod(m, struct ether_vlan_header *); 5393 5394 /* handle VLAN encapsulation if present */ 5395 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? 
5396 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN; 5397 5398 /* get the IP and TCP headers; with LSO the entire header is in the first mbuf */ 5399 /* XXX assuming IPv4 */ 5400 ip = (struct ip *)(m->m_data + e_hlen); 5401 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 5402 5403 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz); 5404 pbd->tcp_send_seq = ntohl(th->th_seq); 5405 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff); 5406 5407 #if 1 5408 /* XXX IPv4 */ 5409 pbd->ip_id = ntohs(ip->ip_id); 5410 pbd->tcp_pseudo_csum = 5411 ntohs(in_pseudo(ip->ip_src.s_addr, 5412 ip->ip_dst.s_addr, 5413 htons(IPPROTO_TCP))); 5414 #else 5415 /* XXX IPv6 */ 5416 pbd->tcp_pseudo_csum = 5417 ntohs(in_pseudo(&ip6->ip6_src, 5418 &ip6->ip6_dst, 5419 htons(IPPROTO_TCP))); 5420 #endif 5421 5422 pbd->global_data |= 5423 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); 5424 } 5425 5426 /* 5427 * Encapsulates an mbuf cluster into the TX BD chain and makes the memory 5428 * visible to the controller. 5429 * 5430 * If an mbuf is submitted to this routine and cannot be given to the 5431 * controller (e.g. it has too many fragments) then the function may free 5432 * the mbuf and return to the caller. 5433 * 5434 * Returns: 5435 * 0 = Success, !0 = Failure 5436 * Note the side effect that an mbuf may be freed if it causes a problem. 5437 */ 5438 static int 5439 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head) 5440 { 5441 bus_dma_segment_t segs[32]; 5442 struct mbuf *m0; 5443 struct bxe_sw_tx_bd *tx_buf; 5444 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 5445 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 5446 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */ 5447 struct eth_tx_bd *tx_data_bd; 5448 struct eth_tx_bd *tx_total_pkt_size_bd; 5449 struct eth_tx_start_bd *tx_start_bd; 5450 uint16_t bd_prod, pkt_prod, total_pkt_size; 5451 uint8_t mac_type; 5452 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan; 5453 struct bxe_softc *sc; 5454 uint16_t tx_bd_avail; 5455 struct ether_vlan_header *eh; 5456 uint32_t pbd_e2_parsing_data = 0; 5457 uint8_t hlen = 0; 5458 int tmp_bd; 5459 int i; 5460 5461 sc = fp->sc; 5462 5463 M_ASSERTPKTHDR(*m_head); 5464 5465 m0 = *m_head; 5466 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0; 5467 tx_start_bd = NULL; 5468 tx_data_bd = NULL; 5469 tx_total_pkt_size_bd = NULL; 5470 5471 /* get the H/W pointer for packets and BDs */ 5472 pkt_prod = fp->tx_pkt_prod; 5473 bd_prod = fp->tx_bd_prod; 5474 5475 mac_type = UNICAST_ADDRESS; 5476 5477 /* map the mbuf into the next open DMAable memory */ 5478 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)]; 5479 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5480 tx_buf->m_map, m0, 5481 segs, &nsegs, BUS_DMA_NOWAIT); 5482 5483 /* mapping errors */ 5484 if(__predict_false(error != 0)) { 5485 fp->eth_q_stats.tx_dma_mapping_failure++; 5486 if (error == ENOMEM) { 5487 /* resource issue, try again later */ 5488 rc = ENOMEM; 5489 } else if (error == EFBIG) { 5490 /* possibly recoverable with defragmentation */ 5491 fp->eth_q_stats.mbuf_defrag_attempts++; 5492 m0 = m_defrag(*m_head, M_NOWAIT); 5493 if (m0 == NULL) { 5494 fp->eth_q_stats.mbuf_defrag_failures++; 5495 rc = ENOBUFS; 5496 } else { 5497 /* defrag successful, try mapping again */ 5498 *m_head = m0; 5499 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5500 tx_buf->m_map, m0, 5501 segs, &nsegs, BUS_DMA_NOWAIT); 5502 if (error) { 5503 fp->eth_q_stats.tx_dma_mapping_failure++; 5504 rc = error; 5505 } 5506 } 5507 } else { 5508 /* unknown, unrecoverable mapping error */ 5509
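/* (i.e. anything other than ENOMEM or EFBIG back from bus_dmamap_load_mbuf_sg) */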
BLOGE(sc, "Unknown TX mapping error rc=%d\n", error); 5510 bxe_dump_mbuf(sc, m0, FALSE); 5511 rc = error; 5512 } 5513 5514 goto bxe_tx_encap_continue; 5515 } 5516 5517 tx_bd_avail = bxe_tx_avail(sc, fp); 5518 5519 /* make sure there is enough room in the send queue */ 5520 if (__predict_false(tx_bd_avail < (nsegs + 2))) { 5521 /* Recoverable, try again later. */ 5522 fp->eth_q_stats.tx_hw_queue_full++; 5523 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5524 rc = ENOMEM; 5525 goto bxe_tx_encap_continue; 5526 } 5527 5528 /* capture the current H/W TX chain high watermark */ 5529 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth < 5530 (TX_BD_USABLE - tx_bd_avail))) { 5531 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail); 5532 } 5533 5534 /* make sure it fits in the packet window */ 5535 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5536 /* 5537 * The mbuf may be to big for the controller to handle. If the frame 5538 * is a TSO frame we'll need to do an additional check. 5539 */ 5540 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5541 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) { 5542 goto bxe_tx_encap_continue; /* OK to send */ 5543 } else { 5544 fp->eth_q_stats.tx_window_violation_tso++; 5545 } 5546 } else { 5547 fp->eth_q_stats.tx_window_violation_std++; 5548 } 5549 5550 /* lets try to defragment this mbuf and remap it */ 5551 fp->eth_q_stats.mbuf_defrag_attempts++; 5552 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5553 5554 m0 = m_defrag(*m_head, M_NOWAIT); 5555 if (m0 == NULL) { 5556 fp->eth_q_stats.mbuf_defrag_failures++; 5557 /* Ugh, just drop the frame... :( */ 5558 rc = ENOBUFS; 5559 } else { 5560 /* defrag successful, try mapping again */ 5561 *m_head = m0; 5562 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, 5563 tx_buf->m_map, m0, 5564 segs, &nsegs, BUS_DMA_NOWAIT); 5565 if (error) { 5566 fp->eth_q_stats.tx_dma_mapping_failure++; 5567 /* No sense in trying to defrag/copy chain, drop it. 
:( */ 5568 rc = error; 5569 } 5570 else { 5571 /* if the chain is still too long then drop it */ 5572 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5573 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5574 rc = ENODEV; 5575 } 5576 } 5577 } 5578 } 5579 5580 bxe_tx_encap_continue: 5581 5582 /* Check for errors */ 5583 if (rc) { 5584 if (rc == ENOMEM) { 5585 /* recoverable, try again later */ 5586 } else { 5587 fp->eth_q_stats.tx_soft_errors++; 5588 fp->eth_q_stats.mbuf_alloc_tx--; 5589 m_freem(*m_head); 5590 *m_head = NULL; 5591 } 5592 5593 return (rc); 5594 } 5595 5596 /* set flag according to packet type (UNICAST_ADDRESS is default) */ 5597 if (m0->m_flags & M_BCAST) { 5598 mac_type = BROADCAST_ADDRESS; 5599 } else if (m0->m_flags & M_MCAST) { 5600 mac_type = MULTICAST_ADDRESS; 5601 } 5602 5603 /* store the mbuf into the mbuf ring */ 5604 tx_buf->m = m0; 5605 tx_buf->first_bd = fp->tx_bd_prod; 5606 tx_buf->flags = 0; 5607 5608 /* prepare the first transmit (start) BD for the mbuf */ 5609 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; 5610 5611 BLOGD(sc, DBG_TX, 5612 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", 5613 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 5614 5615 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 5616 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 5617 tx_start_bd->nbytes = htole16(segs[0].ds_len); 5618 total_pkt_size += tx_start_bd->nbytes; 5619 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 5620 5621 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 5622 5623 /* all frames have at least Start BD + Parsing BD */ 5624 nbds = nsegs + 1; 5625 tx_start_bd->nbd = htole16(nbds); 5626 5627 if (m0->m_flags & M_VLANTAG) { 5628 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); 5629 tx_start_bd->bd_flags.as_bitfield |= 5630 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 5631 } else { 5632 /* vf tx, start bd must hold the ethertype for fw to enforce it */ 5633 if (IS_VF(sc)) { 5634 /* map ethernet header to find type and header length */ 5635 eh = mtod(m0, struct ether_vlan_header *); 5636 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; 5637 } else { 5638 /* used by FW for packet accounting */ 5639 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); 5640 #if 0 5641 /* 5642 * If NPAR-SD is active then FW should do the tagging regardless 5643 * of value of priority. Otherwise, if priority indicates this is 5644 * a control packet we need to indicate to FW to avoid tagging. 5645 */ 5646 if (!IS_MF_AFEX(sc) && (mbuf priority == PRIO_CONTROL)) { 5647 SET_FLAG(tx_start_bd->general_data, 5648 ETH_TX_START_BD_FORCE_VLAN_MODE, 1); 5649 } 5650 #endif 5651 } 5652 } 5653 5654 /* 5655 * add a parsing BD from the chain.
The parsing BD is always added 5656 * though it is only used for TSO and chksum 5657 */ 5658 bd_prod = TX_BD_NEXT(bd_prod); 5659 5660 if (m0->m_pkthdr.csum_flags) { 5661 if (m0->m_pkthdr.csum_flags & CSUM_IP) { 5662 fp->eth_q_stats.tx_ofld_frames_csum_ip++; 5663 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; 5664 } 5665 5666 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { 5667 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5668 ETH_TX_BD_FLAGS_L4_CSUM); 5669 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { 5670 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | 5671 ETH_TX_BD_FLAGS_IS_UDP | 5672 ETH_TX_BD_FLAGS_L4_CSUM); 5673 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || 5674 (m0->m_pkthdr.csum_flags & CSUM_TSO)) { 5675 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 5676 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { 5677 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | 5678 ETH_TX_BD_FLAGS_IS_UDP); 5679 } 5680 } 5681 5682 if (!CHIP_IS_E1x(sc)) { 5683 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; 5684 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 5685 5686 if (m0->m_pkthdr.csum_flags) { 5687 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); 5688 } 5689 5690 #if 0 5691 /* 5692 * Add the MACs to the parsing BD if the module param was 5693 * explicitly set, if this is a vf, or in switch independent 5694 * mode. 5695 */ 5696 if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) { 5697 eh = mtod(m0, struct ether_vlan_header *); 5698 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, 5699 &pbd_e2->data.mac_addr.src_mid, 5700 &pbd_e2->data.mac_addr.src_lo, 5701 eh->evl_shost); 5702 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, 5703 &pbd_e2->data.mac_addr.dst_mid, 5704 &pbd_e2->data.mac_addr.dst_lo, 5705 eh->evl_dhost); 5706 } 5707 #endif 5708 5709 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, 5710 mac_type); 5711 } else { 5712 uint16_t global_data = 0; 5713 5714 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; 5715 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 5716 5717 if (m0->m_pkthdr.csum_flags) { 5718 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); 5719 } 5720 5721 SET_FLAG(global_data, 5722 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); 5723 pbd_e1x->global_data |= htole16(global_data); 5724 } 5725 5726 /* setup the parsing BD with TSO specific info */ 5727 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 5728 fp->eth_q_stats.tx_ofld_frames_lso++; 5729 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 5730 5731 if (__predict_false(tx_start_bd->nbytes > hlen)) { 5732 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; 5733 5734 /* split the first BD into header/data making the fw job easy */ 5735 nbds++; 5736 tx_start_bd->nbd = htole16(nbds); 5737 tx_start_bd->nbytes = htole16(hlen); 5738 5739 bd_prod = TX_BD_NEXT(bd_prod); 5740 5741 /* new transmit BD after the tx_parse_bd */ 5742 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5743 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); 5744 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); 5745 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); 5746 if (tx_total_pkt_size_bd == NULL) { 5747 tx_total_pkt_size_bd = tx_data_bd; 5748 } 5749 5750 BLOGD(sc, DBG_TX, 5751 "TSO split header size is %d (%x:%x) nbds %d\n", 5752 le16toh(tx_start_bd->nbytes), 5753 le32toh(tx_start_bd->addr_hi), 5754 le32toh(tx_start_bd->addr_lo), 5755 nbds); 5756 } 5757 5758 if (!CHIP_IS_E1x(sc)) { 5759 
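/* E2 and newer chips take the MSS in the parsing_data word of the parse BD */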
bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); 5760 } else { 5761 bxe_set_pbd_lso(m0, pbd_e1x); 5762 } 5763 } 5764 5765 if (pbd_e2_parsing_data) { 5766 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); 5767 } 5768 5769 /* prepare remaining BDs, start tx bd contains first seg/frag */ 5770 for (i = 1; i < nsegs ; i++) { 5771 bd_prod = TX_BD_NEXT(bd_prod); 5772 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; 5773 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); 5774 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); 5775 tx_data_bd->nbytes = htole16(segs[i].ds_len); 5776 if (tx_total_pkt_size_bd == NULL) { 5777 tx_total_pkt_size_bd = tx_data_bd; 5778 } 5779 total_pkt_size += tx_data_bd->nbytes; 5780 } 5781 5782 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); 5783 5784 if (tx_total_pkt_size_bd != NULL) { 5785 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; 5786 } 5787 5788 if (__predict_false(sc->debug & DBG_TX)) { 5789 tmp_bd = tx_buf->first_bd; 5790 for (i = 0; i < nbds; i++) 5791 { 5792 if (i == 0) { 5793 BLOGD(sc, DBG_TX, 5794 "TX Strt: %p bd=%d nbd=%d vlan=0x%x " 5795 "bd_flags=0x%x hdr_nbds=%d\n", 5796 tx_start_bd, 5797 tmp_bd, 5798 le16toh(tx_start_bd->nbd), 5799 le16toh(tx_start_bd->vlan_or_ethertype), 5800 tx_start_bd->bd_flags.as_bitfield, 5801 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); 5802 } else if (i == 1) { 5803 if (pbd_e1x) { 5804 BLOGD(sc, DBG_TX, 5805 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " 5806 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " 5807 "tcp_seq=%u total_hlen_w=%u\n", 5808 pbd_e1x, 5809 tmp_bd, 5810 pbd_e1x->global_data, 5811 pbd_e1x->ip_hlen_w, 5812 pbd_e1x->ip_id, 5813 pbd_e1x->lso_mss, 5814 pbd_e1x->tcp_flags, 5815 pbd_e1x->tcp_pseudo_csum, 5816 pbd_e1x->tcp_send_seq, 5817 le16toh(pbd_e1x->total_hlen_w)); 5818 } else { /* if (pbd_e2) */ 5819 BLOGD(sc, DBG_TX, 5820 "-> Parse: %p bd=%d dst=%02x:%02x:%02x " 5821 "src=%02x:%02x:%02x parsing_data=0x%x\n", 5822 pbd_e2, 5823 tmp_bd, 5824 pbd_e2->data.mac_addr.dst_hi, 5825 pbd_e2->data.mac_addr.dst_mid, 5826 pbd_e2->data.mac_addr.dst_lo, 5827 pbd_e2->data.mac_addr.src_hi, 5828 pbd_e2->data.mac_addr.src_mid, 5829 pbd_e2->data.mac_addr.src_lo, 5830 pbd_e2->parsing_data); 5831 } 5832 } 5833 5834 if (i != 1) { /* skip the parse bd as it doesn't hold data */ 5835 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; 5836 BLOGD(sc, DBG_TX, 5837 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo=0x%x\n", 5838 tx_data_bd, 5839 tmp_bd, 5840 le16toh(tx_data_bd->nbytes), 5841 le32toh(tx_data_bd->addr_hi), 5842 le32toh(tx_data_bd->addr_lo)); 5843 } 5844 5845 tmp_bd = TX_BD_NEXT(tmp_bd); 5846 } 5847 } 5848 5849 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); 5850 5851 /* update TX BD producer index value for next TX */ 5852 bd_prod = TX_BD_NEXT(bd_prod); 5853 5854 /* 5855 * If the chain of tx_bd's describing this frame is adjacent to or spans 5856 * an eth_tx_next_bd element then we need to increment the nbds value. 5857 */ 5858 if (TX_BD_IDX(bd_prod) < nbds) { 5859 nbds++; 5860 } 5861 5862 /* don't allow reordering of writes for nbd and packets */ 5863 mb(); 5864 5865 fp->tx_db.data.prod += nbds; 5866 5867 /* producer points to the next free tx_bd at this point */ 5868 fp->tx_pkt_prod++; 5869 fp->tx_bd_prod = bd_prod; 5870 5871 DOORBELL(sc, fp->index, fp->tx_db.raw); 5872 5873 fp->eth_q_stats.tx_pkts++; 5874 5875 /* Prevent speculative reads from getting ahead of the status block.
*/ 5876 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 5877 0, 0, BUS_SPACE_BARRIER_READ); 5878 5879 /* Prevent speculative reads from getting ahead of the doorbell. */ 5880 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 5881 0, 0, BUS_SPACE_BARRIER_READ); 5882 5883 return (0); 5884 } 5885 5886 static void 5887 bxe_tx_start_locked(struct bxe_softc *sc, 5888 if_t ifp, 5889 struct bxe_fastpath *fp) 5890 { 5891 struct mbuf *m = NULL; 5892 int tx_count = 0; 5893 uint16_t tx_bd_avail; 5894 5895 BXE_FP_TX_LOCK_ASSERT(fp); 5896 5897 /* keep adding entries while there are frames to send */ 5898 while (!if_sendq_empty(ifp)) { 5899 5900 /* 5901 * check for any frames to send 5902 * dequeue can still be NULL even if queue is not empty 5903 */ 5904 m = if_dequeue(ifp); 5905 if (__predict_false(m == NULL)) { 5906 break; 5907 } 5908 5909 /* the mbuf now belongs to us */ 5910 fp->eth_q_stats.mbuf_alloc_tx++; 5911 5912 /* 5913 * Put the frame into the transmit ring. If we don't have room, 5914 * place the mbuf back at the head of the TX queue, set the 5915 * OACTIVE flag, and wait for the NIC to drain the chain. 5916 */ 5917 if (__predict_false(bxe_tx_encap(fp, &m))) { 5918 fp->eth_q_stats.tx_encap_failures++; 5919 if (m != NULL) { 5920 /* mark the TX queue as full and return the frame */ 5921 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 5922 if_sendq_prepend(ifp, m); 5923 fp->eth_q_stats.mbuf_alloc_tx--; 5924 fp->eth_q_stats.tx_queue_xoff++; 5925 } 5926 5927 /* stop looking for more work */ 5928 break; 5929 } 5930 5931 /* the frame was enqueued successfully */ 5932 tx_count++; 5933 5934 /* send a copy of the frame to any BPF listeners. */ 5935 if_etherbpfmtap(ifp, m); 5936 5937 tx_bd_avail = bxe_tx_avail(sc, fp); 5938 5939 /* handle any completions if we're running low */ 5940 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5941 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5942 bxe_txeof(sc, fp); 5943 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 5944 break; 5945 } 5946 } 5947 } 5948 5949 /* all TX packets were dequeued and/or the tx ring is full */ 5950 if (tx_count > 0) { 5951 /* reset the TX watchdog timeout timer */ 5952 fp->watchdog_timer = BXE_TX_TIMEOUT; 5953 } 5954 } 5955 5956 /* Legacy (non-RSS) dispatch routine */ 5957 static void 5958 bxe_tx_start(if_t ifp) 5959 { 5960 struct bxe_softc *sc; 5961 struct bxe_fastpath *fp; 5962 5963 sc = if_getsoftc(ifp); 5964 5965 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 5966 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 5967 return; 5968 } 5969 5970 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 5971 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); 5972 return; 5973 } 5974 5975 if (!sc->link_vars.link_up) { 5976 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 5977 return; 5978 } 5979 5980 fp = &sc->fp[0]; 5981 5982 BXE_FP_TX_LOCK(fp); 5983 bxe_tx_start_locked(sc, ifp, fp); 5984 BXE_FP_TX_UNLOCK(fp); 5985 } 5986 5987 #if __FreeBSD_version >= 800000 5988 5989 static int 5990 bxe_tx_mq_start_locked(struct bxe_softc *sc, 5991 if_t ifp, 5992 struct bxe_fastpath *fp, 5993 struct mbuf *m) 5994 { 5995 struct buf_ring *tx_br = fp->tx_br; 5996 struct mbuf *next; 5997 int depth, rc, tx_count; 5998 uint16_t tx_bd_avail; 5999 6000 rc = tx_count = 0; 6001 6002 if (!tx_br) { 6003 BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); 6004 return (EINVAL); 6005 } 6006 6007 /* fetch the depth of the driver queue */ 6008 depth = drbr_inuse_drv(ifp, tx_br); 6009 if (depth > 
fp->eth_q_stats.tx_max_drbr_queue_depth) { 6010 fp->eth_q_stats.tx_max_drbr_queue_depth = depth; 6011 } 6012 6013 BXE_FP_TX_LOCK_ASSERT(fp); 6014 6015 if (m == NULL) { 6016 /* no new work, check for pending frames */ 6017 next = drbr_dequeue_drv(ifp, tx_br); 6018 } else if (drbr_needs_enqueue_drv(ifp, tx_br)) { 6019 /* have both new and pending work, maintain packet order */ 6020 rc = drbr_enqueue_drv(ifp, tx_br, m); 6021 if (rc != 0) { 6022 fp->eth_q_stats.tx_soft_errors++; 6023 goto bxe_tx_mq_start_locked_exit; 6024 } 6025 next = drbr_dequeue_drv(ifp, tx_br); 6026 } else { 6027 /* new work only and nothing pending */ 6028 next = m; 6029 } 6030 6031 /* keep adding entries while there are frames to send */ 6032 while (next != NULL) { 6033 6034 /* the mbuf now belongs to us */ 6035 fp->eth_q_stats.mbuf_alloc_tx++; 6036 6037 /* 6038 * Put the frame into the transmit ring. If we don't have room, 6039 * place the mbuf back at the head of the TX queue, set the 6040 * OACTIVE flag, and wait for the NIC to drain the chain. 6041 */ 6042 rc = bxe_tx_encap(fp, &next); 6043 if (__predict_false(rc != 0)) { 6044 fp->eth_q_stats.tx_encap_failures++; 6045 if (next != NULL) { 6046 /* mark the TX queue as full and save the frame */ 6047 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 6048 /* XXX this may reorder the frame */ 6049 rc = drbr_enqueue_drv(ifp, tx_br, next); 6050 fp->eth_q_stats.mbuf_alloc_tx--; 6051 fp->eth_q_stats.tx_frames_deferred++; 6052 } 6053 6054 /* stop looking for more work */ 6055 break; 6056 } 6057 6058 /* the transmit frame was enqueued successfully */ 6059 tx_count++; 6060 6061 /* send a copy of the frame to any BPF listeners */ 6062 if_etherbpfmtap(ifp, next); 6063 6064 tx_bd_avail = bxe_tx_avail(sc, fp); 6065 6066 /* handle any completions if we're running low */ 6067 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 6068 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 6069 bxe_txeof(sc, fp); 6070 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 6071 break; 6072 } 6073 } 6074 6075 next = drbr_dequeue_drv(ifp, tx_br); 6076 } 6077 6078 /* all TX packets were dequeued and/or the tx ring is full */ 6079 if (tx_count > 0) { 6080 /* reset the TX watchdog timeout timer */ 6081 fp->watchdog_timer = BXE_TX_TIMEOUT; 6082 } 6083 6084 bxe_tx_mq_start_locked_exit: 6085 6086 return (rc); 6087 } 6088 6089 /* Multiqueue (TSS) dispatch routine. 
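 * Each frame carries an RSS hash (flowid) in its mbuf packet header;
 * the routine below maps that id onto a TX ring so a given flow
 * always lands on the same queue. A minimal sketch of the selection
 * logic (bxe_select_tx_queue is a hypothetical name; the function
 * below inlines the same computation):
 *
 *   static inline int
 *   bxe_select_tx_queue(struct mbuf *m, int num_queues)
 *   {
 *       if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE)
 *           return (0);
 *       return (m->m_pkthdr.flowid % num_queues);
 *   }
 *
 * Frames without a hash classification fall back to the first queue.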
*/ 6090 static int 6091 bxe_tx_mq_start(struct ifnet *ifp, 6092 struct mbuf *m) 6093 { 6094 struct bxe_softc *sc = if_getsoftc(ifp); 6095 struct bxe_fastpath *fp; 6096 int fp_index, rc; 6097 6098 fp_index = 0; /* default is the first queue */ 6099 6100 /* check if flowid is set */ 6101 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 6102 fp_index = (m->m_pkthdr.flowid % sc->num_queues); 6103 6104 fp = &sc->fp[fp_index]; 6105 6106 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 6107 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 6108 return (ENETDOWN); 6109 } 6110 6111 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { 6112 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); 6113 return (EBUSY); 6114 } 6115 6116 if (!sc->link_vars.link_up) { 6117 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 6118 return (ENETDOWN); 6119 } 6120 6121 /* XXX change to TRYLOCK here and if failed then schedule taskqueue */ 6122 6123 BXE_FP_TX_LOCK(fp); 6124 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); 6125 BXE_FP_TX_UNLOCK(fp); 6126 6127 return (rc); 6128 } 6129 6130 static void 6131 bxe_mq_flush(struct ifnet *ifp) 6132 { 6133 struct bxe_softc *sc = if_getsoftc(ifp); 6134 struct bxe_fastpath *fp; 6135 struct mbuf *m; 6136 int i; 6137 6138 for (i = 0; i < sc->num_queues; i++) { 6139 fp = &sc->fp[i]; 6140 6141 if (fp->state != BXE_FP_STATE_OPEN) { 6142 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", 6143 fp->index, fp->state); 6144 continue; 6145 } 6146 6147 if (fp->tx_br != NULL) { 6148 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); 6149 BXE_FP_TX_LOCK(fp); 6150 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { 6151 m_freem(m); 6152 } 6153 BXE_FP_TX_UNLOCK(fp); 6154 } 6155 } 6156 6157 if_qflush(ifp); 6158 } 6159 6160 #endif /* FreeBSD_version >= 800000 */ 6161 6162 static uint16_t 6163 bxe_cid_ilt_lines(struct bxe_softc *sc) 6164 { 6165 if (IS_SRIOV(sc)) { 6166 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); 6167 } 6168 return (L2_ILT_LINES(sc)); 6169 } 6170 6171 static void 6172 bxe_ilt_set_info(struct bxe_softc *sc) 6173 { 6174 struct ilt_client_info *ilt_client; 6175 struct ecore_ilt *ilt = sc->ilt; 6176 uint16_t line = 0; 6177 6178 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 6179 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); 6180 6181 /* CDU */ 6182 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 6183 ilt_client->client_num = ILT_CLIENT_CDU; 6184 ilt_client->page_size = CDU_ILT_PAGE_SZ; 6185 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 6186 ilt_client->start = line; 6187 line += bxe_cid_ilt_lines(sc); 6188 6189 if (CNIC_SUPPORT(sc)) { 6190 line += CNIC_ILT_LINES; 6191 } 6192 6193 ilt_client->end = (line - 1); 6194 6195 BLOGD(sc, DBG_LOAD, 6196 "ilt client[CDU]: start %d, end %d, " 6197 "psz 0x%x, flags 0x%x, hw psz %d\n", 6198 ilt_client->start, ilt_client->end, 6199 ilt_client->page_size, 6200 ilt_client->flags, 6201 ilog2(ilt_client->page_size >> 12)); 6202 6203 /* QM */ 6204 if (QM_INIT(sc->qm_cid_count)) { 6205 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 6206 ilt_client->client_num = ILT_CLIENT_QM; 6207 ilt_client->page_size = QM_ILT_PAGE_SZ; 6208 ilt_client->flags = 0; 6209 ilt_client->start = line; 6210 6211 /* 4 bytes for each cid */ 6212 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 6213 QM_ILT_PAGE_SZ); 6214 6215 ilt_client->end = (line - 1); 6216 6217 BLOGD(sc, DBG_LOAD, 6218 "ilt client[QM]: start %d, end %d, " 6219 "psz 0x%x, flags 0x%x, hw psz %d\n", 6220 
ilt_client->start, ilt_client->end, 6221 ilt_client->page_size, ilt_client->flags, 6222 ilog2(ilt_client->page_size >> 12)); 6223 } 6224 6225 if (CNIC_SUPPORT(sc)) { 6226 /* SRC */ 6227 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 6228 ilt_client->client_num = ILT_CLIENT_SRC; 6229 ilt_client->page_size = SRC_ILT_PAGE_SZ; 6230 ilt_client->flags = 0; 6231 ilt_client->start = line; 6232 line += SRC_ILT_LINES; 6233 ilt_client->end = (line - 1); 6234 6235 BLOGD(sc, DBG_LOAD, 6236 "ilt client[SRC]: start %d, end %d, " 6237 "psz 0x%x, flags 0x%x, hw psz %d\n", 6238 ilt_client->start, ilt_client->end, 6239 ilt_client->page_size, ilt_client->flags, 6240 ilog2(ilt_client->page_size >> 12)); 6241 6242 /* TM */ 6243 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 6244 ilt_client->client_num = ILT_CLIENT_TM; 6245 ilt_client->page_size = TM_ILT_PAGE_SZ; 6246 ilt_client->flags = 0; 6247 ilt_client->start = line; 6248 line += TM_ILT_LINES; 6249 ilt_client->end = (line - 1); 6250 6251 BLOGD(sc, DBG_LOAD, 6252 "ilt client[TM]: start %d, end %d, " 6253 "psz 0x%x, flags 0x%x, hw psz %d\n", 6254 ilt_client->start, ilt_client->end, 6255 ilt_client->page_size, ilt_client->flags, 6256 ilog2(ilt_client->page_size >> 12)); 6257 } 6258 6259 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); 6260 } 6261 6262 static void 6263 bxe_set_fp_rx_buf_size(struct bxe_softc *sc) 6264 { 6265 int i; 6266 uint32_t rx_buf_size; 6267 6268 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); 6269 6270 for (i = 0; i < sc->num_queues; i++) { 6271 if(rx_buf_size <= MCLBYTES){ 6272 sc->fp[i].rx_buf_size = rx_buf_size; 6273 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6274 }else if (rx_buf_size <= MJUMPAGESIZE){ 6275 sc->fp[i].rx_buf_size = rx_buf_size; 6276 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 6277 }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){ 6278 sc->fp[i].rx_buf_size = MCLBYTES; 6279 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6280 }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){ 6281 sc->fp[i].rx_buf_size = MJUMPAGESIZE; 6282 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; 6283 }else { 6284 sc->fp[i].rx_buf_size = MCLBYTES; 6285 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6286 } 6287 } 6288 } 6289 6290 static int 6291 bxe_alloc_ilt_mem(struct bxe_softc *sc) 6292 { 6293 int rc = 0; 6294 6295 if ((sc->ilt = 6296 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), 6297 M_BXE_ILT, 6298 (M_NOWAIT | M_ZERO))) == NULL) { 6299 rc = 1; 6300 } 6301 6302 return (rc); 6303 } 6304 6305 static int 6306 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) 6307 { 6308 int rc = 0; 6309 6310 if ((sc->ilt->lines = 6311 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), 6312 M_BXE_ILT, 6313 (M_NOWAIT | M_ZERO))) == NULL) { 6314 rc = 1; 6315 } 6316 6317 return (rc); 6318 } 6319 6320 static void 6321 bxe_free_ilt_mem(struct bxe_softc *sc) 6322 { 6323 if (sc->ilt != NULL) { 6324 free(sc->ilt, M_BXE_ILT); 6325 sc->ilt = NULL; 6326 } 6327 } 6328 6329 static void 6330 bxe_free_ilt_lines_mem(struct bxe_softc *sc) 6331 { 6332 if (sc->ilt->lines != NULL) { 6333 free(sc->ilt->lines, M_BXE_ILT); 6334 sc->ilt->lines = NULL; 6335 } 6336 } 6337 6338 static void 6339 bxe_free_mem(struct bxe_softc *sc) 6340 { 6341 int i; 6342 6343 #if 0 6344 if (!CONFIGURE_NIC_MODE(sc)) { 6345 /* free searcher T2 table */ 6346 bxe_dma_free(sc, &sc->t2); 6347 } 6348 #endif 6349 6350 for (i = 0; i < L2_ILT_LINES(sc); i++) { 6351 bxe_dma_free(sc, &sc->context[i].vcxt_dma); 6352 sc->context[i].vcxt = NULL; 6353 sc->context[i].size = 0; 6354 } 6355 6356 
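/*
 * Teardown mirrors bxe_alloc_mem(): the CDU context DMA blocks were
 * released above, the per-line ILT pages are returned through the
 * generic ecore helper below, and the ILT line array itself is freed
 * last.
 */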
ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6357
6358 bxe_free_ilt_lines_mem(sc);
6359
6360 #if 0
6361 bxe_iov_free_mem(sc);
6362 #endif
6363 }
6364
6365 static int
6366 bxe_alloc_mem(struct bxe_softc *sc)
6367 {
6368 int context_size;
6369 int allocated;
6370 int i;
6371
6372 #if 0
6373 if (!CONFIGURE_NIC_MODE(sc)) {
6374 /* allocate searcher T2 table */
6375 if (bxe_dma_alloc(sc, SRC_T2_SZ,
6376 &sc->t2, "searcher t2 table") != 0) {
6377 return (-1);
6378 }
6379 }
6380 #endif
6381
6382 /*
6383 * Allocate memory for CDU context:
6384 * This memory is allocated separately and not in the generic ILT
6385 * functions because CDU differs in a few aspects:
6386 * 1. There can be multiple entities allocating memory for context -
6387 * regular L2, CNIC, and SRIOV drivers. Each separately controls
6388 * its own ILT lines.
6389 * 2. Since the CDU page size is not a single 4KB page (as it is
6390 * for the other ILT clients), to be efficient we want to support
6391 * sub-page-size allocation in the last entry.
6392 * 3. Context pointers are used by the driver to pass to FW / update
6393 * the context (for the other ILT clients the pointers are used just to
6394 * free the memory during unload).
6395 */
6396 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6397 for (i = 0, allocated = 0; allocated < context_size; i++) {
6398 sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6399 (context_size - allocated));
6400
6401 if (bxe_dma_alloc(sc, sc->context[i].size,
6402 &sc->context[i].vcxt_dma,
6403 "cdu context") != 0) {
6404 bxe_free_mem(sc);
6405 return (-1);
6406 }
6407
6408 sc->context[i].vcxt =
6409 (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6410
6411 allocated += sc->context[i].size;
6412 }
6413
6414 bxe_alloc_ilt_lines_mem(sc);
6415
6416 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6417 sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6418 {
6419 for (i = 0; i < 4; i++) {
6420 BLOGD(sc, DBG_LOAD,
6421 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6422 i,
6423 sc->ilt->clients[i].page_size,
6424 sc->ilt->clients[i].start,
6425 sc->ilt->clients[i].end,
6426 sc->ilt->clients[i].client_num,
6427 sc->ilt->clients[i].flags);
6428 }
6429 }
6430 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6431 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6432 bxe_free_mem(sc);
6433 return (-1);
6434 }
6435
6436 #if 0
6437 if (bxe_iov_alloc_mem(sc)) {
6438 BLOGE(sc, "Failed to allocate memory for SRIOV\n");
6439 bxe_free_mem(sc);
6440 return (-1);
6441 }
6442 #endif
6443
6444 return (0);
6445 }
6446
6447 static void
6448 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6449 {
6450 struct bxe_softc *sc;
6451 int i;
6452
6453 sc = fp->sc;
6454
6455 if (fp->rx_mbuf_tag == NULL) {
6456 return;
6457 }
6458
6459 /* free all mbufs and unload all maps */
6460 for (i = 0; i < RX_BD_TOTAL; i++) {
6461 if (fp->rx_mbuf_chain[i].m_map != NULL) {
6462 bus_dmamap_sync(fp->rx_mbuf_tag,
6463 fp->rx_mbuf_chain[i].m_map,
6464 BUS_DMASYNC_POSTREAD);
6465 bus_dmamap_unload(fp->rx_mbuf_tag,
6466 fp->rx_mbuf_chain[i].m_map);
6467 }
6468
6469 if (fp->rx_mbuf_chain[i].m != NULL) {
6470 m_freem(fp->rx_mbuf_chain[i].m);
6471 fp->rx_mbuf_chain[i].m = NULL;
6472 fp->eth_q_stats.mbuf_alloc_rx--;
6473 }
6474 }
6475 }
6476
6477 static void
6478 bxe_free_tpa_pool(struct bxe_fastpath *fp)
6479 {
6480 struct bxe_softc *sc;
6481 int i, max_agg_queues;
6482
6483 sc = fp->sc;
6484
6485 if (fp->rx_mbuf_tag == NULL) {
6486 return;
6487 }
6488
6489 max_agg_queues = MAX_AGG_QS(sc);
6490
6491 /* release all mbufs
and unload all DMA maps in the TPA pool */
6492 for (i = 0; i < max_agg_queues; i++) {
6493 if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6494 bus_dmamap_sync(fp->rx_mbuf_tag,
6495 fp->rx_tpa_info[i].bd.m_map,
6496 BUS_DMASYNC_POSTREAD);
6497 bus_dmamap_unload(fp->rx_mbuf_tag,
6498 fp->rx_tpa_info[i].bd.m_map);
6499 }
6500
6501 if (fp->rx_tpa_info[i].bd.m != NULL) {
6502 m_freem(fp->rx_tpa_info[i].bd.m);
6503 fp->rx_tpa_info[i].bd.m = NULL;
6504 fp->eth_q_stats.mbuf_alloc_tpa--;
6505 }
6506 }
6507 }
6508
6509 static void
6510 bxe_free_sge_chain(struct bxe_fastpath *fp)
6511 {
6512 struct bxe_softc *sc;
6513 int i;
6514
6515 sc = fp->sc;
6516
6517 if (fp->rx_sge_mbuf_tag == NULL) {
6518 return;
6519 }
6520
6521 /* free all mbufs and unload all maps */
6522 for (i = 0; i < RX_SGE_TOTAL; i++) {
6523 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6524 bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6525 fp->rx_sge_mbuf_chain[i].m_map,
6526 BUS_DMASYNC_POSTREAD);
6527 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6528 fp->rx_sge_mbuf_chain[i].m_map);
6529 }
6530
6531 if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6532 m_freem(fp->rx_sge_mbuf_chain[i].m);
6533 fp->rx_sge_mbuf_chain[i].m = NULL;
6534 fp->eth_q_stats.mbuf_alloc_sge--;
6535 }
6536 }
6537 }
6538
6539 static void
6540 bxe_free_fp_buffers(struct bxe_softc *sc)
6541 {
6542 struct bxe_fastpath *fp;
6543 int i;
6544
6545 for (i = 0; i < sc->num_queues; i++) {
6546 fp = &sc->fp[i];
6547
6548 #if __FreeBSD_version >= 800000
6549 if (fp->tx_br != NULL) {
6550 struct mbuf *m;
6551 /* just in case bxe_mq_flush() wasn't called */
6552 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
6553 m_freem(m);
6554 }
6555 buf_ring_free(fp->tx_br, M_DEVBUF);
6556 fp->tx_br = NULL;
6557 }
6558 #endif
6559
6560 /* free all RX buffers */
6561 bxe_free_rx_bd_chain(fp);
6562 bxe_free_tpa_pool(fp);
6563 bxe_free_sge_chain(fp);
6564
6565 if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6566 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6567 fp->eth_q_stats.mbuf_alloc_rx);
6568 }
6569
6570 if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6571 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6572 fp->eth_q_stats.mbuf_alloc_sge);
6573 }
6574
6575 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6576 BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
6577 fp->eth_q_stats.mbuf_alloc_tpa);
6578 }
6579
6580 if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6581 BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6582 fp->eth_q_stats.mbuf_alloc_tx);
6583 }
6584
6585 /* XXX verify all mbufs were reclaimed */
6586
6587 if (mtx_initialized(&fp->tx_mtx)) {
6588 mtx_destroy(&fp->tx_mtx);
6589 }
6590
6591 if (mtx_initialized(&fp->rx_mtx)) {
6592 mtx_destroy(&fp->rx_mtx);
6593 }
6594 }
6595 }
6596
6597 static int
6598 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6599 uint16_t prev_index,
6600 uint16_t index)
6601 {
6602 struct bxe_sw_rx_bd *rx_buf;
6603 struct eth_rx_bd *rx_bd;
6604 bus_dma_segment_t segs[1];
6605 bus_dmamap_t map;
6606 struct mbuf *m;
6607 int nsegs, rc;
6608
6609 rc = 0;
6610
6611 /* allocate the new RX BD mbuf */
6612 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6613 if (__predict_false(m == NULL)) {
6614 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6615 return (ENOBUFS);
6616 }
6617
6618 fp->eth_q_stats.mbuf_alloc_rx++;
6619
6620 /* initialize the mbuf buffer length */
6621 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6622
6623 /* map the mbuf into non-paged pool */
6624 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6625 fp->rx_mbuf_spare_map,
6626 m, segs, &nsegs,
BUS_DMA_NOWAIT); 6627 if (__predict_false(rc != 0)) { 6628 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; 6629 m_freem(m); 6630 fp->eth_q_stats.mbuf_alloc_rx--; 6631 return (rc); 6632 } 6633 6634 /* all mbufs must map to a single segment */ 6635 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6636 6637 /* release any existing RX BD mbuf mappings */ 6638 6639 if (prev_index != index) { 6640 rx_buf = &fp->rx_mbuf_chain[prev_index]; 6641 6642 if (rx_buf->m_map != NULL) { 6643 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6644 BUS_DMASYNC_POSTREAD); 6645 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6646 } 6647 6648 /* 6649 * We only get here from bxe_rxeof() when the maximum number 6650 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already 6651 * holds the mbuf in the prev_index so it's OK to NULL it out 6652 * here without concern of a memory leak. 6653 */ 6654 fp->rx_mbuf_chain[prev_index].m = NULL; 6655 } 6656 6657 rx_buf = &fp->rx_mbuf_chain[index]; 6658 6659 if (rx_buf->m_map != NULL) { 6660 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6661 BUS_DMASYNC_POSTREAD); 6662 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6663 } 6664 6665 /* save the mbuf and mapping info for a future packet */ 6666 map = (prev_index != index) ? 6667 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; 6668 rx_buf->m_map = fp->rx_mbuf_spare_map; 6669 fp->rx_mbuf_spare_map = map; 6670 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6671 BUS_DMASYNC_PREREAD); 6672 rx_buf->m = m; 6673 6674 rx_bd = &fp->rx_chain[index]; 6675 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6676 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6677 6678 return (rc); 6679 } 6680 6681 static int 6682 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 6683 int queue) 6684 { 6685 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 6686 bus_dma_segment_t segs[1]; 6687 bus_dmamap_t map; 6688 struct mbuf *m; 6689 int nsegs; 6690 int rc = 0; 6691 6692 /* allocate the new TPA mbuf */ 6693 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6694 if (__predict_false(m == NULL)) { 6695 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; 6696 return (ENOBUFS); 6697 } 6698 6699 fp->eth_q_stats.mbuf_alloc_tpa++; 6700 6701 /* initialize the mbuf buffer length */ 6702 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6703 6704 /* map the mbuf into non-paged pool */ 6705 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6706 fp->rx_tpa_info_mbuf_spare_map, 6707 m, segs, &nsegs, BUS_DMA_NOWAIT); 6708 if (__predict_false(rc != 0)) { 6709 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; 6710 m_free(m); 6711 fp->eth_q_stats.mbuf_alloc_tpa--; 6712 return (rc); 6713 } 6714 6715 /* all mbufs must map to a single segment */ 6716 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6717 6718 /* release any existing TPA mbuf mapping */ 6719 if (tpa_info->bd.m_map != NULL) { 6720 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6721 BUS_DMASYNC_POSTREAD); 6722 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); 6723 } 6724 6725 /* save the mbuf and mapping info for the TPA mbuf */ 6726 map = tpa_info->bd.m_map; 6727 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; 6728 fp->rx_tpa_info_mbuf_spare_map = map; 6729 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6730 BUS_DMASYNC_PREREAD); 6731 tpa_info->bd.m = m; 6732 tpa_info->seg = segs[0]; 6733 6734 return (rc); 6735 } 6736 6737 /* 6738 * Allocate an mbuf and assign it to the receive scatter gather chain. 
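 * Each SGE slot holds one page-sized cluster (SGE_PAGE_SIZE) that the
 * chip fills with aggregated TPA payload; the BD entry records only
 * the cluster's bus address.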
The 6739 * caller must take care to save a copy of the existing mbuf in the SG mbuf 6740 * chain. 6741 */ 6742 static int 6743 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 6744 uint16_t index) 6745 { 6746 struct bxe_sw_rx_bd *sge_buf; 6747 struct eth_rx_sge *sge; 6748 bus_dma_segment_t segs[1]; 6749 bus_dmamap_t map; 6750 struct mbuf *m; 6751 int nsegs; 6752 int rc = 0; 6753 6754 /* allocate a new SGE mbuf */ 6755 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); 6756 if (__predict_false(m == NULL)) { 6757 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; 6758 return (ENOMEM); 6759 } 6760 6761 fp->eth_q_stats.mbuf_alloc_sge++; 6762 6763 /* initialize the mbuf buffer length */ 6764 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; 6765 6766 /* map the SGE mbuf into non-paged pool */ 6767 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, 6768 fp->rx_sge_mbuf_spare_map, 6769 m, segs, &nsegs, BUS_DMA_NOWAIT); 6770 if (__predict_false(rc != 0)) { 6771 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; 6772 m_freem(m); 6773 fp->eth_q_stats.mbuf_alloc_sge--; 6774 return (rc); 6775 } 6776 6777 /* all mbufs must map to a single segment */ 6778 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6779 6780 sge_buf = &fp->rx_sge_mbuf_chain[index]; 6781 6782 /* release any existing SGE mbuf mapping */ 6783 if (sge_buf->m_map != NULL) { 6784 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6785 BUS_DMASYNC_POSTREAD); 6786 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); 6787 } 6788 6789 /* save the mbuf and mapping info for a future packet */ 6790 map = sge_buf->m_map; 6791 sge_buf->m_map = fp->rx_sge_mbuf_spare_map; 6792 fp->rx_sge_mbuf_spare_map = map; 6793 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6794 BUS_DMASYNC_PREREAD); 6795 sge_buf->m = m; 6796 6797 sge = &fp->rx_sge_chain[index]; 6798 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6799 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6800 6801 return (rc); 6802 } 6803 6804 static __noinline int 6805 bxe_alloc_fp_buffers(struct bxe_softc *sc) 6806 { 6807 struct bxe_fastpath *fp; 6808 int i, j, rc = 0; 6809 int ring_prod, cqe_ring_prod; 6810 int max_agg_queues; 6811 6812 for (i = 0; i < sc->num_queues; i++) { 6813 fp = &sc->fp[i]; 6814 6815 #if __FreeBSD_version >= 800000 6816 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, 6817 M_NOWAIT, &fp->tx_mtx); 6818 if (fp->tx_br == NULL) { 6819 BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i); 6820 goto bxe_alloc_fp_buffers_error; 6821 } 6822 #endif 6823 6824 ring_prod = cqe_ring_prod = 0; 6825 fp->rx_bd_cons = 0; 6826 fp->rx_cq_cons = 0; 6827 6828 /* allocate buffers for the RX BDs in RX BD chain */ 6829 for (j = 0; j < sc->max_rx_bufs; j++) { 6830 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); 6831 if (rc != 0) { 6832 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 6833 i, rc); 6834 goto bxe_alloc_fp_buffers_error; 6835 } 6836 6837 ring_prod = RX_BD_NEXT(ring_prod); 6838 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); 6839 } 6840 6841 fp->rx_bd_prod = ring_prod; 6842 fp->rx_cq_prod = cqe_ring_prod; 6843 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; 6844 6845 max_agg_queues = MAX_AGG_QS(sc); 6846 6847 fp->tpa_enable = TRUE; 6848 6849 /* fill the TPA pool */ 6850 for (j = 0; j < max_agg_queues; j++) { 6851 rc = bxe_alloc_rx_tpa_mbuf(fp, j); 6852 if (rc != 0) { 6853 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", 6854 i, j); 6855 fp->tpa_enable = FALSE; 6856 goto bxe_alloc_fp_buffers_error; 6857 } 6858 6859 fp->rx_tpa_info[j].state = 
BXE_TPA_STATE_STOP;
6860 }
6861
6862 if (fp->tpa_enable) {
6863 /* fill the RX SGE chain */
6864 ring_prod = 0;
6865 for (j = 0; j < RX_SGE_USABLE; j++) {
6866 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6867 if (rc != 0) {
6868 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6869 i, ring_prod);
6870 fp->tpa_enable = FALSE;
6871 ring_prod = 0;
6872 goto bxe_alloc_fp_buffers_error;
6873 }
6874
6875 ring_prod = RX_SGE_NEXT(ring_prod);
6876 }
6877
6878 fp->rx_sge_prod = ring_prod;
6879 }
6880 }
6881
6882 return (0);
6883
6884 bxe_alloc_fp_buffers_error:
6885
6886 /* unwind what was already allocated */
6887 bxe_free_rx_bd_chain(fp);
6888 bxe_free_tpa_pool(fp);
6889 bxe_free_sge_chain(fp);
6890
6891 return (ENOBUFS);
6892 }
6893
6894 static void
6895 bxe_free_fw_stats_mem(struct bxe_softc *sc)
6896 {
6897 bxe_dma_free(sc, &sc->fw_stats_dma);
6898
6899 sc->fw_stats_num = 0;
6900
6901 sc->fw_stats_req_size = 0;
6902 sc->fw_stats_req = NULL;
6903 sc->fw_stats_req_mapping = 0;
6904
6905 sc->fw_stats_data_size = 0;
6906 sc->fw_stats_data = NULL;
6907 sc->fw_stats_data_mapping = 0;
6908 }
6909
6910 static int
6911 bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6912 {
6913 uint8_t num_queue_stats;
6914 int num_groups;
6915
6916 /* number of queues for statistics is number of eth queues */
6917 num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6918
6919 /*
6920 * Total number of FW statistics requests =
6921 * 1 for port stats + 1 for PF stats + num of queues
6922 */
6923 sc->fw_stats_num = (2 + num_queue_stats);
6924
6925 /*
6926 * Request is built from stats_query_header and an array of
6927 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6928 * rules. The real number of requests is configured in the
6929 * stats_query_header.
6930 */
6931 num_groups =
6932 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6933 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6934
6935 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6936 sc->fw_stats_num, num_groups);
6937
6938 sc->fw_stats_req_size =
6939 (sizeof(struct stats_query_header) +
6940 (num_groups * sizeof(struct stats_query_cmd_group)));
6941
6942 /*
6943 * Data for statistics requests + stats_counter.
6944 * stats_counter holds per-STORM counters that are incremented when
6945 * STORM has finished with the current request. Memory for FCoE
6946 * offloaded statistics is counted anyway, even if it will not be sent.
6947 * VF stats are not accounted for here as the data of VF stats is stored
6948 * in memory allocated by the VF, not here.
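 * As a worked example: with 4 ETH queues, fw_stats_num = 2 + 4 = 6;
 * assuming STATS_QUERY_CMD_COUNT is 16, that rounds up to a single
 * command group, so the request buffer holds one stats_query_header
 * plus one stats_query_cmd_group, and the data buffer sized below
 * holds the counter block, one port block, one PF block, and four
 * per-queue blocks.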
6949 */
6950 sc->fw_stats_data_size =
6951 (sizeof(struct stats_counter) +
6952 sizeof(struct per_port_stats) +
6953 sizeof(struct per_pf_stats) +
6954 /* sizeof(struct fcoe_statistics_params) + */
6955 (sizeof(struct per_queue_stats) * num_queue_stats));
6956
6957 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6958 &sc->fw_stats_dma, "fw stats") != 0) {
6959 bxe_free_fw_stats_mem(sc);
6960 return (-1);
6961 }
6962
6963 /* set up the shortcuts */
6964
6965 sc->fw_stats_req =
6966 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6967 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6968
6969 sc->fw_stats_data =
6970 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6971 sc->fw_stats_req_size);
6972 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6973 sc->fw_stats_req_size);
6974
6975 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6976 (uintmax_t)sc->fw_stats_req_mapping);
6977
6978 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6979 (uintmax_t)sc->fw_stats_data_mapping);
6980
6981 return (0);
6982 }
6983
6984 /*
6985 * Bits map:
6986 * 0-7 - Engine0 load counter.
6987 * 8-15 - Engine1 load counter.
6988 * 16 - Engine0 RESET_IN_PROGRESS bit.
6989 * 17 - Engine1 RESET_IN_PROGRESS bit.
6990 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
6991 * function on the engine
6992 * 19 - Engine1 ONE_IS_LOADED.
6993 * 20 - Chip reset flow bit. When set, a non-leader must wait for both
6994 * engines' leaders to complete (check for both RESET_IN_PROGRESS bits
6995 * and not for just the one belonging to its engine).
6996 */
6997 #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
6998 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff
6999 #define BXE_PATH0_LOAD_CNT_SHIFT 0
7000 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00
7001 #define BXE_PATH1_LOAD_CNT_SHIFT 8
7002 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
7003 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
7004 #define BXE_GLOBAL_RESET_BIT 0x00040000
7005
7006 /* set the GLOBAL_RESET bit, should be run under rtnl lock */
7007 static void
7008 bxe_set_reset_global(struct bxe_softc *sc)
7009 {
7010 uint32_t val;
7011 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7012 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7013 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
7014 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7015 }
7016
7017 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */
7018 static void
7019 bxe_clear_reset_global(struct bxe_softc *sc)
7020 {
7021 uint32_t val;
7022 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7023 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7024 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
7025 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7026 }
7027
7028 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
7029 static uint8_t
7030 bxe_reset_is_global(struct bxe_softc *sc)
7031 {
7032 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7033 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
7034 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
7035 }
7036
7037 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
7038 static void
7039 bxe_set_reset_done(struct bxe_softc *sc)
7040 {
7041 uint32_t val;
7042 uint32_t bit = SC_PATH(sc) ?
BXE_PATH1_RST_IN_PROG_BIT : 7043 BXE_PATH0_RST_IN_PROG_BIT; 7044 7045 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7046 7047 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7048 /* Clear the bit */ 7049 val &= ~bit; 7050 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7051 7052 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7053 } 7054 7055 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 7056 static void 7057 bxe_set_reset_in_progress(struct bxe_softc *sc) 7058 { 7059 uint32_t val; 7060 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 7061 BXE_PATH0_RST_IN_PROG_BIT; 7062 7063 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7064 7065 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7066 /* Set the bit */ 7067 val |= bit; 7068 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7069 7070 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7071 } 7072 7073 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 7074 static uint8_t 7075 bxe_reset_is_done(struct bxe_softc *sc, 7076 int engine) 7077 { 7078 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7079 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : 7080 BXE_PATH0_RST_IN_PROG_BIT; 7081 7082 /* return false if bit is set */ 7083 return (val & bit) ? FALSE : TRUE; 7084 } 7085 7086 /* get the load status for an engine, should be run under rtnl lock */ 7087 static uint8_t 7088 bxe_get_load_status(struct bxe_softc *sc, 7089 int engine) 7090 { 7091 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK : 7092 BXE_PATH0_LOAD_CNT_MASK; 7093 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : 7094 BXE_PATH0_LOAD_CNT_SHIFT; 7095 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7096 7097 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7098 7099 val = ((val & mask) >> shift); 7100 7101 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); 7102 7103 return (val != 0); 7104 } 7105 7106 /* set pf load mark */ 7107 /* XXX needs to be under rtnl lock */ 7108 static void 7109 bxe_set_pf_load(struct bxe_softc *sc) 7110 { 7111 uint32_t val; 7112 uint32_t val1; 7113 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7114 BXE_PATH0_LOAD_CNT_MASK; 7115 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 7116 BXE_PATH0_LOAD_CNT_SHIFT; 7117 7118 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7119 7120 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7121 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7122 7123 /* get the current counter value */ 7124 val1 = ((val & mask) >> shift); 7125 7126 /* set bit of this PF */ 7127 val1 |= (1 << SC_ABS_FUNC(sc)); 7128 7129 /* clear the old value */ 7130 val &= ~mask; 7131 7132 /* set the new one */ 7133 val |= ((val1 << shift) & mask); 7134 7135 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7136 7137 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7138 } 7139 7140 /* clear pf load mark */ 7141 /* XXX needs to be under rtnl lock */ 7142 static uint8_t 7143 bxe_clear_pf_load(struct bxe_softc *sc) 7144 { 7145 uint32_t val1, val; 7146 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7147 BXE_PATH0_LOAD_CNT_MASK; 7148 uint32_t shift = SC_PATH(sc) ? 
BXE_PATH1_LOAD_CNT_SHIFT :
7149 BXE_PATH0_LOAD_CNT_SHIFT;
7150
7151 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7152 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7153 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
7154
7155 /* get the current counter value */
7156 val1 = (val & mask) >> shift;
7157
7158 /* clear bit of that PF */
7159 val1 &= ~(1 << SC_ABS_FUNC(sc));
7160
7161 /* clear the old value */
7162 val &= ~mask;
7163
7164 /* set the new one */
7165 val |= ((val1 << shift) & mask);
7166
7167 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7168 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7169 return (val1 != 0);
7170 }
7171
7172 /* send load request to the MCP and analyze the response */
7173 static int
7174 bxe_nic_load_request(struct bxe_softc *sc,
7175 uint32_t *load_code)
7176 {
7177 /* init fw_seq */
7178 sc->fw_seq =
7179 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
7180 DRV_MSG_SEQ_NUMBER_MASK);
7181
7182 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
7183
7184 /* get the current FW pulse sequence */
7185 sc->fw_drv_pulse_wr_seq =
7186 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
7187 DRV_PULSE_SEQ_MASK);
7188
7189 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
7190 sc->fw_drv_pulse_wr_seq);
7191
7192 /* load request */
7193 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
7194 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
7195
7196 /* if the MCP fails to respond we must abort */
7197 if (!(*load_code)) {
7198 BLOGE(sc, "MCP response failure!\n");
7199 return (-1);
7200 }
7201
7202 /* if MCP refused then must abort */
7203 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7204 BLOGE(sc, "MCP refused load request\n");
7205 return (-1);
7206 }
7207
7208 return (0);
7209 }
7210
7211 /*
7212 * Check whether another PF has already loaded FW to the chip. In virtualized
7213 * environments a PF from another VM may have already initialized the device
7214 * including loading FW.
7215 */
7216 static int
7217 bxe_nic_load_analyze_req(struct bxe_softc *sc,
7218 uint32_t load_code)
7219 {
7220 uint32_t my_fw, loaded_fw;
7221
7222 /* is another pf loaded on this engine? */
7223 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
7224 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
7225 /* build my FW version dword */
7226 my_fw = (BCM_5710_FW_MAJOR_VERSION +
7227 (BCM_5710_FW_MINOR_VERSION << 8) +
7228 (BCM_5710_FW_REVISION_VERSION << 16) +
7229 (BCM_5710_FW_ENGINEERING_VERSION << 24));
7230
7231 /* read loaded FW from chip */
7232 loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
7233 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
7234 loaded_fw, my_fw);
7235
7236 /* abort nic load if version mismatch */
7237 if (my_fw != loaded_fw) {
7238 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
7239 loaded_fw, my_fw);
7240 return (-1);
7241 }
7242 }
7243
7244 return (0);
7245 }
7246
7247 /* mark PMF if applicable */
7248 static void
7249 bxe_nic_load_pmf(struct bxe_softc *sc,
7250 uint32_t load_code)
7251 {
7252 uint32_t ncsi_oem_data_addr;
7253
7254 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7255 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
7256 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
7257 /*
7258 * Barrier here for ordering between the writing to sc->port.pmf here
7259 * and reading it from the periodic task.
7260 */
7261 sc->port.pmf = 1;
7262 mb();
7263 } else {
7264 sc->port.pmf = 0;
7265 }
7266
7267 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
7268
7269 /* XXX needed?
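 * On a LOAD_COMMON_CHIP response the block below zeroes the
 * driver_version slot in the NC-SI OEM data area (when shmem2
 * publishes its address) so that management firmware does not keep
 * reporting a stale driver version.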
*/
7270 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
7271 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
7272 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
7273 if (ncsi_oem_data_addr) {
7274 REG_WR(sc,
7275 (ncsi_oem_data_addr +
7276 offsetof(struct glob_ncsi_oem_data, driver_version)),
7277 0);
7278 }
7279 }
7280 }
7281 }
7282
7283 static void
7284 bxe_read_mf_cfg(struct bxe_softc *sc)
7285 {
7286 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
7287 int abs_func;
7288 int vn;
7289
7290 if (BXE_NOMCP(sc)) {
7291 return; /* what should be the default value in this case? */
7292 }
7293
7294 /*
7295 * The formula for computing the absolute function number is...
7296 * For 2 port configuration (4 functions per port):
7297 * abs_func = 2 * vn + SC_PORT + SC_PATH
7298 * For 4 port configuration (2 functions per port):
7299 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
7300 */
7301 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
7302 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
7303 if (abs_func >= E1H_FUNC_MAX) {
7304 break;
7305 }
7306 sc->devinfo.mf_info.mf_config[vn] =
7307 MFCFG_RD(sc, func_mf_config[abs_func].config);
7308 }
7309
7310 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
7311 FUNC_MF_CFG_FUNC_DISABLED) {
7312 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
7313 sc->flags |= BXE_MF_FUNC_DIS;
7314 } else {
7315 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
7316 sc->flags &= ~BXE_MF_FUNC_DIS;
7317 }
7318 }
7319
7320 /* acquire split MCP access lock register */
7321 static int bxe_acquire_alr(struct bxe_softc *sc)
7322 {
7323 uint32_t j, val;
7324
7325 for (j = 0; j < 1000; j++) {
7326 val = (1UL << 31);
7327 REG_WR(sc, GRCBASE_MCP + 0x9c, val);
7328 val = REG_RD(sc, GRCBASE_MCP + 0x9c);
7329 if (val & (1L << 31))
7330 break;
7331
7332 DELAY(5000);
7333 }
7334
7335 if (!(val & (1L << 31))) {
7336 BLOGE(sc, "Cannot acquire MCP access lock register\n");
7337 return (-1);
7338 }
7339
7340 return (0);
7341 }
7342
7343 /* release split MCP access lock register */
7344 static void bxe_release_alr(struct bxe_softc *sc)
7345 {
7346 REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
7347 }
7348
7349 static void
7350 bxe_fan_failure(struct bxe_softc *sc)
7351 {
7352 int port = SC_PORT(sc);
7353 uint32_t ext_phy_config;
7354
7355 /* mark the failure */
7356 ext_phy_config =
7357 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
7358
7359 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7360 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
7361 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
7362 ext_phy_config);
7363
7364 /* log the failure */
7365 BLOGW(sc, "Fan Failure has caused the driver to shut down "
7366 "the card to prevent permanent damage. "
7367 "Please contact OEM Support for assistance\n");
7368
7369 /* XXX */
7370 #if 1
7371 bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7372 #else
7373 /*
7374 * Schedule device reset (unload).
7375 * Some boards consume enough power while the driver is up
7376 * to overheat if the fan fails.
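 * A FreeBSD-native version of this deferral would hand the reset off
 * to a taskqueue instead of panicking; a minimal sketch, using
 * hypothetical task and handler names:
 *
 *   TASK_INIT(&sc->fan_fail_task, 0, bxe_fan_failure_task, sc);
 *   ...
 *   taskqueue_enqueue(taskqueue_thread, &sc->fan_fail_task);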
7377 */ 7378 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); 7379 schedule_delayed_work(&sc->sp_rtnl_task, 0); 7380 #endif 7381 } 7382 7383 /* this function is called upon a link interrupt */ 7384 static void 7385 bxe_link_attn(struct bxe_softc *sc) 7386 { 7387 uint32_t pause_enabled = 0; 7388 struct host_port_stats *pstats; 7389 int cmng_fns; 7390 7391 /* Make sure that we are synced with the current statistics */ 7392 bxe_stats_handle(sc, STATS_EVENT_STOP); 7393 7394 elink_link_update(&sc->link_params, &sc->link_vars); 7395 7396 if (sc->link_vars.link_up) { 7397 7398 /* dropless flow control */ 7399 if (!CHIP_IS_E1(sc) && sc->dropless_fc) { 7400 pause_enabled = 0; 7401 7402 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 7403 pause_enabled = 1; 7404 } 7405 7406 REG_WR(sc, 7407 (BAR_USTRORM_INTMEM + 7408 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 7409 pause_enabled); 7410 } 7411 7412 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 7413 pstats = BXE_SP(sc, port_stats); 7414 /* reset old mac stats */ 7415 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); 7416 } 7417 7418 if (sc->state == BXE_STATE_OPEN) { 7419 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 7420 } 7421 } 7422 7423 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 7424 cmng_fns = bxe_get_cmng_fns_mode(sc); 7425 7426 if (cmng_fns != CMNG_FNS_NONE) { 7427 bxe_cmng_fns_init(sc, FALSE, cmng_fns); 7428 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7429 } else { 7430 /* rate shaping and fairness are disabled */ 7431 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); 7432 } 7433 } 7434 7435 bxe_link_report_locked(sc); 7436 7437 if (IS_MF(sc)) { 7438 ; // XXX bxe_link_sync_notify(sc); 7439 } 7440 } 7441 7442 static void 7443 bxe_attn_int_asserted(struct bxe_softc *sc, 7444 uint32_t asserted) 7445 { 7446 int port = SC_PORT(sc); 7447 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 7448 MISC_REG_AEU_MASK_ATTN_FUNC_0; 7449 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 7450 NIG_REG_MASK_INTERRUPT_PORT0; 7451 uint32_t aeu_mask; 7452 uint32_t nig_mask = 0; 7453 uint32_t reg_addr; 7454 uint32_t igu_acked; 7455 uint32_t cnt; 7456 7457 if (sc->attn_state & asserted) { 7458 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); 7459 } 7460 7461 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7462 7463 aeu_mask = REG_RD(sc, aeu_addr); 7464 7465 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", 7466 aeu_mask, asserted); 7467 7468 aeu_mask &= ~(asserted & 0x3ff); 7469 7470 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 7471 7472 REG_WR(sc, aeu_addr, aeu_mask); 7473 7474 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7475 7476 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 7477 sc->attn_state |= asserted; 7478 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 7479 7480 if (asserted & ATTN_HARD_WIRED_MASK) { 7481 if (asserted & ATTN_NIG_FOR_FUNC) { 7482 7483 bxe_acquire_phy_lock(sc); 7484 /* save nig interrupt mask */ 7485 nig_mask = REG_RD(sc, nig_int_mask_addr); 7486 7487 /* If nig_mask is not set, no need to call the update function */ 7488 if (nig_mask) { 7489 REG_WR(sc, nig_int_mask_addr, 0); 7490 7491 bxe_link_attn(sc); 7492 } 7493 7494 /* handle unicore attn? 
*/ 7495 } 7496 7497 if (asserted & ATTN_SW_TIMER_4_FUNC) { 7498 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); 7499 } 7500 7501 if (asserted & GPIO_2_FUNC) { 7502 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); 7503 } 7504 7505 if (asserted & GPIO_3_FUNC) { 7506 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); 7507 } 7508 7509 if (asserted & GPIO_4_FUNC) { 7510 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); 7511 } 7512 7513 if (port == 0) { 7514 if (asserted & ATTN_GENERAL_ATTN_1) { 7515 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); 7516 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 7517 } 7518 if (asserted & ATTN_GENERAL_ATTN_2) { 7519 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); 7520 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 7521 } 7522 if (asserted & ATTN_GENERAL_ATTN_3) { 7523 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); 7524 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 7525 } 7526 } else { 7527 if (asserted & ATTN_GENERAL_ATTN_4) { 7528 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); 7529 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 7530 } 7531 if (asserted & ATTN_GENERAL_ATTN_5) { 7532 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); 7533 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 7534 } 7535 if (asserted & ATTN_GENERAL_ATTN_6) { 7536 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); 7537 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 7538 } 7539 } 7540 } /* hardwired */ 7541 7542 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7543 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); 7544 } else { 7545 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 7546 } 7547 7548 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", 7549 asserted, 7550 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 7551 REG_WR(sc, reg_addr, asserted); 7552 7553 /* now set back the mask */ 7554 if (asserted & ATTN_NIG_FOR_FUNC) { 7555 /* 7556 * Verify that IGU ack through BAR was written before restoring 7557 * NIG mask. This loop should exit after 2-3 iterations max. 7558 */ 7559 if (sc->devinfo.int_block != INT_BLOCK_HC) { 7560 cnt = 0; 7561 7562 do { 7563 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 7564 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 7565 (++cnt < MAX_IGU_ATTN_ACK_TO)); 7566 7567 if (!igu_acked) { 7568 BLOGE(sc, "Failed to verify IGU ack on time\n"); 7569 } 7570 7571 mb(); 7572 } 7573 7574 REG_WR(sc, nig_int_mask_addr, nig_mask); 7575 7576 bxe_release_phy_lock(sc); 7577 } 7578 } 7579 7580 static void 7581 bxe_print_next_block(struct bxe_softc *sc, 7582 int idx, 7583 const char *blk) 7584 { 7585 BLOGI(sc, "%s%s", idx ? 
", " : "", blk); 7586 } 7587 7588 static int 7589 bxe_check_blocks_with_parity0(struct bxe_softc *sc, 7590 uint32_t sig, 7591 int par_num, 7592 uint8_t print) 7593 { 7594 uint32_t cur_bit = 0; 7595 int i = 0; 7596 7597 for (i = 0; sig; i++) { 7598 cur_bit = ((uint32_t)0x1 << i); 7599 if (sig & cur_bit) { 7600 switch (cur_bit) { 7601 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 7602 if (print) 7603 bxe_print_next_block(sc, par_num++, "BRB"); 7604 break; 7605 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 7606 if (print) 7607 bxe_print_next_block(sc, par_num++, "PARSER"); 7608 break; 7609 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 7610 if (print) 7611 bxe_print_next_block(sc, par_num++, "TSDM"); 7612 break; 7613 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 7614 if (print) 7615 bxe_print_next_block(sc, par_num++, "SEARCHER"); 7616 break; 7617 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 7618 if (print) 7619 bxe_print_next_block(sc, par_num++, "TCM"); 7620 break; 7621 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 7622 if (print) 7623 bxe_print_next_block(sc, par_num++, "TSEMI"); 7624 break; 7625 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 7626 if (print) 7627 bxe_print_next_block(sc, par_num++, "XPB"); 7628 break; 7629 } 7630 7631 /* Clear the bit */ 7632 sig &= ~cur_bit; 7633 } 7634 } 7635 7636 return (par_num); 7637 } 7638 7639 static int 7640 bxe_check_blocks_with_parity1(struct bxe_softc *sc, 7641 uint32_t sig, 7642 int par_num, 7643 uint8_t *global, 7644 uint8_t print) 7645 { 7646 int i = 0; 7647 uint32_t cur_bit = 0; 7648 for (i = 0; sig; i++) { 7649 cur_bit = ((uint32_t)0x1 << i); 7650 if (sig & cur_bit) { 7651 switch (cur_bit) { 7652 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 7653 if (print) 7654 bxe_print_next_block(sc, par_num++, "PBF"); 7655 break; 7656 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 7657 if (print) 7658 bxe_print_next_block(sc, par_num++, "QM"); 7659 break; 7660 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 7661 if (print) 7662 bxe_print_next_block(sc, par_num++, "TM"); 7663 break; 7664 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 7665 if (print) 7666 bxe_print_next_block(sc, par_num++, "XSDM"); 7667 break; 7668 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 7669 if (print) 7670 bxe_print_next_block(sc, par_num++, "XCM"); 7671 break; 7672 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 7673 if (print) 7674 bxe_print_next_block(sc, par_num++, "XSEMI"); 7675 break; 7676 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 7677 if (print) 7678 bxe_print_next_block(sc, par_num++, "DOORBELLQ"); 7679 break; 7680 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 7681 if (print) 7682 bxe_print_next_block(sc, par_num++, "NIG"); 7683 break; 7684 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 7685 if (print) 7686 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); 7687 *global = TRUE; 7688 break; 7689 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 7690 if (print) 7691 bxe_print_next_block(sc, par_num++, "DEBUG"); 7692 break; 7693 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 7694 if (print) 7695 bxe_print_next_block(sc, par_num++, "USDM"); 7696 break; 7697 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 7698 if (print) 7699 bxe_print_next_block(sc, par_num++, "UCM"); 7700 break; 7701 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 7702 if (print) 7703 bxe_print_next_block(sc, par_num++, "USEMI"); 7704 break; 7705 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 7706 if (print) 7707 bxe_print_next_block(sc, par_num++, "UPB"); 7708 break; 7709 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 
7710 if (print) 7711 bxe_print_next_block(sc, par_num++, "CSDM"); 7712 break; 7713 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 7714 if (print) 7715 bxe_print_next_block(sc, par_num++, "CCM"); 7716 break; 7717 } 7718 7719 /* Clear the bit */ 7720 sig &= ~cur_bit; 7721 } 7722 } 7723 7724 return (par_num); 7725 } 7726 7727 static int 7728 bxe_check_blocks_with_parity2(struct bxe_softc *sc, 7729 uint32_t sig, 7730 int par_num, 7731 uint8_t print) 7732 { 7733 uint32_t cur_bit = 0; 7734 int i = 0; 7735 7736 for (i = 0; sig; i++) { 7737 cur_bit = ((uint32_t)0x1 << i); 7738 if (sig & cur_bit) { 7739 switch (cur_bit) { 7740 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 7741 if (print) 7742 bxe_print_next_block(sc, par_num++, "CSEMI"); 7743 break; 7744 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 7745 if (print) 7746 bxe_print_next_block(sc, par_num++, "PXP"); 7747 break; 7748 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 7749 if (print) 7750 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); 7751 break; 7752 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 7753 if (print) 7754 bxe_print_next_block(sc, par_num++, "CFC"); 7755 break; 7756 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 7757 if (print) 7758 bxe_print_next_block(sc, par_num++, "CDU"); 7759 break; 7760 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 7761 if (print) 7762 bxe_print_next_block(sc, par_num++, "DMAE"); 7763 break; 7764 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 7765 if (print) 7766 bxe_print_next_block(sc, par_num++, "IGU"); 7767 break; 7768 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 7769 if (print) 7770 bxe_print_next_block(sc, par_num++, "MISC"); 7771 break; 7772 } 7773 7774 /* Clear the bit */ 7775 sig &= ~cur_bit; 7776 } 7777 } 7778 7779 return (par_num); 7780 } 7781 7782 static int 7783 bxe_check_blocks_with_parity3(struct bxe_softc *sc, 7784 uint32_t sig, 7785 int par_num, 7786 uint8_t *global, 7787 uint8_t print) 7788 { 7789 uint32_t cur_bit = 0; 7790 int i = 0; 7791 7792 for (i = 0; sig; i++) { 7793 cur_bit = ((uint32_t)0x1 << i); 7794 if (sig & cur_bit) { 7795 switch (cur_bit) { 7796 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 7797 if (print) 7798 bxe_print_next_block(sc, par_num++, "MCP ROM"); 7799 *global = TRUE; 7800 break; 7801 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 7802 if (print) 7803 bxe_print_next_block(sc, par_num++, 7804 "MCP UMP RX"); 7805 *global = TRUE; 7806 break; 7807 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 7808 if (print) 7809 bxe_print_next_block(sc, par_num++, 7810 "MCP UMP TX"); 7811 *global = TRUE; 7812 break; 7813 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 7814 if (print) 7815 bxe_print_next_block(sc, par_num++, 7816 "MCP SCPAD"); 7817 *global = TRUE; 7818 break; 7819 } 7820 7821 /* Clear the bit */ 7822 sig &= ~cur_bit; 7823 } 7824 } 7825 7826 return (par_num); 7827 } 7828 7829 static int 7830 bxe_check_blocks_with_parity4(struct bxe_softc *sc, 7831 uint32_t sig, 7832 int par_num, 7833 uint8_t print) 7834 { 7835 uint32_t cur_bit = 0; 7836 int i = 0; 7837 7838 for (i = 0; sig; i++) { 7839 cur_bit = ((uint32_t)0x1 << i); 7840 if (sig & cur_bit) { 7841 switch (cur_bit) { 7842 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 7843 if (print) 7844 bxe_print_next_block(sc, par_num++, "PGLUE_B"); 7845 break; 7846 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 7847 if (print) 7848 bxe_print_next_block(sc, par_num++, "ATC"); 7849 break; 7850 } 7851 7852 /* Clear the bit */ 7853 sig &= ~cur_bit; 7854 } 7855 } 7856 7857 return (par_num); 7858 } 7859 7860 static 
uint8_t 7861 bxe_parity_attn(struct bxe_softc *sc, 7862 uint8_t *global, 7863 uint8_t print, 7864 uint32_t *sig) 7865 { 7866 int par_num = 0; 7867 7868 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 7869 (sig[1] & HW_PRTY_ASSERT_SET_1) || 7870 (sig[2] & HW_PRTY_ASSERT_SET_2) || 7871 (sig[3] & HW_PRTY_ASSERT_SET_3) || 7872 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 7873 BLOGE(sc, "Parity error: HW block parity attention:\n" 7874 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 7875 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), 7876 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), 7877 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), 7878 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), 7879 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); 7880 7881 if (print) 7882 BLOGI(sc, "Parity errors detected in blocks: "); 7883 7884 par_num = 7885 bxe_check_blocks_with_parity0(sc, sig[0] & 7886 HW_PRTY_ASSERT_SET_0, 7887 par_num, print); 7888 par_num = 7889 bxe_check_blocks_with_parity1(sc, sig[1] & 7890 HW_PRTY_ASSERT_SET_1, 7891 par_num, global, print); 7892 par_num = 7893 bxe_check_blocks_with_parity2(sc, sig[2] & 7894 HW_PRTY_ASSERT_SET_2, 7895 par_num, print); 7896 par_num = 7897 bxe_check_blocks_with_parity3(sc, sig[3] & 7898 HW_PRTY_ASSERT_SET_3, 7899 par_num, global, print); 7900 par_num = 7901 bxe_check_blocks_with_parity4(sc, sig[4] & 7902 HW_PRTY_ASSERT_SET_4, 7903 par_num, print); 7904 7905 if (print) 7906 BLOGI(sc, "\n"); 7907 7908 return (TRUE); 7909 } 7910 7911 return (FALSE); 7912 } 7913 7914 static uint8_t 7915 bxe_chk_parity_attn(struct bxe_softc *sc, 7916 uint8_t *global, 7917 uint8_t print) 7918 { 7919 struct attn_route attn = { {0} }; 7920 int port = SC_PORT(sc); 7921 7922 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 7923 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 7924 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 7925 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 7926 7927 if (!CHIP_IS_E1x(sc)) 7928 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 7929 7930 return (bxe_parity_attn(sc, global, print, attn.sig)); 7931 } 7932 7933 static void 7934 bxe_attn_int_deasserted4(struct bxe_softc *sc, 7935 uint32_t attn) 7936 { 7937 uint32_t val; 7938 7939 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 7940 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 7941 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); 7942 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 7943 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 7944 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 7945 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 7946 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 7947 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 7948 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 7949 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 7950 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 7951 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 7952 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 7953 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 7954 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 7955 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 7956 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 7957 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 7958 if (val & 
PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 7959 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 7960 } 7961 7962 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 7963 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); 7964 BLOGE(sc, "ATC hw attention 0x%08x\n", val); 7965 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 7966 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 7967 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 7968 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 7969 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 7970 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 7971 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 7972 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 7973 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 7974 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 7975 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 7976 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 7977 } 7978 7979 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7980 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 7981 BLOGE(sc, "FATAL parity attention set4 0x%08x\n", 7982 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7983 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 7984 } 7985 } 7986 7987 static void 7988 bxe_e1h_disable(struct bxe_softc *sc) 7989 { 7990 int port = SC_PORT(sc); 7991 7992 bxe_tx_disable(sc); 7993 7994 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 7995 } 7996 7997 static void 7998 bxe_e1h_enable(struct bxe_softc *sc) 7999 { 8000 int port = SC_PORT(sc); 8001 8002 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 8003 8004 // XXX bxe_tx_enable(sc); 8005 } 8006 8007 /* 8008 * called due to MCP event (on pmf): 8009 * reread new bandwidth configuration 8010 * configure FW 8011 * notify other functions about the change 8012 */ 8013 static void 8014 bxe_config_mf_bw(struct bxe_softc *sc) 8015 { 8016 if (sc->link_vars.link_up) { 8017 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); 8018 // XXX bxe_link_sync_notify(sc); 8019 } 8020 8021 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 8022 } 8023 8024 static void 8025 bxe_set_mf_bw(struct bxe_softc *sc) 8026 { 8027 bxe_config_mf_bw(sc); 8028 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 8029 } 8030 8031 static void 8032 bxe_handle_eee_event(struct bxe_softc *sc) 8033 { 8034 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); 8035 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 8036 } 8037 8038 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 8039 8040 static void 8041 bxe_drv_info_ether_stat(struct bxe_softc *sc) 8042 { 8043 struct eth_stats_info *ether_stat = 8044 &sc->sp->drv_info_to_mcp.ether_stat; 8045 8046 strlcpy(ether_stat->version, BXE_DRIVER_VERSION, 8047 ETH_STAT_INFO_VERSION_LEN); 8048 8049 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ 8050 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, 8051 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 8052 ether_stat->mac_local + MAC_PAD, 8053 MAC_PAD, ETH_ALEN); 8054 8055 ether_stat->mtu_size = sc->mtu; 8056 8057 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 8058 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { 8059 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 8060 } 8061 8062 // XXX ether_stat->feature_flags |= ???; 8063 8064 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ?
1 : 0; 8065 8066 ether_stat->txq_size = sc->tx_ring_size; 8067 ether_stat->rxq_size = sc->rx_ring_size; 8068 } 8069 8070 static void 8071 bxe_handle_drv_info_req(struct bxe_softc *sc) 8072 { 8073 enum drv_info_opcode op_code; 8074 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 8075 8076 /* if drv_info version supported by MFW doesn't match - send NACK */ 8077 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 8078 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8079 return; 8080 } 8081 8082 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 8083 DRV_INFO_CONTROL_OP_CODE_SHIFT); 8084 8085 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 8086 8087 switch (op_code) { 8088 case ETH_STATS_OPCODE: 8089 bxe_drv_info_ether_stat(sc); 8090 break; 8091 case FCOE_STATS_OPCODE: 8092 case ISCSI_STATS_OPCODE: 8093 default: 8094 /* if op code isn't supported - send NACK */ 8095 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8096 return; 8097 } 8098 8099 /* 8100 * If we got drv_info attn from MFW then these fields are defined in 8101 * shmem2 for sure 8102 */ 8103 SHMEM2_WR(sc, drv_info_host_addr_lo, 8104 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8105 SHMEM2_WR(sc, drv_info_host_addr_hi, 8106 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8107 8108 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 8109 } 8110 8111 static void 8112 bxe_dcc_event(struct bxe_softc *sc, 8113 uint32_t dcc_event) 8114 { 8115 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); 8116 8117 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 8118 /* 8119 * This is the only place besides the function initialization 8120 * where the sc->flags can change so it is done without any 8121 * locks 8122 */ 8123 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 8124 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); 8125 sc->flags |= BXE_MF_FUNC_DIS; 8126 bxe_e1h_disable(sc); 8127 } else { 8128 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); 8129 sc->flags &= ~BXE_MF_FUNC_DIS; 8130 bxe_e1h_enable(sc); 8131 } 8132 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 8133 } 8134 8135 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 8136 bxe_config_mf_bw(sc); 8137 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 8138 } 8139 8140 /* Report results to MCP */ 8141 if (dcc_event) 8142 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 8143 else 8144 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 8145 } 8146 8147 static void 8148 bxe_pmf_update(struct bxe_softc *sc) 8149 { 8150 int port = SC_PORT(sc); 8151 uint32_t val; 8152 8153 sc->port.pmf = 1; 8154 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); 8155 8156 /* 8157 * We need the mb() to ensure the ordering between the writing to 8158 * sc->port.pmf here and reading it from the bxe_periodic_task(). 8159 */ 8160 mb(); 8161 8162 /* queue a periodic task */ 8163 // XXX schedule task... 
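/*
 * A note on the "enable nig attention" mask programmed below (a sketch
 * of the intent, inferred from the code rather than from documentation):
 * bits 0-3 and 8-15 of the leading/trailing edge registers are set
 * unconditionally (0xff0f), while bit (4 + vn) additionally enables the
 * NIG attention of this function's vnic only. For vnic 2, for example:
 *
 *   val = 0xff0f | (1 << (2 + 4));   // 0xff4f
 */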
8164 8165 // XXX bxe_dcbx_pmf_update(sc); 8166 8167 /* enable nig attention */ 8168 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 8169 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8170 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); 8171 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); 8172 } else if (!CHIP_IS_E1x(sc)) { 8173 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 8174 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 8175 } 8176 8177 bxe_stats_handle(sc, STATS_EVENT_PMF); 8178 } 8179 8180 static int 8181 bxe_mc_assert(struct bxe_softc *sc) 8182 { 8183 char last_idx; 8184 int i, rc = 0; 8185 uint32_t row0, row1, row2, row3; 8186 8187 /* XSTORM */ 8188 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 8189 if (last_idx) 8190 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8191 8192 /* print the asserts */ 8193 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8194 8195 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 8196 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); 8197 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); 8198 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); 8199 8200 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8201 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8202 i, row3, row2, row1, row0); 8203 rc++; 8204 } else { 8205 break; 8206 } 8207 } 8208 8209 /* TSTORM */ 8210 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 8211 if (last_idx) { 8212 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8213 } 8214 8215 /* print the asserts */ 8216 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8217 8218 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 8219 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); 8220 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); 8221 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); 8222 8223 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8224 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8225 i, row3, row2, row1, row0); 8226 rc++; 8227 } else { 8228 break; 8229 } 8230 } 8231 8232 /* CSTORM */ 8233 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 8234 if (last_idx) { 8235 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8236 } 8237 8238 /* print the asserts */ 8239 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8240 8241 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 8242 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); 8243 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); 8244 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); 8245 8246 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8247 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8248 i, row3, row2, row1, row0); 8249 rc++; 8250 } else { 8251 break; 8252 } 8253 } 8254 8255 /* USTORM */ 8256 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 8257 if (last_idx) { 8258 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8259 } 8260 8261 /* print the asserts */ 8262 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8263 8264 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 8265 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); 8266 row2 = REG_RD(sc, 
BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8); 8267 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); 8268 8269 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8270 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8271 i, row3, row2, row1, row0); 8272 rc++; 8273 } else { 8274 break; 8275 } 8276 } 8277 8278 return (rc); 8279 } 8280 8281 static void 8282 bxe_attn_int_deasserted3(struct bxe_softc *sc, 8283 uint32_t attn) 8284 { 8285 int func = SC_FUNC(sc); 8286 uint32_t val; 8287 8288 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 8289 8290 if (attn & BXE_PMF_LINK_ASSERT(sc)) { 8291 8292 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 8293 bxe_read_mf_cfg(sc); 8294 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = 8295 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 8296 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); 8297 8298 if (val & DRV_STATUS_DCC_EVENT_MASK) 8299 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); 8300 8301 if (val & DRV_STATUS_SET_MF_BW) 8302 bxe_set_mf_bw(sc); 8303 8304 if (val & DRV_STATUS_DRV_INFO_REQ) 8305 bxe_handle_drv_info_req(sc); 8306 8307 #if 0 8308 if (val & DRV_STATUS_VF_DISABLED) 8309 bxe_vf_handle_flr_event(sc); 8310 #endif 8311 8312 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) 8313 bxe_pmf_update(sc); 8314 8315 #if 0 8316 if (sc->port.pmf && 8317 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 8318 (sc->dcbx_enabled > 0)) 8319 /* start dcbx state machine */ 8320 bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED); 8321 #endif 8322 8323 #if 0 8324 if (val & DRV_STATUS_AFEX_EVENT_MASK) 8325 bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK); 8326 #endif 8327 8328 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 8329 bxe_handle_eee_event(sc); 8330 8331 if (sc->link_vars.periodic_flags & 8332 ELINK_PERIODIC_FLAGS_LINK_EVENT) { 8333 /* sync with link */ 8334 bxe_acquire_phy_lock(sc); 8335 sc->link_vars.periodic_flags &= 8336 ~ELINK_PERIODIC_FLAGS_LINK_EVENT; 8337 bxe_release_phy_lock(sc); 8338 if (IS_MF(sc)) 8339 ; // XXX bxe_link_sync_notify(sc); 8340 bxe_link_report(sc); 8341 } 8342 8343 /* 8344 * Always call it here: bxe_link_report() will 8345 * prevent the link indication duplication. 8346 */ 8347 bxe_link_status_update(sc); 8348 8349 } else if (attn & BXE_MC_ASSERT_BITS) { 8350 8351 BLOGE(sc, "MC assert!\n"); 8352 bxe_mc_assert(sc); 8353 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); 8354 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); 8355 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); 8356 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); 8357 bxe_panic(sc, ("MC assert!\n")); 8358 8359 } else if (attn & BXE_MCP_ASSERT) { 8360 8361 BLOGE(sc, "MCP assert!\n"); 8362 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); 8363 // XXX bxe_fw_dump(sc); 8364 8365 } else { 8366 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); 8367 } 8368 } 8369 8370 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 8371 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); 8372 if (attn & BXE_GRC_TIMEOUT) { 8373 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); 8374 BLOGE(sc, "GRC time-out 0x%08x\n", val); 8375 } 8376 if (attn & BXE_GRC_RSV) { 8377 val = CHIP_IS_E1(sc) ? 
0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN); 8378 BLOGE(sc, "GRC reserved 0x%08x\n", val); 8379 } 8380 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 8381 } 8382 } 8383 8384 static void 8385 bxe_attn_int_deasserted2(struct bxe_softc *sc, 8386 uint32_t attn) 8387 { 8388 int port = SC_PORT(sc); 8389 int reg_offset; 8390 uint32_t val0, mask0, val1, mask1; 8391 uint32_t val; 8392 8393 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 8394 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); 8395 BLOGE(sc, "CFC hw attention 0x%08x\n", val); 8396 /* CFC error attention */ 8397 if (val & 0x2) { 8398 BLOGE(sc, "FATAL error from CFC\n"); 8399 } 8400 } 8401 8402 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 8403 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); 8404 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val); 8405 /* RQ_USDMDP_FIFO_OVERFLOW */ 8406 if (val & 0x18000) { 8407 BLOGE(sc, "FATAL error from PXP\n"); 8408 } 8409 8410 if (!CHIP_IS_E1x(sc)) { 8411 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); 8412 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val); 8413 } 8414 } 8415 8416 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR 8417 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT 8418 8419 if (attn & AEU_PXP2_HW_INT_BIT) { 8420 /* CQ47854 workaround: do not panic on 8421 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8422 */ 8423 if (!CHIP_IS_E1x(sc)) { 8424 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); 8425 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); 8426 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); 8427 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); 8428 /* 8429 * If only the PXP2_EOP_ERROR_BIT is set in 8430 * STS0 and STS1 - clear it 8431 * 8432 * We probably lose additional attentions between 8433 * STS0 and STS_CLR0; in this case the user will not 8434 * be notified about them 8435 */ 8436 if (val0 & mask0 & PXP2_EOP_ERROR_BIT && 8437 !(val1 & mask1)) 8438 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 8439 8440 /* print the register, since no one can restore it */ 8441 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0); 8442 8443 /* 8444 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8445 * then notify 8446 */ 8447 if (val0 & PXP2_EOP_ERROR_BIT) { 8448 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n"); 8449 8450 /* 8451 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is 8452 * set then clear attention from PXP2 block without panic 8453 */ 8454 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && 8455 ((val1 & mask1) == 0)) 8456 attn &= ~AEU_PXP2_HW_INT_BIT; 8457 } 8458 } 8459 } 8460 8461 if (attn & HW_INTERRUT_ASSERT_SET_2) { 8462 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 8463 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 8464 8465 val = REG_RD(sc, reg_offset); 8466 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 8467 REG_WR(sc, reg_offset, val); 8468 8469 BLOGE(sc, "FATAL HW block attention set2 0x%x\n", 8470 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2)); 8471 bxe_panic(sc, ("HW block attention set2\n")); 8472 } 8473 } 8474 8475 static void 8476 bxe_attn_int_deasserted1(struct bxe_softc *sc, 8477 uint32_t attn) 8478 { 8479 int port = SC_PORT(sc); 8480 int reg_offset; 8481 uint32_t val; 8482 8483 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 8484 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); 8485 BLOGE(sc, "DB hw attention 0x%08x\n", val); 8486 /* DORQ discard attention */ 8487 if (val & 0x2) { 8488 BLOGE(sc, "FATAL error from DORQ\n"); 8489 } 8490 } 8491 8492 if (attn & HW_INTERRUT_ASSERT_SET_1) { 8493 reg_offset = (port ?
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 8494 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 8495 8496 val = REG_RD(sc, reg_offset); 8497 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 8498 REG_WR(sc, reg_offset, val); 8499 8500 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", 8501 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); 8502 bxe_panic(sc, ("HW block attention set1\n")); 8503 } 8504 } 8505 8506 static void 8507 bxe_attn_int_deasserted0(struct bxe_softc *sc, 8508 uint32_t attn) 8509 { 8510 int port = SC_PORT(sc); 8511 int reg_offset; 8512 uint32_t val; 8513 8514 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 8515 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 8516 8517 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 8518 val = REG_RD(sc, reg_offset); 8519 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 8520 REG_WR(sc, reg_offset, val); 8521 8522 BLOGW(sc, "SPIO5 hw attention\n"); 8523 8524 /* Fan failure attention */ 8525 elink_hw_reset_phy(&sc->link_params); 8526 bxe_fan_failure(sc); 8527 } 8528 8529 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { 8530 bxe_acquire_phy_lock(sc); 8531 elink_handle_module_detect_int(&sc->link_params); 8532 bxe_release_phy_lock(sc); 8533 } 8534 8535 if (attn & HW_INTERRUT_ASSERT_SET_0) { 8536 val = REG_RD(sc, reg_offset); 8537 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 8538 REG_WR(sc, reg_offset, val); 8539 8540 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", 8541 (attn & HW_INTERRUT_ASSERT_SET_0))); 8542 } 8543 } 8544 8545 static void 8546 bxe_attn_int_deasserted(struct bxe_softc *sc, 8547 uint32_t deasserted) 8548 { 8549 struct attn_route attn; 8550 struct attn_route *group_mask; 8551 int port = SC_PORT(sc); 8552 int index; 8553 uint32_t reg_addr; 8554 uint32_t val; 8555 uint32_t aeu_mask; 8556 uint8_t global = FALSE; 8557 8558 /* 8559 * Need to take HW lock because MCP or other port might also 8560 * try to handle this event. 8561 */ 8562 bxe_acquire_alr(sc); 8563 8564 if (bxe_chk_parity_attn(sc, &global, TRUE)) { 8565 /* XXX 8566 * In case of parity errors don't handle attentions so that 8567 * other function would "see" parity errors. 8568 */ 8569 sc->recovery_state = BXE_RECOVERY_INIT; 8570 // XXX schedule a recovery task... 
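/*
 * Note: returning early below skips the normal deassert handling
 * further down (clearing the attention bits in the HC/IGU and
 * re-opening the AEU mask), which deliberately leaves the parity
 * attention visible to the MCP and to the other functions.
 */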
8571 /* disable HW interrupts */ 8572 bxe_int_disable(sc); 8573 bxe_release_alr(sc); 8574 return; 8575 } 8576 8577 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 8578 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 8579 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 8580 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 8581 if (!CHIP_IS_E1x(sc)) { 8582 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 8583 } else { 8584 attn.sig[4] = 0; 8585 } 8586 8587 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 8588 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 8589 8590 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 8591 if (deasserted & (1 << index)) { 8592 group_mask = &sc->attn_group[index]; 8593 8594 BLOGD(sc, DBG_INTR, 8595 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, 8596 group_mask->sig[0], group_mask->sig[1], 8597 group_mask->sig[2], group_mask->sig[3], 8598 group_mask->sig[4]); 8599 8600 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); 8601 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); 8602 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); 8603 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); 8604 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); 8605 } 8606 } 8607 8608 bxe_release_alr(sc); 8609 8610 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8611 reg_addr = (HC_REG_COMMAND_REG + port*32 + 8612 COMMAND_REG_ATTN_BITS_CLR); 8613 } else { 8614 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 8615 } 8616 8617 val = ~deasserted; 8618 BLOGD(sc, DBG_INTR, 8619 "about to mask 0x%08x at %s addr 0x%08x\n", val, 8620 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 8621 REG_WR(sc, reg_addr, val); 8622 8623 if (~sc->attn_state & deasserted) { 8624 BLOGE(sc, "IGU error\n"); 8625 } 8626 8627 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 8628 MISC_REG_AEU_MASK_ATTN_FUNC_0; 8629 8630 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8631 8632 aeu_mask = REG_RD(sc, reg_addr); 8633 8634 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", 8635 aeu_mask, deasserted); 8636 aeu_mask |= (deasserted & 0x3ff); 8637 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 8638 8639 REG_WR(sc, reg_addr, aeu_mask); 8640 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8641 8642 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 8643 sc->attn_state &= ~deasserted; 8644 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 8645 } 8646 8647 static void 8648 bxe_attn_int(struct bxe_softc *sc) 8649 { 8650 /* read local copy of bits */ 8651 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 8652 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 8653 uint32_t attn_state = sc->attn_state; 8654 8655 /* look for changed bits */ 8656 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 8657 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 8658 8659 BLOGD(sc, DBG_INTR, 8660 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", 8661 attn_bits, attn_ack, asserted, deasserted); 8662 8663 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 8664 BLOGE(sc, "BAD attention state\n"); 8665 } 8666 8667 /* handle bits that were raised */ 8668 if (asserted) { 8669 bxe_attn_int_asserted(sc, asserted); 8670 } 8671 8672 if (deasserted) { 8673 bxe_attn_int_deasserted(sc, deasserted); 8674 } 8675 } 8676 8677 static uint16_t 8678 bxe_update_dsb_idx(struct bxe_softc *sc) 8679 { 8680 struct host_sp_status_block *def_sb = sc->def_sb; 8681 uint16_t rc = 0; 8682 8683 mb(); /* status block is written to by the chip */ 8684 8685 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 8686 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 8687 rc |= BXE_DEF_SB_ATT_IDX; 8688 } 8689 8690 if (sc->def_idx != def_sb->sp_sb.running_index) { 8691 sc->def_idx = def_sb->sp_sb.running_index; 8692 rc |= BXE_DEF_SB_IDX; 8693 } 8694 8695 mb(); 8696 8697 return (rc); 8698 } 8699 8700 static inline struct ecore_queue_sp_obj * 8701 bxe_cid_to_q_obj(struct bxe_softc *sc, 8702 uint32_t cid) 8703 { 8704 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); 8705 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); 8706 } 8707 8708 static void 8709 bxe_handle_mcast_eqe(struct bxe_softc *sc) 8710 { 8711 struct ecore_mcast_ramrod_params rparam; 8712 int rc; 8713 8714 memset(&rparam, 0, sizeof(rparam)); 8715 8716 rparam.mcast_obj = &sc->mcast_obj; 8717 8718 BXE_MCAST_LOCK(sc); 8719 8720 /* clear pending state for the last command */ 8721 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 8722 8723 /* if there are pending mcast commands - send them */ 8724 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 8725 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 8726 if (rc < 0) { 8727 BLOGD(sc, DBG_SP, 8728 "ERROR: Failed to send pending mcast commands (%d)\n", 8729 rc); 8730 } 8731 } 8732 8733 BXE_MCAST_UNLOCK(sc); 8734 } 8735 8736 static void 8737 bxe_handle_classification_eqe(struct bxe_softc *sc, 8738 union event_ring_elem *elem) 8739 { 8740 unsigned long ramrod_flags = 0; 8741 int rc = 0; 8742 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8743 struct ecore_vlan_mac_obj *vlan_mac_obj; 8744 8745 /* always push next commands out, don't wait here */ 8746 bit_set(&ramrod_flags, 
RAMROD_CONT); 8747 8748 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { 8749 case ECORE_FILTER_MAC_PENDING: 8750 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n"); 8751 vlan_mac_obj = &sc->sp_objs[cid].mac_obj; 8752 break; 8753 8754 case ECORE_FILTER_MCAST_PENDING: 8755 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n"); 8756 /* 8757 * This is only relevant for 57710 where multicast MACs are 8758 * configured as unicast MACs using the same ramrod. 8759 */ 8760 bxe_handle_mcast_eqe(sc); 8761 return; 8762 8763 default: 8764 BLOGE(sc, "Unsupported classification command: %d\n", 8765 elem->message.data.eth_event.echo); 8766 return; 8767 } 8768 8769 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); 8770 8771 if (rc < 0) { 8772 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); 8773 } else if (rc > 0) { 8774 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); 8775 } 8776 } 8777 8778 static void 8779 bxe_handle_rx_mode_eqe(struct bxe_softc *sc, 8780 union event_ring_elem *elem) 8781 { 8782 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 8783 8784 /* send the rx_mode command again if it was requested */ 8785 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, 8786 &sc->sp_state)) { 8787 bxe_set_storm_rx_mode(sc); 8788 } 8789 #if 0 8790 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED, 8791 &sc->sp_state)) { 8792 bxe_set_iscsi_eth_rx_mode(sc, TRUE); 8793 } 8794 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED, 8795 &sc->sp_state)) { 8796 bxe_set_iscsi_eth_rx_mode(sc, FALSE); 8797 } 8798 #endif 8799 } 8800 8801 static void 8802 bxe_update_eq_prod(struct bxe_softc *sc, 8803 uint16_t prod) 8804 { 8805 storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); 8806 wmb(); /* keep prod updates ordered */ 8807 } 8808 8809 static void 8810 bxe_eq_int(struct bxe_softc *sc) 8811 { 8812 uint16_t hw_cons, sw_cons, sw_prod; 8813 union event_ring_elem *elem; 8814 uint8_t echo; 8815 uint32_t cid; 8816 uint8_t opcode; 8817 int spqe_cnt = 0; 8818 struct ecore_queue_sp_obj *q_obj; 8819 struct ecore_func_sp_obj *f_obj = &sc->func_obj; 8820 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; 8821 8822 hw_cons = le16toh(*sc->eq_cons_sb); 8823 8824 /* 8825 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. 8826 * When we get to the next page we need to adjust so the loop 8827 * condition below will be met. The next element is the size of a 8828 * regular element, hence we increment by 1. 8829 */ 8830 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { 8831 hw_cons++; 8832 } 8833 8834 /* 8835 * This function may never run in parallel with itself for a 8836 * specific sc, so there is no need for a read memory barrier here.
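 * (It is only ever called from bxe_handle_sp_tq(), which runs in the
 * single-threaded sp_tq taskqueue created by bxe_interrupt_attach().)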
8837 */ 8838 sw_cons = sc->eq_cons; 8839 sw_prod = sc->eq_prod; 8840 8841 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", 8842 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); 8843 8844 for (; 8845 sw_cons != hw_cons; 8846 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 8847 8848 elem = &sc->eq[EQ_DESC(sw_cons)]; 8849 8850 #if 0 8851 int rc; 8852 rc = bxe_iov_eq_sp_event(sc, elem); 8853 if (!rc) { 8854 BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc); 8855 goto next_spqe; 8856 } 8857 #endif 8858 8859 /* elem CID originates from FW, actually LE */ 8860 cid = SW_CID(elem->message.data.cfc_del_event.cid); 8861 opcode = elem->message.opcode; 8862 8863 /* handle eq element */ 8864 switch (opcode) { 8865 #if 0 8866 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 8867 BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n"); 8868 bxe_vf_mbx(sc, &elem->message.data.vf_pf_event); 8869 continue; 8870 #endif 8871 8872 case EVENT_RING_OPCODE_STAT_QUERY: 8873 BLOGD(sc, DBG_SP, "got statistics completion event %d\n", 8874 sc->stats_comp++); 8875 /* nothing to do with stats comp */ 8876 goto next_spqe; 8877 8878 case EVENT_RING_OPCODE_CFC_DEL: 8879 /* handle according to cid range */ 8880 /* we may want to verify here that the sc state is HALTING */ 8881 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); 8882 q_obj = bxe_cid_to_q_obj(sc, cid); 8883 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 8884 break; 8885 } 8886 goto next_spqe; 8887 8888 case EVENT_RING_OPCODE_STOP_TRAFFIC: 8889 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); 8890 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 8891 break; 8892 } 8893 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); 8894 goto next_spqe; 8895 8896 case EVENT_RING_OPCODE_START_TRAFFIC: 8897 BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); 8898 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { 8899 break; 8900 } 8901 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); 8902 goto next_spqe; 8903 8904 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 8905 echo = elem->message.data.function_update_event.echo; 8906 if (echo == SWITCH_UPDATE) { 8907 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); 8908 if (f_obj->complete_cmd(sc, f_obj, 8909 ECORE_F_CMD_SWITCH_UPDATE)) { 8910 break; 8911 } 8912 } 8913 else { 8914 BLOGD(sc, DBG_SP, 8915 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 8916 #if 0 8917 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE); 8918 /* 8919 * We will perform the queues update from the sp_core_task as 8920 * all queue SP operations should run with CORE_LOCK. 
8921 */ 8922 bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state); 8923 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); 8924 #endif 8925 } 8926 goto next_spqe; 8927 8928 #if 0 8929 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 8930 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS); 8931 bxe_after_afex_vif_lists(sc, elem); 8932 goto next_spqe; 8933 #endif 8934 8935 case EVENT_RING_OPCODE_FORWARD_SETUP: 8936 q_obj = &bxe_fwd_sp_obj(sc, q_obj); 8937 if (q_obj->complete_cmd(sc, q_obj, 8938 ECORE_Q_CMD_SETUP_TX_ONLY)) { 8939 break; 8940 } 8941 goto next_spqe; 8942 8943 case EVENT_RING_OPCODE_FUNCTION_START: 8944 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); 8945 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 8946 break; 8947 } 8948 goto next_spqe; 8949 8950 case EVENT_RING_OPCODE_FUNCTION_STOP: 8951 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); 8952 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 8953 break; 8954 } 8955 goto next_spqe; 8956 } 8957 8958 switch (opcode | sc->state) { 8959 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): 8960 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): 8961 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8962 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid); 8963 rss_raw->clear_pending(rss_raw); 8964 break; 8965 8966 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): 8967 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): 8968 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): 8969 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): 8970 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): 8971 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8972 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); 8973 bxe_handle_classification_eqe(sc, elem); 8974 break; 8975 8976 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): 8977 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): 8978 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8979 BLOGD(sc, DBG_SP, "got mcast ramrod\n"); 8980 bxe_handle_mcast_eqe(sc); 8981 break; 8982 8983 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): 8984 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): 8985 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8986 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); 8987 bxe_handle_rx_mode_eqe(sc, elem); 8988 break; 8989 8990 default: 8991 /* unknown event log error and continue */ 8992 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", 8993 elem->message.opcode, sc->state); 8994 } 8995 8996 next_spqe: 8997 spqe_cnt++; 8998 } /* for */ 8999 9000 mb(); 9001 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); 9002 9003 sc->eq_cons = sw_cons; 9004 sc->eq_prod = sw_prod; 9005 9006 /* make sure that above mem writes were issued towards the memory */ 9007 wmb(); 9008 9009 /* update producer */ 9010 bxe_update_eq_prod(sc, sc->eq_prod); 9011 } 9012 9013 static void 9014 bxe_handle_sp_tq(void *context, 9015 int pending) 9016 { 9017 struct bxe_softc *sc = (struct bxe_softc *)context; 9018 uint16_t status; 9019 9020 BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); 9021 9022 /* what work needs to be performed? 
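 * bxe_update_dsb_idx() compares the cached default status block
 * indices against the chip-written copies and returns
 * BXE_DEF_SB_ATT_IDX and/or BXE_DEF_SB_IDX for whichever moved.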
*/ 9023 status = bxe_update_dsb_idx(sc); 9024 9025 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); 9026 9027 /* HW attentions */ 9028 if (status & BXE_DEF_SB_ATT_IDX) { 9029 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); 9030 bxe_attn_int(sc); 9031 status &= ~BXE_DEF_SB_ATT_IDX; 9032 } 9033 9034 /* SP events: STAT_QUERY and others */ 9035 if (status & BXE_DEF_SB_IDX) { 9036 /* handle EQ completions */ 9037 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); 9038 bxe_eq_int(sc); 9039 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 9040 le16toh(sc->def_idx), IGU_INT_NOP, 1); 9041 status &= ~BXE_DEF_SB_IDX; 9042 } 9043 9044 /* if status is non zero then something went wrong */ 9045 if (__predict_false(status)) { 9046 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); 9047 } 9048 9049 /* ack status block only if something was actually handled */ 9050 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 9051 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); 9052 9053 /* 9054 * Must be called after the EQ processing (since eq leads to sriov 9055 * ramrod completion flows). 9056 * This flow may have been scheduled by the arrival of a ramrod 9057 * completion, or by the sriov code rescheduling itself. 9058 */ 9059 // XXX bxe_iov_sp_task(sc); 9060 9061 #if 0 9062 /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */ 9063 if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, 9064 &sc->sp_state)) { 9065 bxe_link_report(sc); 9066 bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 9067 } 9068 #endif 9069 } 9070 9071 static void 9072 bxe_handle_fp_tq(void *context, 9073 int pending) 9074 { 9075 struct bxe_fastpath *fp = (struct bxe_fastpath *)context; 9076 struct bxe_softc *sc = fp->sc; 9077 uint8_t more_tx = FALSE; 9078 uint8_t more_rx = FALSE; 9079 9080 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); 9081 9082 /* XXX 9083 * IFF_DRV_RUNNING state can't be checked here since we process 9084 * slowpath events on a client queue during setup. Instead 9085 * we need to add a "process/continue" flag here that the driver 9086 * can use to tell the task here not to do anything. 
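 * Until then, note the shape of the handler below: refresh the
 * fastpath status block index, reap TX then RX completions, and
 * either re-enqueue this task (more RX work pending) or re-enable
 * the IGU interrupt.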
9087 */ 9088 #if 0 9089 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { 9090 return; 9091 } 9092 #endif 9093 9094 /* update the fastpath index */ 9095 bxe_update_fp_sb_idx(fp); 9096 9097 /* XXX add loop here if ever support multiple tx CoS */ 9098 /* fp->txdata[cos] */ 9099 if (bxe_has_tx_work(fp)) { 9100 BXE_FP_TX_LOCK(fp); 9101 more_tx = bxe_txeof(sc, fp); 9102 BXE_FP_TX_UNLOCK(fp); 9103 } 9104 9105 if (bxe_has_rx_work(fp)) { 9106 more_rx = bxe_rxeof(sc, fp); 9107 } 9108 9109 if (more_rx /*|| more_tx*/) { 9110 /* still more work to do */ 9111 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 9112 return; 9113 } 9114 9115 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 9116 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 9117 } 9118 9119 static void 9120 bxe_task_fp(struct bxe_fastpath *fp) 9121 { 9122 struct bxe_softc *sc = fp->sc; 9123 uint8_t more_tx = FALSE; 9124 uint8_t more_rx = FALSE; 9125 9126 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); 9127 9128 /* update the fastpath index */ 9129 bxe_update_fp_sb_idx(fp); 9130 9131 /* XXX add loop here if ever support multiple tx CoS */ 9132 /* fp->txdata[cos] */ 9133 if (bxe_has_tx_work(fp)) { 9134 BXE_FP_TX_LOCK(fp); 9135 more_tx = bxe_txeof(sc, fp); 9136 BXE_FP_TX_UNLOCK(fp); 9137 } 9138 9139 if (bxe_has_rx_work(fp)) { 9140 more_rx = bxe_rxeof(sc, fp); 9141 } 9142 9143 if (more_rx /*|| more_tx*/) { 9144 /* still more work to do, bail out of this ISR and process later */ 9145 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 9146 return; 9147 } 9148 9149 /* 9150 * Here we write the fastpath index taken before doing any tx or rx work. 9151 * It is quite possible that other hw events occurred up to this point and 9152 * were already processed accordingly above. Since we're going to 9153 * write an older fastpath index, another interrupt may arrive in which 9154 * no work gets done. 9155 */ 9156 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 9157 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 9158 } 9159 9160 /* 9161 * Legacy interrupt entry point. 9162 * 9163 * Verifies that the controller generated the interrupt and 9164 * then calls a separate routine to handle the various 9165 * interrupt causes: link, RX, and TX. 9166 */ 9167 static void 9168 bxe_intr_legacy(void *xsc) 9169 { 9170 struct bxe_softc *sc = (struct bxe_softc *)xsc; 9171 struct bxe_fastpath *fp; 9172 uint16_t status, mask; 9173 int i; 9174 9175 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); 9176 9177 #if 0 9178 /* Don't handle any interrupts if we're not ready. */ 9179 if (__predict_false(sc->intr_sem != 0)) { 9180 return; 9181 } 9182 #endif 9183 9184 /* 9185 * 0 for ustorm, 1 for cstorm 9186 * the bits returned from ack_int() are 0-15 9187 * bit 0 = attention status block 9188 * bit 1 = fast path status block 9189 * a mask of 0x2 or more = tx/rx event 9190 * a mask of 1 = slow path event 9191 */ 9192 9193 status = bxe_ack_int(sc); 9194 9195 /* the interrupt is not for us */ 9196 if (__predict_false(status == 0)) { 9197 BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); 9198 return; 9199 } 9200 9201 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); 9202 9203 FOR_EACH_ETH_QUEUE(sc, i) { 9204 fp = &sc->fp[i]; 9205 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); 9206 if (status & mask) { 9207 /* acknowledge and disable further fastpath interrupts */ 9208 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9209 bxe_task_fp(fp); 9210 status &= ~mask; 9211 } 9212 } 9213 9214 #if 0 9215 if (CNIC_SUPPORT(sc)) { 9216 mask = 0x2; 9217 if (status & (mask | 0x1)) { 9218 ...
9219 status &= ~mask; 9220 } 9221 } 9222 #endif 9223 9224 if (__predict_false(status & 0x1)) { 9225 /* acknowledge and disable further slowpath interrupts */ 9226 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9227 9228 /* schedule slowpath handler */ 9229 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); 9230 9231 status &= ~0x1; 9232 } 9233 9234 if (__predict_false(status)) { 9235 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); 9236 } 9237 } 9238 9239 /* slowpath interrupt entry point */ 9240 static void 9241 bxe_intr_sp(void *xsc) 9242 { 9243 struct bxe_softc *sc = (struct bxe_softc *)xsc; 9244 9245 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); 9246 9247 /* acknowledge and disable further slowpath interrupts */ 9248 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9249 9250 /* schedule slowpath handler */ 9251 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); 9252 } 9253 9254 /* fastpath interrupt entry point */ 9255 static void 9256 bxe_intr_fp(void *xfp) 9257 { 9258 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; 9259 struct bxe_softc *sc = fp->sc; 9260 9261 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); 9262 9263 BLOGD(sc, DBG_INTR, 9264 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", 9265 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); 9266 9267 #if 0 9268 /* Don't handle any interrupts if we're not ready. */ 9269 if (__predict_false(sc->intr_sem != 0)) { 9270 return; 9271 } 9272 #endif 9273 9274 /* acknowledge and disable further fastpath interrupts */ 9275 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9276 9277 bxe_task_fp(fp); 9278 } 9279 9280 /* Release all interrupts allocated by the driver. */ 9281 static void 9282 bxe_interrupt_free(struct bxe_softc *sc) 9283 { 9284 int i; 9285 9286 switch (sc->interrupt_mode) { 9287 case INTR_MODE_INTX: 9288 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); 9289 if (sc->intr[0].resource != NULL) { 9290 bus_release_resource(sc->dev, 9291 SYS_RES_IRQ, 9292 sc->intr[0].rid, 9293 sc->intr[0].resource); 9294 } 9295 break; 9296 case INTR_MODE_MSI: 9297 for (i = 0; i < sc->intr_count; i++) { 9298 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); 9299 if (sc->intr[i].resource && sc->intr[i].rid) { 9300 bus_release_resource(sc->dev, 9301 SYS_RES_IRQ, 9302 sc->intr[i].rid, 9303 sc->intr[i].resource); 9304 } 9305 } 9306 pci_release_msi(sc->dev); 9307 break; 9308 case INTR_MODE_MSIX: 9309 for (i = 0; i < sc->intr_count; i++) { 9310 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); 9311 if (sc->intr[i].resource && sc->intr[i].rid) { 9312 bus_release_resource(sc->dev, 9313 SYS_RES_IRQ, 9314 sc->intr[i].rid, 9315 sc->intr[i].resource); 9316 } 9317 } 9318 pci_release_msi(sc->dev); 9319 break; 9320 default: 9321 /* nothing to do as initial allocation failed */ 9322 break; 9323 } 9324 } 9325 9326 /* 9327 * This function determines and allocates the appropriate 9328 * interrupt based on system capabilities and user request. 9329 * 9330 * The user may force a particular interrupt mode, specify 9331 * the number of receive queues, specify the method for 9332 * distributing received frames to receive queues, or use 9333 * the default settings which will automatically select the 9334 * best supported combination. In addition, the OS may or 9335 * may not support certain combinations of these settings.
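 * (The fallback order implemented below is MSI-X, with one slowpath
 * vector plus one per fastpath queue, then a single MSI vector, then
 * a shared legacy INTx line.)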
9336 * This routine attempts to reconcile the settings requested 9337 * by the user with the capabilities available from the system 9338 * to select the optimal combination of features. 9339 * 9340 * Returns: 9341 * 0 = Success, !0 = Failure. 9342 */ 9343 static int 9344 bxe_interrupt_alloc(struct bxe_softc *sc) 9345 { 9346 int msix_count = 0; 9347 int msi_count = 0; 9348 int num_requested = 0; 9349 int num_allocated = 0; 9350 int rid, i, j; 9351 int rc; 9352 9353 /* get the number of available MSI/MSI-X interrupts from the OS */ 9354 if (sc->interrupt_mode > 0) { 9355 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { 9356 msix_count = pci_msix_count(sc->dev); 9357 } 9358 9359 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { 9360 msi_count = pci_msi_count(sc->dev); 9361 } 9362 9363 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", 9364 msi_count, msix_count); 9365 } 9366 9367 do { /* try allocating MSI-X interrupt resources (at least 2) */ 9368 if (sc->interrupt_mode != INTR_MODE_MSIX) { 9369 break; 9370 } 9371 9372 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || 9373 (msix_count < 2)) { 9374 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9375 break; 9376 } 9377 9378 /* ask for the necessary number of MSI-X vectors */ 9379 num_requested = min((sc->num_queues + 1), msix_count); 9380 9381 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); 9382 9383 num_allocated = num_requested; 9384 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { 9385 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); 9386 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9387 break; 9388 } 9389 9390 if (num_allocated < 2) { /* possible? */ 9391 BLOGE(sc, "MSI-X allocation less than 2!\n"); 9392 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9393 pci_release_msi(sc->dev); 9394 break; 9395 } 9396 9397 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", 9398 num_requested, num_allocated); 9399 9400 /* best effort so use the number of vectors allocated to us */ 9401 sc->intr_count = num_allocated; 9402 sc->num_queues = num_allocated - 1; 9403 9404 rid = 1; /* initial resource identifier */ 9405 9406 /* allocate the MSI-X vectors */ 9407 for (i = 0; i < num_allocated; i++) { 9408 sc->intr[i].rid = (rid + i); 9409 9410 if ((sc->intr[i].resource = 9411 bus_alloc_resource_any(sc->dev, 9412 SYS_RES_IRQ, 9413 &sc->intr[i].rid, 9414 RF_ACTIVE)) == NULL) { 9415 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", 9416 i, (rid + i)); 9417 9418 for (j = (i - 1); j >= 0; j--) { 9419 bus_release_resource(sc->dev, 9420 SYS_RES_IRQ, 9421 sc->intr[j].rid, 9422 sc->intr[j].resource); 9423 } 9424 9425 sc->intr_count = 0; 9426 sc->num_queues = 0; 9427 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9428 pci_release_msi(sc->dev); 9429 break; 9430 } 9431 9432 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); 9433 } 9434 } while (0); 9435 9436 do { /* try allocating MSI vector resources (at least 1) */ 9437 if (sc->interrupt_mode != INTR_MODE_MSI) { 9438 break; 9439 } 9440 9441 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || 9442 (msi_count < 1)) { 9443 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9444 break; 9445 } 9446 9447 /* ask for a single MSI vector */ 9448 num_requested = 1; 9449 9450 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); 9451 9452 num_allocated = num_requested; 9453 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { 9454 BLOGE(sc, "MSI alloc failed (%d)!\n",
rc); 9455 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9456 break; 9457 } 9458 9459 if (num_allocated != 1) { /* possible? */ 9460 BLOGE(sc, "MSI allocation is not 1!\n"); 9461 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9462 pci_release_msi(sc->dev); 9463 break; 9464 } 9465 9466 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", 9467 num_requested, num_allocated); 9468 9469 /* best effort so use the number of vectors allocated to us */ 9470 sc->intr_count = num_allocated; 9471 sc->num_queues = num_allocated; 9472 9473 rid = 1; /* initial resource identifier */ 9474 9475 sc->intr[0].rid = rid; 9476 9477 if ((sc->intr[0].resource = 9478 bus_alloc_resource_any(sc->dev, 9479 SYS_RES_IRQ, 9480 &sc->intr[0].rid, 9481 RF_ACTIVE)) == NULL) { 9482 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); 9483 sc->intr_count = 0; 9484 sc->num_queues = 0; 9485 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9486 pci_release_msi(sc->dev); 9487 break; 9488 } 9489 9490 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); 9491 } while (0); 9492 9493 do { /* try allocating INTx vector resources */ 9494 if (sc->interrupt_mode != INTR_MODE_INTX) { 9495 break; 9496 } 9497 9498 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); 9499 9500 /* only one vector for INTx */ 9501 sc->intr_count = 1; 9502 sc->num_queues = 1; 9503 9504 rid = 0; /* initial resource identifier */ 9505 9506 sc->intr[0].rid = rid; 9507 9508 if ((sc->intr[0].resource = 9509 bus_alloc_resource_any(sc->dev, 9510 SYS_RES_IRQ, 9511 &sc->intr[0].rid, 9512 (RF_ACTIVE | RF_SHAREABLE))) == NULL) { 9513 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); 9514 sc->intr_count = 0; 9515 sc->num_queues = 0; 9516 sc->interrupt_mode = -1; /* Failed! */ 9517 break; 9518 } 9519 9520 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); 9521 } while (0); 9522 9523 if (sc->interrupt_mode == -1) { 9524 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); 9525 rc = 1; 9526 } else { 9527 BLOGD(sc, DBG_LOAD, 9528 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", 9529 sc->interrupt_mode, sc->num_queues); 9530 rc = 0; 9531 } 9532 9533 return (rc); 9534 } 9535 9536 static void 9537 bxe_interrupt_detach(struct bxe_softc *sc) 9538 { 9539 struct bxe_fastpath *fp; 9540 int i; 9541 9542 /* release interrupt resources */ 9543 for (i = 0; i < sc->intr_count; i++) { 9544 if (sc->intr[i].resource && sc->intr[i].tag) { 9545 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); 9546 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); 9547 } 9548 } 9549 9550 for (i = 0; i < sc->num_queues; i++) { 9551 fp = &sc->fp[i]; 9552 if (fp->tq) { 9553 taskqueue_drain(fp->tq, &fp->tq_task); 9554 taskqueue_free(fp->tq); 9555 fp->tq = NULL; 9556 } 9557 } 9558 9559 9560 if (sc->sp_tq) { 9561 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); 9562 taskqueue_free(sc->sp_tq); 9563 sc->sp_tq = NULL; 9564 } 9565 } 9566 9567 /* 9568 * Enables interrupts and attach to the ISR. 9569 * 9570 * When using multiple MSI/MSI-X vectors the first vector 9571 * is used for slowpath operations while all remaining 9572 * vectors are used for fastpath operations. If only a 9573 * single MSI/MSI-X vector is used (SINGLE_ISR) then the 9574 * ISR must look for both slowpath and fastpath completions. 
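 * Concretely, MSI-X mode attaches bxe_intr_sp() to vector 0 and
 * bxe_intr_fp() to each remaining vector, while the MSI and INTx
 * modes attach the combined bxe_intr_legacy() handler.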
9575 */ 9576 static int 9577 bxe_interrupt_attach(struct bxe_softc *sc) 9578 { 9579 struct bxe_fastpath *fp; 9580 int rc = 0; 9581 int i; 9582 9583 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), 9584 "bxe%d_sp_tq", sc->unit); 9585 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); 9586 sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT, 9587 taskqueue_thread_enqueue, 9588 &sc->sp_tq); 9589 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ 9590 "%s", sc->sp_tq_name); 9591 9592 9593 for (i = 0; i < sc->num_queues; i++) { 9594 fp = &sc->fp[i]; 9595 snprintf(fp->tq_name, sizeof(fp->tq_name), 9596 "bxe%d_fp%d_tq", sc->unit, i); 9597 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); 9598 fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT, 9599 taskqueue_thread_enqueue, 9600 &fp->tq); 9601 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ 9602 "%s", fp->tq_name); 9603 } 9604 9605 /* setup interrupt handlers */ 9606 if (sc->interrupt_mode == INTR_MODE_MSIX) { 9607 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); 9608 9609 /* 9610 * Setup the interrupt handler. Note that we pass the driver instance 9611 * to the interrupt handler for the slowpath. 9612 */ 9613 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9614 (INTR_TYPE_NET | INTR_MPSAFE), 9615 NULL, bxe_intr_sp, sc, 9616 &sc->intr[0].tag)) != 0) { 9617 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); 9618 goto bxe_interrupt_attach_exit; 9619 } 9620 9621 bus_describe_intr(sc->dev, sc->intr[0].resource, 9622 sc->intr[0].tag, "sp"); 9623 9624 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ 9625 9626 /* initialize the fastpath vectors (note the first was used for sp) */ 9627 for (i = 0; i < sc->num_queues; i++) { 9628 fp = &sc->fp[i]; 9629 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); 9630 9631 /* 9632 * Setup the interrupt handler. Note that we pass the 9633 * fastpath context to the interrupt handler in this 9634 * case. 9635 */ 9636 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, 9637 (INTR_TYPE_NET | INTR_MPSAFE), 9638 NULL, bxe_intr_fp, fp, 9639 &sc->intr[i + 1].tag)) != 0) { 9640 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", 9641 (i + 1), rc); 9642 goto bxe_interrupt_attach_exit; 9643 } 9644 9645 bus_describe_intr(sc->dev, sc->intr[i + 1].resource, 9646 sc->intr[i + 1].tag, "fp%02d", i); 9647 9648 /* bind the fastpath instance to a cpu */ 9649 if (sc->num_queues > 1) { 9650 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); 9651 } 9652 9653 fp->state = BXE_FP_STATE_IRQ; 9654 } 9655 } else if (sc->interrupt_mode == INTR_MODE_MSI) { 9656 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); 9657 9658 /* 9659 * Setup the interrupt handler. Note that we pass the 9660 * driver instance to the interrupt handler which 9661 * will handle both the slowpath and fastpath. 9662 */ 9663 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9664 (INTR_TYPE_NET | INTR_MPSAFE), 9665 NULL, bxe_intr_legacy, sc, 9666 &sc->intr[0].tag)) != 0) { 9667 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); 9668 goto bxe_interrupt_attach_exit; 9669 } 9670 9671 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ 9672 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); 9673 9674 /* 9675 * Setup the interrupt handler. Note that we pass the 9676 * driver instance to the interrupt handler which 9677 * will handle both the slowpath and fastpath. 
9678 */ 9679 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9680 (INTR_TYPE_NET | INTR_MPSAFE), 9681 NULL, bxe_intr_legacy, sc, 9682 &sc->intr[0].tag)) != 0) { 9683 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); 9684 goto bxe_interrupt_attach_exit; 9685 } 9686 } 9687 9688 bxe_interrupt_attach_exit: 9689 9690 return (rc); 9691 } 9692 9693 static int bxe_init_hw_common_chip(struct bxe_softc *sc); 9694 static int bxe_init_hw_common(struct bxe_softc *sc); 9695 static int bxe_init_hw_port(struct bxe_softc *sc); 9696 static int bxe_init_hw_func(struct bxe_softc *sc); 9697 static void bxe_reset_common(struct bxe_softc *sc); 9698 static void bxe_reset_port(struct bxe_softc *sc); 9699 static void bxe_reset_func(struct bxe_softc *sc); 9700 static int bxe_gunzip_init(struct bxe_softc *sc); 9701 static void bxe_gunzip_end(struct bxe_softc *sc); 9702 static int bxe_init_firmware(struct bxe_softc *sc); 9703 static void bxe_release_firmware(struct bxe_softc *sc); 9704 9705 static struct 9706 ecore_func_sp_drv_ops bxe_func_sp_drv = { 9707 .init_hw_cmn_chip = bxe_init_hw_common_chip, 9708 .init_hw_cmn = bxe_init_hw_common, 9709 .init_hw_port = bxe_init_hw_port, 9710 .init_hw_func = bxe_init_hw_func, 9711 9712 .reset_hw_cmn = bxe_reset_common, 9713 .reset_hw_port = bxe_reset_port, 9714 .reset_hw_func = bxe_reset_func, 9715 9716 .gunzip_init = bxe_gunzip_init, 9717 .gunzip_end = bxe_gunzip_end, 9718 9719 .init_fw = bxe_init_firmware, 9720 .release_fw = bxe_release_firmware, 9721 }; 9722 9723 static void 9724 bxe_init_func_obj(struct bxe_softc *sc) 9725 { 9726 sc->dmae_ready = 0; 9727 9728 ecore_init_func_obj(sc, 9729 &sc->func_obj, 9730 BXE_SP(sc, func_rdata), 9731 BXE_SP_MAPPING(sc, func_rdata), 9732 BXE_SP(sc, func_afex_rdata), 9733 BXE_SP_MAPPING(sc, func_afex_rdata), 9734 &bxe_func_sp_drv); 9735 } 9736 9737 static int 9738 bxe_init_hw(struct bxe_softc *sc, 9739 uint32_t load_code) 9740 { 9741 struct ecore_func_state_params func_params = { NULL }; 9742 int rc; 9743 9744 /* prepare the parameters for function state transitions */ 9745 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 9746 9747 func_params.f_obj = &sc->func_obj; 9748 func_params.cmd = ECORE_F_CMD_HW_INIT; 9749 9750 func_params.params.hw_init.load_phase = load_code; 9751 9752 /* 9753 * Via a plethora of function pointers, we will eventually reach 9754 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
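 * (ecore_func_state_change() dispatches through the bxe_func_sp_drv
 * ops table registered by bxe_init_func_obj() above.)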
9755 */ 9756 rc = ecore_func_state_change(sc, &func_params); 9757 9758 return (rc); 9759 } 9760 9761 static void 9762 bxe_fill(struct bxe_softc *sc, 9763 uint32_t addr, 9764 int fill, 9765 uint32_t len) 9766 { 9767 uint32_t i; 9768 9769 if (!(len % 4) && !(addr % 4)) { 9770 for (i = 0; i < len; i += 4) { 9771 REG_WR(sc, (addr + i), fill); 9772 } 9773 } else { 9774 for (i = 0; i < len; i++) { 9775 REG_WR8(sc, (addr + i), fill); 9776 } 9777 } 9778 } 9779 9780 /* writes FP SP data to FW - data_size in dwords */ 9781 static void 9782 bxe_wr_fp_sb_data(struct bxe_softc *sc, 9783 int fw_sb_id, 9784 uint32_t *sb_data_p, 9785 uint32_t data_size) 9786 { 9787 int index; 9788 9789 for (index = 0; index < data_size; index++) { 9790 REG_WR(sc, 9791 (BAR_CSTRORM_INTMEM + 9792 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 9793 (sizeof(uint32_t) * index)), 9794 *(sb_data_p + index)); 9795 } 9796 } 9797 9798 static void 9799 bxe_zero_fp_sb(struct bxe_softc *sc, 9800 int fw_sb_id) 9801 { 9802 struct hc_status_block_data_e2 sb_data_e2; 9803 struct hc_status_block_data_e1x sb_data_e1x; 9804 uint32_t *sb_data_p; 9805 uint32_t data_size = 0; 9806 9807 if (!CHIP_IS_E1x(sc)) { 9808 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9809 sb_data_e2.common.state = SB_DISABLED; 9810 sb_data_e2.common.p_func.vf_valid = FALSE; 9811 sb_data_p = (uint32_t *)&sb_data_e2; 9812 data_size = (sizeof(struct hc_status_block_data_e2) / 9813 sizeof(uint32_t)); 9814 } else { 9815 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9816 sb_data_e1x.common.state = SB_DISABLED; 9817 sb_data_e1x.common.p_func.vf_valid = FALSE; 9818 sb_data_p = (uint32_t *)&sb_data_e1x; 9819 data_size = (sizeof(struct hc_status_block_data_e1x) / 9820 sizeof(uint32_t)); 9821 } 9822 9823 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9824 9825 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 9826 0, CSTORM_STATUS_BLOCK_SIZE); 9827 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 9828 0, CSTORM_SYNC_BLOCK_SIZE); 9829 } 9830 9831 static void 9832 bxe_wr_sp_sb_data(struct bxe_softc *sc, 9833 struct hc_sp_status_block_data *sp_sb_data) 9834 { 9835 int i; 9836 9837 for (i = 0; 9838 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 9839 i++) { 9840 REG_WR(sc, 9841 (BAR_CSTRORM_INTMEM + 9842 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 9843 (i * sizeof(uint32_t))), 9844 *((uint32_t *)sp_sb_data + i)); 9845 } 9846 } 9847 9848 static void 9849 bxe_zero_sp_sb(struct bxe_softc *sc) 9850 { 9851 struct hc_sp_status_block_data sp_sb_data; 9852 9853 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 9854 9855 sp_sb_data.state = SB_DISABLED; 9856 sp_sb_data.p_func.vf_valid = FALSE; 9857 9858 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9859 9860 bxe_fill(sc, 9861 (BAR_CSTRORM_INTMEM + 9862 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 9863 0, CSTORM_SP_STATUS_BLOCK_SIZE); 9864 bxe_fill(sc, 9865 (BAR_CSTRORM_INTMEM + 9866 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 9867 0, CSTORM_SP_SYNC_BLOCK_SIZE); 9868 } 9869 9870 static void 9871 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 9872 int igu_sb_id, 9873 int igu_seg_id) 9874 { 9875 hc_sm->igu_sb_id = igu_sb_id; 9876 hc_sm->igu_seg_id = igu_seg_id; 9877 hc_sm->timer_value = 0xFF; 9878 hc_sm->time_to_expire = 0xFFFFFFFF; 9879 } 9880 9881 static void 9882 bxe_map_sb_state_machines(struct hc_index_data *index_data) 9883 { 9884 /* zero out state machine indices */ 9885 9886 /* rx indices */ 9887 
index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 9888 9889 /* tx indices */ 9890 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 9891 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 9892 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 9893 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 9894 9895 /* map indices */ 9896 9897 /* rx indices */ 9898 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 9899 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9900 9901 /* tx indices */ 9902 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 9903 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9904 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 9905 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9906 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 9907 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9908 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 9909 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9910 } 9911 9912 static void 9913 bxe_init_sb(struct bxe_softc *sc, 9914 bus_addr_t busaddr, 9915 int vfid, 9916 uint8_t vf_valid, 9917 int fw_sb_id, 9918 int igu_sb_id) 9919 { 9920 struct hc_status_block_data_e2 sb_data_e2; 9921 struct hc_status_block_data_e1x sb_data_e1x; 9922 struct hc_status_block_sm *hc_sm_p; 9923 uint32_t *sb_data_p; 9924 int igu_seg_id; 9925 int data_size; 9926 9927 if (CHIP_INT_MODE_IS_BC(sc)) { 9928 igu_seg_id = HC_SEG_ACCESS_NORM; 9929 } else { 9930 igu_seg_id = IGU_SEG_ACCESS_NORM; 9931 } 9932 9933 bxe_zero_fp_sb(sc, fw_sb_id); 9934 9935 if (!CHIP_IS_E1x(sc)) { 9936 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9937 sb_data_e2.common.state = SB_ENABLED; 9938 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); 9939 sb_data_e2.common.p_func.vf_id = vfid; 9940 sb_data_e2.common.p_func.vf_valid = vf_valid; 9941 sb_data_e2.common.p_func.vnic_id = SC_VN(sc); 9942 sb_data_e2.common.same_igu_sb_1b = TRUE; 9943 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); 9944 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); 9945 hc_sm_p = sb_data_e2.common.state_machine; 9946 sb_data_p = (uint32_t *)&sb_data_e2; 9947 data_size = (sizeof(struct hc_status_block_data_e2) / 9948 sizeof(uint32_t)); 9949 bxe_map_sb_state_machines(sb_data_e2.index_data); 9950 } else { 9951 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9952 sb_data_e1x.common.state = SB_ENABLED; 9953 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); 9954 sb_data_e1x.common.p_func.vf_id = 0xff; 9955 sb_data_e1x.common.p_func.vf_valid = FALSE; 9956 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); 9957 sb_data_e1x.common.same_igu_sb_1b = TRUE; 9958 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); 9959 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); 9960 hc_sm_p = sb_data_e1x.common.state_machine; 9961 sb_data_p = (uint32_t *)&sb_data_e1x; 9962 data_size = (sizeof(struct hc_status_block_data_e1x) / 9963 sizeof(uint32_t)); 9964 bxe_map_sb_state_machines(sb_data_e1x.index_data); 9965 } 9966 9967 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); 9968 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); 9969 9970 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); 9971 9972 /* write indices to HW - PCI guarantees endianity of regpairs */ 9973 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9974 } 9975 9976 static inline uint8_t 9977 bxe_fp_qzone_id(struct bxe_fastpath *fp) 9978 { 9979 if (CHIP_IS_E1x(fp->sc)) { 9980 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); 9981 } 
else { 9982 return (fp->cl_id); 9983 } 9984 } 9985 9986 static inline uint32_t 9987 bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, 9988 struct bxe_fastpath *fp) 9989 { 9990 uint32_t offset = BAR_USTRORM_INTMEM; 9991 9992 #if 0 9993 if (IS_VF(sc)) { 9994 return (PXP_VF_ADDR_USDM_QUEUES_START + 9995 (sc->acquire_resp.resc.hw_qid[fp->index] * 9996 sizeof(struct ustorm_queue_zone_data))); 9997 } else 9998 #endif 9999 if (!CHIP_IS_E1x(sc)) { 10000 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 10001 } else { 10002 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); 10003 } 10004 10005 return (offset); 10006 } 10007 10008 static void 10009 bxe_init_eth_fp(struct bxe_softc *sc, 10010 int idx) 10011 { 10012 struct bxe_fastpath *fp = &sc->fp[idx]; 10013 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; 10014 unsigned long q_type = 0; 10015 int cos; 10016 10017 fp->sc = sc; 10018 fp->index = idx; 10019 10020 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 10021 "bxe%d_fp%d_tx_lock", sc->unit, idx); 10022 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 10023 10024 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), 10025 "bxe%d_fp%d_rx_lock", sc->unit, idx); 10026 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); 10027 10028 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); 10029 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); 10030 10031 fp->cl_id = (CHIP_IS_E1x(sc)) ? 10032 (SC_L_ID(sc) + idx) : 10033 /* want client ID same as IGU SB ID for non-E1 */ 10034 fp->igu_sb_id; 10035 fp->cl_qzone_id = bxe_fp_qzone_id(fp); 10036 10037 /* setup sb indices */ 10038 if (!CHIP_IS_E1x(sc)) { 10039 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; 10040 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; 10041 } else { 10042 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; 10043 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; 10044 } 10045 10046 /* init shortcut */ 10047 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); 10048 10049 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; 10050 10051 /* 10052 * XXX If multiple CoS is ever supported then each fastpath structure 10053 * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 
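 * For now each entry of cids[] below is simply set to the queue index,
 * so only CoS 0 is actually exercised.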
10054 */
10055 for (cos = 0; cos < sc->max_cos; cos++) {
10056 cids[cos] = idx;
10057 }
10058 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
10059
10060 /* nothing more for a VF to do */
10061 if (IS_VF(sc)) {
10062 return;
10063 }
10064
10065 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
10066 fp->fw_sb_id, fp->igu_sb_id);
10067
10068 bxe_update_fp_sb_idx(fp);
10069
10070 /* Configure Queue State object */
10071 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
10072 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
10073
10074 ecore_init_queue_obj(sc,
10075 &sc->sp_objs[idx].q_obj,
10076 fp->cl_id,
10077 cids,
10078 sc->max_cos,
10079 SC_FUNC(sc),
10080 BXE_SP(sc, q_rdata),
10081 BXE_SP_MAPPING(sc, q_rdata),
10082 q_type);
10083
10084 /* configure classification DBs */
10085 ecore_init_mac_obj(sc,
10086 &sc->sp_objs[idx].mac_obj,
10087 fp->cl_id,
10088 idx,
10089 SC_FUNC(sc),
10090 BXE_SP(sc, mac_rdata),
10091 BXE_SP_MAPPING(sc, mac_rdata),
10092 ECORE_FILTER_MAC_PENDING,
10093 &sc->sp_state,
10094 ECORE_OBJ_TYPE_RX_TX,
10095 &sc->macs_pool);
10096
10097 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
10098 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
10099 }
10100
10101 static inline void
10102 bxe_update_rx_prod(struct bxe_softc *sc,
10103 struct bxe_fastpath *fp,
10104 uint16_t rx_bd_prod,
10105 uint16_t rx_cq_prod,
10106 uint16_t rx_sge_prod)
10107 {
10108 struct ustorm_eth_rx_producers rx_prods = { 0 };
10109 uint32_t i;
10110
10111 /* update producers */
10112 rx_prods.bd_prod = rx_bd_prod;
10113 rx_prods.cqe_prod = rx_cq_prod;
10114 rx_prods.sge_prod = rx_sge_prod;
10115
10116 /*
10117 * Make sure that the BD and SGE data is updated before updating the
10118 * producers since the FW might read the BD/SGE right after the producer
10119 * is updated.
10120 * This is only applicable for weak-ordered memory model archs such
10121 * as IA-64. The following barrier is also mandatory since the FW
10122 * assumes BDs must have buffers.
10123 */
10124 wmb();
10125
10126 for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
10127 REG_WR(sc,
10128 (fp->ustorm_rx_prods_offset + (i * 4)),
10129 ((uint32_t *)&rx_prods)[i]);
10130 }
10131
10132 wmb(); /* keep prod updates ordered */
10133
10134 BLOGD(sc, DBG_RX,
10135 "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
10136 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
10137 }
10138
10139 static void
10140 bxe_init_rx_rings(struct bxe_softc *sc)
10141 {
10142 struct bxe_fastpath *fp;
10143 int i;
10144
10145 for (i = 0; i < sc->num_queues; i++) {
10146 fp = &sc->fp[i];
10147
10148 fp->rx_bd_cons = 0;
10149
10150 /*
10151 * Activate the BD ring...
10152 * Warning, this will generate an interrupt (to the TSTORM) 10153 * so this can only be done after the chip is initialized 10154 */ 10155 bxe_update_rx_prod(sc, fp, 10156 fp->rx_bd_prod, 10157 fp->rx_cq_prod, 10158 fp->rx_sge_prod); 10159 10160 if (i != 0) { 10161 continue; 10162 } 10163 10164 if (CHIP_IS_E1(sc)) { 10165 REG_WR(sc, 10166 (BAR_USTRORM_INTMEM + 10167 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))), 10168 U64_LO(fp->rcq_dma.paddr)); 10169 REG_WR(sc, 10170 (BAR_USTRORM_INTMEM + 10171 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4), 10172 U64_HI(fp->rcq_dma.paddr)); 10173 } 10174 } 10175 } 10176 10177 static void 10178 bxe_init_tx_ring_one(struct bxe_fastpath *fp) 10179 { 10180 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 10181 fp->tx_db.data.zero_fill1 = 0; 10182 fp->tx_db.data.prod = 0; 10183 10184 fp->tx_pkt_prod = 0; 10185 fp->tx_pkt_cons = 0; 10186 fp->tx_bd_prod = 0; 10187 fp->tx_bd_cons = 0; 10188 fp->eth_q_stats.tx_pkts = 0; 10189 } 10190 10191 static inline void 10192 bxe_init_tx_rings(struct bxe_softc *sc) 10193 { 10194 int i; 10195 10196 for (i = 0; i < sc->num_queues; i++) { 10197 #if 0 10198 uint8_t cos; 10199 for (cos = 0; cos < sc->max_cos; cos++) { 10200 bxe_init_tx_ring_one(&sc->fp[i].txdata[cos]); 10201 } 10202 #else 10203 bxe_init_tx_ring_one(&sc->fp[i]); 10204 #endif 10205 } 10206 } 10207 10208 static void 10209 bxe_init_def_sb(struct bxe_softc *sc) 10210 { 10211 struct host_sp_status_block *def_sb = sc->def_sb; 10212 bus_addr_t mapping = sc->def_sb_dma.paddr; 10213 int igu_sp_sb_index; 10214 int igu_seg_id; 10215 int port = SC_PORT(sc); 10216 int func = SC_FUNC(sc); 10217 int reg_offset, reg_offset_en5; 10218 uint64_t section; 10219 int index, sindex; 10220 struct hc_sp_status_block_data sp_sb_data; 10221 10222 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 10223 10224 if (CHIP_INT_MODE_IS_BC(sc)) { 10225 igu_sp_sb_index = DEF_SB_IGU_ID; 10226 igu_seg_id = HC_SEG_ACCESS_DEF; 10227 } else { 10228 igu_sp_sb_index = sc->igu_dsb_id; 10229 igu_seg_id = IGU_SEG_ACCESS_DEF; 10230 } 10231 10232 /* attentions */ 10233 section = ((uint64_t)mapping + 10234 offsetof(struct host_sp_status_block, atten_status_block)); 10235 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 10236 sc->attn_state = 0; 10237 10238 reg_offset = (port) ? 10239 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 10240 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 10241 reg_offset_en5 = (port) ? 10242 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 10243 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; 10244 10245 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 10246 /* take care of sig[0]..sig[4] */ 10247 for (sindex = 0; sindex < 4; sindex++) { 10248 sc->attn_group[index].sig[sindex] = 10249 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); 10250 } 10251 10252 if (!CHIP_IS_E1x(sc)) { 10253 /* 10254 * enable5 is separate from the rest of the registers, 10255 * and the address skip is 4 and not 16 between the 10256 * different groups 10257 */ 10258 sc->attn_group[index].sig[4] = 10259 REG_RD(sc, (reg_offset_en5 + (0x4 * index))); 10260 } else { 10261 sc->attn_group[index].sig[4] = 0; 10262 } 10263 } 10264 10265 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10266 reg_offset = (port) ? 
10267 HC_REG_ATTN_MSG1_ADDR_L : 10268 HC_REG_ATTN_MSG0_ADDR_L; 10269 REG_WR(sc, reg_offset, U64_LO(section)); 10270 REG_WR(sc, (reg_offset + 4), U64_HI(section)); 10271 } else if (!CHIP_IS_E1x(sc)) { 10272 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 10273 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 10274 } 10275 10276 section = ((uint64_t)mapping + 10277 offsetof(struct host_sp_status_block, sp_sb)); 10278 10279 bxe_zero_sp_sb(sc); 10280 10281 /* PCI guarantees endianity of regpair */ 10282 sp_sb_data.state = SB_ENABLED; 10283 sp_sb_data.host_sb_addr.lo = U64_LO(section); 10284 sp_sb_data.host_sb_addr.hi = U64_HI(section); 10285 sp_sb_data.igu_sb_id = igu_sp_sb_index; 10286 sp_sb_data.igu_seg_id = igu_seg_id; 10287 sp_sb_data.p_func.pf_id = func; 10288 sp_sb_data.p_func.vnic_id = SC_VN(sc); 10289 sp_sb_data.p_func.vf_id = 0xff; 10290 10291 bxe_wr_sp_sb_data(sc, &sp_sb_data); 10292 10293 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 10294 } 10295 10296 static void 10297 bxe_init_sp_ring(struct bxe_softc *sc) 10298 { 10299 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); 10300 sc->spq_prod_idx = 0; 10301 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; 10302 sc->spq_prod_bd = sc->spq; 10303 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); 10304 } 10305 10306 static void 10307 bxe_init_eq_ring(struct bxe_softc *sc) 10308 { 10309 union event_ring_elem *elem; 10310 int i; 10311 10312 for (i = 1; i <= NUM_EQ_PAGES; i++) { 10313 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; 10314 10315 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + 10316 BCM_PAGE_SIZE * 10317 (i % NUM_EQ_PAGES))); 10318 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + 10319 BCM_PAGE_SIZE * 10320 (i % NUM_EQ_PAGES))); 10321 } 10322 10323 sc->eq_cons = 0; 10324 sc->eq_prod = NUM_EQ_DESC; 10325 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; 10326 10327 atomic_store_rel_long(&sc->eq_spq_left, 10328 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), 10329 NUM_EQ_DESC) - 1)); 10330 } 10331 10332 static void 10333 bxe_init_internal_common(struct bxe_softc *sc) 10334 { 10335 int i; 10336 10337 if (IS_MF_SI(sc)) { 10338 /* 10339 * In switch independent mode, the TSTORM needs to accept 10340 * packets that failed classification, since approximate match 10341 * mac addresses aren't written to NIG LLH. 10342 */ 10343 REG_WR8(sc, 10344 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 10345 2); 10346 } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */ 10347 REG_WR8(sc, 10348 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 10349 0); 10350 } 10351 10352 /* 10353 * Zero this manually as its initialization is currently missing 10354 * in the initTool. 10355 */ 10356 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { 10357 REG_WR(sc, 10358 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 10359 0); 10360 } 10361 10362 if (!CHIP_IS_E1x(sc)) { 10363 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), 10364 CHIP_INT_MODE_IS_BC(sc) ? 
HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
10365 }
10366 }
10367
10368 static void
10369 bxe_init_internal(struct bxe_softc *sc,
10370 uint32_t load_code)
10371 {
10372 switch (load_code) {
10373 case FW_MSG_CODE_DRV_LOAD_COMMON:
10374 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
10375 bxe_init_internal_common(sc);
10376 /* no break */
10377
10378 case FW_MSG_CODE_DRV_LOAD_PORT:
10379 /* nothing to do */
10380 /* no break */
10381
10382 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
10383 /* internal memory per function is initialized inside bxe_pf_init */
10384 break;
10385
10386 default:
10387 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
10388 break;
10389 }
10390 }
10391
10392 static void
10393 storm_memset_func_cfg(struct bxe_softc *sc,
10394 struct tstorm_eth_function_common_config *tcfg,
10395 uint16_t abs_fid)
10396 {
10397 uint32_t addr;
10398 size_t size;
10399
10400 addr = (BAR_TSTRORM_INTMEM +
10401 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
10402 size = sizeof(struct tstorm_eth_function_common_config);
10403 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
10404 }
10405
10406 static void
10407 bxe_func_init(struct bxe_softc *sc,
10408 struct bxe_func_init_params *p)
10409 {
10410 struct tstorm_eth_function_common_config tcfg = { 0 };
10411
10412 if (CHIP_IS_E1x(sc)) {
10413 storm_memset_func_cfg(sc, &tcfg, p->func_id);
10414 }
10415
10416 /* Enable the function in the FW */
10417 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
10418 storm_memset_func_en(sc, p->func_id, 1);
10419
10420 /* spq */
10421 if (p->func_flgs & FUNC_FLG_SPQ) {
10422 storm_memset_spq_addr(sc, p->spq_map, p->func_id);
10423 REG_WR(sc,
10424 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
10425 p->spq_prod);
10426 }
10427 }
10428
10429 /*
10430 * Calculates the sum of vn_min_rates.
10431 * It's needed for further normalizing of the min_rates.
10432 * Returns:
10433 * sum of vn_min_rates.
10434 * or
10435 * 0 - if all the min_rates are 0.
10436 * In the latter case the fairness algorithm should be deactivated.
10437 * If not all min rates are zero then those that are zero will be set to the default minimum rate.
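 *
 * Worked example (illustrative values): with per-VN MIN BW fields of
 * {0, 25, 0, 75}, at least one rate is non-zero, so the two zero entries
 * are raised to DEF_MIN_RATE and fairness stays enabled; with all fields
 * zero, the all_zero path below disables fairness instead.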
10438 */
10439 static void
10440 bxe_calc_vn_min(struct bxe_softc *sc,
10441 struct cmng_init_input *input)
10442 {
10443 uint32_t vn_cfg;
10444 uint32_t vn_min_rate;
10445 int all_zero = 1;
10446 int vn;
10447
10448 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10449 vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10450 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10451 FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10452
10453 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10454 /* skip hidden VNs */
10455 vn_min_rate = 0;
10456 } else if (!vn_min_rate) {
10457 /* If min rate is zero - set it to 100 */
10458 vn_min_rate = DEF_MIN_RATE;
10459 } else {
10460 all_zero = 0;
10461 }
10462
10463 input->vnic_min_rate[vn] = vn_min_rate;
10464 }
10465
10466 /* if ETS or all min rates are zeros - disable fairness */
10467 if (BXE_IS_ETS_ENABLED(sc)) {
10468 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10469 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10470 } else if (all_zero) {
10471 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10472 BLOGD(sc, DBG_LOAD,
10473 "Fairness disabled (all MIN values are zeroes)\n");
10474 } else {
10475 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10476 }
10477 }
10478
10479 static inline uint16_t
10480 bxe_extract_max_cfg(struct bxe_softc *sc,
10481 uint32_t mf_cfg)
10482 {
10483 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10484 FUNC_MF_CFG_MAX_BW_SHIFT);
10485
10486 if (!max_cfg) {
10487 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10488 max_cfg = 100;
10489 }
10490
10491 return (max_cfg);
10492 }
10493
10494 static void
10495 bxe_calc_vn_max(struct bxe_softc *sc,
10496 int vn,
10497 struct cmng_init_input *input)
10498 {
10499 uint16_t vn_max_rate;
10500 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10501 uint32_t max_cfg;
10502
10503 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10504 vn_max_rate = 0;
10505 } else {
10506 max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10507
10508 if (IS_MF_SI(sc)) {
10509 /* max_cfg in percents of linkspeed */
10510 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10511 } else { /* SD modes */
10512 /* max_cfg is absolute in 100Mb units */
10513 vn_max_rate = (max_cfg * 100);
10514 }
10515 }
10516
10517 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10518
10519 input->vnic_max_rate[vn] = vn_max_rate;
10520 }
10521
10522 static void
10523 bxe_cmng_fns_init(struct bxe_softc *sc,
10524 uint8_t read_cfg,
10525 uint8_t cmng_type)
10526 {
10527 struct cmng_init_input input;
10528 int vn;
10529
10530 memset(&input, 0, sizeof(struct cmng_init_input));
10531
10532 input.port_rate = sc->link_vars.line_speed;
10533
10534 if (cmng_type == CMNG_FNS_MINMAX) {
10535 /* read mf conf from shmem */
10536 if (read_cfg) {
10537 bxe_read_mf_cfg(sc);
10538 }
10539
10540 /* get VN min rate and enable fairness if not 0 */
10541 bxe_calc_vn_min(sc, &input);
10542
10543 /* get VN max rate */
10544 if (sc->port.pmf) {
10545 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10546 bxe_calc_vn_max(sc, vn, &input);
10547 }
10548 }
10549
10550 /* always enable rate shaping and fairness */
10551 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10552
10553 ecore_init_cmng(&input, &sc->cmng);
10554 return;
10555 }
10556
10557 /* rate shaping and fairness are disabled */
10558 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10559 }
10560
10561 static int
10562 bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10563 {
10564 if
(CHIP_REV_IS_SLOW(sc)) {
10565 return (CMNG_FNS_NONE);
10566 }
10567
10568 if (IS_MF(sc)) {
10569 return (CMNG_FNS_MINMAX);
10570 }
10571
10572 return (CMNG_FNS_NONE);
10573 }
10574
10575 static void
10576 storm_memset_cmng(struct bxe_softc *sc,
10577 struct cmng_init *cmng,
10578 uint8_t port)
10579 {
10580 int vn;
10581 int func;
10582 uint32_t addr;
10583 size_t size;
10584
10585 addr = (BAR_XSTRORM_INTMEM +
10586 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10587 size = sizeof(struct cmng_struct_per_port);
10588 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10589
10590 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10591 func = func_by_vn(sc, vn);
10592
10593 addr = (BAR_XSTRORM_INTMEM +
10594 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10595 size = sizeof(struct rate_shaping_vars_per_vn);
10596 ecore_storm_memset_struct(sc, addr, size,
10597 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10598
10599 addr = (BAR_XSTRORM_INTMEM +
10600 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10601 size = sizeof(struct fairness_vars_per_vn);
10602 ecore_storm_memset_struct(sc, addr, size,
10603 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10604 }
10605 }
10606
10607 static void
10608 bxe_pf_init(struct bxe_softc *sc)
10609 {
10610 struct bxe_func_init_params func_init = { 0 };
10611 struct event_ring_data eq_data = { { 0 } };
10612 uint16_t flags;
10613
10614 if (!CHIP_IS_E1x(sc)) {
10615 /* reset IGU PF statistics: MSIX + ATTN */
10616 /* PF */
10617 REG_WR(sc,
10618 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10619 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10620 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10621 0);
10622 /* ATTN */
10623 REG_WR(sc,
10624 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10625 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10626 (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10627 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10628 0);
10629 }
10630
10631 /* function setup flags */
10632 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10633
10634 /*
10635 * This flag is relevant for E1x only.
10636 * E2 doesn't have a TPA configuration at the function level.
10637 */
10638 flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10639
10640 func_init.func_flgs = flags;
10641 func_init.pf_id = SC_FUNC(sc);
10642 func_init.func_id = SC_FUNC(sc);
10643 func_init.spq_map = sc->spq_dma.paddr;
10644 func_init.spq_prod = sc->spq_prod_idx;
10645
10646 bxe_func_init(sc, &func_init);
10647
10648 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10649
10650 /*
10651 * Congestion management values depend on the link rate.
10652 * There is no active link so initial link rate is set to 10Gbps.
10653 * When the link comes up the congestion management values are
10654 * re-calculated according to the actual link rate.
10655 */
10656 sc->link_vars.line_speed = SPEED_10000;
10657 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10658
10659 /* Only the PMF sets the HW */
10660 if (sc->port.pmf) {
10661 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10662 }
10663
10664 /* init Event Queue - PCI bus guarantees correct endianness */
10665 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10666 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10667 eq_data.producer = sc->eq_prod;
10668 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
10669 eq_data.sb_id = DEF_SB_ID;
10670 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10671 }
10672
10673 static void
10674 bxe_hc_int_enable(struct bxe_softc *sc)
10675 {
10676 int port = SC_PORT(sc);
10677 uint32_t addr = (port) ?
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10678 uint32_t val = REG_RD(sc, addr); 10679 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10680 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10681 (sc->intr_count == 1)) ? TRUE : FALSE; 10682 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10683 10684 if (msix) { 10685 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10686 HC_CONFIG_0_REG_INT_LINE_EN_0); 10687 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10688 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10689 if (single_msix) { 10690 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 10691 } 10692 } else if (msi) { 10693 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 10694 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10695 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10696 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10697 } else { 10698 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10699 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10700 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10701 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10702 10703 if (!CHIP_IS_E1(sc)) { 10704 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", 10705 val, port, addr); 10706 10707 REG_WR(sc, addr, val); 10708 10709 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 10710 } 10711 } 10712 10713 if (CHIP_IS_E1(sc)) { 10714 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF); 10715 } 10716 10717 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", 10718 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); 10719 10720 REG_WR(sc, addr, val); 10721 10722 /* ensure that HC_CONFIG is written before leading/trailing edge config */ 10723 mb(); 10724 10725 if (!CHIP_IS_E1(sc)) { 10726 /* init leading/trailing edge */ 10727 if (IS_MF(sc)) { 10728 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10729 if (sc->port.pmf) { 10730 /* enable nig and gpio3 attention */ 10731 val |= 0x1100; 10732 } 10733 } else { 10734 val = 0xffff; 10735 } 10736 10737 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val); 10738 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val); 10739 } 10740 10741 /* make sure that interrupts are indeed enabled from here on */ 10742 mb(); 10743 } 10744 10745 static void 10746 bxe_igu_int_enable(struct bxe_softc *sc) 10747 { 10748 uint32_t val; 10749 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10750 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10751 (sc->intr_count == 1)) ? TRUE : FALSE; 10752 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10753 10754 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10755 10756 if (msix) { 10757 val &= ~(IGU_PF_CONF_INT_LINE_EN | 10758 IGU_PF_CONF_SINGLE_ISR_EN); 10759 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10760 IGU_PF_CONF_ATTN_BIT_EN); 10761 if (single_msix) { 10762 val |= IGU_PF_CONF_SINGLE_ISR_EN; 10763 } 10764 } else if (msi) { 10765 val &= ~IGU_PF_CONF_INT_LINE_EN; 10766 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10767 IGU_PF_CONF_ATTN_BIT_EN | 10768 IGU_PF_CONF_SINGLE_ISR_EN); 10769 } else { 10770 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 10771 val |= (IGU_PF_CONF_INT_LINE_EN | 10772 IGU_PF_CONF_ATTN_BIT_EN | 10773 IGU_PF_CONF_SINGLE_ISR_EN); 10774 } 10775 10776 /* clean previous status - need to configure igu prior to ack*/ 10777 if ((!msix) || single_msix) { 10778 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10779 bxe_ack_int(sc); 10780 } 10781 10782 val |= IGU_PF_CONF_FUNC_EN; 10783 10784 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n", 10785 val, ((msix) ? "MSI-X" : ((msi) ? 
"MSI" : "INTx"))); 10786 10787 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10788 10789 mb(); 10790 10791 /* init leading/trailing edge */ 10792 if (IS_MF(sc)) { 10793 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10794 if (sc->port.pmf) { 10795 /* enable nig and gpio3 attention */ 10796 val |= 0x1100; 10797 } 10798 } else { 10799 val = 0xffff; 10800 } 10801 10802 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 10803 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 10804 10805 /* make sure that interrupts are indeed enabled from here on */ 10806 mb(); 10807 } 10808 10809 static void 10810 bxe_int_enable(struct bxe_softc *sc) 10811 { 10812 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10813 bxe_hc_int_enable(sc); 10814 } else { 10815 bxe_igu_int_enable(sc); 10816 } 10817 } 10818 10819 static void 10820 bxe_hc_int_disable(struct bxe_softc *sc) 10821 { 10822 int port = SC_PORT(sc); 10823 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10824 uint32_t val = REG_RD(sc, addr); 10825 10826 /* 10827 * In E1 we must use only PCI configuration space to disable MSI/MSIX 10828 * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC 10829 * block 10830 */ 10831 if (CHIP_IS_E1(sc)) { 10832 /* 10833 * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register 10834 * to prevent from HC sending interrupts after we exit the function 10835 */ 10836 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0); 10837 10838 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10839 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10840 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10841 } else { 10842 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10843 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10844 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10845 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10846 } 10847 10848 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); 10849 10850 /* flush all outstanding writes */ 10851 mb(); 10852 10853 REG_WR(sc, addr, val); 10854 if (REG_RD(sc, addr) != val) { 10855 BLOGE(sc, "proper val not read from HC IGU!\n"); 10856 } 10857 } 10858 10859 static void 10860 bxe_igu_int_disable(struct bxe_softc *sc) 10861 { 10862 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10863 10864 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 10865 IGU_PF_CONF_INT_LINE_EN | 10866 IGU_PF_CONF_ATTN_BIT_EN); 10867 10868 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val); 10869 10870 /* flush all outstanding writes */ 10871 mb(); 10872 10873 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10874 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { 10875 BLOGE(sc, "proper val not read from IGU!\n"); 10876 } 10877 } 10878 10879 static void 10880 bxe_int_disable(struct bxe_softc *sc) 10881 { 10882 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10883 bxe_hc_int_disable(sc); 10884 } else { 10885 bxe_igu_int_disable(sc); 10886 } 10887 } 10888 10889 static void 10890 bxe_nic_init(struct bxe_softc *sc, 10891 int load_code) 10892 { 10893 int i; 10894 10895 for (i = 0; i < sc->num_queues; i++) { 10896 bxe_init_eth_fp(sc, i); 10897 } 10898 10899 rmb(); /* ensure status block indices were read */ 10900 10901 bxe_init_rx_rings(sc); 10902 bxe_init_tx_rings(sc); 10903 10904 if (IS_VF(sc)) { 10905 return; 10906 } 10907 10908 /* initialize MOD_ABS interrupts */ 10909 elink_init_mod_abs_int(sc, &sc->link_vars, 10910 sc->devinfo.chip_id, 10911 sc->devinfo.shmem_base, 10912 sc->devinfo.shmem2_base, 10913 SC_PORT(sc)); 10914 10915 bxe_init_def_sb(sc); 10916 bxe_update_dsb_idx(sc); 10917 bxe_init_sp_ring(sc); 10918 bxe_init_eq_ring(sc); 10919 bxe_init_internal(sc, load_code); 10920 bxe_pf_init(sc); 10921 
bxe_stats_init(sc); 10922 10923 /* flush all before enabling interrupts */ 10924 mb(); 10925 10926 bxe_int_enable(sc); 10927 10928 /* check for SPIO5 */ 10929 bxe_attn_int_deasserted0(sc, 10930 REG_RD(sc, 10931 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 10932 SC_PORT(sc)*4)) & 10933 AEU_INPUTS_ATTN_BITS_SPIO5); 10934 } 10935 10936 static inline void 10937 bxe_init_objs(struct bxe_softc *sc) 10938 { 10939 /* mcast rules must be added to tx if tx switching is enabled */ 10940 ecore_obj_type o_type = 10941 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX : 10942 ECORE_OBJ_TYPE_RX; 10943 10944 /* RX_MODE controlling object */ 10945 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); 10946 10947 /* multicast configuration controlling object */ 10948 ecore_init_mcast_obj(sc, 10949 &sc->mcast_obj, 10950 sc->fp[0].cl_id, 10951 sc->fp[0].index, 10952 SC_FUNC(sc), 10953 SC_FUNC(sc), 10954 BXE_SP(sc, mcast_rdata), 10955 BXE_SP_MAPPING(sc, mcast_rdata), 10956 ECORE_FILTER_MCAST_PENDING, 10957 &sc->sp_state, 10958 o_type); 10959 10960 /* Setup CAM credit pools */ 10961 ecore_init_mac_credit_pool(sc, 10962 &sc->macs_pool, 10963 SC_FUNC(sc), 10964 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10965 VNICS_PER_PATH(sc)); 10966 10967 ecore_init_vlan_credit_pool(sc, 10968 &sc->vlans_pool, 10969 SC_ABS_FUNC(sc) >> 1, 10970 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10971 VNICS_PER_PATH(sc)); 10972 10973 /* RSS configuration object */ 10974 ecore_init_rss_config_obj(sc, 10975 &sc->rss_conf_obj, 10976 sc->fp[0].cl_id, 10977 sc->fp[0].index, 10978 SC_FUNC(sc), 10979 SC_FUNC(sc), 10980 BXE_SP(sc, rss_rdata), 10981 BXE_SP_MAPPING(sc, rss_rdata), 10982 ECORE_FILTER_RSS_CONF_PENDING, 10983 &sc->sp_state, ECORE_OBJ_TYPE_RX); 10984 } 10985 10986 /* 10987 * Initialize the function. This must be called before sending CLIENT_SETUP 10988 * for the first client. 
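 * It posts the FUNCTION_START ramrod (ECORE_F_CMD_START) and, since
 * RAMROD_COMP_WAIT is set, waits here for its completion.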
10989 */ 10990 static inline int 10991 bxe_func_start(struct bxe_softc *sc) 10992 { 10993 struct ecore_func_state_params func_params = { NULL }; 10994 struct ecore_func_start_params *start_params = &func_params.params.start; 10995 10996 /* Prepare parameters for function state transitions */ 10997 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 10998 10999 func_params.f_obj = &sc->func_obj; 11000 func_params.cmd = ECORE_F_CMD_START; 11001 11002 /* Function parameters */ 11003 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; 11004 start_params->sd_vlan_tag = OVLAN(sc); 11005 11006 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 11007 start_params->network_cos_mode = STATIC_COS; 11008 } else { /* CHIP_IS_E1X */ 11009 start_params->network_cos_mode = FW_WRR; 11010 } 11011 11012 start_params->gre_tunnel_mode = 0; 11013 start_params->gre_tunnel_rss = 0; 11014 11015 return (ecore_func_state_change(sc, &func_params)); 11016 } 11017 11018 static int 11019 bxe_set_power_state(struct bxe_softc *sc, 11020 uint8_t state) 11021 { 11022 uint16_t pmcsr; 11023 11024 /* If there is no power capability, silently succeed */ 11025 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { 11026 BLOGW(sc, "No power capability\n"); 11027 return (0); 11028 } 11029 11030 pmcsr = pci_read_config(sc->dev, 11031 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11032 2); 11033 11034 switch (state) { 11035 case PCI_PM_D0: 11036 pci_write_config(sc->dev, 11037 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11038 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); 11039 11040 if (pmcsr & PCIM_PSTAT_DMASK) { 11041 /* delay required during transition out of D3hot */ 11042 DELAY(20000); 11043 } 11044 11045 break; 11046 11047 case PCI_PM_D3hot: 11048 /* XXX if there are other clients above don't shut down the power */ 11049 11050 /* don't shut down the power for emulation and FPGA */ 11051 if (CHIP_REV_IS_SLOW(sc)) { 11052 return (0); 11053 } 11054 11055 pmcsr &= ~PCIM_PSTAT_DMASK; 11056 pmcsr |= PCIM_PSTAT_D3; 11057 11058 if (sc->wol) { 11059 pmcsr |= PCIM_PSTAT_PMEENABLE; 11060 } 11061 11062 pci_write_config(sc->dev, 11063 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11064 pmcsr, 4); 11065 11066 /* 11067 * No more memory access after this point until device is brought back 11068 * to D0 state. 
11069 */
11070 break;
11071
11072 default:
11073 BLOGE(sc, "Can't support PCI power state = %d\n", state);
11074 return (-1);
11075 }
11076
11077 return (0);
11078 }
11079
11080
11081 /* return TRUE if we succeeded in acquiring the lock */
11082 static uint8_t
11083 bxe_trylock_hw_lock(struct bxe_softc *sc,
11084 uint32_t resource)
11085 {
11086 uint32_t lock_status;
11087 uint32_t resource_bit = (1 << resource);
11088 int func = SC_FUNC(sc);
11089 uint32_t hw_lock_control_reg;
11090
11091 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
11092
11093 /* Validating that the resource is within range */
11094 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
11095 BLOGD(sc, DBG_LOAD,
11096 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
11097 resource, HW_LOCK_MAX_RESOURCE_VALUE);
11098 return (FALSE);
11099 }
11100
11101 if (func <= 5) {
11102 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
11103 } else {
11104 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
11105 }
11106
11107 /* try to acquire the lock */
11108 REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
11109 lock_status = REG_RD(sc, hw_lock_control_reg);
11110 if (lock_status & resource_bit) {
11111 return (TRUE);
11112 }
11113
11114 BLOGE(sc, "Failed to get a resource lock 0x%x\n", resource);
11115
11116 return (FALSE);
11117 }
11118
11119 /*
11120 * Get the recovery leader resource id according to the engine this function
11121 * belongs to. Currently only 2 engines are supported.
11122 */
11123 static int
11124 bxe_get_leader_lock_resource(struct bxe_softc *sc)
11125 {
11126 if (SC_PATH(sc)) {
11127 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
11128 } else {
11129 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
11130 }
11131 }
11132
11133 /* try to acquire a leader lock for current engine */
11134 static uint8_t
11135 bxe_trylock_leader_lock(struct bxe_softc *sc)
11136 {
11137 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
11138 }
11139
11140 static int
11141 bxe_release_leader_lock(struct bxe_softc *sc)
11142 {
11143 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
11144 }
11145
11146 /* close gates #2, #3 and #4 */
11147 static void
11148 bxe_set_234_gates(struct bxe_softc *sc,
11149 uint8_t close)
11150 {
11151 uint32_t val;
11152
11153 /* gates #2 and #4a are closed/opened for "not E1" only */
11154 if (!CHIP_IS_E1(sc)) {
11155 /* #4 */
11156 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
11157 /* #2 */
11158 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
11159 }
11160
11161 /* #3 */
11162 if (CHIP_IS_E1x(sc)) {
11163 /* prevent interrupts from HC on both ports */
11164 val = REG_RD(sc, HC_REG_CONFIG_1);
11165 REG_WR(sc, HC_REG_CONFIG_1,
11166 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
11167 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
11168
11169 val = REG_RD(sc, HC_REG_CONFIG_0);
11170 REG_WR(sc, HC_REG_CONFIG_0,
11171 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
11172 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
11173 } else {
11174 /* Prevent incoming interrupts in the IGU */
11175 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
11176
11177 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
11178 (!close) ?
11179 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
11180 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
11181 }
11182
11183 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
11184 close ?
"closing" : "opening"); 11185 11186 wmb(); 11187 } 11188 11189 /* poll for pending writes bit, it should get cleared in no more than 1s */ 11190 static int 11191 bxe_er_poll_igu_vq(struct bxe_softc *sc) 11192 { 11193 uint32_t cnt = 1000; 11194 uint32_t pend_bits = 0; 11195 11196 do { 11197 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); 11198 11199 if (pend_bits == 0) { 11200 break; 11201 } 11202 11203 DELAY(1000); 11204 } while (--cnt > 0); 11205 11206 if (cnt == 0) { 11207 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); 11208 return (-1); 11209 } 11210 11211 return (0); 11212 } 11213 11214 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ 11215 11216 static void 11217 bxe_clp_reset_prep(struct bxe_softc *sc, 11218 uint32_t *magic_val) 11219 { 11220 /* Do some magic... */ 11221 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 11222 *magic_val = val & SHARED_MF_CLP_MAGIC; 11223 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 11224 } 11225 11226 /* restore the value of the 'magic' bit */ 11227 static void 11228 bxe_clp_reset_done(struct bxe_softc *sc, 11229 uint32_t magic_val) 11230 { 11231 /* Restore the 'magic' bit value... */ 11232 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 11233 MFCFG_WR(sc, shared_mf_config.clp_mb, 11234 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 11235 } 11236 11237 /* prepare for MCP reset, takes care of CLP configurations */ 11238 static void 11239 bxe_reset_mcp_prep(struct bxe_softc *sc, 11240 uint32_t *magic_val) 11241 { 11242 uint32_t shmem; 11243 uint32_t validity_offset; 11244 11245 /* set `magic' bit in order to save MF config */ 11246 if (!CHIP_IS_E1(sc)) { 11247 bxe_clp_reset_prep(sc, magic_val); 11248 } 11249 11250 /* get shmem offset */ 11251 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 11252 validity_offset = 11253 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); 11254 11255 /* Clear validity map flags */ 11256 if (shmem > 0) { 11257 REG_WR(sc, shmem + validity_offset, 0); 11258 } 11259 } 11260 11261 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 11262 #define MCP_ONE_TIMEOUT 100 /* 100 ms */ 11263 11264 static void 11265 bxe_mcp_wait_one(struct bxe_softc *sc) 11266 { 11267 /* special handling for emulation and FPGA (10 times longer) */ 11268 if (CHIP_REV_IS_SLOW(sc)) { 11269 DELAY((MCP_ONE_TIMEOUT*10) * 1000); 11270 } else { 11271 DELAY((MCP_ONE_TIMEOUT) * 1000); 11272 } 11273 } 11274 11275 /* initialize shmem_base and waits for validity signature to appear */ 11276 static int 11277 bxe_init_shmem(struct bxe_softc *sc) 11278 { 11279 int cnt = 0; 11280 uint32_t val = 0; 11281 11282 do { 11283 sc->devinfo.shmem_base = 11284 sc->link_params.shmem_base = 11285 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 11286 11287 if (sc->devinfo.shmem_base) { 11288 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 11289 if (val & SHR_MEM_VALIDITY_MB) 11290 return (0); 11291 } 11292 11293 bxe_mcp_wait_one(sc); 11294 11295 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 11296 11297 BLOGE(sc, "BAD MCP validity signature\n"); 11298 11299 return (-1); 11300 } 11301 11302 static int 11303 bxe_reset_mcp_comp(struct bxe_softc *sc, 11304 uint32_t magic_val) 11305 { 11306 int rc = bxe_init_shmem(sc); 11307 11308 /* Restore the `magic' bit value */ 11309 if (!CHIP_IS_E1(sc)) { 11310 bxe_clp_reset_done(sc, magic_val); 11311 } 11312 11313 return (rc); 11314 } 11315 11316 static void 11317 bxe_pxp_prep(struct bxe_softc *sc) 11318 { 11319 if (!CHIP_IS_E1(sc)) { 11320 REG_WR(sc, PXP2_REG_RD_START_INIT, 0); 11321 
REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
11322 wmb();
11323 }
11324 }
11325
11326 /*
11327 * Reset the whole chip except for:
11328 * - PCIE core
11329 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
11330 * - IGU
11331 * - MISC (including AEU)
11332 * - GRC
11333 * - RBCN, RBCP
11334 */
11335 static void
11336 bxe_process_kill_chip_reset(struct bxe_softc *sc,
11337 uint8_t global)
11338 {
11339 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
11340 uint32_t global_bits2, stay_reset2;
11341
11342 /*
11343 * Bits that have to be set in reset_mask2 if we want to reset 'global'
11344 * (per chip) blocks.
11345 */
11346 global_bits2 =
11347 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
11348 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
11349
11350 /*
11351 * Don't reset the following blocks.
11352 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
11353 * reset, as in 4 port device they might still be owned
11354 * by the MCP (there is only one leader per path).
11355 */
11356 not_reset_mask1 =
11357 MISC_REGISTERS_RESET_REG_1_RST_HC |
11358 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
11359 MISC_REGISTERS_RESET_REG_1_RST_PXP;
11360
11361 not_reset_mask2 =
11362 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
11363 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
11364 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
11365 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
11366 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
11367 MISC_REGISTERS_RESET_REG_2_RST_GRC |
11368 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
11369 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
11370 MISC_REGISTERS_RESET_REG_2_RST_ATC |
11371 MISC_REGISTERS_RESET_REG_2_PGLC |
11372 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
11373 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
11374 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
11375 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
11376 MISC_REGISTERS_RESET_REG_2_UMAC0 |
11377 MISC_REGISTERS_RESET_REG_2_UMAC1;
11378
11379 /*
11380 * Keep the following blocks in reset:
11381 * - all xxMACs are handled by the elink code.
11382 */
11383 stay_reset2 =
11384 MISC_REGISTERS_RESET_REG_2_XMAC |
11385 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
11386
11387 /* Full reset masks according to the chip */
11388 reset_mask1 = 0xffffffff;
11389
11390 if (CHIP_IS_E1(sc))
11391 reset_mask2 = 0xffff;
11392 else if (CHIP_IS_E1H(sc))
11393 reset_mask2 = 0x1ffff;
11394 else if (CHIP_IS_E2(sc))
11395 reset_mask2 = 0xfffff;
11396 else /* CHIP_IS_E3 */
11397 reset_mask2 = 0x3ffffff;
11398
11399 /* Don't reset global blocks unless we need to */
11400 if (!global)
11401 reset_mask2 &= ~global_bits2;
11402
11403 /*
11404 * In case of attention in the QM, we need to reset PXP
11405 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
11406 * because otherwise QM reset would release 'close the gates' shortly
11407 * before resetting the PXP, then the PSWRQ would send a write
11408 * request to PGLUE. Then when PXP is reset, PGLUE would try to
11409 * read the payload data from PSWWR, but PSWWR would not
11410 * respond. The write queue in PGLUE would get stuck, and DMAE commands
11411 * would not return. Therefore it's important to reset the second
11412 * reset register (containing the
11413 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
11414 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
11415 * bit).
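 * In short: RESET_REG_2 must be written (cleared) below before RESET_REG_1.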
11416 */ 11417 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 11418 reset_mask2 & (~not_reset_mask2)); 11419 11420 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 11421 reset_mask1 & (~not_reset_mask1)); 11422 11423 mb(); 11424 wmb(); 11425 11426 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 11427 reset_mask2 & (~stay_reset2)); 11428 11429 mb(); 11430 wmb(); 11431 11432 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 11433 wmb(); 11434 } 11435 11436 static int 11437 bxe_process_kill(struct bxe_softc *sc, 11438 uint8_t global) 11439 { 11440 int cnt = 1000; 11441 uint32_t val = 0; 11442 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 11443 uint32_t tags_63_32 = 0; 11444 11445 /* Empty the Tetris buffer, wait for 1s */ 11446 do { 11447 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); 11448 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); 11449 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); 11450 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); 11451 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); 11452 if (CHIP_IS_E3(sc)) { 11453 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); 11454 } 11455 11456 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 11457 ((port_is_idle_0 & 0x1) == 0x1) && 11458 ((port_is_idle_1 & 0x1) == 0x1) && 11459 (pgl_exp_rom2 == 0xffffffff) && 11460 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) 11461 break; 11462 DELAY(1000); 11463 } while (cnt-- > 0); 11464 11465 if (cnt <= 0) { 11466 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there " 11467 "are still outstanding read requests after 1s! " 11468 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " 11469 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 11470 sr_cnt, blk_cnt, port_is_idle_0, 11471 port_is_idle_1, pgl_exp_rom2); 11472 return (-1); 11473 } 11474 11475 mb(); 11476 11477 /* Close gates #2, #3 and #4 */ 11478 bxe_set_234_gates(sc, TRUE); 11479 11480 /* Poll for IGU VQs for 57712 and newer chips */ 11481 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { 11482 return (-1); 11483 } 11484 11485 /* XXX indicate that "process kill" is in progress to MCP */ 11486 11487 /* clear "unprepared" bit */ 11488 REG_WR(sc, MISC_REG_UNPREPARED, 0); 11489 mb(); 11490 11491 /* Make sure all is written to the chip before the reset */ 11492 wmb(); 11493 11494 /* 11495 * Wait for 1ms to empty GLUE and PCI-E core queues, 11496 * PSWHST, GRC and PSWRD Tetris buffer. 
11497 */
11498 DELAY(1000);
11499
11500 /* Prepare for chip reset: */
11501 /* MCP */
11502 if (global) {
11503 bxe_reset_mcp_prep(sc, &val);
11504 }
11505
11506 /* PXP */
11507 bxe_pxp_prep(sc);
11508 mb();
11509
11510 /* reset the chip */
11511 bxe_process_kill_chip_reset(sc, global);
11512 mb();
11513
11514 /* clear errors in PGB */
11515 if (!CHIP_IS_E1(sc))
11516 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11517
11518 /* Recover after reset: */
11519 /* MCP */
11520 if (global && bxe_reset_mcp_comp(sc, val)) {
11521 return (-1);
11522 }
11523
11524 /* XXX add resetting the NO_MCP mode DB here */
11525
11526 /* Open the gates #2, #3 and #4 */
11527 bxe_set_234_gates(sc, FALSE);
11528
11529 /* XXX
11530 * IGU/AEU preparation bring back the AEU/IGU to a reset state
11531 * re-enable attentions
11532 */
11533
11534 return (0);
11535 }
11536
11537 static int
11538 bxe_leader_reset(struct bxe_softc *sc)
11539 {
11540 int rc = 0;
11541 uint8_t global = bxe_reset_is_global(sc);
11542 uint32_t load_code;
11543
11544 /*
11545 * If not going to reset MCP, load "fake" driver to reset HW while
11546 * driver is owner of the HW.
11547 */
11548 if (!global && !BXE_NOMCP(sc)) {
11549 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11550 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11551 if (!load_code) {
11552 BLOGE(sc, "MCP response failure, aborting\n");
11553 rc = -1;
11554 goto exit_leader_reset;
11555 }
11556
11557 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11558 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11559 BLOGE(sc, "MCP unexpected response, aborting\n");
11560 rc = -1;
11561 goto exit_leader_reset2;
11562 }
11563
11564 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11565 if (!load_code) {
11566 BLOGE(sc, "MCP response failure, aborting\n");
11567 rc = -1;
11568 goto exit_leader_reset2;
11569 }
11570 }
11571
11572 /* try to recover after the failure */
11573 if (bxe_process_kill(sc, global)) {
11574 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11575 rc = -1;
11576 goto exit_leader_reset2;
11577 }
11578
11579 /*
11580 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11581 * state.
11582 */
11583 bxe_set_reset_done(sc);
11584 if (global) {
11585 bxe_clear_reset_global(sc);
11586 }
11587
11588 exit_leader_reset2:
11589
11590 /* unload "fake driver" if it was loaded */
11591 if (!global && !BXE_NOMCP(sc)) {
11592 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11593 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11594 }
11595
11596 exit_leader_reset:
11597
11598 sc->is_leader = 0;
11599 bxe_release_leader_lock(sc);
11600
11601 mb();
11602 return (rc);
11603 }
11604
11605 /*
11606 * prepare INIT transition, parameters configured:
11607 * - HC configuration
11608 * - Queue's CDU context
11609 */
11610 static void
11611 bxe_pf_q_prep_init(struct bxe_softc *sc,
11612 struct bxe_fastpath *fp,
11613 struct ecore_queue_init_params *init_params)
11614 {
11615 uint8_t cos;
11616 int cxt_index, cxt_offset;
11617
11618 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11619 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11620
11621 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11622 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11623
11624 /* HC rate */
11625 init_params->rx.hc_rate =
11626 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11627 init_params->tx.hc_rate =
11628 sc->hc_tx_ticks ?
(1000000 / sc->hc_tx_ticks) : 0; 11629 11630 /* FW SB ID */ 11631 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; 11632 11633 /* CQ index among the SB indices */ 11634 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11635 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 11636 11637 /* set maximum number of COSs supported by this queue */ 11638 init_params->max_cos = sc->max_cos; 11639 11640 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", 11641 fp->index, init_params->max_cos); 11642 11643 /* set the context pointers queue object */ 11644 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 11645 /* XXX change index/cid here if ever support multiple tx CoS */ 11646 /* fp->txdata[cos]->cid */ 11647 cxt_index = fp->index / ILT_PAGE_CIDS; 11648 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); 11649 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; 11650 } 11651 } 11652 11653 /* set flags that are common for the Tx-only and not normal connections */ 11654 static unsigned long 11655 bxe_get_common_flags(struct bxe_softc *sc, 11656 struct bxe_fastpath *fp, 11657 uint8_t zero_stats) 11658 { 11659 unsigned long flags = 0; 11660 11661 /* PF driver will always initialize the Queue to an ACTIVE state */ 11662 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); 11663 11664 /* 11665 * tx only connections collect statistics (on the same index as the 11666 * parent connection). The statistics are zeroed when the parent 11667 * connection is initialized. 11668 */ 11669 11670 bxe_set_bit(ECORE_Q_FLG_STATS, &flags); 11671 if (zero_stats) { 11672 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); 11673 } 11674 11675 /* 11676 * tx only connections can support tx-switching, though their 11677 * CoS-ness doesn't survive the loopback 11678 */ 11679 if (sc->flags & BXE_TX_SWITCHING) { 11680 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); 11681 } 11682 11683 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); 11684 11685 return (flags); 11686 } 11687 11688 static unsigned long 11689 bxe_get_q_flags(struct bxe_softc *sc, 11690 struct bxe_fastpath *fp, 11691 uint8_t leading) 11692 { 11693 unsigned long flags = 0; 11694 11695 if (IS_MF_SD(sc)) { 11696 bxe_set_bit(ECORE_Q_FLG_OV, &flags); 11697 } 11698 11699 if (if_getcapenable(sc->ifp) & IFCAP_LRO) { 11700 bxe_set_bit(ECORE_Q_FLG_TPA, &flags); 11701 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); 11702 #if 0 11703 if (fp->mode == TPA_MODE_GRO) 11704 __set_bit(ECORE_Q_FLG_TPA_GRO, &flags); 11705 #endif 11706 } 11707 11708 if (leading) { 11709 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); 11710 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); 11711 } 11712 11713 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); 11714 11715 #if 0 11716 /* configure silent vlan removal */ 11717 if (IS_MF_AFEX(sc)) { 11718 bxe_set_bit(ECORE_Q_FLG_SILENT_VLAN_REM, &flags); 11719 } 11720 #endif 11721 11722 /* merge with common flags */ 11723 return (flags | bxe_get_common_flags(sc, fp, TRUE)); 11724 } 11725 11726 static void 11727 bxe_pf_q_prep_general(struct bxe_softc *sc, 11728 struct bxe_fastpath *fp, 11729 struct ecore_general_setup_params *gen_init, 11730 uint8_t cos) 11731 { 11732 gen_init->stat_id = bxe_stats_id(fp); 11733 gen_init->spcl_id = fp->cl_id; 11734 gen_init->mtu = sc->mtu; 11735 gen_init->cos = cos; 11736 } 11737 11738 static void 11739 bxe_pf_rx_q_prep(struct bxe_softc *sc, 11740 struct bxe_fastpath *fp, 11741 struct rxq_pause_params *pause, 11742 struct ecore_rxq_setup_params *rxq_init) 11743 { 11744 uint8_t max_sge = 0; 
11745 uint16_t sge_sz = 0;
11746 uint16_t tpa_agg_size = 0;
11747
11748 pause->sge_th_lo = SGE_TH_LO(sc);
11749 pause->sge_th_hi = SGE_TH_HI(sc);
11750
11751 /* validate SGE ring has enough to cross high threshold */
11752 if (sc->dropless_fc &&
11753 (pause->sge_th_hi + FW_PREFETCH_CNT) >
11754 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11755 BLOGW(sc, "sge ring threshold limit\n");
11756 }
11757
11758 /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11759 tpa_agg_size = (2 * sc->mtu);
11760 if (tpa_agg_size < sc->max_aggregation_size) {
11761 tpa_agg_size = sc->max_aggregation_size;
11762 }
11763
11764 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11765 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11766 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11767 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11768
11769 /* pause - not for e1 */
11770 if (!CHIP_IS_E1(sc)) {
11771 pause->bd_th_lo = BD_TH_LO(sc);
11772 pause->bd_th_hi = BD_TH_HI(sc);
11773
11774 pause->rcq_th_lo = RCQ_TH_LO(sc);
11775 pause->rcq_th_hi = RCQ_TH_HI(sc);
11776
11777 /* validate rings have enough entries to cross high thresholds */
11778 if (sc->dropless_fc &&
11779 pause->bd_th_hi + FW_PREFETCH_CNT >
11780 sc->rx_ring_size) {
11781 BLOGW(sc, "rx bd ring threshold limit\n");
11782 }
11783
11784 if (sc->dropless_fc &&
11785 pause->rcq_th_hi + FW_PREFETCH_CNT >
11786 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11787 BLOGW(sc, "rcq ring threshold limit\n");
11788 }
11789
11790 pause->pri_map = 1;
11791 }
11792
11793 /* rxq setup */
11794 rxq_init->dscr_map = fp->rx_dma.paddr;
11795 rxq_init->sge_map = fp->rx_sge_dma.paddr;
11796 rxq_init->rcq_map = fp->rcq_dma.paddr;
11797 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11798
11799 /*
11800 * This should be the maximum number of data bytes that may be
11801 * placed on the BD (not including padding).
11802 */
11803 rxq_init->buf_sz = (fp->rx_buf_size -
11804 IP_HEADER_ALIGNMENT_PADDING);
11805
11806 rxq_init->cl_qzone_id = fp->cl_qzone_id;
11807 rxq_init->tpa_agg_sz = tpa_agg_size;
11808 rxq_init->sge_buf_sz = sge_sz;
11809 rxq_init->max_sges_pkt = max_sge;
11810 rxq_init->rss_engine_id = SC_FUNC(sc);
11811 rxq_init->mcast_engine_id = SC_FUNC(sc);
11812
11813 /*
11814 * Maximum number of simultaneous TPA aggregations for this Queue.
11815 * For PF Clients it should be the maximum available number.
11816 * VF driver(s) may want to define it to a smaller value.
11817 */
11818 rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11819
11820 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11821 rxq_init->fw_sb_id = fp->fw_sb_id;
11822
11823 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11824
11825 /*
11826 * configure silent vlan removal
11827 * if multi function mode is afex, then mask default vlan
11828 */
11829 if (IS_MF_AFEX(sc)) {
11830 rxq_init->silent_removal_value =
11831 sc->devinfo.mf_info.afex_def_vlan_tag;
11832 rxq_init->silent_removal_mask = EVL_VLID_MASK;
11833 }
11834 }
11835
11836 static void
11837 bxe_pf_tx_q_prep(struct bxe_softc *sc,
11838 struct bxe_fastpath *fp,
11839 struct ecore_txq_setup_params *txq_init,
11840 uint8_t cos)
11841 {
11842 /*
11843 * XXX If multiple CoS is ever supported then each fastpath structure
11844 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
* fp->txdata[cos]->tx_dma.paddr; 11846 */ 11847 txq_init->dscr_map = fp->tx_dma.paddr; 11848 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 11849 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 11850 txq_init->fw_sb_id = fp->fw_sb_id; 11851 11852 /* 11853 * set the TSS leading client id for TX classification to the 11854 * leading RSS client id 11855 */ 11856 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); 11857 } 11858 11859 /* 11860 * This function performs 2 steps in a queue state machine: 11861 * 1) RESET->INIT 11862 * 2) INIT->SETUP 11863 */ 11864 static int 11865 bxe_setup_queue(struct bxe_softc *sc, 11866 struct bxe_fastpath *fp, 11867 uint8_t leading) 11868 { 11869 struct ecore_queue_state_params q_params = { NULL }; 11870 struct ecore_queue_setup_params *setup_params = 11871 &q_params.params.setup; 11872 #if 0 11873 struct ecore_queue_setup_tx_only_params *tx_only_params = 11874 &q_params.params.tx_only; 11875 uint8_t tx_index; 11876 #endif 11877 int rc; 11878 11879 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); 11880 11881 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 11882 11883 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 11884 11885 /* we want to wait for completion in this context */ 11886 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 11887 11888 /* prepare the INIT parameters */ 11889 bxe_pf_q_prep_init(sc, fp, &q_params.params.init); 11890 11891 /* Set the command */ 11892 q_params.cmd = ECORE_Q_CMD_INIT; 11893 11894 /* Change the state to INIT */ 11895 rc = ecore_queue_state_change(sc, &q_params); 11896 if (rc) { 11897 BLOGE(sc, "Queue(%d) INIT failed\n", fp->index); 11898 return (rc); 11899 } 11900 11901 BLOGD(sc, DBG_LOAD, "init complete\n"); 11902 11903 /* now move the Queue to the SETUP state */ 11904 memset(setup_params, 0, sizeof(*setup_params)); 11905 11906 /* set Queue flags */ 11907 setup_params->flags = bxe_get_q_flags(sc, fp, leading); 11908 11909 /* set general SETUP parameters */ 11910 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, 11911 FIRST_TX_COS_INDEX); 11912 11913 bxe_pf_rx_q_prep(sc, fp, 11914 &setup_params->pause_params, 11915 &setup_params->rxq_params); 11916 11917 bxe_pf_tx_q_prep(sc, fp, 11918 &setup_params->txq_params, 11919 FIRST_TX_COS_INDEX); 11920 11921 /* Set the command */ 11922 q_params.cmd = ECORE_Q_CMD_SETUP; 11923 11924 /* change the state to SETUP */ 11925 rc = ecore_queue_state_change(sc, &q_params); 11926 if (rc) { 11927 BLOGE(sc, "Queue(%d) SETUP failed\n", fp->index); 11928 return (rc); 11929 } 11930 11931 #if 0 11932 /* loop through the relevant tx-only indices */ 11933 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 11934 tx_index < sc->max_cos; 11935 tx_index++) { 11936 /* prepare and send tx-only ramrod */ 11937 rc = bxe_setup_tx_only(sc, fp, &q_params, 11938 tx_only_params, tx_index, leading); 11939 if (rc) { 11940 BLOGE(sc, "Queue(%d.%d) TX_ONLY_SETUP failed\n", 11941 fp->index, tx_index); 11942 return (rc); 11943 } 11944 } 11945 #endif 11946 11947 return (rc); 11948 } 11949 11950 static int 11951 bxe_setup_leading(struct bxe_softc *sc) 11952 { 11953 return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); 11954 } 11955 11956 static int 11957 bxe_config_rss_pf(struct bxe_softc *sc, 11958 struct ecore_rss_config_obj *rss_obj, 11959 uint8_t config_hash) 11960 { 11961 struct ecore_config_rss_params params = { NULL }; 11962 int i; 11963 11964 /* 11965 * Although RSS is meaningless when there is a single HW queue we 11966 * still need it enabled in order to have HW Rx hash
generated. 11967 */ 11968 11969 params.rss_obj = rss_obj; 11970 11971 bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 11972 11973 bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags); 11974 11975 /* RSS configuration */ 11976 bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags); 11977 bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags); 11978 bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags); 11979 bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags); 11980 if (rss_obj->udp_rss_v4) { 11981 bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags); 11982 } 11983 if (rss_obj->udp_rss_v6) { 11984 bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags); 11985 } 11986 11987 /* Hash bits */ 11988 params.rss_result_mask = MULTI_MASK; 11989 11990 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); 11991 11992 if (config_hash) { 11993 /* RSS keys */ 11994 for (i = 0; i < sizeof(params.rss_key) / 4; i++) { 11995 params.rss_key[i] = arc4random(); 11996 } 11997 11998 bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags); 11999 } 12000 12001 return (ecore_config_rss(sc, &params)); 12002 } 12003 12004 static int 12005 bxe_config_rss_eth(struct bxe_softc *sc, 12006 uint8_t config_hash) 12007 { 12008 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); 12009 } 12010 12011 static int 12012 bxe_init_rss_pf(struct bxe_softc *sc) 12013 { 12014 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); 12015 int i; 12016 12017 /* 12018 * Prepare the initial contents of the indirection table if 12019 * RSS is enabled 12020 */ 12021 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { 12022 sc->rss_conf_obj.ind_table[i] = 12023 (sc->fp->cl_id + (i % num_eth_queues)); 12024 } 12025 12026 if (sc->udp_rss) { 12027 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; 12028 } 12029 12030 /* 12031 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 12032 * per-port, so if explicit configuration is needed, do it only 12033 * for a PMF. 12034 * 12035 * For 57712 and newer it's a per-function configuration. 12036 */ 12037 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); 12038 } 12039 12040 static int 12041 bxe_set_mac_one(struct bxe_softc *sc, 12042 uint8_t *mac, 12043 struct ecore_vlan_mac_obj *obj, 12044 uint8_t set, 12045 int mac_type, 12046 unsigned long *ramrod_flags) 12047 { 12048 struct ecore_vlan_mac_ramrod_params ramrod_param; 12049 int rc; 12050 12051 memset(&ramrod_param, 0, sizeof(ramrod_param)); 12052 12053 /* fill in general parameters */ 12054 ramrod_param.vlan_mac_obj = obj; 12055 ramrod_param.ramrod_flags = *ramrod_flags; 12056 12057 /* fill a user request section if needed */ 12058 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) { 12059 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 12060 12061 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 12062 12063 /* Set the command: ADD or DEL */ 12064 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : 12065 ECORE_VLAN_MAC_DEL; 12066 } 12067 12068 rc = ecore_config_vlan_mac(sc, &ramrod_param); 12069 12070 if (rc == ECORE_EXISTS) { 12071 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12072 /* do not treat adding same MAC as error */ 12073 rc = 0; 12074 } else if (rc < 0) { 12075 BLOGE(sc, "%s MAC failed (%d)\n", (set ?
"Set" : "Delete"), rc); 12076 } 12077 12078 return (rc); 12079 } 12080 12081 static int 12082 bxe_set_eth_mac(struct bxe_softc *sc, 12083 uint8_t set) 12084 { 12085 unsigned long ramrod_flags = 0; 12086 12087 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); 12088 12089 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 12090 12091 /* Eth MAC is set on RSS leading client (fp[0]) */ 12092 return (bxe_set_mac_one(sc, sc->link_params.mac_addr, 12093 &sc->sp_objs->mac_obj, 12094 set, ECORE_ETH_MAC, &ramrod_flags)); 12095 } 12096 12097 #if 0 12098 static void 12099 bxe_update_max_mf_config(struct bxe_softc *sc, 12100 uint32_t value) 12101 { 12102 /* load old values */ 12103 uint32_t mf_cfg = sc->devinfo.mf_info.mf_config[SC_VN(sc)]; 12104 12105 if (value != bxe_extract_max_cfg(sc, mf_cfg)) { 12106 /* leave all but MAX value */ 12107 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; 12108 12109 /* set new MAX value */ 12110 mf_cfg |= ((value << FUNC_MF_CFG_MAX_BW_SHIFT) & 12111 FUNC_MF_CFG_MAX_BW_MASK); 12112 12113 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW, mf_cfg); 12114 } 12115 } 12116 #endif 12117 12118 static int 12119 bxe_get_cur_phy_idx(struct bxe_softc *sc) 12120 { 12121 uint32_t sel_phy_idx = 0; 12122 12123 if (sc->link_params.num_phys <= 1) { 12124 return (ELINK_INT_PHY); 12125 } 12126 12127 if (sc->link_vars.link_up) { 12128 sel_phy_idx = ELINK_EXT_PHY1; 12129 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 12130 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 12131 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 12132 ELINK_SUPPORTED_FIBRE)) 12133 sel_phy_idx = ELINK_EXT_PHY2; 12134 } else { 12135 switch (elink_phy_selection(&sc->link_params)) { 12136 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 12137 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 12138 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 12139 sel_phy_idx = ELINK_EXT_PHY1; 12140 break; 12141 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 12142 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 12143 sel_phy_idx = ELINK_EXT_PHY2; 12144 break; 12145 } 12146 } 12147 12148 return (sel_phy_idx); 12149 } 12150 12151 static int 12152 bxe_get_link_cfg_idx(struct bxe_softc *sc) 12153 { 12154 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); 12155 12156 /* 12157 * The selected activated PHY is always after swapping (in case PHY 12158 * swapping is enabled). 
So when swapping is enabled, we need to reverse 12159 * the configuration 12160 */ 12161 12162 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 12163 if (sel_phy_idx == ELINK_EXT_PHY1) 12164 sel_phy_idx = ELINK_EXT_PHY2; 12165 else if (sel_phy_idx == ELINK_EXT_PHY2) 12166 sel_phy_idx = ELINK_EXT_PHY1; 12167 } 12168 12169 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); 12170 } 12171 12172 static void 12173 bxe_set_requested_fc(struct bxe_softc *sc) 12174 { 12175 /* 12176 * Initialize link parameters structure variables 12177 * It is recommended to turn off RX FC for jumbo frames 12178 * for better performance 12179 */ 12180 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { 12181 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; 12182 } else { 12183 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; 12184 } 12185 } 12186 12187 static void 12188 bxe_calc_fc_adv(struct bxe_softc *sc) 12189 { 12190 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); 12191 switch (sc->link_vars.ieee_fc & 12192 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 12193 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 12194 default: 12195 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 12196 ADVERTISED_Pause); 12197 break; 12198 12199 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 12200 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 12201 ADVERTISED_Pause); 12202 break; 12203 12204 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 12205 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 12206 break; 12207 } 12208 } 12209 12210 static uint16_t 12211 bxe_get_mf_speed(struct bxe_softc *sc) 12212 { 12213 uint16_t line_speed = sc->link_vars.line_speed; 12214 if (IS_MF(sc)) { 12215 uint16_t maxCfg = 12216 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); 12217 12218 /* calculate the current MAX line speed limit for the MF devices */ 12219 if (IS_MF_SI(sc)) { 12220 line_speed = (line_speed * maxCfg) / 100; 12221 } else { /* SD mode */ 12222 uint16_t vn_max_rate = maxCfg * 100; 12223 12224 if (vn_max_rate < line_speed) { 12225 line_speed = vn_max_rate; 12226 } 12227 } 12228 } 12229 12230 return (line_speed); 12231 } 12232 12233 static void 12234 bxe_fill_report_data(struct bxe_softc *sc, 12235 struct bxe_link_report_data *data) 12236 { 12237 uint16_t line_speed = bxe_get_mf_speed(sc); 12238 12239 memset(data, 0, sizeof(*data)); 12240 12241 /* fill the report data with the effective line speed */ 12242 data->line_speed = line_speed; 12243 12244 /* Link is down */ 12245 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { 12246 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); 12247 } 12248 12249 /* Full DUPLEX */ 12250 if (sc->link_vars.duplex == DUPLEX_FULL) { 12251 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); 12252 } 12253 12254 /* Rx Flow Control is ON */ 12255 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { 12256 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); 12257 } 12258 12259 /* Tx Flow Control is ON */ 12260 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 12261 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); 12262 } 12263 } 12264 12265 /* report link status to OS, should be called under phy_lock */ 12266 static void 12267 bxe_link_report_locked(struct bxe_softc *sc) 12268 { 12269 struct bxe_link_report_data cur_data; 12270 12271 /* reread mf_cfg */ 12272 if (IS_PF(sc) && !CHIP_IS_E1(sc)) { 12273 bxe_read_mf_cfg(sc); 12274 } 12275 12276 /* Read the current link report info 
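(effective line speed, duplex, and flow control state) into cur_data so it can be compared against the last reported state below.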
*/ 12277 bxe_fill_report_data(sc, &cur_data); 12278 12279 /* Don't report link down or exactly the same link status twice */ 12280 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || 12281 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12282 &sc->last_reported_link.link_report_flags) && 12283 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12284 &cur_data.link_report_flags))) { 12285 return; 12286 } 12287 12288 sc->link_cnt++; 12289 12290 /* report new link params and remember the state for the next time */ 12291 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); 12292 12293 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12294 &cur_data.link_report_flags)) { 12295 if_link_state_change(sc->ifp, LINK_STATE_DOWN); 12296 BLOGI(sc, "NIC Link is Down\n"); 12297 } else { 12298 const char *duplex; 12299 const char *flow; 12300 12301 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, 12302 &cur_data.link_report_flags)) { 12303 duplex = "full"; 12304 } else { 12305 duplex = "half"; 12306 } 12307 12308 /* 12309 * Handle the FC at the end so that only these flags would be 12310 * possibly set. This way we may easily check if there is no FC 12311 * enabled. 12312 */ 12313 if (cur_data.link_report_flags) { 12314 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12315 &cur_data.link_report_flags) && 12316 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12317 &cur_data.link_report_flags)) { 12318 flow = "ON - receive & transmit"; 12319 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12320 &cur_data.link_report_flags) && 12321 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12322 &cur_data.link_report_flags)) { 12323 flow = "ON - receive"; 12324 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12325 &cur_data.link_report_flags) && 12326 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12327 &cur_data.link_report_flags)) { 12328 flow = "ON - transmit"; 12329 } else { 12330 flow = "none"; /* possible? 
*/ 12331 } 12332 } else { 12333 flow = "none"; 12334 } 12335 12336 if_link_state_change(sc->ifp, LINK_STATE_UP); 12337 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 12338 cur_data.line_speed, duplex, flow); 12339 } 12340 } 12341 12342 static void 12343 bxe_link_report(struct bxe_softc *sc) 12344 { 12345 bxe_acquire_phy_lock(sc); 12346 bxe_link_report_locked(sc); 12347 bxe_release_phy_lock(sc); 12348 } 12349 12350 static void 12351 bxe_link_status_update(struct bxe_softc *sc) 12352 { 12353 if (sc->state != BXE_STATE_OPEN) { 12354 return; 12355 } 12356 12357 #if 0 12358 /* read updated dcb configuration */ 12359 if (IS_PF(sc)) 12360 bxe_dcbx_pmf_update(sc); 12361 #endif 12362 12363 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 12364 elink_link_status_update(&sc->link_params, &sc->link_vars); 12365 } else { 12366 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 12367 ELINK_SUPPORTED_10baseT_Full | 12368 ELINK_SUPPORTED_100baseT_Half | 12369 ELINK_SUPPORTED_100baseT_Full | 12370 ELINK_SUPPORTED_1000baseT_Full | 12371 ELINK_SUPPORTED_2500baseX_Full | 12372 ELINK_SUPPORTED_10000baseT_Full | 12373 ELINK_SUPPORTED_TP | 12374 ELINK_SUPPORTED_FIBRE | 12375 ELINK_SUPPORTED_Autoneg | 12376 ELINK_SUPPORTED_Pause | 12377 ELINK_SUPPORTED_Asym_Pause); 12378 sc->port.advertising[0] = sc->port.supported[0]; 12379 12380 sc->link_params.sc = sc; 12381 sc->link_params.port = SC_PORT(sc); 12382 sc->link_params.req_duplex[0] = DUPLEX_FULL; 12383 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 12384 sc->link_params.req_line_speed[0] = SPEED_10000; 12385 sc->link_params.speed_cap_mask[0] = 0x7f0000; 12386 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 12387 12388 if (CHIP_REV_IS_FPGA(sc)) { 12389 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 12390 sc->link_vars.line_speed = ELINK_SPEED_1000; 12391 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12392 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 12393 } else { 12394 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 12395 sc->link_vars.line_speed = ELINK_SPEED_10000; 12396 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12397 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 12398 } 12399 12400 sc->link_vars.link_up = 1; 12401 12402 sc->link_vars.duplex = DUPLEX_FULL; 12403 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 12404 12405 if (IS_PF(sc)) { 12406 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); 12407 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12408 bxe_link_report(sc); 12409 } 12410 } 12411 12412 if (IS_PF(sc)) { 12413 if (sc->link_vars.link_up) { 12414 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12415 } else { 12416 bxe_stats_handle(sc, STATS_EVENT_STOP); 12417 } 12418 bxe_link_report(sc); 12419 } else { 12420 bxe_link_report(sc); 12421 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12422 } 12423 } 12424 12425 static int 12426 bxe_initial_phy_init(struct bxe_softc *sc, 12427 int load_mode) 12428 { 12429 int rc, cfg_idx = bxe_get_link_cfg_idx(sc); 12430 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 12431 struct elink_params *lp = &sc->link_params; 12432 12433 bxe_set_requested_fc(sc); 12434 12435 if (CHIP_REV_IS_SLOW(sc)) { 12436 uint32_t bond = CHIP_BOND_ID(sc); 12437 uint32_t feat = 0; 12438 12439 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { 12440 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12441 } else if (bond & 0x4) { 12442 if (CHIP_IS_E3(sc)) { 12443 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; 12444 } else { 12445 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12446 } 12447 } else 
if (bond & 0x8) { 12448 if (CHIP_IS_E3(sc)) { 12449 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; 12450 } else { 12451 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12452 } 12453 } 12454 12455 /* disable EMAC for E3 and above */ 12456 if (bond & 0x2) { 12457 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12458 } 12459 12460 sc->link_params.feature_config_flags |= feat; 12461 } 12462 12463 bxe_acquire_phy_lock(sc); 12464 12465 if (load_mode == LOAD_DIAG) { 12466 lp->loopback_mode = ELINK_LOOPBACK_XGXS; 12467 /* Prefer doing PHY loopback at 10G speed, if possible */ 12468 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { 12469 if (lp->speed_cap_mask[cfg_idx] & 12470 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 12471 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; 12472 } else { 12473 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; 12474 } 12475 } 12476 } 12477 12478 if (load_mode == LOAD_LOOPBACK_EXT) { 12479 lp->loopback_mode = ELINK_LOOPBACK_EXT; 12480 } 12481 12482 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 12483 12484 bxe_release_phy_lock(sc); 12485 12486 bxe_calc_fc_adv(sc); 12487 12488 if (sc->link_vars.link_up) { 12489 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12490 bxe_link_report(sc); 12491 } 12492 12493 if (!CHIP_REV_IS_SLOW(sc)) { 12494 bxe_periodic_start(sc); 12495 } 12496 12497 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; 12498 return (rc); 12499 } 12500 12501 /* must be called under IF_ADDR_LOCK */ 12502 12503 static int 12504 bxe_set_mc_list(struct bxe_softc *sc) 12505 { 12506 struct ecore_mcast_ramrod_params rparam = { NULL }; 12507 int rc = 0; 12508 int mc_count = 0; 12509 int mcnt, i; 12510 struct ecore_mcast_list_elem *mc_mac, *mc_mac_start; 12511 unsigned char *mta; 12512 if_t ifp = sc->ifp; 12513 12514 mc_count = if_multiaddr_count(ifp, -1); /* XXX they don't have a limit */ 12515 if (!mc_count) 12516 return (0); 12517 12518 mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * 12519 mc_count, M_DEVBUF, M_NOWAIT); 12520 12521 if (mta == NULL) { 12522 BLOGE(sc, "Failed to allocate temp mcast list\n"); 12523 return (-1); 12524 } 12525 bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count)); 12526 12527 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO)); 12528 mc_mac_start = mc_mac; 12529 12530 if (!mc_mac) { 12531 free(mta, M_DEVBUF); 12532 BLOGE(sc, "Failed to allocate temp mcast list\n"); 12533 return (-1); 12534 } 12535 bzero(mc_mac, (sizeof(*mc_mac) * mc_count)); 12536 12537 /* mta and mcnt not expected to be different */ 12538 if_multiaddr_array(ifp, mta, &mcnt, mc_count); 12539 12540 12541 rparam.mcast_obj = &sc->mcast_obj; 12542 ECORE_LIST_INIT(&rparam.mcast_list); 12543 12544 for (i = 0; i < mcnt; i++) { 12545 12546 mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN)); 12547 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list); 12548 12549 BLOGD(sc, DBG_LOAD, 12550 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n", 12551 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2], 12552 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]); 12553 12554 mc_mac++; 12555 } 12556 rparam.mcast_list_len = mc_count; 12557 12558 BXE_MCAST_LOCK(sc); 12559 12560 /* first, clear all configured multicast MACs */ 12561 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 12562 if (rc < 0) { 12563 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc); 12564 BXE_MCAST_UNLOCK(sc); 12565 free(mc_mac_start, M_DEVBUF); 12566 free(mta, M_DEVBUF); 12567 return (rc); 12568 } 12569 12570 /* Now add the new MACs */ 12571 rc =
ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD); 12572 if (rc < 0) { 12573 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); 12574 } 12575 12576 BXE_MCAST_UNLOCK(sc); 12577 12578 free(mc_mac_start, M_DEVBUF); 12579 free(mta, M_DEVBUF); 12580 12581 return (rc); 12582 } 12583 12584 static int 12585 bxe_set_uc_list(struct bxe_softc *sc) 12586 { 12587 if_t ifp = sc->ifp; 12588 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 12589 struct ifaddr *ifa; 12590 unsigned long ramrod_flags = 0; 12591 int rc; 12592 12593 #if __FreeBSD_version < 800000 12594 IF_ADDR_LOCK(ifp); 12595 #else 12596 if_addr_rlock(ifp); 12597 #endif 12598 12599 /* first schedule a cleanup of the old configuration */ 12600 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); 12601 if (rc < 0) { 12602 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); 12603 #if __FreeBSD_version < 800000 12604 IF_ADDR_UNLOCK(ifp); 12605 #else 12606 if_addr_runlock(ifp); 12607 #endif 12608 return (rc); 12609 } 12610 12611 ifa = if_getifaddr(ifp); /* XXX Is this structure */ 12612 while (ifa) { 12613 if (ifa->ifa_addr->sa_family != AF_LINK) { 12614 ifa = TAILQ_NEXT(ifa, ifa_link); 12615 continue; 12616 } 12617 12618 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 12619 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags); 12620 if (rc == -EEXIST) { 12621 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12622 /* do not treat adding same MAC as an error */ 12623 rc = 0; 12624 } else if (rc < 0) { 12625 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc); 12626 #if __FreeBSD_version < 800000 12627 IF_ADDR_UNLOCK(ifp); 12628 #else 12629 if_addr_runlock(ifp); 12630 #endif 12631 return (rc); 12632 } 12633 12634 ifa = TAILQ_NEXT(ifa, ifa_link); 12635 } 12636 12637 #if __FreeBSD_version < 800000 12638 IF_ADDR_UNLOCK(ifp); 12639 #else 12640 if_addr_runlock(ifp); 12641 #endif 12642 12643 /* Execute the pending commands */ 12644 bit_set(&ramrod_flags, RAMROD_CONT); 12645 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, 12646 ECORE_UC_LIST_MAC, &ramrod_flags)); 12647 } 12648 12649 static void 12650 bxe_set_rx_mode(struct bxe_softc *sc) 12651 { 12652 if_t ifp = sc->ifp; 12653 uint32_t rx_mode = BXE_RX_MODE_NORMAL; 12654 12655 if (sc->state != BXE_STATE_OPEN) { 12656 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); 12657 return; 12658 } 12659 12660 BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp)); 12661 12662 if (if_getflags(ifp) & IFF_PROMISC) { 12663 rx_mode = BXE_RX_MODE_PROMISC; 12664 } else if ((if_getflags(ifp) & IFF_ALLMULTI) || 12665 ((if_getamcount(ifp) > BXE_MAX_MULTICAST) && 12666 CHIP_IS_E1(sc))) { 12667 rx_mode = BXE_RX_MODE_ALLMULTI; 12668 } else { 12669 if (IS_PF(sc)) { 12670 /* some multicasts */ 12671 if (bxe_set_mc_list(sc) < 0) { 12672 rx_mode = BXE_RX_MODE_ALLMULTI; 12673 } 12674 if (bxe_set_uc_list(sc) < 0) { 12675 rx_mode = BXE_RX_MODE_PROMISC; 12676 } 12677 } 12678 #if 0 12679 else { 12680 /* 12681 * Configuring mcast to a VF involves sleeping (when we 12682 * wait for the PF's response).
Since this function is 12683 * called from a non sleepable context we must schedule 12684 * a work item for this purpose 12685 */ 12686 bxe_set_bit(BXE_SP_RTNL_VFPF_MCAST, &sc->sp_rtnl_state); 12687 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12688 } 12689 #endif 12690 } 12691 12692 sc->rx_mode = rx_mode; 12693 12694 /* schedule the rx_mode command */ 12695 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 12696 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n"); 12697 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 12698 return; 12699 } 12700 12701 if (IS_PF(sc)) { 12702 bxe_set_storm_rx_mode(sc); 12703 } 12704 #if 0 12705 else { 12706 /* 12707 * Configuring mcast to a VF involves sleeping (when we 12708 * wait for the PF's response). Since this function is 12709 * called from a non sleepable context we must schedule 12710 * a work item for this purpose 12711 */ 12712 bxe_set_bit(BXE_SP_RTNL_VFPF_STORM_RX_MODE, &sc->sp_rtnl_state); 12713 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12714 } 12715 #endif 12716 12717 } 12718 12719 12720 /* update flags in shmem */ 12721 static void 12722 bxe_update_drv_flags(struct bxe_softc *sc, 12723 uint32_t flags, 12724 uint32_t set) 12725 { 12726 uint32_t drv_flags; 12727 12728 if (SHMEM2_HAS(sc, drv_flags)) { 12729 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12730 drv_flags = SHMEM2_RD(sc, drv_flags); 12731 12732 if (set) { 12733 SET_FLAGS(drv_flags, flags); 12734 } else { 12735 RESET_FLAGS(drv_flags, flags); 12736 } 12737 12738 SHMEM2_WR(sc, drv_flags, drv_flags); 12739 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); 12740 12741 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12742 } 12743 } 12744 12745 /* periodic timer callout routine, only runs when the interface is up */ 12746 12747 static void 12748 bxe_periodic_callout_func(void *xsc) 12749 { 12750 struct bxe_softc *sc = (struct bxe_softc *)xsc; 12751 int i; 12752 12753 if (!BXE_CORE_TRYLOCK(sc)) { 12754 /* just bail and try again next time */ 12755 12756 if ((sc->state == BXE_STATE_OPEN) && 12757 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12758 /* schedule the next periodic callout */ 12759 callout_reset(&sc->periodic_callout, hz, 12760 bxe_periodic_callout_func, sc); 12761 } 12762 12763 return; 12764 } 12765 12766 if ((sc->state != BXE_STATE_OPEN) || 12767 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { 12768 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); 12769 BXE_CORE_UNLOCK(sc); 12770 return; 12771 } 12772 12773 /* Check for TX timeouts on any fastpath. */ 12774 FOR_EACH_QUEUE(sc, i) { 12775 if (bxe_watchdog(sc, &sc->fp[i]) != 0) { 12776 /* Ruh-Roh, chip was reset! */ 12777 break; 12778 } 12779 } 12780 12781 if (!CHIP_REV_IS_SLOW(sc)) { 12782 /* 12783 * This barrier is needed to ensure the ordering between the writing 12784 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and 12785 * the reading here. 
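Without the barrier a stale pmf value could be read here and the PHY
* periodic service below skipped just as this function becomes PMF.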
12786 */ 12787 mb(); 12788 if (sc->port.pmf) { 12789 bxe_acquire_phy_lock(sc); 12790 elink_period_func(&sc->link_params, &sc->link_vars); 12791 bxe_release_phy_lock(sc); 12792 } 12793 } 12794 12795 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) { 12796 int mb_idx = SC_FW_MB_IDX(sc); 12797 uint32_t drv_pulse; 12798 uint32_t mcp_pulse; 12799 12800 ++sc->fw_drv_pulse_wr_seq; 12801 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 12802 12803 drv_pulse = sc->fw_drv_pulse_wr_seq; 12804 bxe_drv_pulse(sc); 12805 12806 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & 12807 MCP_PULSE_SEQ_MASK); 12808 12809 /* 12810 * The delta between driver pulse and mcp response should 12811 * be 1 (before mcp response) or 0 (after mcp response). 12812 */ 12813 if ((drv_pulse != mcp_pulse) && 12814 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 12815 /* someone lost a heartbeat... */ 12816 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 12817 drv_pulse, mcp_pulse); 12818 } 12819 } 12820 12821 /* state is BXE_STATE_OPEN */ 12822 bxe_stats_handle(sc, STATS_EVENT_UPDATE); 12823 12824 #if 0 12825 /* sample VF bulletin board for new posts from PF */ 12826 if (IS_VF(sc)) { 12827 bxe_sample_bulletin(sc); 12828 } 12829 #endif 12830 12831 BXE_CORE_UNLOCK(sc); 12832 12833 if ((sc->state == BXE_STATE_OPEN) && 12834 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12835 /* schedule the next periodic callout */ 12836 callout_reset(&sc->periodic_callout, hz, 12837 bxe_periodic_callout_func, sc); 12838 } 12839 } 12840 12841 static void 12842 bxe_periodic_start(struct bxe_softc *sc) 12843 { 12844 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); 12845 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); 12846 } 12847 12848 static void 12849 bxe_periodic_stop(struct bxe_softc *sc) 12850 { 12851 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); 12852 callout_drain(&sc->periodic_callout); 12853 } 12854 12855 /* start the controller */ 12856 static __noinline int 12857 bxe_nic_load(struct bxe_softc *sc, 12858 int load_mode) 12859 { 12860 uint32_t val; 12861 int load_code = 0; 12862 int i, rc = 0; 12863 12864 BXE_CORE_LOCK_ASSERT(sc); 12865 12866 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n"); 12867 12868 sc->state = BXE_STATE_OPENING_WAITING_LOAD; 12869 12870 if (IS_PF(sc)) { 12871 /* must be called before memory allocation and HW init */ 12872 bxe_ilt_set_info(sc); 12873 } 12874 12875 sc->last_reported_link_state = LINK_STATE_UNKNOWN; 12876 12877 bxe_set_fp_rx_buf_size(sc); 12878 12879 if (bxe_alloc_fp_buffers(sc) != 0) { 12880 BLOGE(sc, "Failed to allocate fastpath memory\n"); 12881 sc->state = BXE_STATE_CLOSED; 12882 rc = ENOMEM; 12883 goto bxe_nic_load_error0; 12884 } 12885 12886 if (bxe_alloc_mem(sc) != 0) { 12887 sc->state = BXE_STATE_CLOSED; 12888 rc = ENOMEM; 12889 goto bxe_nic_load_error0; 12890 } 12891 12892 if (bxe_alloc_fw_stats_mem(sc) != 0) { 12893 sc->state = BXE_STATE_CLOSED; 12894 rc = ENOMEM; 12895 goto bxe_nic_load_error0; 12896 } 12897 12898 if (IS_PF(sc)) { 12899 /* set pf load just before approaching the MCP */ 12900 bxe_set_pf_load(sc); 12901 12902 /* if MCP exists send load request and analyze response */ 12903 if (!BXE_NOMCP(sc)) { 12904 /* attempt to load pf */ 12905 if (bxe_nic_load_request(sc, &load_code) != 0) { 12906 sc->state = BXE_STATE_CLOSED; 12907 rc = ENXIO; 12908 goto bxe_nic_load_error1; 12909 } 12910 12911 /* what did the MCP say? 
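bxe_nic_load_analyze_req() validates the response below and the load is aborted on any refusal.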
*/ 12912 if (bxe_nic_load_analyze_req(sc, load_code) != 0) { 12913 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12914 sc->state = BXE_STATE_CLOSED; 12915 rc = ENXIO; 12916 goto bxe_nic_load_error2; 12917 } 12918 } else { 12919 BLOGI(sc, "Device has no MCP!\n"); 12920 load_code = bxe_nic_load_no_mcp(sc); 12921 } 12922 12923 /* mark PMF if applicable */ 12924 bxe_nic_load_pmf(sc, load_code); 12925 12926 /* Init Function state controlling object */ 12927 bxe_init_func_obj(sc); 12928 12929 /* Initialize HW */ 12930 if (bxe_init_hw(sc, load_code) != 0) { 12931 BLOGE(sc, "HW init failed\n"); 12932 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12933 sc->state = BXE_STATE_CLOSED; 12934 rc = ENXIO; 12935 goto bxe_nic_load_error2; 12936 } 12937 } 12938 12939 /* set ALWAYS_ALIVE bit in shmem */ 12940 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 12941 bxe_drv_pulse(sc); 12942 sc->flags |= BXE_NO_PULSE; 12943 12944 /* attach interrupts */ 12945 if (bxe_interrupt_attach(sc) != 0) { 12946 sc->state = BXE_STATE_CLOSED; 12947 rc = ENXIO; 12948 goto bxe_nic_load_error2; 12949 } 12950 12951 bxe_nic_init(sc, load_code); 12952 12953 /* Init per-function objects */ 12954 if (IS_PF(sc)) { 12955 bxe_init_objs(sc); 12956 // XXX bxe_iov_nic_init(sc); 12957 12958 /* set AFEX default VLAN tag to an invalid value */ 12959 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 12960 // XXX bxe_nic_load_afex_dcc(sc, load_code); 12961 12962 sc->state = BXE_STATE_OPENING_WAITING_PORT; 12963 rc = bxe_func_start(sc); 12964 if (rc) { 12965 BLOGE(sc, "Function start failed!\n"); 12966 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12967 sc->state = BXE_STATE_ERROR; 12968 goto bxe_nic_load_error3; 12969 } 12970 12971 /* send LOAD_DONE command to MCP */ 12972 if (!BXE_NOMCP(sc)) { 12973 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12974 if (!load_code) { 12975 BLOGE(sc, "MCP response failure, aborting\n"); 12976 sc->state = BXE_STATE_ERROR; 12977 rc = ENXIO; 12978 goto bxe_nic_load_error3; 12979 } 12980 } 12981 12982 rc = bxe_setup_leading(sc); 12983 if (rc) { 12984 BLOGE(sc, "Setup leading failed!\n"); 12985 sc->state = BXE_STATE_ERROR; 12986 goto bxe_nic_load_error3; 12987 } 12988 12989 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { 12990 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); 12991 if (rc) { 12992 BLOGE(sc, "Queue(%d) setup failed\n", i); 12993 sc->state = BXE_STATE_ERROR; 12994 goto bxe_nic_load_error3; 12995 } 12996 } 12997 12998 rc = bxe_init_rss_pf(sc); 12999 if (rc) { 13000 BLOGE(sc, "PF RSS init failed\n"); 13001 sc->state = BXE_STATE_ERROR; 13002 goto bxe_nic_load_error3; 13003 } 13004 } 13005 /* XXX VF */ 13006 #if 0 13007 else { /* VF */ 13008 FOR_EACH_ETH_QUEUE(sc, i) { 13009 rc = bxe_vfpf_setup_q(sc, i); 13010 if (rc) { 13011 BLOGE(sc, "Queue(%d) setup failed\n", i); 13012 sc->state = BXE_STATE_ERROR; 13013 goto bxe_nic_load_error3; 13014 } 13015 } 13016 } 13017 #endif 13018 13019 /* now when Clients are configured we are ready to work */ 13020 sc->state = BXE_STATE_OPEN; 13021 13022 /* Configure a ucast MAC */ 13023 if (IS_PF(sc)) { 13024 rc = bxe_set_eth_mac(sc, TRUE); 13025 } 13026 #if 0 13027 else { /* IS_VF(sc) */ 13028 rc = bxe_vfpf_set_mac(sc); 13029 } 13030 #endif 13031 if (rc) { 13032 BLOGE(sc, "Setting Ethernet MAC failed\n"); 13033 sc->state = BXE_STATE_ERROR; 13034 goto bxe_nic_load_error3; 13035 } 13036 13037 #if 0 13038 if (IS_PF(sc) && sc->pending_max) { 13039 /* for AFEX */ 13040 bxe_update_max_mf_config(sc, sc->pending_max); 13041 sc->pending_max = 0; 13042 } 13043 #endif 13044 13045 if 
(sc->port.pmf) { 13046 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); 13047 if (rc) { 13048 sc->state = BXE_STATE_ERROR; 13049 goto bxe_nic_load_error3; 13050 } 13051 } 13052 13053 sc->link_params.feature_config_flags &= 13054 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; 13055 13056 /* start fast path */ 13057 13058 /* Initialize Rx filter */ 13059 bxe_set_rx_mode(sc); 13060 13061 /* start the Tx */ 13062 switch (/* XXX load_mode */LOAD_OPEN) { 13063 case LOAD_NORMAL: 13064 case LOAD_OPEN: 13065 break; 13066 13067 case LOAD_DIAG: 13068 case LOAD_LOOPBACK_EXT: 13069 sc->state = BXE_STATE_DIAG; 13070 break; 13071 13072 default: 13073 break; 13074 } 13075 13076 if (sc->port.pmf) { 13077 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); 13078 } else { 13079 bxe_link_status_update(sc); 13080 } 13081 13082 /* start the periodic timer callout */ 13083 bxe_periodic_start(sc); 13084 13085 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 13086 /* mark driver is loaded in shmem2 */ 13087 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 13088 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 13089 (val | 13090 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 13091 DRV_FLAGS_CAPABILITIES_LOADED_L2)); 13092 } 13093 13094 /* wait for all pending SP commands to complete */ 13095 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { 13096 BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); 13097 bxe_periodic_stop(sc); 13098 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); 13099 return (ENXIO); 13100 } 13101 13102 #if 0 13103 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ 13104 if (sc->port.pmf && (sc->state != BXE_STATE_DIAG)) { 13105 bxe_dcbx_init(sc, FALSE); 13106 } 13107 #endif 13108 13109 /* Tell the stack the driver is running! */ 13110 if_setdrvflags(sc->ifp, IFF_DRV_RUNNING); 13111 13112 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); 13113 13114 return (0); 13115 13116 bxe_nic_load_error3: 13117 13118 if (IS_PF(sc)) { 13119 bxe_int_disable_sync(sc, 1); 13120 13121 /* clean out queued objects */ 13122 bxe_squeeze_objects(sc); 13123 } 13124 13125 bxe_interrupt_detach(sc); 13126 13127 bxe_nic_load_error2: 13128 13129 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 13130 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 13131 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 13132 } 13133 13134 sc->port.pmf = 0; 13135 13136 bxe_nic_load_error1: 13137 13138 /* clear pf_load status, as it was already set */ 13139 if (IS_PF(sc)) { 13140 bxe_clear_pf_load(sc); 13141 } 13142 13143 bxe_nic_load_error0: 13144 13145 bxe_free_fw_stats_mem(sc); 13146 bxe_free_fp_buffers(sc); 13147 bxe_free_mem(sc); 13148 13149 return (rc); 13150 } 13151 13152 static int 13153 bxe_init_locked(struct bxe_softc *sc) 13154 { 13155 int other_engine = SC_PATH(sc) ? 0 : 1; 13156 uint8_t other_load_status, load_status; 13157 uint8_t global = FALSE; 13158 int rc; 13159 13160 BXE_CORE_LOCK_ASSERT(sc); 13161 13162 /* check if the driver is already running */ 13163 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { 13164 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n"); 13165 return (0); 13166 } 13167 13168 bxe_set_power_state(sc, PCI_PM_D0); 13169 13170 /* 13171 * If parity occurred during the unload, then attentions and/or 13172 * RECOVERY_IN_PROGRESS may still be set. If so we want the first function 13173 * loaded on the current engine to complete the recovery. Parity recovery 13174 * is only relevant for PF driver.
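The load status checks and the leader lock logic below select which function actually performs it.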
13175 */ 13176 if (IS_PF(sc)) { 13177 other_load_status = bxe_get_load_status(sc, other_engine); 13178 load_status = bxe_get_load_status(sc, SC_PATH(sc)); 13179 13180 if (!bxe_reset_is_done(sc, SC_PATH(sc)) || 13181 bxe_chk_parity_attn(sc, &global, TRUE)) { 13182 do { 13183 /* 13184 * If there are attentions and they are in global blocks, set 13185 * the GLOBAL_RESET bit regardless whether it will be this 13186 * function that will complete the recovery or not. 13187 */ 13188 if (global) { 13189 bxe_set_reset_global(sc); 13190 } 13191 13192 /* 13193 * Only the first function on the current engine should try 13194 * to recover in open. In case of attentions in global blocks 13195 * only the first in the chip should try to recover. 13196 */ 13197 if ((!load_status && (!global || !other_load_status)) && 13198 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) { 13199 BLOGI(sc, "Recovered during init\n"); 13200 break; 13201 } 13202 13203 /* recovery has failed... */ 13204 bxe_set_power_state(sc, PCI_PM_D3hot); 13205 sc->recovery_state = BXE_RECOVERY_FAILED; 13206 13207 BLOGE(sc, "Recovery flow hasn't properly " 13208 "completed yet, try again later. " 13209 "If you still see this message after a " 13210 "few retries then power cycle is required.\n"); 13211 13212 rc = ENXIO; 13213 goto bxe_init_locked_done; 13214 } while (0); 13215 } 13216 } 13217 13218 sc->recovery_state = BXE_RECOVERY_DONE; 13219 13220 rc = bxe_nic_load(sc, LOAD_OPEN); 13221 13222 bxe_init_locked_done: 13223 13224 if (rc) { 13225 /* Tell the stack the driver is NOT running! */ 13226 BLOGE(sc, "Initialization failed, " 13227 "stack notified driver is NOT running!\n"); 13228 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); 13229 } 13230 13231 return (rc); 13232 } 13233 13234 static int 13235 bxe_stop_locked(struct bxe_softc *sc) 13236 { 13237 BXE_CORE_LOCK_ASSERT(sc); 13238 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE)); 13239 } 13240 13241 /* 13242 * Handles controller initialization when called from an unlocked routine. 13243 * ifconfig calls this function. 13244 * 13245 * Returns: 13246 * void 13247 */ 13248 static void 13249 bxe_init(void *xsc) 13250 { 13251 struct bxe_softc *sc = (struct bxe_softc *)xsc; 13252 13253 BXE_CORE_LOCK(sc); 13254 bxe_init_locked(sc); 13255 BXE_CORE_UNLOCK(sc); 13256 } 13257 13258 static int 13259 bxe_init_ifnet(struct bxe_softc *sc) 13260 { 13261 if_t ifp; 13262 int capabilities; 13263 13264 /* ifconfig entrypoint for media type/status reporting */ 13265 ifmedia_init(&sc->ifmedia, IFM_IMASK, 13266 bxe_ifmedia_update, 13267 bxe_ifmedia_status); 13268 13269 /* set the default interface values */ 13270 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); 13271 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); 13272 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); 13273 13274 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? 
*/ 13275 13276 /* allocate the ifnet structure */ 13277 if ((ifp = if_gethandle(IFT_ETHER)) == NULL) { 13278 BLOGE(sc, "Interface allocation failed!\n"); 13279 return (ENXIO); 13280 } 13281 13282 if_setsoftc(ifp, sc); 13283 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); 13284 if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); 13285 if_setioctlfn(ifp, bxe_ioctl); 13286 if_setstartfn(ifp, bxe_tx_start); 13287 if_setgetcounterfn(ifp, bxe_get_counter); 13288 #if __FreeBSD_version >= 800000 13289 if_settransmitfn(ifp, bxe_tx_mq_start); 13290 if_setqflushfn(ifp, bxe_mq_flush); 13291 #endif 13292 #ifdef FreeBSD8_0 13293 if_settimer(ifp, 0); 13294 #endif 13295 if_setinitfn(ifp, bxe_init); 13296 if_setmtu(ifp, sc->mtu); 13297 if_sethwassist(ifp, (CSUM_IP | 13298 CSUM_TCP | 13299 CSUM_UDP | 13300 CSUM_TSO | 13301 CSUM_TCP_IPV6 | 13302 CSUM_UDP_IPV6)); 13303 13304 capabilities = 13305 #if __FreeBSD_version < 700000 13306 (IFCAP_VLAN_MTU | 13307 IFCAP_VLAN_HWTAGGING | 13308 IFCAP_HWCSUM | 13309 IFCAP_JUMBO_MTU | 13310 IFCAP_LRO); 13311 #else 13312 (IFCAP_VLAN_MTU | 13313 IFCAP_VLAN_HWTAGGING | 13314 IFCAP_VLAN_HWTSO | 13315 IFCAP_VLAN_HWFILTER | 13316 IFCAP_VLAN_HWCSUM | 13317 IFCAP_HWCSUM | 13318 IFCAP_JUMBO_MTU | 13319 IFCAP_LRO | 13320 IFCAP_TSO4 | 13321 IFCAP_TSO6 | 13322 IFCAP_WOL_MAGIC); 13323 #endif 13324 if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */ 13325 if_setbaudrate(ifp, IF_Gbps(10)); 13326 /* XXX */ 13327 if_setsendqlen(ifp, sc->tx_ring_size); 13328 if_setsendqready(ifp); 13329 /* XXX */ 13330 13331 sc->ifp = ifp; 13332 13333 /* attach to the Ethernet interface list */ 13334 ether_ifattach(ifp, sc->link_params.mac_addr); 13335 13336 return (0); 13337 } 13338 13339 static void 13340 bxe_deallocate_bars(struct bxe_softc *sc) 13341 { 13342 int i; 13343 13344 for (i = 0; i < MAX_BARS; i++) { 13345 if (sc->bar[i].resource != NULL) { 13346 bus_release_resource(sc->dev, 13347 SYS_RES_MEMORY, 13348 sc->bar[i].rid, 13349 sc->bar[i].resource); 13350 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", 13351 i, PCIR_BAR(i)); 13352 } 13353 } 13354 } 13355 13356 static int 13357 bxe_allocate_bars(struct bxe_softc *sc) 13358 { 13359 u_int flags; 13360 int i; 13361 13362 memset(sc->bar, 0, sizeof(sc->bar)); 13363 13364 for (i = 0; i < MAX_BARS; i++) { 13365 13366 /* memory resources reside at BARs 0, 2, 4 */ 13367 /* Run `pciconf -lb` to see mappings */ 13368 if ((i != 0) && (i != 2) && (i != 4)) { 13369 continue; 13370 } 13371 13372 sc->bar[i].rid = PCIR_BAR(i); 13373 13374 flags = RF_ACTIVE; 13375 if (i == 0) { 13376 flags |= RF_SHAREABLE; 13377 } 13378 13379 if ((sc->bar[i].resource = 13380 bus_alloc_resource_any(sc->dev, 13381 SYS_RES_MEMORY, 13382 &sc->bar[i].rid, 13383 flags)) == NULL) { 13384 #if 0 13385 /* BAR4 doesn't exist for E1 */ 13386 BLOGE(sc, "PCI BAR%d [%02x] memory allocation failed\n", 13387 i, PCIR_BAR(i)); 13388 #endif 13389 return (0); 13390 } 13391 13392 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource); 13393 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource); 13394 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource); 13395 13396 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n", 13397 i, PCIR_BAR(i), 13398 (void *)rman_get_start(sc->bar[i].resource), 13399 (void *)rman_get_end(sc->bar[i].resource), 13400 rman_get_size(sc->bar[i].resource), 13401 (void *)sc->bar[i].kva); 13402 } 13403 13404 return (0); 13405 } 13406 13407 static void 13408 bxe_get_function_num(struct bxe_softc *sc) 
13409 { 13410 uint32_t val = 0; 13411 13412 /* 13413 * Read the ME register to get the function number. The ME register 13414 * holds the relative-function number and absolute-function number. The 13415 * absolute-function number appears only in E2 and above. Before that 13416 * these bits always contained zero, therefore we cannot blindly use them. 13417 */ 13418 13419 val = REG_RD(sc, BAR_ME_REGISTER); 13420 13421 sc->pfunc_rel = 13422 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); 13423 sc->path_id = 13424 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; 13425 13426 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13427 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); 13428 } else { 13429 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); 13430 } 13431 13432 BLOGD(sc, DBG_LOAD, 13433 "Relative function %d, Absolute function %d, Path %d\n", 13434 sc->pfunc_rel, sc->pfunc_abs, sc->path_id); 13435 } 13436 13437 static uint32_t 13438 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc) 13439 { 13440 uint32_t shmem2_size; 13441 uint32_t offset; 13442 uint32_t mf_cfg_offset_value; 13443 13444 /* Non 57712 */ 13445 offset = (SHMEM_RD(sc, func_mb) + 13446 (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); 13447 13448 /* 57712 plus */ 13449 if (sc->devinfo.shmem2_base != 0) { 13450 shmem2_size = SHMEM2_RD(sc, size); 13451 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { 13452 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); 13453 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { 13454 offset = mf_cfg_offset_value; 13455 } 13456 } 13457 } 13458 13459 return (offset); 13460 } 13461 13462 static uint32_t 13463 bxe_pcie_capability_read(struct bxe_softc *sc, 13464 int reg, 13465 int width) 13466 { 13467 int pcie_reg; 13468 13469 /* ensure PCIe capability is enabled */ 13470 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) { 13471 if (pcie_reg != 0) { 13472 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg); 13473 return (pci_read_config(sc->dev, (pcie_reg + reg), width)); 13474 } 13475 } 13476 13477 BLOGE(sc, "PCIe capability NOT FOUND!!!\n"); 13478 13479 return (0); 13480 } 13481 13482 static uint8_t 13483 bxe_is_pcie_pending(struct bxe_softc *sc) 13484 { 13485 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) & 13486 PCIM_EXP_STA_TRANSACTION_PND); 13487 } 13488 13489 /* 13490 * Walk the PCI capabilities list for the device to find what features are 13491 * supported. These capabilities may be enabled/disabled by firmware so it's 13492 * best to walk the list rather than make assumptions. 13493 */ 13494 static void 13495 bxe_probe_pci_caps(struct bxe_softc *sc) 13496 { 13497 uint16_t link_status; 13498 int reg; 13499 13500 /* check if PCI Power Management is enabled */ 13501 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) { 13502 if (reg != 0) { 13503 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg); 13504 13505 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG; 13506 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg; 13507 } 13508 } 13509 13510 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2); 13511 13512 /* handle PCIe 2.0 workarounds for 57710 */ 13513 if (CHIP_IS_E1(sc)) { 13514 /* workaround for 57710 errata E4_57710_27462 */ 13515 sc->devinfo.pcie_link_speed = 13516 (REG_RD(sc, 0x3d04) & (1 << 24)) ?
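/* bit 24 set => Gen2 (2), clear => Gen1 (1) */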
2 : 1; 13517 13518 /* workaround for 57710 errata E4_57710_27488 */ 13519 sc->devinfo.pcie_link_width = 13520 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13521 if (sc->devinfo.pcie_link_speed > 1) { 13522 sc->devinfo.pcie_link_width = 13523 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1; 13524 } 13525 } else { 13526 sc->devinfo.pcie_link_speed = 13527 (link_status & PCIM_LINK_STA_SPEED); 13528 sc->devinfo.pcie_link_width = 13529 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13530 } 13531 13532 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n", 13533 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); 13534 13535 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG; 13536 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg; 13537 13538 /* check if MSI capability is enabled */ 13539 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) { 13540 if (reg != 0) { 13541 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg); 13542 13543 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG; 13544 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg; 13545 } 13546 } 13547 13548 /* check if MSI-X capability is enabled */ 13549 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) { 13550 if (reg != 0) { 13551 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg); 13552 13553 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG; 13554 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg; 13555 } 13556 } 13557 } 13558 13559 static int 13560 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc) 13561 { 13562 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13563 uint32_t val; 13564 13565 /* get the outer vlan if we're in switch-dependent mode */ 13566 13567 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13568 mf_info->ext_id = (uint16_t)val; 13569 13570 mf_info->multi_vnics_mode = 1; 13571 13572 if (!VALID_OVLAN(mf_info->ext_id)) { 13573 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id); 13574 return (1); 13575 } 13576 13577 /* get the capabilities */ 13578 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13579 FUNC_MF_CFG_PROTOCOL_ISCSI) { 13580 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; 13581 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13582 FUNC_MF_CFG_PROTOCOL_FCOE) { 13583 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; 13584 } else { 13585 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; 13586 } 13587 13588 mf_info->vnics_per_port = 13589 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13590 13591 return (0); 13592 } 13593 13594 static uint32_t 13595 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) 13596 { 13597 uint32_t retval = 0; 13598 uint32_t val; 13599 13600 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13601 13602 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { 13603 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { 13604 retval |= MF_PROTO_SUPPORT_ETHERNET; 13605 } 13606 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 13607 retval |= MF_PROTO_SUPPORT_ISCSI; 13608 } 13609 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 13610 retval |= MF_PROTO_SUPPORT_FCOE; 13611 } 13612 } 13613 13614 return (retval); 13615 } 13616 13617 static int 13618 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) 13619 { 13620 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13621 uint32_t val; 13622 13623 /* 13624 * There is no outer vlan if we're in switch-independent mode. 13625 * If the mac is valid then assume multi-function.
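* The per-function MACP_FUNC_CFG_FLAGS_MASK bits in func_ext_config,
* read below, decide whether multiple vnics are active.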
13626 */ 13627 13628 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13629 13630 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 13631 13632 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13633 13634 mf_info->vnics_per_port = 13635 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13636 13637 return (0); 13638 } 13639 13640 static int 13641 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) 13642 { 13643 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13644 uint32_t e1hov_tag; 13645 uint32_t func_config; 13646 uint32_t niv_config; 13647 13648 mf_info->multi_vnics_mode = 1; 13649 13650 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13651 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13652 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 13653 13654 mf_info->ext_id = 13655 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 13656 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 13657 13658 mf_info->default_vlan = 13659 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 13660 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 13661 13662 mf_info->niv_allowed_priorities = 13663 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 13664 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 13665 13666 mf_info->niv_default_cos = 13667 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 13668 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 13669 13670 mf_info->afex_vlan_mode = 13671 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 13672 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 13673 13674 mf_info->niv_mba_enabled = 13675 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 13676 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 13677 13678 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13679 13680 mf_info->vnics_per_port = 13681 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13682 13683 return (0); 13684 } 13685 13686 static int 13687 bxe_check_valid_mf_cfg(struct bxe_softc *sc) 13688 { 13689 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13690 uint32_t mf_cfg1; 13691 uint32_t mf_cfg2; 13692 uint32_t ovlan1; 13693 uint32_t ovlan2; 13694 uint8_t i, j; 13695 13696 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", 13697 SC_PORT(sc)); 13698 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", 13699 mf_info->mf_config[SC_VN(sc)]); 13700 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", 13701 mf_info->multi_vnics_mode); 13702 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", 13703 mf_info->vnics_per_port); 13704 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", 13705 mf_info->ext_id); 13706 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", 13707 mf_info->min_bw[0], mf_info->min_bw[1], 13708 mf_info->min_bw[2], mf_info->min_bw[3]); 13709 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", 13710 mf_info->max_bw[0], mf_info->max_bw[1], 13711 mf_info->max_bw[2], mf_info->max_bw[3]); 13712 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", 13713 sc->mac_addr_str); 13714 13715 /* various MF mode sanity checks... 
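hidden functions, vnic/ovlan mode mismatches, and duplicate ovlans on the same port are all rejected as invalid configurations.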
*/ 13716 13717 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { 13718 BLOGE(sc, "Enumerated function %d is marked as hidden\n", 13719 SC_PORT(sc)); 13720 return (1); 13721 } 13722 13723 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { 13724 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n", 13725 mf_info->vnics_per_port, mf_info->multi_vnics_mode); 13726 return (1); 13727 } 13728 13729 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13730 /* vnic id > 0 must have valid ovlan in switch-dependent mode */ 13731 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { 13732 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n", 13733 SC_VN(sc), OVLAN(sc)); 13734 return (1); 13735 } 13736 13737 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { 13738 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n", 13739 mf_info->multi_vnics_mode, OVLAN(sc)); 13740 return (1); 13741 } 13742 13743 /* 13744 * Verify all functions are either in MF or SF mode. If MF, make 13745 * sure that all non-hidden functions have a valid ovlan. If SF, 13746 * make sure that all non-hidden functions have an invalid ovlan. 13747 */ 13748 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13749 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13750 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13751 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13752 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) || 13753 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) { 13754 BLOGE(sc, "mf_mode=SD function %d MF config " 13755 "mismatch, multi_vnics_mode=%d ovlan=%d\n", 13756 i, mf_info->multi_vnics_mode, ovlan1); 13757 return (1); 13758 } 13759 } 13760 13761 /* Verify all funcs on the same port each have a different ovlan. */ 13762 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13763 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13764 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13765 /* iterate from the next function on the port to the max func */ 13766 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { 13767 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config); 13768 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag); 13769 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13770 VALID_OVLAN(ovlan1) && 13771 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) && 13772 VALID_OVLAN(ovlan2) && 13773 (ovlan1 == ovlan2)) { 13774 BLOGE(sc, "mf_mode=SD functions %d and %d " 13775 "have the same ovlan (%d)\n", 13776 i, j, ovlan1); 13777 return (1); 13778 } 13779 } 13780 } 13781 } /* MULTI_FUNCTION_SD */ 13782 13783 return (0); 13784 } 13785 13786 static int 13787 bxe_get_mf_cfg_info(struct bxe_softc *sc) 13788 { 13789 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13790 uint32_t val, mac_upper; 13791 uint8_t i, vnic; 13792 13793 /* initialize mf_info defaults */ 13794 mf_info->vnics_per_port = 1; 13795 mf_info->multi_vnics_mode = FALSE; 13796 mf_info->path_has_ovlan = FALSE; 13797 mf_info->mf_mode = SINGLE_FUNCTION; 13798 13799 if (!CHIP_IS_MF_CAP(sc)) { 13800 return (0); 13801 } 13802 13803 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { 13804 BLOGE(sc, "Invalid mf_cfg_base!\n"); 13805 return (1); 13806 } 13807 13808 /* get the MF mode (switch dependent / independent / single-function) */ 13809 13810 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13811 13812 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) 13813 { 13814 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 13815 13816 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13817 13818 /* check for legal upper mac bytes */ 13819 if (mac_upper !=
FUNC_MF_CFG_UPPERMAC_DEFAULT) { 13820 mf_info->mf_mode = MULTI_FUNCTION_SI; 13821 } else { 13822 BLOGE(sc, "Invalid config for Switch Independent mode\n"); 13823 } 13824 13825 break; 13826 13827 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 13828 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 13829 13830 /* get outer vlan configuration */ 13831 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13832 13833 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != 13834 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 13835 mf_info->mf_mode = MULTI_FUNCTION_SD; 13836 } else { 13837 BLOGE(sc, "Invalid config for Switch Dependent mode\n"); 13838 } 13839 13840 break; 13841 13842 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 13843 13844 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ 13845 return (0); 13846 13847 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 13848 13849 /* 13850 * Mark MF mode as NIV if MCP version includes NPAR-SD support 13851 * and the MAC address is valid. 13852 */ 13853 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13854 13855 if ((SHMEM2_HAS(sc, afex_driver_support)) && 13856 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { 13857 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 13858 } else { 13859 BLOGE(sc, "Invalid config for AFEX mode\n"); 13860 } 13861 13862 break; 13863 13864 default: 13865 13866 BLOGE(sc, "Unknown MF mode (0x%08x)\n", 13867 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); 13868 13869 return (1); 13870 } 13871 13872 /* set path mf_mode (which could be different from the function mf_mode) */ 13873 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13874 mf_info->path_has_ovlan = TRUE; 13875 } else if (mf_info->mf_mode == SINGLE_FUNCTION) { 13876 /* 13877 * Decide on the path multi vnics mode. If we're not in MF mode and 13878 * we are in 4-port mode, it is sufficient to check vnic-0 of the 13879 * other port on the same path. 13880 */ 13881 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13882 uint8_t other_port = !(PORT_ID(sc) & 1); 13883 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); 13884 13885 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); 13886 13887 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ?
1 : 0; 13888 } 13889 } 13890 13891 if (mf_info->mf_mode == SINGLE_FUNCTION) { 13892 /* invalid MF config */ 13893 if (SC_VN(sc) >= 1) { 13894 BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); 13895 return (1); 13896 } 13897 13898 return (0); 13899 } 13900 13901 /* get the MF configuration */ 13902 mf_info->mf_config[SC_VN(sc)] = 13903 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13904 13905 switch (mf_info->mf_mode) 13906 { 13907 case MULTI_FUNCTION_SD: 13908 13909 bxe_get_shmem_mf_cfg_info_sd(sc); 13910 break; 13911 13912 case MULTI_FUNCTION_SI: 13913 13914 bxe_get_shmem_mf_cfg_info_si(sc); 13915 break; 13916 13917 case MULTI_FUNCTION_AFEX: 13918 13919 bxe_get_shmem_mf_cfg_info_niv(sc); 13920 break; 13921 13922 default: 13923 13924 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", 13925 mf_info->mf_mode); 13926 return (1); 13927 } 13928 13929 /* get the congestion management parameters */ 13930 13931 vnic = 0; 13932 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13933 /* get min/max bw */ 13934 val = MFCFG_RD(sc, func_mf_config[i].config); 13935 mf_info->min_bw[vnic] = 13936 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); 13937 mf_info->max_bw[vnic] = 13938 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); 13939 vnic++; 13940 } 13941 13942 return (bxe_check_valid_mf_cfg(sc)); 13943 } 13944 13945 static int 13946 bxe_get_shmem_info(struct bxe_softc *sc) 13947 { 13948 int port; 13949 uint32_t mac_hi, mac_lo, val; 13950 13951 port = SC_PORT(sc); 13952 mac_hi = mac_lo = 0; 13953 13954 sc->link_params.sc = sc; 13955 sc->link_params.port = port; 13956 13957 /* get the hardware config info */ 13958 sc->devinfo.hw_config = 13959 SHMEM_RD(sc, dev_info.shared_hw_config.config); 13960 sc->devinfo.hw_config2 = 13961 SHMEM_RD(sc, dev_info.shared_hw_config.config2); 13962 13963 sc->link_params.hw_led_mode = 13964 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> 13965 SHARED_HW_CFG_LED_MODE_SHIFT); 13966 13967 /* get the port feature config */ 13968 sc->port.config = 13969 SHMEM_RD(sc, dev_info.port_feature_config[port].config); 13970 13971 /* get the link params */ 13972 sc->link_params.speed_cap_mask[0] = 13973 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); 13974 sc->link_params.speed_cap_mask[1] = 13975 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2); 13976 13977 /* get the lane config */ 13978 sc->link_params.lane_config = 13979 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); 13980 13981 /* get the link config */ 13982 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); 13983 sc->port.link_config[ELINK_INT_PHY] = val; 13984 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); 13985 sc->port.link_config[ELINK_EXT_PHY1] = 13986 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); 13987 13988 /* get the override preemphasis flag and enable it or turn it off */ 13989 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13990 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { 13991 sc->link_params.feature_config_flags |= 13992 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 13993 } else { 13994 sc->link_params.feature_config_flags &= 13995 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 13996 } 13997 13998 /* get the initial value of the link params */ 13999 sc->link_params.multi_phy_config = 14000 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); 14001 14002 /* get external phy info */ 14003 sc->port.ext_phy_config = 14004 SHMEM_RD(sc,
dev_info.port_hw_config[port].external_phy_config); 14005 14006 /* get the multifunction configuration */ 14007 bxe_get_mf_cfg_info(sc); 14008 14009 /* get the mac address */ 14010 if (IS_MF(sc)) { 14011 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 14012 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 14013 } else { 14014 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 14015 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 14016 } 14017 14018 if ((mac_lo == 0) && (mac_hi == 0)) { 14019 *sc->mac_addr_str = 0; 14020 BLOGE(sc, "No Ethernet address programmed!\n"); 14021 } else { 14022 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); 14023 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); 14024 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); 14025 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); 14026 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); 14027 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); 14028 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 14029 "%02x:%02x:%02x:%02x:%02x:%02x", 14030 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], 14031 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], 14032 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); 14033 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); 14034 } 14035 14036 #if 0 14037 if (!IS_MF(sc) && 14038 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14039 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE)) { 14040 sc->flags |= BXE_NO_ISCSI; 14041 } 14042 if (!IS_MF(sc) && 14043 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14044 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI)) { 14045 sc->flags |= BXE_NO_FCOE_FLAG; 14046 } 14047 #endif 14048 14049 return (0); 14050 } 14051 14052 static void 14053 bxe_get_tunable_params(struct bxe_softc *sc) 14054 { 14055 /* sanity checks */ 14056 14057 if ((bxe_interrupt_mode != INTR_MODE_INTX) && 14058 (bxe_interrupt_mode != INTR_MODE_MSI) && 14059 (bxe_interrupt_mode != INTR_MODE_MSIX)) { 14060 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); 14061 bxe_interrupt_mode = INTR_MODE_MSIX; 14062 } 14063 14064 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { 14065 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); 14066 bxe_queue_count = 0; 14067 } 14068 14069 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { 14070 if (bxe_max_rx_bufs == 0) { 14071 bxe_max_rx_bufs = RX_BD_USABLE; 14072 } else { 14073 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); 14074 bxe_max_rx_bufs = 2048; 14075 } 14076 } 14077 14078 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) { 14079 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); 14080 bxe_hc_rx_ticks = 25; 14081 } 14082 14083 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { 14084 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); 14085 bxe_hc_tx_ticks = 50; 14086 } 14087 14088 if (bxe_max_aggregation_size == 0) { 14089 bxe_max_aggregation_size = TPA_AGG_SIZE; 14090 } 14091 14092 if (bxe_max_aggregation_size > 0xffff) { 14093 BLOGW(sc, "invalid max_aggregation_size (%d)\n", 14094 bxe_max_aggregation_size); 14095 bxe_max_aggregation_size = TPA_AGG_SIZE; 14096 } 14097 14098 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { 14099 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); 14100 bxe_mrrs = -1; 14101 } 14102 14103 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { 14104 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); 
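/* out of range: fall back to 0, i.e. follow the NVRAM/hardware default */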
14105 bxe_autogreeen = 0; 14106 } 14107 14108 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { 14109 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); 14110 bxe_udp_rss = 0; 14111 } 14112 14113 /* pull in user settings */ 14114 14115 sc->interrupt_mode = bxe_interrupt_mode; 14116 sc->max_rx_bufs = bxe_max_rx_bufs; 14117 sc->hc_rx_ticks = bxe_hc_rx_ticks; 14118 sc->hc_tx_ticks = bxe_hc_tx_ticks; 14119 sc->max_aggregation_size = bxe_max_aggregation_size; 14120 sc->mrrs = bxe_mrrs; 14121 sc->autogreeen = bxe_autogreeen; 14122 sc->udp_rss = bxe_udp_rss; 14123 14124 if (bxe_interrupt_mode == INTR_MODE_INTX) { 14125 sc->num_queues = 1; 14126 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ 14127 sc->num_queues = 14128 min((bxe_queue_count ? bxe_queue_count : mp_ncpus), 14129 MAX_RSS_CHAINS); 14130 if (sc->num_queues > mp_ncpus) { 14131 sc->num_queues = mp_ncpus; 14132 } 14133 } 14134 14135 BLOGD(sc, DBG_LOAD, 14136 "User Config: " 14137 "debug=0x%lx " 14138 "interrupt_mode=%d " 14139 "queue_count=%d " 14140 "hc_rx_ticks=%d " 14141 "hc_tx_ticks=%d " 14142 "rx_budget=%d " 14143 "max_aggregation_size=%d " 14144 "mrrs=%d " 14145 "autogreeen=%d " 14146 "udp_rss=%d\n", 14147 bxe_debug, 14148 sc->interrupt_mode, 14149 sc->num_queues, 14150 sc->hc_rx_ticks, 14151 sc->hc_tx_ticks, 14152 bxe_rx_budget, 14153 sc->max_aggregation_size, 14154 sc->mrrs, 14155 sc->autogreeen, 14156 sc->udp_rss); 14157 } 14158 14159 static void 14160 bxe_media_detect(struct bxe_softc *sc) 14161 { 14162 uint32_t phy_idx = bxe_get_cur_phy_idx(sc); 14163 switch (sc->link_params.phy[phy_idx].media_type) { 14164 case ELINK_ETH_PHY_SFPP_10G_FIBER: 14165 case ELINK_ETH_PHY_XFP_FIBER: 14166 BLOGI(sc, "Found 10Gb Fiber media.\n"); 14167 sc->media = IFM_10G_SR; 14168 break; 14169 case ELINK_ETH_PHY_SFP_1G_FIBER: 14170 BLOGI(sc, "Found 1Gb Fiber media.\n"); 14171 sc->media = IFM_1000_SX; 14172 break; 14173 case ELINK_ETH_PHY_KR: 14174 case ELINK_ETH_PHY_CX4: 14175 BLOGI(sc, "Found 10GBase-CX4 media.\n"); 14176 sc->media = IFM_10G_CX4; 14177 break; 14178 case ELINK_ETH_PHY_DA_TWINAX: 14179 BLOGI(sc, "Found 10Gb Twinax media.\n"); 14180 sc->media = IFM_10G_TWINAX; 14181 break; 14182 case ELINK_ETH_PHY_BASE_T: 14183 if (sc->link_params.speed_cap_mask[0] & 14184 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 14185 BLOGI(sc, "Found 10GBase-T media.\n"); 14186 sc->media = IFM_10G_T; 14187 } else { 14188 BLOGI(sc, "Found 1000Base-T media.\n"); 14189 sc->media = IFM_1000_T; 14190 } 14191 break; 14192 case ELINK_ETH_PHY_NOT_PRESENT: 14193 BLOGI(sc, "Media not present.\n"); 14194 sc->media = 0; 14195 break; 14196 case ELINK_ETH_PHY_UNSPECIFIED: 14197 default: 14198 BLOGI(sc, "Unknown media!\n"); 14199 sc->media = 0; 14200 break; 14201 } 14202 } 14203 14204 #define GET_FIELD(value, fname) \ 14205 (((value) & (fname##_MASK)) >> (fname##_SHIFT)) 14206 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 14207 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 14208 14209 static int 14210 bxe_get_igu_cam_info(struct bxe_softc *sc) 14211 { 14212 int pfid = SC_FUNC(sc); 14213 int igu_sb_id; 14214 uint32_t val; 14215 uint8_t fid, igu_sb_cnt = 0; 14216 14217 sc->igu_base_sb = 0xff; 14218 14219 if (CHIP_INT_MODE_IS_BC(sc)) { 14220 int vn = SC_VN(sc); 14221 igu_sb_cnt = sc->igu_sb_cnt; 14222 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * 14223 FP_SB_MAX_E1x); 14224 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + 14225 (CHIP_IS_MODE_4_PORT(sc) ? 
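/* the DSB index follows the PF in 4-port mode, the vnic otherwise */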
pfid : vn)); 14226 return (0); 14227 } 14228 14229 /* IGU in normal mode - read CAM */ 14230 for (igu_sb_id = 0; 14231 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; 14232 igu_sb_id++) { 14233 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 14234 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { 14235 continue; 14236 } 14237 fid = IGU_FID(val); 14238 if ((fid & IGU_FID_ENCODE_IS_PF)) { 14239 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { 14240 continue; 14241 } 14242 if (IGU_VEC(val) == 0) { 14243 /* default status block */ 14244 sc->igu_dsb_id = igu_sb_id; 14245 } else { 14246 if (sc->igu_base_sb == 0xff) { 14247 sc->igu_base_sb = igu_sb_id; 14248 } 14249 igu_sb_cnt++; 14250 } 14251 } 14252 } 14253 14254 /* 14255 * Due to new PF resource allocation by MFW T7.4 and above, it is 14256 possible that the number of CAM entries will not equal the value 14257 advertised in PCI. The driver should use the minimum of the two as the 14258 actual status block count. 14259 */ 14260 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); 14261 14262 if (igu_sb_cnt == 0) { 14263 BLOGE(sc, "CAM configuration error\n"); 14264 return (-1); 14265 } 14266 14267 return (0); 14268 } 14269 14270 /* 14271 * Gather various information from the device config space, the device itself, 14272 * shmem, and the user input. 14273 */ 14274 static int 14275 bxe_get_device_info(struct bxe_softc *sc) 14276 { 14277 uint32_t val; 14278 int rc; 14279 14280 /* Get the data for the device */ 14281 sc->devinfo.vendor_id = pci_get_vendor(sc->dev); 14282 sc->devinfo.device_id = pci_get_device(sc->dev); 14283 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev); 14284 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev); 14285 14286 /* get the chip revision (chip metal comes from pci config space) */ 14287 sc->devinfo.chip_id = 14288 sc->link_params.chip_id = 14289 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | 14290 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | 14291 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | 14292 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); 14293 14294 /* force 57811 according to MISC register */ 14295 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 14296 if (CHIP_IS_57810(sc)) { 14297 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | 14298 (sc->devinfo.chip_id & 0x0000ffff)); 14299 } else if (CHIP_IS_57810_MF(sc)) { 14300 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | 14301 (sc->devinfo.chip_id & 0x0000ffff)); 14302 } 14303 sc->devinfo.chip_id |= 0x1; 14304 } 14305 14306 BLOGD(sc, DBG_LOAD, 14307 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n", 14308 sc->devinfo.chip_id, 14309 ((sc->devinfo.chip_id >> 16) & 0xffff), 14310 ((sc->devinfo.chip_id >> 12) & 0xf), 14311 ((sc->devinfo.chip_id >> 4) & 0xff), 14312 ((sc->devinfo.chip_id >> 0) & 0xf)); 14313 14314 val = (REG_RD(sc, 0x2874) & 0x55); 14315 if ((sc->devinfo.chip_id & 0x1) || 14316 (CHIP_IS_E1(sc) && val) || 14317 (CHIP_IS_E1H(sc) && (val == 0x55))) { 14318 sc->flags |= BXE_ONE_PORT_FLAG; 14319 BLOGD(sc, DBG_LOAD, "single port device\n"); 14320 } 14321 14322 /* set the doorbell size */ 14323 sc->doorbell_size = (1 << BXE_DB_SHIFT); 14324 14325 /* determine whether the device is in 2 port or 4 port mode */ 14326 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1H */ 14327 if (CHIP_IS_E2E3(sc)) { 14328 /* 14329 * Read port4mode_en_ovwr[0]: 14330 * If 1, four port mode is in port4mode_en_ovwr[1]. 14331 * If 0, four port mode is in port4mode_en[0].
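* In other words, bit 0 of the override register selects the source of the mode: when set, bit 1 of that same register supplies it; when clear, the regular port4mode_en register supplies it.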
14332 */ 14333 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); 14334 if (val & 1) { 14335 val = ((val >> 1) & 1); 14336 } else { 14337 val = REG_RD(sc, MISC_REG_PORT4MODE_EN); 14338 } 14339 14340 sc->devinfo.chip_port_mode = 14341 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; 14342 14343 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); 14344 } 14345 14346 /* get the function and path info for the device */ 14347 bxe_get_function_num(sc); 14348 14349 /* get the shared memory base address */ 14350 sc->devinfo.shmem_base = 14351 sc->link_params.shmem_base = 14352 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 14353 sc->devinfo.shmem2_base = 14354 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 : 14355 MISC_REG_GENERIC_CR_0)); 14356 14357 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n", 14358 sc->devinfo.shmem_base, sc->devinfo.shmem2_base); 14359 14360 if (!sc->devinfo.shmem_base) { 14361 /* this should ONLY prevent upcoming shmem reads */ 14362 BLOGI(sc, "MCP not active\n"); 14363 sc->flags |= BXE_NO_MCP_FLAG; 14364 return (0); 14365 } 14366 14367 /* make sure the shared memory contents are valid */ 14368 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 14369 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != 14370 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { 14371 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val); 14372 return (0); 14373 } 14374 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val); 14375 14376 /* get the bootcode version */ 14377 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); 14378 snprintf(sc->devinfo.bc_ver_str, 14379 sizeof(sc->devinfo.bc_ver_str), 14380 "%d.%d.%d", 14381 ((sc->devinfo.bc_ver >> 24) & 0xff), 14382 ((sc->devinfo.bc_ver >> 16) & 0xff), 14383 ((sc->devinfo.bc_ver >> 8) & 0xff)); 14384 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str); 14385 14386 /* get the multi-function config base address */ 14387 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc); 14388 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base); 14389 14390 /* clean indirect addresses as they're not used */ 14391 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); 14392 if (IS_PF(sc)) { 14393 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); 14394 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); 14395 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); 14396 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); 14397 if (CHIP_IS_E1x(sc)) { 14398 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); 14399 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); 14400 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); 14401 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); 14402 } 14403 14404 /* 14405 * Enable internal target-read (in case we are probed after PF 14406 * FLR). Must be done prior to any BAR read access.
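* Without it, register reads through the BAR below could fail after a preceding FLR.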
Only for 14407 57712 and up 14408 */ 14409 if (!CHIP_IS_E1x(sc)) { 14410 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 14411 } 14412 } 14413 14414 /* get the nvram size */ 14415 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); 14416 sc->devinfo.flash_size = 14417 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); 14418 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size); 14419 14420 /* get PCI capabilities */ 14421 bxe_probe_pci_caps(sc); 14422 14423 bxe_set_power_state(sc, PCI_PM_D0); 14424 14425 /* get various configuration parameters from shmem */ 14426 bxe_get_shmem_info(sc); 14427 14428 if (sc->devinfo.pcie_msix_cap_reg != 0) { 14429 val = pci_read_config(sc->dev, 14430 (sc->devinfo.pcie_msix_cap_reg + 14431 PCIR_MSIX_CTRL), 14432 2); 14433 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE); 14434 } else { 14435 sc->igu_sb_cnt = 1; 14436 } 14437 14438 sc->igu_base_addr = BAR_IGU_INTMEM; 14439 14440 /* initialize IGU parameters */ 14441 if (CHIP_IS_E1x(sc)) { 14442 sc->devinfo.int_block = INT_BLOCK_HC; 14443 sc->igu_dsb_id = DEF_SB_IGU_ID; 14444 sc->igu_base_sb = 0; 14445 } else { 14446 sc->devinfo.int_block = INT_BLOCK_IGU; 14447 14448 /* do not allow device reset during IGU info processing */ 14449 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14450 14451 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 14452 14453 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 14454 int tout = 5000; 14455 14456 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n"); 14457 14458 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 14459 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); 14460 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); 14461 14462 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 14463 tout--; 14464 DELAY(1000); 14465 } 14466 14467 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 14468 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n"); 14469 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14470 return (-1); 14471 } 14472 } 14473 14474 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 14475 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n"); 14476 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; 14477 } else { 14478 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n"); 14479 } 14480 14481 rc = bxe_get_igu_cam_info(sc); 14482 14483 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14484 14485 if (rc) { 14486 return (rc); 14487 } 14488 } 14489 14490 /* 14491 * Get base FW non-default (fast path) status block ID. This value is 14492 * used to initialize the fw_sb_id saved on the fp/queue structure to 14493 * determine the id used by the FW. 14494 */ 14495 if (CHIP_IS_E1x(sc)) { 14496 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); 14497 } else { 14498 /* 14499 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of 14500 the same queue are indicated on the same IGU SB). So we prefer 14501 FW and IGU SBs to be the same value.
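For example, with igu_base_sb = 6, fastpath queue 0 uses FW SB 6 and IGU SB 6, queue 1 uses SB 7, and so on.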
14502 */ 14503 sc->base_fw_ndsb = sc->igu_base_sb; 14504 } 14505 14506 BLOGD(sc, DBG_LOAD, 14507 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", 14508 sc->igu_dsb_id, sc->igu_base_sb, 14509 sc->igu_sb_cnt, sc->base_fw_ndsb); 14510 14511 elink_phy_probe(&sc->link_params); 14512 14513 return (0); 14514 } 14515 14516 static void 14517 bxe_link_settings_supported(struct bxe_softc *sc, 14518 uint32_t switch_cfg) 14519 { 14520 uint32_t cfg_size = 0; 14521 uint32_t idx; 14522 uint8_t port = SC_PORT(sc); 14523 14524 /* aggregation of supported attributes of all external phys */ 14525 sc->port.supported[0] = 0; 14526 sc->port.supported[1] = 0; 14527 14528 switch (sc->link_params.num_phys) { 14529 case 1: 14530 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; 14531 cfg_size = 1; 14532 break; 14533 case 2: 14534 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; 14535 cfg_size = 1; 14536 break; 14537 case 3: 14538 if (sc->link_params.multi_phy_config & 14539 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 14540 sc->port.supported[1] = 14541 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14542 sc->port.supported[0] = 14543 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14544 } else { 14545 sc->port.supported[0] = 14546 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14547 sc->port.supported[1] = 14548 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14549 } 14550 cfg_size = 2; 14551 break; 14552 } 14553 14554 if (!(sc->port.supported[0] || sc->port.supported[1])) { 14555 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", 14556 SHMEM_RD(sc, 14557 dev_info.port_hw_config[port].external_phy_config), 14558 SHMEM_RD(sc, 14559 dev_info.port_hw_config[port].external_phy_config2)); 14560 return; 14561 } 14562 14563 if (CHIP_IS_E3(sc)) 14564 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 14565 else { 14566 switch (switch_cfg) { 14567 case ELINK_SWITCH_CFG_1G: 14568 sc->port.phy_addr = 14569 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 14570 break; 14571 case ELINK_SWITCH_CFG_10G: 14572 sc->port.phy_addr = 14573 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 14574 break; 14575 default: 14576 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", 14577 sc->port.link_config[0]); 14578 return; 14579 } 14580 } 14581 14582 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); 14583 14584 /* mask what we support according to speed_cap_mask per configuration */ 14585 for (idx = 0; idx < cfg_size; idx++) { 14586 if (!(sc->link_params.speed_cap_mask[idx] & 14587 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 14588 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; 14589 } 14590 14591 if (!(sc->link_params.speed_cap_mask[idx] & 14592 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 14593 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; 14594 } 14595 14596 if (!(sc->link_params.speed_cap_mask[idx] & 14597 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 14598 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; 14599 } 14600 14601 if (!(sc->link_params.speed_cap_mask[idx] & 14602 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 14603 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; 14604 } 14605 14606 if (!(sc->link_params.speed_cap_mask[idx] & 14607 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 14608 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; 14609 } 14610 14611 if (!(sc->link_params.speed_cap_mask[idx] & 14612 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 14613 sc->port.supported[idx] &= 
~ELINK_SUPPORTED_2500baseX_Full; 14614 } 14615 14616 if (!(sc->link_params.speed_cap_mask[idx] & 14617 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 14618 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; 14619 } 14620 14621 if (!(sc->link_params.speed_cap_mask[idx] & 14622 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 14623 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; 14624 } 14625 } 14626 14627 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", 14628 sc->port.supported[0], sc->port.supported[1]); 14629 } 14630 14631 static void 14632 bxe_link_settings_requested(struct bxe_softc *sc) 14633 { 14634 uint32_t link_config; 14635 uint32_t idx; 14636 uint32_t cfg_size = 0; 14637 14638 sc->port.advertising[0] = 0; 14639 sc->port.advertising[1] = 0; 14640 14641 switch (sc->link_params.num_phys) { 14642 case 1: 14643 case 2: 14644 cfg_size = 1; 14645 break; 14646 case 3: 14647 cfg_size = 2; 14648 break; 14649 } 14650 14651 for (idx = 0; idx < cfg_size; idx++) { 14652 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 14653 link_config = sc->port.link_config[idx]; 14654 14655 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 14656 case PORT_FEATURE_LINK_SPEED_AUTO: 14657 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 14658 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14659 sc->port.advertising[idx] |= sc->port.supported[idx]; 14660 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 14661 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 14662 sc->port.advertising[idx] |= 14663 (ELINK_SUPPORTED_100baseT_Half | 14664 ELINK_SUPPORTED_100baseT_Full); 14665 } else { 14666 /* force 10G, no AN */ 14667 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14668 sc->port.advertising[idx] |= 14669 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 14670 continue; 14671 } 14672 break; 14673 14674 case PORT_FEATURE_LINK_SPEED_10M_FULL: 14675 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { 14676 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14677 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | 14678 ADVERTISED_TP); 14679 } else { 14680 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14681 "speed_cap_mask=0x%08x\n", 14682 link_config, sc->link_params.speed_cap_mask[idx]); 14683 return; 14684 } 14685 break; 14686 14687 case PORT_FEATURE_LINK_SPEED_10M_HALF: 14688 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { 14689 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14690 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14691 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | 14692 ADVERTISED_TP); 14693 } else { 14694 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14695 "speed_cap_mask=0x%08x\n", 14696 link_config, sc->link_params.speed_cap_mask[idx]); 14697 return; 14698 } 14699 break; 14700 14701 case PORT_FEATURE_LINK_SPEED_100M_FULL: 14702 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { 14703 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14704 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | 14705 ADVERTISED_TP); 14706 } else { 14707 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14708 "speed_cap_mask=0x%08x\n", 14709 link_config, sc->link_params.speed_cap_mask[idx]); 14710 return; 14711 } 14712 break; 14713 14714 case PORT_FEATURE_LINK_SPEED_100M_HALF: 14715 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { 14716 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14717 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14718 sc->port.advertising[idx] |= 
(ADVERTISED_100baseT_Half | 14719 ADVERTISED_TP); 14720 } else { 14721 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14722 "speed_cap_mask=0x%08x\n", 14723 link_config, sc->link_params.speed_cap_mask[idx]); 14724 return; 14725 } 14726 break; 14727 14728 case PORT_FEATURE_LINK_SPEED_1G: 14729 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { 14730 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; 14731 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | 14732 ADVERTISED_TP); 14733 } else { 14734 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14735 "speed_cap_mask=0x%08x\n", 14736 link_config, sc->link_params.speed_cap_mask[idx]); 14737 return; 14738 } 14739 break; 14740 14741 case PORT_FEATURE_LINK_SPEED_2_5G: 14742 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { 14743 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; 14744 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | 14745 ADVERTISED_TP); 14746 } else { 14747 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14748 "speed_cap_mask=0x%08x\n", 14749 link_config, sc->link_params.speed_cap_mask[idx]); 14750 return; 14751 } 14752 break; 14753 14754 case PORT_FEATURE_LINK_SPEED_10G_CX4: 14755 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { 14756 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14757 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | 14758 ADVERTISED_FIBRE); 14759 } else { 14760 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14761 "speed_cap_mask=0x%08x\n", 14762 link_config, sc->link_params.speed_cap_mask[idx]); 14763 return; 14764 } 14765 break; 14766 14767 case PORT_FEATURE_LINK_SPEED_20G: 14768 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 14769 break; 14770 14771 default: 14772 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14773 "speed_cap_mask=0x%08x\n", 14774 link_config, sc->link_params.speed_cap_mask[idx]); 14775 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14776 sc->port.advertising[idx] = sc->port.supported[idx]; 14777 break; 14778 } 14779 14780 sc->link_params.req_flow_ctrl[idx] = 14781 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 14782 14783 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 14784 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 14785 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; 14786 } else { 14787 bxe_set_requested_fc(sc); 14788 } 14789 } 14790 14791 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " 14792 "req_flow_ctrl=0x%x advertising=0x%x\n", 14793 sc->link_params.req_line_speed[idx], 14794 sc->link_params.req_duplex[idx], 14795 sc->link_params.req_flow_ctrl[idx], 14796 sc->port.advertising[idx]); 14797 } 14798 } 14799 14800 static void 14801 bxe_get_phy_info(struct bxe_softc *sc) 14802 { 14803 uint8_t port = SC_PORT(sc); 14804 uint32_t config = sc->port.config; 14805 uint32_t eee_mode; 14806 14807 /* shmem data already read in bxe_get_shmem_info() */ 14808 14809 BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x " 14810 "link_config0=0x%08x\n", 14811 sc->link_params.lane_config, 14812 sc->link_params.speed_cap_mask[0], 14813 sc->port.link_config[0]); 14814 14815 bxe_link_settings_supported(sc, sc->link_params.switch_cfg); 14816 bxe_link_settings_requested(sc); 14817 14818 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { 14819 sc->link_params.feature_config_flags |= 14820 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14821 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { 14822 
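/* the tunable forces AutoGrEEEn off, regardless of the NVRAM setting */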
sc->link_params.feature_config_flags &= 14823 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14824 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { 14825 sc->link_params.feature_config_flags |= 14826 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14827 } 14828 14829 /* configure link feature according to nvram value */ 14830 eee_mode = 14831 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) & 14832 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 14833 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 14834 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 14835 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | 14836 ELINK_EEE_MODE_ENABLE_LPI | 14837 ELINK_EEE_MODE_OUTPUT_TIME); 14838 } else { 14839 sc->link_params.eee_mode = 0; 14840 } 14841 14842 /* get the media type */ 14843 bxe_media_detect(sc); 14844 } 14845 14846 static void 14847 bxe_get_params(struct bxe_softc *sc) 14848 { 14849 /* get user tunable params */ 14850 bxe_get_tunable_params(sc); 14851 14852 /* select the RX and TX ring sizes */ 14853 sc->tx_ring_size = TX_BD_USABLE; 14854 sc->rx_ring_size = RX_BD_USABLE; 14855 14856 /* XXX disable WoL */ 14857 sc->wol = 0; 14858 } 14859 14860 static void 14861 bxe_set_modes_bitmap(struct bxe_softc *sc) 14862 { 14863 uint32_t flags = 0; 14864 14865 if (CHIP_REV_IS_FPGA(sc)) { 14866 SET_FLAGS(flags, MODE_FPGA); 14867 } else if (CHIP_REV_IS_EMUL(sc)) { 14868 SET_FLAGS(flags, MODE_EMUL); 14869 } else { 14870 SET_FLAGS(flags, MODE_ASIC); 14871 } 14872 14873 if (CHIP_IS_MODE_4_PORT(sc)) { 14874 SET_FLAGS(flags, MODE_PORT4); 14875 } else { 14876 SET_FLAGS(flags, MODE_PORT2); 14877 } 14878 14879 if (CHIP_IS_E2(sc)) { 14880 SET_FLAGS(flags, MODE_E2); 14881 } else if (CHIP_IS_E3(sc)) { 14882 SET_FLAGS(flags, MODE_E3); 14883 if (CHIP_REV(sc) == CHIP_REV_Ax) { 14884 SET_FLAGS(flags, MODE_E3_A0); 14885 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ { 14886 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 14887 } 14888 } 14889 14890 if (IS_MF(sc)) { 14891 SET_FLAGS(flags, MODE_MF); 14892 switch (sc->devinfo.mf_info.mf_mode) { 14893 case MULTI_FUNCTION_SD: 14894 SET_FLAGS(flags, MODE_MF_SD); 14895 break; 14896 case MULTI_FUNCTION_SI: 14897 SET_FLAGS(flags, MODE_MF_SI); 14898 break; 14899 case MULTI_FUNCTION_AFEX: 14900 SET_FLAGS(flags, MODE_MF_AFEX); 14901 break; 14902 } 14903 } else { 14904 SET_FLAGS(flags, MODE_SF); 14905 } 14906 14907 #if defined(__LITTLE_ENDIAN) 14908 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 14909 #else /* __BIG_ENDIAN */ 14910 SET_FLAGS(flags, MODE_BIG_ENDIAN); 14911 #endif 14912 14913 INIT_MODE_FLAGS(sc) = flags; 14914 } 14915 14916 static int 14917 bxe_alloc_hsi_mem(struct bxe_softc *sc) 14918 { 14919 struct bxe_fastpath *fp; 14920 bus_addr_t busaddr; 14921 int max_agg_queues; 14922 int max_segments; 14923 bus_size_t max_size; 14924 bus_size_t max_seg_size; 14925 char buf[32]; 14926 int rc; 14927 int i, j; 14928 14929 /* XXX zero out all vars here and call bxe_free_hsi_mem on error */ 14930 14931 /* allocate the parent bus DMA tag */ 14932 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */ 14933 1, /* alignment */ 14934 0, /* boundary limit */ 14935 BUS_SPACE_MAXADDR, /* restricted low */ 14936 BUS_SPACE_MAXADDR, /* restricted hi */ 14937 NULL, /* addr filter() */ 14938 NULL, /* addr filter() arg */ 14939 BUS_SPACE_MAXSIZE_32BIT, /* max map size */ 14940 BUS_SPACE_UNRESTRICTED, /* num discontinuous */ 14941 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ 14942 0, /* flags */ 14943 NULL, /* lock() */ 14944 NULL, /* lock() arg */ 14945 &sc->parent_dma_tag); /* returned dma tag
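(parent of every ring and mbuf DMA tag created below)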
*/ 14946 if (rc != 0) { 14947 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); 14948 return (1); 14949 } 14950 14951 /************************/ 14952 /* DEFAULT STATUS BLOCK */ 14953 /************************/ 14954 14955 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), 14956 &sc->def_sb_dma, "default status block") != 0) { 14957 /* XXX */ 14958 bus_dma_tag_destroy(sc->parent_dma_tag); 14959 return (1); 14960 } 14961 14962 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 14963 14964 /***************/ 14965 /* EVENT QUEUE */ 14966 /***************/ 14967 14968 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14969 &sc->eq_dma, "event queue") != 0) { 14970 /* XXX */ 14971 bxe_dma_free(sc, &sc->def_sb_dma); 14972 sc->def_sb = NULL; 14973 bus_dma_tag_destroy(sc->parent_dma_tag); 14974 return (1); 14975 } 14976 14977 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; 14978 14979 /*************/ 14980 /* SLOW PATH */ 14981 /*************/ 14982 14983 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), 14984 &sc->sp_dma, "slow path") != 0) { 14985 /* XXX */ 14986 bxe_dma_free(sc, &sc->eq_dma); 14987 sc->eq = NULL; 14988 bxe_dma_free(sc, &sc->def_sb_dma); 14989 sc->def_sb = NULL; 14990 bus_dma_tag_destroy(sc->parent_dma_tag); 14991 return (1); 14992 } 14993 14994 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; 14995 14996 /*******************/ 14997 /* SLOW PATH QUEUE */ 14998 /*******************/ 14999 15000 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 15001 &sc->spq_dma, "slow path queue") != 0) { 15002 /* XXX */ 15003 bxe_dma_free(sc, &sc->sp_dma); 15004 sc->sp = NULL; 15005 bxe_dma_free(sc, &sc->eq_dma); 15006 sc->eq = NULL; 15007 bxe_dma_free(sc, &sc->def_sb_dma); 15008 sc->def_sb = NULL; 15009 bus_dma_tag_destroy(sc->parent_dma_tag); 15010 return (1); 15011 } 15012 15013 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 15014 15015 /***************************/ 15016 /* FW DECOMPRESSION BUFFER */ 15017 /***************************/ 15018 15019 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 15020 "fw decompression buffer") != 0) { 15021 /* XXX */ 15022 bxe_dma_free(sc, &sc->spq_dma); 15023 sc->spq = NULL; 15024 bxe_dma_free(sc, &sc->sp_dma); 15025 sc->sp = NULL; 15026 bxe_dma_free(sc, &sc->eq_dma); 15027 sc->eq = NULL; 15028 bxe_dma_free(sc, &sc->def_sb_dma); 15029 sc->def_sb = NULL; 15030 bus_dma_tag_destroy(sc->parent_dma_tag); 15031 return (1); 15032 } 15033 15034 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 15035 15036 if ((sc->gz_strm = 15037 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { 15038 /* XXX */ 15039 bxe_dma_free(sc, &sc->gz_buf_dma); 15040 sc->gz_buf = NULL; 15041 bxe_dma_free(sc, &sc->spq_dma); 15042 sc->spq = NULL; 15043 bxe_dma_free(sc, &sc->sp_dma); 15044 sc->sp = NULL; 15045 bxe_dma_free(sc, &sc->eq_dma); 15046 sc->eq = NULL; 15047 bxe_dma_free(sc, &sc->def_sb_dma); 15048 sc->def_sb = NULL; 15049 bus_dma_tag_destroy(sc->parent_dma_tag); 15050 return (1); 15051 } 15052 15053 /*************/ 15054 /* FASTPATHS */ 15055 /*************/ 15056 15057 /* allocate DMA memory for each fastpath structure */ 15058 for (i = 0; i < sc->num_queues; i++) { 15059 fp = &sc->fp[i]; 15060 fp->sc = sc; 15061 fp->index = i; 15062 15063 /*******************/ 15064 /* FP STATUS BLOCK */ 15065 /*******************/ 15066 15067 snprintf(buf, sizeof(buf), "fp %d status block", i); 15068 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), 15069 &fp->sb_dma, buf) != 0) { 15070 /* XXX unwind and free previous fastpath allocations */ 15071 BLOGE(sc, "Failed 
to alloc %s\n", buf); 15072 return (1); 15073 } else { 15074 if (CHIP_IS_E2E3(sc)) { 15075 fp->status_block.e2_sb = 15076 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; 15077 } else { 15078 fp->status_block.e1x_sb = 15079 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; 15080 } 15081 } 15082 15083 /******************/ 15084 /* FP TX BD CHAIN */ 15085 /******************/ 15086 15087 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); 15088 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), 15089 &fp->tx_dma, buf) != 0) { 15090 /* XXX unwind and free previous fastpath allocations */ 15091 BLOGE(sc, "Failed to alloc %s\n", buf); 15092 return (1); 15093 } else { 15094 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; 15095 } 15096 15097 /* link together the tx bd chain pages */ 15098 for (j = 1; j <= TX_BD_NUM_PAGES; j++) { 15099 /* index into the tx bd chain array to last entry per page */ 15100 struct eth_tx_next_bd *tx_next_bd = 15101 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; 15102 /* point to the next page and wrap from last page */ 15103 busaddr = (fp->tx_dma.paddr + 15104 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); 15105 tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); 15106 tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); 15107 } 15108 15109 /******************/ 15110 /* FP RX BD CHAIN */ 15111 /******************/ 15112 15113 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); 15114 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), 15115 &fp->rx_dma, buf) != 0) { 15116 /* XXX unwind and free previous fastpath allocations */ 15117 BLOGE(sc, "Failed to alloc %s\n", buf); 15118 return (1); 15119 } else { 15120 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; 15121 } 15122 15123 /* link together the rx bd chain pages */ 15124 for (j = 1; j <= RX_BD_NUM_PAGES; j++) { 15125 /* index into the rx bd chain array to last entry per page */ 15126 struct eth_rx_bd *rx_bd = 15127 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; 15128 /* point to the next page and wrap from last page */ 15129 busaddr = (fp->rx_dma.paddr + 15130 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); 15131 rx_bd->addr_hi = htole32(U64_HI(busaddr)); 15132 rx_bd->addr_lo = htole32(U64_LO(busaddr)); 15133 } 15134 15135 /*******************/ 15136 /* FP RX RCQ CHAIN */ 15137 /*******************/ 15138 15139 snprintf(buf, sizeof(buf), "fp %d rcq chain", i); 15140 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), 15141 &fp->rcq_dma, buf) != 0) { 15142 /* XXX unwind and free previous fastpath allocations */ 15143 BLOGE(sc, "Failed to alloc %s\n", buf); 15144 return (1); 15145 } else { 15146 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; 15147 } 15148 15149 /* link together the rcq chain pages */ 15150 for (j = 1; j <= RCQ_NUM_PAGES; j++) { 15151 /* index into the rcq chain array to last entry per page */ 15152 struct eth_rx_cqe_next_page *rx_cqe_next = 15153 (struct eth_rx_cqe_next_page *) 15154 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; 15155 /* point to the next page and wrap from last page */ 15156 busaddr = (fp->rcq_dma.paddr + 15157 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); 15158 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); 15159 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); 15160 } 15161 15162 /*******************/ 15163 /* FP RX SGE CHAIN */ 15164 /*******************/ 15165 15166 snprintf(buf, sizeof(buf), "fp %d sge chain", i); 15167 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), 15168 &fp->rx_sge_dma, buf) != 0) { 15169 /* XXX unwind and free previous fastpath 
allocations */ 15170 BLOGE(sc, "Failed to alloc %s\n", buf); 15171 return (1); 15172 } else { 15173 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; 15174 } 15175 15176 /* link together the sge chain pages */ 15177 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { 15178 /* index into the rcq chain array to last entry per page */ 15179 struct eth_rx_sge *rx_sge = 15180 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; 15181 /* point to the next page and wrap from last page */ 15182 busaddr = (fp->rx_sge_dma.paddr + 15183 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); 15184 rx_sge->addr_hi = htole32(U64_HI(busaddr)); 15185 rx_sge->addr_lo = htole32(U64_LO(busaddr)); 15186 } 15187 15188 /***********************/ 15189 /* FP TX MBUF DMA MAPS */ 15190 /***********************/ 15191 15192 /* set required sizes before mapping to conserve resources */ 15193 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { 15194 max_size = BXE_TSO_MAX_SIZE; 15195 max_segments = BXE_TSO_MAX_SEGMENTS; 15196 max_seg_size = BXE_TSO_MAX_SEG_SIZE; 15197 } else { 15198 max_size = (MCLBYTES * BXE_MAX_SEGMENTS); 15199 max_segments = BXE_MAX_SEGMENTS; 15200 max_seg_size = MCLBYTES; 15201 } 15202 15203 /* create a dma tag for the tx mbufs */ 15204 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15205 1, /* alignment */ 15206 0, /* boundary limit */ 15207 BUS_SPACE_MAXADDR, /* restricted low */ 15208 BUS_SPACE_MAXADDR, /* restricted hi */ 15209 NULL, /* addr filter() */ 15210 NULL, /* addr filter() arg */ 15211 max_size, /* max map size */ 15212 max_segments, /* num discontinuous */ 15213 max_seg_size, /* max seg size */ 15214 0, /* flags */ 15215 NULL, /* lock() */ 15216 NULL, /* lock() arg */ 15217 &fp->tx_mbuf_tag); /* returned dma tag */ 15218 if (rc != 0) { 15219 /* XXX unwind and free previous fastpath allocations */ 15220 BLOGE(sc, "Failed to create dma tag for " 15221 "'fp %d tx mbufs' (%d)\n", 15222 i, rc); 15223 return (1); 15224 } 15225 15226 /* create dma maps for each of the tx mbuf clusters */ 15227 for (j = 0; j < TX_BD_TOTAL; j++) { 15228 if (bus_dmamap_create(fp->tx_mbuf_tag, 15229 BUS_DMA_NOWAIT, 15230 &fp->tx_mbuf_chain[j].m_map)) { 15231 /* XXX unwind and free previous fastpath allocations */ 15232 BLOGE(sc, "Failed to create dma map for " 15233 "'fp %d tx mbuf %d' (%d)\n", 15234 i, j, rc); 15235 return (1); 15236 } 15237 } 15238 15239 /***********************/ 15240 /* FP RX MBUF DMA MAPS */ 15241 /***********************/ 15242 15243 /* create a dma tag for the rx mbufs */ 15244 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15245 1, /* alignment */ 15246 0, /* boundary limit */ 15247 BUS_SPACE_MAXADDR, /* restricted low */ 15248 BUS_SPACE_MAXADDR, /* restricted hi */ 15249 NULL, /* addr filter() */ 15250 NULL, /* addr filter() arg */ 15251 MJUM9BYTES, /* max map size */ 15252 1, /* num discontinuous */ 15253 MJUM9BYTES, /* max seg size */ 15254 0, /* flags */ 15255 NULL, /* lock() */ 15256 NULL, /* lock() arg */ 15257 &fp->rx_mbuf_tag); /* returned dma tag */ 15258 if (rc != 0) { 15259 /* XXX unwind and free previous fastpath allocations */ 15260 BLOGE(sc, "Failed to create dma tag for " 15261 "'fp %d rx mbufs' (%d)\n", 15262 i, rc); 15263 return (1); 15264 } 15265 15266 /* create dma maps for each of the rx mbuf clusters */ 15267 for (j = 0; j < RX_BD_TOTAL; j++) { 15268 if (bus_dmamap_create(fp->rx_mbuf_tag, 15269 BUS_DMA_NOWAIT, 15270 &fp->rx_mbuf_chain[j].m_map)) { 15271 /* XXX unwind and free previous fastpath allocations */ 15272 BLOGE(sc, "Failed to create 
dma map for " 15273 "'fp %d rx mbuf %d' (%d)\n", 15274 i, j, rc); 15275 return (1); 15276 } 15277 } 15278 15279 /* create dma map for the spare rx mbuf cluster */ 15280 if (bus_dmamap_create(fp->rx_mbuf_tag, 15281 BUS_DMA_NOWAIT, 15282 &fp->rx_mbuf_spare_map)) { 15283 /* XXX unwind and free previous fastpath allocations */ 15284 BLOGE(sc, "Failed to create dma map for " 15285 "'fp %d spare rx mbuf' (%d)\n", 15286 i, rc); 15287 return (1); 15288 } 15289 15290 /***************************/ 15291 /* FP RX SGE MBUF DMA MAPS */ 15292 /***************************/ 15293 15294 /* create a dma tag for the rx sge mbufs */ 15295 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15296 1, /* alignment */ 15297 0, /* boundary limit */ 15298 BUS_SPACE_MAXADDR, /* restricted low */ 15299 BUS_SPACE_MAXADDR, /* restricted hi */ 15300 NULL, /* addr filter() */ 15301 NULL, /* addr filter() arg */ 15302 BCM_PAGE_SIZE, /* max map size */ 15303 1, /* num discontinuous */ 15304 BCM_PAGE_SIZE, /* max seg size */ 15305 0, /* flags */ 15306 NULL, /* lock() */ 15307 NULL, /* lock() arg */ 15308 &fp->rx_sge_mbuf_tag); /* returned dma tag */ 15309 if (rc != 0) { 15310 /* XXX unwind and free previous fastpath allocations */ 15311 BLOGE(sc, "Failed to create dma tag for " 15312 "'fp %d rx sge mbufs' (%d)\n", 15313 i, rc); 15314 return (1); 15315 } 15316 15317 /* create dma maps for the rx sge mbuf clusters */ 15318 for (j = 0; j < RX_SGE_TOTAL; j++) { 15319 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15320 BUS_DMA_NOWAIT, 15321 &fp->rx_sge_mbuf_chain[j].m_map)) { 15322 /* XXX unwind and free previous fastpath allocations */ 15323 BLOGE(sc, "Failed to create dma map for " 15324 "'fp %d rx sge mbuf %d' (%d)\n", 15325 i, j, rc); 15326 return (1); 15327 } 15328 } 15329 15330 /* create dma map for the spare rx sge mbuf cluster */ 15331 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15332 BUS_DMA_NOWAIT, 15333 &fp->rx_sge_mbuf_spare_map)) { 15334 /* XXX unwind and free previous fastpath allocations */ 15335 BLOGE(sc, "Failed to create dma map for " 15336 "'fp %d spare rx sge mbuf' (%d)\n", 15337 i, rc); 15338 return (1); 15339 } 15340 15341 /***************************/ 15342 /* FP RX TPA MBUF DMA MAPS */ 15343 /***************************/ 15344 15345 /* create dma maps for the rx tpa mbuf clusters */ 15346 max_agg_queues = MAX_AGG_QS(sc); 15347 15348 for (j = 0; j < max_agg_queues; j++) { 15349 if (bus_dmamap_create(fp->rx_mbuf_tag, 15350 BUS_DMA_NOWAIT, 15351 &fp->rx_tpa_info[j].bd.m_map)) { 15352 /* XXX unwind and free previous fastpath allocations */ 15353 BLOGE(sc, "Failed to create dma map for " 15354 "'fp %d rx tpa mbuf %d' (%d)\n", 15355 i, j, rc); 15356 return (1); 15357 } 15358 } 15359 15360 /* create dma map for the spare rx tpa mbuf cluster */ 15361 if (bus_dmamap_create(fp->rx_mbuf_tag, 15362 BUS_DMA_NOWAIT, 15363 &fp->rx_tpa_info_mbuf_spare_map)) { 15364 /* XXX unwind and free previous fastpath allocations */ 15365 BLOGE(sc, "Failed to create dma map for " 15366 "'fp %d spare rx tpa mbuf' (%d)\n", 15367 i, rc); 15368 return (1); 15369 } 15370 15371 bxe_init_sge_ring_bit_mask(fp); 15372 } 15373 15374 return (0); 15375 } 15376 15377 static void 15378 bxe_free_hsi_mem(struct bxe_softc *sc) 15379 { 15380 struct bxe_fastpath *fp; 15381 int max_agg_queues; 15382 int i, j; 15383 15384 if (sc->parent_dma_tag == NULL) { 15385 return; /* assume nothing was allocated */ 15386 } 15387 15388 for (i = 0; i < sc->num_queues; i++) { 15389 fp = &sc->fp[i]; 15390 15391 /*******************/ 15392 /* FP STATUS BLOCK */ 
15393 /*******************/ 15394 15395 bxe_dma_free(sc, &fp->sb_dma); 15396 memset(&fp->status_block, 0, sizeof(fp->status_block)); 15397 15398 /******************/ 15399 /* FP TX BD CHAIN */ 15400 /******************/ 15401 15402 bxe_dma_free(sc, &fp->tx_dma); 15403 fp->tx_chain = NULL; 15404 15405 /******************/ 15406 /* FP RX BD CHAIN */ 15407 /******************/ 15408 15409 bxe_dma_free(sc, &fp->rx_dma); 15410 fp->rx_chain = NULL; 15411 15412 /*******************/ 15413 /* FP RX RCQ CHAIN */ 15414 /*******************/ 15415 15416 bxe_dma_free(sc, &fp->rcq_dma); 15417 fp->rcq_chain = NULL; 15418 15419 /*******************/ 15420 /* FP RX SGE CHAIN */ 15421 /*******************/ 15422 15423 bxe_dma_free(sc, &fp->rx_sge_dma); 15424 fp->rx_sge_chain = NULL; 15425 15426 /***********************/ 15427 /* FP TX MBUF DMA MAPS */ 15428 /***********************/ 15429 15430 if (fp->tx_mbuf_tag != NULL) { 15431 for (j = 0; j < TX_BD_TOTAL; j++) { 15432 if (fp->tx_mbuf_chain[j].m_map != NULL) { 15433 bus_dmamap_unload(fp->tx_mbuf_tag, 15434 fp->tx_mbuf_chain[j].m_map); 15435 bus_dmamap_destroy(fp->tx_mbuf_tag, 15436 fp->tx_mbuf_chain[j].m_map); 15437 } 15438 } 15439 15440 bus_dma_tag_destroy(fp->tx_mbuf_tag); 15441 fp->tx_mbuf_tag = NULL; 15442 } 15443 15444 /***********************/ 15445 /* FP RX MBUF DMA MAPS */ 15446 /***********************/ 15447 15448 if (fp->rx_mbuf_tag != NULL) { 15449 for (j = 0; j < RX_BD_TOTAL; j++) { 15450 if (fp->rx_mbuf_chain[j].m_map != NULL) { 15451 bus_dmamap_unload(fp->rx_mbuf_tag, 15452 fp->rx_mbuf_chain[j].m_map); 15453 bus_dmamap_destroy(fp->rx_mbuf_tag, 15454 fp->rx_mbuf_chain[j].m_map); 15455 } 15456 } 15457 15458 if (fp->rx_mbuf_spare_map != NULL) { 15459 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15460 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15461 } 15462 15463 /***************************/ 15464 /* FP RX TPA MBUF DMA MAPS */ 15465 /***************************/ 15466 15467 max_agg_queues = MAX_AGG_QS(sc); 15468 15469 for (j = 0; j < max_agg_queues; j++) { 15470 if (fp->rx_tpa_info[j].bd.m_map != NULL) { 15471 bus_dmamap_unload(fp->rx_mbuf_tag, 15472 fp->rx_tpa_info[j].bd.m_map); 15473 bus_dmamap_destroy(fp->rx_mbuf_tag, 15474 fp->rx_tpa_info[j].bd.m_map); 15475 } 15476 } 15477 15478 if (fp->rx_tpa_info_mbuf_spare_map != NULL) { 15479 bus_dmamap_unload(fp->rx_mbuf_tag, 15480 fp->rx_tpa_info_mbuf_spare_map); 15481 bus_dmamap_destroy(fp->rx_mbuf_tag, 15482 fp->rx_tpa_info_mbuf_spare_map); 15483 } 15484 15485 bus_dma_tag_destroy(fp->rx_mbuf_tag); 15486 fp->rx_mbuf_tag = NULL; 15487 } 15488 15489 /***************************/ 15490 /* FP RX SGE MBUF DMA MAPS */ 15491 /***************************/ 15492 15493 if (fp->rx_sge_mbuf_tag != NULL) { 15494 for (j = 0; j < RX_SGE_TOTAL; j++) { 15495 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { 15496 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15497 fp->rx_sge_mbuf_chain[j].m_map); 15498 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15499 fp->rx_sge_mbuf_chain[j].m_map); 15500 } 15501 } 15502 15503 if (fp->rx_sge_mbuf_spare_map != NULL) { 15504 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15505 fp->rx_sge_mbuf_spare_map); 15506 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15507 fp->rx_sge_mbuf_spare_map); 15508 } 15509 15510 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); 15511 fp->rx_sge_mbuf_tag = NULL; 15512 } 15513 } 15514 15515 /***************************/ 15516 /* FW DECOMPRESSION BUFFER */ 15517 /***************************/ 15518 15519 bxe_dma_free(sc, &sc->gz_buf_dma); 15520 sc->gz_buf = 
NULL; 15521 free(sc->gz_strm, M_DEVBUF); 15522 sc->gz_strm = NULL; 15523 15524 /*******************/ 15525 /* SLOW PATH QUEUE */ 15526 /*******************/ 15527 15528 bxe_dma_free(sc, &sc->spq_dma); 15529 sc->spq = NULL; 15530 15531 /*************/ 15532 /* SLOW PATH */ 15533 /*************/ 15534 15535 bxe_dma_free(sc, &sc->sp_dma); 15536 sc->sp = NULL; 15537 15538 /***************/ 15539 /* EVENT QUEUE */ 15540 /***************/ 15541 15542 bxe_dma_free(sc, &sc->eq_dma); 15543 sc->eq = NULL; 15544 15545 /************************/ 15546 /* DEFAULT STATUS BLOCK */ 15547 /************************/ 15548 15549 bxe_dma_free(sc, &sc->def_sb_dma); 15550 sc->def_sb = NULL; 15551 15552 bus_dma_tag_destroy(sc->parent_dma_tag); 15553 sc->parent_dma_tag = NULL; 15554 } 15555 15556 /* 15557 * A DMAE transaction from a previous driver may have been in flight when the 15558 * pre-boot stage ended and boot began. This would invalidate the addresses of 15559 * the transaction, resulting in the was-error bit being set in the PCI block 15560 * and causing all hw-to-host PCIe transactions to time out. If this happened 15561 * we want to clear the interrupt which detected this from the pglueb and the 15562 * was-done bit. 15563 */ static void 15564 bxe_prev_interrupted_dmae(struct bxe_softc *sc) 15565 { 15566 uint32_t val; 15567 15568 if (!CHIP_IS_E1x(sc)) { 15569 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); 15570 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 15571 BLOGD(sc, DBG_LOAD, 15572 "Clearing 'was-error' bit that was set in pglueb\n"); 15573 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); 15574 } 15575 } 15576 } 15577 15578 static int 15579 bxe_prev_mcp_done(struct bxe_softc *sc) 15580 { 15581 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 15582 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 15583 if (!rc) { 15584 BLOGE(sc, "MCP response failure, aborting\n"); 15585 return (-1); 15586 } 15587 15588 return (0); 15589 } 15590 15591 static struct bxe_prev_list_node * 15592 bxe_prev_path_get_entry(struct bxe_softc *sc) 15593 { 15594 struct bxe_prev_list_node *tmp; 15595 15596 LIST_FOREACH(tmp, &bxe_prev_list, node) { 15597 if ((sc->pcie_bus == tmp->bus) && 15598 (sc->pcie_device == tmp->slot) && 15599 (SC_PATH(sc) == tmp->path)) { 15600 return (tmp); 15601 } 15602 } 15603 15604 return (NULL); 15605 } 15606 15607 static uint8_t 15608 bxe_prev_is_path_marked(struct bxe_softc *sc) 15609 { 15610 struct bxe_prev_list_node *tmp; 15611 int rc = FALSE; 15612 15613 mtx_lock(&bxe_prev_mtx); 15614 15615 tmp = bxe_prev_path_get_entry(sc); 15616 if (tmp) { 15617 if (tmp->aer) { 15618 BLOGD(sc, DBG_LOAD, 15619 "Path %d/%d/%d was marked by AER\n", 15620 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15621 } else { 15622 rc = TRUE; 15623 BLOGD(sc, DBG_LOAD, 15624 "Path %d/%d/%d was already cleaned from previous drivers\n", 15625 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15626 } 15627 } 15628 15629 mtx_unlock(&bxe_prev_mtx); 15630 15631 return (rc); 15632 } 15633 15634 static int 15635 bxe_prev_mark_path(struct bxe_softc *sc, 15636 uint8_t after_undi) 15637 { 15638 struct bxe_prev_list_node *tmp; 15639 15640 mtx_lock(&bxe_prev_mtx); 15641 15642 /* Check whether the entry for this path already exists */ 15643 tmp = bxe_prev_path_get_entry(sc); 15644 if (tmp) { 15645 if (!tmp->aer) { 15646 BLOGD(sc, DBG_LOAD, 15647 "Re-marking AER in path %d/%d/%d\n", 15648 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15649 } else { 15650 BLOGD(sc, DBG_LOAD, 15651 "Removing AER indication from path %d/%d/%d\n", 15652 sc->pcie_bus, sc->pcie_device,
SC_PATH(sc)); 15653 tmp->aer = 0; 15654 } 15655 15656 mtx_unlock(&bxe_prev_mtx); 15657 return (0); 15658 } 15659 15660 mtx_unlock(&bxe_prev_mtx); 15661 15662 /* Create an entry for this path and add it */ 15663 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, 15664 (M_NOWAIT | M_ZERO)); 15665 if (!tmp) { 15666 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); 15667 return (-1); 15668 } 15669 15670 tmp->bus = sc->pcie_bus; 15671 tmp->slot = sc->pcie_device; 15672 tmp->path = SC_PATH(sc); 15673 tmp->aer = 0; 15674 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0; 15675 15676 mtx_lock(&bxe_prev_mtx); 15677 15678 BLOGD(sc, DBG_LOAD, 15679 "Marked path %d/%d/%d - finished previous unload\n", 15680 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15681 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); 15682 15683 mtx_unlock(&bxe_prev_mtx); 15684 15685 return (0); 15686 } 15687 15688 static int 15689 bxe_do_flr(struct bxe_softc *sc) 15690 { 15691 int i; 15692 15693 /* only E2 and onwards support FLR */ 15694 if (CHIP_IS_E1x(sc)) { 15695 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); 15696 return (-1); 15697 } 15698 15699 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 15700 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 15701 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", 15702 sc->devinfo.bc_ver); 15703 return (-1); 15704 } 15705 15706 /* Wait for Transaction Pending bit clean */ 15707 for (i = 0; i < 4; i++) { 15708 if (i) { 15709 DELAY(((1 << (i - 1)) * 100) * 1000); 15710 } 15711 15712 if (!bxe_is_pcie_pending(sc)) { 15713 goto clear; 15714 } 15715 } 15716 15717 BLOGE(sc, "PCIE transaction is not cleared, " 15718 "proceeding with reset anyway\n"); 15719 15720 clear: 15721 15722 BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); 15723 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 15724 15725 return (0); 15726 } 15727 15728 struct bxe_mac_vals { 15729 uint32_t xmac_addr; 15730 uint32_t xmac_val; 15731 uint32_t emac_addr; 15732 uint32_t emac_val; 15733 uint32_t umac_addr; 15734 uint32_t umac_val; 15735 uint32_t bmac_addr; 15736 uint32_t bmac_val[2]; 15737 }; 15738 15739 static void 15740 bxe_prev_unload_close_mac(struct bxe_softc *sc, 15741 struct bxe_mac_vals *vals) 15742 { 15743 uint32_t val, base_addr, offset, mask, reset_reg; 15744 uint8_t mac_stopped = FALSE; 15745 uint8_t port = SC_PORT(sc); 15746 uint32_t wb_data[2]; 15747 15748 /* reset addresses as they also mark which values were changed */ 15749 vals->bmac_addr = 0; 15750 vals->umac_addr = 0; 15751 vals->xmac_addr = 0; 15752 vals->emac_addr = 0; 15753 15754 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 15755 15756 if (!CHIP_IS_E3(sc)) { 15757 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 15758 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 15759 if ((mask & reset_reg) && val) { 15760 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); 15761 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 15762 : NIG_REG_INGRESS_BMAC0_MEM; 15763 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL 15764 : BIGMAC_REGISTER_BMAC_CONTROL; 15765 15766 /* 15767 * use rd/wr since we cannot use dmae. This is safe 15768 * since MCP won't access the bus due to the request 15769 * to unload, and no function on the path can be 15770 * loaded at this time. 
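             * The BMAC control value spans two 32-bit words, which is why
             * it is saved and restored below as two separate 32-bit
             * accesses (wb_data[0] and wb_data[1]).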
15771 */ 15772 wb_data[0] = REG_RD(sc, base_addr + offset); 15773 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); 15774 vals->bmac_addr = base_addr + offset; 15775 vals->bmac_val[0] = wb_data[0]; 15776 vals->bmac_val[1] = wb_data[1]; 15777 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; 15778 REG_WR(sc, vals->bmac_addr, wb_data[0]); 15779 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); 15780 } 15781 15782 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); 15783 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; 15784 vals->emac_val = REG_RD(sc, vals->emac_addr); 15785 REG_WR(sc, vals->emac_addr, 0); 15786 mac_stopped = TRUE; 15787 } else { 15788 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 15789 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); 15790 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 15791 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); 15792 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); 15793 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); 15794 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 15795 vals->xmac_val = REG_RD(sc, vals->xmac_addr); 15796 REG_WR(sc, vals->xmac_addr, 0); 15797 mac_stopped = TRUE; 15798 } 15799 15800 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 15801 if (mask & reset_reg) { 15802 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); 15803 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 15804 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 15805 vals->umac_val = REG_RD(sc, vals->umac_addr); 15806 REG_WR(sc, vals->umac_addr, 0); 15807 mac_stopped = TRUE; 15808 } 15809 } 15810 15811 if (mac_stopped) { 15812 DELAY(20000); 15813 } 15814 } 15815 15816 #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 15817 #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) 15818 #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 15819 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 15820 15821 static void 15822 bxe_prev_unload_undi_inc(struct bxe_softc *sc, 15823 uint8_t port, 15824 uint8_t inc) 15825 { 15826 uint16_t rcq, bd; 15827 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); 15828 15829 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; 15830 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; 15831 15832 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); 15833 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); 15834 15835 BLOGD(sc, DBG_LOAD, 15836 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 15837 port, bd, rcq); 15838 } 15839 15840 static int 15841 bxe_prev_unload_common(struct bxe_softc *sc) 15842 { 15843 uint32_t reset_reg, tmp_reg = 0, rc; 15844 uint8_t prev_undi = FALSE; 15845 struct bxe_mac_vals mac_vals; 15846 uint32_t timer_count = 1000; 15847 uint32_t prev_brb; 15848 15849 /* 15850 * It is possible a previous function received 'common' answer, 15851 * but hasn't loaded yet, therefore creating a scenario of 15852 * multiple functions receiving 'common' on the same path. 
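 * Each function must therefore be prepared to find the path already
 * cleaned by another function, which is why the path-marked check below
 * short-circuits straight to bxe_prev_mcp_done().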
15853 */ 15854 BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); 15855 15856 memset(&mac_vals, 0, sizeof(mac_vals)); 15857 15858 if (bxe_prev_is_path_marked(sc)) { 15859 return (bxe_prev_mcp_done(sc)); 15860 } 15861 15862 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); 15863 15864 /* Reset should be performed after BRB is emptied */ 15865 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 15866 /* Close the MAC Rx to prevent BRB from filling up */ 15867 bxe_prev_unload_close_mac(sc, &mac_vals); 15868 15869 /* close LLH filters towards the BRB */ 15870 elink_set_rx_filter(&sc->link_params, 0); 15871 15872 /* 15873 * Check if the UNDI driver was previously loaded. 15874 * UNDI driver initializes CID offset for normal bell to 0x7 15875 */ 15876 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 15877 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); 15878 if (tmp_reg == 0x7) { 15879 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); 15880 prev_undi = TRUE; 15881 /* clear the UNDI indication */ 15882 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); 15883 /* clear possible idle check errors */ 15884 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); 15885 } 15886 } 15887 15888 /* wait until BRB is empty */ 15889 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15890 while (timer_count) { 15891 prev_brb = tmp_reg; 15892 15893 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15894 if (!tmp_reg) { 15895 break; 15896 } 15897 15898 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); 15899 15900 /* reset timer as long as BRB actually gets emptied */ 15901 if (prev_brb > tmp_reg) { 15902 timer_count = 1000; 15903 } else { 15904 timer_count--; 15905 } 15906 15907 /* If UNDI resides in memory, manually increment it */ 15908 if (prev_undi) { 15909 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); 15910 } 15911 15912 DELAY(10); 15913 } 15914 15915 if (!timer_count) { 15916 BLOGE(sc, "Failed to empty BRB\n"); 15917 } 15918 } 15919 15920 /* No packets are in the pipeline, path is ready for reset */ 15921 bxe_reset_common(sc); 15922 15923 if (mac_vals.xmac_addr) { 15924 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); 15925 } 15926 if (mac_vals.umac_addr) { 15927 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); 15928 } 15929 if (mac_vals.emac_addr) { 15930 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); 15931 } 15932 if (mac_vals.bmac_addr) { 15933 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 15934 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 15935 } 15936 15937 rc = bxe_prev_mark_path(sc, prev_undi); 15938 if (rc) { 15939 bxe_prev_mcp_done(sc); 15940 return (rc); 15941 } 15942 15943 return (bxe_prev_mcp_done(sc)); 15944 } 15945 15946 static int 15947 bxe_prev_unload_uncommon(struct bxe_softc *sc) 15948 { 15949 int rc; 15950 15951 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); 15952 15953 /* Test if previous unload process was already finished for this path */ 15954 if (bxe_prev_is_path_marked(sc)) { 15955 return (bxe_prev_mcp_done(sc)); 15956 } 15957 15958 BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); 15959 15960 /* 15961 * If function has FLR capabilities, and existing FW version matches 15962 * the one required, then FLR will be sufficient to clean any residue 15963 * left by previous driver 15964 */ 15965 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); 15966 if (!rc) { 15967 /* fw version is good */ 15968 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); 15969 rc = bxe_do_flr(sc); 15970 } 15971 15972 if (!rc) { 15973 /* FLR was performed */ 15974 BLOGD(sc, 
DBG_LOAD, "FLR successful\n");
        return (0);
    }

    BLOGD(sc, DBG_LOAD, "Could not FLR\n");

    /* Close the MCP request, return failure */
    rc = bxe_prev_mcp_done(sc);
    if (!rc) {
        rc = BXE_PREV_WAIT_NEEDED;
    }

    return (rc);
}

static int
bxe_prev_unload(struct bxe_softc *sc)
{
    int time_counter = 10;
    uint32_t fw, hw_lock_reg, hw_lock_val;
    uint32_t rc = 0;

    /*
     * Clear HW from errors which may have resulted from an interrupted
     * DMAE transaction.
     */
    bxe_prev_interrupted_dmae(sc);

    /* Release previously held locks */
    hw_lock_reg =
        (SC_FUNC(sc) <= 5) ?
            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);

    hw_lock_val = (REG_RD(sc, hw_lock_reg));
    if (hw_lock_val) {
        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
        }
        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
        REG_WR(sc, hw_lock_reg, 0xffffffff);
    } else {
        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
    }

    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
    }

    do {
        /* Lock MCP using an unload request */
        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
        if (!fw) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            break;
        }

        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
            rc = bxe_prev_unload_common(sc);
            break;
        }

        /* a non-common reply from the MCP might require looping */
        rc = bxe_prev_unload_uncommon(sc);
        if (rc != BXE_PREV_WAIT_NEEDED) {
            break;
        }

        DELAY(20000);
    } while (--time_counter);

    if (!time_counter || rc) {
        BLOGE(sc, "Failed to unload previous driver!\n");
        rc = -1;
    }

    return (rc);
}

void
bxe_dcbx_set_state(struct bxe_softc *sc,
                   uint8_t dcb_on,
                   uint32_t dcbx_enabled)
{
    if (!CHIP_IS_E1x(sc)) {
        sc->dcb_state = dcb_on;
        sc->dcbx_enabled = dcbx_enabled;
    } else {
        sc->dcb_state = FALSE;
        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
    }
    BLOGD(sc, DBG_LOAD,
          "DCB state [%s:%s]\n",
          dcb_on ? "ON" : "OFF",
          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
16075 "on-chip with negotiation" : "invalid"); 16076 } 16077 16078 /* must be called after sriov-enable */ 16079 static int 16080 bxe_set_qm_cid_count(struct bxe_softc *sc) 16081 { 16082 int cid_count = BXE_L2_MAX_CID(sc); 16083 16084 if (IS_SRIOV(sc)) { 16085 cid_count += BXE_VF_CIDS; 16086 } 16087 16088 if (CNIC_SUPPORT(sc)) { 16089 cid_count += CNIC_CID_MAX; 16090 } 16091 16092 return (roundup(cid_count, QM_CID_ROUND)); 16093 } 16094 16095 static void 16096 bxe_init_multi_cos(struct bxe_softc *sc) 16097 { 16098 int pri, cos; 16099 16100 uint32_t pri_map = 0; /* XXX change to user config */ 16101 16102 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { 16103 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 16104 if (cos < sc->max_cos) { 16105 sc->prio_to_cos[pri] = cos; 16106 } else { 16107 BLOGW(sc, "Invalid COS %d for priority %d " 16108 "(max COS is %d), setting to 0\n", 16109 cos, pri, (sc->max_cos - 1)); 16110 sc->prio_to_cos[pri] = 0; 16111 } 16112 } 16113 } 16114 16115 static int 16116 bxe_sysctl_state(SYSCTL_HANDLER_ARGS) 16117 { 16118 struct bxe_softc *sc; 16119 int error, result; 16120 16121 result = 0; 16122 error = sysctl_handle_int(oidp, &result, 0, req); 16123 16124 if (error || !req->newptr) { 16125 return (error); 16126 } 16127 16128 if (result == 1) { 16129 sc = (struct bxe_softc *)arg1; 16130 BLOGI(sc, "... dumping driver state ...\n"); 16131 /* XXX */ 16132 } 16133 16134 return (error); 16135 } 16136 16137 static int 16138 bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) 16139 { 16140 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16141 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; 16142 uint32_t *offset; 16143 uint64_t value = 0; 16144 int index = (int)arg2; 16145 16146 if (index >= BXE_NUM_ETH_STATS) { 16147 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); 16148 return (-1); 16149 } 16150 16151 offset = (eth_stats + bxe_eth_stats_arr[index].offset); 16152 16153 switch (bxe_eth_stats_arr[index].size) { 16154 case 4: 16155 value = (uint64_t)*offset; 16156 break; 16157 case 8: 16158 value = HILO_U64(*offset, *(offset + 1)); 16159 break; 16160 default: 16161 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", 16162 index, bxe_eth_stats_arr[index].size); 16163 return (-1); 16164 } 16165 16166 return (sysctl_handle_64(oidp, &value, 0, req)); 16167 } 16168 16169 static int 16170 bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) 16171 { 16172 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16173 uint32_t *eth_stats; 16174 uint32_t *offset; 16175 uint64_t value = 0; 16176 uint32_t q_stat = (uint32_t)arg2; 16177 uint32_t fp_index = ((q_stat >> 16) & 0xffff); 16178 uint32_t index = (q_stat & 0xffff); 16179 16180 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; 16181 16182 if (index >= BXE_NUM_ETH_Q_STATS) { 16183 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); 16184 return (-1); 16185 } 16186 16187 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); 16188 16189 switch (bxe_eth_q_stats_arr[index].size) { 16190 case 4: 16191 value = (uint64_t)*offset; 16192 break; 16193 case 8: 16194 value = HILO_U64(*offset, *(offset + 1)); 16195 break; 16196 default: 16197 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", 16198 index, bxe_eth_q_stats_arr[index].size); 16199 return (-1); 16200 } 16201 16202 return (sysctl_handle_64(oidp, &value, 0, req)); 16203 } 16204 16205 static void 16206 bxe_add_sysctls(struct bxe_softc *sc) 16207 { 16208 struct sysctl_ctx_list *ctx; 16209 struct sysctl_oid_list *children; 16210 struct sysctl_oid 
*queue_top, *queue; 16211 struct sysctl_oid_list *queue_top_children, *queue_children; 16212 char queue_num_buf[32]; 16213 uint32_t q_stat; 16214 int i, j; 16215 16216 ctx = device_get_sysctl_ctx(sc->dev); 16217 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 16218 16219 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", 16220 CTLFLAG_RD, BXE_DRIVER_VERSION, 0, 16221 "version"); 16222 16223 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 16224 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, 16225 "bootcode version"); 16226 16227 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", 16228 BCM_5710_FW_MAJOR_VERSION, 16229 BCM_5710_FW_MINOR_VERSION, 16230 BCM_5710_FW_REVISION_VERSION, 16231 BCM_5710_FW_ENGINEERING_VERSION); 16232 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 16233 CTLFLAG_RD, sc->fw_ver_str, 0, 16234 "firmware version"); 16235 16236 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", 16237 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : 16238 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : 16239 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : 16240 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : 16241 "Unknown")); 16242 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 16243 CTLFLAG_RD, sc->mf_mode_str, 0, 16244 "multifunction mode"); 16245 16246 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", 16247 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, 16248 "multifunction vnics per port"); 16249 16250 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 16251 CTLFLAG_RD, sc->mac_addr_str, 0, 16252 "mac address"); 16253 16254 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", 16255 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : 16256 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : 16257 (sc->devinfo.pcie_link_speed == 4) ? 
"8.0GT/s" : 16258 "???GT/s"), 16259 sc->devinfo.pcie_link_width); 16260 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 16261 CTLFLAG_RD, sc->pci_link_str, 0, 16262 "pci link status"); 16263 16264 sc->debug = bxe_debug; 16265 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", 16266 CTLFLAG_RW, &sc->debug, 16267 "debug logging mode"); 16268 16269 sc->rx_budget = bxe_rx_budget; 16270 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", 16271 CTLFLAG_RW, &sc->rx_budget, 0, 16272 "rx processing budget"); 16273 16274 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", 16275 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 16276 bxe_sysctl_state, "IU", "dump driver state"); 16277 16278 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 16279 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 16280 bxe_eth_stats_arr[i].string, 16281 CTLTYPE_U64 | CTLFLAG_RD, sc, i, 16282 bxe_sysctl_eth_stat, "LU", 16283 bxe_eth_stats_arr[i].string); 16284 } 16285 16286 /* add a new parent node for all queues "dev.bxe.#.queue" */ 16287 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", 16288 CTLFLAG_RD, NULL, "queue"); 16289 queue_top_children = SYSCTL_CHILDREN(queue_top); 16290 16291 for (i = 0; i < sc->num_queues; i++) { 16292 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ 16293 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); 16294 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, 16295 queue_num_buf, CTLFLAG_RD, NULL, 16296 "single queue"); 16297 queue_children = SYSCTL_CHILDREN(queue); 16298 16299 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { 16300 q_stat = ((i << 16) | j); 16301 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, 16302 bxe_eth_q_stats_arr[j].string, 16303 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, 16304 bxe_sysctl_eth_q_stat, "LU", 16305 bxe_eth_q_stats_arr[j].string); 16306 } 16307 } 16308 } 16309 16310 /* 16311 * Device attach function. 16312 * 16313 * Allocates device resources, performs secondary chip identification, and 16314 * initializes driver instance variables. This function is called from driver 16315 * load after a successful probe. 
16316 * 16317 * Returns: 16318 * 0 = Success, >0 = Failure 16319 */ 16320 static int 16321 bxe_attach(device_t dev) 16322 { 16323 struct bxe_softc *sc; 16324 16325 sc = device_get_softc(dev); 16326 16327 BLOGD(sc, DBG_LOAD, "Starting attach...\n"); 16328 16329 sc->state = BXE_STATE_CLOSED; 16330 16331 sc->dev = dev; 16332 sc->unit = device_get_unit(dev); 16333 16334 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); 16335 16336 sc->pcie_bus = pci_get_bus(dev); 16337 sc->pcie_device = pci_get_slot(dev); 16338 sc->pcie_func = pci_get_function(dev); 16339 16340 /* enable bus master capability */ 16341 pci_enable_busmaster(dev); 16342 16343 /* get the BARs */ 16344 if (bxe_allocate_bars(sc) != 0) { 16345 return (ENXIO); 16346 } 16347 16348 /* initialize the mutexes */ 16349 bxe_init_mutexes(sc); 16350 16351 /* prepare the periodic callout */ 16352 callout_init(&sc->periodic_callout, 0); 16353 16354 /* prepare the chip taskqueue */ 16355 sc->chip_tq_flags = CHIP_TQ_NONE; 16356 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), 16357 "bxe%d_chip_tq", sc->unit); 16358 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); 16359 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, 16360 taskqueue_thread_enqueue, 16361 &sc->chip_tq); 16362 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ 16363 "%s", sc->chip_tq_name); 16364 16365 /* get device info and set params */ 16366 if (bxe_get_device_info(sc) != 0) { 16367 BLOGE(sc, "getting device info\n"); 16368 bxe_deallocate_bars(sc); 16369 pci_disable_busmaster(dev); 16370 return (ENXIO); 16371 } 16372 16373 /* get final misc params */ 16374 bxe_get_params(sc); 16375 16376 /* set the default MTU (changed via ifconfig) */ 16377 sc->mtu = ETHERMTU; 16378 16379 bxe_set_modes_bitmap(sc); 16380 16381 /* XXX 16382 * If in AFEX mode and the function is configured for FCoE 16383 * then bail... no L2 allowed. 
16384 */ 16385 16386 /* get phy settings from shmem and 'and' against admin settings */ 16387 bxe_get_phy_info(sc); 16388 16389 /* initialize the FreeBSD ifnet interface */ 16390 if (bxe_init_ifnet(sc) != 0) { 16391 bxe_release_mutexes(sc); 16392 bxe_deallocate_bars(sc); 16393 pci_disable_busmaster(dev); 16394 return (ENXIO); 16395 } 16396 16397 /* allocate device interrupts */ 16398 if (bxe_interrupt_alloc(sc) != 0) { 16399 if (sc->ifp != NULL) { 16400 ether_ifdetach(sc->ifp); 16401 } 16402 ifmedia_removeall(&sc->ifmedia); 16403 bxe_release_mutexes(sc); 16404 bxe_deallocate_bars(sc); 16405 pci_disable_busmaster(dev); 16406 return (ENXIO); 16407 } 16408 16409 /* allocate ilt */ 16410 if (bxe_alloc_ilt_mem(sc) != 0) { 16411 bxe_interrupt_free(sc); 16412 if (sc->ifp != NULL) { 16413 ether_ifdetach(sc->ifp); 16414 } 16415 ifmedia_removeall(&sc->ifmedia); 16416 bxe_release_mutexes(sc); 16417 bxe_deallocate_bars(sc); 16418 pci_disable_busmaster(dev); 16419 return (ENXIO); 16420 } 16421 16422 /* allocate the host hardware/software hsi structures */ 16423 if (bxe_alloc_hsi_mem(sc) != 0) { 16424 bxe_free_ilt_mem(sc); 16425 bxe_interrupt_free(sc); 16426 if (sc->ifp != NULL) { 16427 ether_ifdetach(sc->ifp); 16428 } 16429 ifmedia_removeall(&sc->ifmedia); 16430 bxe_release_mutexes(sc); 16431 bxe_deallocate_bars(sc); 16432 pci_disable_busmaster(dev); 16433 return (ENXIO); 16434 } 16435 16436 /* need to reset chip if UNDI was active */ 16437 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 16438 /* init fw_seq */ 16439 sc->fw_seq = 16440 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 16441 DRV_MSG_SEQ_NUMBER_MASK); 16442 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); 16443 bxe_prev_unload(sc); 16444 } 16445 16446 #if 1 16447 /* XXX */ 16448 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16449 #else 16450 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && 16451 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && 16452 SHMEM2_RD(sc, dcbx_lldp_params_offset) && 16453 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { 16454 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); 16455 bxe_dcbx_init_params(sc); 16456 } else { 16457 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16458 } 16459 #endif 16460 16461 /* calculate qm_cid_count */ 16462 sc->qm_cid_count = bxe_set_qm_cid_count(sc); 16463 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); 16464 16465 sc->max_cos = 1; 16466 bxe_init_multi_cos(sc); 16467 16468 bxe_add_sysctls(sc); 16469 16470 return (0); 16471 } 16472 16473 /* 16474 * Device detach function. 16475 * 16476 * Stops the controller, resets the controller, and releases resources. 
16477 * 16478 * Returns: 16479 * 0 = Success, >0 = Failure 16480 */ 16481 static int 16482 bxe_detach(device_t dev) 16483 { 16484 struct bxe_softc *sc; 16485 if_t ifp; 16486 16487 sc = device_get_softc(dev); 16488 16489 BLOGD(sc, DBG_LOAD, "Starting detach...\n"); 16490 16491 ifp = sc->ifp; 16492 if (ifp != NULL && if_vlantrunkinuse(ifp)) { 16493 BLOGE(sc, "Cannot detach while VLANs are in use.\n"); 16494 return(EBUSY); 16495 } 16496 16497 /* stop the periodic callout */ 16498 bxe_periodic_stop(sc); 16499 16500 /* stop the chip taskqueue */ 16501 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); 16502 if (sc->chip_tq) { 16503 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); 16504 taskqueue_free(sc->chip_tq); 16505 sc->chip_tq = NULL; 16506 } 16507 16508 /* stop and reset the controller if it was open */ 16509 if (sc->state != BXE_STATE_CLOSED) { 16510 BXE_CORE_LOCK(sc); 16511 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); 16512 BXE_CORE_UNLOCK(sc); 16513 } 16514 16515 /* release the network interface */ 16516 if (ifp != NULL) { 16517 ether_ifdetach(ifp); 16518 } 16519 ifmedia_removeall(&sc->ifmedia); 16520 16521 /* XXX do the following based on driver state... */ 16522 16523 /* free the host hardware/software hsi structures */ 16524 bxe_free_hsi_mem(sc); 16525 16526 /* free ilt */ 16527 bxe_free_ilt_mem(sc); 16528 16529 /* release the interrupts */ 16530 bxe_interrupt_free(sc); 16531 16532 /* Release the mutexes*/ 16533 bxe_release_mutexes(sc); 16534 16535 /* Release the PCIe BAR mapped memory */ 16536 bxe_deallocate_bars(sc); 16537 16538 /* Release the FreeBSD interface. */ 16539 if (sc->ifp != NULL) { 16540 if_free(sc->ifp); 16541 } 16542 16543 pci_disable_busmaster(dev); 16544 16545 return (0); 16546 } 16547 16548 /* 16549 * Device shutdown function. 16550 * 16551 * Stops and resets the controller. 16552 * 16553 * Returns: 16554 * Nothing 16555 */ 16556 static int 16557 bxe_shutdown(device_t dev) 16558 { 16559 struct bxe_softc *sc; 16560 16561 sc = device_get_softc(dev); 16562 16563 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); 16564 16565 /* stop the periodic callout */ 16566 bxe_periodic_stop(sc); 16567 16568 BXE_CORE_LOCK(sc); 16569 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); 16570 BXE_CORE_UNLOCK(sc); 16571 16572 return (0); 16573 } 16574 16575 void 16576 bxe_igu_ack_sb(struct bxe_softc *sc, 16577 uint8_t igu_sb_id, 16578 uint8_t segment, 16579 uint16_t index, 16580 uint8_t op, 16581 uint8_t update) 16582 { 16583 uint32_t igu_addr = sc->igu_base_addr; 16584 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 16585 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); 16586 } 16587 16588 static void 16589 bxe_igu_clear_sb_gen(struct bxe_softc *sc, 16590 uint8_t func, 16591 uint8_t idu_sb_id, 16592 uint8_t is_pf) 16593 { 16594 uint32_t data, ctl, cnt = 100; 16595 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 16596 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 16597 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 16598 uint32_t sb_bit = 1 << (idu_sb_id%32); 16599 uint32_t func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 16600 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 16601 16602 /* Not supported in BC mode */ 16603 if (CHIP_INT_MODE_IS_BC(sc)) { 16604 return; 16605 } 16606 16607 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 16608 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 16609 IGU_REGULAR_CLEANUP_SET | 16610 IGU_REGULAR_BCLEANUP); 16611 16612 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 16613 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 16614 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 16615 16616 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16617 data, igu_addr_data); 16618 REG_WR(sc, igu_addr_data, data); 16619 16620 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16621 BUS_SPACE_BARRIER_WRITE); 16622 mb(); 16623 16624 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16625 ctl, igu_addr_ctl); 16626 REG_WR(sc, igu_addr_ctl, ctl); 16627 16628 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16629 BUS_SPACE_BARRIER_WRITE); 16630 mb(); 16631 16632 /* wait for clean up to finish */ 16633 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 16634 DELAY(20000); 16635 } 16636 16637 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 16638 BLOGD(sc, DBG_LOAD, 16639 "Unable to finish IGU cleanup: " 16640 "idu_sb_id %d offset %d bit %d (cnt %d)\n", 16641 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 16642 } 16643 } 16644 16645 static void 16646 bxe_igu_clear_sb(struct bxe_softc *sc, 16647 uint8_t idu_sb_id) 16648 { 16649 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 16650 } 16651 16652 16653 16654 16655 16656 16657 16658 /*******************/ 16659 /* ECORE CALLBACKS */ 16660 /*******************/ 16661 16662 static void 16663 bxe_reset_common(struct bxe_softc *sc) 16664 { 16665 uint32_t val = 0x1400; 16666 16667 /* reset_common */ 16668 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); 16669 16670 if (CHIP_IS_E3(sc)) { 16671 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 16672 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 16673 } 16674 16675 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 16676 } 16677 16678 static void 16679 bxe_common_init_phy(struct bxe_softc *sc) 16680 { 16681 uint32_t shmem_base[2]; 16682 uint32_t shmem2_base[2]; 16683 16684 /* Avoid common init in case MFW supports LFA */ 16685 if (SHMEM2_RD(sc, size) > 16686 (uint32_t)offsetof(struct shmem2_region, 16687 lfa_host_addr[SC_PORT(sc)])) { 16688 return; 16689 } 16690 16691 shmem_base[0] = sc->devinfo.shmem_base; 16692 shmem2_base[0] = sc->devinfo.shmem2_base; 16693 16694 if (!CHIP_IS_E1x(sc)) { 16695 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 16696 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 16697 } 16698 16699 bxe_acquire_phy_lock(sc); 16700 elink_common_init_phy(sc, shmem_base, shmem2_base, 16701 sc->devinfo.chip_id, 0); 16702 bxe_release_phy_lock(sc); 16703 } 16704 16705 static void 16706 bxe_pf_disable(struct bxe_softc *sc) 16707 { 16708 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 16709 16710 val &= ~IGU_PF_CONF_FUNC_EN; 16711 16712 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 16713 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 16714 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 16715 } 16716 16717 static void 16718 bxe_init_pxp(struct bxe_softc *sc) 16719 { 16720 uint16_t devctl; 16721 int r_order, w_order; 16722 16723 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); 16724 16725 BLOGD(sc, DBG_LOAD, "read 
0x%08x from devctl\n", devctl); 16726 16727 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); 16728 16729 if (sc->mrrs == -1) { 16730 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); 16731 } else { 16732 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); 16733 r_order = sc->mrrs; 16734 } 16735 16736 ecore_init_pxp_arb(sc, r_order, w_order); 16737 } 16738 16739 static uint32_t 16740 bxe_get_pretend_reg(struct bxe_softc *sc) 16741 { 16742 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 16743 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 16744 return (base + (SC_ABS_FUNC(sc)) * stride); 16745 } 16746 16747 /* 16748 * Called only on E1H or E2. 16749 * When pretending to be PF, the pretend value is the function number 0..7. 16750 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 16751 * combination. 16752 */ 16753 static int 16754 bxe_pretend_func(struct bxe_softc *sc, 16755 uint16_t pretend_func_val) 16756 { 16757 uint32_t pretend_reg; 16758 16759 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { 16760 return (-1); 16761 } 16762 16763 /* get my own pretend register */ 16764 pretend_reg = bxe_get_pretend_reg(sc); 16765 REG_WR(sc, pretend_reg, pretend_func_val); 16766 REG_RD(sc, pretend_reg); 16767 return (0); 16768 } 16769 16770 static void 16771 bxe_iov_init_dmae(struct bxe_softc *sc) 16772 { 16773 return; 16774 #if 0 16775 BLOGD(sc, DBG_LOAD, "SRIOV is %s\n", IS_SRIOV(sc) ? "ON" : "OFF"); 16776 16777 if (!IS_SRIOV(sc)) { 16778 return; 16779 } 16780 16781 REG_WR(sc, DMAE_REG_BACKWARD_COMP_EN, 0); 16782 #endif 16783 } 16784 16785 #if 0 16786 static int 16787 bxe_iov_init_ilt(struct bxe_softc *sc, 16788 uint16_t line) 16789 { 16790 return (line); 16791 #if 0 16792 int i; 16793 struct ecore_ilt* ilt = sc->ilt; 16794 16795 if (!IS_SRIOV(sc)) { 16796 return (line); 16797 } 16798 16799 /* set vfs ilt lines */ 16800 for (i = 0; i < BXE_VF_CIDS/ILT_PAGE_CIDS ; i++) { 16801 struct hw_dma *hw_cxt = SC_VF_CXT_PAGE(sc,i); 16802 ilt->lines[line+i].page = hw_cxt->addr; 16803 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 16804 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 16805 } 16806 return (line+i); 16807 #endif 16808 } 16809 #endif 16810 16811 static void 16812 bxe_iov_init_dq(struct bxe_softc *sc) 16813 { 16814 return; 16815 #if 0 16816 if (!IS_SRIOV(sc)) { 16817 return; 16818 } 16819 16820 /* Set the DQ such that the CID reflect the abs_vfid */ 16821 REG_WR(sc, DORQ_REG_VF_NORM_VF_BASE, 0); 16822 REG_WR(sc, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); 16823 16824 /* 16825 * Set VFs starting CID. If its > 0 the preceding CIDs are belong to 16826 * the PF L2 queues 16827 */ 16828 REG_WR(sc, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); 16829 16830 /* The VF window size is the log2 of the max number of CIDs per VF */ 16831 REG_WR(sc, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); 16832 16833 /* 16834 * The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 16835 * the Pf doorbell size although the 2 are independent. 
     */
    REG_WR(sc, DORQ_REG_VF_NORM_CID_OFST,
           BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

    /*
     * No security checks for now -
     * configure single rule (out of 16) mask = 0x1, value = 0x0,
     * CID range 0 - 0x1ffff
     */
    REG_WR(sc, DORQ_REG_VF_TYPE_MASK_0, 1);
    REG_WR(sc, DORQ_REG_VF_TYPE_VALUE_0, 0);
    REG_WR(sc, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
    REG_WR(sc, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

    /* set the number of VF allowed doorbells to the full DQ range */
    REG_WR(sc, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

    /* set the VF doorbell threshold */
    REG_WR(sc, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
#endif
}

/* send a NIG loopback debug packet */
static void
bxe_lb_pckt(struct bxe_softc *sc)
{
    uint32_t wb_write[3];

    /* Ethernet source and destination addresses */
    wb_write[0] = 0x55555555;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x20; /* SOP */
    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

    /* NON-IP protocol */
    wb_write[0] = 0x09000000;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/*
 * Some of the internal memories are not directly readable from the driver.
 * To test them we send debug packets.
 */
static int
bxe_int_mem_test(struct bxe_softc *sc)
{
    int factor;
    int count, i;
    uint32_t val = 0;

    if (CHIP_REV_IS_FPGA(sc)) {
        factor = 120;
    } else if (CHIP_REV_IS_EMUL(sc)) {
        factor = 200;
    } else {
        factor = 1;
    }

    /* disable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /* write 0 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send Ethernet packet */
    bxe_lb_pckt(sc);

    /* TODO: do we need to reset the NIG statistics here? */

    /* Wait until NIG register shows 1 packet of size 0x10 */
    count = 1000 * factor;
    while (count) {
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);
        if (val == 0x10) {
            break;
        }

        DELAY(10000);
        count--;
    }

    if (val != 0x10) {
        BLOGE(sc, "NIG timeout val=0x%x\n", val);
        return (-1);
    }

    /* wait until PRS register shows 1 packet */
    count = (1000 * factor);
    while (count) {
        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
        if (val == 1) {
            break;
        }

        DELAY(10000);
        count--;
    }

    if (val != 0x1) {
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
        return (-2);
    }

    /* Reset and init BRB, PRS */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    DELAY(50000);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    DELAY(50000);
    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);

    /* Disable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /* Write 0 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send 10 Ethernet packets */
    for (i = 0; i < 10; i++) {
        bxe_lb_pckt(sc);
    }

    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
    count = (1000 * factor);
    while (count) {
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);
        if (val == 0xb0) {
            break;
        }

        DELAY(10000);
        count--;
    }

    if (val != 0xb0) {
        BLOGE(sc, "NIG timeout val=0x%x\n", val);
        return (-3);
    }

    /* PRS register should show 2 packets */
    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
    if (val != 2) {
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
    }

    /* Write 1 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

    /* Wait until PRS register shows 3 packets */
    DELAY(10000 * factor);

    /* PRS register should now show 3 packets */
    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
    if (val != 3) {
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
    }

    /* clear NIG EOP FIFO */
    for (i = 0; i < 11; i++) {
        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
    }

    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
    if (val != 1) {
        BLOGE(sc, "clear of NIG failed\n");
        return (-4);
    }

    /* Reset and init BRB, PRS, NIG */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    DELAY(50000);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    DELAY(50000);
    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
    if (!CNIC_SUPPORT(sc)) {
        /* set NIC mode */
        REG_WR(sc, PRS_REG_NIC_MODE, 1);
    }

    /* Enable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);

    return (0);
17032 } 17033 17034 static void 17035 bxe_setup_fan_failure_detection(struct bxe_softc *sc) 17036 { 17037 int is_required; 17038 uint32_t val; 17039 int port; 17040 17041 is_required = 0; 17042 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 17043 SHARED_HW_CFG_FAN_FAILURE_MASK); 17044 17045 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 17046 is_required = 1; 17047 } 17048 /* 17049 * The fan failure mechanism is usually related to the PHY type since 17050 * the power consumption of the board is affected by the PHY. Currently, 17051 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 17052 */ 17053 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 17054 for (port = PORT_0; port < PORT_MAX; port++) { 17055 is_required |= elink_fan_failure_det_req(sc, 17056 sc->devinfo.shmem_base, 17057 sc->devinfo.shmem2_base, 17058 port); 17059 } 17060 } 17061 17062 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); 17063 17064 if (is_required == 0) { 17065 return; 17066 } 17067 17068 /* Fan failure is indicated by SPIO 5 */ 17069 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 17070 17071 /* set to active low mode */ 17072 val = REG_RD(sc, MISC_REG_SPIO_INT); 17073 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 17074 REG_WR(sc, MISC_REG_SPIO_INT, val); 17075 17076 /* enable interrupt to signal the IGU */ 17077 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17078 val |= MISC_SPIO_SPIO5; 17079 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 17080 } 17081 17082 static void 17083 bxe_enable_blocks_attention(struct bxe_softc *sc) 17084 { 17085 uint32_t val; 17086 17087 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17088 if (!CHIP_IS_E1x(sc)) { 17089 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 17090 } else { 17091 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 17092 } 17093 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17094 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17095 /* 17096 * mask read length error interrupts in brb for parser 17097 * (parsing unit and 'checksum and crc' unit) 17098 * these errors are legal (PU reads fixed length and CAC can cause 17099 * read length error on truncated packets) 17100 */ 17101 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 17102 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 17103 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 17104 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 17105 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 17106 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 17107 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 17108 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 17109 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 17110 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 17111 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 17112 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 17113 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 17114 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 17115 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 17116 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 17117 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 17118 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ 17119 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 17120 17121 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 17122 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 17123 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 17124 if (!CHIP_IS_E1x(sc)) { 17125 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 17126 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 17127 } 17128 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 17129 17130 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 17131 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 17132 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); 
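    /*
     * Note: writing 0 to a *_INT_MASK register unmasks every attention
     * source in that block; non-zero values (such as the BRB1 and PBF
     * masks in this function) leave the indicated bits masked.
     */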
    /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */

    if (!CHIP_IS_E1x(sc)) {
        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
    }

    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
    /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
}

/**
 * bxe_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @sc: driver handle
 */
static int
bxe_init_hw_common(struct bxe_softc *sc)
{
    uint8_t abs_func_id;
    uint32_t val;

    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
          SC_ABS_FUNC(sc));

    /*
     * take the RESET lock to protect undi_unload flow from accessing
     * registers while we are resetting the chip
     */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

    bxe_reset_common(sc);

    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);

    val = 0xfffc;
    if (CHIP_IS_E3(sc)) {
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
    }

    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
    BLOGD(sc, DBG_LOAD, "after misc block init\n");

    if (!CHIP_IS_E1x(sc)) {
        /*
         * In 4-port or 2-port mode we need to turn off master-enable for
         * everyone. After that we turn it back on for self. So, we disregard
         * multi-function and always disable all functions on the given path,
         * i.e. 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
         */
        for (abs_func_id = SC_PATH(sc);
             abs_func_id < (E2_FUNC_MAX * 2);
             abs_func_id += 2) {
            if (abs_func_id == SC_ABS_FUNC(sc)) {
                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
                continue;
            }

            bxe_pretend_func(sc, abs_func_id);

            /* clear pf enable */
            bxe_pf_disable(sc);

            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
        }
    }

    BLOGD(sc, DBG_LOAD, "after pf disable\n");

    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);

    if (CHIP_IS_E1(sc)) {
        /*
         * enable HW interrupt from PXP on USDM overflow
         * bit 16 on INT_MASK_0
         */
        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
    }

    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
    bxe_init_pxp(sc);

#ifdef __BIG_ENDIAN
    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
    /* make sure this value is 0 */
    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);

    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

    ecore_ilt_init_page_size(sc, INITOP_SET);

    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
    }

    /* let the HW do its magic... */
    DELAY(100000);

    /* finish PXP init */
    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
    if (val != 1) {
        BLOGE(sc, "PXP2 CFG failed\n");
        return (-1);
    }
    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
    if (val != 1) {
        BLOGE(sc, "PXP2 RD_INIT failed\n");
        return (-1);
    }

    BLOGD(sc, DBG_LOAD, "after pxp init\n");

    /*
     * Timer bug workaround for E2 only. We need to set the entire ILT to have
     * entries with value "0" and valid bit on. This needs to be done by the
     * first PF that is loaded in a path (i.e. common phase)
     */
    if (!CHIP_IS_E1x(sc)) {
        /*
         * In E2 there is a bug in the timers block that can cause function 6 / 7
         * (i.e. vnic3) to start even if it is marked as "scan-off".
         * This occurs when a different function (func2,3) is being marked
         * as "scan-off". Real-life scenario for example: if a driver is being
         * load-unloaded while func6,7 are down. This will cause the timer to access
         * the ilt, translate to a logical address and send a request to read/write.
         * Since the ilt for the function that is down is not valid, this will cause
         * a translation error which is unrecoverable.
         * The workaround is intended to make sure that when this happens nothing
         * fatal will occur. The workaround:
         * 1. First PF driver which loads on a path will:
         *    a. After taking the chip out of reset, by using pretend,
         *       it will write "0" to the following registers of
         *       the other vnics.
         *       REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
         *       REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF, 0);
         *       REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF, 0);
         *       And for itself it will write '1' to
         *       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
         *       dmae-operations (writing to pram for example.)
         *       note: can be done for only function 6,7 but cleaner this
         *             way.
         *    b. Write zero+valid to the entire ILT.
         *    c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
         *       VNIC3 (of that port). The range allocated will be the
         *       entire ILT. This is needed to prevent ILT range error.
         * 2. Any PF driver load flow:
         *    a. ILT update with the physical addresses of the allocated
         *       logical pages.
         *    b. Wait 20msec. - note that this timeout is needed to make
         *       sure there are no requests in one of the PXP internal
         *       queues with "old" ILT addresses.
         *    c. PF enable in the PGLC.
         *    d. Clear the was_error of the PF in the PGLC. (could have
         *       occurred while driver was down)
         *    e. PF enable in the CFC (WEAK + STRONG)
         *    f. Timers scan enable
         * 3. PF driver unload flow:
         *    a. Clear the Timers scan_en.
         *    b. Polling for scan_on=0 for that PF.
         *    c. Clear the PF enable bit in the PXP.
         *    d. Clear the PF enable in the CFC (WEAK + STRONG)
         *    e. Write zero+valid to all ILT entries (The valid bit must
         *       stay set)
         *    f. If this is VNIC 3 of a port then also init
         *       first_timers_ilt_entry to zero and last_timers_ilt_entry
         *       to the last entry in the ILT.
         *
         * Notes:
         * Currently the PF error in the PGLC is non-recoverable.
         * In the future there will be a recovery routine for this error.
         * Currently attention is masked.
         * Having an MCP lock on the load/unload process does not guarantee that
         * there is no Timer disable during Func6/7 enable. This is because the
         * Timers scan is currently being cleared by the MCP on FLR.
         * Step 2.d can be done only for PF6/7 and the driver can also check if
         * there is error before clearing it. But the flow above is simpler and
         * more general.
         * All ILT entries are written by zero+valid and not just PF6/7
         * ILT entries since in the future the ILT entries allocation for
         * PF-s might be dynamic.
         */
        struct ilt_client_info ilt_cli;
        struct ecore_ilt ilt;

        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        memset(&ilt, 0, sizeof(struct ecore_ilt));

        /* initialize dummy TM client */
        ilt_cli.start = 0;
        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;

        /*
         * Step 1: set zeroes to all ilt page entries with valid bit on
         * Step 2: set the timers first/last ilt entry to point
         *         to the entire range to prevent ILT range error for 3rd/4th
         *         vnic (this code assumes existence of the vnic)
         *
         * both steps performed by call to ecore_ilt_client_init_op()
         * with dummy TM client
         *
         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
         * and its counterpart are split registers
         */

        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
        bxe_pretend_func(sc, SC_ABS_FUNC(sc));

        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
    }

    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);

    if (!CHIP_IS_E1x(sc)) {
        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);

        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);

        /* let the HW do its magic...
*/ 17374 do { 17375 DELAY(200000); 17376 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 17377 } while (factor-- && (val != 1)); 17378 17379 if (val != 1) { 17380 BLOGE(sc, "ATC_INIT failed\n"); 17381 return (-1); 17382 } 17383 } 17384 17385 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); 17386 17387 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 17388 17389 bxe_iov_init_dmae(sc); 17390 17391 /* clean the DMAE memory */ 17392 sc->dmae_ready = 1; 17393 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); 17394 17395 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 17396 17397 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 17398 17399 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 17400 17401 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 17402 17403 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 17404 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 17405 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 17406 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 17407 17408 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 17409 17410 /* QM queues pointers table */ 17411 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 17412 17413 /* soft reset pulse */ 17414 REG_WR(sc, QM_REG_SOFT_RESET, 1); 17415 REG_WR(sc, QM_REG_SOFT_RESET, 0); 17416 17417 if (CNIC_SUPPORT(sc)) 17418 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 17419 17420 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 17421 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); 17422 if (!CHIP_REV_IS_SLOW(sc)) { 17423 /* enable hw interrupt from doorbell Q */ 17424 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17425 } 17426 17427 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17428 17429 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17430 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 17431 17432 if (!CHIP_IS_E1(sc)) { 17433 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 17434 } 17435 17436 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 17437 if (IS_MF_AFEX(sc)) { 17438 /* 17439 * configure that AFEX and VLAN headers must be 17440 * received in AFEX mode 17441 */ 17442 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 17443 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 17444 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 17445 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 17446 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 17447 } else { 17448 /* 17449 * Bit-map indicating which L2 hdrs may appear 17450 * after the basic Ethernet header 17451 */ 17452 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 17453 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 17454 } 17455 } 17456 17457 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 17458 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 17459 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 17460 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 17461 17462 if (!CHIP_IS_E1x(sc)) { 17463 /* reset VFC memories */ 17464 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17465 VFC_MEMORIES_RST_REG_CAM_RST | 17466 VFC_MEMORIES_RST_REG_RAM_RST); 17467 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17468 VFC_MEMORIES_RST_REG_CAM_RST | 17469 VFC_MEMORIES_RST_REG_RAM_RST); 17470 17471 DELAY(20000); 17472 } 17473 17474 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 17475 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 17476 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 17477 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 17478 17479 /* sync semi rtc */ 17480 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 17481 0x80000000); 17482 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 17483 0x80000000); 17484 17485 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 17486 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 17487 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 17488 17489 if (!CHIP_IS_E1x(sc)) { 17490 if (IS_MF_AFEX(sc)) { 17491 /* 17492 * configure that AFEX and VLAN headers must be 17493 * sent in AFEX mode 17494 */ 17495 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 17496 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 17497 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 17498 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 17499 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 17500 } else { 17501 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 17502 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 17503 } 17504 } 17505 17506 REG_WR(sc, SRC_REG_SOFT_RST, 1); 17507 17508 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 17509 17510 if (CNIC_SUPPORT(sc)) { 17511 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 17512 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 17513 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 17514 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 17515 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 17516 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 17517 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 17518 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 17519 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 17520 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 17521 } 17522 REG_WR(sc, SRC_REG_SOFT_RST, 0); 17523 17524 if (sizeof(union cdu_context) != 1024) { 17525 /* we currently assume that a context is 1024 bytes */ 17526 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", 17527 (long)sizeof(union cdu_context)); 17528 } 17529 17530 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 17531 val = (4 << 24) + (0 << 12) + 1024; 17532 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 17533 17534 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 17535 17536 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 17537 /* enable context validation interrupt from CFC */ 17538 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17539 17540 /* set the thresholds to prevent CFC/CDU race */ 17541 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 17542 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 17543 17544 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { 17545 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 17546 } 17547 17548 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 17549 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 17550 17551 /* Reset PCIE errors for debug */ 17552 REG_WR(sc, 0x2814, 0xffffffff); 17553 REG_WR(sc, 0x3820, 0xffffffff); 17554 17555 if (!CHIP_IS_E1x(sc)) { 
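        /*
         * E2 and newer only: also reset the PCIe transaction-layer
         * 'unsupported request' error indications (ERR_UNSPPORT*) for
         * all functions, continuing the PCIe error cleanup above.
         */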
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
    }

    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);

    if (!CHIP_IS_E1(sc)) {
        /* in E3 this is done in the per-port section */
        if (!CHIP_IS_E3(sc))
            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
    }

    if (CHIP_IS_E1H(sc)) {
        /* not applicable for E2 (and above ...) */
        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
    }

    if (CHIP_REV_IS_SLOW(sc)) {
        DELAY(200000);
    }

    /* finish CFC init */
    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC LL_INIT failed\n");
        return (-1);
    }
    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC AC_INIT failed\n");
        return (-1);
    }
    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC CAM_INIT failed\n");
        return (-1);
    }
    REG_WR(sc, CFC_REG_DEBUG0, 0);

    if (CHIP_IS_E1(sc)) {
        /* read NIG statistic to see if this is our first up since powerup */
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);

        /* do internal memory self test */
        if ((val == 0) && bxe_int_mem_test(sc)) {
            BLOGE(sc, "internal mem self test failed\n");
            return (-1);
        }
    }

    bxe_setup_fan_failure_detection(sc);

    /* clear PXP2 attentions */
    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);

    bxe_enable_blocks_attention(sc);

    if (!CHIP_REV_IS_SLOW(sc)) {
        ecore_enable_blocks_parity(sc);
    }

    if (!BXE_NOMCP(sc)) {
        if (CHIP_IS_E1x(sc)) {
            bxe_common_init_phy(sc);
        }
    }

    return (0);
}

/**
 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @sc: driver handle
 */
static int
bxe_init_hw_common_chip(struct bxe_softc *sc)
{
    int rc = bxe_init_hw_common(sc);

    if (rc) {
        return (rc);
    }

    /* In E2 2-PORT mode, same ext phy is used for the two paths */
    if (!BXE_NOMCP(sc)) {
        bxe_common_init_phy(sc);
    }

    return (0);
}

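/**
 * bxe_init_hw_port - init HW at the PORT0/PORT1 phase.
 *
 * @sc: driver handle
 *
 * Runs the per-port ecore init blocks, programs the BRB pause
 * thresholds, sets up the PRS/NIG header classification for the port,
 * and enables the SPIO5 (fan failure) attention for this port when
 * that SPIO is configured to generate interrupts.
 */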
static int
bxe_init_hw_port(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
    uint32_t low, high;
    uint32_t val;

    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);

    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_PXP, init_phase);
    ecore_init_block(sc, BLOCK_PXP2, init_phase);

    /*
     * Timers bug workaround: the common phase disables the pf_master
     * bit in pglue, so we need to enable it here before any DMAE
     * accesses are attempted. Therefore we manually added the
     * enable-master to the port phase (it also happens in the
     * function phase).
     */
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
    }

    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
    ecore_init_block(sc, BLOCK_QM, init_phase);

    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);

    /* QM cid (connection) count */
    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);

    if (CNIC_SUPPORT(sc)) {
        ecore_init_block(sc, BLOCK_TM, init_phase);
        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
    }

    ecore_init_block(sc, BLOCK_DORQ, init_phase);

    ecore_init_block(sc, BLOCK_BRB1, init_phase);

    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
        if (IS_MF(sc)) {
            low = (BXE_ONE_PORT(sc) ? 160 : 246);
        } else if (sc->mtu > 4096) {
            if (BXE_ONE_PORT(sc)) {
                low = 160;
            } else {
                val = sc->mtu;
                /* (24*1024 + val*4)/256 = 96 + val/64, rounded up */
                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
            }
        } else {
            low = (BXE_ONE_PORT(sc) ? 80 : 160);
        }
        high = (low + 56); /* 14*1024/256 */
        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
    }

    if (CHIP_IS_MODE_4_PORT(sc)) {
        REG_WR(sc, SC_PORT(sc) ?
               BRB1_REG_MAC_GUARANTIED_1 :
               BRB1_REG_MAC_GUARANTIED_0, 40);
    }

    ecore_init_block(sc, BLOCK_PRS, init_phase);
    if (CHIP_IS_E3B0(sc)) {
        if (IS_MF_AFEX(sc)) {
            /* configure headers for AFEX mode */
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
        } else {
            /*
             * Ovlan exists only if we are in multi-function +
             * switch-dependent mode; in switch-independent mode
             * there are no ovlan headers.
             */
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
        }
    }

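    /*
     * The "headers after basic" values above are bitmaps of the L2
     * headers that may follow the basic Ethernet header (see the NIG
     * programming with the same values later in this function): 6 is
     * 0b110 and 7 is 0b111, so the extra low-order bit is set exactly
     * when the path carries an outer VLAN tag.
     */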

    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);

    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);

    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);

    ecore_init_block(sc, BLOCK_PBF, init_phase);

    if (CHIP_IS_E1x(sc)) {
        /* configure PBF to work without PAUSE for MTU 9000 */
        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

        /* update threshold */
        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
        /* update init credit */
        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

        /* probe changes */
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
        DELAY(50);
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
    }

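    /*
     * For reference: the arbiter threshold above is 9040/16 = 565,
     * a 9000-byte MTU plus overhead expressed in 16-byte units, and
     * the initial credit works out to 565 + 553 - 22 = 1096 units.
     * The meaning of the 553 and 22 terms is not spelled out here;
     * they look like firmware-chosen margins.
     */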

    if (CNIC_SUPPORT(sc)) {
        ecore_init_block(sc, BLOCK_SRC, init_phase);
    }

    ecore_init_block(sc, BLOCK_CDU, init_phase);
    ecore_init_block(sc, BLOCK_CFC, init_phase);

    if (CHIP_IS_E1(sc)) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    }
    ecore_init_block(sc, BLOCK_HC, init_phase);

    ecore_init_block(sc, BLOCK_IGU, init_phase);

    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
    /*
     * init aeu_mask_attn_func_0/1:
     * - SF mode: bits 3-7 are masked, only bits 0-2 are in use
     * - MF mode: bit 3 is masked, bits 0-2 are in use as in SF,
     *   bits 4-7 are used for "per vn group attention"
     */
    val = IS_MF(sc) ? 0xF7 : 0x7;
    /* Enable DCBX attention for all but E1 */
    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

    ecore_init_block(sc, BLOCK_NIG, init_phase);

    if (!CHIP_IS_E1x(sc)) {
        /*
         * Bit-map indicating which L2 hdrs may appear after the
         * basic Ethernet header
         */
        if (IS_MF_AFEX(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
        } else {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC,
                   IS_MF_SD(sc) ? 7 : 6);
        }

        if (CHIP_IS_E3(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_LLH1_MF_MODE :
                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
        }
    }
    if (!CHIP_IS_E3(sc)) {
        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
    }

    if (!CHIP_IS_E1(sc)) {
        /* 0x2 disable mf_ov, 0x1 enable */
        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
               (IS_MF_SD(sc) ? 0x1 : 0x2));

        if (!CHIP_IS_E1x(sc)) {
            val = 0;
            switch (sc->devinfo.mf_info.mf_mode) {
            case MULTI_FUNCTION_SD:
                val = 1;
                break;
            case MULTI_FUNCTION_SI:
            case MULTI_FUNCTION_AFEX:
                val = 2;
                break;
            }

            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
                        NIG_REG_LLH0_CLS_TYPE), val);
        }
        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
    }

    /* If SPIO5 is set to generate interrupts, enable it for this port */
    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
    if (val & MISC_SPIO_SPIO5) {
        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
        val = REG_RD(sc, reg_addr);
        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
        REG_WR(sc, reg_addr, val);
    }

    return (0);
}

static uint32_t
bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
                       uint32_t         reg,
                       uint32_t         expected,
                       uint32_t         poll_count)
{
    uint32_t cur_cnt = poll_count;
    uint32_t val;

    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
        DELAY(FLR_WAIT_INTERVAL);
    }

    return (val);
}

static int
bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
                              uint32_t         reg,
                              char             *msg,
                              uint32_t         poll_cnt)
{
    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);

    if (val != 0) {
        BLOGE(sc, "%s usage count=%d\n", msg, val);
        return (1);
    }

    return (0);
}

/* Common routines with VF FLR cleanup */

static uint32_t
bxe_flr_clnup_poll_count(struct bxe_softc *sc)
{
    /* adjust polling timeout */
    if (CHIP_REV_IS_EMUL(sc)) {
        return (FLR_POLL_CNT * 2000);
    }

    if (CHIP_REV_IS_FPGA(sc)) {
        return (FLR_POLL_CNT * 120);
    }

    return (FLR_POLL_CNT);
}

static int
bxe_poll_hw_usage_counters(struct bxe_softc *sc,
                           uint32_t         poll_cnt)
{
    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
                                      "CFC PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      DORQ_REG_PF_USAGE_CNT,
                                      "DQ PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
                                      "QM PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
                                      "Timers VNIC usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
                                      "Timers NUM_SCANS usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for DMAE PF usage counter to zero */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
                                      "DMAE command register timed out",
                                      poll_cnt)) {
        return (1);
    }

    return (0);
}

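/*
 * A note on the FLR poll budget: bxe_flr_clnup_poll_count() returns
 * FLR_POLL_CNT polls of FLR_WAIT_INTERVAL usec each (about 10 msec
 * total on real silicon) and stretches that budget 120x on FPGA and
 * 2000x on emulation, where the hardware runs much slower.
 *
 * The OP_GEN_* macros below compose the "operation generator" command
 * that bxe_send_final_clnup() writes to XSDM_REG_OPERATION_GEN: a
 * completion parameter, a completion type, and the aggregated
 * interrupt vector index, whose valid bit is OR'd in separately.
 */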

#define OP_GEN_PARAM(param)                                               \
    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
#define OP_GEN_TYPE(type)                                                 \
    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
#define OP_GEN_AGG_VECT(index)                                            \
    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

static int
bxe_send_final_clnup(struct bxe_softc *sc,
                     uint8_t          clnup_func,
                     uint32_t         poll_cnt)
{
    uint32_t op_gen_command = 0;
    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
    int ret = 0;

    if (REG_RD(sc, comp_addr)) {
        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
        return (1);
    }

    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);

    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
        BLOGE(sc, "FW final cleanup did not succeed\n");
        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
              (REG_RD(sc, comp_addr)));
        bxe_panic(sc, ("FLR cleanup failed\n"));
        return (1);
    }

    /* Zero completion for next FLR */
    REG_WR(sc, comp_addr, 0);

    return (ret);
}

static void
bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
                       struct pbf_pN_buf_regs *regs,
                       uint32_t               poll_count)
{
    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
    uint32_t cur_cnt = poll_count;

    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
    crd = crd_start = REG_RD(sc, regs->crd);
    init_crd = REG_RD(sc, regs->init_crd);

    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
    BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd);
    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

    while ((crd != init_crd) &&
           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
            (init_crd - crd_start))) {
        if (cur_cnt--) {
            DELAY(FLR_WAIT_INTERVAL);
            crd = REG_RD(sc, regs->crd);
            crd_freed = REG_RD(sc, regs->crd_freed);
        } else {
            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
            BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd);
            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
            break;
        }
    }

    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

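/*
 * Note that bxe_pbf_pN_buf_flushed() above and the command-queue
 * variant below compute progress as
 * (uint32_t)((int32_t)freed - (int32_t)freed_start) rather than with
 * a direct comparison, so the check stays correct if the hardware
 * counter wraps: e.g. freed_start = 0xfffffff0 and freed = 0x10
 * still yields a delta of 0x20.
 */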
s:%x\n", regs->pN, freed); 18082 break; 18083 } 18084 } 18085 18086 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", 18087 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18088 } 18089 18090 static void 18091 bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) 18092 { 18093 struct pbf_pN_cmd_regs cmd_regs[] = { 18094 {0, (CHIP_IS_E3B0(sc)) ? 18095 PBF_REG_TQ_OCCUPANCY_Q0 : 18096 PBF_REG_P0_TQ_OCCUPANCY, 18097 (CHIP_IS_E3B0(sc)) ? 18098 PBF_REG_TQ_LINES_FREED_CNT_Q0 : 18099 PBF_REG_P0_TQ_LINES_FREED_CNT}, 18100 {1, (CHIP_IS_E3B0(sc)) ? 18101 PBF_REG_TQ_OCCUPANCY_Q1 : 18102 PBF_REG_P1_TQ_OCCUPANCY, 18103 (CHIP_IS_E3B0(sc)) ? 18104 PBF_REG_TQ_LINES_FREED_CNT_Q1 : 18105 PBF_REG_P1_TQ_LINES_FREED_CNT}, 18106 {4, (CHIP_IS_E3B0(sc)) ? 18107 PBF_REG_TQ_OCCUPANCY_LB_Q : 18108 PBF_REG_P4_TQ_OCCUPANCY, 18109 (CHIP_IS_E3B0(sc)) ? 18110 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 18111 PBF_REG_P4_TQ_LINES_FREED_CNT} 18112 }; 18113 18114 struct pbf_pN_buf_regs buf_regs[] = { 18115 {0, (CHIP_IS_E3B0(sc)) ? 18116 PBF_REG_INIT_CRD_Q0 : 18117 PBF_REG_P0_INIT_CRD , 18118 (CHIP_IS_E3B0(sc)) ? 18119 PBF_REG_CREDIT_Q0 : 18120 PBF_REG_P0_CREDIT, 18121 (CHIP_IS_E3B0(sc)) ? 18122 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 18123 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 18124 {1, (CHIP_IS_E3B0(sc)) ? 18125 PBF_REG_INIT_CRD_Q1 : 18126 PBF_REG_P1_INIT_CRD, 18127 (CHIP_IS_E3B0(sc)) ? 18128 PBF_REG_CREDIT_Q1 : 18129 PBF_REG_P1_CREDIT, 18130 (CHIP_IS_E3B0(sc)) ? 18131 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 18132 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 18133 {4, (CHIP_IS_E3B0(sc)) ? 18134 PBF_REG_INIT_CRD_LB_Q : 18135 PBF_REG_P4_INIT_CRD, 18136 (CHIP_IS_E3B0(sc)) ? 18137 PBF_REG_CREDIT_LB_Q : 18138 PBF_REG_P4_CREDIT, 18139 (CHIP_IS_E3B0(sc)) ? 18140 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 18141 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 18142 }; 18143 18144 int i; 18145 18146 /* Verify the command queues are flushed P0, P1, P4 */ 18147 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { 18148 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); 18149 } 18150 18151 /* Verify the transmission buffers are flushed P0, P1, P4 */ 18152 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { 18153 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); 18154 } 18155 } 18156 18157 static void 18158 bxe_hw_enable_status(struct bxe_softc *sc) 18159 { 18160 uint32_t val; 18161 18162 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); 18163 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); 18164 18165 val = REG_RD(sc, PBF_REG_DISABLE_PF); 18166 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); 18167 18168 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); 18169 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); 18170 18171 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); 18172 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); 18173 18174 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 18175 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); 18176 18177 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 18178 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); 18179 18180 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 18181 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); 18182 18183 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 18184 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); 18185 } 18186 18187 static int 18188 bxe_pf_flr_clnup(struct bxe_softc *sc) 18189 { 18190 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); 18191 18192 
BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); 18193 18194 /* Re-enable PF target read access */ 18195 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 18196 18197 /* Poll HW usage counters */ 18198 BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); 18199 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { 18200 return (-1); 18201 } 18202 18203 /* Zero the igu 'trailing edge' and 'leading edge' */ 18204 18205 /* Send the FW cleanup command */ 18206 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { 18207 return (-1); 18208 } 18209 18210 /* ATC cleanup */ 18211 18212 /* Verify TX hw is flushed */ 18213 bxe_tx_hw_flushed(sc, poll_cnt); 18214 18215 /* Wait 100ms (not adjusted according to platform) */ 18216 DELAY(100000); 18217 18218 /* Verify no pending pci transactions */ 18219 if (bxe_is_pcie_pending(sc)) { 18220 BLOGE(sc, "PCIE Transactions still pending\n"); 18221 } 18222 18223 /* Debug */ 18224 bxe_hw_enable_status(sc); 18225 18226 /* 18227 * Master enable - Due to WB DMAE writes performed before this 18228 * register is re-initialized as part of the regular function init 18229 */ 18230 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18231 18232 return (0); 18233 } 18234 18235 #if 0 18236 static void 18237 bxe_init_searcher(struct bxe_softc *sc) 18238 { 18239 int port = SC_PORT(sc); 18240 ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM); 18241 /* T1 hash bits value determines the T1 number of entries */ 18242 REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 18243 } 18244 #endif 18245 18246 static int 18247 bxe_init_hw_func(struct bxe_softc *sc) 18248 { 18249 int port = SC_PORT(sc); 18250 int func = SC_FUNC(sc); 18251 int init_phase = PHASE_PF0 + func; 18252 struct ecore_ilt *ilt = sc->ilt; 18253 uint16_t cdu_ilt_start; 18254 uint32_t addr, val; 18255 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; 18256 int i, main_mem_width, rc; 18257 18258 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); 18259 18260 /* FLR cleanup */ 18261 if (!CHIP_IS_E1x(sc)) { 18262 rc = bxe_pf_flr_clnup(sc); 18263 if (rc) { 18264 BLOGE(sc, "FLR cleanup failed!\n"); 18265 // XXX bxe_fw_dump(sc); 18266 // XXX bxe_idle_chk(sc); 18267 return (rc); 18268 } 18269 } 18270 18271 /* set MSI reconfigure capability */ 18272 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18273 addr = (port ? 
static int
bxe_init_hw_func(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int init_phase = PHASE_PF0 + func;
    struct ecore_ilt *ilt = sc->ilt;
    uint16_t cdu_ilt_start;
    uint32_t addr, val;
    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
    int i, main_mem_width, rc;

    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);

    /* FLR cleanup */
    if (!CHIP_IS_E1x(sc)) {
        rc = bxe_pf_flr_clnup(sc);
        if (rc) {
            BLOGE(sc, "FLR cleanup failed!\n");
            // XXX bxe_fw_dump(sc);
            // XXX bxe_idle_chk(sc);
            return (rc);
        }
    }

    /* set MSI reconfigure capability */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
        val = REG_RD(sc, addr);
        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
        REG_WR(sc, addr, val);
    }

    ecore_init_block(sc, BLOCK_PXP, init_phase);
    ecore_init_block(sc, BLOCK_PXP2, init_phase);

    ilt = sc->ilt;
    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

#if 0
    if (IS_SRIOV(sc)) {
        cdu_ilt_start += BXE_FIRST_VF_CID/ILT_PAGE_CIDS;
    }
    cdu_ilt_start = bxe_iov_init_ilt(sc, cdu_ilt_start);

#if (BXE_FIRST_VF_CID > 0)
    /*
     * If BXE_FIRST_VF_CID > 0 then the PF L2 cids precede
     * those of the VFs, so the start line should be reset
     */
    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
#endif
#endif

    for (i = 0; i < L2_ILT_LINES(sc); i++) {
        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
        ilt->lines[cdu_ilt_start + i].page_mapping =
            sc->context[i].vcxt_dma.paddr;
        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
    }
    ecore_ilt_init_op(sc, INITOP_SET);

#if 0
    if (!CONFIGURE_NIC_MODE(sc)) {
        bxe_init_searcher(sc);
        REG_WR(sc, PRS_REG_NIC_MODE, 0);
        BLOGD(sc, DBG_LOAD, "NIC MODE disabled\n");
    } else
#endif
    {
        /* Set NIC mode */
        REG_WR(sc, PRS_REG_NIC_MODE, 1);
        BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
    }

    if (!CHIP_IS_E1x(sc)) {
        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;

        /*
         * Turn on a single ISR mode in IGU if driver is going to use
         * INT#x or MSI
         */
        if (sc->interrupt_mode != INTR_MODE_MSIX) {
            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
        }

        /*
         * Timers bug workaround: function init part.
         * Need to wait 20msec after initializing ILT,
         * needed to make sure there are no requests in
         * one of the PXP internal queues with "old" ILT addresses
         */
        DELAY(20000);

        /*
         * Master enable - must be re-enabled because WB DMAE writes
         * were performed before this register is re-initialized as
         * part of the regular function init.
         */
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
        /* Enable the function in IGU */
        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
    }

    sc->dmae_ready = 1;

    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_NIG, init_phase);
    ecore_init_block(sc, BLOCK_SRC, init_phase);
    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);
    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, QM_REG_PF_EN, 1);

    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
    }

    ecore_init_block(sc, BLOCK_QM, init_phase);

    ecore_init_block(sc, BLOCK_TM, init_phase);
    ecore_init_block(sc, BLOCK_DORQ, init_phase);

    bxe_iov_init_dq(sc);

    ecore_init_block(sc, BLOCK_BRB1, init_phase);
    ecore_init_block(sc, BLOCK_PRS, init_phase);
    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);
    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);
    ecore_init_block(sc, BLOCK_PBF, init_phase);
    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PBF_REG_DISABLE_PF, 0);

    ecore_init_block(sc, BLOCK_CDU, init_phase);

    ecore_init_block(sc, BLOCK_CFC, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);

    if (IS_MF(sc)) {
        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
    }

    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);

    /* HC init per function */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        if (CHIP_IS_E1H(sc)) {
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        }
        ecore_init_block(sc, BLOCK_HC, init_phase);
    } else {
        int num_segs, sb_idx, prod_offset;

        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

        if (!CHIP_IS_E1x(sc)) {
            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
        }

        ecore_init_block(sc, BLOCK_IGU, init_phase);

        if (!CHIP_IS_E1x(sc)) {
            int dsb_idx = 0;
            /**
             * Producer memory:
             * E2 mode: address 0-135 match to the mapping memory;
             * 136 - PF0 default prod; 137 - PF1 default prod;
             * 138 - PF2 default prod; 139 - PF3 default prod;
             * 140 - PF0 attn prod; 141 - PF1 attn prod;
             * 142 - PF2 attn prod; 143 - PF3 attn prod;
             * 144-147 reserved.
             *
             * E1.5 mode - in backward compatible mode:
             * for a non-default SB, each even line in the memory
             * holds the U producer and each odd line holds the
             * C producer. The first 128 producers are for
             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
             * producers are for the DSB for each PF.
             * Each PF has five segments (the order inside each
             * segment is PF0; PF1; PF2; PF3): 128-131 U prods;
             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
             * 144-147 attn prods;
             */
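            /*
             * For example, per the E2 layout above, PF0's default
             * producer is line 136 and therefore lives at
             * IGU_REG_PROD_CONS_MEMORY + 136*4, with its attention
             * producer at line 140; the code below derives these
             * offsets from IGU_NORM_BASE_DSB_PROD and friends rather
             * than hard-coding them.
             */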
            /* non-default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
                prod_offset = (sc->igu_base_sb + sb_idx) * num_segs;

                for (i = 0; i < num_segs; i++) {
                    addr = IGU_REG_PROD_CONS_MEMORY +
                           (prod_offset + i) * 4;
                    REG_WR(sc, addr, 0);
                }
                /* send consumer update with value 0 */
                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
            }

            /* default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

            if (CHIP_IS_MODE_4_PORT(sc))
                dsb_idx = SC_FUNC(sc);
            else
                dsb_idx = SC_VN(sc);

            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
                           IGU_BC_BASE_DSB_PROD + dsb_idx :
                           IGU_NORM_BASE_DSB_PROD + dsb_idx);

            /*
             * igu prods come in chunks of E1HVN_MAX (4) -
             * it does not matter what the current chip mode is
             */
            for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) {
                addr = IGU_REG_PROD_CONS_MEMORY + (prod_offset + i)*4;
                REG_WR(sc, addr, 0);
            }
            /* send consumer update with 0 */
            if (CHIP_INT_MODE_IS_BC(sc)) {
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           CSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           XSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           TSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
            } else {
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
            }
            bxe_igu_clear_sb(sc, sc->igu_dsb_id);

            /*
             * !!! these should become driver const once
             * rf-tool supports split-68 const
             */
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
        }
    }

    /* Reset PCIE errors for debug */
    REG_WR(sc, 0x2114, 0xffffffff);
    REG_WR(sc, 0x2120, 0xffffffff);

    if (CHIP_IS_E1x(sc)) {
        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /* dwords */
        main_mem_base = HC_REG_MAIN_MEMORY +
                        SC_PORT(sc) * (main_mem_size * 4);
        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
        main_mem_width = 8;

        val = REG_RD(sc, main_mem_prty_clr);
        if (val) {
            BLOGD(sc, DBG_LOAD,
                  "Parity errors in HC block during function init (0x%x)!\n",
                  val);
        }

        /* Clear "false" parity errors in MSI-X table */
        for (i = main_mem_base;
             i < main_mem_base + main_mem_size * 4;
             i += main_mem_width) {
            bxe_read_dmae(sc, i, main_mem_width / 4);
            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
                           i, main_mem_width / 4);
        }
        /* Clear HC parity attention */
        REG_RD(sc, main_mem_prty_clr);
    }

#if 1
    /* Enable STORMs SP logging */
    REG_WR8(sc, BAR_USTRORM_INTMEM +
            USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_TSTRORM_INTMEM +
            TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
            CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_XSTRORM_INTMEM +
            XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
#endif

    elink_phy_probe(&sc->link_params);

    return (0);
}

static void
bxe_link_reset(struct bxe_softc *sc)
{
    if (!BXE_NOMCP(sc)) {
        bxe_acquire_phy_lock(sc);
        elink_lfa_reset(&sc->link_params, &sc->link_vars);
        bxe_release_phy_lock(sc);
    } else {
        if (!CHIP_REV_IS_SLOW(sc)) {
            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
        }
    }
}

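/**
 * bxe_reset_port - per-port teardown.
 *
 * @sc: driver handle
 *
 * Resets the physical link, masks the port's NIG interrupts, keeps
 * non-MCP traffic from reaching the BRB, clears the port's AEU mask,
 * and then checks whether the BRB has actually drained.
 */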
static void
bxe_reset_port(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t val;

    /* reset physical link */
    bxe_link_reset(sc);

    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

    /* Do not rcv packets to BRB */
    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
    /* Do not direct rcv packets that are not for MCP to the BRB */
    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
                       NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

    /* Configure AEU */
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

    DELAY(100000);

    /* Check for BRB port occupancy */
    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
    if (val) {
        BLOGD(sc, DBG_LOAD,
              "BRB1 is not empty, %d blocks are occupied\n", val);
    }

    /* TODO: Close Doorbell port? */
}

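/*
 * bxe_ilt_wr() below writes a single 8-byte ILT entry (hence the
 * index*8 addressing) as two 32-bit words split out by
 * ONCHIP_ADDR1/ONCHIP_ADDR2, using a DMAE write; E1 and later chips
 * expose the on-chip address table at different register bases.
 */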
static void
bxe_ilt_wr(struct bxe_softc *sc,
           uint32_t         index,
           bus_addr_t       addr)
{
    int reg;
    uint32_t wb_write[2];

    if (CHIP_IS_E1(sc)) {
        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
    } else {
        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
    }

    wb_write[0] = ONCHIP_ADDR1(addr);
    wb_write[1] = ONCHIP_ADDR2(addr);
    REG_WR_DMAE(sc, reg, wb_write, 2);
}

static void
bxe_clear_func_ilt(struct bxe_softc *sc,
                   uint32_t         func)
{
    uint32_t i, base = FUNC_ILT_BASE(func);

    for (i = base; i < base + ILT_PER_FUNC; i++) {
        bxe_ilt_wr(sc, i, 0);
    }
}

static void
bxe_reset_func(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int i;

    /* Disable the function in the FW */
    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

    /* FP SBs */
    FOR_EACH_ETH_QUEUE(sc, i) {
        fp = &sc->fp[i];
        REG_WR8(sc, BAR_CSTRORM_INTMEM +
                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
                SB_DISABLED);
    }

#if 0
    if (CNIC_LOADED(sc)) {
        /* CNIC SB */
        REG_WR8(sc, BAR_CSTRORM_INTMEM +
                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
                (bxe_cnic_fw_sb_id(sc)), SB_DISABLED);
    }
#endif

    /* SP SB */
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
            SB_DISABLED);

    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
    }

    /* Configure IGU */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    } else {
        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
    }

    if (CNIC_LOADED(sc)) {
        /* Disable Timer scan */
        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
        /*
         * Wait for at least 10ms and up to 2 seconds for the timers
         * scan to complete
         */
        for (i = 0; i < 200; i++) {
            DELAY(10000);
            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
                break;
        }
    }

    /* Clear ILT */
    bxe_clear_func_ilt(sc, func);

    /*
     * Timers bug workaround for E2: if this is vnic-3,
     * we need to set the entire ilt range for the timers.
     */
    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
        struct ilt_client_info ilt_cli;
        /* use dummy TM client */
        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        ilt_cli.start = 0;
        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;

        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
    }

    /* this assumes that reset_port() was called before reset_func() */
    if (!CHIP_IS_E1x(sc)) {
        bxe_pf_disable(sc);
    }

    sc->dmae_ready = 0;
}

static int
bxe_gunzip_init(struct bxe_softc *sc)
{
    return (0);
}

static void
bxe_gunzip_end(struct bxe_softc *sc)
{
    return;
}

static int
bxe_init_firmware(struct bxe_softc *sc)
{
    if (CHIP_IS_E1(sc)) {
        ecore_init_e1_firmware(sc);
        sc->iro_array = e1_iro_arr;
    } else if (CHIP_IS_E1H(sc)) {
        ecore_init_e1h_firmware(sc);
        sc->iro_array = e1h_iro_arr;
    } else if (!CHIP_IS_E1x(sc)) {
        ecore_init_e2_firmware(sc);
        sc->iro_array = e2_iro_arr;
    } else {
        BLOGE(sc, "Unsupported chip revision\n");
        return (-1);
    }

    return (0);
}

static void
bxe_release_firmware(struct bxe_softc *sc)
{
    /* Do nothing */
    return;
}

static int
ecore_gunzip(struct bxe_softc *sc,
             const uint8_t    *zbuf,
             int              len)
{
    /* XXX: Implement... */
    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
    return (FALSE);
}

static void
ecore_reg_wr_ind(struct bxe_softc *sc,
                 uint32_t         addr,
                 uint32_t         val)
{
    bxe_reg_wr_ind(sc, addr, val);
}

static void
ecore_write_dmae_phys_len(struct bxe_softc *sc,
                          bus_addr_t       phys_addr,
                          uint32_t         addr,
                          uint32_t         len)
{
    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
}

void
ecore_storm_memset_struct(struct bxe_softc *sc,
                          uint32_t         addr,
                          size_t           size,
                          uint32_t         *data)
{
    uint8_t i;

    for (i = 0; i < size/4; i++) {
        REG_WR(sc, addr + (i * 4), data[i]);
    }
}