1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2001-2024, Intel Corporation 5 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org> 6 * Copyright (c) 2021-2024 Rubicon Communications, LLC (Netgate) 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 #include "if_igc.h" 32 #include <sys/sbuf.h> 33 #include <machine/_inttypes.h> 34 35 #include <net/rss_config.h> 36 #include <netinet/in_rss.h> 37 38 39 /********************************************************************* 40 * PCI Device ID Table 41 * 42 * Used by probe to select devices to load on 43 * Last entry must be all 0s 44 * 45 * { Vendor ID, Device ID, String } 46 *********************************************************************/ 47 48 static const pci_vendor_info_t igc_vendor_info_array[] = 49 { 50 /* Intel(R) PRO/1000 Network Connection - igc */ 51 PVID(0x8086, IGC_DEV_ID_I225_LM, 52 "Intel(R) Ethernet Controller I225-LM"), 53 PVID(0x8086, IGC_DEV_ID_I225_V, 54 "Intel(R) Ethernet Controller I225-V"), 55 PVID(0x8086, IGC_DEV_ID_I225_K, 56 "Intel(R) Ethernet Controller I225-K"), 57 PVID(0x8086, IGC_DEV_ID_I225_I, 58 "Intel(R) Ethernet Controller I225-IT"), 59 PVID(0x8086, IGC_DEV_ID_I220_V, 60 "Intel(R) Ethernet Controller I220-V"), 61 PVID(0x8086, IGC_DEV_ID_I225_K2, 62 "Intel(R) Ethernet Controller I225-K(2)"), 63 PVID(0x8086, IGC_DEV_ID_I225_LMVP, 64 "Intel(R) Ethernet Controller I225-LMvP(2)"), 65 PVID(0x8086, IGC_DEV_ID_I226_K, 66 "Intel(R) Ethernet Controller I226-K"), 67 PVID(0x8086, IGC_DEV_ID_I226_LMVP, 68 "Intel(R) Ethernet Controller I226-LMvP"), 69 PVID(0x8086, IGC_DEV_ID_I225_IT, 70 "Intel(R) Ethernet Controller I225-IT(2)"), 71 PVID(0x8086, IGC_DEV_ID_I226_LM, 72 "Intel(R) Ethernet Controller I226-LM"), 73 PVID(0x8086, IGC_DEV_ID_I226_V, 74 "Intel(R) Ethernet Controller I226-V"), 75 PVID(0x8086, IGC_DEV_ID_I226_IT, 76 "Intel(R) Ethernet Controller I226-IT"), 77 PVID(0x8086, IGC_DEV_ID_I221_V, 78 "Intel(R) Ethernet Controller I221-V"), 79 PVID(0x8086, IGC_DEV_ID_I226_BLANK_NVM, 80 "Intel(R) Ethernet Controller I226(blankNVM)"), 81 PVID(0x8086, IGC_DEV_ID_I225_BLANK_NVM, 82 "Intel(R) Ethernet Controller I225(blankNVM)"), 83 /* required last entry */ 84 PVID_END 85 }; 86 87 /********************************************************************* 88 * Function prototypes 89 *********************************************************************/ 90 static void *igc_register(device_t); 91 static int igc_if_attach_pre(if_ctx_t); 92 static int igc_if_attach_post(if_ctx_t); 93 static int igc_if_detach(if_ctx_t); 94 static int igc_if_shutdown(if_ctx_t); 95 static int igc_if_suspend(if_ctx_t); 96 static int igc_if_resume(if_ctx_t); 97 98 static int igc_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, 99 int); 100 static int igc_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, 101 int); 102 static void igc_if_queues_free(if_ctx_t); 103 104 static uint64_t igc_if_get_counter(if_ctx_t, ift_counter); 105 static void igc_if_init(if_ctx_t); 106 static void igc_if_stop(if_ctx_t); 107 static void igc_if_media_status(if_ctx_t, struct ifmediareq *); 108 static int igc_if_media_change(if_ctx_t); 109 static int igc_if_mtu_set(if_ctx_t, uint32_t); 110 static void igc_if_timer(if_ctx_t, uint16_t); 111 static void igc_if_watchdog_reset(if_ctx_t); 112 static bool igc_if_needs_restart(if_ctx_t, enum iflib_restart_event); 113 114 static void igc_identify_hardware(if_ctx_t); 115 static int igc_allocate_pci_resources(if_ctx_t); 116 static void igc_free_pci_resources(if_ctx_t); 117 static void igc_reset(if_ctx_t); 118 static int igc_setup_interface(if_ctx_t); 119 static int igc_setup_msix(if_ctx_t); 120 121 static void igc_initialize_transmit_unit(if_ctx_t); 122 static void igc_initialize_receive_unit(if_ctx_t); 123 124 static void igc_if_intr_enable(if_ctx_t); 125 static void igc_if_intr_disable(if_ctx_t); 126 static int igc_if_rx_queue_intr_enable(if_ctx_t, uint16_t); 127 static int igc_if_tx_queue_intr_enable(if_ctx_t, uint16_t); 128 static void igc_if_multi_set(if_ctx_t); 129 static void igc_if_update_admin_status(if_ctx_t); 130 static void igc_if_debug(if_ctx_t); 131 static void igc_update_stats_counters(struct igc_softc *); 132 static void igc_add_hw_stats(struct igc_softc *); 133 static int igc_if_set_promisc(if_ctx_t, int); 134 static void igc_setup_vlan_hw_support(if_ctx_t); 135 static void igc_fw_version(struct igc_softc *); 136 static void igc_sbuf_fw_version(struct igc_fw_version *, struct sbuf *); 137 static void igc_print_fw_version(struct igc_softc *); 138 static int igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS); 139 static int igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); 140 static void igc_print_nvm_info(struct igc_softc *); 141 static int igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS); 142 static int igc_get_rs(SYSCTL_HANDLER_ARGS); 143 static void igc_print_debug_info(struct igc_softc *); 144 static int igc_is_valid_ether_addr(u8 *); 145 static void igc_neweitr(struct igc_softc *, struct igc_rx_queue *, 146 struct tx_ring *, struct rx_ring *); 147 static int igc_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS); 148 /* Management and WOL Support */ 149 static void igc_get_hw_control(struct igc_softc *); 150 static void igc_release_hw_control(struct igc_softc *); 151 static void igc_get_wakeup(if_ctx_t); 152 static void igc_enable_wakeup(if_ctx_t); 153 154 int igc_intr(void *); 155 156 /* MSI-X handlers */ 157 static int igc_if_msix_intr_assign(if_ctx_t, int); 158 static int igc_msix_link(void *); 159 static void igc_handle_link(void *context); 160 161 static int igc_set_flowcntl(SYSCTL_HANDLER_ARGS); 162 static int igc_sysctl_dmac(SYSCTL_HANDLER_ARGS); 163 static int igc_sysctl_eee(SYSCTL_HANDLER_ARGS); 164 165 static int igc_get_regs(SYSCTL_HANDLER_ARGS); 166 167 static void igc_configure_queues(struct igc_softc *); 168 169 170 /********************************************************************* 171 * FreeBSD Device Interface Entry Points 172 *********************************************************************/ 173 static device_method_t igc_methods[] = { 174 /* Device interface */ 175 DEVMETHOD(device_register, igc_register), 176 DEVMETHOD(device_probe, iflib_device_probe), 177 DEVMETHOD(device_attach, iflib_device_attach), 178 DEVMETHOD(device_detach, iflib_device_detach), 179 DEVMETHOD(device_shutdown, iflib_device_shutdown), 180 DEVMETHOD(device_suspend, iflib_device_suspend), 181 DEVMETHOD(device_resume, iflib_device_resume), 182 DEVMETHOD_END 183 }; 184 185 static driver_t igc_driver = { 186 "igc", igc_methods, sizeof(struct igc_softc), 187 }; 188 189 DRIVER_MODULE(igc, pci, igc_driver, 0, 0); 190 191 MODULE_DEPEND(igc, pci, 1, 1, 1); 192 MODULE_DEPEND(igc, ether, 1, 1, 1); 193 MODULE_DEPEND(igc, iflib, 1, 1, 1); 194 195 IFLIB_PNP_INFO(pci, igc, igc_vendor_info_array); 196 197 static device_method_t igc_if_methods[] = { 198 DEVMETHOD(ifdi_attach_pre, igc_if_attach_pre), 199 DEVMETHOD(ifdi_attach_post, igc_if_attach_post), 200 DEVMETHOD(ifdi_detach, igc_if_detach), 201 DEVMETHOD(ifdi_shutdown, igc_if_shutdown), 202 DEVMETHOD(ifdi_suspend, igc_if_suspend), 203 DEVMETHOD(ifdi_resume, igc_if_resume), 204 DEVMETHOD(ifdi_init, igc_if_init), 205 DEVMETHOD(ifdi_stop, igc_if_stop), 206 DEVMETHOD(ifdi_msix_intr_assign, igc_if_msix_intr_assign), 207 DEVMETHOD(ifdi_intr_enable, igc_if_intr_enable), 208 DEVMETHOD(ifdi_intr_disable, igc_if_intr_disable), 209 DEVMETHOD(ifdi_tx_queues_alloc, igc_if_tx_queues_alloc), 210 DEVMETHOD(ifdi_rx_queues_alloc, igc_if_rx_queues_alloc), 211 DEVMETHOD(ifdi_queues_free, igc_if_queues_free), 212 DEVMETHOD(ifdi_update_admin_status, igc_if_update_admin_status), 213 DEVMETHOD(ifdi_multi_set, igc_if_multi_set), 214 DEVMETHOD(ifdi_media_status, igc_if_media_status), 215 DEVMETHOD(ifdi_media_change, igc_if_media_change), 216 DEVMETHOD(ifdi_mtu_set, igc_if_mtu_set), 217 DEVMETHOD(ifdi_promisc_set, igc_if_set_promisc), 218 DEVMETHOD(ifdi_timer, igc_if_timer), 219 DEVMETHOD(ifdi_watchdog_reset, igc_if_watchdog_reset), 220 DEVMETHOD(ifdi_get_counter, igc_if_get_counter), 221 DEVMETHOD(ifdi_rx_queue_intr_enable, igc_if_rx_queue_intr_enable), 222 DEVMETHOD(ifdi_tx_queue_intr_enable, igc_if_tx_queue_intr_enable), 223 DEVMETHOD(ifdi_debug, igc_if_debug), 224 DEVMETHOD(ifdi_needs_restart, igc_if_needs_restart), 225 DEVMETHOD_END 226 }; 227 228 static driver_t igc_if_driver = { 229 "igc_if", igc_if_methods, sizeof(struct igc_softc) 230 }; 231 232 /********************************************************************* 233 * Tunable default values. 234 *********************************************************************/ 235 236 /* Allow common code without TSO */ 237 #ifndef CSUM_TSO 238 #define CSUM_TSO 0 239 #endif 240 241 static SYSCTL_NODE(_hw, OID_AUTO, igc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 242 "igc driver parameters"); 243 244 static int igc_disable_crc_stripping = 0; 245 SYSCTL_INT(_hw_igc, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN, 246 &igc_disable_crc_stripping, 0, "Disable CRC Stripping"); 247 248 static int igc_smart_pwr_down = false; 249 SYSCTL_INT(_hw_igc, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, 250 &igc_smart_pwr_down, 251 0, "Set to true to leave smart power down enabled on newer adapters"); 252 253 /* Controls whether promiscuous also shows bad packets */ 254 static int igc_debug_sbp = false; 255 SYSCTL_INT(_hw_igc, OID_AUTO, sbp, CTLFLAG_RDTUN, &igc_debug_sbp, 0, 256 "Show bad packets in promiscuous mode"); 257 258 /* Energy efficient ethernet - default to OFF */ 259 static int igc_eee_setting = 1; 260 SYSCTL_INT(_hw_igc, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &igc_eee_setting, 0, 261 "Enable Energy Efficient Ethernet"); 262 263 /* 264 * AIM: Adaptive Interrupt Moderation 265 * which means that the interrupt rate is varied over time based on the 266 * traffic for that interrupt vector 267 */ 268 static int igc_enable_aim = 1; 269 SYSCTL_INT(_hw_igc, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &igc_enable_aim, 270 0, "Enable adaptive interrupt moderation (1=normal, 2=lowlatency)"); 271 272 /* 273 ** Tuneable Interrupt rate 274 */ 275 static int igc_max_interrupt_rate = IGC_INTS_DEFAULT; 276 SYSCTL_INT(_hw_igc, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, 277 &igc_max_interrupt_rate, 0, "Maximum interrupts per second"); 278 279 extern struct if_txrx igc_txrx; 280 281 static struct if_shared_ctx igc_sctx_init = { 282 .isc_magic = IFLIB_MAGIC, 283 .isc_q_align = PAGE_SIZE, 284 .isc_tx_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header), 285 .isc_tx_maxsegsize = PAGE_SIZE, 286 .isc_tso_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header), 287 .isc_tso_maxsegsize = IGC_TSO_SEG_SIZE, 288 .isc_rx_maxsize = MAX_JUMBO_FRAME_SIZE, 289 .isc_rx_nsegments = 1, 290 .isc_rx_maxsegsize = MJUM9BYTES, 291 .isc_nfl = 1, 292 .isc_nrxqs = 1, 293 .isc_ntxqs = 1, 294 .isc_admin_intrcnt = 1, 295 .isc_vendor_info = igc_vendor_info_array, 296 .isc_driver_version = "1", 297 .isc_driver = &igc_if_driver, 298 .isc_flags = 299 IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM, 300 301 .isc_nrxd_min = {IGC_MIN_RXD}, 302 .isc_ntxd_min = {IGC_MIN_TXD}, 303 .isc_nrxd_max = {IGC_MAX_RXD}, 304 .isc_ntxd_max = {IGC_MAX_TXD}, 305 .isc_nrxd_default = {IGC_DEFAULT_RXD}, 306 .isc_ntxd_default = {IGC_DEFAULT_TXD}, 307 }; 308 309 /***************************************************************** 310 * 311 * Dump Registers 312 * 313 ****************************************************************/ 314 #define IGC_REGS_LEN 739 315 316 static int igc_get_regs(SYSCTL_HANDLER_ARGS) 317 { 318 struct igc_softc *sc = (struct igc_softc *)arg1; 319 struct igc_hw *hw = &sc->hw; 320 struct sbuf *sb; 321 u32 *regs_buff; 322 int rc; 323 324 regs_buff = malloc(sizeof(u32) * IGC_REGS_LEN, M_DEVBUF, M_WAITOK); 325 memset(regs_buff, 0, IGC_REGS_LEN * sizeof(u32)); 326 327 rc = sysctl_wire_old_buffer(req, 0); 328 MPASS(rc == 0); 329 if (rc != 0) { 330 free(regs_buff, M_DEVBUF); 331 return (rc); 332 } 333 334 sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req); 335 MPASS(sb != NULL); 336 if (sb == NULL) { 337 free(regs_buff, M_DEVBUF); 338 return (ENOMEM); 339 } 340 341 /* General Registers */ 342 regs_buff[0] = IGC_READ_REG(hw, IGC_CTRL); 343 regs_buff[1] = IGC_READ_REG(hw, IGC_STATUS); 344 regs_buff[2] = IGC_READ_REG(hw, IGC_CTRL_EXT); 345 regs_buff[3] = IGC_READ_REG(hw, IGC_ICR); 346 regs_buff[4] = IGC_READ_REG(hw, IGC_RCTL); 347 regs_buff[5] = IGC_READ_REG(hw, IGC_RDLEN(0)); 348 regs_buff[6] = IGC_READ_REG(hw, IGC_RDH(0)); 349 regs_buff[7] = IGC_READ_REG(hw, IGC_RDT(0)); 350 regs_buff[8] = IGC_READ_REG(hw, IGC_RXDCTL(0)); 351 regs_buff[9] = IGC_READ_REG(hw, IGC_RDBAL(0)); 352 regs_buff[10] = IGC_READ_REG(hw, IGC_RDBAH(0)); 353 regs_buff[11] = IGC_READ_REG(hw, IGC_TCTL); 354 regs_buff[12] = IGC_READ_REG(hw, IGC_TDBAL(0)); 355 regs_buff[13] = IGC_READ_REG(hw, IGC_TDBAH(0)); 356 regs_buff[14] = IGC_READ_REG(hw, IGC_TDLEN(0)); 357 regs_buff[15] = IGC_READ_REG(hw, IGC_TDH(0)); 358 regs_buff[16] = IGC_READ_REG(hw, IGC_TDT(0)); 359 regs_buff[17] = IGC_READ_REG(hw, IGC_TXDCTL(0)); 360 361 sbuf_printf(sb, "General Registers\n"); 362 sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]); 363 sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]); 364 sbuf_printf(sb, "\tCTRL_EXIT\t %08x\n\n", regs_buff[2]); 365 366 sbuf_printf(sb, "Interrupt Registers\n"); 367 sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]); 368 369 sbuf_printf(sb, "RX Registers\n"); 370 sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]); 371 sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]); 372 sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]); 373 sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]); 374 sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]); 375 sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]); 376 sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]); 377 378 sbuf_printf(sb, "TX Registers\n"); 379 sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]); 380 sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]); 381 sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]); 382 sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]); 383 sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]); 384 sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]); 385 sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]); 386 sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]); 387 sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]); 388 sbuf_printf(sb, "\tTDFHS\t %08x\n", regs_buff[20]); 389 sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]); 390 391 free(regs_buff, M_DEVBUF); 392 393 #ifdef DUMP_DESCS 394 { 395 if_softc_ctx_t scctx = sc->shared; 396 struct rx_ring *rxr = &rx_que->rxr; 397 struct tx_ring *txr = &tx_que->txr; 398 int ntxd = scctx->isc_ntxd[0]; 399 int nrxd = scctx->isc_nrxd[0]; 400 int j; 401 402 for (j = 0; j < nrxd; j++) { 403 u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error); 404 u32 length = le32toh(rxr->rx_base[j].wb.upper.length); 405 sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" 406 PRIx64 " Error:%d Length:%d\n", 407 j, rxr->rx_base[j].read.buffer_addr, staterr, length); 408 } 409 410 for (j = 0; j < min(ntxd, 256); j++) { 411 unsigned int *ptr = (unsigned int *)&txr->tx_base[j]; 412 413 sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x" 414 "[3]: %08x eop: %d DD=%d\n", 415 j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop, 416 buf->eop != -1 ? 417 txr->tx_base[buf->eop].upper.fields.status & 418 IGC_TXD_STAT_DD : 0); 419 420 } 421 } 422 #endif 423 424 rc = sbuf_finish(sb); 425 sbuf_delete(sb); 426 return(rc); 427 } 428 429 static void * 430 igc_register(device_t dev) 431 { 432 return (&igc_sctx_init); 433 } 434 435 static int 436 igc_set_num_queues(if_ctx_t ctx) 437 { 438 int maxqueues; 439 440 maxqueues = 4; 441 442 return (maxqueues); 443 } 444 445 #define IGC_CAPS \ 446 IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \ 447 IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_TSO4 | IFCAP_LRO | \ 448 IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6 | IFCAP_TSO6 449 450 /********************************************************************* 451 * Device initialization routine 452 * 453 * The attach entry point is called when the driver is being loaded. 454 * This routine identifies the type of hardware, allocates all resources 455 * and initializes the hardware. 456 * 457 * return 0 on success, positive on failure 458 *********************************************************************/ 459 static int 460 igc_if_attach_pre(if_ctx_t ctx) 461 { 462 struct igc_softc *sc; 463 if_softc_ctx_t scctx; 464 device_t dev; 465 struct igc_hw *hw; 466 int error = 0; 467 468 INIT_DEBUGOUT("igc_if_attach_pre: begin"); 469 dev = iflib_get_dev(ctx); 470 sc = iflib_get_softc(ctx); 471 472 sc->ctx = sc->osdep.ctx = ctx; 473 sc->dev = sc->osdep.dev = dev; 474 scctx = sc->shared = iflib_get_softc_ctx(ctx); 475 sc->media = iflib_get_media(ctx); 476 hw = &sc->hw; 477 478 /* SYSCTL stuff */ 479 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 480 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 481 OID_AUTO, "nvm", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 482 sc, 0, igc_sysctl_nvm_info, "I", "NVM Information"); 483 484 sc->enable_aim = igc_enable_aim; 485 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 486 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 487 OID_AUTO, "enable_aim", CTLFLAG_RW, 488 &sc->enable_aim, 0, 489 "Interrupt Moderation (1=normal, 2=lowlatency)"); 490 491 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 492 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 493 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, 494 sc, 0, igc_sysctl_print_fw_version, "A", 495 "Prints FW/NVM Versions"); 496 497 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 498 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 499 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 500 sc, 0, igc_sysctl_debug_info, "I", "Debug Information"); 501 502 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 503 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 504 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 505 sc, 0, igc_set_flowcntl, "I", "Flow Control"); 506 507 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 508 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 509 OID_AUTO, "reg_dump", 510 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 511 igc_get_regs, "A", "Dump Registers"); 512 513 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 514 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 515 OID_AUTO, "rs_dump", 516 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 517 igc_get_rs, "I", "Dump RS indexes"); 518 519 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 520 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 521 OID_AUTO, "dmac", 522 CTLTYPE_INT | CTLFLAG_RW, sc, 0, 523 igc_sysctl_dmac, "I", "DMA Coalesce"); 524 525 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 526 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 527 OID_AUTO, "tso_tcp_flags_mask_first_segment", 528 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 529 sc, 0, igc_sysctl_tso_tcp_flags_mask, "IU", 530 "TSO TCP flags mask for first segment"); 531 532 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 533 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 534 OID_AUTO, "tso_tcp_flags_mask_middle_segment", 535 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 536 sc, 1, igc_sysctl_tso_tcp_flags_mask, "IU", 537 "TSO TCP flags mask for middle segment"); 538 539 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 540 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 541 OID_AUTO, "tso_tcp_flags_mask_last_segment", 542 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 543 sc, 2, igc_sysctl_tso_tcp_flags_mask, "IU", 544 "TSO TCP flags mask for last segment"); 545 546 /* Determine hardware and mac info */ 547 igc_identify_hardware(ctx); 548 549 scctx->isc_tx_nsegments = IGC_MAX_SCATTER; 550 scctx->isc_nrxqsets_max = 551 scctx->isc_ntxqsets_max = igc_set_num_queues(ctx); 552 if (bootverbose) 553 device_printf(dev, "attach_pre capping queues at %d\n", 554 scctx->isc_ntxqsets_max); 555 556 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * 557 sizeof(union igc_adv_tx_desc), IGC_DBA_ALIGN); 558 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * 559 sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN); 560 scctx->isc_txd_size[0] = sizeof(union igc_adv_tx_desc); 561 scctx->isc_rxd_size[0] = sizeof(union igc_adv_rx_desc); 562 scctx->isc_txrx = &igc_txrx; 563 scctx->isc_tx_tso_segments_max = IGC_MAX_SCATTER; 564 scctx->isc_tx_tso_size_max = IGC_TSO_SIZE; 565 scctx->isc_tx_tso_segsize_max = IGC_TSO_SEG_SIZE; 566 scctx->isc_capabilities = scctx->isc_capenable = IGC_CAPS; 567 scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO | 568 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_SCTP | CSUM_IP6_SCTP; 569 570 /* 571 ** Some new devices, as with ixgbe, now may 572 ** use a different BAR, so we need to keep 573 ** track of which is used. 574 */ 575 scctx->isc_msix_bar = PCIR_BAR(IGC_MSIX_BAR); 576 if (pci_read_config(dev, scctx->isc_msix_bar, 4) == 0) 577 scctx->isc_msix_bar += 4; 578 579 /* Setup PCI resources */ 580 if (igc_allocate_pci_resources(ctx)) { 581 device_printf(dev, "Allocation of PCI resources failed\n"); 582 error = ENXIO; 583 goto err_pci; 584 } 585 586 /* Do Shared Code initialization */ 587 error = igc_setup_init_funcs(hw, true); 588 if (error) { 589 device_printf(dev, "Setup of Shared code failed, error %d\n", 590 error); 591 error = ENXIO; 592 goto err_pci; 593 } 594 595 igc_setup_msix(ctx); 596 igc_get_bus_info(hw); 597 598 hw->mac.autoneg = DO_AUTO_NEG; 599 hw->phy.autoneg_wait_to_complete = false; 600 hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 601 602 /* Copper options */ 603 if (hw->phy.media_type == igc_media_type_copper) { 604 hw->phy.mdix = AUTO_ALL_MODES; 605 } 606 607 /* 608 * Set the frame limits assuming 609 * standard ethernet sized frames. 610 */ 611 scctx->isc_max_frame_size = sc->hw.mac.max_frame_size = 612 ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; 613 614 /* Allocate multicast array memory. */ 615 sc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN * 616 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); 617 if (sc->mta == NULL) { 618 device_printf(dev, 619 "Can not allocate multicast setup array\n"); 620 error = ENOMEM; 621 goto err_late; 622 } 623 624 /* Check SOL/IDER usage */ 625 if (igc_check_reset_block(hw)) 626 device_printf(dev, "PHY reset is blocked" 627 " due to SOL/IDER session.\n"); 628 629 /* Sysctl for setting Energy Efficient Ethernet */ 630 sc->hw.dev_spec._i225.eee_disable = igc_eee_setting; 631 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 632 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 633 OID_AUTO, "eee_control", 634 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 635 sc, 0, igc_sysctl_eee, "I", 636 "Disable Energy Efficient Ethernet"); 637 638 /* 639 ** Start from a known state, this is 640 ** important in reading the nvm and 641 ** mac from that. 642 */ 643 igc_reset_hw(hw); 644 645 /* Make sure we have a good EEPROM before we read from it */ 646 if (igc_validate_nvm_checksum(hw) < 0) { 647 /* 648 ** Some PCI-E parts fail the first check due to 649 ** the link being in sleep state, call it again, 650 ** if it fails a second time its a real issue. 651 */ 652 if (igc_validate_nvm_checksum(hw) < 0) { 653 device_printf(dev, 654 "The EEPROM Checksum Is Not Valid\n"); 655 error = EIO; 656 goto err_late; 657 } 658 } 659 660 /* Copy the permanent MAC address out of the EEPROM */ 661 if (igc_read_mac_addr(hw) < 0) { 662 device_printf(dev, "EEPROM read error while reading MAC" 663 " address\n"); 664 error = EIO; 665 goto err_late; 666 } 667 668 if (!igc_is_valid_ether_addr(hw->mac.addr)) { 669 device_printf(dev, "Invalid MAC address\n"); 670 error = EIO; 671 goto err_late; 672 } 673 674 /* Save the EEPROM/NVM versions */ 675 igc_fw_version(sc); 676 677 igc_print_fw_version(sc); 678 679 /* 680 * Get Wake-on-Lan and Management info for later use 681 */ 682 igc_get_wakeup(ctx); 683 684 /* Enable only WOL MAGIC by default */ 685 scctx->isc_capenable &= ~IFCAP_WOL; 686 if (sc->wol != 0) 687 scctx->isc_capenable |= IFCAP_WOL_MAGIC; 688 689 iflib_set_mac(ctx, hw->mac.addr); 690 691 return (0); 692 693 err_late: 694 igc_release_hw_control(sc); 695 err_pci: 696 igc_free_pci_resources(ctx); 697 free(sc->mta, M_DEVBUF); 698 699 return (error); 700 } 701 702 static int 703 igc_if_attach_post(if_ctx_t ctx) 704 { 705 struct igc_softc *sc = iflib_get_softc(ctx); 706 struct igc_hw *hw = &sc->hw; 707 int error = 0; 708 709 /* Setup OS specific network interface */ 710 error = igc_setup_interface(ctx); 711 if (error != 0) { 712 goto err_late; 713 } 714 715 igc_reset(ctx); 716 717 /* Initialize statistics */ 718 igc_update_stats_counters(sc); 719 hw->mac.get_link_status = true; 720 igc_if_update_admin_status(ctx); 721 igc_add_hw_stats(sc); 722 723 /* the driver can now take control from firmware */ 724 igc_get_hw_control(sc); 725 726 INIT_DEBUGOUT("igc_if_attach_post: end"); 727 728 return (error); 729 730 err_late: 731 igc_release_hw_control(sc); 732 igc_free_pci_resources(ctx); 733 igc_if_queues_free(ctx); 734 free(sc->mta, M_DEVBUF); 735 736 return (error); 737 } 738 739 /********************************************************************* 740 * Device removal routine 741 * 742 * The detach entry point is called when the driver is being removed. 743 * This routine stops the adapter and deallocates all the resources 744 * that were allocated for driver operation. 745 * 746 * return 0 on success, positive on failure 747 *********************************************************************/ 748 static int 749 igc_if_detach(if_ctx_t ctx) 750 { 751 struct igc_softc *sc = iflib_get_softc(ctx); 752 753 INIT_DEBUGOUT("igc_if_detach: begin"); 754 755 igc_phy_hw_reset(&sc->hw); 756 757 igc_release_hw_control(sc); 758 igc_free_pci_resources(ctx); 759 760 return (0); 761 } 762 763 /********************************************************************* 764 * 765 * Shutdown entry point 766 * 767 **********************************************************************/ 768 769 static int 770 igc_if_shutdown(if_ctx_t ctx) 771 { 772 return igc_if_suspend(ctx); 773 } 774 775 /* 776 * Suspend/resume device methods. 777 */ 778 static int 779 igc_if_suspend(if_ctx_t ctx) 780 { 781 struct igc_softc *sc = iflib_get_softc(ctx); 782 783 igc_release_hw_control(sc); 784 igc_enable_wakeup(ctx); 785 return (0); 786 } 787 788 static int 789 igc_if_resume(if_ctx_t ctx) 790 { 791 igc_if_init(ctx); 792 793 return(0); 794 } 795 796 static int 797 igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 798 { 799 int max_frame_size; 800 struct igc_softc *sc = iflib_get_softc(ctx); 801 if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx); 802 803 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); 804 805 /* 9K Jumbo Frame size */ 806 max_frame_size = 9234; 807 808 if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) { 809 return (EINVAL); 810 } 811 812 scctx->isc_max_frame_size = sc->hw.mac.max_frame_size = 813 mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 814 return (0); 815 } 816 817 /********************************************************************* 818 * Init entry point 819 * 820 * This routine is used in two ways. It is used by the stack as 821 * init entry point in network interface structure. It is also used 822 * by the driver as a hw/sw initialization routine to get to a 823 * consistent state. 824 * 825 **********************************************************************/ 826 static void 827 igc_if_init(if_ctx_t ctx) 828 { 829 struct igc_softc *sc = iflib_get_softc(ctx); 830 if_softc_ctx_t scctx = sc->shared; 831 if_t ifp = iflib_get_ifp(ctx); 832 struct igc_tx_queue *tx_que; 833 int i; 834 835 INIT_DEBUGOUT("igc_if_init: begin"); 836 837 /* Get the latest mac address, User can use a LAA */ 838 bcopy(if_getlladdr(ifp), sc->hw.mac.addr, 839 ETHER_ADDR_LEN); 840 841 /* Put the address into the Receive Address Array */ 842 igc_rar_set(&sc->hw, sc->hw.mac.addr, 0); 843 844 /* Initialize the hardware */ 845 igc_reset(ctx); 846 igc_if_update_admin_status(ctx); 847 848 for (i = 0, tx_que = sc->tx_queues; i < sc->tx_num_queues; 849 i++, tx_que++) { 850 struct tx_ring *txr = &tx_que->txr; 851 852 txr->tx_rs_cidx = txr->tx_rs_pidx; 853 854 /* Initialize the last processed descriptor to be the end of 855 * the ring, rather than the start, so that we avoid an 856 * off-by-one error when calculating how many descriptors are 857 * done in the credits_update function. 858 */ 859 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1; 860 } 861 862 /* Setup VLAN support, basic and offload if available */ 863 IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN); 864 865 /* Prepare transmit descriptors and buffers */ 866 igc_initialize_transmit_unit(ctx); 867 868 /* Setup Multicast table */ 869 igc_if_multi_set(ctx); 870 871 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); 872 igc_initialize_receive_unit(ctx); 873 874 /* Set up VLAN support */ 875 igc_setup_vlan_hw_support(ctx); 876 877 /* Don't lose promiscuous settings */ 878 igc_if_set_promisc(ctx, if_getflags(ifp)); 879 igc_clear_hw_cntrs_base_generic(&sc->hw); 880 881 if (sc->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */ 882 igc_configure_queues(sc); 883 884 /* this clears any pending interrupts */ 885 IGC_READ_REG(&sc->hw, IGC_ICR); 886 IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC); 887 888 /* the driver can now take control from firmware */ 889 igc_get_hw_control(sc); 890 891 /* Set Energy Efficient Ethernet */ 892 igc_set_eee_i225(&sc->hw, true, true, true); 893 } 894 895 enum eitr_latency_target { 896 eitr_latency_disabled = 0, 897 eitr_latency_lowest = 1, 898 eitr_latency_low = 2, 899 eitr_latency_bulk = 3 900 }; 901 /********************************************************************* 902 * 903 * Helper to calculate next EITR value for AIM 904 * 905 *********************************************************************/ 906 static void 907 igc_neweitr(struct igc_softc *sc, struct igc_rx_queue *que, 908 struct tx_ring *txr, struct rx_ring *rxr) 909 { 910 struct igc_hw *hw = &sc->hw; 911 unsigned long bytes, bytes_per_packet, packets; 912 unsigned long rxbytes, rxpackets, txbytes, txpackets; 913 u32 neweitr; 914 u8 nextlatency; 915 916 rxbytes = atomic_load_long(&rxr->rx_bytes); 917 txbytes = atomic_load_long(&txr->tx_bytes); 918 919 /* Idle, do nothing */ 920 if (txbytes == 0 && rxbytes == 0) 921 return; 922 923 neweitr = 0; 924 925 if (sc->enable_aim) { 926 nextlatency = rxr->rx_nextlatency; 927 928 /* Use half default (4K) ITR if sub-gig */ 929 if (sc->link_speed < 1000) { 930 neweitr = IGC_INTS_4K; 931 goto igc_set_next_eitr; 932 } 933 /* Want at least enough packet buffer for two frames to AIM */ 934 if (sc->shared->isc_max_frame_size * 2 > (sc->pba << 10)) { 935 neweitr = igc_max_interrupt_rate; 936 sc->enable_aim = 0; 937 goto igc_set_next_eitr; 938 } 939 940 bytes = bytes_per_packet = 0; 941 /* Get largest values from the associated tx and rx ring */ 942 txpackets = atomic_load_long(&txr->tx_packets); 943 if (txpackets != 0) { 944 bytes = txbytes; 945 bytes_per_packet = txbytes / txpackets; 946 packets = txpackets; 947 } 948 rxpackets = atomic_load_long(&rxr->rx_packets); 949 if (rxpackets != 0) { 950 bytes = lmax(bytes, rxbytes); 951 bytes_per_packet = 952 lmax(bytes_per_packet, rxbytes / rxpackets); 953 packets = lmax(packets, rxpackets); 954 } 955 956 /* Latency state machine */ 957 switch (nextlatency) { 958 case eitr_latency_disabled: /* Bootstrapping */ 959 nextlatency = eitr_latency_low; 960 break; 961 case eitr_latency_lowest: /* 70k ints/s */ 962 /* TSO and jumbo frames */ 963 if (bytes_per_packet > 8000) 964 nextlatency = eitr_latency_bulk; 965 else if ((packets < 5) && (bytes > 512)) 966 nextlatency = eitr_latency_low; 967 break; 968 case eitr_latency_low: /* 20k ints/s */ 969 if (bytes > 10000) { 970 /* Handle TSO */ 971 if (bytes_per_packet > 8000) 972 nextlatency = eitr_latency_bulk; 973 else if ((packets < 10) || 974 (bytes_per_packet > 1200)) 975 nextlatency = eitr_latency_bulk; 976 else if (packets > 35) 977 nextlatency = eitr_latency_lowest; 978 } else if (bytes_per_packet > 2000) { 979 nextlatency = eitr_latency_bulk; 980 } else if (packets < 3 && bytes < 512) { 981 nextlatency = eitr_latency_lowest; 982 } 983 break; 984 case eitr_latency_bulk: /* 4k ints/s */ 985 if (bytes > 25000) { 986 if (packets > 35) 987 nextlatency = eitr_latency_low; 988 } else if (bytes < 1500) 989 nextlatency = eitr_latency_low; 990 break; 991 default: 992 nextlatency = eitr_latency_low; 993 device_printf(sc->dev, 994 "Unexpected neweitr transition %d\n", 995 nextlatency); 996 break; 997 } 998 999 /* Trim itr_latency_lowest for default AIM setting */ 1000 if (sc->enable_aim == 1 && nextlatency == eitr_latency_lowest) 1001 nextlatency = eitr_latency_low; 1002 1003 /* Request new latency */ 1004 rxr->rx_nextlatency = nextlatency; 1005 } else { 1006 /* We may have toggled to AIM disabled */ 1007 nextlatency = eitr_latency_disabled; 1008 rxr->rx_nextlatency = nextlatency; 1009 } 1010 1011 /* ITR state machine */ 1012 switch(nextlatency) { 1013 case eitr_latency_lowest: 1014 neweitr = IGC_INTS_70K; 1015 break; 1016 case eitr_latency_low: 1017 neweitr = IGC_INTS_20K; 1018 break; 1019 case eitr_latency_bulk: 1020 neweitr = IGC_INTS_4K; 1021 break; 1022 case eitr_latency_disabled: 1023 default: 1024 neweitr = igc_max_interrupt_rate; 1025 break; 1026 } 1027 1028 igc_set_next_eitr: 1029 neweitr = IGC_INTS_TO_EITR(neweitr); 1030 1031 neweitr |= IGC_EITR_CNT_IGNR; 1032 1033 if (neweitr != que->eitr_setting) { 1034 que->eitr_setting = neweitr; 1035 IGC_WRITE_REG(hw, IGC_EITR(que->msix), que->eitr_setting); 1036 } 1037 } 1038 1039 /********************************************************************* 1040 * 1041 * Fast Legacy/MSI Combined Interrupt Service routine 1042 * 1043 *********************************************************************/ 1044 int 1045 igc_intr(void *arg) 1046 { 1047 struct igc_softc *sc = arg; 1048 struct igc_hw *hw = &sc->hw; 1049 struct igc_rx_queue *que = &sc->rx_queues[0]; 1050 struct tx_ring *txr = &sc->tx_queues[0].txr; 1051 struct rx_ring *rxr = &que->rxr; 1052 if_ctx_t ctx = sc->ctx; 1053 u32 reg_icr; 1054 1055 reg_icr = IGC_READ_REG(hw, IGC_ICR); 1056 1057 /* Hot eject? */ 1058 if (reg_icr == 0xffffffff) 1059 return FILTER_STRAY; 1060 1061 /* Definitely not our interrupt. */ 1062 if (reg_icr == 0x0) 1063 return FILTER_STRAY; 1064 1065 if ((reg_icr & IGC_ICR_INT_ASSERTED) == 0) 1066 return FILTER_STRAY; 1067 1068 /* 1069 * Only MSI-X interrupts have one-shot behavior by taking advantage 1070 * of the EIAC register. Thus, explicitly disable interrupts. This 1071 * also works around the MSI message reordering errata on certain 1072 * systems. 1073 */ 1074 IFDI_INTR_DISABLE(ctx); 1075 1076 /* Link status change */ 1077 if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) 1078 igc_handle_link(ctx); 1079 1080 if (reg_icr & IGC_ICR_RXO) 1081 sc->rx_overruns++; 1082 1083 igc_neweitr(sc, que, txr, rxr); 1084 1085 /* Reset state */ 1086 txr->tx_bytes = 0; 1087 txr->tx_packets = 0; 1088 rxr->rx_bytes = 0; 1089 rxr->rx_packets = 0; 1090 1091 return (FILTER_SCHEDULE_THREAD); 1092 } 1093 1094 static int 1095 igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) 1096 { 1097 struct igc_softc *sc = iflib_get_softc(ctx); 1098 struct igc_rx_queue *rxq = &sc->rx_queues[rxqid]; 1099 1100 IGC_WRITE_REG(&sc->hw, IGC_EIMS, rxq->eims); 1101 return (0); 1102 } 1103 1104 static int 1105 igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) 1106 { 1107 struct igc_softc *sc = iflib_get_softc(ctx); 1108 struct igc_tx_queue *txq = &sc->tx_queues[txqid]; 1109 1110 IGC_WRITE_REG(&sc->hw, IGC_EIMS, txq->eims); 1111 return (0); 1112 } 1113 1114 /********************************************************************* 1115 * 1116 * MSI-X RX Interrupt Service routine 1117 * 1118 **********************************************************************/ 1119 static int 1120 igc_msix_que(void *arg) 1121 { 1122 struct igc_rx_queue *que = arg; 1123 struct igc_softc *sc = que->sc; 1124 struct tx_ring *txr = &sc->tx_queues[que->msix].txr; 1125 struct rx_ring *rxr = &que->rxr; 1126 1127 ++que->irqs; 1128 1129 igc_neweitr(sc, que, txr, rxr); 1130 1131 /* Reset state */ 1132 txr->tx_bytes = 0; 1133 txr->tx_packets = 0; 1134 rxr->rx_bytes = 0; 1135 rxr->rx_packets = 0; 1136 1137 return (FILTER_SCHEDULE_THREAD); 1138 } 1139 1140 /********************************************************************* 1141 * 1142 * MSI-X Link Fast Interrupt Service routine 1143 * 1144 **********************************************************************/ 1145 static int 1146 igc_msix_link(void *arg) 1147 { 1148 struct igc_softc *sc = arg; 1149 u32 reg_icr; 1150 1151 ++sc->link_irq; 1152 MPASS(sc->hw.back != NULL); 1153 reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR); 1154 1155 if (reg_icr & IGC_ICR_RXO) 1156 sc->rx_overruns++; 1157 1158 if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 1159 igc_handle_link(sc->ctx); 1160 } 1161 1162 IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC); 1163 IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->link_mask); 1164 1165 return (FILTER_HANDLED); 1166 } 1167 1168 static void 1169 igc_handle_link(void *context) 1170 { 1171 if_ctx_t ctx = context; 1172 struct igc_softc *sc = iflib_get_softc(ctx); 1173 1174 sc->hw.mac.get_link_status = true; 1175 iflib_admin_intr_deferred(ctx); 1176 } 1177 1178 /********************************************************************* 1179 * 1180 * Media Ioctl callback 1181 * 1182 * This routine is called whenever the user queries the status of 1183 * the interface using ifconfig. 1184 * 1185 **********************************************************************/ 1186 static void 1187 igc_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) 1188 { 1189 struct igc_softc *sc = iflib_get_softc(ctx); 1190 1191 INIT_DEBUGOUT("igc_if_media_status: begin"); 1192 1193 iflib_admin_intr_deferred(ctx); 1194 1195 ifmr->ifm_status = IFM_AVALID; 1196 ifmr->ifm_active = IFM_ETHER; 1197 1198 if (!sc->link_active) { 1199 return; 1200 } 1201 1202 ifmr->ifm_status |= IFM_ACTIVE; 1203 1204 switch (sc->link_speed) { 1205 case 10: 1206 ifmr->ifm_active |= IFM_10_T; 1207 break; 1208 case 100: 1209 ifmr->ifm_active |= IFM_100_TX; 1210 break; 1211 case 1000: 1212 ifmr->ifm_active |= IFM_1000_T; 1213 break; 1214 case 2500: 1215 ifmr->ifm_active |= IFM_2500_T; 1216 break; 1217 } 1218 1219 if (sc->link_duplex == FULL_DUPLEX) 1220 ifmr->ifm_active |= IFM_FDX; 1221 else 1222 ifmr->ifm_active |= IFM_HDX; 1223 } 1224 1225 /********************************************************************* 1226 * 1227 * Media Ioctl callback 1228 * 1229 * This routine is called when the user changes speed/duplex using 1230 * media/mediopt option with ifconfig. 1231 * 1232 **********************************************************************/ 1233 static int 1234 igc_if_media_change(if_ctx_t ctx) 1235 { 1236 struct igc_softc *sc = iflib_get_softc(ctx); 1237 struct ifmedia *ifm = iflib_get_media(ctx); 1238 1239 INIT_DEBUGOUT("igc_if_media_change: begin"); 1240 1241 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1242 return (EINVAL); 1243 1244 sc->hw.mac.autoneg = DO_AUTO_NEG; 1245 1246 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1247 case IFM_AUTO: 1248 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 1249 break; 1250 case IFM_2500_T: 1251 sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; 1252 break; 1253 case IFM_1000_T: 1254 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1255 break; 1256 case IFM_100_TX: 1257 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1258 sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL; 1259 else 1260 sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF; 1261 break; 1262 case IFM_10_T: 1263 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1264 sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL; 1265 else 1266 sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF; 1267 break; 1268 default: 1269 device_printf(sc->dev, "Unsupported media type\n"); 1270 } 1271 1272 igc_if_init(ctx); 1273 1274 return (0); 1275 } 1276 1277 static int 1278 igc_if_set_promisc(if_ctx_t ctx, int flags) 1279 { 1280 struct igc_softc *sc = iflib_get_softc(ctx); 1281 if_t ifp = iflib_get_ifp(ctx); 1282 u32 reg_rctl; 1283 int mcnt = 0; 1284 1285 reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL); 1286 reg_rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_UPE); 1287 if (flags & IFF_ALLMULTI) 1288 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 1289 else 1290 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); 1291 1292 /* Don't disable if in MAX groups */ 1293 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 1294 reg_rctl &= (~IGC_RCTL_MPE); 1295 IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl); 1296 1297 if (flags & IFF_PROMISC) { 1298 reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE); 1299 /* Turn this on if you want to see bad packets */ 1300 if (igc_debug_sbp) 1301 reg_rctl |= IGC_RCTL_SBP; 1302 IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl); 1303 } else if (flags & IFF_ALLMULTI) { 1304 reg_rctl |= IGC_RCTL_MPE; 1305 reg_rctl &= ~IGC_RCTL_UPE; 1306 IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl); 1307 } 1308 return (0); 1309 } 1310 1311 static u_int 1312 igc_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx) 1313 { 1314 u8 *mta = arg; 1315 1316 if (idx == MAX_NUM_MULTICAST_ADDRESSES) 1317 return (0); 1318 1319 bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 1320 1321 return (1); 1322 } 1323 1324 /********************************************************************* 1325 * Multicast Update 1326 * 1327 * This routine is called whenever multicast address list is updated. 1328 * 1329 **********************************************************************/ 1330 1331 static void 1332 igc_if_multi_set(if_ctx_t ctx) 1333 { 1334 struct igc_softc *sc = iflib_get_softc(ctx); 1335 if_t ifp = iflib_get_ifp(ctx); 1336 u8 *mta; /* Multicast array memory */ 1337 u32 reg_rctl = 0; 1338 int mcnt = 0; 1339 1340 IOCTL_DEBUGOUT("igc_set_multi: begin"); 1341 1342 mta = sc->mta; 1343 bzero(mta, sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 1344 1345 mcnt = if_foreach_llmaddr(ifp, igc_copy_maddr, mta); 1346 1347 reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL); 1348 1349 if (if_getflags(ifp) & IFF_PROMISC) { 1350 reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE); 1351 /* Turn this on if you want to see bad packets */ 1352 if (igc_debug_sbp) 1353 reg_rctl |= IGC_RCTL_SBP; 1354 } else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || 1355 if_getflags(ifp) & IFF_ALLMULTI) { 1356 reg_rctl |= IGC_RCTL_MPE; 1357 reg_rctl &= ~IGC_RCTL_UPE; 1358 } else 1359 reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE); 1360 1361 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 1362 igc_update_mc_addr_list(&sc->hw, mta, mcnt); 1363 1364 IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl); 1365 } 1366 1367 /********************************************************************* 1368 * Timer routine 1369 * 1370 * This routine schedules igc_if_update_admin_status() to check for 1371 * link status and to gather statistics as well as to perform some 1372 * controller-specific hardware patting. 1373 * 1374 **********************************************************************/ 1375 static void 1376 igc_if_timer(if_ctx_t ctx, uint16_t qid) 1377 { 1378 1379 if (qid != 0) 1380 return; 1381 1382 iflib_admin_intr_deferred(ctx); 1383 } 1384 1385 static void 1386 igc_if_update_admin_status(if_ctx_t ctx) 1387 { 1388 struct igc_softc *sc = iflib_get_softc(ctx); 1389 struct igc_hw *hw = &sc->hw; 1390 device_t dev = iflib_get_dev(ctx); 1391 u32 link_check, thstat, ctrl; 1392 1393 link_check = thstat = ctrl = 0; 1394 /* Get the cached link value or read phy for real */ 1395 switch (hw->phy.media_type) { 1396 case igc_media_type_copper: 1397 if (hw->mac.get_link_status == true) { 1398 /* Do the work to read phy */ 1399 igc_check_for_link(hw); 1400 link_check = !hw->mac.get_link_status; 1401 } else 1402 link_check = true; 1403 break; 1404 case igc_media_type_unknown: 1405 igc_check_for_link(hw); 1406 link_check = !hw->mac.get_link_status; 1407 /* FALLTHROUGH */ 1408 default: 1409 break; 1410 } 1411 1412 /* Now check for a transition */ 1413 if (link_check && (sc->link_active == 0)) { 1414 igc_get_speed_and_duplex(hw, &sc->link_speed, 1415 &sc->link_duplex); 1416 if (bootverbose) 1417 device_printf(dev, "Link is up %d Mbps %s\n", 1418 sc->link_speed, 1419 ((sc->link_duplex == FULL_DUPLEX) ? 1420 "Full Duplex" : "Half Duplex")); 1421 sc->link_active = 1; 1422 iflib_link_state_change(ctx, LINK_STATE_UP, 1423 IF_Mbps(sc->link_speed)); 1424 } else if (!link_check && (sc->link_active == 1)) { 1425 sc->link_speed = 0; 1426 sc->link_duplex = 0; 1427 sc->link_active = 0; 1428 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); 1429 } 1430 igc_update_stats_counters(sc); 1431 } 1432 1433 static void 1434 igc_if_watchdog_reset(if_ctx_t ctx) 1435 { 1436 struct igc_softc *sc = iflib_get_softc(ctx); 1437 1438 /* 1439 * Just count the event; iflib(4) will already trigger a 1440 * sufficient reset of the controller. 1441 */ 1442 sc->watchdog_events++; 1443 } 1444 1445 /********************************************************************* 1446 * 1447 * This routine disables all traffic on the adapter by issuing a 1448 * global reset on the MAC. 1449 * 1450 **********************************************************************/ 1451 static void 1452 igc_if_stop(if_ctx_t ctx) 1453 { 1454 struct igc_softc *sc = iflib_get_softc(ctx); 1455 1456 INIT_DEBUGOUT("igc_if_stop: begin"); 1457 1458 igc_reset_hw(&sc->hw); 1459 IGC_WRITE_REG(&sc->hw, IGC_WUC, 0); 1460 } 1461 1462 /********************************************************************* 1463 * 1464 * Determine hardware revision. 1465 * 1466 **********************************************************************/ 1467 static void 1468 igc_identify_hardware(if_ctx_t ctx) 1469 { 1470 device_t dev = iflib_get_dev(ctx); 1471 struct igc_softc *sc = iflib_get_softc(ctx); 1472 1473 /* Make sure our PCI config space has the necessary stuff set */ 1474 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 1475 1476 /* Save off the information about this board */ 1477 sc->hw.vendor_id = pci_get_vendor(dev); 1478 sc->hw.device_id = pci_get_device(dev); 1479 sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 1480 sc->hw.subsystem_vendor_id = 1481 pci_read_config(dev, PCIR_SUBVEND_0, 2); 1482 sc->hw.subsystem_device_id = 1483 pci_read_config(dev, PCIR_SUBDEV_0, 2); 1484 1485 /* Do Shared Code Init and Setup */ 1486 if (igc_set_mac_type(&sc->hw)) { 1487 device_printf(dev, "Setup init failure\n"); 1488 return; 1489 } 1490 } 1491 1492 static int 1493 igc_allocate_pci_resources(if_ctx_t ctx) 1494 { 1495 struct igc_softc *sc = iflib_get_softc(ctx); 1496 device_t dev = iflib_get_dev(ctx); 1497 int rid; 1498 1499 rid = PCIR_BAR(0); 1500 sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1501 &rid, RF_ACTIVE); 1502 if (sc->memory == NULL) { 1503 device_printf(dev, 1504 "Unable to allocate bus resource: memory\n"); 1505 return (ENXIO); 1506 } 1507 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory); 1508 sc->osdep.mem_bus_space_handle = 1509 rman_get_bushandle(sc->memory); 1510 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle; 1511 1512 sc->hw.back = &sc->osdep; 1513 1514 return (0); 1515 } 1516 1517 /********************************************************************* 1518 * 1519 * Set up the MSI-X Interrupt handlers 1520 * 1521 **********************************************************************/ 1522 static int 1523 igc_if_msix_intr_assign(if_ctx_t ctx, int msix) 1524 { 1525 struct igc_softc *sc = iflib_get_softc(ctx); 1526 struct igc_rx_queue *rx_que = sc->rx_queues; 1527 struct igc_tx_queue *tx_que = sc->tx_queues; 1528 int error, rid, i, vector = 0, rx_vectors; 1529 char buf[16]; 1530 1531 /* First set up ring resources */ 1532 for (i = 0; i < sc->rx_num_queues; i++, rx_que++, vector++) { 1533 rid = vector + 1; 1534 snprintf(buf, sizeof(buf), "rxq%d", i); 1535 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 1536 IFLIB_INTR_RXTX, igc_msix_que, rx_que, rx_que->me, buf); 1537 if (error) { 1538 device_printf(iflib_get_dev(ctx), 1539 "Failed to allocate que int %d err: %d", 1540 i, error); 1541 sc->rx_num_queues = i + 1; 1542 goto fail; 1543 } 1544 1545 rx_que->msix = vector; 1546 1547 /* 1548 * Set the bit to enable interrupt 1549 * in IGC_IMS -- bits 20 and 21 1550 * are for RX0 and RX1, note this has 1551 * NOTHING to do with the MSI-X vector 1552 */ 1553 rx_que->eims = 1 << vector; 1554 } 1555 rx_vectors = vector; 1556 1557 vector = 0; 1558 for (i = 0; i < sc->tx_num_queues; i++, tx_que++, vector++) { 1559 snprintf(buf, sizeof(buf), "txq%d", i); 1560 tx_que = &sc->tx_queues[i]; 1561 iflib_softirq_alloc_generic(ctx, 1562 &sc->rx_queues[i % sc->rx_num_queues].que_irq, 1563 IFLIB_INTR_TX, tx_que, tx_que->me, buf); 1564 1565 tx_que->msix = (vector % sc->rx_num_queues); 1566 1567 /* 1568 * Set the bit to enable interrupt 1569 * in IGC_IMS -- bits 22 and 23 1570 * are for TX0 and TX1, note this has 1571 * NOTHING to do with the MSI-X vector 1572 */ 1573 tx_que->eims = 1 << i; 1574 } 1575 1576 /* Link interrupt */ 1577 rid = rx_vectors + 1; 1578 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, IFLIB_INTR_ADMIN, 1579 igc_msix_link, sc, 0, "aq"); 1580 1581 if (error) { 1582 device_printf(iflib_get_dev(ctx), 1583 "Failed to register admin handler"); 1584 goto fail; 1585 } 1586 sc->linkvec = rx_vectors; 1587 return (0); 1588 fail: 1589 iflib_irq_free(ctx, &sc->irq); 1590 rx_que = sc->rx_queues; 1591 for (int i = 0; i < sc->rx_num_queues; i++, rx_que++) 1592 iflib_irq_free(ctx, &rx_que->que_irq); 1593 return (error); 1594 } 1595 1596 static void 1597 igc_configure_queues(struct igc_softc *sc) 1598 { 1599 struct igc_hw *hw = &sc->hw; 1600 struct igc_rx_queue *rx_que; 1601 struct igc_tx_queue *tx_que; 1602 u32 ivar = 0, newitr = 0; 1603 1604 /* First turn on RSS capability */ 1605 IGC_WRITE_REG(hw, IGC_GPIE, 1606 IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME | IGC_GPIE_PBA | 1607 IGC_GPIE_NSICR); 1608 1609 /* Turn on MSI-X */ 1610 /* RX entries */ 1611 for (int i = 0; i < sc->rx_num_queues; i++) { 1612 u32 index = i >> 1; 1613 ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index); 1614 rx_que = &sc->rx_queues[i]; 1615 if (i & 1) { 1616 ivar &= 0xFF00FFFF; 1617 ivar |= (rx_que->msix | IGC_IVAR_VALID) << 16; 1618 } else { 1619 ivar &= 0xFFFFFF00; 1620 ivar |= rx_que->msix | IGC_IVAR_VALID; 1621 } 1622 IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar); 1623 } 1624 /* TX entries */ 1625 for (int i = 0; i < sc->tx_num_queues; i++) { 1626 u32 index = i >> 1; 1627 ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index); 1628 tx_que = &sc->tx_queues[i]; 1629 if (i & 1) { 1630 ivar &= 0x00FFFFFF; 1631 ivar |= (tx_que->msix | IGC_IVAR_VALID) << 24; 1632 } else { 1633 ivar &= 0xFFFF00FF; 1634 ivar |= (tx_que->msix | IGC_IVAR_VALID) << 8; 1635 } 1636 IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar); 1637 sc->que_mask |= tx_que->eims; 1638 } 1639 1640 /* And for the link interrupt */ 1641 ivar = (sc->linkvec | IGC_IVAR_VALID) << 8; 1642 sc->link_mask = 1 << sc->linkvec; 1643 IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar); 1644 1645 /* Set the starting interrupt rate */ 1646 if (igc_max_interrupt_rate > 0) 1647 newitr = IGC_INTS_TO_EITR(igc_max_interrupt_rate); 1648 1649 newitr |= IGC_EITR_CNT_IGNR; 1650 1651 for (int i = 0; i < sc->rx_num_queues; i++) { 1652 rx_que = &sc->rx_queues[i]; 1653 IGC_WRITE_REG(hw, IGC_EITR(rx_que->msix), newitr); 1654 } 1655 1656 return; 1657 } 1658 1659 static void 1660 igc_free_pci_resources(if_ctx_t ctx) 1661 { 1662 struct igc_softc *sc = iflib_get_softc(ctx); 1663 struct igc_rx_queue *que = sc->rx_queues; 1664 device_t dev = iflib_get_dev(ctx); 1665 1666 /* Release all MSI-X queue resources */ 1667 if (sc->intr_type == IFLIB_INTR_MSIX) 1668 iflib_irq_free(ctx, &sc->irq); 1669 1670 for (int i = 0; i < sc->rx_num_queues; i++, que++) { 1671 iflib_irq_free(ctx, &que->que_irq); 1672 } 1673 1674 if (sc->memory != NULL) { 1675 bus_release_resource(dev, SYS_RES_MEMORY, 1676 rman_get_rid(sc->memory), sc->memory); 1677 sc->memory = NULL; 1678 } 1679 1680 if (sc->flash != NULL) { 1681 bus_release_resource(dev, SYS_RES_MEMORY, 1682 rman_get_rid(sc->flash), sc->flash); 1683 sc->flash = NULL; 1684 } 1685 1686 if (sc->ioport != NULL) { 1687 bus_release_resource(dev, SYS_RES_IOPORT, 1688 rman_get_rid(sc->ioport), sc->ioport); 1689 sc->ioport = NULL; 1690 } 1691 } 1692 1693 /* Set up MSI or MSI-X */ 1694 static int 1695 igc_setup_msix(if_ctx_t ctx) 1696 { 1697 return (0); 1698 } 1699 1700 /********************************************************************* 1701 * 1702 * Initialize the DMA Coalescing feature 1703 * 1704 **********************************************************************/ 1705 static void 1706 igc_init_dmac(struct igc_softc *sc, u32 pba) 1707 { 1708 device_t dev = sc->dev; 1709 struct igc_hw *hw = &sc->hw; 1710 u32 dmac, reg = ~IGC_DMACR_DMAC_EN; 1711 u16 hwm; 1712 u16 max_frame_size; 1713 int status; 1714 1715 max_frame_size = sc->shared->isc_max_frame_size; 1716 1717 if (sc->dmac == 0) { /* Disabling it */ 1718 IGC_WRITE_REG(hw, IGC_DMACR, reg); 1719 return; 1720 } else 1721 device_printf(dev, "DMA Coalescing enabled\n"); 1722 1723 /* Set starting threshold */ 1724 IGC_WRITE_REG(hw, IGC_DMCTXTH, 0); 1725 1726 hwm = 64 * pba - max_frame_size / 16; 1727 if (hwm < 64 * (pba - 6)) 1728 hwm = 64 * (pba - 6); 1729 reg = IGC_READ_REG(hw, IGC_FCRTC); 1730 reg &= ~IGC_FCRTC_RTH_COAL_MASK; 1731 reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT) 1732 & IGC_FCRTC_RTH_COAL_MASK); 1733 IGC_WRITE_REG(hw, IGC_FCRTC, reg); 1734 1735 dmac = pba - max_frame_size / 512; 1736 if (dmac < pba - 10) 1737 dmac = pba - 10; 1738 reg = IGC_READ_REG(hw, IGC_DMACR); 1739 reg &= ~IGC_DMACR_DMACTHR_MASK; 1740 reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT) 1741 & IGC_DMACR_DMACTHR_MASK); 1742 1743 /* transition to L0x or L1 if available..*/ 1744 reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK); 1745 1746 /* Check if status is 2.5Gb backplane connection 1747 * before configuration of watchdog timer, which is 1748 * in msec values in 12.8usec intervals 1749 * watchdog timer= msec values in 32usec intervals 1750 * for non 2.5Gb connection 1751 */ 1752 status = IGC_READ_REG(hw, IGC_STATUS); 1753 if ((status & IGC_STATUS_2P5_SKU) && 1754 (!(status & IGC_STATUS_2P5_SKU_OVER))) 1755 reg |= ((sc->dmac * 5) >> 6); 1756 else 1757 reg |= (sc->dmac >> 5); 1758 1759 IGC_WRITE_REG(hw, IGC_DMACR, reg); 1760 1761 IGC_WRITE_REG(hw, IGC_DMCRTRH, 0); 1762 1763 /* Set the interval before transition */ 1764 reg = IGC_READ_REG(hw, IGC_DMCTLX); 1765 reg |= IGC_DMCTLX_DCFLUSH_DIS; 1766 1767 /* 1768 ** in 2.5Gb connection, TTLX unit is 0.4 usec 1769 ** which is 0x4*2 = 0xA. But delay is still 4 usec 1770 */ 1771 status = IGC_READ_REG(hw, IGC_STATUS); 1772 if ((status & IGC_STATUS_2P5_SKU) && 1773 (!(status & IGC_STATUS_2P5_SKU_OVER))) 1774 reg |= 0xA; 1775 else 1776 reg |= 0x4; 1777 1778 IGC_WRITE_REG(hw, IGC_DMCTLX, reg); 1779 1780 /* free space in tx packet buffer to wake from DMA coal */ 1781 IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE - 1782 (2 * max_frame_size)) >> 6); 1783 1784 /* make low power state decision controlled by DMA coal */ 1785 reg = IGC_READ_REG(hw, IGC_PCIEMISC); 1786 reg &= ~IGC_PCIEMISC_LX_DECISION; 1787 IGC_WRITE_REG(hw, IGC_PCIEMISC, reg); 1788 } 1789 1790 /********************************************************************* 1791 * 1792 * Initialize the hardware to a configuration as specified by the 1793 * softc structure. 1794 * 1795 **********************************************************************/ 1796 static void 1797 igc_reset(if_ctx_t ctx) 1798 { 1799 device_t dev = iflib_get_dev(ctx); 1800 struct igc_softc *sc = iflib_get_softc(ctx); 1801 struct igc_hw *hw = &sc->hw; 1802 u32 rx_buffer_size; 1803 u32 pba; 1804 1805 INIT_DEBUGOUT("igc_reset: begin"); 1806 /* Let the firmware know the OS is in control */ 1807 igc_get_hw_control(sc); 1808 1809 /* 1810 * Packet Buffer Allocation (PBA) 1811 * Writing PBA sets the receive portion of the buffer 1812 * the remainder is used for the transmit buffer. 1813 */ 1814 pba = IGC_PBA_34K; 1815 1816 INIT_DEBUGOUT1("igc_reset: pba=%dK",pba); 1817 1818 /* 1819 * These parameters control the automatic generation (Tx) and 1820 * response (Rx) to Ethernet PAUSE frames. 1821 * - High water mark should allow for at least two frames to be 1822 * received after sending an XOFF. 1823 * - Low water mark works best when it is very near the high water 1824 * mark. 1825 * This allows the receiver to restart by sending XON when it has 1826 * drained a bit. Here we use an arbitrary value of 1500 which will 1827 * restart after one full frame is pulled from the buffer. There 1828 * could be several smaller frames in the buffer and if so they will 1829 * not trigger the XON until their total number reduces the buffer 1830 * by 1500. 1831 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 1832 */ 1833 rx_buffer_size = (pba & 0xffff) << 10; 1834 hw->fc.high_water = rx_buffer_size - 1835 roundup2(sc->hw.mac.max_frame_size, 1024); 1836 /* 16-byte granularity */ 1837 hw->fc.low_water = hw->fc.high_water - 16; 1838 1839 if (sc->fc) /* locally set flow control value? */ 1840 hw->fc.requested_mode = sc->fc; 1841 else 1842 hw->fc.requested_mode = igc_fc_full; 1843 1844 hw->fc.pause_time = IGC_FC_PAUSE_TIME; 1845 1846 hw->fc.send_xon = true; 1847 1848 /* Issue a global reset */ 1849 igc_reset_hw(hw); 1850 IGC_WRITE_REG(hw, IGC_WUC, 0); 1851 1852 /* and a re-init */ 1853 if (igc_init_hw(hw) < 0) { 1854 device_printf(dev, "Hardware Initialization Failed\n"); 1855 return; 1856 } 1857 1858 /* Setup DMA Coalescing */ 1859 igc_init_dmac(sc, pba); 1860 1861 /* Save the final PBA off if it needs to be used elsewhere i.e. AIM */ 1862 sc->pba = pba; 1863 1864 IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN); 1865 igc_get_phy_info(hw); 1866 igc_check_for_link(hw); 1867 } 1868 1869 /* 1870 * Initialise the RSS mapping for NICs that support multiple transmit/ 1871 * receive rings. 1872 */ 1873 1874 #define RSSKEYLEN 10 1875 static void 1876 igc_initialize_rss_mapping(struct igc_softc *sc) 1877 { 1878 struct igc_hw *hw = &sc->hw; 1879 int i; 1880 int queue_id; 1881 u32 reta; 1882 u32 rss_key[RSSKEYLEN], mrqc, shift = 0; 1883 1884 /* 1885 * The redirection table controls which destination 1886 * queue each bucket redirects traffic to. 1887 * Each DWORD represents four queues, with the LSB 1888 * being the first queue in the DWORD. 1889 * 1890 * This just allocates buckets to queues using round-robin 1891 * allocation. 1892 * 1893 * NOTE: It Just Happens to line up with the default 1894 * RSS allocation method. 1895 */ 1896 1897 /* Warning FM follows */ 1898 reta = 0; 1899 for (i = 0; i < 128; i++) { 1900 #ifdef RSS 1901 queue_id = rss_get_indirection_to_bucket(i); 1902 /* 1903 * If we have more queues than buckets, we'll 1904 * end up mapping buckets to a subset of the 1905 * queues. 1906 * 1907 * If we have more buckets than queues, we'll 1908 * end up instead assigning multiple buckets 1909 * to queues. 1910 * 1911 * Both are suboptimal, but we need to handle 1912 * the case so we don't go out of bounds 1913 * indexing arrays and such. 1914 */ 1915 queue_id = queue_id % sc->rx_num_queues; 1916 #else 1917 queue_id = (i % sc->rx_num_queues); 1918 #endif 1919 /* Adjust if required */ 1920 queue_id = queue_id << shift; 1921 1922 /* 1923 * The low 8 bits are for hash value (n+0); 1924 * The next 8 bits are for hash value (n+1), etc. 1925 */ 1926 reta = reta >> 8; 1927 reta = reta | ( ((uint32_t) queue_id) << 24); 1928 if ((i & 3) == 3) { 1929 IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta); 1930 reta = 0; 1931 } 1932 } 1933 1934 /* Now fill in hash table */ 1935 1936 /* 1937 * MRQC: Multiple Receive Queues Command 1938 * Set queuing to RSS control, number depends on the device. 1939 */ 1940 mrqc = IGC_MRQC_ENABLE_RSS_4Q; 1941 1942 /* XXX ew typecasting */ 1943 rss_getkey((uint8_t *) &rss_key); 1944 for (i = 0; i < RSSKEYLEN; i++) 1945 IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]); 1946 1947 /* 1948 * Configure the RSS fields to hash upon. 1949 */ 1950 mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 | 1951 IGC_MRQC_RSS_FIELD_IPV4_TCP); 1952 mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 | 1953 IGC_MRQC_RSS_FIELD_IPV6_TCP); 1954 mrqc |=( IGC_MRQC_RSS_FIELD_IPV4_UDP | 1955 IGC_MRQC_RSS_FIELD_IPV6_UDP); 1956 mrqc |=( IGC_MRQC_RSS_FIELD_IPV6_UDP_EX | 1957 IGC_MRQC_RSS_FIELD_IPV6_TCP_EX); 1958 1959 IGC_WRITE_REG(hw, IGC_MRQC, mrqc); 1960 } 1961 1962 /********************************************************************* 1963 * 1964 * Setup networking device structure and register interface media. 1965 * 1966 **********************************************************************/ 1967 static int 1968 igc_setup_interface(if_ctx_t ctx) 1969 { 1970 if_t ifp = iflib_get_ifp(ctx); 1971 struct igc_softc *sc = iflib_get_softc(ctx); 1972 if_softc_ctx_t scctx = sc->shared; 1973 1974 INIT_DEBUGOUT("igc_setup_interface: begin"); 1975 1976 /* Single Queue */ 1977 if (sc->tx_num_queues == 1) { 1978 if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1); 1979 if_setsendqready(ifp); 1980 } 1981 1982 /* 1983 * Specify the media types supported by this adapter and register 1984 * callbacks to update media and link information 1985 */ 1986 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 1987 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); 1988 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 1989 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); 1990 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 1991 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); 1992 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL); 1993 1994 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1995 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO); 1996 return (0); 1997 } 1998 1999 static int 2000 igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 2001 int ntxqs, int ntxqsets) 2002 { 2003 struct igc_softc *sc = iflib_get_softc(ctx); 2004 if_softc_ctx_t scctx = sc->shared; 2005 int error = IGC_SUCCESS; 2006 struct igc_tx_queue *que; 2007 int i, j; 2008 2009 MPASS(sc->tx_num_queues > 0); 2010 MPASS(sc->tx_num_queues == ntxqsets); 2011 2012 /* First allocate the top level queue structs */ 2013 if (!(sc->tx_queues = 2014 (struct igc_tx_queue *) malloc(sizeof(struct igc_tx_queue) * 2015 sc->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { 2016 device_printf(iflib_get_dev(ctx), 2017 "Unable to allocate queue memory\n"); 2018 return(ENOMEM); 2019 } 2020 2021 for (i = 0, que = sc->tx_queues; i < sc->tx_num_queues; i++, que++) { 2022 /* Set up some basics */ 2023 2024 struct tx_ring *txr = &que->txr; 2025 txr->sc = que->sc = sc; 2026 que->me = txr->me = i; 2027 2028 /* Allocate report status array */ 2029 if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * 2030 scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) { 2031 device_printf(iflib_get_dev(ctx), 2032 "failed to allocate rs_idxs memory\n"); 2033 error = ENOMEM; 2034 goto fail; 2035 } 2036 for (j = 0; j < scctx->isc_ntxd[0]; j++) 2037 txr->tx_rsq[j] = QIDX_INVALID; 2038 /* get virtual and physical address of the hardware queues */ 2039 txr->tx_base = (struct igc_tx_desc *)vaddrs[i*ntxqs]; 2040 txr->tx_paddr = paddrs[i*ntxqs]; 2041 } 2042 2043 if (bootverbose) 2044 device_printf(iflib_get_dev(ctx), 2045 "allocated for %d tx_queues\n", sc->tx_num_queues); 2046 return (0); 2047 fail: 2048 igc_if_queues_free(ctx); 2049 return (error); 2050 } 2051 2052 static int 2053 igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 2054 int nrxqs, int nrxqsets) 2055 { 2056 struct igc_softc *sc = iflib_get_softc(ctx); 2057 int error = IGC_SUCCESS; 2058 struct igc_rx_queue *que; 2059 int i; 2060 2061 MPASS(sc->rx_num_queues > 0); 2062 MPASS(sc->rx_num_queues == nrxqsets); 2063 2064 /* First allocate the top level queue structs */ 2065 if (!(sc->rx_queues = 2066 (struct igc_rx_queue *) malloc(sizeof(struct igc_rx_queue) * 2067 sc->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { 2068 device_printf(iflib_get_dev(ctx), 2069 "Unable to allocate queue memory\n"); 2070 error = ENOMEM; 2071 goto fail; 2072 } 2073 2074 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) { 2075 /* Set up some basics */ 2076 struct rx_ring *rxr = &que->rxr; 2077 rxr->sc = que->sc = sc; 2078 rxr->que = que; 2079 que->me = rxr->me = i; 2080 2081 /* get virtual and physical address of the hardware queues */ 2082 rxr->rx_base = (union igc_rx_desc_extended *)vaddrs[i*nrxqs]; 2083 rxr->rx_paddr = paddrs[i*nrxqs]; 2084 } 2085 2086 if (bootverbose) 2087 device_printf(iflib_get_dev(ctx), 2088 "allocated for %d rx_queues\n", sc->rx_num_queues); 2089 2090 return (0); 2091 fail: 2092 igc_if_queues_free(ctx); 2093 return (error); 2094 } 2095 2096 static void 2097 igc_if_queues_free(if_ctx_t ctx) 2098 { 2099 struct igc_softc *sc = iflib_get_softc(ctx); 2100 struct igc_tx_queue *tx_que = sc->tx_queues; 2101 struct igc_rx_queue *rx_que = sc->rx_queues; 2102 2103 if (tx_que != NULL) { 2104 for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) { 2105 struct tx_ring *txr = &tx_que->txr; 2106 if (txr->tx_rsq == NULL) 2107 break; 2108 2109 free(txr->tx_rsq, M_DEVBUF); 2110 txr->tx_rsq = NULL; 2111 } 2112 free(sc->tx_queues, M_DEVBUF); 2113 sc->tx_queues = NULL; 2114 } 2115 2116 if (rx_que != NULL) { 2117 free(sc->rx_queues, M_DEVBUF); 2118 sc->rx_queues = NULL; 2119 } 2120 2121 if (sc->mta != NULL) { 2122 free(sc->mta, M_DEVBUF); 2123 } 2124 } 2125 2126 /********************************************************************* 2127 * 2128 * Enable transmit unit. 2129 * 2130 **********************************************************************/ 2131 static void 2132 igc_initialize_transmit_unit(if_ctx_t ctx) 2133 { 2134 struct igc_softc *sc = iflib_get_softc(ctx); 2135 if_softc_ctx_t scctx = sc->shared; 2136 struct igc_tx_queue *que; 2137 struct tx_ring *txr; 2138 struct igc_hw *hw = &sc->hw; 2139 u32 tctl, txdctl = 0; 2140 2141 INIT_DEBUGOUT("igc_initialize_transmit_unit: begin"); 2142 2143 for (int i = 0; i < sc->tx_num_queues; i++, txr++) { 2144 u64 bus_addr; 2145 caddr_t offp, endp; 2146 2147 que = &sc->tx_queues[i]; 2148 txr = &que->txr; 2149 bus_addr = txr->tx_paddr; 2150 2151 /* Clear checksum offload context. */ 2152 offp = (caddr_t)&txr->csum_flags; 2153 endp = (caddr_t)(txr + 1); 2154 bzero(offp, endp - offp); 2155 2156 /* Base and Len of TX Ring */ 2157 IGC_WRITE_REG(hw, IGC_TDLEN(i), 2158 scctx->isc_ntxd[0] * sizeof(struct igc_tx_desc)); 2159 IGC_WRITE_REG(hw, IGC_TDBAH(i), 2160 (u32)(bus_addr >> 32)); 2161 IGC_WRITE_REG(hw, IGC_TDBAL(i), 2162 (u32)bus_addr); 2163 /* Init the HEAD/TAIL indices */ 2164 IGC_WRITE_REG(hw, IGC_TDT(i), 0); 2165 IGC_WRITE_REG(hw, IGC_TDH(i), 0); 2166 2167 HW_DEBUGOUT2("Base = %x, Length = %x\n", 2168 IGC_READ_REG(&sc->hw, IGC_TDBAL(i)), 2169 IGC_READ_REG(&sc->hw, IGC_TDLEN(i))); 2170 2171 txdctl = 0; /* clear txdctl */ 2172 txdctl |= 0x1f; /* PTHRESH */ 2173 txdctl |= 1 << 8; /* HTHRESH */ 2174 txdctl |= 1 << 16;/* WTHRESH */ 2175 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ 2176 txdctl |= IGC_TXDCTL_GRAN; 2177 txdctl |= 1 << 25; /* LWTHRESH */ 2178 2179 IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl); 2180 } 2181 2182 /* Program the Transmit Control Register */ 2183 tctl = IGC_READ_REG(&sc->hw, IGC_TCTL); 2184 tctl &= ~IGC_TCTL_CT; 2185 tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN | 2186 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT)); 2187 2188 /* This write will effectively turn on the transmit unit. */ 2189 IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl); 2190 } 2191 2192 /********************************************************************* 2193 * 2194 * Enable receive unit. 2195 * 2196 **********************************************************************/ 2197 #define BSIZEPKT_ROUNDUP ((1<<IGC_SRRCTL_BSIZEPKT_SHIFT)-1) 2198 2199 static void 2200 igc_initialize_receive_unit(if_ctx_t ctx) 2201 { 2202 struct igc_softc *sc = iflib_get_softc(ctx); 2203 if_softc_ctx_t scctx = sc->shared; 2204 if_t ifp = iflib_get_ifp(ctx); 2205 struct igc_hw *hw = &sc->hw; 2206 struct igc_rx_queue *que; 2207 int i; 2208 u32 psize, rctl, rxcsum, srrctl = 0; 2209 2210 INIT_DEBUGOUT("igc_initialize_receive_units: begin"); 2211 2212 /* 2213 * Make sure receives are disabled while setting 2214 * up the descriptor ring 2215 */ 2216 rctl = IGC_READ_REG(hw, IGC_RCTL); 2217 IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN); 2218 2219 /* Setup the Receive Control Register */ 2220 rctl &= ~(3 << IGC_RCTL_MO_SHIFT); 2221 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | 2222 IGC_RCTL_LBM_NO | IGC_RCTL_RDMTS_HALF | 2223 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); 2224 2225 /* Do not store bad packets */ 2226 rctl &= ~IGC_RCTL_SBP; 2227 2228 /* Enable Long Packet receive */ 2229 if (if_getmtu(ifp) > ETHERMTU) 2230 rctl |= IGC_RCTL_LPE; 2231 else 2232 rctl &= ~IGC_RCTL_LPE; 2233 2234 /* Strip the CRC */ 2235 if (!igc_disable_crc_stripping) 2236 rctl |= IGC_RCTL_SECRC; 2237 2238 rxcsum = IGC_READ_REG(hw, IGC_RXCSUM); 2239 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 2240 rxcsum |= IGC_RXCSUM_CRCOFL; 2241 if (sc->tx_num_queues > 1) 2242 rxcsum |= IGC_RXCSUM_PCSD; 2243 else 2244 rxcsum |= IGC_RXCSUM_IPPCSE; 2245 } else { 2246 if (sc->tx_num_queues > 1) 2247 rxcsum |= IGC_RXCSUM_PCSD; 2248 else 2249 rxcsum &= ~IGC_RXCSUM_TUOFL; 2250 } 2251 IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum); 2252 2253 if (sc->rx_num_queues > 1) 2254 igc_initialize_rss_mapping(sc); 2255 2256 if (if_getmtu(ifp) > ETHERMTU) { 2257 psize = scctx->isc_max_frame_size; 2258 /* are we on a vlan? */ 2259 if (if_vlantrunkinuse(ifp)) 2260 psize += VLAN_TAG_SIZE; 2261 IGC_WRITE_REG(&sc->hw, IGC_RLPML, psize); 2262 } 2263 2264 /* Set maximum packet buffer len */ 2265 srrctl |= (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> 2266 IGC_SRRCTL_BSIZEPKT_SHIFT; 2267 /* srrctl above overrides this but set the register to a sane value */ 2268 rctl |= IGC_RCTL_SZ_2048; 2269 2270 /* 2271 * If TX flow control is disabled and there's >1 queue defined, 2272 * enable DROP. 2273 * 2274 * This drops frames rather than hanging the RX MAC for all queues. 2275 */ 2276 if ((sc->rx_num_queues > 1) && 2277 (sc->fc == igc_fc_none || 2278 sc->fc == igc_fc_rx_pause)) { 2279 srrctl |= IGC_SRRCTL_DROP_EN; 2280 } 2281 2282 /* Setup the Base and Length of the Rx Descriptor Rings */ 2283 for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues; i++, que++) { 2284 struct rx_ring *rxr = &que->rxr; 2285 u64 bus_addr = rxr->rx_paddr; 2286 u32 rxdctl; 2287 2288 #ifdef notyet 2289 /* Configure for header split? -- ignore for now */ 2290 rxr->hdr_split = igc_header_split; 2291 #else 2292 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; 2293 #endif 2294 2295 IGC_WRITE_REG(hw, IGC_RDLEN(i), 2296 scctx->isc_nrxd[0] * sizeof(struct igc_rx_desc)); 2297 IGC_WRITE_REG(hw, IGC_RDBAH(i), (uint32_t)(bus_addr >> 32)); 2298 IGC_WRITE_REG(hw, IGC_RDBAL(i), (uint32_t)bus_addr); 2299 IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl); 2300 /* Setup the Head and Tail Descriptor Pointers */ 2301 IGC_WRITE_REG(hw, IGC_RDH(i), 0); 2302 IGC_WRITE_REG(hw, IGC_RDT(i), 0); 2303 /* Enable this Queue */ 2304 rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i)); 2305 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; 2306 rxdctl &= 0xFFF00000; 2307 rxdctl |= IGC_RX_PTHRESH; 2308 rxdctl |= IGC_RX_HTHRESH << 8; 2309 rxdctl |= IGC_RX_WTHRESH << 16; 2310 IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl); 2311 } 2312 2313 /* Make sure VLAN Filters are off */ 2314 rctl &= ~IGC_RCTL_VFE; 2315 2316 /* Write out the settings */ 2317 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 2318 2319 return; 2320 } 2321 2322 static void 2323 igc_setup_vlan_hw_support(if_ctx_t ctx) 2324 { 2325 struct igc_softc *sc = iflib_get_softc(ctx); 2326 struct igc_hw *hw = &sc->hw; 2327 struct ifnet *ifp = iflib_get_ifp(ctx); 2328 u32 reg; 2329 2330 /* igc hardware doesn't seem to implement VFTA for HWFILTER */ 2331 2332 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING && 2333 !igc_disable_crc_stripping) { 2334 reg = IGC_READ_REG(hw, IGC_CTRL); 2335 reg |= IGC_CTRL_VME; 2336 IGC_WRITE_REG(hw, IGC_CTRL, reg); 2337 } else { 2338 reg = IGC_READ_REG(hw, IGC_CTRL); 2339 reg &= ~IGC_CTRL_VME; 2340 IGC_WRITE_REG(hw, IGC_CTRL, reg); 2341 } 2342 } 2343 2344 static void 2345 igc_if_intr_enable(if_ctx_t ctx) 2346 { 2347 struct igc_softc *sc = iflib_get_softc(ctx); 2348 struct igc_hw *hw = &sc->hw; 2349 u32 mask; 2350 2351 if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) { 2352 mask = (sc->que_mask | sc->link_mask); 2353 IGC_WRITE_REG(hw, IGC_EIAC, mask); 2354 IGC_WRITE_REG(hw, IGC_EIAM, mask); 2355 IGC_WRITE_REG(hw, IGC_EIMS, mask); 2356 IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC); 2357 } else 2358 IGC_WRITE_REG(hw, IGC_IMS, IMS_ENABLE_MASK); 2359 IGC_WRITE_FLUSH(hw); 2360 } 2361 2362 static void 2363 igc_if_intr_disable(if_ctx_t ctx) 2364 { 2365 struct igc_softc *sc = iflib_get_softc(ctx); 2366 struct igc_hw *hw = &sc->hw; 2367 2368 if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) { 2369 IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff); 2370 IGC_WRITE_REG(hw, IGC_EIAC, 0); 2371 } 2372 IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff); 2373 IGC_WRITE_FLUSH(hw); 2374 } 2375 2376 /* 2377 * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit. 2378 * For ASF and Pass Through versions of f/w this means 2379 * that the driver is loaded. For AMT version type f/w 2380 * this means that the network i/f is open. 2381 */ 2382 static void 2383 igc_get_hw_control(struct igc_softc *sc) 2384 { 2385 u32 ctrl_ext; 2386 2387 if (sc->vf_ifp) 2388 return; 2389 2390 ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT); 2391 IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, 2392 ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 2393 } 2394 2395 /* 2396 * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 2397 * For ASF and Pass Through versions of f/w this means that 2398 * the driver is no longer loaded. For AMT versions of the 2399 * f/w this means that the network i/f is closed. 2400 */ 2401 static void 2402 igc_release_hw_control(struct igc_softc *sc) 2403 { 2404 u32 ctrl_ext; 2405 2406 ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT); 2407 IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, 2408 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 2409 return; 2410 } 2411 2412 static int 2413 igc_is_valid_ether_addr(u8 *addr) 2414 { 2415 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; 2416 2417 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { 2418 return (false); 2419 } 2420 2421 return (true); 2422 } 2423 2424 /* 2425 ** Parse the interface capabilities with regard 2426 ** to both system management and wake-on-lan for 2427 ** later use. 2428 */ 2429 static void 2430 igc_get_wakeup(if_ctx_t ctx) 2431 { 2432 struct igc_softc *sc = iflib_get_softc(ctx); 2433 u16 eeprom_data = 0, apme_mask; 2434 2435 apme_mask = IGC_WUC_APME; 2436 eeprom_data = IGC_READ_REG(&sc->hw, IGC_WUC); 2437 2438 if (eeprom_data & apme_mask) 2439 sc->wol = IGC_WUFC_LNKC; 2440 } 2441 2442 2443 /* 2444 * Enable PCI Wake On Lan capability 2445 */ 2446 static void 2447 igc_enable_wakeup(if_ctx_t ctx) 2448 { 2449 struct igc_softc *sc = iflib_get_softc(ctx); 2450 device_t dev = iflib_get_dev(ctx); 2451 if_t ifp = iflib_get_ifp(ctx); 2452 int error = 0; 2453 u32 ctrl, rctl; 2454 2455 if (!pci_has_pm(dev)) 2456 return; 2457 2458 /* 2459 * Determine type of Wakeup: note that wol 2460 * is set with all bits on by default. 2461 */ 2462 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0) 2463 sc->wol &= ~IGC_WUFC_MAG; 2464 2465 if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0) 2466 sc->wol &= ~IGC_WUFC_EX; 2467 2468 if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0) 2469 sc->wol &= ~IGC_WUFC_MC; 2470 else { 2471 rctl = IGC_READ_REG(&sc->hw, IGC_RCTL); 2472 rctl |= IGC_RCTL_MPE; 2473 IGC_WRITE_REG(&sc->hw, IGC_RCTL, rctl); 2474 } 2475 2476 if (!(sc->wol & (IGC_WUFC_EX | IGC_WUFC_MAG | IGC_WUFC_MC))) 2477 goto pme; 2478 2479 /* Advertise the wakeup capability */ 2480 ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL); 2481 ctrl |= IGC_CTRL_ADVD3WUC; 2482 IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl); 2483 2484 /* Enable wakeup by the MAC */ 2485 IGC_WRITE_REG(&sc->hw, IGC_WUC, IGC_WUC_PME_EN); 2486 IGC_WRITE_REG(&sc->hw, IGC_WUFC, sc->wol); 2487 2488 pme: 2489 if (!error && (if_getcapenable(ifp) & IFCAP_WOL)) 2490 pci_enable_pme(dev); 2491 2492 return; 2493 } 2494 2495 /********************************************************************** 2496 * 2497 * Update the board statistics counters. 2498 * 2499 **********************************************************************/ 2500 static void 2501 igc_update_stats_counters(struct igc_softc *sc) 2502 { 2503 u64 prev_xoffrxc = sc->stats.xoffrxc; 2504 2505 sc->stats.crcerrs += IGC_READ_REG(&sc->hw, IGC_CRCERRS); 2506 sc->stats.mpc += IGC_READ_REG(&sc->hw, IGC_MPC); 2507 sc->stats.scc += IGC_READ_REG(&sc->hw, IGC_SCC); 2508 sc->stats.ecol += IGC_READ_REG(&sc->hw, IGC_ECOL); 2509 2510 sc->stats.mcc += IGC_READ_REG(&sc->hw, IGC_MCC); 2511 sc->stats.latecol += IGC_READ_REG(&sc->hw, IGC_LATECOL); 2512 sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_COLC); 2513 sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_RERC); 2514 sc->stats.dc += IGC_READ_REG(&sc->hw, IGC_DC); 2515 sc->stats.rlec += IGC_READ_REG(&sc->hw, IGC_RLEC); 2516 sc->stats.xonrxc += IGC_READ_REG(&sc->hw, IGC_XONRXC); 2517 sc->stats.xontxc += IGC_READ_REG(&sc->hw, IGC_XONTXC); 2518 sc->stats.xoffrxc += IGC_READ_REG(&sc->hw, IGC_XOFFRXC); 2519 /* 2520 * For watchdog management we need to know if we have been 2521 * paused during the last interval, so capture that here. 2522 */ 2523 if (sc->stats.xoffrxc != prev_xoffrxc) 2524 sc->shared->isc_pause_frames = 1; 2525 sc->stats.xofftxc += IGC_READ_REG(&sc->hw, IGC_XOFFTXC); 2526 sc->stats.fcruc += IGC_READ_REG(&sc->hw, IGC_FCRUC); 2527 sc->stats.prc64 += IGC_READ_REG(&sc->hw, IGC_PRC64); 2528 sc->stats.prc127 += IGC_READ_REG(&sc->hw, IGC_PRC127); 2529 sc->stats.prc255 += IGC_READ_REG(&sc->hw, IGC_PRC255); 2530 sc->stats.prc511 += IGC_READ_REG(&sc->hw, IGC_PRC511); 2531 sc->stats.prc1023 += IGC_READ_REG(&sc->hw, IGC_PRC1023); 2532 sc->stats.prc1522 += IGC_READ_REG(&sc->hw, IGC_PRC1522); 2533 sc->stats.tlpic += IGC_READ_REG(&sc->hw, IGC_TLPIC); 2534 sc->stats.rlpic += IGC_READ_REG(&sc->hw, IGC_RLPIC); 2535 sc->stats.gprc += IGC_READ_REG(&sc->hw, IGC_GPRC); 2536 sc->stats.bprc += IGC_READ_REG(&sc->hw, IGC_BPRC); 2537 sc->stats.mprc += IGC_READ_REG(&sc->hw, IGC_MPRC); 2538 sc->stats.gptc += IGC_READ_REG(&sc->hw, IGC_GPTC); 2539 2540 /* For the 64-bit byte counters the low dword must be read first. */ 2541 /* Both registers clear on the read of the high dword */ 2542 2543 sc->stats.gorc += IGC_READ_REG(&sc->hw, IGC_GORCL) + 2544 ((u64)IGC_READ_REG(&sc->hw, IGC_GORCH) << 32); 2545 sc->stats.gotc += IGC_READ_REG(&sc->hw, IGC_GOTCL) + 2546 ((u64)IGC_READ_REG(&sc->hw, IGC_GOTCH) << 32); 2547 2548 sc->stats.rnbc += IGC_READ_REG(&sc->hw, IGC_RNBC); 2549 sc->stats.ruc += IGC_READ_REG(&sc->hw, IGC_RUC); 2550 sc->stats.rfc += IGC_READ_REG(&sc->hw, IGC_RFC); 2551 sc->stats.roc += IGC_READ_REG(&sc->hw, IGC_ROC); 2552 sc->stats.rjc += IGC_READ_REG(&sc->hw, IGC_RJC); 2553 2554 sc->stats.mgprc += IGC_READ_REG(&sc->hw, IGC_MGTPRC); 2555 sc->stats.mgpdc += IGC_READ_REG(&sc->hw, IGC_MGTPDC); 2556 sc->stats.mgptc += IGC_READ_REG(&sc->hw, IGC_MGTPTC); 2557 2558 sc->stats.tor += IGC_READ_REG(&sc->hw, IGC_TORH); 2559 sc->stats.tot += IGC_READ_REG(&sc->hw, IGC_TOTH); 2560 2561 sc->stats.tpr += IGC_READ_REG(&sc->hw, IGC_TPR); 2562 sc->stats.tpt += IGC_READ_REG(&sc->hw, IGC_TPT); 2563 sc->stats.ptc64 += IGC_READ_REG(&sc->hw, IGC_PTC64); 2564 sc->stats.ptc127 += IGC_READ_REG(&sc->hw, IGC_PTC127); 2565 sc->stats.ptc255 += IGC_READ_REG(&sc->hw, IGC_PTC255); 2566 sc->stats.ptc511 += IGC_READ_REG(&sc->hw, IGC_PTC511); 2567 sc->stats.ptc1023 += IGC_READ_REG(&sc->hw, IGC_PTC1023); 2568 sc->stats.ptc1522 += IGC_READ_REG(&sc->hw, IGC_PTC1522); 2569 sc->stats.mptc += IGC_READ_REG(&sc->hw, IGC_MPTC); 2570 sc->stats.bptc += IGC_READ_REG(&sc->hw, IGC_BPTC); 2571 2572 /* Interrupt Counts */ 2573 sc->stats.iac += IGC_READ_REG(&sc->hw, IGC_IAC); 2574 sc->stats.rxdmtc += IGC_READ_REG(&sc->hw, IGC_RXDMTC); 2575 2576 sc->stats.algnerrc += IGC_READ_REG(&sc->hw, IGC_ALGNERRC); 2577 sc->stats.tncrs += IGC_READ_REG(&sc->hw, IGC_TNCRS); 2578 sc->stats.htdpmc += IGC_READ_REG(&sc->hw, IGC_HTDPMC); 2579 sc->stats.tsctc += IGC_READ_REG(&sc->hw, IGC_TSCTC); 2580 } 2581 2582 static uint64_t 2583 igc_if_get_counter(if_ctx_t ctx, ift_counter cnt) 2584 { 2585 struct igc_softc *sc = iflib_get_softc(ctx); 2586 if_t ifp = iflib_get_ifp(ctx); 2587 2588 switch (cnt) { 2589 case IFCOUNTER_COLLISIONS: 2590 return (sc->stats.colc); 2591 case IFCOUNTER_IERRORS: 2592 return (sc->dropped_pkts + sc->stats.rxerrc + 2593 sc->stats.crcerrs + sc->stats.algnerrc + 2594 sc->stats.ruc + sc->stats.roc + 2595 sc->stats.mpc + sc->stats.htdpmc); 2596 case IFCOUNTER_OERRORS: 2597 return (if_get_counter_default(ifp, cnt) + 2598 sc->stats.ecol + sc->stats.latecol + sc->watchdog_events); 2599 default: 2600 return (if_get_counter_default(ifp, cnt)); 2601 } 2602 } 2603 2604 /* igc_if_needs_restart - Tell iflib when the driver needs to be reinitialized 2605 * @ctx: iflib context 2606 * @event: event code to check 2607 * 2608 * Defaults to returning false for unknown events. 2609 * 2610 * @returns true if iflib needs to reinit the interface 2611 */ 2612 static bool 2613 igc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) 2614 { 2615 switch (event) { 2616 case IFLIB_RESTART_VLAN_CONFIG: 2617 default: 2618 return (false); 2619 } 2620 } 2621 2622 /* Export a single 32-bit register via a read-only sysctl. */ 2623 static int 2624 igc_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) 2625 { 2626 struct igc_softc *sc; 2627 u_int val; 2628 2629 sc = oidp->oid_arg1; 2630 val = IGC_READ_REG(&sc->hw, oidp->oid_arg2); 2631 return (sysctl_handle_int(oidp, &val, 0, req)); 2632 } 2633 2634 /* Per queue holdoff interrupt rate handler */ 2635 static int 2636 igc_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2637 { 2638 struct igc_rx_queue *rque; 2639 struct igc_tx_queue *tque; 2640 struct igc_hw *hw; 2641 int error; 2642 u32 reg, usec, rate; 2643 2644 bool tx = oidp->oid_arg2; 2645 2646 if (tx) { 2647 tque = oidp->oid_arg1; 2648 hw = &tque->sc->hw; 2649 reg = IGC_READ_REG(hw, IGC_EITR(tque->me)); 2650 } else { 2651 rque = oidp->oid_arg1; 2652 hw = &rque->sc->hw; 2653 reg = IGC_READ_REG(hw, IGC_EITR(rque->msix)); 2654 } 2655 2656 usec = (reg & IGC_QVECTOR_MASK); 2657 if (usec > 0) 2658 rate = IGC_INTS_TO_EITR(usec); 2659 else 2660 rate = 0; 2661 2662 error = sysctl_handle_int(oidp, &rate, 0, req); 2663 if (error || !req->newptr) 2664 return error; 2665 return 0; 2666 } 2667 2668 /* 2669 * Add sysctl variables, one per statistic, to the system. 2670 */ 2671 static void 2672 igc_add_hw_stats(struct igc_softc *sc) 2673 { 2674 device_t dev = iflib_get_dev(sc->ctx); 2675 struct igc_tx_queue *tx_que = sc->tx_queues; 2676 struct igc_rx_queue *rx_que = sc->rx_queues; 2677 2678 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2679 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 2680 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 2681 struct igc_hw_stats *stats = &sc->stats; 2682 2683 struct sysctl_oid *stat_node, *queue_node, *int_node; 2684 struct sysctl_oid_list *stat_list, *queue_list, *int_list; 2685 2686 #define QUEUE_NAME_LEN 32 2687 char namebuf[QUEUE_NAME_LEN]; 2688 2689 /* Driver Statistics */ 2690 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 2691 CTLFLAG_RD, &sc->dropped_pkts, 2692 "Driver dropped packets"); 2693 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 2694 CTLFLAG_RD, &sc->link_irq, 2695 "Link MSI-X IRQ Handled"); 2696 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", 2697 CTLFLAG_RD, &sc->rx_overruns, 2698 "RX overruns"); 2699 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", 2700 CTLFLAG_RD, &sc->watchdog_events, 2701 "Watchdog timeouts"); 2702 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control", 2703 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2704 sc, IGC_CTRL, igc_sysctl_reg_handler, "IU", 2705 "Device Control Register"); 2706 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control", 2707 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2708 sc, IGC_RCTL, igc_sysctl_reg_handler, "IU", 2709 "Receiver Control Register"); 2710 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", 2711 CTLFLAG_RD, &sc->hw.fc.high_water, 0, 2712 "Flow Control High Watermark"); 2713 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 2714 CTLFLAG_RD, &sc->hw.fc.low_water, 0, 2715 "Flow Control Low Watermark"); 2716 2717 for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) { 2718 struct tx_ring *txr = &tx_que->txr; 2719 snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i); 2720 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 2721 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Name"); 2722 queue_list = SYSCTL_CHILDREN(queue_node); 2723 2724 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 2725 CTLTYPE_UINT | CTLFLAG_RD, tx_que, 2726 true, igc_sysctl_interrupt_rate_handler, "IU", 2727 "Interrupt Rate"); 2728 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 2729 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2730 IGC_TDH(txr->me), igc_sysctl_reg_handler, "IU", 2731 "Transmit Descriptor Head"); 2732 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 2733 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2734 IGC_TDT(txr->me), igc_sysctl_reg_handler, "IU", 2735 "Transmit Descriptor Tail"); 2736 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", 2737 CTLFLAG_RD, &txr->tx_irq, 2738 "Queue MSI-X Transmit Interrupts"); 2739 } 2740 2741 for (int j = 0; j < sc->rx_num_queues; j++, rx_que++) { 2742 struct rx_ring *rxr = &rx_que->rxr; 2743 snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j); 2744 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 2745 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name"); 2746 queue_list = SYSCTL_CHILDREN(queue_node); 2747 2748 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 2749 CTLTYPE_UINT | CTLFLAG_RD, rx_que, 2750 false, igc_sysctl_interrupt_rate_handler, "IU", 2751 "Interrupt Rate"); 2752 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 2753 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2754 IGC_RDH(rxr->me), igc_sysctl_reg_handler, "IU", 2755 "Receive Descriptor Head"); 2756 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 2757 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2758 IGC_RDT(rxr->me), igc_sysctl_reg_handler, "IU", 2759 "Receive Descriptor Tail"); 2760 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq", 2761 CTLFLAG_RD, &rxr->rx_irq, 2762 "Queue MSI-X Receive Interrupts"); 2763 } 2764 2765 /* MAC stats get their own sub node */ 2766 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 2767 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics"); 2768 stat_list = SYSCTL_CHILDREN(stat_node); 2769 2770 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll", 2771 CTLFLAG_RD, &stats->ecol, 2772 "Excessive collisions"); 2773 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll", 2774 CTLFLAG_RD, &stats->scc, 2775 "Single collisions"); 2776 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll", 2777 CTLFLAG_RD, &stats->mcc, 2778 "Multiple collisions"); 2779 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll", 2780 CTLFLAG_RD, &stats->latecol, 2781 "Late collisions"); 2782 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count", 2783 CTLFLAG_RD, &stats->colc, 2784 "Collision Count"); 2785 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors", 2786 CTLFLAG_RD, &sc->stats.symerrs, 2787 "Symbol Errors"); 2788 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors", 2789 CTLFLAG_RD, &sc->stats.sec, 2790 "Sequence Errors"); 2791 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count", 2792 CTLFLAG_RD, &sc->stats.dc, 2793 "Defer Count"); 2794 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets", 2795 CTLFLAG_RD, &sc->stats.mpc, 2796 "Missed Packets"); 2797 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_length_errors", 2798 CTLFLAG_RD, &sc->stats.rlec, 2799 "Receive Length Errors"); 2800 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", 2801 CTLFLAG_RD, &sc->stats.rnbc, 2802 "Receive No Buffers"); 2803 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize", 2804 CTLFLAG_RD, &sc->stats.ruc, 2805 "Receive Undersize"); 2806 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 2807 CTLFLAG_RD, &sc->stats.rfc, 2808 "Fragmented Packets Received "); 2809 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize", 2810 CTLFLAG_RD, &sc->stats.roc, 2811 "Oversized Packets Received"); 2812 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber", 2813 CTLFLAG_RD, &sc->stats.rjc, 2814 "Received Jabber"); 2815 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs", 2816 CTLFLAG_RD, &sc->stats.rxerrc, 2817 "Receive Errors"); 2818 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 2819 CTLFLAG_RD, &sc->stats.crcerrs, 2820 "CRC errors"); 2821 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs", 2822 CTLFLAG_RD, &sc->stats.algnerrc, 2823 "Alignment Errors"); 2824 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 2825 CTLFLAG_RD, &sc->stats.xonrxc, 2826 "XON Received"); 2827 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 2828 CTLFLAG_RD, &sc->stats.xontxc, 2829 "XON Transmitted"); 2830 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 2831 CTLFLAG_RD, &sc->stats.xoffrxc, 2832 "XOFF Received"); 2833 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 2834 CTLFLAG_RD, &sc->stats.xofftxc, 2835 "XOFF Transmitted"); 2836 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "unsupported_fc_recvd", 2837 CTLFLAG_RD, &sc->stats.fcruc, 2838 "Unsupported Flow Control Received"); 2839 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_recvd", 2840 CTLFLAG_RD, &sc->stats.mgprc, 2841 "Management Packets Received"); 2842 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_drop", 2843 CTLFLAG_RD, &sc->stats.mgpdc, 2844 "Management Packets Dropped"); 2845 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_txd", 2846 CTLFLAG_RD, &sc->stats.mgptc, 2847 "Management Packets Transmitted"); 2848 2849 /* Packet Reception Stats */ 2850 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", 2851 CTLFLAG_RD, &sc->stats.tpr, 2852 "Total Packets Received "); 2853 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", 2854 CTLFLAG_RD, &sc->stats.gprc, 2855 "Good Packets Received"); 2856 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", 2857 CTLFLAG_RD, &sc->stats.bprc, 2858 "Broadcast Packets Received"); 2859 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", 2860 CTLFLAG_RD, &sc->stats.mprc, 2861 "Multicast Packets Received"); 2862 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 2863 CTLFLAG_RD, &sc->stats.prc64, 2864 "64 byte frames received "); 2865 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 2866 CTLFLAG_RD, &sc->stats.prc127, 2867 "65-127 byte frames received"); 2868 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 2869 CTLFLAG_RD, &sc->stats.prc255, 2870 "128-255 byte frames received"); 2871 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 2872 CTLFLAG_RD, &sc->stats.prc511, 2873 "256-511 byte frames received"); 2874 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 2875 CTLFLAG_RD, &sc->stats.prc1023, 2876 "512-1023 byte frames received"); 2877 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 2878 CTLFLAG_RD, &sc->stats.prc1522, 2879 "1023-1522 byte frames received"); 2880 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 2881 CTLFLAG_RD, &sc->stats.gorc, 2882 "Good Octets Received"); 2883 2884 /* Packet Transmission Stats */ 2885 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 2886 CTLFLAG_RD, &sc->stats.gotc, 2887 "Good Octets Transmitted"); 2888 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 2889 CTLFLAG_RD, &sc->stats.tpt, 2890 "Total Packets Transmitted"); 2891 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 2892 CTLFLAG_RD, &sc->stats.gptc, 2893 "Good Packets Transmitted"); 2894 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 2895 CTLFLAG_RD, &sc->stats.bptc, 2896 "Broadcast Packets Transmitted"); 2897 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 2898 CTLFLAG_RD, &sc->stats.mptc, 2899 "Multicast Packets Transmitted"); 2900 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 2901 CTLFLAG_RD, &sc->stats.ptc64, 2902 "64 byte frames transmitted "); 2903 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 2904 CTLFLAG_RD, &sc->stats.ptc127, 2905 "65-127 byte frames transmitted"); 2906 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 2907 CTLFLAG_RD, &sc->stats.ptc255, 2908 "128-255 byte frames transmitted"); 2909 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 2910 CTLFLAG_RD, &sc->stats.ptc511, 2911 "256-511 byte frames transmitted"); 2912 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 2913 CTLFLAG_RD, &sc->stats.ptc1023, 2914 "512-1023 byte frames transmitted"); 2915 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 2916 CTLFLAG_RD, &sc->stats.ptc1522, 2917 "1024-1522 byte frames transmitted"); 2918 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd", 2919 CTLFLAG_RD, &sc->stats.tsctc, 2920 "TSO Contexts Transmitted"); 2921 2922 /* Interrupt Stats */ 2923 int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", 2924 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics"); 2925 int_list = SYSCTL_CHILDREN(int_node); 2926 2927 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts", 2928 CTLFLAG_RD, &sc->stats.iac, 2929 "Interrupt Assertion Count"); 2930 2931 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", 2932 CTLFLAG_RD, &sc->stats.rxdmtc, 2933 "Rx Desc Min Thresh Count"); 2934 } 2935 2936 static void 2937 igc_fw_version(struct igc_softc *sc) 2938 { 2939 struct igc_hw *hw = &sc->hw; 2940 struct igc_fw_version *fw_ver = &sc->fw_ver; 2941 2942 *fw_ver = (struct igc_fw_version){0}; 2943 2944 igc_get_fw_version(hw, fw_ver); 2945 } 2946 2947 static void 2948 igc_sbuf_fw_version(struct igc_fw_version *fw_ver, struct sbuf *buf) 2949 { 2950 const char *space = ""; 2951 2952 if (fw_ver->eep_major || fw_ver->eep_minor || fw_ver->eep_build) { 2953 sbuf_printf(buf, "EEPROM V%d.%d-%d", fw_ver->eep_major, 2954 fw_ver->eep_minor, fw_ver->eep_build); 2955 space = " "; 2956 } 2957 2958 if (fw_ver->invm_major || fw_ver->invm_minor || 2959 fw_ver->invm_img_type) { 2960 sbuf_printf(buf, "%sNVM V%d.%d imgtype%d", 2961 space, fw_ver->invm_major, fw_ver->invm_minor, 2962 fw_ver->invm_img_type); 2963 space = " "; 2964 } 2965 2966 if (fw_ver->or_valid) { 2967 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d", 2968 space, fw_ver->or_major, fw_ver->or_build, 2969 fw_ver->or_patch); 2970 space = " "; 2971 } 2972 2973 if (fw_ver->etrack_id) 2974 sbuf_printf(buf, "%seTrack 0x%08x", space, fw_ver->etrack_id); 2975 } 2976 2977 static void 2978 igc_print_fw_version(struct igc_softc *sc ) 2979 { 2980 device_t dev = sc->dev; 2981 struct sbuf *buf; 2982 int error = 0; 2983 2984 buf = sbuf_new_auto(); 2985 if (!buf) { 2986 device_printf(dev, "Could not allocate sbuf for output.\n"); 2987 return; 2988 } 2989 2990 igc_sbuf_fw_version(&sc->fw_ver, buf); 2991 2992 error = sbuf_finish(buf); 2993 if (error) 2994 device_printf(dev, "Error finishing sbuf: %d\n", error); 2995 else if (sbuf_len(buf)) 2996 device_printf(dev, "%s\n", sbuf_data(buf)); 2997 2998 sbuf_delete(buf); 2999 } 3000 3001 static int 3002 igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS) 3003 { 3004 struct igc_softc *sc = (struct igc_softc *)arg1; 3005 device_t dev = sc->dev; 3006 struct sbuf *buf; 3007 int error = 0; 3008 3009 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3010 if (!buf) { 3011 device_printf(dev, "Could not allocate sbuf for output.\n"); 3012 return (ENOMEM); 3013 } 3014 3015 igc_sbuf_fw_version(&sc->fw_ver, buf); 3016 3017 error = sbuf_finish(buf); 3018 if (error) 3019 device_printf(dev, "Error finishing sbuf: %d\n", error); 3020 3021 sbuf_delete(buf); 3022 3023 return (0); 3024 } 3025 3026 /********************************************************************** 3027 * 3028 * This routine provides a way to dump out the adapter eeprom, 3029 * often a useful debug/service tool. This only dumps the first 3030 * 32 words, stuff that matters is in that extent. 3031 * 3032 **********************************************************************/ 3033 static int 3034 igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) 3035 { 3036 struct igc_softc *sc = (struct igc_softc *)arg1; 3037 int error; 3038 int result; 3039 3040 result = -1; 3041 error = sysctl_handle_int(oidp, &result, 0, req); 3042 3043 if (error || !req->newptr) 3044 return (error); 3045 3046 /* 3047 * This value will cause a hex dump of the 3048 * first 32 16-bit words of the EEPROM to 3049 * the screen. 3050 */ 3051 if (result == 1) 3052 igc_print_nvm_info(sc); 3053 3054 return (error); 3055 } 3056 3057 static void 3058 igc_print_nvm_info(struct igc_softc *sc) 3059 { 3060 u16 eeprom_data; 3061 int i, j, row = 0; 3062 3063 /* Its a bit crude, but it gets the job done */ 3064 printf("\nInterface EEPROM Dump:\n"); 3065 printf("Offset\n0x0000 "); 3066 for (i = 0, j = 0; i < 32; i++, j++) { 3067 if (j == 8) { /* Make the offset block */ 3068 j = 0; ++row; 3069 printf("\n0x00%x0 ",row); 3070 } 3071 igc_read_nvm(&sc->hw, i, 1, &eeprom_data); 3072 printf("%04x ", eeprom_data); 3073 } 3074 printf("\n"); 3075 } 3076 3077 static int 3078 igc_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS) 3079 { 3080 struct igc_softc *sc; 3081 u32 reg, val, shift; 3082 int error, mask; 3083 3084 sc = oidp->oid_arg1; 3085 switch (oidp->oid_arg2) { 3086 case 0: 3087 reg = IGC_DTXTCPFLGL; 3088 shift = 0; 3089 break; 3090 case 1: 3091 reg = IGC_DTXTCPFLGL; 3092 shift = 16; 3093 break; 3094 case 2: 3095 reg = IGC_DTXTCPFLGH; 3096 shift = 0; 3097 break; 3098 default: 3099 return (EINVAL); 3100 break; 3101 } 3102 val = IGC_READ_REG(&sc->hw, reg); 3103 mask = (val >> shift) & 0xfff; 3104 error = sysctl_handle_int(oidp, &mask, 0, req); 3105 if (error != 0 || req->newptr == NULL) 3106 return (error); 3107 if (mask < 0 || mask > 0xfff) 3108 return (EINVAL); 3109 val = (val & ~(0xfff << shift)) | (mask << shift); 3110 IGC_WRITE_REG(&sc->hw, reg, val); 3111 return (0); 3112 } 3113 3114 /* 3115 * Set flow control using sysctl: 3116 * Flow control values: 3117 * 0 - off 3118 * 1 - rx pause 3119 * 2 - tx pause 3120 * 3 - full 3121 */ 3122 static int 3123 igc_set_flowcntl(SYSCTL_HANDLER_ARGS) 3124 { 3125 int error; 3126 static int input = 3; /* default is full */ 3127 struct igc_softc *sc = (struct igc_softc *) arg1; 3128 3129 error = sysctl_handle_int(oidp, &input, 0, req); 3130 3131 if ((error) || (req->newptr == NULL)) 3132 return (error); 3133 3134 if (input == sc->fc) /* no change? */ 3135 return (error); 3136 3137 switch (input) { 3138 case igc_fc_rx_pause: 3139 case igc_fc_tx_pause: 3140 case igc_fc_full: 3141 case igc_fc_none: 3142 sc->hw.fc.requested_mode = input; 3143 sc->fc = input; 3144 break; 3145 default: 3146 /* Do nothing */ 3147 return (error); 3148 } 3149 3150 sc->hw.fc.current_mode = sc->hw.fc.requested_mode; 3151 igc_force_mac_fc(&sc->hw); 3152 return (error); 3153 } 3154 3155 /* 3156 * Manage DMA Coalesce: 3157 * Control values: 3158 * 0/1 - off/on 3159 * Legal timer values are: 3160 * 250,500,1000-10000 in thousands 3161 */ 3162 static int 3163 igc_sysctl_dmac(SYSCTL_HANDLER_ARGS) 3164 { 3165 struct igc_softc *sc = (struct igc_softc *) arg1; 3166 int error; 3167 3168 error = sysctl_handle_int(oidp, &sc->dmac, 0, req); 3169 3170 if ((error) || (req->newptr == NULL)) 3171 return (error); 3172 3173 switch (sc->dmac) { 3174 case 0: 3175 /* Disabling */ 3176 break; 3177 case 1: /* Just enable and use default */ 3178 sc->dmac = 1000; 3179 break; 3180 case 250: 3181 case 500: 3182 case 1000: 3183 case 2000: 3184 case 3000: 3185 case 4000: 3186 case 5000: 3187 case 6000: 3188 case 7000: 3189 case 8000: 3190 case 9000: 3191 case 10000: 3192 /* Legal values - allow */ 3193 break; 3194 default: 3195 /* Do nothing, illegal value */ 3196 sc->dmac = 0; 3197 return (EINVAL); 3198 } 3199 /* Reinit the interface */ 3200 igc_if_init(sc->ctx); 3201 return (error); 3202 } 3203 3204 /* 3205 * Manage Energy Efficient Ethernet: 3206 * Control values: 3207 * 0/1 - enabled/disabled 3208 */ 3209 static int 3210 igc_sysctl_eee(SYSCTL_HANDLER_ARGS) 3211 { 3212 struct igc_softc *sc = (struct igc_softc *) arg1; 3213 int error, value; 3214 3215 value = sc->hw.dev_spec._i225.eee_disable; 3216 error = sysctl_handle_int(oidp, &value, 0, req); 3217 if (error || req->newptr == NULL) 3218 return (error); 3219 3220 sc->hw.dev_spec._i225.eee_disable = (value != 0); 3221 igc_if_init(sc->ctx); 3222 3223 return (0); 3224 } 3225 3226 static int 3227 igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3228 { 3229 struct igc_softc *sc; 3230 int error; 3231 int result; 3232 3233 result = -1; 3234 error = sysctl_handle_int(oidp, &result, 0, req); 3235 3236 if (error || !req->newptr) 3237 return (error); 3238 3239 if (result == 1) { 3240 sc = (struct igc_softc *) arg1; 3241 igc_print_debug_info(sc); 3242 } 3243 3244 return (error); 3245 } 3246 3247 static int 3248 igc_get_rs(SYSCTL_HANDLER_ARGS) 3249 { 3250 struct igc_softc *sc = (struct igc_softc *) arg1; 3251 int error; 3252 int result; 3253 3254 result = 0; 3255 error = sysctl_handle_int(oidp, &result, 0, req); 3256 3257 if (error || !req->newptr || result != 1) 3258 return (error); 3259 igc_dump_rs(sc); 3260 3261 return (error); 3262 } 3263 3264 static void 3265 igc_if_debug(if_ctx_t ctx) 3266 { 3267 igc_dump_rs(iflib_get_softc(ctx)); 3268 } 3269 3270 /* 3271 * This routine is meant to be fluid, add whatever is 3272 * needed for debugging a problem. -jfv 3273 */ 3274 static void 3275 igc_print_debug_info(struct igc_softc *sc) 3276 { 3277 device_t dev = iflib_get_dev(sc->ctx); 3278 if_t ifp = iflib_get_ifp(sc->ctx); 3279 struct tx_ring *txr = &sc->tx_queues->txr; 3280 struct rx_ring *rxr = &sc->rx_queues->rxr; 3281 3282 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 3283 printf("Interface is RUNNING "); 3284 else 3285 printf("Interface is NOT RUNNING\n"); 3286 3287 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) 3288 printf("and INACTIVE\n"); 3289 else 3290 printf("and ACTIVE\n"); 3291 3292 for (int i = 0; i < sc->tx_num_queues; i++, txr++) { 3293 device_printf(dev, "TX Queue %d ------\n", i); 3294 device_printf(dev, "hw tdh = %d, hw tdt = %d\n", 3295 IGC_READ_REG(&sc->hw, IGC_TDH(i)), 3296 IGC_READ_REG(&sc->hw, IGC_TDT(i))); 3297 3298 } 3299 for (int j=0; j < sc->rx_num_queues; j++, rxr++) { 3300 device_printf(dev, "RX Queue %d ------\n", j); 3301 device_printf(dev, "hw rdh = %d, hw rdt = %d\n", 3302 IGC_READ_REG(&sc->hw, IGC_RDH(j)), 3303 IGC_READ_REG(&sc->hw, IGC_RDT(j))); 3304 } 3305 } 3306