/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001-2024, Intel Corporation
 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
 * Copyright (c) 2021-2024 Rubicon Communications, LLC (Netgate)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "if_igc.h"
#include <sys/sbuf.h>
#include <machine/_inttypes.h>

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select the devices to attach to.
 *  The last entry must be all zeros.
 *
 *  { Vendor ID, Device ID, String }
 *********************************************************************/

static const pci_vendor_info_t igc_vendor_info_array[] =
{
    /* Intel(R) I225/I226 2.5G Ethernet Controllers - igc */
    PVID(0x8086, IGC_DEV_ID_I225_LM, "Intel(R) Ethernet Controller I225-LM"),
    PVID(0x8086, IGC_DEV_ID_I225_V, "Intel(R) Ethernet Controller I225-V"),
    PVID(0x8086, IGC_DEV_ID_I225_K, "Intel(R) Ethernet Controller I225-K"),
    PVID(0x8086, IGC_DEV_ID_I225_I, "Intel(R) Ethernet Controller I225-I"),
    PVID(0x8086, IGC_DEV_ID_I220_V, "Intel(R) Ethernet Controller I220-V"),
    PVID(0x8086, IGC_DEV_ID_I225_K2, "Intel(R) Ethernet Controller I225-K(2)"),
    PVID(0x8086, IGC_DEV_ID_I225_LMVP, "Intel(R) Ethernet Controller I225-LMvP(2)"),
    PVID(0x8086, IGC_DEV_ID_I226_K, "Intel(R) Ethernet Controller I226-K"),
    PVID(0x8086, IGC_DEV_ID_I226_LMVP, "Intel(R) Ethernet Controller I226-LMvP"),
    PVID(0x8086, IGC_DEV_ID_I225_IT, "Intel(R) Ethernet Controller I225-IT(2)"),
    PVID(0x8086, IGC_DEV_ID_I226_LM, "Intel(R) Ethernet Controller I226-LM"),
    PVID(0x8086, IGC_DEV_ID_I226_V, "Intel(R) Ethernet Controller I226-V"),
    PVID(0x8086, IGC_DEV_ID_I226_IT, "Intel(R) Ethernet Controller I226-IT"),
    PVID(0x8086, IGC_DEV_ID_I221_V, "Intel(R) Ethernet Controller I221-V"),
    PVID(0x8086, IGC_DEV_ID_I226_BLANK_NVM, "Intel(R) Ethernet Controller I226(blankNVM)"),
    PVID(0x8086, IGC_DEV_ID_I225_BLANK_NVM, "Intel(R) Ethernet Controller I225(blankNVM)"),
    /* required last entry */
    PVID_END
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static void *igc_register(device_t);
static int igc_if_attach_pre(if_ctx_t);
static int igc_if_attach_post(if_ctx_t);
static int igc_if_detach(if_ctx_t);
static int igc_if_shutdown(if_ctx_t);
static int igc_if_suspend(if_ctx_t);
static int igc_if_resume(if_ctx_t);

static int igc_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int igc_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void igc_if_queues_free(if_ctx_t);

static uint64_t igc_if_get_counter(if_ctx_t, ift_counter);
static void igc_if_init(if_ctx_t);
static void igc_if_stop(if_ctx_t);
static void igc_if_media_status(if_ctx_t, struct ifmediareq *);
static int igc_if_media_change(if_ctx_t);
static int igc_if_mtu_set(if_ctx_t, uint32_t);
static void igc_if_timer(if_ctx_t, uint16_t);
static void igc_if_watchdog_reset(if_ctx_t);
static bool igc_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static void igc_identify_hardware(if_ctx_t);
static int igc_allocate_pci_resources(if_ctx_t);
static void igc_free_pci_resources(if_ctx_t);
static void igc_reset(if_ctx_t);
static int igc_setup_interface(if_ctx_t);
static int igc_setup_msix(if_ctx_t);

static void igc_initialize_transmit_unit(if_ctx_t);
static void igc_initialize_receive_unit(if_ctx_t);

static void igc_if_intr_enable(if_ctx_t);
static void igc_if_intr_disable(if_ctx_t);
static int igc_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int igc_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
static void igc_if_multi_set(if_ctx_t);
static void igc_if_update_admin_status(if_ctx_t);
static void igc_if_debug(if_ctx_t);
static void igc_update_stats_counters(struct igc_softc *);
static void igc_add_hw_stats(struct igc_softc *);
static int igc_if_set_promisc(if_ctx_t, int);
static void igc_setup_vlan_hw_support(if_ctx_t);
static void igc_fw_version(struct igc_softc *);
static void igc_sbuf_fw_version(struct igc_fw_version *, struct sbuf *);
static void igc_print_fw_version(struct igc_softc *);
static int igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
static int igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
static void igc_print_nvm_info(struct igc_softc *);
static int igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int igc_get_rs(SYSCTL_HANDLER_ARGS);
static void igc_print_debug_info(struct igc_softc *);
static int igc_is_valid_ether_addr(u8 *);
static void igc_neweitr(struct igc_softc *, struct igc_rx_queue *,
    struct tx_ring *, struct rx_ring *);

/* Management and WOL Support */
static void igc_get_hw_control(struct igc_softc *);
static void igc_release_hw_control(struct igc_softc *);
static void igc_get_wakeup(if_ctx_t);
static void igc_enable_wakeup(if_ctx_t);

int igc_intr(void *);

/* MSI-X handlers */
static int igc_if_msix_intr_assign(if_ctx_t, int);
static int igc_msix_link(void *);
static void igc_handle_link(void *context);

static int igc_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int igc_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int igc_sysctl_eee(SYSCTL_HANDLER_ARGS);

static int igc_get_regs(SYSCTL_HANDLER_ARGS);

static void igc_configure_queues(struct igc_softc *);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/
static device_method_t igc_methods[] = {
    /* Device interface */
    DEVMETHOD(device_register, igc_register),
    DEVMETHOD(device_probe, iflib_device_probe),
    DEVMETHOD(device_attach, iflib_device_attach),
    DEVMETHOD(device_detach, iflib_device_detach),
    DEVMETHOD(device_shutdown, iflib_device_shutdown),
    DEVMETHOD(device_suspend, iflib_device_suspend),
    DEVMETHOD(device_resume, iflib_device_resume),
    DEVMETHOD_END
};

static driver_t igc_driver = {
    "igc", igc_methods, sizeof(struct igc_softc),
};

DRIVER_MODULE(igc, pci, igc_driver, 0, 0);

MODULE_DEPEND(igc, pci, 1, 1, 1);
MODULE_DEPEND(igc, ether, 1, 1, 1);
MODULE_DEPEND(igc, iflib, 1, 1, 1);

IFLIB_PNP_INFO(pci, igc, igc_vendor_info_array);

static device_method_t igc_if_methods[] = {
    DEVMETHOD(ifdi_attach_pre, igc_if_attach_pre),
    DEVMETHOD(ifdi_attach_post, igc_if_attach_post),
    DEVMETHOD(ifdi_detach, igc_if_detach),
    DEVMETHOD(ifdi_shutdown, igc_if_shutdown),
    DEVMETHOD(ifdi_suspend, igc_if_suspend),
    DEVMETHOD(ifdi_resume, igc_if_resume),
    DEVMETHOD(ifdi_init, igc_if_init),
    DEVMETHOD(ifdi_stop, igc_if_stop),
    DEVMETHOD(ifdi_msix_intr_assign, igc_if_msix_intr_assign),
    DEVMETHOD(ifdi_intr_enable, igc_if_intr_enable),
    DEVMETHOD(ifdi_intr_disable, igc_if_intr_disable),
    DEVMETHOD(ifdi_tx_queues_alloc, igc_if_tx_queues_alloc),
    DEVMETHOD(ifdi_rx_queues_alloc, igc_if_rx_queues_alloc),
    DEVMETHOD(ifdi_queues_free, igc_if_queues_free),
    DEVMETHOD(ifdi_update_admin_status, igc_if_update_admin_status),
    DEVMETHOD(ifdi_multi_set, igc_if_multi_set),
    DEVMETHOD(ifdi_media_status, igc_if_media_status),
    DEVMETHOD(ifdi_media_change, igc_if_media_change),
    DEVMETHOD(ifdi_mtu_set, igc_if_mtu_set),
    DEVMETHOD(ifdi_promisc_set, igc_if_set_promisc),
    DEVMETHOD(ifdi_timer, igc_if_timer),
    DEVMETHOD(ifdi_watchdog_reset, igc_if_watchdog_reset),
    DEVMETHOD(ifdi_get_counter, igc_if_get_counter),
    DEVMETHOD(ifdi_rx_queue_intr_enable, igc_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queue_intr_enable, igc_if_tx_queue_intr_enable),
    DEVMETHOD(ifdi_debug, igc_if_debug),
    DEVMETHOD(ifdi_needs_restart, igc_if_needs_restart),
    DEVMETHOD_END
};

static driver_t igc_if_driver = {
    "igc_if", igc_if_methods, sizeof(struct igc_softc)
};
/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO 0
#endif

static SYSCTL_NODE(_hw, OID_AUTO, igc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "igc driver parameters");

static int igc_disable_crc_stripping = 0;
SYSCTL_INT(_hw_igc, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
    &igc_disable_crc_stripping, 0, "Disable CRC Stripping");

static int igc_smart_pwr_down = false;
SYSCTL_INT(_hw_igc, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN,
    &igc_smart_pwr_down, 0,
    "Set to true to leave smart power down enabled on newer adapters");

/* Controls whether promiscuous mode also shows bad packets */
static int igc_debug_sbp = true;
SYSCTL_INT(_hw_igc, OID_AUTO, sbp, CTLFLAG_RDTUN, &igc_debug_sbp, 0,
    "Show bad packets in promiscuous mode");

/* Energy Efficient Ethernet - default to OFF (1 = disabled) */
static int igc_eee_setting = 1;
SYSCTL_INT(_hw_igc, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &igc_eee_setting, 0,
    "Disable Energy Efficient Ethernet (1 = disabled)");

/*
 * AIM: Adaptive Interrupt Moderation
 * The interrupt rate is varied over time based on the
 * traffic for that interrupt vector.
 */
static int igc_enable_aim = 1;
SYSCTL_INT(_hw_igc, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &igc_enable_aim,
    0, "Enable adaptive interrupt moderation (1=normal, 2=lowlatency)");

/*
 * Tunable interrupt rate
 */
static int igc_max_interrupt_rate = IGC_INTS_DEFAULT;
SYSCTL_INT(_hw_igc, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &igc_max_interrupt_rate, 0, "Maximum interrupts per second");
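/*
 * The CTLFLAG_RDTUN/CTLFLAG_RWTUN knobs above can be set from
 * loader.conf(5); for example (values illustrative, not recommendations):
 *
 *     hw.igc.max_interrupt_rate="8000"
 *     hw.igc.enable_aim="2"
 *
 * enable_aim is CTLFLAG_RWTUN and may also be changed at runtime with
 * sysctl(8).
 */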
extern struct if_txrx igc_txrx;

static struct if_shared_ctx igc_sctx_init = {
    .isc_magic = IFLIB_MAGIC,
    .isc_q_align = PAGE_SIZE,
    .isc_tx_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tx_maxsegsize = PAGE_SIZE,
    .isc_tso_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tso_maxsegsize = IGC_TSO_SEG_SIZE,
    .isc_rx_maxsize = MAX_JUMBO_FRAME_SIZE,
    .isc_rx_nsegments = 1,
    .isc_rx_maxsegsize = MJUM9BYTES,
    .isc_nfl = 1,
    .isc_nrxqs = 1,
    .isc_ntxqs = 1,
    .isc_admin_intrcnt = 1,
    .isc_vendor_info = igc_vendor_info_array,
    .isc_driver_version = "1",
    .isc_driver = &igc_if_driver,
    .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,

    .isc_nrxd_min = {IGC_MIN_RXD},
    .isc_ntxd_min = {IGC_MIN_TXD},
    .isc_nrxd_max = {IGC_MAX_RXD},
    .isc_ntxd_max = {IGC_MAX_TXD},
    .isc_nrxd_default = {IGC_DEFAULT_RXD},
    .isc_ntxd_default = {IGC_DEFAULT_TXD},
};

/*****************************************************************
 *
 *  Dump Registers
 *
 ****************************************************************/
#define IGC_REGS_LEN 739

static int igc_get_regs(SYSCTL_HANDLER_ARGS)
{
    struct igc_softc *sc = (struct igc_softc *)arg1;
    struct igc_hw *hw = &sc->hw;
    struct sbuf *sb;
    u32 *regs_buff;
    int rc;

    regs_buff = malloc(sizeof(u32) * IGC_REGS_LEN, M_DEVBUF, M_WAITOK);
    memset(regs_buff, 0, IGC_REGS_LEN * sizeof(u32));

    rc = sysctl_wire_old_buffer(req, 0);
    MPASS(rc == 0);
    if (rc != 0) {
        free(regs_buff, M_DEVBUF);
        return (rc);
    }

    sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req);
    MPASS(sb != NULL);
    if (sb == NULL) {
        free(regs_buff, M_DEVBUF);
        return (ENOMEM);
    }

    /* General Registers */
    regs_buff[0] = IGC_READ_REG(hw, IGC_CTRL);
    regs_buff[1] = IGC_READ_REG(hw, IGC_STATUS);
    regs_buff[2] = IGC_READ_REG(hw, IGC_CTRL_EXT);
    regs_buff[3] = IGC_READ_REG(hw, IGC_ICR);
    regs_buff[4] = IGC_READ_REG(hw, IGC_RCTL);
    regs_buff[5] = IGC_READ_REG(hw, IGC_RDLEN(0));
    regs_buff[6] = IGC_READ_REG(hw, IGC_RDH(0));
    regs_buff[7] = IGC_READ_REG(hw, IGC_RDT(0));
    regs_buff[8] = IGC_READ_REG(hw, IGC_RXDCTL(0));
    regs_buff[9] = IGC_READ_REG(hw, IGC_RDBAL(0));
    regs_buff[10] = IGC_READ_REG(hw, IGC_RDBAH(0));
    regs_buff[11] = IGC_READ_REG(hw, IGC_TCTL);
    regs_buff[12] = IGC_READ_REG(hw, IGC_TDBAL(0));
    regs_buff[13] = IGC_READ_REG(hw, IGC_TDBAH(0));
    regs_buff[14] = IGC_READ_REG(hw, IGC_TDLEN(0));
    regs_buff[15] = IGC_READ_REG(hw, IGC_TDH(0));
    regs_buff[16] = IGC_READ_REG(hw, IGC_TDT(0));
    regs_buff[17] = IGC_READ_REG(hw, IGC_TXDCTL(0));

    sbuf_printf(sb, "General Registers\n");
    sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
    sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]);
    sbuf_printf(sb, "\tCTRL_EXT\t %08x\n\n", regs_buff[2]);

    sbuf_printf(sb, "Interrupt Registers\n");
    sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);

    sbuf_printf(sb, "RX Registers\n");
    sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
    sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]);
    sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]);
    sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
    sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]);
    sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]);
    sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]);

    sbuf_printf(sb, "TX Registers\n");
    sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
    sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]);
    sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]);
    sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
    sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]);
    sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]);
    sbuf_printf(sb, "\tTXDCTL\t %08x\n\n", regs_buff[17]);

    free(regs_buff, M_DEVBUF);

#ifdef DUMP_DESCS
    {
        if_softc_ctx_t scctx = sc->shared;
        /* Dump the first queue pair only. */
        struct rx_ring *rxr = &sc->rx_queues[0].rxr;
        struct tx_ring *txr = &sc->tx_queues[0].txr;
        int ntxd = scctx->isc_ntxd[0];
        int nrxd = scctx->isc_nrxd[0];
        int j;

        for (j = 0; j < nrxd; j++) {
            u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error);
            u32 length = le32toh(rxr->rx_base[j].wb.upper.length);
            sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64
                " Error:%u Length:%u\n",
                j, rxr->rx_base[j].read.buffer_addr, staterr, length);
        }

        for (j = 0; j < min(ntxd, 256); j++) {
            unsigned int *ptr = (unsigned int *)&txr->tx_base[j];

            sbuf_printf(sb,
                "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x\n",
                j, ptr[0], ptr[1], ptr[2], ptr[3]);
        }
    }
#endif

    rc = sbuf_finish(sb);
    sbuf_delete(sb);
    return (rc);
}
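/*
 * The dump above is reachable through the per-device sysctl tree created
 * in igc_if_attach_pre(); e.g. for unit 0:
 *
 *     # sysctl dev.igc.0.reg_dump
 */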
static void *
igc_register(device_t dev)
{
    return (&igc_sctx_init);
}

static int
igc_set_num_queues(if_ctx_t ctx)
{
    /* The I225/I226 family supports up to four queue pairs. */
    return (4);
}

#define IGC_CAPS \
    IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \
    IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_TSO4 | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6 | IFCAP_TSO6

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
igc_if_attach_pre(if_ctx_t ctx)
{
    struct igc_softc *sc;
    if_softc_ctx_t scctx;
    device_t dev;
    struct igc_hw *hw;
    int error = 0;

    INIT_DEBUGOUT("igc_if_attach_pre: begin");
    dev = iflib_get_dev(ctx);
    sc = iflib_get_softc(ctx);

    sc->ctx = sc->osdep.ctx = ctx;
    sc->dev = sc->osdep.dev = dev;
    scctx = sc->shared = iflib_get_softc_ctx(ctx);
    sc->media = iflib_get_media(ctx);
    hw = &sc->hw;

    /* SYSCTL stuff */
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "nvm", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, igc_sysctl_nvm_info, "I", "NVM Information");

    sc->enable_aim = igc_enable_aim;
    SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_aim", CTLFLAG_RW,
        &sc->enable_aim, 0,
        "Interrupt Moderation (1=normal, 2=lowlatency)");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
        sc, 0, igc_sysctl_print_fw_version, "A",
        "Prints FW/NVM Versions");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, igc_sysctl_debug_info, "I", "Debug Information");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, igc_set_flowcntl, "I", "Flow Control");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "reg_dump",
        CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
        igc_get_regs, "A", "Dump Registers");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "rs_dump",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
        igc_get_rs, "I", "Dump RS indexes");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "dmac",
        CTLTYPE_INT | CTLFLAG_RW, sc, 0,
        igc_sysctl_dmac, "I", "DMA Coalesce");

    /* Determine hardware and mac info */
    igc_identify_hardware(ctx);

    scctx->isc_tx_nsegments = IGC_MAX_SCATTER;
    scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max =
        igc_set_num_queues(ctx);
    if (bootverbose)
        device_printf(dev, "attach_pre capping queues at %d\n",
            scctx->isc_ntxqsets_max);

    scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] *
        sizeof(union igc_adv_tx_desc), IGC_DBA_ALIGN);
    scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] *
        sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN);
    scctx->isc_txd_size[0] = sizeof(union igc_adv_tx_desc);
    scctx->isc_rxd_size[0] = sizeof(union igc_adv_rx_desc);
    scctx->isc_txrx = &igc_txrx;
    scctx->isc_tx_tso_segments_max = IGC_MAX_SCATTER;
    scctx->isc_tx_tso_size_max = IGC_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = IGC_TSO_SEG_SIZE;
    scctx->isc_capabilities = scctx->isc_capenable = IGC_CAPS;
    scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO |
        CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_SCTP | CSUM_IP6_SCTP;

    /*
     * Some new devices, as with ixgbe, may now use a different BAR,
     * so we need to keep track of which one is used.
     */
    scctx->isc_msix_bar = PCIR_BAR(IGC_MSIX_BAR);
    if (pci_read_config(dev, scctx->isc_msix_bar, 4) == 0)
        scctx->isc_msix_bar += 4;

    /* Setup PCI resources */
    if (igc_allocate_pci_resources(ctx)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        error = ENXIO;
        goto err_pci;
    }

    /* Do Shared Code initialization */
    error = igc_setup_init_funcs(hw, true);
    if (error) {
        device_printf(dev, "Setup of Shared code failed, error %d\n",
            error);
        error = ENXIO;
        goto err_pci;
    }

    igc_setup_msix(ctx);
    igc_get_bus_info(hw);

    hw->mac.autoneg = DO_AUTO_NEG;
    hw->phy.autoneg_wait_to_complete = false;
    hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

    /* Copper options */
    if (hw->phy.media_type == igc_media_type_copper) {
        hw->phy.mdix = AUTO_ALL_MODES;
    }

    /*
     * Set the frame limits assuming
     * standard ethernet sized frames.
     */
    scctx->isc_max_frame_size = sc->hw.mac.max_frame_size =
        ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;

    /* Allocate multicast array memory. */
    sc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
        MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    if (sc->mta == NULL) {
        device_printf(dev, "Can not allocate multicast setup array\n");
        error = ENOMEM;
        goto err_late;
    }

    /* Check SOL/IDER usage */
    if (igc_check_reset_block(hw))
        device_printf(dev, "PHY reset is blocked"
            " due to SOL/IDER session.\n");

    /* Sysctl for setting Energy Efficient Ethernet */
    sc->hw.dev_spec._i225.eee_disable = igc_eee_setting;
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "eee_control",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, igc_sysctl_eee, "I",
        "Disable Energy Efficient Ethernet");

    /*
     * Start from a known state; this is important for
     * reading the NVM and MAC address afterwards.
     */
    igc_reset_hw(hw);

    /* Make sure we have a good EEPROM before we read from it */
    if (igc_validate_nvm_checksum(hw) < 0) {
        /*
         * Some PCI-E parts fail the first check due to the link
         * being in sleep state; call it again, and if it fails a
         * second time it's a real issue.
         */
        if (igc_validate_nvm_checksum(hw) < 0) {
            device_printf(dev,
                "The EEPROM Checksum Is Not Valid\n");
            error = EIO;
            goto err_late;
        }
    }

    /* Copy the permanent MAC address out of the EEPROM */
    if (igc_read_mac_addr(hw) < 0) {
        device_printf(dev, "EEPROM read error while reading MAC"
            " address\n");
        error = EIO;
        goto err_late;
    }

    if (!igc_is_valid_ether_addr(hw->mac.addr)) {
        device_printf(dev, "Invalid MAC address\n");
        error = EIO;
        goto err_late;
    }

    /* Save the EEPROM/NVM versions */
    igc_fw_version(sc);

    igc_print_fw_version(sc);

    /*
     * Get Wake-on-Lan and Management info for later use
     */
    igc_get_wakeup(ctx);

    /* Enable only WOL MAGIC by default */
    scctx->isc_capenable &= ~IFCAP_WOL;
    if (sc->wol != 0)
        scctx->isc_capenable |= IFCAP_WOL_MAGIC;

    iflib_set_mac(ctx, hw->mac.addr);

    return (0);

err_late:
    igc_release_hw_control(sc);
err_pci:
    igc_free_pci_resources(ctx);
    free(sc->mta, M_DEVBUF);

    return (error);
}

static int
igc_if_attach_post(if_ctx_t ctx)
{
    struct igc_softc *sc = iflib_get_softc(ctx);
    struct igc_hw *hw = &sc->hw;
    int error = 0;

    /* Setup OS specific network interface */
    error = igc_setup_interface(ctx);
    if (error != 0) {
        goto err_late;
    }

    igc_reset(ctx);

    /* Initialize statistics */
    igc_update_stats_counters(sc);
    hw->mac.get_link_status = true;
    igc_if_update_admin_status(ctx);
    igc_add_hw_stats(sc);

    /* The driver can now take control from firmware */
    igc_get_hw_control(sc);

    INIT_DEBUGOUT("igc_if_attach_post: end");

    return (error);

err_late:
    igc_release_hw_control(sc);
    igc_free_pci_resources(ctx);
    igc_if_queues_free(ctx);
    free(sc->mta, M_DEVBUF);

    return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
igc_if_detach(if_ctx_t ctx)
{
    struct igc_softc *sc = iflib_get_softc(ctx);

    INIT_DEBUGOUT("igc_if_detach: begin");

    igc_phy_hw_reset(&sc->hw);

    igc_release_hw_control(sc);
    igc_free_pci_resources(ctx);

    return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
igc_if_shutdown(if_ctx_t ctx)
{
    return igc_if_suspend(ctx);
}

/*
 * Suspend/resume device methods.
 */
static int
igc_if_suspend(if_ctx_t ctx)
{
    struct igc_softc *sc = iflib_get_softc(ctx);

    igc_release_hw_control(sc);
    igc_enable_wakeup(ctx);
    return (0);
}

static int
igc_if_resume(if_ctx_t ctx)
{
    igc_if_init(ctx);

    return (0);
}

static int
igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
    int max_frame_size;
    struct igc_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);

    IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

    /* 9K Jumbo Frame size */
    max_frame_size = 9234;

    if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
        return (EINVAL);
    }

    scctx->isc_max_frame_size = sc->hw.mac.max_frame_size =
        mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
    return (0);
}
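/*
 * Worked example: with max_frame_size = 9234 the largest MTU accepted
 * above is 9234 - ETHER_HDR_LEN (14) - ETHER_CRC_LEN (4) = 9216 bytes.
 */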
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as an
 *  init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 **********************************************************************/
static void
igc_if_init(if_ctx_t ctx)
{
    struct igc_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = sc->shared;
    if_t ifp = iflib_get_ifp(ctx);
    struct igc_tx_queue *tx_que;
    int i;

    INIT_DEBUGOUT("igc_if_init: begin");

    /* Get the latest mac address; the user can use a LAA */
    bcopy(if_getlladdr(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

    /* Put the address into the Receive Address Array */
    igc_rar_set(&sc->hw, sc->hw.mac.addr, 0);

    /* Initialize the hardware */
    igc_reset(ctx);
    igc_if_update_admin_status(ctx);

    for (i = 0, tx_que = sc->tx_queues; i < sc->tx_num_queues;
        i++, tx_que++) {
        struct tx_ring *txr = &tx_que->txr;

        txr->tx_rs_cidx = txr->tx_rs_pidx;

        /*
         * Initialize the last processed descriptor to be the end of
         * the ring, rather than the start, so that we avoid an
         * off-by-one error when calculating how many descriptors are
         * done in the credits_update function.
         */
        txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
    }

    /* Setup VLAN support, basic and offload if available */
    IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN);

    /* Prepare transmit descriptors and buffers */
    igc_initialize_transmit_unit(ctx);

    /* Setup Multicast table */
    igc_if_multi_set(ctx);

    sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
    igc_initialize_receive_unit(ctx);

    /* Set up VLAN support */
    igc_setup_vlan_hw_support(ctx);

    /* Don't lose promiscuous settings */
    igc_if_set_promisc(ctx, if_getflags(ifp));
    igc_clear_hw_cntrs_base_generic(&sc->hw);

    if (sc->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
        igc_configure_queues(sc);

    /* This clears any pending interrupts */
    IGC_READ_REG(&sc->hw, IGC_ICR);
    IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC);

    /* The driver can now take control from firmware */
    igc_get_hw_control(sc);

    /* Set Energy Efficient Ethernet */
    igc_set_eee_i225(&sc->hw, true, true, true);
}

enum eitr_latency_target {
    eitr_latency_disabled = 0,
    eitr_latency_lowest = 1,
    eitr_latency_low = 2,
    eitr_latency_bulk = 3
};

/*********************************************************************
 *
 *  Helper to calculate next EITR value for AIM
 *
 *********************************************************************/
static void
igc_neweitr(struct igc_softc *sc, struct igc_rx_queue *que,
    struct tx_ring *txr, struct rx_ring *rxr)
{
    struct igc_hw *hw = &sc->hw;
    u32 neweitr;
    u32 bytes = 0;
    u32 bytes_packets = 0;
    u32 packets = 0;
    u8 nextlatency;

    /* Idle, do nothing */
    if ((txr->tx_bytes == 0) && (rxr->rx_bytes == 0))
        return;

    neweitr = 0;

    if (sc->enable_aim) {
        nextlatency = rxr->rx_nextlatency;

        /* Use half default (4K) ITR if sub-gig */
        if (sc->link_speed < 1000) {
            neweitr = IGC_INTS_4K;
            goto igc_set_next_eitr;
        }
        /* Want at least enough packet buffer for two frames to AIM */
        if (sc->shared->isc_max_frame_size * 2 > (sc->pba << 10)) {
            neweitr = igc_max_interrupt_rate;
            sc->enable_aim = 0;
            goto igc_set_next_eitr;
        }

        /* Get the largest values from the associated tx and rx ring */
        if (txr->tx_bytes && txr->tx_packets) {
            bytes = txr->tx_bytes;
            bytes_packets = txr->tx_bytes / txr->tx_packets;
            packets = txr->tx_packets;
        }
        if (rxr->rx_bytes && rxr->rx_packets) {
            bytes = max(bytes, rxr->rx_bytes);
            bytes_packets = max(bytes_packets,
                rxr->rx_bytes / rxr->rx_packets);
            packets = max(packets, rxr->rx_packets);
        }

        /* Latency state machine */
        switch (nextlatency) {
        case eitr_latency_disabled: /* Bootstrapping */
            nextlatency = eitr_latency_low;
            break;
        case eitr_latency_lowest: /* 70k ints/s */
            /* TSO and jumbo frames */
            if (bytes_packets > 8000)
                nextlatency = eitr_latency_bulk;
            else if ((packets < 5) && (bytes > 512))
                nextlatency = eitr_latency_low;
            break;
        case eitr_latency_low: /* 20k ints/s */
            if (bytes > 10000) {
                /* Handle TSO */
                if (bytes_packets > 8000)
                    nextlatency = eitr_latency_bulk;
                else if ((packets < 10) || (bytes_packets > 1200))
                    nextlatency = eitr_latency_bulk;
                else if (packets > 35)
                    nextlatency = eitr_latency_lowest;
            } else if (bytes_packets > 2000) {
                nextlatency = eitr_latency_bulk;
            } else if (packets < 3 && bytes < 512) {
                nextlatency = eitr_latency_lowest;
            }
            break;
        case eitr_latency_bulk: /* 4k ints/s */
            if (bytes > 25000) {
                if (packets > 35)
                    nextlatency = eitr_latency_low;
            } else if (bytes < 1500)
                nextlatency = eitr_latency_low;
            break;
        default:
            nextlatency = eitr_latency_low;
            device_printf(sc->dev,
                "Unexpected neweitr transition %d\n", nextlatency);
            break;
        }

        /* Trim eitr_latency_lowest for default AIM setting */
        if (sc->enable_aim == 1 && nextlatency == eitr_latency_lowest)
            nextlatency = eitr_latency_low;

        /* Request new latency */
        rxr->rx_nextlatency = nextlatency;
    } else {
        /* We may have toggled to AIM disabled */
        nextlatency = eitr_latency_disabled;
        rxr->rx_nextlatency = nextlatency;
    }

    /* ITR state machine */
    switch (nextlatency) {
    case eitr_latency_lowest:
        neweitr = IGC_INTS_70K;
        break;
    case eitr_latency_low:
        neweitr = IGC_INTS_20K;
        break;
    case eitr_latency_bulk:
        neweitr = IGC_INTS_4K;
        break;
    case eitr_latency_disabled:
    default:
        neweitr = igc_max_interrupt_rate;
        break;
    }

igc_set_next_eitr:
    neweitr = IGC_INTS_TO_EITR(neweitr);

    neweitr |= IGC_EITR_CNT_IGNR;

    if (neweitr != que->eitr_setting) {
        que->eitr_setting = neweitr;
        IGC_WRITE_REG(hw, IGC_EITR(que->msix), que->eitr_setting);
    }
}
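/*
 * The latency targets above map to interrupt rates as follows:
 * lowest -> 70k, low -> 20k, bulk -> 4k ints/s, with AIM disabled
 * falling back to igc_max_interrupt_rate.  IGC_INTS_TO_EITR() converts
 * an ints/s target into the EITR interval encoding; IGC_EITR_CNT_IGNR
 * is assumed here to make the new interval take effect immediately
 * rather than after the current countdown expires.
 */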
/*********************************************************************
 *
 *  Fast Legacy/MSI Combined Interrupt Service routine
 *
 *********************************************************************/
int
igc_intr(void *arg)
{
    struct igc_softc *sc = arg;
    struct igc_hw *hw = &sc->hw;
    struct igc_rx_queue *que = &sc->rx_queues[0];
    struct tx_ring *txr = &sc->tx_queues[0].txr;
    struct rx_ring *rxr = &que->rxr;
    if_ctx_t ctx = sc->ctx;
    u32 reg_icr;

    reg_icr = IGC_READ_REG(hw, IGC_ICR);

    /* Hot eject? */
    if (reg_icr == 0xffffffff)
        return (FILTER_STRAY);

    /* Definitely not our interrupt. */
    if (reg_icr == 0x0)
        return (FILTER_STRAY);

    if ((reg_icr & IGC_ICR_INT_ASSERTED) == 0)
        return (FILTER_STRAY);

    /*
     * Only MSI-X interrupts have one-shot behavior by taking advantage
     * of the EIAC register.  Thus, explicitly disable interrupts.  This
     * also works around the MSI message reordering errata on certain
     * systems.
     */
    IFDI_INTR_DISABLE(ctx);

    /* Link status change */
    if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC))
        igc_handle_link(ctx);

    if (reg_icr & IGC_ICR_RXO)
        sc->rx_overruns++;

    igc_neweitr(sc, que, txr, rxr);

    /* Reset state */
    txr->tx_bytes = 0;
    txr->tx_packets = 0;
    rxr->rx_bytes = 0;
    rxr->rx_packets = 0;

    return (FILTER_SCHEDULE_THREAD);
}

static int
igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
    struct igc_softc *sc = iflib_get_softc(ctx);
    struct igc_rx_queue *rxq = &sc->rx_queues[rxqid];

    IGC_WRITE_REG(&sc->hw, IGC_EIMS, rxq->eims);
    return (0);
}

static int
igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
    struct igc_softc *sc = iflib_get_softc(ctx);
    struct igc_tx_queue *txq = &sc->tx_queues[txqid];

    IGC_WRITE_REG(&sc->hw, IGC_EIMS, txq->eims);
    return (0);
}

/*********************************************************************
 *
 *  MSI-X RX Interrupt Service routine
 *
 **********************************************************************/
static int
igc_msix_que(void *arg)
{
    struct igc_rx_queue *que = arg;
    struct igc_softc *sc = que->sc;
    struct tx_ring *txr = &sc->tx_queues[que->msix].txr;
    struct rx_ring *rxr = &que->rxr;

    ++que->irqs;

    igc_neweitr(sc, que, txr, rxr);

    /* Reset state */
    txr->tx_bytes = 0;
    txr->tx_packets = 0;
    rxr->rx_bytes = 0;
    rxr->rx_packets = 0;

    return (FILTER_SCHEDULE_THREAD);
}

/*********************************************************************
 *
 *  MSI-X Link Fast Interrupt Service routine
 *
 **********************************************************************/
static int
igc_msix_link(void *arg)
{
    struct igc_softc *sc = arg;
    u32 reg_icr;

    ++sc->link_irq;
    MPASS(sc->hw.back != NULL);
    reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);

    if (reg_icr & IGC_ICR_RXO)
        sc->rx_overruns++;

    if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
        igc_handle_link(sc->ctx);
    }

    IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC);
    IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->link_mask);

    return (FILTER_HANDLED);
}

static void
igc_handle_link(void *context)
{
    if_ctx_t ctx = context;
    struct igc_softc *sc = iflib_get_softc(ctx);

    sc->hw.mac.get_link_status = true;
    iflib_admin_intr_deferred(ctx);
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
igc_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
    struct igc_softc *sc = iflib_get_softc(ctx);

    INIT_DEBUGOUT("igc_if_media_status: begin");

    iflib_admin_intr_deferred(ctx);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->link_active) {
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (sc->link_speed) {
    case 10:
        ifmr->ifm_active |= IFM_10_T;
        break;
    case 100:
        ifmr->ifm_active |= IFM_100_TX;
        break;
    case 1000:
        ifmr->ifm_active |= IFM_1000_T;
        break;
    case 2500:
        ifmr->ifm_active |= IFM_2500_T;
        break;
    }

    if (sc->link_duplex == FULL_DUPLEX)
        ifmr->ifm_active |= IFM_FDX;
    else
        ifmr->ifm_active |= IFM_HDX;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
igc_if_media_change(if_ctx_t ctx)
{
    struct igc_softc *sc = iflib_get_softc(ctx);
    struct ifmedia *ifm = iflib_get_media(ctx);

    INIT_DEBUGOUT("igc_if_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    sc->hw.mac.autoneg = DO_AUTO_NEG;

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
        break;
    case IFM_2500_T:
        sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
        break;
    case IFM_1000_T:
        sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
        break;
    case IFM_100_TX:
        if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
            sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
        else
            sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
        break;
    case IFM_10_T:
        if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
            sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
        else
            sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
        break;
    default:
        device_printf(sc->dev, "Unsupported media type\n");
        return (EINVAL);
    }

    igc_if_init(ctx);

    return (0);
}

static int
igc_if_set_promisc(if_ctx_t ctx, int flags)
{
    struct igc_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);
    u32 reg_rctl;
    int mcnt = 0;

    reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
    reg_rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_UPE);
    if (flags & IFF_ALLMULTI)
        mcnt = MAX_NUM_MULTICAST_ADDRESSES;
    else
        mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);

    /* Don't disable if in MAX groups */
    if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
        reg_rctl &= (~IGC_RCTL_MPE);
    IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);

    if (flags & IFF_PROMISC) {
        reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
        /* Turn this on if you want to see bad packets */
        if (igc_debug_sbp)
            reg_rctl |= IGC_RCTL_SBP;
        IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
    } else if (flags & IFF_ALLMULTI) {
        reg_rctl |= IGC_RCTL_MPE;
        reg_rctl &= ~IGC_RCTL_UPE;
        IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
    }
    return (0);
}
static u_int
igc_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
    u8 *mta = arg;

    if (idx == MAX_NUM_MULTICAST_ADDRESSES)
        return (0);

    bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN);

    return (1);
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/

static void
igc_if_multi_set(if_ctx_t ctx)
{
    struct igc_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);
    u8 *mta; /* Multicast array memory */
    u32 reg_rctl = 0;
    int mcnt = 0;

    IOCTL_DEBUGOUT("igc_set_multi: begin");

    mta = sc->mta;
    bzero(mta, sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

    mcnt = if_foreach_llmaddr(ifp, igc_copy_maddr, mta);

    reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);

    if (if_getflags(ifp) & IFF_PROMISC) {
        reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
        /* Turn this on if you want to see bad packets */
        if (igc_debug_sbp)
            reg_rctl |= IGC_RCTL_SBP;
    } else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
        if_getflags(ifp) & IFF_ALLMULTI) {
        reg_rctl |= IGC_RCTL_MPE;
        reg_rctl &= ~IGC_RCTL_UPE;
    } else
        reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE);

    if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
        igc_update_mc_addr_list(&sc->hw, mta, mcnt);

    IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine schedules igc_if_update_admin_status() to check for
 *  link status and to gather statistics as well as to perform some
 *  controller-specific hardware patting.
 *
 **********************************************************************/
static void
igc_if_timer(if_ctx_t ctx, uint16_t qid)
{

    if (qid != 0)
        return;

    iflib_admin_intr_deferred(ctx);
}

static void
igc_if_update_admin_status(if_ctx_t ctx)
{
    struct igc_softc *sc = iflib_get_softc(ctx);
    struct igc_hw *hw = &sc->hw;
    device_t dev = iflib_get_dev(ctx);
    u32 link_check, thstat, ctrl;

    link_check = thstat = ctrl = 0;
    /* Get the cached link value or read phy for real */
    switch (hw->phy.media_type) {
    case igc_media_type_copper:
        if (hw->mac.get_link_status == true) {
            /* Do the work to read phy */
            igc_check_for_link(hw);
            link_check = !hw->mac.get_link_status;
        } else
            link_check = true;
        break;
    case igc_media_type_unknown:
        igc_check_for_link(hw);
        link_check = !hw->mac.get_link_status;
        /* FALLTHROUGH */
    default:
        break;
    }

    /* Now check for a transition */
    if (link_check && (sc->link_active == 0)) {
        igc_get_speed_and_duplex(hw, &sc->link_speed,
            &sc->link_duplex);
        if (bootverbose)
            device_printf(dev, "Link is up %d Mbps %s\n",
                sc->link_speed,
                ((sc->link_duplex == FULL_DUPLEX) ?
                "Full Duplex" : "Half Duplex"));
1361 "Full Duplex" : "Half Duplex")); 1362 sc->link_active = 1; 1363 iflib_link_state_change(ctx, LINK_STATE_UP, 1364 IF_Mbps(sc->link_speed)); 1365 } else if (!link_check && (sc->link_active == 1)) { 1366 sc->link_speed = 0; 1367 sc->link_duplex = 0; 1368 sc->link_active = 0; 1369 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); 1370 } 1371 igc_update_stats_counters(sc); 1372 } 1373 1374 static void 1375 igc_if_watchdog_reset(if_ctx_t ctx) 1376 { 1377 struct igc_softc *sc = iflib_get_softc(ctx); 1378 1379 /* 1380 * Just count the event; iflib(4) will already trigger a 1381 * sufficient reset of the controller. 1382 */ 1383 sc->watchdog_events++; 1384 } 1385 1386 /********************************************************************* 1387 * 1388 * This routine disables all traffic on the adapter by issuing a 1389 * global reset on the MAC. 1390 * 1391 **********************************************************************/ 1392 static void 1393 igc_if_stop(if_ctx_t ctx) 1394 { 1395 struct igc_softc *sc = iflib_get_softc(ctx); 1396 1397 INIT_DEBUGOUT("igc_if_stop: begin"); 1398 1399 igc_reset_hw(&sc->hw); 1400 IGC_WRITE_REG(&sc->hw, IGC_WUC, 0); 1401 } 1402 1403 /********************************************************************* 1404 * 1405 * Determine hardware revision. 1406 * 1407 **********************************************************************/ 1408 static void 1409 igc_identify_hardware(if_ctx_t ctx) 1410 { 1411 device_t dev = iflib_get_dev(ctx); 1412 struct igc_softc *sc = iflib_get_softc(ctx); 1413 1414 /* Make sure our PCI config space has the necessary stuff set */ 1415 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 1416 1417 /* Save off the information about this board */ 1418 sc->hw.vendor_id = pci_get_vendor(dev); 1419 sc->hw.device_id = pci_get_device(dev); 1420 sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 1421 sc->hw.subsystem_vendor_id = 1422 pci_read_config(dev, PCIR_SUBVEND_0, 2); 1423 sc->hw.subsystem_device_id = 1424 pci_read_config(dev, PCIR_SUBDEV_0, 2); 1425 1426 /* Do Shared Code Init and Setup */ 1427 if (igc_set_mac_type(&sc->hw)) { 1428 device_printf(dev, "Setup init failure\n"); 1429 return; 1430 } 1431 } 1432 1433 static int 1434 igc_allocate_pci_resources(if_ctx_t ctx) 1435 { 1436 struct igc_softc *sc = iflib_get_softc(ctx); 1437 device_t dev = iflib_get_dev(ctx); 1438 int rid; 1439 1440 rid = PCIR_BAR(0); 1441 sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1442 &rid, RF_ACTIVE); 1443 if (sc->memory == NULL) { 1444 device_printf(dev, "Unable to allocate bus resource: memory\n"); 1445 return (ENXIO); 1446 } 1447 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory); 1448 sc->osdep.mem_bus_space_handle = 1449 rman_get_bushandle(sc->memory); 1450 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle; 1451 1452 sc->hw.back = &sc->osdep; 1453 1454 return (0); 1455 } 1456 1457 /********************************************************************* 1458 * 1459 * Set up the MSI-X Interrupt handlers 1460 * 1461 **********************************************************************/ 1462 static int 1463 igc_if_msix_intr_assign(if_ctx_t ctx, int msix) 1464 { 1465 struct igc_softc *sc = iflib_get_softc(ctx); 1466 struct igc_rx_queue *rx_que = sc->rx_queues; 1467 struct igc_tx_queue *tx_que = sc->tx_queues; 1468 int error, rid, i, vector = 0, rx_vectors; 1469 char buf[16]; 1470 1471 /* First set up ring resources */ 1472 for (i = 0; i < sc->rx_num_queues; i++, rx_que++, vector++) { 1473 rid = vector + 1; 1474 snprintf(buf, 
        error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
            IFLIB_INTR_RXTX, igc_msix_que, rx_que, rx_que->me, buf);
        if (error) {
            device_printf(iflib_get_dev(ctx),
                "Failed to allocate que int %d err: %d", i, error);
            sc->rx_num_queues = i + 1;
            goto fail;
        }

        rx_que->msix = vector;

        /*
         * Set the bit to enable interrupt
         * in IGC_IMS -- bits 20 and 21
         * are for RX0 and RX1, note this has
         * NOTHING to do with the MSI-X vector
         */
        rx_que->eims = 1 << vector;
    }
    rx_vectors = vector;

    vector = 0;
    for (i = 0; i < sc->tx_num_queues; i++, tx_que++, vector++) {
        snprintf(buf, sizeof(buf), "txq%d", i);
        iflib_softirq_alloc_generic(ctx,
            &sc->rx_queues[i % sc->rx_num_queues].que_irq,
            IFLIB_INTR_TX, tx_que, tx_que->me, buf);

        tx_que->msix = (vector % sc->rx_num_queues);

        /*
         * Set the bit to enable interrupt
         * in IGC_IMS -- bits 22 and 23
         * are for TX0 and TX1, note this has
         * NOTHING to do with the MSI-X vector
         */
        tx_que->eims = 1 << i;
    }

    /* Link interrupt */
    rid = rx_vectors + 1;
    error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, IFLIB_INTR_ADMIN,
        igc_msix_link, sc, 0, "aq");

    if (error) {
        device_printf(iflib_get_dev(ctx),
            "Failed to register admin handler");
        goto fail;
    }
    sc->linkvec = rx_vectors;
    return (0);
fail:
    iflib_irq_free(ctx, &sc->irq);
    rx_que = sc->rx_queues;
    for (int i = 0; i < sc->rx_num_queues; i++, rx_que++)
        iflib_irq_free(ctx, &rx_que->que_irq);
    return (error);
}

static void
igc_configure_queues(struct igc_softc *sc)
{
    struct igc_hw *hw = &sc->hw;
    struct igc_rx_queue *rx_que;
    struct igc_tx_queue *tx_que;
    u32 ivar = 0, newitr = 0;

    /* First turn on RSS capability */
    IGC_WRITE_REG(hw, IGC_GPIE,
        IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME | IGC_GPIE_PBA |
        IGC_GPIE_NSICR);

    /* Turn on MSI-X */
    /* RX entries */
    for (int i = 0; i < sc->rx_num_queues; i++) {
        u32 index = i >> 1;
        ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
        rx_que = &sc->rx_queues[i];
        if (i & 1) {
            ivar &= 0xFF00FFFF;
            ivar |= (rx_que->msix | IGC_IVAR_VALID) << 16;
        } else {
            ivar &= 0xFFFFFF00;
            ivar |= rx_que->msix | IGC_IVAR_VALID;
        }
        IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
    }
    /* TX entries */
    for (int i = 0; i < sc->tx_num_queues; i++) {
        u32 index = i >> 1;
        ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
        tx_que = &sc->tx_queues[i];
        if (i & 1) {
            ivar &= 0x00FFFFFF;
            ivar |= (tx_que->msix | IGC_IVAR_VALID) << 24;
        } else {
            ivar &= 0xFFFF00FF;
            ivar |= (tx_que->msix | IGC_IVAR_VALID) << 8;
        }
        IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
        sc->que_mask |= tx_que->eims;
    }

    /* And for the link interrupt */
    ivar = (sc->linkvec | IGC_IVAR_VALID) << 8;
    sc->link_mask = 1 << sc->linkvec;
    IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);

    /* Set the starting interrupt rate */
    if (igc_max_interrupt_rate > 0)
        newitr = IGC_INTS_TO_EITR(igc_max_interrupt_rate);

    newitr |= IGC_EITR_CNT_IGNR;

    for (int i = 0; i < sc->rx_num_queues; i++) {
        rx_que = &sc->rx_queues[i];
        IGC_WRITE_REG(hw, IGC_EITR(rx_que->msix), newitr);
    }
}
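/*
 * Worked example for the IVAR packing above, assuming IGC_IVAR_VALID is
 * the 0x80 "entry valid" bit: queues 0 and 1 share IVAR register 0, with
 * RX queue 0 in bits 7:0, TX queue 0 in bits 15:8, RX queue 1 in bits
 * 23:16 and TX queue 1 in bits 31:24.  Two RX/TX pairs on MSI-X vectors
 * 0 and 1 would therefore program IVAR0 = 0x81818080.
 */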
static void
igc_free_pci_resources(if_ctx_t ctx)
{
    struct igc_softc *sc = iflib_get_softc(ctx);
    struct igc_rx_queue *que = sc->rx_queues;
    device_t dev = iflib_get_dev(ctx);

    /* Release all MSI-X queue resources */
    if (sc->intr_type == IFLIB_INTR_MSIX)
        iflib_irq_free(ctx, &sc->irq);

    for (int i = 0; i < sc->rx_num_queues; i++, que++) {
        iflib_irq_free(ctx, &que->que_irq);
    }

    if (sc->memory != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY,
            rman_get_rid(sc->memory), sc->memory);
        sc->memory = NULL;
    }

    if (sc->flash != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY,
            rman_get_rid(sc->flash), sc->flash);
        sc->flash = NULL;
    }

    if (sc->ioport != NULL) {
        bus_release_resource(dev, SYS_RES_IOPORT,
            rman_get_rid(sc->ioport), sc->ioport);
        sc->ioport = NULL;
    }
}

/* Set up MSI or MSI-X */
static int
igc_setup_msix(if_ctx_t ctx)
{
    return (0);
}

/*********************************************************************
 *
 *  Initialize the DMA Coalescing feature
 *
 **********************************************************************/
static void
igc_init_dmac(struct igc_softc *sc, u32 pba)
{
    device_t dev = sc->dev;
    struct igc_hw *hw = &sc->hw;
    u32 dmac, reg = ~IGC_DMACR_DMAC_EN;
    u16 hwm;
    u16 max_frame_size;
    int status;

    max_frame_size = sc->shared->isc_max_frame_size;

    if (sc->dmac == 0) { /* Disabling it */
        IGC_WRITE_REG(hw, IGC_DMACR, reg);
        return;
    } else
        device_printf(dev, "DMA Coalescing enabled\n");

    /* Set starting threshold */
    IGC_WRITE_REG(hw, IGC_DMCTXTH, 0);

    hwm = 64 * pba - max_frame_size / 16;
    if (hwm < 64 * (pba - 6))
        hwm = 64 * (pba - 6);
    reg = IGC_READ_REG(hw, IGC_FCRTC);
    reg &= ~IGC_FCRTC_RTH_COAL_MASK;
    reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT)
        & IGC_FCRTC_RTH_COAL_MASK);
    IGC_WRITE_REG(hw, IGC_FCRTC, reg);

    dmac = pba - max_frame_size / 512;
    if (dmac < pba - 10)
        dmac = pba - 10;
    reg = IGC_READ_REG(hw, IGC_DMACR);
    reg &= ~IGC_DMACR_DMACTHR_MASK;
    reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT)
        & IGC_DMACR_DMACTHR_MASK);

    /* Transition to L0s or L1 if available. */
    reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK);

    /*
     * Check whether status indicates a 2.5Gb backplane connection
     * before configuring the watchdog timer: the timer takes msec
     * values in 12.8usec intervals on a 2.5Gb connection and in
     * 32usec intervals otherwise.
     */
    status = IGC_READ_REG(hw, IGC_STATUS);
    if ((status & IGC_STATUS_2P5_SKU) &&
        (!(status & IGC_STATUS_2P5_SKU_OVER)))
        reg |= ((sc->dmac * 5) >> 6);
    else
        reg |= (sc->dmac >> 5);

    IGC_WRITE_REG(hw, IGC_DMACR, reg);

    IGC_WRITE_REG(hw, IGC_DMCRTRH, 0);

    /* Set the interval before transition */
    reg = IGC_READ_REG(hw, IGC_DMCTLX);
    reg |= IGC_DMCTLX_DCFLUSH_DIS;

    /*
     * On a 2.5Gb connection the TTLX unit is 0.4 usec, so the same
     * 4 usec delay is encoded as 4 / 0.4 = 0xA.
     */
    status = IGC_READ_REG(hw, IGC_STATUS);
    if ((status & IGC_STATUS_2P5_SKU) &&
        (!(status & IGC_STATUS_2P5_SKU_OVER)))
        reg |= 0xA;
    else
        reg |= 0x4;

    IGC_WRITE_REG(hw, IGC_DMCTLX, reg);

    /* Free space in tx packet buffer to wake from DMA coal */
    IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE -
        (2 * max_frame_size)) >> 6);

    /* Make low power state decision controlled by DMA coal */
    reg = IGC_READ_REG(hw, IGC_PCIEMISC);
    reg &= ~IGC_PCIEMISC_LX_DECISION;
    IGC_WRITE_REG(hw, IGC_PCIEMISC, reg);
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  softc structure.
 *
 **********************************************************************/
static void
igc_reset(if_ctx_t ctx)
{
    device_t dev = iflib_get_dev(ctx);
    struct igc_softc *sc = iflib_get_softc(ctx);
    struct igc_hw *hw = &sc->hw;
    u32 rx_buffer_size;
    u32 pba;

    INIT_DEBUGOUT("igc_reset: begin");
    /* Let the firmware know the OS is in control */
    igc_get_hw_control(sc);

    /*
     * Packet Buffer Allocation (PBA)
     * Writing PBA sets the receive portion of the buffer;
     * the remainder is used for the transmit buffer.
     */
    pba = IGC_PBA_34K;

    INIT_DEBUGOUT1("igc_reset: pba=%dK", pba);

    /*
     * These parameters control the automatic generation (Tx) and
     * response (Rx) to Ethernet PAUSE frames.
     * - High water mark should allow for at least two frames to be
     *   received after sending an XOFF.
     * - Low water mark works best when it is very near the high water
     *   mark.  This allows the receiver to restart by sending XON when
     *   it has drained a bit.  Here we use an arbitrary value of 1500
     *   which will restart after one full frame is pulled from the
     *   buffer.  There could be several smaller frames in the buffer
     *   and if so they will not trigger the XON until their total
     *   number reduces the buffer by 1500.
     * - The pause time is fairly large at 1000 x 512ns = 512 usec.
     */
    rx_buffer_size = (pba & 0xffff) << 10;
    hw->fc.high_water = rx_buffer_size -
        roundup2(sc->hw.mac.max_frame_size, 1024);
    /* 16-byte granularity */
    hw->fc.low_water = hw->fc.high_water - 16;

    if (sc->fc) /* locally set flow control value? */
        hw->fc.requested_mode = sc->fc;
    else
        hw->fc.requested_mode = igc_fc_full;

    hw->fc.pause_time = IGC_FC_PAUSE_TIME;

    hw->fc.send_xon = true;

    /* Issue a global reset */
    igc_reset_hw(hw);
    IGC_WRITE_REG(hw, IGC_WUC, 0);

    /* and a re-init */
    if (igc_init_hw(hw) < 0) {
        device_printf(dev, "Hardware Initialization Failed\n");
        return;
    }

    /* Setup DMA Coalescing */
    igc_init_dmac(sc, pba);

    /* Save the final PBA off if it needs to be used elsewhere i.e. AIM */
    sc->pba = pba;

    IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
    igc_get_phy_info(hw);
    igc_check_for_link(hw);
}
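/*
 * Worked example for the watermarks above, assuming the default
 * 1518-byte max frame: rx_buffer_size = 34 << 10 = 34816 bytes,
 * high_water = 34816 - roundup2(1518, 1024) = 32768, and
 * low_water = 32768 - 16 = 32752.
 */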
/*
 * Initialise the RSS mapping for NICs that support multiple transmit/
 * receive rings.
 */

#define RSSKEYLEN 10
static void
igc_initialize_rss_mapping(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	int i;
	int queue_id;
	u32 reta;
	u32 rss_key[RSSKEYLEN], mrqc, shift = 0;

	/*
	 * The redirection table controls which destination
	 * queue each bucket redirects traffic to.
	 * Each DWORD represents four queues, with the LSB
	 * being the first queue in the DWORD.
	 *
	 * This just allocates buckets to queues using round-robin
	 * allocation.
	 *
	 * NOTE: It Just Happens to line up with the default
	 * RSS allocation method.
	 */

	/* Warning FM follows */
	reta = 0;
	for (i = 0; i < 128; i++) {
#ifdef RSS
		queue_id = rss_get_indirection_to_bucket(i);
		/*
		 * If we have more queues than buckets, we'll
		 * end up mapping buckets to a subset of the
		 * queues.
		 *
		 * If we have more buckets than queues, we'll
		 * end up instead assigning multiple buckets
		 * to queues.
		 *
		 * Both are suboptimal, but we need to handle
		 * the case so we don't go out of bounds
		 * indexing arrays and such.
		 */
		queue_id = queue_id % sc->rx_num_queues;
#else
		queue_id = (i % sc->rx_num_queues);
#endif
		/* Adjust if required */
		queue_id = queue_id << shift;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Now fill in hash table */

	/*
	 * MRQC: Multiple Receive Queues Command
	 * Set queuing to RSS control, number depends on the device.
	 */
	mrqc = IGC_MRQC_ENABLE_RSS_4Q;

#ifdef RSS
	/* XXX ew typecasting */
	rss_getkey((uint8_t *)&rss_key);
#else
	arc4rand(&rss_key, sizeof(rss_key), 0);
#endif
	for (i = 0; i < RSSKEYLEN; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]);

	/*
	 * Configure the RSS fields to hash upon.
	 */
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 |
	    IGC_MRQC_RSS_FIELD_IPV4_TCP);
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 |
	    IGC_MRQC_RSS_FIELD_IPV6_TCP);
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV4_UDP |
	    IGC_MRQC_RSS_FIELD_IPV6_UDP);
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV6_UDP_EX |
	    IGC_MRQC_RSS_FIELD_IPV6_TCP_EX);

	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
}
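/*
 * To make the RETA packing above concrete: with four RX queues and no
 * RSS option, the first DWORD written covers hash buckets 0-3 and works
 * out to 0x03020100 (bucket 0 -> queue 0 in the low byte, up to
 * bucket 3 -> queue 3 in the high byte), and the same pattern repeats
 * across all 32 RETA registers. Illustrative values, assuming
 * shift == 0 as set above.
 */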
1903 * 1904 **********************************************************************/ 1905 static int 1906 igc_setup_interface(if_ctx_t ctx) 1907 { 1908 if_t ifp = iflib_get_ifp(ctx); 1909 struct igc_softc *sc = iflib_get_softc(ctx); 1910 if_softc_ctx_t scctx = sc->shared; 1911 1912 INIT_DEBUGOUT("igc_setup_interface: begin"); 1913 1914 /* Single Queue */ 1915 if (sc->tx_num_queues == 1) { 1916 if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1); 1917 if_setsendqready(ifp); 1918 } 1919 1920 /* 1921 * Specify the media types supported by this adapter and register 1922 * callbacks to update media and link information 1923 */ 1924 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 1925 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); 1926 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 1927 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); 1928 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 1929 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); 1930 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL); 1931 1932 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1933 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO); 1934 return (0); 1935 } 1936 1937 static int 1938 igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) 1939 { 1940 struct igc_softc *sc = iflib_get_softc(ctx); 1941 if_softc_ctx_t scctx = sc->shared; 1942 int error = IGC_SUCCESS; 1943 struct igc_tx_queue *que; 1944 int i, j; 1945 1946 MPASS(sc->tx_num_queues > 0); 1947 MPASS(sc->tx_num_queues == ntxqsets); 1948 1949 /* First allocate the top level queue structs */ 1950 if (!(sc->tx_queues = 1951 (struct igc_tx_queue *) malloc(sizeof(struct igc_tx_queue) * 1952 sc->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { 1953 device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n"); 1954 return(ENOMEM); 1955 } 1956 1957 for (i = 0, que = sc->tx_queues; i < sc->tx_num_queues; i++, que++) { 1958 /* Set up some basics */ 1959 1960 struct tx_ring *txr = &que->txr; 1961 txr->sc = que->sc = sc; 1962 que->me = txr->me = i; 1963 1964 /* Allocate report status array */ 1965 if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) { 1966 device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n"); 1967 error = ENOMEM; 1968 goto fail; 1969 } 1970 for (j = 0; j < scctx->isc_ntxd[0]; j++) 1971 txr->tx_rsq[j] = QIDX_INVALID; 1972 /* get the virtual and physical address of the hardware queues */ 1973 txr->tx_base = (struct igc_tx_desc *)vaddrs[i*ntxqs]; 1974 txr->tx_paddr = paddrs[i*ntxqs]; 1975 } 1976 1977 if (bootverbose) 1978 device_printf(iflib_get_dev(ctx), 1979 "allocated for %d tx_queues\n", sc->tx_num_queues); 1980 return (0); 1981 fail: 1982 igc_if_queues_free(ctx); 1983 return (error); 1984 } 1985 1986 static int 1987 igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) 1988 { 1989 struct igc_softc *sc = iflib_get_softc(ctx); 1990 int error = IGC_SUCCESS; 1991 struct igc_rx_queue *que; 1992 int i; 1993 1994 MPASS(sc->rx_num_queues > 0); 1995 MPASS(sc->rx_num_queues == nrxqsets); 1996 1997 /* First allocate the top level queue structs */ 1998 if (!(sc->rx_queues = 1999 (struct igc_rx_queue *) malloc(sizeof(struct igc_rx_queue) * 2000 sc->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { 2001 device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n"); 2002 error = ENOMEM; 2003 goto fail; 2004 } 2005 2006 for (i 
static int
igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	int error = IGC_SUCCESS;
	struct igc_rx_queue *que;
	int i;

	MPASS(sc->rx_num_queues > 0);
	MPASS(sc->rx_num_queues == nrxqsets);

	/* First allocate the top level queue structs */
	if (!(sc->rx_queues =
	    (struct igc_rx_queue *) malloc(sizeof(struct igc_rx_queue) *
	    sc->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		/* Set up some basics */
		struct rx_ring *rxr = &que->rxr;
		rxr->sc = que->sc = sc;
		rxr->que = que;
		que->me = rxr->me = i;

		/* get the virtual and physical address of the hardware queues */
		rxr->rx_base = (union igc_rx_desc_extended *)vaddrs[i*nrxqs];
		rxr->rx_paddr = paddrs[i*nrxqs];
	}

	if (bootverbose)
		device_printf(iflib_get_dev(ctx),
		    "allocated for %d rx_queues\n", sc->rx_num_queues);

	return (0);
fail:
	igc_if_queues_free(ctx);
	return (error);
}

static void
igc_if_queues_free(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_tx_queue *tx_que = sc->tx_queues;
	struct igc_rx_queue *rx_que = sc->rx_queues;

	if (tx_que != NULL) {
		for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)
				break;

			free(txr->tx_rsq, M_DEVBUF);
			txr->tx_rsq = NULL;
		}
		free(sc->tx_queues, M_DEVBUF);
		sc->tx_queues = NULL;
	}

	if (rx_que != NULL) {
		free(sc->rx_queues, M_DEVBUF);
		sc->rx_queues = NULL;
	}

	igc_release_hw_control(sc);

	if (sc->mta != NULL) {
		free(sc->mta, M_DEVBUF);
	}
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
igc_initialize_transmit_unit(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct igc_tx_queue *que;
	struct tx_ring *txr;
	struct igc_hw *hw = &sc->hw;
	u32 tctl, txdctl = 0;

	INIT_DEBUGOUT("igc_initialize_transmit_unit: begin");

	for (int i = 0; i < sc->tx_num_queues; i++, txr++) {
		u64 bus_addr;
		caddr_t offp, endp;

		que = &sc->tx_queues[i];
		txr = &que->txr;
		bus_addr = txr->tx_paddr;

		/* Clear checksum offload context. */
		offp = (caddr_t)&txr->csum_flags;
		endp = (caddr_t)(txr + 1);
		bzero(offp, endp - offp);

		/* Base and Len of TX Ring */
		IGC_WRITE_REG(hw, IGC_TDLEN(i),
		    scctx->isc_ntxd[0] * sizeof(struct igc_tx_desc));
		IGC_WRITE_REG(hw, IGC_TDBAH(i),
		    (u32)(bus_addr >> 32));
		IGC_WRITE_REG(hw, IGC_TDBAL(i),
		    (u32)bus_addr);
		/* Init the HEAD/TAIL indices */
		IGC_WRITE_REG(hw, IGC_TDT(i), 0);
		IGC_WRITE_REG(hw, IGC_TDH(i), 0);

		HW_DEBUGOUT2("Base = %x, Length = %x\n",
		    IGC_READ_REG(&sc->hw, IGC_TDBAL(i)),
		    IGC_READ_REG(&sc->hw, IGC_TDLEN(i)));

		txdctl = 0;		/* clear txdctl */
		txdctl |= 0x1f;		/* PTHRESH */
		txdctl |= 1 << 8;	/* HTHRESH */
		txdctl |= 1 << 16;	/* WTHRESH */
		txdctl |= 1 << 22;	/* Reserved bit 22 must always be 1 */
		txdctl |= IGC_TXDCTL_GRAN;
		txdctl |= 1 << 25;	/* LWTHRESH */

		IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl);
	}

	/* Program the Transmit Control Register */
	tctl = IGC_READ_REG(&sc->hw, IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
	    (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl);
}
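/*
 * For reference, the TXDCTL value composed above is the same for every
 * TX queue and works out to
 * 0x1f | (1 << 8) | (1 << 16) | (1 << 22) | IGC_TXDCTL_GRAN | (1 << 25):
 * prefetch, host and write-back thresholds plus descriptor granularity,
 * with reserved bit 22 held at 1 as the hardware requires.
 */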
/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IGC_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
igc_initialize_receive_unit(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	if_t ifp = iflib_get_ifp(ctx);
	struct igc_hw *hw = &sc->hw;
	struct igc_rx_queue *que;
	int i;
	u32 psize, rctl, rxcsum, srrctl = 0;

	INIT_DEBUGOUT("igc_initialize_receive_units: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = IGC_READ_REG(hw, IGC_RCTL);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM |
	    IGC_RCTL_LBM_NO | IGC_RCTL_RDMTS_HALF |
	    (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* Do not store bad packets */
	rctl &= ~IGC_RCTL_SBP;

	/* Enable Long Packet receive */
	if (if_getmtu(ifp) > ETHERMTU)
		rctl |= IGC_RCTL_LPE;
	else
		rctl &= ~IGC_RCTL_LPE;

	/* Strip the CRC */
	if (!igc_disable_crc_stripping)
		rctl |= IGC_RCTL_SECRC;

	rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
	if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
		rxcsum |= IGC_RXCSUM_CRCOFL;
		if (sc->tx_num_queues > 1)
			rxcsum |= IGC_RXCSUM_PCSD;
		else
			rxcsum |= IGC_RXCSUM_IPPCSE;
	} else {
		if (sc->tx_num_queues > 1)
			rxcsum |= IGC_RXCSUM_PCSD;
		else
			rxcsum &= ~IGC_RXCSUM_TUOFL;
	}
	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);

	if (sc->rx_num_queues > 1)
		igc_initialize_rss_mapping(sc);

	if (if_getmtu(ifp) > ETHERMTU) {
		psize = scctx->isc_max_frame_size;
		/* are we on a vlan? */
		if (if_vlantrunkinuse(ifp))
			psize += VLAN_TAG_SIZE;
		IGC_WRITE_REG(&sc->hw, IGC_RLPML, psize);
	}

	/* Set maximum packet buffer len */
	srrctl |= (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IGC_SRRCTL_BSIZEPKT_SHIFT;
	/* srrctl above overrides this but set the register to a sane value */
	rctl |= IGC_RCTL_SZ_2048;

	/*
	 * If TX flow control is disabled and there's >1 queue defined,
	 * enable DROP.
	 *
	 * This drops frames rather than hanging the RX MAC for all queues.
	 */
	if ((sc->rx_num_queues > 1) &&
	    (sc->fc == igc_fc_none ||
	     sc->fc == igc_fc_rx_pause)) {
		srrctl |= IGC_SRRCTL_DROP_EN;
	}
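	/*
	 * The BSIZEPKT math above rounds the RX mbuf size up to the
	 * SRRCTL buffer granularity. Assuming the usual 1KB granularity
	 * (IGC_SRRCTL_BSIZEPKT_SHIFT == 10), a 2048-byte mbuf gives
	 * (2048 + 1023) >> 10 = 2 (2KB buffers) and a 4096-byte mbuf
	 * gives 4. Example values only; sc->rx_mbuf_sz is chosen by
	 * iflib.
	 */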
	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 bus_addr = rxr->rx_paddr;
		u32 rxdctl;

#ifdef notyet
		/* Configure for header split? -- ignore for now */
		rxr->hdr_split = igc_header_split;
#else
		srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
#endif

		IGC_WRITE_REG(hw, IGC_RDLEN(i),
		    scctx->isc_nrxd[0] * sizeof(struct igc_rx_desc));
		IGC_WRITE_REG(hw, IGC_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		IGC_WRITE_REG(hw, IGC_RDBAL(i),
		    (uint32_t)bus_addr);
		IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl);
		/* Setup the Head and Tail Descriptor Pointers */
		IGC_WRITE_REG(hw, IGC_RDH(i), 0);
		IGC_WRITE_REG(hw, IGC_RDT(i), 0);
		/* Enable this Queue */
		rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i));
		rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGC_RX_PTHRESH;
		rxdctl |= IGC_RX_HTHRESH << 8;
		rxdctl |= IGC_RX_WTHRESH << 16;
		IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl);
	}

	/* Make sure VLAN Filters are off */
	rctl &= ~IGC_RCTL_VFE;

	/* Write out the settings */
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
}

static void
igc_setup_vlan_hw_support(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_hw *hw = &sc->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	u32 reg;

	/* igc hardware doesn't seem to implement VFTA for HWFILTER */

	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING &&
	    !igc_disable_crc_stripping) {
		reg = IGC_READ_REG(hw, IGC_CTRL);
		reg |= IGC_CTRL_VME;
		IGC_WRITE_REG(hw, IGC_CTRL, reg);
	} else {
		reg = IGC_READ_REG(hw, IGC_CTRL);
		reg &= ~IGC_CTRL_VME;
		IGC_WRITE_REG(hw, IGC_CTRL, reg);
	}
}

static void
igc_if_intr_enable(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_hw *hw = &sc->hw;
	u32 mask;

	if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {
		mask = (sc->que_mask | sc->link_mask);
		IGC_WRITE_REG(hw, IGC_EIAC, mask);
		IGC_WRITE_REG(hw, IGC_EIAM, mask);
		IGC_WRITE_REG(hw, IGC_EIMS, mask);
		IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC);
	} else
		IGC_WRITE_REG(hw, IGC_IMS, IMS_ENABLE_MASK);
	IGC_WRITE_FLUSH(hw);
}

static void
igc_if_intr_disable(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_hw *hw = &sc->hw;

	if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {
		IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff);
		IGC_WRITE_REG(hw, IGC_EIAC, 0);
	}
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
	IGC_WRITE_FLUSH(hw);
}

/*
 * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded. For AMT version type f/w
 * this means that the network i/f is open.
 */
static void
igc_get_hw_control(struct igc_softc *sc)
{
	u32 ctrl_ext;

	if (sc->vf_ifp)
		return;

	ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT,
	    ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}
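/*
 * igc_get_hw_control() and igc_release_hw_control() are paired: the
 * DRV_LOAD bit is taken in igc_reset() and dropped again in
 * igc_if_queues_free(), so the firmware can always tell whether the
 * host driver is active.
 */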
/*
 * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is no longer loaded. For AMT versions of the
 * f/w this means that the network i/f is closed.
 */
static void
igc_release_hw_control(struct igc_softc *sc)
{
	u32 ctrl_ext;

	ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT,
	    ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

static int
igc_is_valid_ether_addr(u8 *addr)
{
	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
		return (false);
	}

	return (true);
}

/*
** Parse the interface capabilities with regard
** to both system management and wake-on-lan for
** later use.
*/
static void
igc_get_wakeup(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	u16 eeprom_data = 0, apme_mask;

	apme_mask = IGC_WUC_APME;
	eeprom_data = IGC_READ_REG(&sc->hw, IGC_WUC);

	if (eeprom_data & apme_mask)
		sc->wol = IGC_WUFC_LNKC;
}

/*
 * Enable PCI Wake On Lan capability
 */
static void
igc_enable_wakeup(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int error = 0;
	u32 pmc, ctrl, rctl;
	u16 status;

	if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0)
		return;

	/*
	 * Determine type of Wakeup: note that wol
	 * is set with all bits on by default.
	 */
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0)
		sc->wol &= ~IGC_WUFC_MAG;

	if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0)
		sc->wol &= ~IGC_WUFC_EX;

	if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0)
		sc->wol &= ~IGC_WUFC_MC;
	else {
		rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
		rctl |= IGC_RCTL_MPE;
		IGC_WRITE_REG(&sc->hw, IGC_RCTL, rctl);
	}

	if (!(sc->wol & (IGC_WUFC_EX | IGC_WUFC_MAG | IGC_WUFC_MC)))
		goto pme;

	/* Advertise the wakeup capability */
	ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL);
	ctrl |= IGC_CTRL_ADVD3WUC;
	IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl);

	/* Enable wakeup by the MAC */
	IGC_WRITE_REG(&sc->hw, IGC_WUC, IGC_WUC_PME_EN);
	IGC_WRITE_REG(&sc->hw, IGC_WUFC, sc->wol);

pme:
	status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (!error && (if_getcapenable(ifp) & IFCAP_WOL))
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
}
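/*
 * The WUFC bits above track the interface WOL capability flags, so the
 * active wake-up filters can be chosen from userland, e.g.
 * (illustrative, assuming the standard ifconfig(8) WOL capabilities):
 *
 *	ifconfig igc0 wol_magic		# wake on magic packet only
 */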
/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
igc_update_stats_counters(struct igc_softc *sc)
{
	u64 prev_xoffrxc = sc->stats.xoffrxc;

	sc->stats.crcerrs += IGC_READ_REG(&sc->hw, IGC_CRCERRS);
	sc->stats.mpc += IGC_READ_REG(&sc->hw, IGC_MPC);
	sc->stats.scc += IGC_READ_REG(&sc->hw, IGC_SCC);
	sc->stats.ecol += IGC_READ_REG(&sc->hw, IGC_ECOL);

	sc->stats.mcc += IGC_READ_REG(&sc->hw, IGC_MCC);
	sc->stats.latecol += IGC_READ_REG(&sc->hw, IGC_LATECOL);
	sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_COLC);
	sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_RERC);
	sc->stats.dc += IGC_READ_REG(&sc->hw, IGC_DC);
	sc->stats.rlec += IGC_READ_REG(&sc->hw, IGC_RLEC);
	sc->stats.xonrxc += IGC_READ_REG(&sc->hw, IGC_XONRXC);
	sc->stats.xontxc += IGC_READ_REG(&sc->hw, IGC_XONTXC);
	sc->stats.xoffrxc += IGC_READ_REG(&sc->hw, IGC_XOFFRXC);
	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	if (sc->stats.xoffrxc != prev_xoffrxc)
		sc->shared->isc_pause_frames = 1;
	sc->stats.xofftxc += IGC_READ_REG(&sc->hw, IGC_XOFFTXC);
	sc->stats.fcruc += IGC_READ_REG(&sc->hw, IGC_FCRUC);
	sc->stats.prc64 += IGC_READ_REG(&sc->hw, IGC_PRC64);
	sc->stats.prc127 += IGC_READ_REG(&sc->hw, IGC_PRC127);
	sc->stats.prc255 += IGC_READ_REG(&sc->hw, IGC_PRC255);
	sc->stats.prc511 += IGC_READ_REG(&sc->hw, IGC_PRC511);
	sc->stats.prc1023 += IGC_READ_REG(&sc->hw, IGC_PRC1023);
	sc->stats.prc1522 += IGC_READ_REG(&sc->hw, IGC_PRC1522);
	sc->stats.tlpic += IGC_READ_REG(&sc->hw, IGC_TLPIC);
	sc->stats.rlpic += IGC_READ_REG(&sc->hw, IGC_RLPIC);
	sc->stats.gprc += IGC_READ_REG(&sc->hw, IGC_GPRC);
	sc->stats.bprc += IGC_READ_REG(&sc->hw, IGC_BPRC);
	sc->stats.mprc += IGC_READ_REG(&sc->hw, IGC_MPRC);
	sc->stats.gptc += IGC_READ_REG(&sc->hw, IGC_GPTC);
	/*
	 * For the 64-bit byte counters the low dword must be read first;
	 * both registers clear on the read of the high dword.
	 */
	sc->stats.gorc += IGC_READ_REG(&sc->hw, IGC_GORCL) +
	    ((u64)IGC_READ_REG(&sc->hw, IGC_GORCH) << 32);
	sc->stats.gotc += IGC_READ_REG(&sc->hw, IGC_GOTCL) +
	    ((u64)IGC_READ_REG(&sc->hw, IGC_GOTCH) << 32);

	sc->stats.rnbc += IGC_READ_REG(&sc->hw, IGC_RNBC);
	sc->stats.ruc += IGC_READ_REG(&sc->hw, IGC_RUC);
	sc->stats.rfc += IGC_READ_REG(&sc->hw, IGC_RFC);
	sc->stats.roc += IGC_READ_REG(&sc->hw, IGC_ROC);
	sc->stats.rjc += IGC_READ_REG(&sc->hw, IGC_RJC);

	sc->stats.mgprc += IGC_READ_REG(&sc->hw, IGC_MGTPRC);
	sc->stats.mgpdc += IGC_READ_REG(&sc->hw, IGC_MGTPDC);
	sc->stats.mgptc += IGC_READ_REG(&sc->hw, IGC_MGTPTC);

	sc->stats.tor += IGC_READ_REG(&sc->hw, IGC_TORH);
	sc->stats.tot += IGC_READ_REG(&sc->hw, IGC_TOTH);

	sc->stats.tpr += IGC_READ_REG(&sc->hw, IGC_TPR);
	sc->stats.tpt += IGC_READ_REG(&sc->hw, IGC_TPT);
	sc->stats.ptc64 += IGC_READ_REG(&sc->hw, IGC_PTC64);
	sc->stats.ptc127 += IGC_READ_REG(&sc->hw, IGC_PTC127);
	sc->stats.ptc255 += IGC_READ_REG(&sc->hw, IGC_PTC255);
	sc->stats.ptc511 += IGC_READ_REG(&sc->hw, IGC_PTC511);
	sc->stats.ptc1023 += IGC_READ_REG(&sc->hw, IGC_PTC1023);
	sc->stats.ptc1522 += IGC_READ_REG(&sc->hw, IGC_PTC1522);
	sc->stats.mptc += IGC_READ_REG(&sc->hw, IGC_MPTC);
	sc->stats.bptc += IGC_READ_REG(&sc->hw, IGC_BPTC);

	/* Interrupt Counts */
	sc->stats.iac += IGC_READ_REG(&sc->hw, IGC_IAC);
	sc->stats.rxdmtc += IGC_READ_REG(&sc->hw, IGC_RXDMTC);

	sc->stats.algnerrc += IGC_READ_REG(&sc->hw, IGC_ALGNERRC);
	sc->stats.tncrs += IGC_READ_REG(&sc->hw, IGC_TNCRS);
	sc->stats.htdpmc += IGC_READ_REG(&sc->hw, IGC_HTDPMC);
	sc->stats.tsctc += IGC_READ_REG(&sc->hw, IGC_TSCTC);
}

static uint64_t
igc_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_COLLISIONS:
		return (sc->stats.colc);
	case IFCOUNTER_IERRORS:
		return (sc->dropped_pkts + sc->stats.rxerrc +
		    sc->stats.crcerrs + sc->stats.algnerrc +
		    sc->stats.ruc + sc->stats.roc +
		    sc->stats.mpc + sc->stats.htdpmc);
	case IFCOUNTER_OERRORS:
		return (sc->stats.ecol + sc->stats.latecol +
		    sc->watchdog_events);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

/*
 * igc_if_needs_restart - Tell iflib when the driver needs to be
 * reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
igc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}
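/*
 * The handler below backs the read-only register sysctls registered in
 * igc_add_hw_stats(); they can be read from userland, e.g.
 * (illustrative paths, assuming unit 0):
 *
 *	sysctl dev.igc.0.device_control
 *	sysctl dev.igc.0.queue_tx_0.txd_head
 */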
/* Export a single 32-bit register via a read-only sysctl. */
static int
igc_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc;
	u_int val;

	sc = oidp->oid_arg1;
	val = IGC_READ_REG(&sc->hw, oidp->oid_arg2);
	return (sysctl_handle_int(oidp, &val, 0, req));
}

/* Per queue holdoff interrupt rate handler */
static int
igc_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
	struct igc_rx_queue *rque;
	struct igc_tx_queue *tque;
	struct igc_hw *hw;
	int error;
	u32 reg, usec, rate;

	bool tx = oidp->oid_arg2;

	if (tx) {
		tque = oidp->oid_arg1;
		hw = &tque->sc->hw;
		reg = IGC_READ_REG(hw, IGC_EITR(tque->me));
	} else {
		rque = oidp->oid_arg1;
		hw = &rque->sc->hw;
		reg = IGC_READ_REG(hw, IGC_EITR(rque->msix));
	}

	usec = (reg & IGC_QVECTOR_MASK);
	if (usec > 0)
		rate = IGC_INTS_TO_EITR(usec);
	else
		rate = 0;

	error = sysctl_handle_int(oidp, &rate, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}
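/*
 * The EITR register holds the per-vector interrupt holdoff interval;
 * the handler above masks out the interval field with IGC_QVECTOR_MASK
 * and converts it with IGC_INTS_TO_EITR() before reporting it, so the
 * sysctl reads back an approximate interrupt rate rather than raw
 * register units.
 */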
"txd_head", 2669 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2670 IGC_TDH(txr->me), igc_sysctl_reg_handler, "IU", 2671 "Transmit Descriptor Head"); 2672 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 2673 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2674 IGC_TDT(txr->me), igc_sysctl_reg_handler, "IU", 2675 "Transmit Descriptor Tail"); 2676 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", 2677 CTLFLAG_RD, &txr->tx_irq, 2678 "Queue MSI-X Transmit Interrupts"); 2679 } 2680 2681 for (int j = 0; j < sc->rx_num_queues; j++, rx_que++) { 2682 struct rx_ring *rxr = &rx_que->rxr; 2683 snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j); 2684 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 2685 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name"); 2686 queue_list = SYSCTL_CHILDREN(queue_node); 2687 2688 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 2689 CTLTYPE_UINT | CTLFLAG_RD, rx_que, 2690 false, igc_sysctl_interrupt_rate_handler, "IU", 2691 "Interrupt Rate"); 2692 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 2693 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2694 IGC_RDH(rxr->me), igc_sysctl_reg_handler, "IU", 2695 "Receive Descriptor Head"); 2696 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 2697 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2698 IGC_RDT(rxr->me), igc_sysctl_reg_handler, "IU", 2699 "Receive Descriptor Tail"); 2700 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq", 2701 CTLFLAG_RD, &rxr->rx_irq, 2702 "Queue MSI-X Receive Interrupts"); 2703 } 2704 2705 /* MAC stats get their own sub node */ 2706 2707 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 2708 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics"); 2709 stat_list = SYSCTL_CHILDREN(stat_node); 2710 2711 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll", 2712 CTLFLAG_RD, &stats->ecol, 2713 "Excessive collisions"); 2714 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll", 2715 CTLFLAG_RD, &stats->scc, 2716 "Single collisions"); 2717 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll", 2718 CTLFLAG_RD, &stats->mcc, 2719 "Multiple collisions"); 2720 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll", 2721 CTLFLAG_RD, &stats->latecol, 2722 "Late collisions"); 2723 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count", 2724 CTLFLAG_RD, &stats->colc, 2725 "Collision Count"); 2726 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors", 2727 CTLFLAG_RD, &sc->stats.symerrs, 2728 "Symbol Errors"); 2729 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors", 2730 CTLFLAG_RD, &sc->stats.sec, 2731 "Sequence Errors"); 2732 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count", 2733 CTLFLAG_RD, &sc->stats.dc, 2734 "Defer Count"); 2735 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets", 2736 CTLFLAG_RD, &sc->stats.mpc, 2737 "Missed Packets"); 2738 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_length_errors", 2739 CTLFLAG_RD, &sc->stats.rlec, 2740 "Receive Length Errors"); 2741 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", 2742 CTLFLAG_RD, &sc->stats.rnbc, 2743 "Receive No Buffers"); 2744 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize", 2745 CTLFLAG_RD, &sc->stats.ruc, 2746 "Receive Undersize"); 2747 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 2748 CTLFLAG_RD, &sc->stats.rfc, 2749 "Fragmented Packets Received "); 2750 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize", 2751 CTLFLAG_RD, &sc->stats.roc, 2752 "Oversized Packets Received"); 2753 
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
	    CTLFLAG_RD, &sc->stats.rjc,
	    "Received Jabber");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
	    CTLFLAG_RD, &sc->stats.rxerrc,
	    "Receive Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &sc->stats.crcerrs,
	    "CRC errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
	    CTLFLAG_RD, &sc->stats.algnerrc,
	    "Alignment Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &sc->stats.xonrxc,
	    "XON Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &sc->stats.xontxc,
	    "XON Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &sc->stats.xoffrxc,
	    "XOFF Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &sc->stats.xofftxc,
	    "XOFF Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "unsupported_fc_recvd",
	    CTLFLAG_RD, &sc->stats.fcruc,
	    "Unsupported Flow Control Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_recvd",
	    CTLFLAG_RD, &sc->stats.mgprc,
	    "Management Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_drop",
	    CTLFLAG_RD, &sc->stats.mgpdc,
	    "Management Packets Dropped");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_txd",
	    CTLFLAG_RD, &sc->stats.mgptc,
	    "Management Packets Transmitted");

	/* Packet Reception Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
	    CTLFLAG_RD, &sc->stats.tpr,
	    "Total Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
	    CTLFLAG_RD, &sc->stats.gprc,
	    "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
	    CTLFLAG_RD, &sc->stats.bprc,
	    "Broadcast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
	    CTLFLAG_RD, &sc->stats.mprc,
	    "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &sc->stats.prc64,
	    "64 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &sc->stats.prc127,
	    "65-127 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &sc->stats.prc255,
	    "128-255 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &sc->stats.prc511,
	    "256-511 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &sc->stats.prc1023,
	    "512-1023 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &sc->stats.prc1522,
	    "1024-1522 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
	    CTLFLAG_RD, &sc->stats.gorc,
	    "Good Octets Received");

	/* Packet Transmission Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &sc->stats.gotc,
	    "Good Octets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &sc->stats.tpt,
	    "Total Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &sc->stats.gptc,
	    "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &sc->stats.bptc,
	    "Broadcast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &sc->stats.mptc,
	    "Multicast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &sc->stats.ptc64,
	    "64 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &sc->stats.ptc127,
	    "65-127 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &sc->stats.ptc255,
	    "128-255 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &sc->stats.ptc511,
	    "256-511 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &sc->stats.ptc1023,
	    "512-1023 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &sc->stats.ptc1522,
	    "1024-1522 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
	    CTLFLAG_RD, &sc->stats.tsctc,
	    "TSO Contexts Transmitted");

	/* Interrupt Stats */
	int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics");
	int_list = SYSCTL_CHILDREN(int_node);

	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
	    CTLFLAG_RD, &sc->stats.iac,
	    "Interrupt Assertion Count");

	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
	    CTLFLAG_RD, &sc->stats.rxdmtc,
	    "Rx Desc Min Thresh Count");
}

static void
igc_fw_version(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	struct igc_fw_version *fw_ver = &sc->fw_ver;

	*fw_ver = (struct igc_fw_version){0};

	igc_get_fw_version(hw, fw_ver);
}

static void
igc_sbuf_fw_version(struct igc_fw_version *fw_ver, struct sbuf *buf)
{
	const char *space = "";

	if (fw_ver->eep_major || fw_ver->eep_minor || fw_ver->eep_build) {
		sbuf_printf(buf, "EEPROM V%d.%d-%d", fw_ver->eep_major,
		    fw_ver->eep_minor, fw_ver->eep_build);
		space = " ";
	}

	if (fw_ver->invm_major || fw_ver->invm_minor ||
	    fw_ver->invm_img_type) {
		sbuf_printf(buf, "%sNVM V%d.%d imgtype%d",
		    space, fw_ver->invm_major, fw_ver->invm_minor,
		    fw_ver->invm_img_type);
		space = " ";
	}

	if (fw_ver->or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
		    space, fw_ver->or_major, fw_ver->or_build,
		    fw_ver->or_patch);
		space = " ";
	}

	if (fw_ver->etrack_id)
		sbuf_printf(buf, "%seTrack 0x%08x", space, fw_ver->etrack_id);
}

static void
igc_print_fw_version(struct igc_softc *sc)
{
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	igc_sbuf_fw_version(&sc->fw_ver, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
}
static int
igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	igc_sbuf_fw_version(&sc->fw_ver, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
}

/**********************************************************************
 *
 *  This routine provides a way to dump out the adapter eeprom,
 *  often a useful debug/service tool. This only dumps the first
 *  32 words, stuff that matters is in that extent.
 *
 **********************************************************************/
static int
igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	/*
	 * This value will cause a hex dump of the
	 * first 32 16-bit words of the EEPROM to
	 * the screen.
	 */
	if (result == 1)
		igc_print_nvm_info(sc);

	return (error);
}

static void
igc_print_nvm_info(struct igc_softc *sc)
{
	u16 eeprom_data;
	int i, j, row = 0;

	/* It's a bit crude, but it gets the job done */
	printf("\nInterface EEPROM Dump:\n");
	printf("Offset\n0x0000 ");
	for (i = 0, j = 0; i < 32; i++, j++) {
		if (j == 8) { /* Make the offset block */
			j = 0; ++row;
			printf("\n0x00%x0 ", row);
		}
		igc_read_nvm(&sc->hw, i, 1, &eeprom_data);
		printf("%04x ", eeprom_data);
	}
	printf("\n");
}
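/*
 * The dump above prints the words eight per row, so a full dump is four
 * rows covering EEPROM word offsets 0x0000-0x001f.
 */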
/*
 * Set flow control using sysctl:
 * Flow control values:
 *	0 - off
 *	1 - rx pause
 *	2 - tx pause
 *	3 - full
 */
static int
igc_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	int error;
	static int input = 3; /* default is full */
	struct igc_softc *sc = (struct igc_softc *)arg1;

	error = sysctl_handle_int(oidp, &input, 0, req);

	if ((error) || (req->newptr == NULL))
		return (error);

	if (input == sc->fc) /* no change? */
		return (error);

	switch (input) {
	case igc_fc_rx_pause:
	case igc_fc_tx_pause:
	case igc_fc_full:
	case igc_fc_none:
		sc->hw.fc.requested_mode = input;
		sc->fc = input;
		break;
	default:
		/* Do nothing */
		return (error);
	}

	sc->hw.fc.current_mode = sc->hw.fc.requested_mode;
	igc_force_mac_fc(&sc->hw);
	return (error);
}

/*
 * Manage DMA Coalesce:
 * Control values:
 *	0/1 - off/on
 * Legal timer values are:
 *	250, 500, 1000-10000 in thousands
 */
static int
igc_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	int error;

	error = sysctl_handle_int(oidp, &sc->dmac, 0, req);

	if ((error) || (req->newptr == NULL))
		return (error);

	switch (sc->dmac) {
	case 0:
		/* Disabling */
		break;
	case 1: /* Just enable and use default */
		sc->dmac = 1000;
		break;
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 3000:
	case 4000:
	case 5000:
	case 6000:
	case 7000:
	case 8000:
	case 9000:
	case 10000:
		/* Legal values - allow */
		break;
	default:
		/* Do nothing, illegal value */
		sc->dmac = 0;
		return (EINVAL);
	}
	/* Reinit the interface */
	igc_if_init(sc->ctx);
	return (error);
}

/*
 * Manage Energy Efficient Ethernet:
 * Control values:
 *	0/1 - enabled/disabled
 */
static int
igc_sysctl_eee(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	int error, value;

	value = sc->hw.dev_spec._i225.eee_disable;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);

	sc->hw.dev_spec._i225.eee_disable = (value != 0);
	igc_if_init(sc->ctx);

	return (0);
}

static int
igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct igc_softc *)arg1;
		igc_print_debug_info(sc);
	}

	return (error);
}

static int
igc_get_rs(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	int error;
	int result;

	result = 0;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr || result != 1)
		return (error);
	igc_dump_rs(sc);

	return (error);
}

static void
igc_if_debug(if_ctx_t ctx)
{
	igc_dump_rs(iflib_get_softc(ctx));
}
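/*
 * The flow control, DMA coalescing and EEE handlers above are wired to
 * per-device sysctl nodes elsewhere in this file; a typical invocation
 * looks like the following (illustrative, assuming the flow control
 * node is named "fc" on unit 0):
 *
 *	sysctl dev.igc.0.fc=3		# request full flow control
 */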
-jfv 3177 */ 3178 static void 3179 igc_print_debug_info(struct igc_softc *sc) 3180 { 3181 device_t dev = iflib_get_dev(sc->ctx); 3182 if_t ifp = iflib_get_ifp(sc->ctx); 3183 struct tx_ring *txr = &sc->tx_queues->txr; 3184 struct rx_ring *rxr = &sc->rx_queues->rxr; 3185 3186 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 3187 printf("Interface is RUNNING "); 3188 else 3189 printf("Interface is NOT RUNNING\n"); 3190 3191 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) 3192 printf("and INACTIVE\n"); 3193 else 3194 printf("and ACTIVE\n"); 3195 3196 for (int i = 0; i < sc->tx_num_queues; i++, txr++) { 3197 device_printf(dev, "TX Queue %d ------\n", i); 3198 device_printf(dev, "hw tdh = %d, hw tdt = %d\n", 3199 IGC_READ_REG(&sc->hw, IGC_TDH(i)), 3200 IGC_READ_REG(&sc->hw, IGC_TDT(i))); 3201 3202 } 3203 for (int j=0; j < sc->rx_num_queues; j++, rxr++) { 3204 device_printf(dev, "RX Queue %d ------\n", j); 3205 device_printf(dev, "hw rdh = %d, hw rdt = %d\n", 3206 IGC_READ_REG(&sc->hw, IGC_RDH(j)), 3207 IGC_READ_REG(&sc->hw, IGC_RDT(j))); 3208 } 3209 } 3210