/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001-2024, Intel Corporation
 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
 * Copyright (c) 2021-2024 Rubicon Communications, LLC (Netgate)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "if_igc.h"
#include <sys/sbuf.h>
#include <machine/_inttypes.h>

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select the devices to load on.
 *  Last entry must be all 0s.
 *
 *  { Vendor ID, Device ID, String }
 *********************************************************************/

static const pci_vendor_info_t igc_vendor_info_array[] =
{
	/* Intel(R) Ethernet Controller I225/I226 family - igc */
	PVID(0x8086, IGC_DEV_ID_I225_LM, "Intel(R) Ethernet Controller I225-LM"),
	PVID(0x8086, IGC_DEV_ID_I225_V, "Intel(R) Ethernet Controller I225-V"),
	PVID(0x8086, IGC_DEV_ID_I225_K, "Intel(R) Ethernet Controller I225-K"),
	PVID(0x8086, IGC_DEV_ID_I225_I, "Intel(R) Ethernet Controller I225-I"),
	PVID(0x8086, IGC_DEV_ID_I220_V, "Intel(R) Ethernet Controller I220-V"),
	PVID(0x8086, IGC_DEV_ID_I225_K2, "Intel(R) Ethernet Controller I225-K(2)"),
	PVID(0x8086, IGC_DEV_ID_I225_LMVP, "Intel(R) Ethernet Controller I225-LMvP(2)"),
	PVID(0x8086, IGC_DEV_ID_I226_K, "Intel(R) Ethernet Controller I226-K"),
	PVID(0x8086, IGC_DEV_ID_I226_LMVP, "Intel(R) Ethernet Controller I226-LMvP"),
	PVID(0x8086, IGC_DEV_ID_I225_IT, "Intel(R) Ethernet Controller I225-IT(2)"),
	PVID(0x8086, IGC_DEV_ID_I226_LM, "Intel(R) Ethernet Controller I226-LM"),
	PVID(0x8086, IGC_DEV_ID_I226_V, "Intel(R) Ethernet Controller I226-V"),
	PVID(0x8086, IGC_DEV_ID_I226_IT, "Intel(R) Ethernet Controller I226-IT"),
	PVID(0x8086, IGC_DEV_ID_I221_V, "Intel(R) Ethernet Controller I221-V"),
	PVID(0x8086, IGC_DEV_ID_I226_BLANK_NVM, "Intel(R) Ethernet Controller I226(blankNVM)"),
	PVID(0x8086, IGC_DEV_ID_I225_BLANK_NVM, "Intel(R) Ethernet Controller I225(blankNVM)"),
	/* required last entry */
	PVID_END
};
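/*
 * This table serves double duty: iflib consults it on probe, and
 * IFLIB_PNP_INFO() below exports the same vendor/device pairs as PNP
 * metadata so devmatch(8) can autoload if_igc.ko when a matching
 * controller is present.
 */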
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static void	*igc_register(device_t);
static int	igc_if_attach_pre(if_ctx_t);
static int	igc_if_attach_post(if_ctx_t);
static int	igc_if_detach(if_ctx_t);
static int	igc_if_shutdown(if_ctx_t);
static int	igc_if_suspend(if_ctx_t);
static int	igc_if_resume(if_ctx_t);

static int	igc_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int	igc_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void	igc_if_queues_free(if_ctx_t);

static uint64_t	igc_if_get_counter(if_ctx_t, ift_counter);
static void	igc_if_init(if_ctx_t);
static void	igc_if_stop(if_ctx_t);
static void	igc_if_media_status(if_ctx_t, struct ifmediareq *);
static int	igc_if_media_change(if_ctx_t);
static int	igc_if_mtu_set(if_ctx_t, uint32_t);
static void	igc_if_timer(if_ctx_t, uint16_t);
static void	igc_if_watchdog_reset(if_ctx_t);
static bool	igc_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static void	igc_identify_hardware(if_ctx_t);
static int	igc_allocate_pci_resources(if_ctx_t);
static void	igc_free_pci_resources(if_ctx_t);
static void	igc_reset(if_ctx_t);
static int	igc_setup_interface(if_ctx_t);
static int	igc_setup_msix(if_ctx_t);

static void	igc_initialize_transmit_unit(if_ctx_t);
static void	igc_initialize_receive_unit(if_ctx_t);

static void	igc_if_intr_enable(if_ctx_t);
static void	igc_if_intr_disable(if_ctx_t);
static int	igc_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int	igc_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
static void	igc_if_multi_set(if_ctx_t);
static void	igc_if_update_admin_status(if_ctx_t);
static void	igc_if_debug(if_ctx_t);
static void	igc_update_stats_counters(struct igc_softc *);
static void	igc_add_hw_stats(struct igc_softc *);
static int	igc_if_set_promisc(if_ctx_t, int);
static void	igc_setup_vlan_hw_support(if_ctx_t);
static void	igc_fw_version(struct igc_softc *);
static void	igc_sbuf_fw_version(struct igc_fw_version *, struct sbuf *);
static void	igc_print_fw_version(struct igc_softc *);
static int	igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
static int	igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
static void	igc_print_nvm_info(struct igc_softc *);
static int	igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	igc_get_rs(SYSCTL_HANDLER_ARGS);
static void	igc_print_debug_info(struct igc_softc *);
static int	igc_is_valid_ether_addr(u8 *);
static void	igc_neweitr(struct igc_softc *, struct igc_rx_queue *,
    struct tx_ring *, struct rx_ring *);
static int	igc_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
/* Management and WOL Support */
static void	igc_get_hw_control(struct igc_softc *);
static void	igc_release_hw_control(struct igc_softc *);
static void	igc_get_wakeup(if_ctx_t);
static void	igc_enable_wakeup(if_ctx_t);

int		igc_intr(void *);

/* MSI-X handlers */
static int	igc_if_msix_intr_assign(if_ctx_t, int);
static int	igc_msix_link(void *);
static void	igc_handle_link(void *context);

static int	igc_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	igc_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int	igc_sysctl_eee(SYSCTL_HANDLER_ARGS);

static int	igc_get_regs(SYSCTL_HANDLER_ARGS);

static void	igc_configure_queues(struct igc_softc *);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/
static device_method_t igc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, igc_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};

static driver_t igc_driver = {
	"igc", igc_methods, sizeof(struct igc_softc),
};

DRIVER_MODULE(igc, pci, igc_driver, 0, 0);

MODULE_DEPEND(igc, pci, 1, 1, 1);
MODULE_DEPEND(igc, ether, 1, 1, 1);
MODULE_DEPEND(igc, iflib, 1, 1, 1);

IFLIB_PNP_INFO(pci, igc, igc_vendor_info_array);

static device_method_t igc_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, igc_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, igc_if_attach_post),
	DEVMETHOD(ifdi_detach, igc_if_detach),
	DEVMETHOD(ifdi_shutdown, igc_if_shutdown),
	DEVMETHOD(ifdi_suspend, igc_if_suspend),
	DEVMETHOD(ifdi_resume, igc_if_resume),
	DEVMETHOD(ifdi_init, igc_if_init),
	DEVMETHOD(ifdi_stop, igc_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, igc_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, igc_if_intr_enable),
	DEVMETHOD(ifdi_intr_disable, igc_if_intr_disable),
	DEVMETHOD(ifdi_tx_queues_alloc, igc_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, igc_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, igc_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, igc_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, igc_if_multi_set),
	DEVMETHOD(ifdi_media_status, igc_if_media_status),
	DEVMETHOD(ifdi_media_change, igc_if_media_change),
	DEVMETHOD(ifdi_mtu_set, igc_if_mtu_set),
	DEVMETHOD(ifdi_promisc_set, igc_if_set_promisc),
	DEVMETHOD(ifdi_timer, igc_if_timer),
	DEVMETHOD(ifdi_watchdog_reset, igc_if_watchdog_reset),
	DEVMETHOD(ifdi_get_counter, igc_if_get_counter),
	DEVMETHOD(ifdi_rx_queue_intr_enable, igc_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, igc_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_debug, igc_if_debug),
	DEVMETHOD(ifdi_needs_restart, igc_if_needs_restart),
	DEVMETHOD_END
};

static driver_t igc_if_driver = {
	"igc_if", igc_if_methods, sizeof(struct igc_softc)
};

/*********************************************************************
 *  Tunable default values.
 *********************************************************************/
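/*
 * The CTLFLAG_RDTUN entries below are read-only tunables, settable from
 * loader.conf(5) before the module loads.  A minimal example (values are
 * illustrative, not recommendations):
 *
 *	hw.igc.max_interrupt_rate="8000"
 *	hw.igc.eee_setting="0"
 *
 * The CTLFLAG_RWTUN entries (e.g. hw.igc.enable_aim) may additionally be
 * changed at runtime with sysctl(8).
 */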
/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO	0
#endif

static SYSCTL_NODE(_hw, OID_AUTO, igc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "igc driver parameters");

static int igc_disable_crc_stripping = 0;
SYSCTL_INT(_hw_igc, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
    &igc_disable_crc_stripping, 0, "Disable CRC Stripping");

static int igc_smart_pwr_down = false;
SYSCTL_INT(_hw_igc, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN,
    &igc_smart_pwr_down, 0,
    "Set to true to leave smart power down enabled on newer adapters");

/* Controls whether promiscuous also shows bad packets */
static int igc_debug_sbp = true;
SYSCTL_INT(_hw_igc, OID_AUTO, sbp, CTLFLAG_RDTUN, &igc_debug_sbp, 0,
    "Show bad packets in promiscuous mode");

/*
 * Energy Efficient Ethernet - default to OFF.
 * Note: this value is copied into hw.dev_spec._i225.eee_disable during
 * attach, so a non-zero setting disables EEE.
 */
static int igc_eee_setting = 1;
SYSCTL_INT(_hw_igc, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &igc_eee_setting, 0,
    "Disable Energy Efficient Ethernet");

/*
 * AIM: Adaptive Interrupt Moderation,
 * which means that the interrupt rate is varied over time based on the
 * traffic for that interrupt vector.
 */
static int igc_enable_aim = 1;
SYSCTL_INT(_hw_igc, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &igc_enable_aim,
    0, "Enable adaptive interrupt moderation (1=normal, 2=lowlatency)");

/*
 * Tunable interrupt rate
 */
static int igc_max_interrupt_rate = IGC_INTS_DEFAULT;
SYSCTL_INT(_hw_igc, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &igc_max_interrupt_rate, 0, "Maximum interrupts per second");

extern struct if_txrx igc_txrx;

static struct if_shared_ctx igc_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IGC_TSO_SEG_SIZE,
	.isc_rx_maxsize = MAX_JUMBO_FRAME_SIZE,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM9BYTES,
	.isc_nfl = 1,
	.isc_nrxqs = 1,
	.isc_ntxqs = 1,
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = igc_vendor_info_array,
	.isc_driver_version = "1",
	.isc_driver = &igc_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,

	.isc_nrxd_min = {IGC_MIN_RXD},
	.isc_ntxd_min = {IGC_MIN_TXD},
	.isc_nrxd_max = {IGC_MAX_RXD},
	.isc_ntxd_max = {IGC_MAX_TXD},
	.isc_nrxd_default = {IGC_DEFAULT_RXD},
	.isc_ntxd_default = {IGC_DEFAULT_TXD},
};

/*****************************************************************
 *
 *  Dump Registers
 *
 ****************************************************************/
#define IGC_REGS_LEN 739
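/*
 * Example usage (unit number hypothetical): the handler below is attached
 * to the per-device sysctl tree in igc_if_attach_pre() as "reg_dump", so
 * the dump can be read with:
 *
 *	# sysctl dev.igc.0.reg_dump
 */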
static int igc_get_regs(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	struct igc_hw *hw = &sc->hw;
	struct sbuf *sb;
	u32 *regs_buff;
	int rc;

	regs_buff = malloc(sizeof(u32) * IGC_REGS_LEN, M_DEVBUF, M_WAITOK);
	memset(regs_buff, 0, IGC_REGS_LEN * sizeof(u32));

	rc = sysctl_wire_old_buffer(req, 0);
	MPASS(rc == 0);
	if (rc != 0) {
		free(regs_buff, M_DEVBUF);
		return (rc);
	}

	sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req);
	MPASS(sb != NULL);
	if (sb == NULL) {
		free(regs_buff, M_DEVBUF);
		return (ENOMEM);
	}

	/* General Registers */
	regs_buff[0] = IGC_READ_REG(hw, IGC_CTRL);
	regs_buff[1] = IGC_READ_REG(hw, IGC_STATUS);
	regs_buff[2] = IGC_READ_REG(hw, IGC_CTRL_EXT);
	regs_buff[3] = IGC_READ_REG(hw, IGC_ICR);
	regs_buff[4] = IGC_READ_REG(hw, IGC_RCTL);
	regs_buff[5] = IGC_READ_REG(hw, IGC_RDLEN(0));
	regs_buff[6] = IGC_READ_REG(hw, IGC_RDH(0));
	regs_buff[7] = IGC_READ_REG(hw, IGC_RDT(0));
	regs_buff[8] = IGC_READ_REG(hw, IGC_RXDCTL(0));
	regs_buff[9] = IGC_READ_REG(hw, IGC_RDBAL(0));
	regs_buff[10] = IGC_READ_REG(hw, IGC_RDBAH(0));
	regs_buff[11] = IGC_READ_REG(hw, IGC_TCTL);
	regs_buff[12] = IGC_READ_REG(hw, IGC_TDBAL(0));
	regs_buff[13] = IGC_READ_REG(hw, IGC_TDBAH(0));
	regs_buff[14] = IGC_READ_REG(hw, IGC_TDLEN(0));
	regs_buff[15] = IGC_READ_REG(hw, IGC_TDH(0));
	regs_buff[16] = IGC_READ_REG(hw, IGC_TDT(0));
	regs_buff[17] = IGC_READ_REG(hw, IGC_TXDCTL(0));

	sbuf_printf(sb, "General Registers\n");
	sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
	sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]);
	sbuf_printf(sb, "\tCTRL_EXT\t %08x\n\n", regs_buff[2]);

	sbuf_printf(sb, "Interrupt Registers\n");
	sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);

	sbuf_printf(sb, "RX Registers\n");
	sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
	sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]);
	sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]);
	sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
	sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]);
	sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]);
	sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]);

	sbuf_printf(sb, "TX Registers\n");
	sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
	sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]);
	sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]);
	sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
	sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]);
	sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]);
	sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]);
	/*
	 * Note: regs_buff[18] through [21] (TDFH, TDFT, TDFHS, TDFPC) are
	 * never read above, so they always print as zero here.
	 */
	sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]);
	sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]);
	sbuf_printf(sb, "\tTDFHS\t %08x\n", regs_buff[20]);
	sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]);

	free(regs_buff, M_DEVBUF);

#ifdef DUMP_DESCS
	{
		if_softc_ctx_t scctx = sc->shared;
		/*
		 * Dump queue 0 only; per-descriptor eop/DD state is not
		 * tracked by this driver under iflib, so it is not printed.
		 */
		struct rx_ring *rxr = &sc->rx_queues[0].rxr;
		struct tx_ring *txr = &sc->tx_queues[0].txr;
		int ntxd = scctx->isc_ntxd[0];
		int nrxd = scctx->isc_nrxd[0];
		int j;

		for (j = 0; j < nrxd; j++) {
			u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error);
			u32 length = le32toh(rxr->rx_base[j].wb.upper.length);
			sbuf_printf(sb,
			    "\tReceive Descriptor Address %d: %08" PRIx64 " Error:%d Length:%d\n",
			    j, rxr->rx_base[j].read.buffer_addr, staterr, length);
		}

		for (j = 0; j < min(ntxd, 256); j++) {
			unsigned int *ptr = (unsigned int *)&txr->tx_base[j];

			sbuf_printf(sb,
			    "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x\n",
			    j, ptr[0], ptr[1], ptr[2], ptr[3]);
		}
	}
#endif

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}

static void *
igc_register(device_t dev)
{
	return (&igc_sctx_init);
}

static int
igc_set_num_queues(if_ctx_t ctx)
{
	int maxqueues;

	/* The I225/I226 MACs support at most four queue pairs */
	maxqueues = 4;

	return (maxqueues);
}

#define IGC_CAPS \
    IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \
    IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_TSO4 | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6 | IFCAP_TSO6

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
igc_if_attach_pre(if_ctx_t ctx)
{
	struct igc_softc *sc;
	if_softc_ctx_t scctx;
	device_t dev;
	struct igc_hw *hw;
	int error = 0;

	INIT_DEBUGOUT("igc_if_attach_pre: begin");
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);

	sc->ctx = sc->osdep.ctx = ctx;
	sc->dev = sc->osdep.dev = dev;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, igc_sysctl_nvm_info, "I", "NVM Information");

	sc->enable_aim = igc_enable_aim;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &sc->enable_aim, 0,
	    "Interrupt Moderation (1=normal, 2=lowlatency)");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    sc, 0, igc_sysctl_print_fw_version, "A",
	    "Prints FW/NVM Versions");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, igc_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, igc_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "reg_dump",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
	    igc_get_regs, "A", "Dump Registers");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rs_dump",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    igc_get_rs, "I", "Dump RS indexes");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dmac",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    igc_sysctl_dmac, "I", "DMA Coalesce");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tso_tcp_flags_mask_first_segment",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, igc_sysctl_tso_tcp_flags_mask, "IU",
	    "TSO TCP flags mask for first segment");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tso_tcp_flags_mask_middle_segment",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 1, igc_sysctl_tso_tcp_flags_mask, "IU",
	    "TSO TCP flags mask for middle segment");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tso_tcp_flags_mask_last_segment",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 2, igc_sysctl_tso_tcp_flags_mask, "IU",
	    "TSO TCP flags mask for last segment");

	/* Determine hardware and mac info */
	igc_identify_hardware(ctx);

	scctx->isc_tx_nsegments = IGC_MAX_SCATTER;
	scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max =
	    igc_set_num_queues(ctx);
	if (bootverbose)
		device_printf(dev, "attach_pre capping queues at %d\n",
		    scctx->isc_ntxqsets_max);

	scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] *
	    sizeof(union igc_adv_tx_desc), IGC_DBA_ALIGN);
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] *
	    sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN);
	scctx->isc_txd_size[0] = sizeof(union igc_adv_tx_desc);
	scctx->isc_rxd_size[0] = sizeof(union igc_adv_rx_desc);
	scctx->isc_txrx = &igc_txrx;
	scctx->isc_tx_tso_segments_max = IGC_MAX_SCATTER;
	scctx->isc_tx_tso_size_max = IGC_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IGC_TSO_SEG_SIZE;
	scctx->isc_capabilities = scctx->isc_capenable = IGC_CAPS;
	scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_SCTP | CSUM_IP6_SCTP;

	/*
	 * Some new devices, as with ixgbe, now may
	 * use a different BAR, so we need to keep
	 * track of which is used.
	 */
	scctx->isc_msix_bar = PCIR_BAR(IGC_MSIX_BAR);
	if (pci_read_config(dev, scctx->isc_msix_bar, 4) == 0)
		scctx->isc_msix_bar += 4;

	/* Setup PCI resources */
	if (igc_allocate_pci_resources(ctx)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/* Do Shared Code initialization */
	error = igc_setup_init_funcs(hw, true);
	if (error) {
		device_printf(dev, "Setup of Shared code failed, error %d\n",
		    error);
		error = ENXIO;
		goto err_pci;
	}

	igc_setup_msix(ctx);
	igc_get_bus_info(hw);

	hw->mac.autoneg = DO_AUTO_NEG;
	hw->phy.autoneg_wait_to_complete = false;
	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
	}

	/*
	 * Set the frame limits assuming
	 * standard ethernet sized frames.
	 */
	scctx->isc_max_frame_size = sc->hw.mac.max_frame_size =
	    ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;

	/* Allocate multicast array memory. */
	sc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (sc->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Check SOL/IDER usage */
	if (igc_check_reset_block(hw))
		device_printf(dev, "PHY reset is blocked"
		    " due to SOL/IDER session.\n");

	/* Sysctl for setting Energy Efficient Ethernet */
	sc->hw.dev_spec._i225.eee_disable = igc_eee_setting;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "eee_control",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, igc_sysctl_eee, "I",
	    "Disable Energy Efficient Ethernet");

	/*
	 * Start from a known state; this is
	 * important in reading the nvm and
	 * mac from that.
	 */
	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto err_late;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (igc_read_mac_addr(hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto err_late;
	}

	if (!igc_is_valid_ether_addr(hw->mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto err_late;
	}

	/* Save the EEPROM/NVM versions */
	igc_fw_version(sc);

	igc_print_fw_version(sc);

	/*
	 * Get Wake-on-Lan and Management info for later use
	 */
	igc_get_wakeup(ctx);

	/* Enable only WOL MAGIC by default */
	scctx->isc_capenable &= ~IFCAP_WOL;
	if (sc->wol != 0)
		scctx->isc_capenable |= IFCAP_WOL_MAGIC;

	iflib_set_mac(ctx, hw->mac.addr);

	return (0);

err_late:
	igc_release_hw_control(sc);
err_pci:
	igc_free_pci_resources(ctx);
	free(sc->mta, M_DEVBUF);

	return (error);
}

static int
igc_if_attach_post(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_hw *hw = &sc->hw;
	int error = 0;

	/* Setup OS specific network interface */
	error = igc_setup_interface(ctx);
	if (error != 0) {
		goto err_late;
	}

	igc_reset(ctx);

	/* Initialize statistics */
	igc_update_stats_counters(sc);
	hw->mac.get_link_status = true;
	igc_if_update_admin_status(ctx);
	igc_add_hw_stats(sc);

	/* the driver can now take control from firmware */
	igc_get_hw_control(sc);

	INIT_DEBUGOUT("igc_if_attach_post: end");

	return (error);

err_late:
	igc_release_hw_control(sc);
	igc_free_pci_resources(ctx);
	igc_if_queues_free(ctx);
	free(sc->mta, M_DEVBUF);

	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
igc_if_detach(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("igc_if_detach: begin");

	igc_phy_hw_reset(&sc->hw);

	igc_release_hw_control(sc);
	igc_free_pci_resources(ctx);

	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
igc_if_shutdown(if_ctx_t ctx)
{
	return (igc_if_suspend(ctx));
}

/*
 * Suspend/resume device methods.
 */
static int
igc_if_suspend(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);

	igc_release_hw_control(sc);
	igc_enable_wakeup(ctx);
	return (0);
}

static int
igc_if_resume(if_ctx_t ctx)
{
	igc_if_init(ctx);

	return (0);
}

static int
igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	int max_frame_size;
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);

	IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

	/* 9K Jumbo Frame size */
	max_frame_size = 9234;

	if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
		return (EINVAL);
	}

	scctx->isc_max_frame_size = sc->hw.mac.max_frame_size =
	    mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	return (0);
}
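/*
 * Worked example for the check above: "ifconfig igc0 mtu 9000" (unit
 * number hypothetical) yields a max_frame_size of 9000 + 14 (ETHER_HDR_LEN)
 * + 4 (ETHER_CRC_LEN) = 9018 bytes, within the 9234-byte limit, so the
 * request is accepted; an MTU of 9217 or larger is rejected with EINVAL.
 */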
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 **********************************************************************/
static void
igc_if_init(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	if_t ifp = iflib_get_ifp(ctx);
	struct igc_tx_queue *tx_que;
	int i;

	INIT_DEBUGOUT("igc_if_init: begin");

	/* Get the latest mac address; the user can use a LAA */
	bcopy(if_getlladdr(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	igc_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/* Initialize the hardware */
	igc_reset(ctx);
	igc_if_update_admin_status(ctx);

	for (i = 0, tx_que = sc->tx_queues; i < sc->tx_num_queues;
	    i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		txr->tx_rs_cidx = txr->tx_rs_pidx;

		/*
		 * Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
	}

	/* Setup VLAN support, basic and offload if available */
	IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN);

	/* Prepare transmit descriptors and buffers */
	igc_initialize_transmit_unit(ctx);

	/* Setup Multicast table */
	igc_if_multi_set(ctx);

	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
	igc_initialize_receive_unit(ctx);

	/* Set up VLAN support */
	igc_setup_vlan_hw_support(ctx);

	/* Don't lose promiscuous settings */
	igc_if_set_promisc(ctx, if_getflags(ifp));
	igc_clear_hw_cntrs_base_generic(&sc->hw);

	if (sc->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
		igc_configure_queues(sc);

	/* this clears any pending interrupts */
	IGC_READ_REG(&sc->hw, IGC_ICR);
	IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC);

	/* the driver can now take control from firmware */
	igc_get_hw_control(sc);

	/* Set Energy Efficient Ethernet */
	igc_set_eee_i225(&sc->hw, true, true, true);
}

enum eitr_latency_target {
	eitr_latency_disabled = 0,
	eitr_latency_lowest = 1,
	eitr_latency_low = 2,
	eitr_latency_bulk = 3
};

/*********************************************************************
 *
 *  Helper to calculate next EITR value for AIM
 *
 *********************************************************************/
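/*
 * A short worked example of the state machine below (thresholds come from
 * the code; the traffic pattern is hypothetical): a queue that moved large
 * TSO bursts in the last tick, i.e. bytes/packet above 8000, steps to
 * eitr_latency_bulk (about 4k interrupts/s), while a trickle of small
 * packets (fewer than 3 packets and under 512 bytes) steps toward
 * eitr_latency_lowest (about 70k interrupts/s), which is then trimmed back
 * to eitr_latency_low unless enable_aim is set to 2 (lowlatency).
 */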
static void
igc_neweitr(struct igc_softc *sc, struct igc_rx_queue *que,
    struct tx_ring *txr, struct rx_ring *rxr)
{
	struct igc_hw *hw = &sc->hw;
	u32 neweitr;
	u32 bytes;
	u32 bytes_packets;
	u32 packets;
	u8 nextlatency;

	/* Idle, do nothing */
	if ((txr->tx_bytes == 0) && (rxr->rx_bytes == 0))
		return;

	neweitr = 0;
	bytes = bytes_packets = packets = 0;

	if (sc->enable_aim) {
		nextlatency = rxr->rx_nextlatency;

		/* Use half default (4K) ITR if sub-gig */
		if (sc->link_speed < 1000) {
			neweitr = IGC_INTS_4K;
			goto igc_set_next_eitr;
		}
		/* Want at least enough packet buffer for two frames to AIM */
		if (sc->shared->isc_max_frame_size * 2 > (sc->pba << 10)) {
			neweitr = igc_max_interrupt_rate;
			sc->enable_aim = 0;
			goto igc_set_next_eitr;
		}

		/* Get the largest values from the associated tx and rx ring */
		if (txr->tx_bytes && txr->tx_packets) {
			bytes = txr->tx_bytes;
			bytes_packets = txr->tx_bytes / txr->tx_packets;
			packets = txr->tx_packets;
		}
		if (rxr->rx_bytes && rxr->rx_packets) {
			bytes = max(bytes, rxr->rx_bytes);
			bytes_packets = max(bytes_packets,
			    rxr->rx_bytes / rxr->rx_packets);
			packets = max(packets, rxr->rx_packets);
		}

		/* Latency state machine */
		switch (nextlatency) {
		case eitr_latency_disabled: /* Bootstrapping */
			nextlatency = eitr_latency_low;
			break;
		case eitr_latency_lowest: /* 70k ints/s */
			/* TSO and jumbo frames */
			if (bytes_packets > 8000)
				nextlatency = eitr_latency_bulk;
			else if ((packets < 5) && (bytes > 512))
				nextlatency = eitr_latency_low;
			break;
		case eitr_latency_low: /* 20k ints/s */
			if (bytes > 10000) {
				/* Handle TSO */
				if (bytes_packets > 8000)
					nextlatency = eitr_latency_bulk;
				else if ((packets < 10) ||
				    (bytes_packets > 1200))
					nextlatency = eitr_latency_bulk;
				else if (packets > 35)
					nextlatency = eitr_latency_lowest;
			} else if (bytes_packets > 2000) {
				nextlatency = eitr_latency_bulk;
			} else if (packets < 3 && bytes < 512) {
				nextlatency = eitr_latency_lowest;
			}
			break;
		case eitr_latency_bulk: /* 4k ints/s */
			if (bytes > 25000) {
				if (packets > 35)
					nextlatency = eitr_latency_low;
			} else if (bytes < 1500)
				nextlatency = eitr_latency_low;
			break;
		default:
			nextlatency = eitr_latency_low;
			device_printf(sc->dev,
			    "Unexpected neweitr transition %d\n",
			    nextlatency);
			break;
		}

		/* Trim eitr_latency_lowest for default AIM setting */
		if (sc->enable_aim == 1 && nextlatency == eitr_latency_lowest)
			nextlatency = eitr_latency_low;

		/* Request new latency */
		rxr->rx_nextlatency = nextlatency;
	} else {
		/* We may have toggled to AIM disabled */
		nextlatency = eitr_latency_disabled;
		rxr->rx_nextlatency = nextlatency;
	}

	/* ITR state machine */
	switch (nextlatency) {
	case eitr_latency_lowest:
		neweitr = IGC_INTS_70K;
		break;
	case eitr_latency_low:
		neweitr = IGC_INTS_20K;
		break;
	case eitr_latency_bulk:
		neweitr = IGC_INTS_4K;
		break;
	case eitr_latency_disabled:
	default:
		neweitr = igc_max_interrupt_rate;
		break;
	}

igc_set_next_eitr:
	neweitr = IGC_INTS_TO_EITR(neweitr);

	neweitr |= IGC_EITR_CNT_IGNR;

	if (neweitr != que->eitr_setting) {
		que->eitr_setting = neweitr;
		IGC_WRITE_REG(hw, IGC_EITR(que->msix), que->eitr_setting);
	}
}

/*********************************************************************
 *
 *  Fast Legacy/MSI Combined Interrupt Service routine
 *
 *********************************************************************/
int
igc_intr(void *arg)
{
	struct igc_softc *sc = arg;
	struct igc_hw *hw = &sc->hw;
	struct igc_rx_queue *que = &sc->rx_queues[0];
	struct tx_ring *txr = &sc->tx_queues[0].txr;
	struct rx_ring *rxr = &que->rxr;
	if_ctx_t ctx = sc->ctx;
	u32 reg_icr;

	reg_icr = IGC_READ_REG(hw, IGC_ICR);

	/* Hot eject? */
	if (reg_icr == 0xffffffff)
		return (FILTER_STRAY);

	/* Definitely not our interrupt. */
	if (reg_icr == 0x0)
		return (FILTER_STRAY);

	if ((reg_icr & IGC_ICR_INT_ASSERTED) == 0)
		return (FILTER_STRAY);

	/*
	 * Only MSI-X interrupts have one-shot behavior by taking advantage
	 * of the EIAC register.  Thus, explicitly disable interrupts.  This
	 * also works around the MSI message reordering errata on certain
	 * systems.
	 */
	IFDI_INTR_DISABLE(ctx);

	/* Link status change */
	if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC))
		igc_handle_link(ctx);

	if (reg_icr & IGC_ICR_RXO)
		sc->rx_overruns++;

	igc_neweitr(sc, que, txr, rxr);

	/* Reset state */
	txr->tx_bytes = 0;
	txr->tx_packets = 0;
	rxr->rx_bytes = 0;
	rxr->rx_packets = 0;

	return (FILTER_SCHEDULE_THREAD);
}

static int
igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_rx_queue *rxq = &sc->rx_queues[rxqid];

	IGC_WRITE_REG(&sc->hw, IGC_EIMS, rxq->eims);
	return (0);
}

static int
igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_tx_queue *txq = &sc->tx_queues[txqid];

	IGC_WRITE_REG(&sc->hw, IGC_EIMS, txq->eims);
	return (0);
}

/*********************************************************************
 *
 *  MSI-X RX Interrupt Service routine
 *
 **********************************************************************/
static int
igc_msix_que(void *arg)
{
	struct igc_rx_queue *que = arg;
	struct igc_softc *sc = que->sc;
	struct tx_ring *txr = &sc->tx_queues[que->msix].txr;
	struct rx_ring *rxr = &que->rxr;

	++que->irqs;

	igc_neweitr(sc, que, txr, rxr);

	/* Reset state */
	txr->tx_bytes = 0;
	txr->tx_packets = 0;
	rxr->rx_bytes = 0;
	rxr->rx_packets = 0;

	return (FILTER_SCHEDULE_THREAD);
}

/*********************************************************************
 *
 *  MSI-X Link Fast Interrupt Service routine
 *
 **********************************************************************/
static int
igc_msix_link(void *arg)
{
	struct igc_softc *sc = arg;
	u32 reg_icr;

	++sc->link_irq;
	MPASS(sc->hw.back != NULL);
	reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);

	if (reg_icr & IGC_ICR_RXO)
		sc->rx_overruns++;

	if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
		igc_handle_link(sc->ctx);
	}

	IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC);
	IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->link_mask);

	return (FILTER_HANDLED);
}

static void
igc_handle_link(void *context)
{
	if_ctx_t ctx = context;
	struct igc_softc *sc = iflib_get_softc(ctx);

	sc->hw.mac.get_link_status = true;
	iflib_admin_intr_deferred(ctx);
}
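/*
 * Note on the deferral above: igc_handle_link() runs in interrupt context,
 * so it only latches that the link state must be re-read and hands the
 * real work to the iflib admin task via iflib_admin_intr_deferred(); that
 * task later invokes igc_if_update_admin_status() further down in this
 * file.
 */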
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
igc_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct igc_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("igc_if_media_status: begin");

	iflib_admin_intr_deferred(ctx);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case 100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case 1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case 2500:
		ifmr->ifm_active |= IFM_2500_T;
		break;
	}

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
static int
igc_if_media_change(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("igc_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	sc->hw.mac.autoneg = DO_AUTO_NEG;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_2500_T:
		sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
		break;
	case IFM_1000_T:
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
		else
			sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
		break;
	case IFM_10_T:
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
		else
			sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
		break;
	default:
		device_printf(sc->dev, "Unsupported media type\n");
	}

	igc_if_init(ctx);

	return (0);
}
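/*
 * Example (unit number hypothetical): forcing 100 Mbit full duplex with
 * "ifconfig igc0 media 100baseTX mediaopt full-duplex" lands in the
 * IFM_100_TX case above, while "ifconfig igc0 media autoselect" restores
 * the default autonegotiation advertisement.
 */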
static int
igc_if_set_promisc(if_ctx_t ctx, int flags)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	u32 reg_rctl;
	int mcnt = 0;

	reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
	reg_rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_UPE);
	if (flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else
		mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);

	/* Don't disable if in MAX groups */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg_rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);

	if (flags & IFF_PROMISC) {
		reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (igc_debug_sbp)
			reg_rctl |= IGC_RCTL_SBP;
		IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
	} else if (flags & IFF_ALLMULTI) {
		reg_rctl |= IGC_RCTL_MPE;
		reg_rctl &= ~IGC_RCTL_UPE;
		IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
	}
	return (0);
}

static u_int
igc_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
	u8 *mta = arg;

	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
		return (0);

	bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN);

	return (1);
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/
static void
igc_if_multi_set(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	u8 *mta; /* Multicast array memory */
	u32 reg_rctl = 0;
	int mcnt = 0;

	IOCTL_DEBUGOUT("igc_set_multi: begin");

	mta = sc->mta;
	bzero(mta, sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	mcnt = if_foreach_llmaddr(ifp, igc_copy_maddr, mta);

	reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);

	if (if_getflags(ifp) & IFF_PROMISC) {
		reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (igc_debug_sbp)
			reg_rctl |= IGC_RCTL_SBP;
	} else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    if_getflags(ifp) & IFF_ALLMULTI) {
		reg_rctl |= IGC_RCTL_MPE;
		reg_rctl &= ~IGC_RCTL_UPE;
	} else
		reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		igc_update_mc_addr_list(&sc->hw, mta, mcnt);

	IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine schedules igc_if_update_admin_status() to check for
 *  link status and to gather statistics as well as to perform some
 *  controller-specific hardware patting.
 *
 **********************************************************************/
static void
igc_if_timer(if_ctx_t ctx, uint16_t qid)
{

	if (qid != 0)
		return;

	iflib_admin_intr_deferred(ctx);
}

static void
igc_if_update_admin_status(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u32 link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;
	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case igc_media_type_copper:
		if (hw->mac.get_link_status == true) {
			/* Do the work to read phy */
			igc_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else
			link_check = true;
		break;
	case igc_media_type_unknown:
		igc_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && (sc->link_active == 0)) {
		igc_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    sc->link_speed,
			    ((sc->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		sc->link_active = 1;
		iflib_link_state_change(ctx, LINK_STATE_UP,
		    IF_Mbps(sc->link_speed));
	} else if (!link_check && (sc->link_active == 1)) {
		sc->link_speed = 0;
		sc->link_duplex = 0;
		sc->link_active = 0;
		iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
	}
	igc_update_stats_counters(sc);
}

static void
igc_if_watchdog_reset(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);

	/*
	 * Just count the event; iflib(4) will already trigger a
	 * sufficient reset of the controller.
	 */
	sc->watchdog_events++;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
igc_if_stop(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("igc_if_stop: begin");

	igc_reset_hw(&sc->hw);
	IGC_WRITE_REG(&sc->hw, IGC_WUC, 0);
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
igc_identify_hardware(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct igc_softc *sc = iflib_get_softc(ctx);

	/* Make sure our PCI config space has the necessary stuff set */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);

	/* Save off the information about this board */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Do Shared Code Init and Setup */
	if (igc_set_mac_type(&sc->hw)) {
		device_printf(dev, "Setup init failure\n");
		return;
	}
}

static int
igc_allocate_pci_resources(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	int rid;

	rid = PCIR_BAR(0);
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev,
		    "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);
	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

	sc->hw.back = &sc->osdep;

	return (0);
}

/*********************************************************************
 *
 *  Set up the MSI-X Interrupt handlers
 *
 **********************************************************************/
static int
igc_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_rx_queue *rx_que = sc->rx_queues;
	struct igc_tx_queue *tx_que = sc->tx_queues;
	int error, rid, i, vector = 0, rx_vectors;
	char buf[16];

	/* First set up ring resources */
	for (i = 0; i < sc->rx_num_queues; i++, rx_que++, vector++) {
		rid = vector + 1;
		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, igc_msix_que, rx_que, rx_que->me, buf);
		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d",
			    i, error);
			sc->rx_num_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;

		/*
		 * Set the bit to enable interrupt
		 * in IGC_IMS -- bits 20 and 21
		 * are for RX0 and RX1, note this has
		 * NOTHING to do with the MSI-X vector
		 */
		rx_que->eims = 1 << vector;
	}
	rx_vectors = vector;

	vector = 0;
	for (i = 0; i < sc->tx_num_queues; i++, tx_que++, vector++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &sc->tx_queues[i];
		iflib_softirq_alloc_generic(ctx,
		    &sc->rx_queues[i % sc->rx_num_queues].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->me, buf);

		tx_que->msix = (vector % sc->rx_num_queues);

		/*
		 * Set the bit to enable interrupt
		 * in IGC_IMS -- bits 22 and 23
		 * are for TX0 and TX1, note this has
		 * NOTHING to do with the MSI-X vector
		 */
		tx_que->eims = 1 << i;
	}

	/* Link interrupt */
	rid = rx_vectors + 1;
	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, IFLIB_INTR_ADMIN,
	    igc_msix_link, sc, 0, "aq");

	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler");
		goto fail;
	}
	sc->linkvec = rx_vectors;
	return (0);
fail:
	iflib_irq_free(ctx, &sc->irq);
	rx_que = sc->rx_queues;
	for (int i = 0; i < sc->rx_num_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (error);
}
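/*
 * Vector layout sketch for the routing programmed below (queue counts are
 * an example): with four RX queues, MSI-X vectors 0-3 service the queue
 * pairs and vector 4 (sc->linkvec) handles link/admin events.  Each IVAR
 * register holds four 8-bit entries, so queues 0 and 1 share IVAR index 0:
 * RX0 in bits 7:0, TX0 in bits 15:8, RX1 in bits 23:16 and TX1 in bits
 * 31:24, each entry carrying the vector number plus IGC_IVAR_VALID.
 */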
static void
igc_configure_queues(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	struct igc_rx_queue *rx_que;
	struct igc_tx_queue *tx_que;
	u32 ivar = 0, newitr = 0;

	/* First turn on RSS capability */
	IGC_WRITE_REG(hw, IGC_GPIE,
	    IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME | IGC_GPIE_PBA |
	    IGC_GPIE_NSICR);

	/* Turn on MSI-X */
	/* RX entries */
	for (int i = 0; i < sc->rx_num_queues; i++) {
		u32 index = i >> 1;
		ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
		rx_que = &sc->rx_queues[i];
		if (i & 1) {
			ivar &= 0xFF00FFFF;
			ivar |= (rx_que->msix | IGC_IVAR_VALID) << 16;
		} else {
			ivar &= 0xFFFFFF00;
			ivar |= rx_que->msix | IGC_IVAR_VALID;
		}
		IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
	}
	/* TX entries */
	for (int i = 0; i < sc->tx_num_queues; i++) {
		u32 index = i >> 1;
		ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
		tx_que = &sc->tx_queues[i];
		if (i & 1) {
			ivar &= 0x00FFFFFF;
			ivar |= (tx_que->msix | IGC_IVAR_VALID) << 24;
		} else {
			ivar &= 0xFFFF00FF;
			ivar |= (tx_que->msix | IGC_IVAR_VALID) << 8;
		}
		IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
		sc->que_mask |= tx_que->eims;
	}

	/* And for the link interrupt */
	ivar = (sc->linkvec | IGC_IVAR_VALID) << 8;
	sc->link_mask = 1 << sc->linkvec;
	IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);

	/* Set the starting interrupt rate */
	if (igc_max_interrupt_rate > 0)
		newitr = IGC_INTS_TO_EITR(igc_max_interrupt_rate);

	newitr |= IGC_EITR_CNT_IGNR;

	for (int i = 0; i < sc->rx_num_queues; i++) {
		rx_que = &sc->rx_queues[i];
		IGC_WRITE_REG(hw, IGC_EITR(rx_que->msix), newitr);
	}

	return;
}

static void
igc_free_pci_resources(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	for (int i = 0; i < sc->rx_num_queues; i++, que++) {
		iflib_irq_free(ctx, &que->que_irq);
	}

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->memory), sc->memory);
		sc->memory = NULL;
	}

	if (sc->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->flash), sc->flash);
		sc->flash = NULL;
	}

	if (sc->ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT,
		    rman_get_rid(sc->ioport), sc->ioport);
		sc->ioport = NULL;
	}
}

/* Set up MSI or MSI-X */
static int
igc_setup_msix(if_ctx_t ctx)
{
	return (0);
}

/*********************************************************************
 *
 *  Initialize the DMA Coalescing feature
 *
 **********************************************************************/
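/*
 * Worked example for the thresholds computed below (a sketch; pba is
 * assumed to be in KB units and a 1522-byte max frame is assumed): with
 * pba = 34, the high water mark is hwm = 64 * 34 - 1522 / 16 = 2176 - 95
 * = 2081, clamped to no less than 64 * (34 - 6) = 1792; the coalescing
 * threshold is dmac = 34 - 1522 / 512 = 32, clamped to no less than
 * 34 - 10 = 24.
 */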
static void
igc_init_dmac(struct igc_softc *sc, u32 pba)
{
	device_t dev = sc->dev;
	struct igc_hw *hw = &sc->hw;
	u32 dmac, reg = ~IGC_DMACR_DMAC_EN;
	u16 hwm;
	u16 max_frame_size;
	int status;

	max_frame_size = sc->shared->isc_max_frame_size;

	if (sc->dmac == 0) { /* Disabling it */
		IGC_WRITE_REG(hw, IGC_DMACR, reg);
		return;
	} else
		device_printf(dev, "DMA Coalescing enabled\n");

	/* Set starting threshold */
	IGC_WRITE_REG(hw, IGC_DMCTXTH, 0);

	hwm = 64 * pba - max_frame_size / 16;
	if (hwm < 64 * (pba - 6))
		hwm = 64 * (pba - 6);
	reg = IGC_READ_REG(hw, IGC_FCRTC);
	reg &= ~IGC_FCRTC_RTH_COAL_MASK;
	reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT)
	    & IGC_FCRTC_RTH_COAL_MASK);
	IGC_WRITE_REG(hw, IGC_FCRTC, reg);

	dmac = pba - max_frame_size / 512;
	if (dmac < pba - 10)
		dmac = pba - 10;
	reg = IGC_READ_REG(hw, IGC_DMACR);
	reg &= ~IGC_DMACR_DMACTHR_MASK;
	reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT)
	    & IGC_DMACR_DMACTHR_MASK);

	/* transition to L0x or L1 if available */
	reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK);

	/*
	 * Check whether this is a 2.5Gb backplane connection before
	 * configuring the watchdog timer: on a 2.5Gb connection the
	 * timer counts in 12.8 usec intervals, on anything slower it
	 * counts in 32 usec intervals.
	 */
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
		reg |= ((sc->dmac * 5) >> 6);
	else
		reg |= (sc->dmac >> 5);

	IGC_WRITE_REG(hw, IGC_DMACR, reg);

	IGC_WRITE_REG(hw, IGC_DMCRTRH, 0);

	/* Set the interval before transition */
	reg = IGC_READ_REG(hw, IGC_DMCTLX);
	reg |= IGC_DMCTLX_DCFLUSH_DIS;

	/*
	 * On a 2.5Gb connection the TTLX unit is 0.4 usec, so a 4 usec
	 * delay needs a count of 10 (0xA); the delay itself is still
	 * 4 usec either way.
	 */
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
		reg |= 0xA;
	else
		reg |= 0x4;

	IGC_WRITE_REG(hw, IGC_DMCTLX, reg);

	/* free space in tx packet buffer to wake from DMA coal */
	IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE -
	    (2 * max_frame_size)) >> 6);

	/* make low power state decision controlled by DMA coal */
	reg = IGC_READ_REG(hw, IGC_PCIEMISC);
	reg &= ~IGC_PCIEMISC_LX_DECISION;
	IGC_WRITE_REG(hw, IGC_PCIEMISC, reg);
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  softc structure.
 *
 **********************************************************************/
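/*
 * Worked example for the flow control watermarks set in the function
 * below (assuming IGC_PBA_34K denotes a 34 KB RX packet buffer and the
 * default 1522-byte max frame): rx_buffer_size = 34 * 1024 = 34816 bytes,
 * so high_water = 34816 - roundup2(1522, 1024) = 34816 - 2048 = 32768
 * bytes, and low_water sits one 16-byte granule beneath it at 32752.
 */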

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  softc structure.
 *
 **********************************************************************/
static void
igc_reset(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_hw *hw = &sc->hw;
	u32 rx_buffer_size;
	u32 pba;

	INIT_DEBUGOUT("igc_reset: begin");
	/* Let the firmware know the OS is in control */
	igc_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	pba = IGC_PBA_34K;

	INIT_DEBUGOUT1("igc_reset: pba=%dK", pba);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high
	 *   water mark.  This allows the receiver to restart by sending
	 *   XON when it has drained a bit.  Here it sits just 16 bytes
	 *   below the high water mark, matching the register's 16-byte
	 *   granularity.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size = (pba & 0xffff) << 10;
	hw->fc.high_water = rx_buffer_size -
	    roundup2(sc->hw.mac.max_frame_size, 1024);
	/* 16-byte granularity */
	hw->fc.low_water = hw->fc.high_water - 16;

	if (sc->fc) /* locally set flow control value? */
		hw->fc.requested_mode = sc->fc;
	else
		hw->fc.requested_mode = igc_fc_full;

	hw->fc.pause_time = IGC_FC_PAUSE_TIME;

	hw->fc.send_xon = true;

	/* Issue a global reset */
	igc_reset_hw(hw);
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* and a re-init */
	if (igc_init_hw(hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return;
	}

	/* Setup DMA Coalescing */
	igc_init_dmac(sc, pba);

	/* Save the final PBA off if it needs to be used elsewhere i.e. AIM */
	sc->pba = pba;

	IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
	igc_get_phy_info(hw);
	igc_check_for_link(hw);
}
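
/*
 * Worked example (illustrative only, assuming IGC_PBA_34K encodes 34):
 * rx_buffer_size = 34 << 10 = 34816 bytes.  With a standard 1518-byte
 * max frame, roundup2(1518, 1024) = 2048, so high_water = 34816 - 2048
 * = 32768 and low_water = 32752, leaving one rounded-up frame of
 * headroom above the high water mark.
 */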

/*
 * Initialise the RSS mapping for NICs that support multiple transmit/
 * receive rings.
 */

#define RSSKEYLEN 10
static void
igc_initialize_rss_mapping(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	int i;
	int queue_id;
	u32 reta;
	u32 rss_key[RSSKEYLEN], mrqc, shift = 0;

	/*
	 * The redirection table controls which destination
	 * queue each bucket redirects traffic to.
	 * Each DWORD represents four queues, with the LSB
	 * being the first queue in the DWORD.
	 *
	 * This just allocates buckets to queues using round-robin
	 * allocation.
	 *
	 * NOTE: It Just Happens to line up with the default
	 * RSS allocation method.
	 */

	/* Warning FM follows */
	reta = 0;
	for (i = 0; i < 128; i++) {
#ifdef RSS
		queue_id = rss_get_indirection_to_bucket(i);
		/*
		 * If we have more queues than buckets, we'll
		 * end up mapping buckets to a subset of the
		 * queues.
		 *
		 * If we have more buckets than queues, we'll
		 * end up instead assigning multiple buckets
		 * to queues.
		 *
		 * Both are suboptimal, but we need to handle
		 * the case so we don't go out of bounds
		 * indexing arrays and such.
		 */
		queue_id = queue_id % sc->rx_num_queues;
#else
		queue_id = (i % sc->rx_num_queues);
#endif
		/* Adjust if required */
		queue_id = queue_id << shift;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Now fill in hash table */

	/*
	 * MRQC: Multiple Receive Queues Command
	 * Set queuing to RSS control, number depends on the device.
	 */
	mrqc = IGC_MRQC_ENABLE_RSS_4Q;

#ifdef RSS
	/* XXX ew typecasting */
	rss_getkey((uint8_t *)&rss_key);
#else
	arc4rand(&rss_key, sizeof(rss_key), 0);
#endif
	for (i = 0; i < RSSKEYLEN; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]);

	/*
	 * Configure the RSS fields to hash upon.
	 */
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 |
	    IGC_MRQC_RSS_FIELD_IPV4_TCP);
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 |
	    IGC_MRQC_RSS_FIELD_IPV6_TCP);
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV4_UDP |
	    IGC_MRQC_RSS_FIELD_IPV6_UDP);
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV6_UDP_EX |
	    IGC_MRQC_RSS_FIELD_IPV6_TCP_EX);

	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
}
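
/*
 * Worked example (illustrative only): with rx_num_queues = 4 the loop
 * above assigns buckets 0..127 round-robin, so the first RETA register
 * (buckets 0-3) packs queue ids 0,1,2,3 into one DWORD as 0x03020100 --
 * bucket 0 in the low byte, bucket 3 in the high byte.
 */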

/*********************************************************************
 *
 *  Setup networking device structure and register interface media.
 *
 **********************************************************************/
static int
igc_setup_interface(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;

	INIT_DEBUGOUT("igc_setup_interface: begin");

	/* Single Queue */
	if (sc->tx_num_queues == 1) {
		if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1);
		if_setsendqready(ifp);
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);

	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
	return (0);
}
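
/*
 * Example (illustrative only): the media entries registered above can
 * be selected from userland with ifconfig(8), e.g.
 *
 *	ifconfig igc0 media autoselect
 *	ifconfig igc0 media 1000baseT mediaopt full-duplex
 *
 * The exact media words are the ones reported by "ifconfig -m igc0".
 */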

static int
igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	int error = IGC_SUCCESS;
	struct igc_tx_queue *que;
	int i, j;

	MPASS(sc->tx_num_queues > 0);
	MPASS(sc->tx_num_queues == ntxqsets);

	/* First allocate the top level queue structs */
	if (!(sc->tx_queues =
	    (struct igc_tx_queue *)malloc(sizeof(struct igc_tx_queue) *
	    sc->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate queue memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < sc->tx_num_queues; i++, que++) {
		/* Set up some basics */

		struct tx_ring *txr = &que->txr;
		txr->sc = que->sc = sc;
		que->me = txr->me = i;

		/* Allocate report status array */
		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
			device_printf(iflib_get_dev(ctx),
			    "failed to allocate rs_idxs memory\n");
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tx_base = (struct igc_tx_desc *)vaddrs[i*ntxqs];
		txr->tx_paddr = paddrs[i*ntxqs];
	}

	if (bootverbose)
		device_printf(iflib_get_dev(ctx),
		    "allocated for %d tx_queues\n", sc->tx_num_queues);
	return (0);
fail:
	igc_if_queues_free(ctx);
	return (error);
}

static int
igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	int error = IGC_SUCCESS;
	struct igc_rx_queue *que;
	int i;

	MPASS(sc->rx_num_queues > 0);
	MPASS(sc->rx_num_queues == nrxqsets);

	/* First allocate the top level queue structs */
	if (!(sc->rx_queues =
	    (struct igc_rx_queue *)malloc(sizeof(struct igc_rx_queue) *
	    sc->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		/* Set up some basics */
		struct rx_ring *rxr = &que->rxr;
		rxr->sc = que->sc = sc;
		rxr->que = que;
		que->me = rxr->me = i;

		/* get the virtual and physical address of the hardware queues */
		rxr->rx_base = (union igc_rx_desc_extended *)vaddrs[i*nrxqs];
		rxr->rx_paddr = paddrs[i*nrxqs];
	}

	if (bootverbose)
		device_printf(iflib_get_dev(ctx),
		    "allocated for %d rx_queues\n", sc->rx_num_queues);

	return (0);
fail:
	igc_if_queues_free(ctx);
	return (error);
}

static void
igc_if_queues_free(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_tx_queue *tx_que = sc->tx_queues;
	struct igc_rx_queue *rx_que = sc->rx_queues;

	if (tx_que != NULL) {
		for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)
				break;

			free(txr->tx_rsq, M_DEVBUF);
			txr->tx_rsq = NULL;
		}
		free(sc->tx_queues, M_DEVBUF);
		sc->tx_queues = NULL;
	}

	if (rx_que != NULL) {
		free(sc->rx_queues, M_DEVBUF);
		sc->rx_queues = NULL;
	}

	if (sc->mta != NULL) {
		free(sc->mta, M_DEVBUF);
	}
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
igc_initialize_transmit_unit(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct igc_tx_queue *que;
	struct tx_ring *txr;
	struct igc_hw *hw = &sc->hw;
	u32 tctl, txdctl = 0;

	INIT_DEBUGOUT("igc_initialize_transmit_unit: begin");

	for (int i = 0; i < sc->tx_num_queues; i++) {
		u64 bus_addr;
		caddr_t offp, endp;

		que = &sc->tx_queues[i];
		txr = &que->txr;
		bus_addr = txr->tx_paddr;

		/* Clear checksum offload context. */
		offp = (caddr_t)&txr->csum_flags;
		endp = (caddr_t)(txr + 1);
		bzero(offp, endp - offp);

		/* Base and Len of TX Ring */
		IGC_WRITE_REG(hw, IGC_TDLEN(i),
		    scctx->isc_ntxd[0] * sizeof(struct igc_tx_desc));
		IGC_WRITE_REG(hw, IGC_TDBAH(i),
		    (u32)(bus_addr >> 32));
		IGC_WRITE_REG(hw, IGC_TDBAL(i),
		    (u32)bus_addr);
		/* Init the HEAD/TAIL indices */
		IGC_WRITE_REG(hw, IGC_TDT(i), 0);
		IGC_WRITE_REG(hw, IGC_TDH(i), 0);

		HW_DEBUGOUT2("Base = %x, Length = %x\n",
		    IGC_READ_REG(&sc->hw, IGC_TDBAL(i)),
		    IGC_READ_REG(&sc->hw, IGC_TDLEN(i)));

		txdctl = 0;		/* clear txdctl */
		txdctl |= 0x1f;		/* PTHRESH */
		txdctl |= 1 << 8;	/* HTHRESH */
		txdctl |= 1 << 16;	/* WTHRESH */
		txdctl |= 1 << 22;	/* Reserved bit 22 must always be 1 */
		txdctl |= IGC_TXDCTL_GRAN;
		txdctl |= 1 << 25;	/* LWTHRESH */

		IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl);
	}

	/* Program the Transmit Control Register */
	tctl = IGC_READ_REG(&sc->hw, IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
	    (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl);
}
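
/*
 * Bit-layout note (illustrative only): the shifts above place PTHRESH
 * (0x1f) in the low bits of TXDCTL, HTHRESH at bit 8, WTHRESH at bit
 * 16 and LWTHRESH at bit 25, matching the i225 TXDCTL field layout;
 * IGC_TXDCTL_GRAN selects descriptor (rather than cache-line)
 * granularity for the thresholds.
 */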

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IGC_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
igc_initialize_receive_unit(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	if_t ifp = iflib_get_ifp(ctx);
	struct igc_hw *hw = &sc->hw;
	struct igc_rx_queue *que;
	int i;
	u32 psize, rctl, rxcsum, srrctl = 0;

	INIT_DEBUGOUT("igc_initialize_receive_units: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = IGC_READ_REG(hw, IGC_RCTL);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM |
	    IGC_RCTL_LBM_NO | IGC_RCTL_RDMTS_HALF |
	    (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* Do not store bad packets */
	rctl &= ~IGC_RCTL_SBP;

	/* Enable Long Packet receive */
	if (if_getmtu(ifp) > ETHERMTU)
		rctl |= IGC_RCTL_LPE;
	else
		rctl &= ~IGC_RCTL_LPE;

	/* Strip the CRC */
	if (!igc_disable_crc_stripping)
		rctl |= IGC_RCTL_SECRC;

	rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
	if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
		rxcsum |= IGC_RXCSUM_CRCOFL;
		if (sc->tx_num_queues > 1)
			rxcsum |= IGC_RXCSUM_PCSD;
		else
			rxcsum |= IGC_RXCSUM_IPPCSE;
	} else {
		if (sc->tx_num_queues > 1)
			rxcsum |= IGC_RXCSUM_PCSD;
		else
			rxcsum &= ~IGC_RXCSUM_TUOFL;
	}
	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);

	if (sc->rx_num_queues > 1)
		igc_initialize_rss_mapping(sc);

	if (if_getmtu(ifp) > ETHERMTU) {
		psize = scctx->isc_max_frame_size;
		/* are we on a vlan? */
		if (if_vlantrunkinuse(ifp))
			psize += VLAN_TAG_SIZE;
		IGC_WRITE_REG(&sc->hw, IGC_RLPML, psize);
	}

	/* Set maximum packet buffer len */
	srrctl |= (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IGC_SRRCTL_BSIZEPKT_SHIFT;
	/* srrctl above overrides this but set the register to a sane value */
	rctl |= IGC_RCTL_SZ_2048;

	/*
	 * If TX flow control is disabled and there's >1 queue defined,
	 * enable DROP.
	 *
	 * This drops frames rather than hanging the RX MAC for all queues.
	 */
	if ((sc->rx_num_queues > 1) &&
	    (sc->fc == igc_fc_none ||
	     sc->fc == igc_fc_rx_pause)) {
		srrctl |= IGC_SRRCTL_DROP_EN;
	}

	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 bus_addr = rxr->rx_paddr;
		u32 rxdctl;

#ifdef notyet
		/* Configure for header split? -- ignore for now */
		rxr->hdr_split = igc_header_split;
#else
		srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
#endif

		IGC_WRITE_REG(hw, IGC_RDLEN(i),
		    scctx->isc_nrxd[0] * sizeof(struct igc_rx_desc));
		IGC_WRITE_REG(hw, IGC_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		IGC_WRITE_REG(hw, IGC_RDBAL(i),
		    (uint32_t)bus_addr);
		IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl);
		/* Setup the Head and Tail Descriptor Pointers */
		IGC_WRITE_REG(hw, IGC_RDH(i), 0);
		IGC_WRITE_REG(hw, IGC_RDT(i), 0);
		/* Enable this Queue */
		rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i));
		rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGC_RX_PTHRESH;
		rxdctl |= IGC_RX_HTHRESH << 8;
		rxdctl |= IGC_RX_WTHRESH << 16;
		IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl);
	}

	/* Make sure VLAN Filters are off */
	rctl &= ~IGC_RCTL_VFE;

	/* Write out the settings */
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
}
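
/*
 * Worked example (illustrative only): assuming 2048-byte receive
 * buffers (MCLBYTES) and the 1 KB granularity implied by
 * IGC_SRRCTL_BSIZEPKT_SHIFT (10), the BSIZEPKT field computed above is
 * (2048 + 1023) >> 10 = 2, i.e. two 1 KB units per packet buffer; the
 * ROUNDUP term only matters when the buffer size is not a 1 KB
 * multiple.
 */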

static void
igc_setup_vlan_hw_support(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_hw *hw = &sc->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	u32 reg;

	/* igc hardware doesn't seem to implement VFTA for HWFILTER */

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) &&
	    !igc_disable_crc_stripping) {
		reg = IGC_READ_REG(hw, IGC_CTRL);
		reg |= IGC_CTRL_VME;
		IGC_WRITE_REG(hw, IGC_CTRL, reg);
	} else {
		reg = IGC_READ_REG(hw, IGC_CTRL);
		reg &= ~IGC_CTRL_VME;
		IGC_WRITE_REG(hw, IGC_CTRL, reg);
	}
}

static void
igc_if_intr_enable(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_hw *hw = &sc->hw;
	u32 mask;

	if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {
		mask = (sc->que_mask | sc->link_mask);
		IGC_WRITE_REG(hw, IGC_EIAC, mask);
		IGC_WRITE_REG(hw, IGC_EIAM, mask);
		IGC_WRITE_REG(hw, IGC_EIMS, mask);
		IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC);
	} else
		IGC_WRITE_REG(hw, IGC_IMS, IMS_ENABLE_MASK);
	IGC_WRITE_FLUSH(hw);
}

static void
igc_if_intr_disable(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_hw *hw = &sc->hw;

	if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {
		IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff);
		IGC_WRITE_REG(hw, IGC_EIAC, 0);
	}
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
	IGC_WRITE_FLUSH(hw);
}

/*
 * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded. For AMT version type f/w
 * this means that the network i/f is open.
 */
static void
igc_get_hw_control(struct igc_softc *sc)
{
	u32 ctrl_ext;

	if (sc->vf_ifp)
		return;

	ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT,
	    ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is no longer loaded. For AMT versions of the
 * f/w this means that the network i/f is closed.
 */
static void
igc_release_hw_control(struct igc_softc *sc)
{
	u32 ctrl_ext;

	ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT,
	    ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

static int
igc_is_valid_ether_addr(u8 *addr)
{
	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
		return (false);
	}

	return (true);
}

/*
** Parse the interface capabilities with regard
** to both system management and wake-on-lan for
** later use.
*/
static void
igc_get_wakeup(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	u16 eeprom_data = 0, apme_mask;

	apme_mask = IGC_WUC_APME;
	eeprom_data = IGC_READ_REG(&sc->hw, IGC_WUC);

	if (eeprom_data & apme_mask)
		sc->wol = IGC_WUFC_LNKC;
}

/*
 * Enable PCI Wake On Lan capability
 */
static void
igc_enable_wakeup(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int error = 0;
	u32 pmc, ctrl, rctl;
	u16 status;

	if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0)
		return;

	/*
	 * Determine type of Wakeup: note that wol
	 * is set with all bits on by default.
	 */
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0)
		sc->wol &= ~IGC_WUFC_MAG;

	if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0)
		sc->wol &= ~IGC_WUFC_EX;

	if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0)
		sc->wol &= ~IGC_WUFC_MC;
	else {
		rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
		rctl |= IGC_RCTL_MPE;
		IGC_WRITE_REG(&sc->hw, IGC_RCTL, rctl);
	}

	if (!(sc->wol & (IGC_WUFC_EX | IGC_WUFC_MAG | IGC_WUFC_MC)))
		goto pme;

	/* Advertise the wakeup capability */
	ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL);
	ctrl |= IGC_CTRL_ADVD3WUC;
	IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl);

	/* Enable wakeup by the MAC */
	IGC_WRITE_REG(&sc->hw, IGC_WUC, IGC_WUC_PME_EN);
	IGC_WRITE_REG(&sc->hw, IGC_WUFC, sc->wol);

pme:
	status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (!error && (if_getcapenable(ifp) & IFCAP_WOL))
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
}
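
/*
 * Example (illustrative only): wake-on-LAN is requested per interface
 * through the capability flags consumed above, e.g.
 *
 *	ifconfig igc0 wol_magic
 *
 * (or wol_ucast / wol_mcast), after which a magic packet addressed to
 * the interface's MAC can wake the suspended machine.
 */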

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
igc_update_stats_counters(struct igc_softc *sc)
{
	u64 prev_xoffrxc = sc->stats.xoffrxc;

	sc->stats.crcerrs += IGC_READ_REG(&sc->hw, IGC_CRCERRS);
	sc->stats.mpc += IGC_READ_REG(&sc->hw, IGC_MPC);
	sc->stats.scc += IGC_READ_REG(&sc->hw, IGC_SCC);
	sc->stats.ecol += IGC_READ_REG(&sc->hw, IGC_ECOL);

	sc->stats.mcc += IGC_READ_REG(&sc->hw, IGC_MCC);
	sc->stats.latecol += IGC_READ_REG(&sc->hw, IGC_LATECOL);
	sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_COLC);
	sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_RERC);
	sc->stats.dc += IGC_READ_REG(&sc->hw, IGC_DC);
	sc->stats.rlec += IGC_READ_REG(&sc->hw, IGC_RLEC);
	sc->stats.xonrxc += IGC_READ_REG(&sc->hw, IGC_XONRXC);
	sc->stats.xontxc += IGC_READ_REG(&sc->hw, IGC_XONTXC);
	sc->stats.xoffrxc += IGC_READ_REG(&sc->hw, IGC_XOFFRXC);
	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	if (sc->stats.xoffrxc != prev_xoffrxc)
		sc->shared->isc_pause_frames = 1;
	sc->stats.xofftxc += IGC_READ_REG(&sc->hw, IGC_XOFFTXC);
	sc->stats.fcruc += IGC_READ_REG(&sc->hw, IGC_FCRUC);
	sc->stats.prc64 += IGC_READ_REG(&sc->hw, IGC_PRC64);
	sc->stats.prc127 += IGC_READ_REG(&sc->hw, IGC_PRC127);
	sc->stats.prc255 += IGC_READ_REG(&sc->hw, IGC_PRC255);
	sc->stats.prc511 += IGC_READ_REG(&sc->hw, IGC_PRC511);
	sc->stats.prc1023 += IGC_READ_REG(&sc->hw, IGC_PRC1023);
	sc->stats.prc1522 += IGC_READ_REG(&sc->hw, IGC_PRC1522);
	sc->stats.tlpic += IGC_READ_REG(&sc->hw, IGC_TLPIC);
	sc->stats.rlpic += IGC_READ_REG(&sc->hw, IGC_RLPIC);
	sc->stats.gprc += IGC_READ_REG(&sc->hw, IGC_GPRC);
	sc->stats.bprc += IGC_READ_REG(&sc->hw, IGC_BPRC);
	sc->stats.mprc += IGC_READ_REG(&sc->hw, IGC_MPRC);
	sc->stats.gptc += IGC_READ_REG(&sc->hw, IGC_GPTC);

	/*
	 * For the 64-bit byte counters the low dword must be read first;
	 * both registers clear on the read of the high dword.
	 */
	sc->stats.gorc += IGC_READ_REG(&sc->hw, IGC_GORCL) +
	    ((u64)IGC_READ_REG(&sc->hw, IGC_GORCH) << 32);
	sc->stats.gotc += IGC_READ_REG(&sc->hw, IGC_GOTCL) +
	    ((u64)IGC_READ_REG(&sc->hw, IGC_GOTCH) << 32);

	sc->stats.rnbc += IGC_READ_REG(&sc->hw, IGC_RNBC);
	sc->stats.ruc += IGC_READ_REG(&sc->hw, IGC_RUC);
	sc->stats.rfc += IGC_READ_REG(&sc->hw, IGC_RFC);
	sc->stats.roc += IGC_READ_REG(&sc->hw, IGC_ROC);
	sc->stats.rjc += IGC_READ_REG(&sc->hw, IGC_RJC);

	sc->stats.mgprc += IGC_READ_REG(&sc->hw, IGC_MGTPRC);
	sc->stats.mgpdc += IGC_READ_REG(&sc->hw, IGC_MGTPDC);
	sc->stats.mgptc += IGC_READ_REG(&sc->hw, IGC_MGTPTC);

	sc->stats.tor += IGC_READ_REG(&sc->hw, IGC_TORH);
	sc->stats.tot += IGC_READ_REG(&sc->hw, IGC_TOTH);

	sc->stats.tpr += IGC_READ_REG(&sc->hw, IGC_TPR);
	sc->stats.tpt += IGC_READ_REG(&sc->hw, IGC_TPT);
	sc->stats.ptc64 += IGC_READ_REG(&sc->hw, IGC_PTC64);
	sc->stats.ptc127 += IGC_READ_REG(&sc->hw, IGC_PTC127);
	sc->stats.ptc255 += IGC_READ_REG(&sc->hw, IGC_PTC255);
	sc->stats.ptc511 += IGC_READ_REG(&sc->hw, IGC_PTC511);
	sc->stats.ptc1023 += IGC_READ_REG(&sc->hw, IGC_PTC1023);
	sc->stats.ptc1522 += IGC_READ_REG(&sc->hw, IGC_PTC1522);
	sc->stats.mptc += IGC_READ_REG(&sc->hw, IGC_MPTC);
	sc->stats.bptc += IGC_READ_REG(&sc->hw, IGC_BPTC);

	/* Interrupt Counts */
	sc->stats.iac += IGC_READ_REG(&sc->hw, IGC_IAC);
	sc->stats.rxdmtc += IGC_READ_REG(&sc->hw, IGC_RXDMTC);

	sc->stats.algnerrc += IGC_READ_REG(&sc->hw, IGC_ALGNERRC);
	sc->stats.tncrs += IGC_READ_REG(&sc->hw, IGC_TNCRS);
	sc->stats.htdpmc += IGC_READ_REG(&sc->hw, IGC_HTDPMC);
	sc->stats.tsctc += IGC_READ_REG(&sc->hw, IGC_TSCTC);
}

static uint64_t
igc_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_COLLISIONS:
		return (sc->stats.colc);
	case IFCOUNTER_IERRORS:
		return (sc->dropped_pkts + sc->stats.rxerrc +
		    sc->stats.crcerrs + sc->stats.algnerrc +
		    sc->stats.ruc + sc->stats.roc +
		    sc->stats.mpc + sc->stats.htdpmc);
	case IFCOUNTER_OERRORS:
		return (sc->stats.ecol + sc->stats.latecol +
		    sc->watchdog_events);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

/* igc_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
igc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

/* Export a single 32-bit register via a read-only sysctl. */
static int
igc_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc;
	u_int val;

	sc = oidp->oid_arg1;
	val = IGC_READ_REG(&sc->hw, oidp->oid_arg2);
	return (sysctl_handle_int(oidp, &val, 0, req));
}

/* Per queue holdoff interrupt rate handler */
static int
igc_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
	struct igc_rx_queue *rque;
	struct igc_tx_queue *tque;
	struct igc_hw *hw;
	int error;
	u32 reg, usec, rate;

	bool tx = oidp->oid_arg2;

	if (tx) {
		tque = oidp->oid_arg1;
		hw = &tque->sc->hw;
		reg = IGC_READ_REG(hw, IGC_EITR(tque->me));
	} else {
		rque = oidp->oid_arg1;
		hw = &rque->sc->hw;
		reg = IGC_READ_REG(hw, IGC_EITR(rque->msix));
	}

	usec = (reg & IGC_QVECTOR_MASK);
	if (usec > 0)
		rate = IGC_INTS_TO_EITR(usec);
	else
		rate = 0;

	error = sysctl_handle_int(oidp, &rate, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}
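
/*
 * Example (illustrative only): the handler above surfaces the per-queue
 * holdoff rate under the queue sysctl nodes created in
 * igc_add_hw_stats() below, so it can be read with e.g.
 *
 *	sysctl dev.igc.0.queue_rx_0.interrupt_rate
 *
 * The reported value is derived from the EITR register contents at the
 * time of the read.
 */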
"txd_head", 2689 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2690 IGC_TDH(txr->me), igc_sysctl_reg_handler, "IU", 2691 "Transmit Descriptor Head"); 2692 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 2693 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2694 IGC_TDT(txr->me), igc_sysctl_reg_handler, "IU", 2695 "Transmit Descriptor Tail"); 2696 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", 2697 CTLFLAG_RD, &txr->tx_irq, 2698 "Queue MSI-X Transmit Interrupts"); 2699 } 2700 2701 for (int j = 0; j < sc->rx_num_queues; j++, rx_que++) { 2702 struct rx_ring *rxr = &rx_que->rxr; 2703 snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j); 2704 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 2705 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name"); 2706 queue_list = SYSCTL_CHILDREN(queue_node); 2707 2708 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 2709 CTLTYPE_UINT | CTLFLAG_RD, rx_que, 2710 false, igc_sysctl_interrupt_rate_handler, "IU", 2711 "Interrupt Rate"); 2712 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 2713 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2714 IGC_RDH(rxr->me), igc_sysctl_reg_handler, "IU", 2715 "Receive Descriptor Head"); 2716 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 2717 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2718 IGC_RDT(rxr->me), igc_sysctl_reg_handler, "IU", 2719 "Receive Descriptor Tail"); 2720 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq", 2721 CTLFLAG_RD, &rxr->rx_irq, 2722 "Queue MSI-X Receive Interrupts"); 2723 } 2724 2725 /* MAC stats get their own sub node */ 2726 2727 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 2728 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics"); 2729 stat_list = SYSCTL_CHILDREN(stat_node); 2730 2731 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll", 2732 CTLFLAG_RD, &stats->ecol, 2733 "Excessive collisions"); 2734 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll", 2735 CTLFLAG_RD, &stats->scc, 2736 "Single collisions"); 2737 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll", 2738 CTLFLAG_RD, &stats->mcc, 2739 "Multiple collisions"); 2740 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll", 2741 CTLFLAG_RD, &stats->latecol, 2742 "Late collisions"); 2743 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count", 2744 CTLFLAG_RD, &stats->colc, 2745 "Collision Count"); 2746 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors", 2747 CTLFLAG_RD, &sc->stats.symerrs, 2748 "Symbol Errors"); 2749 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors", 2750 CTLFLAG_RD, &sc->stats.sec, 2751 "Sequence Errors"); 2752 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count", 2753 CTLFLAG_RD, &sc->stats.dc, 2754 "Defer Count"); 2755 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets", 2756 CTLFLAG_RD, &sc->stats.mpc, 2757 "Missed Packets"); 2758 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_length_errors", 2759 CTLFLAG_RD, &sc->stats.rlec, 2760 "Receive Length Errors"); 2761 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", 2762 CTLFLAG_RD, &sc->stats.rnbc, 2763 "Receive No Buffers"); 2764 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize", 2765 CTLFLAG_RD, &sc->stats.ruc, 2766 "Receive Undersize"); 2767 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 2768 CTLFLAG_RD, &sc->stats.rfc, 2769 "Fragmented Packets Received "); 2770 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize", 2771 CTLFLAG_RD, &sc->stats.roc, 2772 "Oversized Packets Received"); 2773 

	/* MAC stats get their own sub node */

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
	    CTLFLAG_RD, &stats->ecol,
	    "Excessive collisions");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
	    CTLFLAG_RD, &stats->scc,
	    "Single collisions");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
	    CTLFLAG_RD, &stats->mcc,
	    "Multiple collisions");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
	    CTLFLAG_RD, &stats->latecol,
	    "Late collisions");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
	    CTLFLAG_RD, &stats->colc,
	    "Collision Count");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
	    CTLFLAG_RD, &sc->stats.symerrs,
	    "Symbol Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
	    CTLFLAG_RD, &sc->stats.sec,
	    "Sequence Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
	    CTLFLAG_RD, &sc->stats.dc,
	    "Defer Count");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
	    CTLFLAG_RD, &sc->stats.mpc,
	    "Missed Packets");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_length_errors",
	    CTLFLAG_RD, &sc->stats.rlec,
	    "Receive Length Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
	    CTLFLAG_RD, &sc->stats.rnbc,
	    "Receive No Buffers");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
	    CTLFLAG_RD, &sc->stats.ruc,
	    "Receive Undersize");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
	    CTLFLAG_RD, &sc->stats.rfc,
	    "Fragmented Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
	    CTLFLAG_RD, &sc->stats.roc,
	    "Oversized Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
	    CTLFLAG_RD, &sc->stats.rjc,
	    "Received Jabber");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
	    CTLFLAG_RD, &sc->stats.rxerrc,
	    "Receive Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &sc->stats.crcerrs,
	    "CRC errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
	    CTLFLAG_RD, &sc->stats.algnerrc,
	    "Alignment Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &sc->stats.xonrxc,
	    "XON Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &sc->stats.xontxc,
	    "XON Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &sc->stats.xoffrxc,
	    "XOFF Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &sc->stats.xofftxc,
	    "XOFF Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "unsupported_fc_recvd",
	    CTLFLAG_RD, &sc->stats.fcruc,
	    "Unsupported Flow Control Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_recvd",
	    CTLFLAG_RD, &sc->stats.mgprc,
	    "Management Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_drop",
	    CTLFLAG_RD, &sc->stats.mgpdc,
	    "Management Packets Dropped");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_txd",
	    CTLFLAG_RD, &sc->stats.mgptc,
	    "Management Packets Transmitted");

	/* Packet Reception Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
	    CTLFLAG_RD, &sc->stats.tpr,
	    "Total Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
	    CTLFLAG_RD, &sc->stats.gprc,
	    "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
	    CTLFLAG_RD, &sc->stats.bprc,
	    "Broadcast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
	    CTLFLAG_RD, &sc->stats.mprc,
	    "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &sc->stats.prc64,
	    "64 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &sc->stats.prc127,
	    "65-127 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &sc->stats.prc255,
	    "128-255 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &sc->stats.prc511,
	    "256-511 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &sc->stats.prc1023,
	    "512-1023 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &sc->stats.prc1522,
	    "1024-1522 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
	    CTLFLAG_RD, &sc->stats.gorc,
	    "Good Octets Received");

	/* Packet Transmission Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &sc->stats.gotc,
	    "Good Octets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &sc->stats.tpt,
	    "Total Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &sc->stats.gptc,
	    "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &sc->stats.bptc,
	    "Broadcast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &sc->stats.mptc,
	    "Multicast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &sc->stats.ptc64,
	    "64 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &sc->stats.ptc127,
	    "65-127 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &sc->stats.ptc255,
	    "128-255 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &sc->stats.ptc511,
	    "256-511 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &sc->stats.ptc1023,
	    "512-1023 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &sc->stats.ptc1522,
	    "1024-1522 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
	    CTLFLAG_RD, &sc->stats.tsctc,
	    "TSO Contexts Transmitted");

	/* Interrupt Stats */

	int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics");
	int_list = SYSCTL_CHILDREN(int_node);

	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
	    CTLFLAG_RD, &sc->stats.iac,
	    "Interrupt Assertion Count");

	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
	    CTLFLAG_RD, &sc->stats.rxdmtc,
	    "Rx Desc Min Thresh Count");
}

static void
igc_fw_version(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	struct igc_fw_version *fw_ver = &sc->fw_ver;

	*fw_ver = (struct igc_fw_version){0};

	igc_get_fw_version(hw, fw_ver);
}

static void
igc_sbuf_fw_version(struct igc_fw_version *fw_ver, struct sbuf *buf)
{
	const char *space = "";

	if (fw_ver->eep_major || fw_ver->eep_minor || fw_ver->eep_build) {
		sbuf_printf(buf, "EEPROM V%d.%d-%d", fw_ver->eep_major,
		    fw_ver->eep_minor, fw_ver->eep_build);
		space = " ";
	}

	if (fw_ver->invm_major || fw_ver->invm_minor || fw_ver->invm_img_type) {
		sbuf_printf(buf, "%sNVM V%d.%d imgtype%d",
		    space, fw_ver->invm_major, fw_ver->invm_minor,
		    fw_ver->invm_img_type);
		space = " ";
	}

	if (fw_ver->or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
		    space, fw_ver->or_major, fw_ver->or_build,
		    fw_ver->or_patch);
		space = " ";
	}

	if (fw_ver->etrack_id)
		sbuf_printf(buf, "%seTrack 0x%08x", space, fw_ver->etrack_id);
}

static void
igc_print_fw_version(struct igc_softc *sc)
{
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	igc_sbuf_fw_version(&sc->fw_ver, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
}
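
/*
 * Example output (illustrative only, with made-up version numbers):
 * the sbuf assembled above produces a single line such as
 *
 *	igc0: EEPROM V1.2-3 eTrack 0x80000abc
 *
 * containing only the version components the device actually reports.
 */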

static int
igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	igc_sbuf_fw_version(&sc->fw_ver, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
}

/**********************************************************************
 *
 *  This routine provides a way to dump out the adapter eeprom,
 *  often a useful debug/service tool.  It dumps only the first
 *  32 words; the data that matters lives in that extent.
 *
 **********************************************************************/
static int
igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	/*
	 * This value will cause a hex dump of the
	 * first 32 16-bit words of the EEPROM to
	 * the screen.
	 */
	if (result == 1)
		igc_print_nvm_info(sc);

	return (error);
}

static void
igc_print_nvm_info(struct igc_softc *sc)
{
	u16 eeprom_data;
	int i, j, row = 0;

	/* It's a bit crude, but it gets the job done */
	printf("\nInterface EEPROM Dump:\n");
	printf("Offset\n0x0000 ");
	for (i = 0, j = 0; i < 32; i++, j++) {
		if (j == 8) { /* Make the offset block */
			j = 0; ++row;
			printf("\n0x00%x0 ", row);
		}
		igc_read_nvm(&sc->hw, i, 1, &eeprom_data);
		printf("%04x ", eeprom_data);
	}
	printf("\n");
}
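
/*
 * Usage example (illustrative only; assuming igc_sysctl_nvm_info is
 * registered as the "nvm" node at attach time, as in the related e1000
 * drivers): the dump above is triggered from userland with
 *
 *	sysctl dev.igc.0.nvm=1
 *
 * which prints the first 32 EEPROM words to the console.
 */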

static int
igc_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc;
	u32 reg, val, shift;
	int error, mask;

	sc = oidp->oid_arg1;
	switch (oidp->oid_arg2) {
	case 0:
		reg = IGC_DTXTCPFLGL;
		shift = 0;
		break;
	case 1:
		reg = IGC_DTXTCPFLGL;
		shift = 16;
		break;
	case 2:
		reg = IGC_DTXTCPFLGH;
		shift = 0;
		break;
	default:
		return (EINVAL);
	}
	val = IGC_READ_REG(&sc->hw, reg);
	mask = (val >> shift) & 0xfff;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (mask < 0 || mask > 0xfff)
		return (EINVAL);
	val = (val & ~(0xfff << shift)) | (mask << shift);
	IGC_WRITE_REG(&sc->hw, reg, val);
	return (0);
}

/*
 * Set flow control using sysctl:
 * Flow control values:
 *	0 - off
 *	1 - rx pause
 *	2 - tx pause
 *	3 - full
 */
static int
igc_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	int error;
	static int input = 3; /* default is full */
	struct igc_softc *sc = (struct igc_softc *)arg1;

	error = sysctl_handle_int(oidp, &input, 0, req);

	if ((error) || (req->newptr == NULL))
		return (error);

	if (input == sc->fc) /* no change? */
		return (error);

	switch (input) {
	case igc_fc_rx_pause:
	case igc_fc_tx_pause:
	case igc_fc_full:
	case igc_fc_none:
		sc->hw.fc.requested_mode = input;
		sc->fc = input;
		break;
	default:
		/* Do nothing */
		return (error);
	}

	sc->hw.fc.current_mode = sc->hw.fc.requested_mode;
	igc_force_mac_fc(&sc->hw);
	return (error);
}

/*
 * Manage DMA Coalescing:
 * Control values:
 *	0/1 - off/on
 * Legal timer values are 250, 500, and 1000-10000 in steps of
 * 1000, in microseconds.
 */
static int
igc_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	int error;

	error = sysctl_handle_int(oidp, &sc->dmac, 0, req);

	if ((error) || (req->newptr == NULL))
		return (error);

	switch (sc->dmac) {
	case 0:
		/* Disabling */
		break;
	case 1: /* Just enable and use default */
		sc->dmac = 1000;
		break;
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 3000:
	case 4000:
	case 5000:
	case 6000:
	case 7000:
	case 8000:
	case 9000:
	case 10000:
		/* Legal values - allow */
		break;
	default:
		/* Do nothing, illegal value */
		sc->dmac = 0;
		return (EINVAL);
	}
	/* Reinit the interface */
	igc_if_init(sc->ctx);
	return (error);
}

/*
 * Manage Energy Efficient Ethernet:
 * Control values:
 *	0/1 - enabled/disabled
 */
static int
igc_sysctl_eee(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	int error, value;

	value = sc->hw.dev_spec._i225.eee_disable;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);

	sc->hw.dev_spec._i225.eee_disable = (value != 0);
	igc_if_init(sc->ctx);

	return (0);
}
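
/*
 * Usage examples (illustrative only; the node names "fc", "dmac" and
 * "eee_control" follow the convention of the related e1000 drivers and
 * are registered at attach time, outside this section):
 *
 *	sysctl dev.igc.0.fc=3		# full flow control
 *	sysctl dev.igc.0.dmac=1000	# DMA coalescing, 1000 usec timer
 *	sysctl dev.igc.0.eee_control=1	# disable EEE
 *
 * The dmac and eee handlers above reinitialize the interface so the
 * new setting takes effect immediately.
 */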

static int
igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct igc_softc *)arg1;
		igc_print_debug_info(sc);
	}

	return (error);
}

static int
igc_get_rs(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	int error;
	int result;

	result = 0;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr || result != 1)
		return (error);
	igc_dump_rs(sc);

	return (error);
}

static void
igc_if_debug(if_ctx_t ctx)
{
	igc_dump_rs(iflib_get_softc(ctx));
}

/*
 * This routine is meant to be fluid, add whatever is
 * needed for debugging a problem.  -jfv
 */
static void
igc_print_debug_info(struct igc_softc *sc)
{
	device_t dev = iflib_get_dev(sc->ctx);
	if_t ifp = iflib_get_ifp(sc->ctx);
	struct tx_ring *txr = &sc->tx_queues->txr;
	struct rx_ring *rxr = &sc->rx_queues->rxr;

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		printf("Interface is RUNNING ");
	else
		printf("Interface is NOT RUNNING\n");

	if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
		printf("and INACTIVE\n");
	else
		printf("and ACTIVE\n");

	for (int i = 0; i < sc->tx_num_queues; i++, txr++) {
		device_printf(dev, "TX Queue %d ------\n", i);
		device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
		    IGC_READ_REG(&sc->hw, IGC_TDH(i)),
		    IGC_READ_REG(&sc->hw, IGC_TDT(i)));
	}
	for (int j = 0; j < sc->rx_num_queues; j++, rxr++) {
		device_printf(dev, "RX Queue %d ------\n", j);
		device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
		    IGC_READ_REG(&sc->hw, IGC_RDH(j)),
		    IGC_READ_REG(&sc->hw, IGC_RDT(j)));
	}
}
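
/*
 * Usage example (illustrative only; assuming igc_sysctl_debug_info is
 * registered as the "debug" node at attach time, as in the related
 * e1000 drivers):
 *
 *	sysctl dev.igc.0.debug=1
 *
 * prints the interface state and the hardware head/tail index of every
 * queue to the console, which is often enough to spot a stalled ring.
 */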