/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001-2024, Intel Corporation
 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
 * Copyright (c) 2021-2024 Rubicon Communications, LLC (Netgate)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "if_igc.h"
#include <sys/sbuf.h>
#include <machine/_inttypes.h>

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, String }
 *********************************************************************/

static const pci_vendor_info_t igc_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection - igc */
	PVID(0x8086, IGC_DEV_ID_I225_LM, "Intel(R) Ethernet Controller I225-LM"),
	PVID(0x8086, IGC_DEV_ID_I225_V, "Intel(R) Ethernet Controller I225-V"),
	PVID(0x8086, IGC_DEV_ID_I225_K, "Intel(R) Ethernet Controller I225-K"),
	PVID(0x8086, IGC_DEV_ID_I225_I, "Intel(R) Ethernet Controller I225-I"),
	PVID(0x8086, IGC_DEV_ID_I220_V, "Intel(R) Ethernet Controller I220-V"),
	PVID(0x8086, IGC_DEV_ID_I225_K2, "Intel(R) Ethernet Controller I225-K(2)"),
	PVID(0x8086, IGC_DEV_ID_I225_LMVP, "Intel(R) Ethernet Controller I225-LMvP(2)"),
	PVID(0x8086, IGC_DEV_ID_I226_K, "Intel(R) Ethernet Controller I226-K"),
	PVID(0x8086, IGC_DEV_ID_I226_LMVP, "Intel(R) Ethernet Controller I226-LMvP"),
	PVID(0x8086, IGC_DEV_ID_I225_IT, "Intel(R) Ethernet Controller I225-IT(2)"),
	PVID(0x8086, IGC_DEV_ID_I226_LM, "Intel(R) Ethernet Controller I226-LM"),
	PVID(0x8086, IGC_DEV_ID_I226_V, "Intel(R) Ethernet Controller I226-V"),
	PVID(0x8086, IGC_DEV_ID_I226_IT, "Intel(R) Ethernet Controller I226-IT"),
	PVID(0x8086, IGC_DEV_ID_I221_V, "Intel(R) Ethernet Controller I221-V"),
	PVID(0x8086, IGC_DEV_ID_I226_BLANK_NVM, "Intel(R) Ethernet Controller I226(blankNVM)"),
	PVID(0x8086, IGC_DEV_ID_I225_BLANK_NVM, "Intel(R) Ethernet Controller I225(blankNVM)"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static void	*igc_register(device_t dev);
static int	igc_if_attach_pre(if_ctx_t ctx);
static int	igc_if_attach_post(if_ctx_t ctx);
static int	igc_if_detach(if_ctx_t ctx);
static int	igc_if_shutdown(if_ctx_t ctx);
static int	igc_if_suspend(if_ctx_t ctx);
static int	igc_if_resume(if_ctx_t ctx);

static int	igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets);
static void	igc_if_queues_free(if_ctx_t ctx);

static uint64_t	igc_if_get_counter(if_ctx_t, ift_counter);
static void	igc_if_init(if_ctx_t ctx);
static void	igc_if_stop(if_ctx_t ctx);
static void	igc_if_media_status(if_ctx_t, struct ifmediareq *);
static int	igc_if_media_change(if_ctx_t ctx);
static int	igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	igc_if_timer(if_ctx_t ctx, uint16_t qid);
static void	igc_if_watchdog_reset(if_ctx_t ctx);
static bool	igc_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);

static void	igc_identify_hardware(if_ctx_t ctx);
static int	igc_allocate_pci_resources(if_ctx_t ctx);
static void	igc_free_pci_resources(if_ctx_t ctx);
static void	igc_reset(if_ctx_t ctx);
static int	igc_setup_interface(if_ctx_t ctx);
static int	igc_setup_msix(if_ctx_t ctx);

static void	igc_initialize_transmit_unit(if_ctx_t ctx);
static void	igc_initialize_receive_unit(if_ctx_t ctx);

static void	igc_if_intr_enable(if_ctx_t ctx);
static void	igc_if_intr_disable(if_ctx_t ctx);
static int	igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static void	igc_if_multi_set(if_ctx_t ctx);
static void	igc_if_update_admin_status(if_ctx_t ctx);
static void	igc_if_debug(if_ctx_t ctx);
static void	igc_update_stats_counters(struct igc_adapter *);
static void	igc_add_hw_stats(struct igc_adapter *adapter);
static int	igc_if_set_promisc(if_ctx_t ctx, int flags);
static void	igc_setup_vlan_hw_support(if_ctx_t ctx);
static void	igc_fw_version(struct igc_adapter *);
static void	igc_sbuf_fw_version(struct igc_fw_version *, struct sbuf *);
static void	igc_print_fw_version(struct igc_adapter *);
static int	igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
static int	igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
static void	igc_print_nvm_info(struct igc_adapter *);
static int	igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	igc_get_rs(SYSCTL_HANDLER_ARGS);
static void	igc_print_debug_info(struct igc_adapter *);
static int	igc_is_valid_ether_addr(u8 *);
static void	igc_neweitr(struct igc_adapter *, struct igc_rx_queue *,
    struct tx_ring *, struct rx_ring *);
/* Management and WOL Support */
static void	igc_get_hw_control(struct igc_adapter *);
static void	igc_release_hw_control(struct igc_adapter *);
static void	igc_get_wakeup(if_ctx_t ctx);
static void	igc_enable_wakeup(if_ctx_t ctx);

int		igc_intr(void *arg);

/* MSI-X handlers */
static int	igc_if_msix_intr_assign(if_ctx_t, int);
static int	igc_msix_link(void *);
static void	igc_handle_link(void *context);

static int	igc_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	igc_sysctl_eee(SYSCTL_HANDLER_ARGS);

static int	igc_get_regs(SYSCTL_HANDLER_ARGS);

static void	igc_configure_queues(struct igc_adapter *adapter);


/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/
static device_method_t igc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, igc_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};

static driver_t igc_driver = {
	"igc", igc_methods, sizeof(struct igc_adapter),
};

DRIVER_MODULE(igc, pci, igc_driver, 0, 0);

MODULE_DEPEND(igc, pci, 1, 1, 1);
MODULE_DEPEND(igc, ether, 1, 1, 1);
MODULE_DEPEND(igc, iflib, 1, 1, 1);

IFLIB_PNP_INFO(pci, igc, igc_vendor_info_array);

static device_method_t igc_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, igc_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, igc_if_attach_post),
	DEVMETHOD(ifdi_detach, igc_if_detach),
	DEVMETHOD(ifdi_shutdown, igc_if_shutdown),
	DEVMETHOD(ifdi_suspend, igc_if_suspend),
	DEVMETHOD(ifdi_resume, igc_if_resume),
	DEVMETHOD(ifdi_init, igc_if_init),
	DEVMETHOD(ifdi_stop, igc_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, igc_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, igc_if_intr_enable),
	DEVMETHOD(ifdi_intr_disable, igc_if_intr_disable),
	DEVMETHOD(ifdi_tx_queues_alloc, igc_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, igc_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, igc_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, igc_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, igc_if_multi_set),
	DEVMETHOD(ifdi_media_status, igc_if_media_status),
	DEVMETHOD(ifdi_media_change, igc_if_media_change),
	DEVMETHOD(ifdi_mtu_set, igc_if_mtu_set),
	DEVMETHOD(ifdi_promisc_set, igc_if_set_promisc),
	DEVMETHOD(ifdi_timer, igc_if_timer),
	DEVMETHOD(ifdi_watchdog_reset, igc_if_watchdog_reset),
	DEVMETHOD(ifdi_get_counter, igc_if_get_counter),
	DEVMETHOD(ifdi_rx_queue_intr_enable, igc_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, igc_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_debug, igc_if_debug),
	DEVMETHOD(ifdi_needs_restart, igc_if_needs_restart),
	DEVMETHOD_END
};

static driver_t igc_if_driver = {
	"igc_if", igc_if_methods, sizeof(struct igc_adapter)
};

/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO	0
#endif

static SYSCTL_NODE(_hw, OID_AUTO, igc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "igc driver parameters");

static int igc_disable_crc_stripping = 0;
SYSCTL_INT(_hw_igc, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
    &igc_disable_crc_stripping, 0, "Disable CRC Stripping");

static int igc_smart_pwr_down = false;
SYSCTL_INT(_hw_igc, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &igc_smart_pwr_down,
    0, "Set to true to leave smart power down enabled on newer adapters");

/* Controls whether promiscuous also shows bad packets */
static int igc_debug_sbp = true;
SYSCTL_INT(_hw_igc, OID_AUTO, sbp, CTLFLAG_RDTUN, &igc_debug_sbp, 0,
    "Show bad packets in promiscuous mode");

/* Energy efficient ethernet - default to OFF */
static int igc_eee_setting = 1;
SYSCTL_INT(_hw_igc, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &igc_eee_setting, 0,
    "Enable Energy Efficient Ethernet");

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate is varied over time based on the
 * traffic for that interrupt vector
 */
static int igc_enable_aim = 1;
SYSCTL_INT(_hw_igc, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &igc_enable_aim,
    0, "Enable adaptive interrupt moderation (1=normal, 2=lowlatency)");

/*
** Tuneable Interrupt rate
*/
static int igc_max_interrupt_rate = IGC_INTS_DEFAULT;
SYSCTL_INT(_hw_igc, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &igc_max_interrupt_rate, 0, "Maximum interrupts per second");
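
/*
 * The knobs above live under the hw.igc sysctl node.  Those declared with
 * CTLFLAG_RDTUN can only be set as loader tunables, e.g. in /boot/loader.conf:
 *
 *	hw.igc.max_interrupt_rate="8000"	(value shown is only an example)
 *
 * while enable_aim is CTLFLAG_RWTUN and may also be changed at runtime with
 * `sysctl hw.igc.enable_aim=2`.
 */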
extern struct if_txrx igc_txrx;

static struct if_shared_ctx igc_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IGC_TSO_SEG_SIZE,
	.isc_rx_maxsize = MAX_JUMBO_FRAME_SIZE,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM9BYTES,
	.isc_nfl = 1,
	.isc_nrxqs = 1,
	.isc_ntxqs = 1,
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = igc_vendor_info_array,
	.isc_driver_version = "1",
	.isc_driver = &igc_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,

	.isc_nrxd_min = {IGC_MIN_RXD},
	.isc_ntxd_min = {IGC_MIN_TXD},
	.isc_nrxd_max = {IGC_MAX_RXD},
	.isc_ntxd_max = {IGC_MAX_TXD},
	.isc_nrxd_default = {IGC_DEFAULT_RXD},
	.isc_ntxd_default = {IGC_DEFAULT_TXD},
};

/*****************************************************************
 *
 * Dump Registers
 *
 ****************************************************************/
#define IGC_REGS_LEN 739

static int igc_get_regs(SYSCTL_HANDLER_ARGS)
{
	struct igc_adapter *adapter = (struct igc_adapter *)arg1;
	struct igc_hw *hw = &adapter->hw;
	struct sbuf *sb;
	u32 *regs_buff;
	int rc;

	regs_buff = malloc(sizeof(u32) * IGC_REGS_LEN, M_DEVBUF, M_WAITOK);
	memset(regs_buff, 0, IGC_REGS_LEN * sizeof(u32));

	rc = sysctl_wire_old_buffer(req, 0);
	MPASS(rc == 0);
	if (rc != 0) {
		free(regs_buff, M_DEVBUF);
		return (rc);
	}

	sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req);
	MPASS(sb != NULL);
	if (sb == NULL) {
		free(regs_buff, M_DEVBUF);
		return (ENOMEM);
	}

	/* General Registers */
	regs_buff[0] = IGC_READ_REG(hw, IGC_CTRL);
	regs_buff[1] = IGC_READ_REG(hw, IGC_STATUS);
	regs_buff[2] = IGC_READ_REG(hw, IGC_CTRL_EXT);
	regs_buff[3] = IGC_READ_REG(hw, IGC_ICR);
	regs_buff[4] = IGC_READ_REG(hw, IGC_RCTL);
	regs_buff[5] = IGC_READ_REG(hw, IGC_RDLEN(0));
	regs_buff[6] = IGC_READ_REG(hw, IGC_RDH(0));
	regs_buff[7] = IGC_READ_REG(hw, IGC_RDT(0));
	regs_buff[8] = IGC_READ_REG(hw, IGC_RXDCTL(0));
	regs_buff[9] = IGC_READ_REG(hw, IGC_RDBAL(0));
	regs_buff[10] = IGC_READ_REG(hw, IGC_RDBAH(0));
	regs_buff[11] = IGC_READ_REG(hw, IGC_TCTL);
	regs_buff[12] = IGC_READ_REG(hw, IGC_TDBAL(0));
	regs_buff[13] = IGC_READ_REG(hw, IGC_TDBAH(0));
	regs_buff[14] = IGC_READ_REG(hw, IGC_TDLEN(0));
	regs_buff[15] = IGC_READ_REG(hw, IGC_TDH(0));
	regs_buff[16] = IGC_READ_REG(hw, IGC_TDT(0));
	regs_buff[17] = IGC_READ_REG(hw, IGC_TXDCTL(0));

	sbuf_printf(sb, "General Registers\n");
	sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
	sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]);
	sbuf_printf(sb, "\tCTRL_EXT\t %08x\n\n", regs_buff[2]);

	sbuf_printf(sb, "Interrupt Registers\n");
	sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);

	sbuf_printf(sb, "RX Registers\n");
	sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
	sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]);
	sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]);
	sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
	sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]);
	sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]);
	sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]);

	sbuf_printf(sb, "TX Registers\n");
	sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
	sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]);
	sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]);
	sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
	sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]);
	sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]);
	sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]);
	/* Entries 18..21 are never read above and therefore print as zero. */
	sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]);
	sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]);
	sbuf_printf(sb, "\tTDFHS\t %08x\n", regs_buff[20]);
	sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]);

	free(regs_buff, M_DEVBUF);

#ifdef DUMP_DESCS
	/* XXX: rx_que, tx_que and buf are not defined in this scope; this
	 * debugging aid does not currently compile as-is. */
	{
		if_softc_ctx_t scctx = adapter->shared;
		struct rx_ring *rxr = &rx_que->rxr;
		struct tx_ring *txr = &tx_que->txr;
		int ntxd = scctx->isc_ntxd[0];
		int nrxd = scctx->isc_nrxd[0];
		int j;

		for (j = 0; j < nrxd; j++) {
			u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error);
			u32 length = le32toh(rxr->rx_base[j].wb.upper.length);
			sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64 " Error:%d Length:%d\n",
			    j, rxr->rx_base[j].read.buffer_addr, staterr, length);
		}

		for (j = 0; j < min(ntxd, 256); j++) {
			unsigned int *ptr = (unsigned int *)&txr->tx_base[j];

			sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x eop: %d DD=%d\n",
			    j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop,
			    buf->eop != -1 ?
			    txr->tx_base[buf->eop].upper.fields.status & IGC_TXD_STAT_DD : 0);

		}
	}
#endif

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return(rc);
}
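
/*
 * The register dump above is exported through the per-device sysctl tree
 * created in igc_if_attach_pre() under the name "reg_dump", so it can be
 * read with e.g. `sysctl dev.igc.0.reg_dump` (the unit number is an example).
 */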

static void *
igc_register(device_t dev)
{
	return (&igc_sctx_init);
}

static int
igc_set_num_queues(if_ctx_t ctx)
{
	int maxqueues;

	maxqueues = 4;

	return (maxqueues);
}

#define IGC_CAPS \
    IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \
    IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_TSO4 | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6 | IFCAP_TSO6

/*********************************************************************
 *  Device initialization routine
 *
 *    The attach entry point is called when the driver is being loaded.
 *    This routine identifies the type of hardware, allocates all resources
 *    and initializes the hardware.
 *
 *    return 0 on success, positive on failure
 *********************************************************************/
static int
igc_if_attach_pre(if_ctx_t ctx)
{
	struct igc_adapter *adapter;
	if_softc_ctx_t scctx;
	device_t dev;
	struct igc_hw *hw;
	int error = 0;

	INIT_DEBUGOUT("igc_if_attach_pre: begin");
	dev = iflib_get_dev(ctx);
	adapter = iflib_get_softc(ctx);

	adapter->ctx = adapter->osdep.ctx = ctx;
	adapter->dev = adapter->osdep.dev = dev;
	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
	adapter->media = iflib_get_media(ctx);
	hw = &adapter->hw;

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    adapter, 0, igc_sysctl_nvm_info, "I", "NVM Information");

	adapter->enable_aim = igc_enable_aim;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &adapter->enable_aim, 0,
	    "Interrupt Moderation (1=normal, 2=lowlatency)");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    adapter, 0, igc_sysctl_print_fw_version, "A",
	    "Prints FW/NVM Versions");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    adapter, 0, igc_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    adapter, 0, igc_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "reg_dump",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
	    igc_get_regs, "A", "Dump Registers");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rs_dump",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
	    igc_get_rs, "I", "Dump RS indexes");

	/* Determine hardware and mac info */
	igc_identify_hardware(ctx);

	scctx->isc_tx_nsegments = IGC_MAX_SCATTER;
	scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max = igc_set_num_queues(ctx);
	if (bootverbose)
		device_printf(dev, "attach_pre capping queues at %d\n",
		    scctx->isc_ntxqsets_max);

	scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union igc_adv_tx_desc), IGC_DBA_ALIGN);
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN);
	scctx->isc_txd_size[0] = sizeof(union igc_adv_tx_desc);
	scctx->isc_rxd_size[0] = sizeof(union igc_adv_rx_desc);
	scctx->isc_txrx = &igc_txrx;
	scctx->isc_tx_tso_segments_max = IGC_MAX_SCATTER;
	scctx->isc_tx_tso_size_max = IGC_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IGC_TSO_SEG_SIZE;
	scctx->isc_capabilities = scctx->isc_capenable = IGC_CAPS;
	scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_SCTP | CSUM_IP6_SCTP;

	/*
	** Some new devices, as with ixgbe, now may
	** use a different BAR, so we need to keep
	** track of which is used.
	*/
	scctx->isc_msix_bar = PCIR_BAR(IGC_MSIX_BAR);
	if (pci_read_config(dev, scctx->isc_msix_bar, 4) == 0)
		scctx->isc_msix_bar += 4;

	/* Setup PCI resources */
	if (igc_allocate_pci_resources(ctx)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/* Do Shared Code initialization */
	error = igc_setup_init_funcs(hw, true);
	if (error) {
		device_printf(dev, "Setup of Shared code failed, error %d\n",
		    error);
		error = ENXIO;
		goto err_pci;
	}

	igc_setup_msix(ctx);
	igc_get_bus_info(hw);

	hw->mac.autoneg = DO_AUTO_NEG;
	hw->phy.autoneg_wait_to_complete = false;
	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
	}

	/*
	 * Set the frame limits assuming
	 * standard ethernet sized frames.
	 */
	scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
	    ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Check SOL/IDER usage */
	if (igc_check_reset_block(hw))
		device_printf(dev, "PHY reset is blocked"
		    " due to SOL/IDER session.\n");

	/* Sysctl for setting Energy Efficient Ethernet */
	adapter->hw.dev_spec._i225.eee_disable = igc_eee_setting;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "eee_control",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    adapter, 0, igc_sysctl_eee, "I",
	    "Disable Energy Efficient Ethernet");

	/*
	** Start from a known state, this is
	** important in reading the nvm and
	** mac from that.
	*/
	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		** Some PCI-E parts fail the first check due to
		** the link being in sleep state, call it again,
		** if it fails a second time it's a real issue.
		*/
		if (igc_validate_nvm_checksum(hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto err_late;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (igc_read_mac_addr(hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto err_late;
	}

	if (!igc_is_valid_ether_addr(hw->mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto err_late;
	}

	/* Save the EEPROM/NVM versions */
	igc_fw_version(adapter);

	igc_print_fw_version(adapter);

	/*
	 * Get Wake-on-Lan and Management info for later use
	 */
	igc_get_wakeup(ctx);

	/* Enable only WOL MAGIC by default */
	scctx->isc_capenable &= ~IFCAP_WOL;
	if (adapter->wol != 0)
		scctx->isc_capenable |= IFCAP_WOL_MAGIC;

	iflib_set_mac(ctx, hw->mac.addr);

	return (0);

err_late:
	igc_release_hw_control(adapter);
err_pci:
	igc_free_pci_resources(ctx);
	free(adapter->mta, M_DEVBUF);

	return (error);
}

static int
igc_if_attach_post(if_ctx_t ctx)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	struct igc_hw *hw = &adapter->hw;
	int error = 0;

	/* Setup OS specific network interface */
	error = igc_setup_interface(ctx);
	if (error != 0) {
		goto err_late;
	}

	igc_reset(ctx);

	/* Initialize statistics */
	igc_update_stats_counters(adapter);
	hw->mac.get_link_status = true;
	igc_if_update_admin_status(ctx);
	igc_add_hw_stats(adapter);

	/* the driver can now take control from firmware */
	igc_get_hw_control(adapter);

	INIT_DEBUGOUT("igc_if_attach_post: end");

	return (error);

err_late:
	igc_release_hw_control(adapter);
	igc_free_pci_resources(ctx);
	igc_if_queues_free(ctx);
	free(adapter->mta, M_DEVBUF);

	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
igc_if_detach(if_ctx_t ctx)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);

	INIT_DEBUGOUT("igc_if_detach: begin");

	igc_phy_hw_reset(&adapter->hw);

	igc_release_hw_control(adapter);
	igc_free_pci_resources(ctx);

	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
igc_if_shutdown(if_ctx_t ctx)
{
	return igc_if_suspend(ctx);
}

/*
 * Suspend/resume device methods.
 */
static int
igc_if_suspend(if_ctx_t ctx)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);

	igc_release_hw_control(adapter);
	igc_enable_wakeup(ctx);
	return (0);
}

static int
igc_if_resume(if_ctx_t ctx)
{
	igc_if_init(ctx);

	return(0);
}
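
/*
 * Note on the MTU limit enforced below: with a 9234-byte maximum frame, the
 * largest accepted MTU works out to 9234 - ETHER_HDR_LEN (14) -
 * ETHER_CRC_LEN (4) = 9216 bytes.
 */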
static int
igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	int max_frame_size;
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);

	IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

	/* 9K Jumbo Frame size */
	max_frame_size = 9234;

	if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
		return (EINVAL);
	}

	scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
	    mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	return (0);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 **********************************************************************/
static void
igc_if_init(if_ctx_t ctx)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = adapter->shared;
	if_t ifp = iflib_get_ifp(ctx);
	struct igc_tx_queue *tx_que;
	int i;

	INIT_DEBUGOUT("igc_if_init: begin");

	/* Get the latest mac address, User can use a LAA */
	bcopy(if_getlladdr(ifp), adapter->hw.mac.addr,
	    ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	igc_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/* Initialize the hardware */
	igc_reset(ctx);
	igc_if_update_admin_status(ctx);

	for (i = 0, tx_que = adapter->tx_queues; i < adapter->tx_num_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		txr->tx_rs_cidx = txr->tx_rs_pidx;

		/* Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
	}

	/* Setup VLAN support, basic and offload if available */
	IGC_WRITE_REG(&adapter->hw, IGC_VET, ETHERTYPE_VLAN);

	/* Prepare transmit descriptors and buffers */
	igc_initialize_transmit_unit(ctx);

	/* Setup Multicast table */
	igc_if_multi_set(ctx);

	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
	igc_initialize_receive_unit(ctx);

	/* Set up VLAN support */
	igc_setup_vlan_hw_support(ctx);

	/* Don't lose promiscuous settings */
	igc_if_set_promisc(ctx, if_getflags(ifp));
	igc_clear_hw_cntrs_base_generic(&adapter->hw);

	if (adapter->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
		igc_configure_queues(adapter);

	/* this clears any pending interrupts */
	IGC_READ_REG(&adapter->hw, IGC_ICR);
	IGC_WRITE_REG(&adapter->hw, IGC_ICS, IGC_ICS_LSC);

	/* the driver can now take control from firmware */
	igc_get_hw_control(adapter);

	/* Set Energy Efficient Ethernet */
	igc_set_eee_i225(&adapter->hw, true, true, true);
}

enum eitr_latency_target {
	eitr_latency_disabled = 0,
	eitr_latency_lowest = 1,
	eitr_latency_low = 2,
	eitr_latency_bulk = 3
};
/*********************************************************************
 *
 *  Helper to calculate next EITR value for AIM
 *
 *********************************************************************/
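/*
 * Rough summary of the mapping applied below (see the two switch statements):
 * the per-queue traffic profile selects a latency class, and each class maps
 * to a target interrupt rate:
 *
 *	eitr_latency_lowest   -> IGC_INTS_70K (~70k ints/s)
 *	eitr_latency_low      -> IGC_INTS_20K (~20k ints/s)
 *	eitr_latency_bulk     -> IGC_INTS_4K  (~4k ints/s)
 *	eitr_latency_disabled -> igc_max_interrupt_rate (AIM off)
 *
 * Links not running at exactly 1000 Mb/s are pinned to the 4K rate, and AIM
 * is turned off entirely if the packet buffer cannot hold two maximum-sized
 * frames.
 */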
static void
igc_neweitr(struct igc_adapter *sc, struct igc_rx_queue *que,
    struct tx_ring *txr, struct rx_ring *rxr)
{
	struct igc_hw *hw = &sc->hw;
	u32 neweitr;
	u32 bytes;
	u32 bytes_packets;
	u32 packets;
	u8 nextlatency;

	/* Idle, do nothing */
	if ((txr->tx_bytes == 0) && (rxr->rx_bytes == 0))
		return;

	neweitr = 0;
	/* Avoid reading these uninitialized when only one direction saw traffic */
	bytes = bytes_packets = packets = 0;

	if (sc->enable_aim) {
		nextlatency = rxr->rx_nextlatency;

		/* Use half default (4K) ITR if sub-gig */
		if (sc->link_speed != 1000) {
			neweitr = IGC_INTS_4K;
			goto igc_set_next_eitr;
		}
		/* Want at least enough packet buffer for two frames to AIM */
		if (sc->shared->isc_max_frame_size * 2 > (sc->pba << 10)) {
			neweitr = igc_max_interrupt_rate;
			sc->enable_aim = 0;
			goto igc_set_next_eitr;
		}

		/* Get the largest values from the associated tx and rx ring */
		if (txr->tx_bytes && txr->tx_packets) {
			bytes = txr->tx_bytes;
			bytes_packets = txr->tx_bytes/txr->tx_packets;
			packets = txr->tx_packets;
		}
		if (rxr->rx_bytes && rxr->rx_packets) {
			bytes = max(bytes, rxr->rx_bytes);
			bytes_packets = max(bytes_packets, rxr->rx_bytes/rxr->rx_packets);
			packets = max(packets, rxr->rx_packets);
		}

		/* Latency state machine */
		switch (nextlatency) {
		case eitr_latency_disabled: /* Bootstrapping */
			nextlatency = eitr_latency_low;
			break;
		case eitr_latency_lowest: /* 70k ints/s */
			/* TSO and jumbo frames */
			if (bytes_packets > 8000)
				nextlatency = eitr_latency_bulk;
			else if ((packets < 5) && (bytes > 512))
				nextlatency = eitr_latency_low;
			break;
		case eitr_latency_low: /* 20k ints/s */
			if (bytes > 10000) {
				/* Handle TSO */
				if (bytes_packets > 8000)
					nextlatency = eitr_latency_bulk;
				else if ((packets < 10) || (bytes_packets > 1200))
					nextlatency = eitr_latency_bulk;
				else if (packets > 35)
					nextlatency = eitr_latency_lowest;
			} else if (bytes_packets > 2000) {
				nextlatency = eitr_latency_bulk;
			} else if (packets < 3 && bytes < 512) {
				nextlatency = eitr_latency_lowest;
			}
			break;
		case eitr_latency_bulk: /* 4k ints/s */
			if (bytes > 25000) {
				if (packets > 35)
					nextlatency = eitr_latency_low;
			} else if (bytes < 1500)
				nextlatency = eitr_latency_low;
			break;
		default:
			nextlatency = eitr_latency_low;
			device_printf(sc->dev, "Unexpected neweitr transition %d\n",
			    nextlatency);
			break;
		}

		/* Trim itr_latency_lowest for default AIM setting */
		if (sc->enable_aim == 1 && nextlatency == eitr_latency_lowest)
			nextlatency = eitr_latency_low;

		/* Request new latency */
		rxr->rx_nextlatency = nextlatency;
	} else {
		/* We may have toggled to AIM disabled */
		nextlatency = eitr_latency_disabled;
		rxr->rx_nextlatency = nextlatency;
	}

	/* ITR state machine */
	switch(nextlatency) {
	case eitr_latency_lowest:
		neweitr = IGC_INTS_70K;
		break;
	case eitr_latency_low:
		neweitr = IGC_INTS_20K;
		break;
	case eitr_latency_bulk:
		neweitr = IGC_INTS_4K;
		break;
	case eitr_latency_disabled:
	default:
		neweitr = igc_max_interrupt_rate;
		break;
	}

igc_set_next_eitr:
	neweitr = IGC_INTS_TO_EITR(neweitr);

	neweitr |= IGC_EITR_CNT_IGNR;

	if (neweitr != que->eitr_setting) {
		que->eitr_setting = neweitr;
		IGC_WRITE_REG(hw, IGC_EITR(que->msix), que->eitr_setting);
	}
}

/*********************************************************************
 *
 *  Fast Legacy/MSI Combined Interrupt Service routine
 *
 *********************************************************************/
int
igc_intr(void *arg)
{
	struct igc_adapter *adapter = arg;
	struct igc_hw *hw = &adapter->hw;
	struct igc_rx_queue *que = &adapter->rx_queues[0];
	struct tx_ring *txr = &adapter->tx_queues[0].txr;
	struct rx_ring *rxr = &que->rxr;
	if_ctx_t ctx = adapter->ctx;
	u32 reg_icr;

	reg_icr = IGC_READ_REG(hw, IGC_ICR);

	/* Hot eject? */
	if (reg_icr == 0xffffffff)
		return FILTER_STRAY;

	/* Definitely not our interrupt. */
	if (reg_icr == 0x0)
		return FILTER_STRAY;

	if ((reg_icr & IGC_ICR_INT_ASSERTED) == 0)
		return FILTER_STRAY;

	/*
	 * Only MSI-X interrupts have one-shot behavior by taking advantage
	 * of the EIAC register.  Thus, explicitly disable interrupts.  This
	 * also works around the MSI message reordering errata on certain
	 * systems.
	 */
	IFDI_INTR_DISABLE(ctx);

	/* Link status change */
	if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC))
		igc_handle_link(ctx);

	if (reg_icr & IGC_ICR_RXO)
		adapter->rx_overruns++;

	igc_neweitr(adapter, que, txr, rxr);

	/* Reset state */
	txr->tx_bytes = 0;
	txr->tx_packets = 0;
	rxr->rx_bytes = 0;
	rxr->rx_packets = 0;

	return (FILTER_SCHEDULE_THREAD);
}

static int
igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	struct igc_rx_queue *rxq = &adapter->rx_queues[rxqid];

	IGC_WRITE_REG(&adapter->hw, IGC_EIMS, rxq->eims);
	return (0);
}

static int
igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	struct igc_tx_queue *txq = &adapter->tx_queues[txqid];

	IGC_WRITE_REG(&adapter->hw, IGC_EIMS, txq->eims);
	return (0);
}

/*********************************************************************
 *
 *  MSI-X RX Interrupt Service routine
 *
 **********************************************************************/
static int
igc_msix_que(void *arg)
{
	struct igc_rx_queue *que = arg;
	struct igc_adapter *sc = que->adapter;
	struct tx_ring *txr = &sc->tx_queues[que->msix].txr;
	struct rx_ring *rxr = &que->rxr;

	++que->irqs;

	igc_neweitr(sc, que, txr, rxr);

	/* Reset state */
	txr->tx_bytes = 0;
	txr->tx_packets = 0;
	rxr->rx_bytes = 0;
	rxr->rx_packets = 0;

	return (FILTER_SCHEDULE_THREAD);
}

/*********************************************************************
 *
 *  MSI-X Link Fast Interrupt Service routine
 *
 **********************************************************************/
static int
igc_msix_link(void *arg)
{
	struct igc_adapter *adapter = arg;
	u32 reg_icr;

	++adapter->link_irq;
	MPASS(adapter->hw.back != NULL);
	reg_icr = IGC_READ_REG(&adapter->hw, IGC_ICR);

	if (reg_icr & IGC_ICR_RXO)
		adapter->rx_overruns++;

	if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
		igc_handle_link(adapter->ctx);
	}

	IGC_WRITE_REG(&adapter->hw, IGC_IMS, IGC_IMS_LSC);
	IGC_WRITE_REG(&adapter->hw, IGC_EIMS, adapter->link_mask);

	return (FILTER_HANDLED);
}

static void
igc_handle_link(void *context)
{
	if_ctx_t ctx = context;
	struct igc_adapter *adapter = iflib_get_softc(ctx);

	adapter->hw.mac.get_link_status = true;
	iflib_admin_intr_deferred(ctx);
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
igc_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);

	INIT_DEBUGOUT("igc_if_media_status: begin");

	iflib_admin_intr_deferred(ctx);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case 100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case 1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case 2500:
		ifmr->ifm_active |= IFM_2500_T;
		break;
	}

	if (adapter->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
igc_if_media_change(if_ctx_t ctx)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("igc_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	adapter->hw.mac.autoneg = DO_AUTO_NEG;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_2500_T:
		adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
		break;
	case IFM_1000_T:
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
		else
			adapter->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
		break;
	case IFM_10_T:
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
		else
			adapter->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
		break;
	default:
		device_printf(adapter->dev, "Unsupported media type\n");
	}

	igc_if_init(ctx);

	return (0);
}
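
/*
 * The promiscuous and multicast filtering below is expressed through three
 * RCTL bits: UPE (unicast promiscuous enable), MPE (multicast promiscuous
 * enable) and SBP (store bad packets, only set when the hw.igc.sbp tunable
 * is enabled).
 */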
static int
igc_if_set_promisc(if_ctx_t ctx, int flags)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	u32 reg_rctl;
	int mcnt = 0;

	reg_rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL);
	reg_rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_UPE);
	if (flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else
		mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);

	/* Don't disable if in MAX groups */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg_rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);

	if (flags & IFF_PROMISC) {
		reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (igc_debug_sbp)
			reg_rctl |= IGC_RCTL_SBP;
		IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
	} else if (flags & IFF_ALLMULTI) {
		reg_rctl |= IGC_RCTL_MPE;
		reg_rctl &= ~IGC_RCTL_UPE;
		IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
	}
	return (0);
}

static u_int
igc_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
	u8 *mta = arg;

	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
		return (0);

	bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN);

	return (1);
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/

static void
igc_if_multi_set(if_ctx_t ctx)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	u8  *mta; /* Multicast array memory */
	u32 reg_rctl = 0;
	int mcnt = 0;

	IOCTL_DEBUGOUT("igc_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	mcnt = if_foreach_llmaddr(ifp, igc_copy_maddr, mta);

	reg_rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL);

	if (if_getflags(ifp) & IFF_PROMISC) {
		reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (igc_debug_sbp)
			reg_rctl |= IGC_RCTL_SBP;
	} else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    if_getflags(ifp) & IFF_ALLMULTI) {
		reg_rctl |= IGC_RCTL_MPE;
		reg_rctl &= ~IGC_RCTL_UPE;
	} else
		reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		igc_update_mc_addr_list(&adapter->hw, mta, mcnt);

	IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine schedules igc_if_update_admin_status() to check for
 *  link status and to gather statistics as well as to perform some
 *  controller-specific hardware patting.
 *
 **********************************************************************/
static void
igc_if_timer(if_ctx_t ctx, uint16_t qid)
{

	if (qid != 0)
		return;

	iflib_admin_intr_deferred(ctx);
}

static void
igc_if_update_admin_status(if_ctx_t ctx)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	struct igc_hw *hw = &adapter->hw;
	device_t dev = iflib_get_dev(ctx);
	u32 link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;
	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case igc_media_type_copper:
		if (hw->mac.get_link_status == true) {
			/* Do the work to read phy */
			igc_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else
			link_check = true;
		break;
	case igc_media_type_unknown:
		igc_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		igc_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
1354 "Full Duplex" : "Half Duplex")); 1355 adapter->link_active = 1; 1356 iflib_link_state_change(ctx, LINK_STATE_UP, 1357 IF_Mbps(adapter->link_speed)); 1358 } else if (!link_check && (adapter->link_active == 1)) { 1359 adapter->link_speed = 0; 1360 adapter->link_duplex = 0; 1361 adapter->link_active = 0; 1362 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); 1363 } 1364 igc_update_stats_counters(adapter); 1365 } 1366 1367 static void 1368 igc_if_watchdog_reset(if_ctx_t ctx) 1369 { 1370 struct igc_adapter *adapter = iflib_get_softc(ctx); 1371 1372 /* 1373 * Just count the event; iflib(4) will already trigger a 1374 * sufficient reset of the controller. 1375 */ 1376 adapter->watchdog_events++; 1377 } 1378 1379 /********************************************************************* 1380 * 1381 * This routine disables all traffic on the adapter by issuing a 1382 * global reset on the MAC. 1383 * 1384 **********************************************************************/ 1385 static void 1386 igc_if_stop(if_ctx_t ctx) 1387 { 1388 struct igc_adapter *adapter = iflib_get_softc(ctx); 1389 1390 INIT_DEBUGOUT("igc_if_stop: begin"); 1391 1392 igc_reset_hw(&adapter->hw); 1393 IGC_WRITE_REG(&adapter->hw, IGC_WUC, 0); 1394 } 1395 1396 /********************************************************************* 1397 * 1398 * Determine hardware revision. 1399 * 1400 **********************************************************************/ 1401 static void 1402 igc_identify_hardware(if_ctx_t ctx) 1403 { 1404 device_t dev = iflib_get_dev(ctx); 1405 struct igc_adapter *adapter = iflib_get_softc(ctx); 1406 1407 /* Make sure our PCI config space has the necessary stuff set */ 1408 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 1409 1410 /* Save off the information about this board */ 1411 adapter->hw.vendor_id = pci_get_vendor(dev); 1412 adapter->hw.device_id = pci_get_device(dev); 1413 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 1414 adapter->hw.subsystem_vendor_id = 1415 pci_read_config(dev, PCIR_SUBVEND_0, 2); 1416 adapter->hw.subsystem_device_id = 1417 pci_read_config(dev, PCIR_SUBDEV_0, 2); 1418 1419 /* Do Shared Code Init and Setup */ 1420 if (igc_set_mac_type(&adapter->hw)) { 1421 device_printf(dev, "Setup init failure\n"); 1422 return; 1423 } 1424 } 1425 1426 static int 1427 igc_allocate_pci_resources(if_ctx_t ctx) 1428 { 1429 struct igc_adapter *adapter = iflib_get_softc(ctx); 1430 device_t dev = iflib_get_dev(ctx); 1431 int rid; 1432 1433 rid = PCIR_BAR(0); 1434 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1435 &rid, RF_ACTIVE); 1436 if (adapter->memory == NULL) { 1437 device_printf(dev, "Unable to allocate bus resource: memory\n"); 1438 return (ENXIO); 1439 } 1440 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory); 1441 adapter->osdep.mem_bus_space_handle = 1442 rman_get_bushandle(adapter->memory); 1443 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; 1444 1445 adapter->hw.back = &adapter->osdep; 1446 1447 return (0); 1448 } 1449 1450 /********************************************************************* 1451 * 1452 * Set up the MSI-X Interrupt handlers 1453 * 1454 **********************************************************************/ 1455 static int 1456 igc_if_msix_intr_assign(if_ctx_t ctx, int msix) 1457 { 1458 struct igc_adapter *adapter = iflib_get_softc(ctx); 1459 struct igc_rx_queue *rx_que = adapter->rx_queues; 1460 struct igc_tx_queue *tx_que = adapter->tx_queues; 1461 int error, rid, i, vector = 0, 
static int
igc_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	struct igc_rx_queue *rx_que = adapter->rx_queues;
	struct igc_tx_queue *tx_que = adapter->tx_queues;
	int error, rid, i, vector = 0, rx_vectors;
	char buf[16];

	/* First set up ring resources */
	for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) {
		rid = vector + 1;
		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, igc_msix_que, rx_que, rx_que->me, buf);
		if (error) {
			device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d\n", i, error);
			adapter->rx_num_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;

		/*
		 * Set the bit to enable interrupt
		 * in IGC_IMS -- bits 20 and 21
		 * are for RX0 and RX1, note this has
		 * NOTHING to do with the MSI-X vector
		 */
		rx_que->eims = 1 << vector;
	}
	rx_vectors = vector;

	vector = 0;
	for (i = 0; i < adapter->tx_num_queues; i++, tx_que++, vector++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &adapter->tx_queues[i];
		iflib_softirq_alloc_generic(ctx,
		    &adapter->rx_queues[i % adapter->rx_num_queues].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->me, buf);

		tx_que->msix = (vector % adapter->rx_num_queues);

		/*
		 * Set the bit to enable interrupt
		 * in IGC_IMS -- bits 22 and 23
		 * are for TX0 and TX1, note this has
		 * NOTHING to do with the MSI-X vector
		 */
		tx_que->eims = 1 << i;
	}

	/* Link interrupt */
	rid = rx_vectors + 1;
	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, igc_msix_link, adapter, 0, "aq");

	if (error) {
		device_printf(iflib_get_dev(ctx), "Failed to register admin handler\n");
		goto fail;
	}
	adapter->linkvec = rx_vectors;
	return (0);
fail:
	iflib_irq_free(ctx, &adapter->irq);
	rx_que = adapter->rx_queues;
	for (int i = 0; i < adapter->rx_num_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (error);
}
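
/*
 * A note on the IVAR programming done below: each 32-bit IVAR register holds
 * four 8-bit entries (an MSI-X vector number plus the IGC_IVAR_VALID bit).
 * For queue pair i, IVAR(i >> 1) carries the RX vector in byte 0 or 2 and
 * the TX vector in byte 1 or 3, which is why the code masks and shifts by
 * 0, 8, 16 or 24 depending on whether i is even or odd.
 */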
static void
igc_configure_queues(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	struct igc_rx_queue *rx_que;
	struct igc_tx_queue *tx_que;
	u32 ivar = 0, newitr = 0;

	/* First turn on RSS capability */
	IGC_WRITE_REG(hw, IGC_GPIE,
	    IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME | IGC_GPIE_PBA |
	    IGC_GPIE_NSICR);

	/* Turn on MSI-X */
	/* RX entries */
	for (int i = 0; i < adapter->rx_num_queues; i++) {
		u32 index = i >> 1;
		ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
		rx_que = &adapter->rx_queues[i];
		if (i & 1) {
			ivar &= 0xFF00FFFF;
			ivar |= (rx_que->msix | IGC_IVAR_VALID) << 16;
		} else {
			ivar &= 0xFFFFFF00;
			ivar |= rx_que->msix | IGC_IVAR_VALID;
		}
		IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
	}
	/* TX entries */
	for (int i = 0; i < adapter->tx_num_queues; i++) {
		u32 index = i >> 1;
		ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
		tx_que = &adapter->tx_queues[i];
		if (i & 1) {
			ivar &= 0x00FFFFFF;
			ivar |= (tx_que->msix | IGC_IVAR_VALID) << 24;
		} else {
			ivar &= 0xFFFF00FF;
			ivar |= (tx_que->msix | IGC_IVAR_VALID) << 8;
		}
		IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
		adapter->que_mask |= tx_que->eims;
	}

	/* And for the link interrupt */
	ivar = (adapter->linkvec | IGC_IVAR_VALID) << 8;
	adapter->link_mask = 1 << adapter->linkvec;
	IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);

	/* Set the starting interrupt rate */
	if (igc_max_interrupt_rate > 0)
		newitr = IGC_INTS_TO_EITR(igc_max_interrupt_rate);

	newitr |= IGC_EITR_CNT_IGNR;

	for (int i = 0; i < adapter->rx_num_queues; i++) {
		rx_que = &adapter->rx_queues[i];
		IGC_WRITE_REG(hw, IGC_EITR(rx_que->msix), newitr);
	}

	return;
}

static void
igc_free_pci_resources(if_ctx_t ctx)
{
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	struct igc_rx_queue *que = adapter->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (adapter->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &adapter->irq);

	for (int i = 0; i < adapter->rx_num_queues; i++, que++) {
		iflib_irq_free(ctx, &que->que_irq);
	}

	if (adapter->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(adapter->memory), adapter->memory);
		adapter->memory = NULL;
	}

	if (adapter->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(adapter->flash), adapter->flash);
		adapter->flash = NULL;
	}

	if (adapter->ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT,
		    rman_get_rid(adapter->ioport), adapter->ioport);
		adapter->ioport = NULL;
	}
}

/* Set up MSI or MSI-X */
static int
igc_setup_msix(if_ctx_t ctx)
{
	return (0);
}

/*********************************************************************
 *
 *  Initialize the DMA Coalescing feature
 *
 **********************************************************************/
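/*
 * As wired up here, DMA coalescing is effectively an opt-in feature:
 * igc_init_dmac() writes DMACR with the enable bit cleared and returns
 * unless adapter->dmac holds a non-zero watchdog value.
 */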
static void
igc_init_dmac(struct igc_adapter *adapter, u32 pba)
{
	device_t dev = adapter->dev;
	struct igc_hw *hw = &adapter->hw;
	u32 dmac, reg = ~IGC_DMACR_DMAC_EN;
	u16 hwm;
	u16 max_frame_size;
	int status;

	max_frame_size = adapter->shared->isc_max_frame_size;

	if (adapter->dmac == 0) { /* Disabling it */
		IGC_WRITE_REG(hw, IGC_DMACR, reg);
		return;
	} else
		device_printf(dev, "DMA Coalescing enabled\n");

	/* Set starting threshold */
	IGC_WRITE_REG(hw, IGC_DMCTXTH, 0);

	hwm = 64 * pba - max_frame_size / 16;
	if (hwm < 64 * (pba - 6))
		hwm = 64 * (pba - 6);
	reg = IGC_READ_REG(hw, IGC_FCRTC);
	reg &= ~IGC_FCRTC_RTH_COAL_MASK;
	reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT)
	    & IGC_FCRTC_RTH_COAL_MASK);
	IGC_WRITE_REG(hw, IGC_FCRTC, reg);

	dmac = pba - max_frame_size / 512;
	if (dmac < pba - 10)
		dmac = pba - 10;
	reg = IGC_READ_REG(hw, IGC_DMACR);
	reg &= ~IGC_DMACR_DMACTHR_MASK;
	reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT)
	    & IGC_DMACR_DMACTHR_MASK);

	/* transition to L0x or L1 if available..*/
	reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK);

	/* Check if status is 2.5Gb backplane connection
	 * before configuration of watchdog timer, which is
	 * in msec values in 12.8usec intervals
	 * watchdog timer= msec values in 32usec intervals
	 * for non 2.5Gb connection
	 */
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
		reg |= ((adapter->dmac * 5) >> 6);
	else
		reg |= (adapter->dmac >> 5);

	IGC_WRITE_REG(hw, IGC_DMACR, reg);

	IGC_WRITE_REG(hw, IGC_DMCRTRH, 0);

	/* Set the interval before transition */
	reg = IGC_READ_REG(hw, IGC_DMCTLX);
	reg |= IGC_DMCTLX_DCFLUSH_DIS;

	/*
	** in 2.5Gb connection, TTLX unit is 0.4 usec
	** which is 0x4*2 = 0xA. But delay is still 4 usec
	*/
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
		reg |= 0xA;
	else
		reg |= 0x4;

	IGC_WRITE_REG(hw, IGC_DMCTLX, reg);

	/* free space in tx packet buffer to wake from DMA coal */
	IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE -
	    (2 * max_frame_size)) >> 6);

	/* make low power state decision controlled by DMA coal */
	reg = IGC_READ_REG(hw, IGC_PCIEMISC);
	reg &= ~IGC_PCIEMISC_LX_DECISION;
	IGC_WRITE_REG(hw, IGC_PCIEMISC, reg);
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure.
 *
 **********************************************************************/
static void
igc_reset(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct igc_adapter *adapter = iflib_get_softc(ctx);
	struct igc_hw *hw = &adapter->hw;
	u32 rx_buffer_size;
	u32 pba;

	INIT_DEBUGOUT("igc_reset: begin");
	/* Let the firmware know the OS is in control */
	igc_get_hw_control(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	pba = IGC_PBA_34K;

	INIT_DEBUGOUT1("igc_reset: pba=%dK",pba);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size = (pba & 0xffff) << 10;
	hw->fc.high_water = rx_buffer_size -
	    roundup2(adapter->hw.mac.max_frame_size, 1024);
	/* 16-byte granularity */
	hw->fc.low_water = hw->fc.high_water - 16;

	if (adapter->fc) /* locally set flow control value? */
		hw->fc.requested_mode = adapter->fc;
	else
		hw->fc.requested_mode = igc_fc_full;

	hw->fc.pause_time = IGC_FC_PAUSE_TIME;

	hw->fc.send_xon = true;

	/* Issue a global reset */
	igc_reset_hw(hw);
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* and a re-init */
	if (igc_init_hw(hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return;
	}

	/* Setup DMA Coalescing */
	igc_init_dmac(adapter, pba);

	/* Save the final PBA off if it needs to be used elsewhere i.e. AIM */
	adapter->pba = pba;

	IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
	igc_get_phy_info(hw);
	igc_check_for_link(hw);
}
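
/*
 * Worked example for the flow-control thresholds configured in igc_reset()
 * above, assuming IGC_PBA_34K corresponds to 34 KB of receive packet buffer
 * and the default 1518-byte maximum frame set in igc_if_attach_pre():
 * rx_buffer_size = 34 << 10 = 34816, high_water = 34816 -
 * roundup2(1518, 1024) = 32768, and low_water = 32768 - 16 = 32752 bytes.
 */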
1799 */ 1800 1801 #define RSSKEYLEN 10 1802 static void 1803 igc_initialize_rss_mapping(struct igc_adapter *adapter) 1804 { 1805 struct igc_hw *hw = &adapter->hw; 1806 int i; 1807 int queue_id; 1808 u32 reta; 1809 u32 rss_key[RSSKEYLEN], mrqc, shift = 0; 1810 1811 /* 1812 * The redirection table controls which destination 1813 * queue each bucket redirects traffic to. 1814 * Each DWORD represents four queues, with the LSB 1815 * being the first queue in the DWORD. 1816 * 1817 * This just allocates buckets to queues using round-robin 1818 * allocation. 1819 * 1820 * NOTE: It Just Happens to line up with the default 1821 * RSS allocation method. 1822 */ 1823 1824 /* Warning FM follows */ 1825 reta = 0; 1826 for (i = 0; i < 128; i++) { 1827 #ifdef RSS 1828 queue_id = rss_get_indirection_to_bucket(i); 1829 /* 1830 * If we have more queues than buckets, we'll 1831 * end up mapping buckets to a subset of the 1832 * queues. 1833 * 1834 * If we have more buckets than queues, we'll 1835 * end up instead assigning multiple buckets 1836 * to queues. 1837 * 1838 * Both are suboptimal, but we need to handle 1839 * the case so we don't go out of bounds 1840 * indexing arrays and such. 1841 */ 1842 queue_id = queue_id % adapter->rx_num_queues; 1843 #else 1844 queue_id = (i % adapter->rx_num_queues); 1845 #endif 1846 /* Adjust if required */ 1847 queue_id = queue_id << shift; 1848 1849 /* 1850 * The low 8 bits are for hash value (n+0); 1851 * The next 8 bits are for hash value (n+1), etc. 1852 */ 1853 reta = reta >> 8; 1854 reta = reta | ( ((uint32_t) queue_id) << 24); 1855 if ((i & 3) == 3) { 1856 IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta); 1857 reta = 0; 1858 } 1859 } 1860 1861 /* Now fill in hash table */ 1862 1863 /* 1864 * MRQC: Multiple Receive Queues Command 1865 * Set queuing to RSS control, number depends on the device. 1866 */ 1867 mrqc = IGC_MRQC_ENABLE_RSS_4Q; 1868 1869 #ifdef RSS 1870 /* XXX ew typecasting */ 1871 rss_getkey((uint8_t *) &rss_key); 1872 #else 1873 arc4rand(&rss_key, sizeof(rss_key), 0); 1874 #endif 1875 for (i = 0; i < RSSKEYLEN; i++) 1876 IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]); 1877 1878 /* 1879 * Configure the RSS fields to hash upon. 1880 */ 1881 mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 | 1882 IGC_MRQC_RSS_FIELD_IPV4_TCP); 1883 mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 | 1884 IGC_MRQC_RSS_FIELD_IPV6_TCP); 1885 mrqc |=( IGC_MRQC_RSS_FIELD_IPV4_UDP | 1886 IGC_MRQC_RSS_FIELD_IPV6_UDP); 1887 mrqc |=( IGC_MRQC_RSS_FIELD_IPV6_UDP_EX | 1888 IGC_MRQC_RSS_FIELD_IPV6_TCP_EX); 1889 1890 IGC_WRITE_REG(hw, IGC_MRQC, mrqc); 1891 } 1892 1893 /********************************************************************* 1894 * 1895 * Setup networking device structure and register interface media. 
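 *
 * The ifmedia entries added below are what ifconfig(8) lists and
 * accepts; e.g. "ifconfig igc0 media autoselect" (assuming unit 0)
 * selects the IFM_AUTO entry registered here.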
1896 * 1897 **********************************************************************/ 1898 static int 1899 igc_setup_interface(if_ctx_t ctx) 1900 { 1901 if_t ifp = iflib_get_ifp(ctx); 1902 struct igc_adapter *adapter = iflib_get_softc(ctx); 1903 if_softc_ctx_t scctx = adapter->shared; 1904 1905 INIT_DEBUGOUT("igc_setup_interface: begin"); 1906 1907 /* Single Queue */ 1908 if (adapter->tx_num_queues == 1) { 1909 if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1); 1910 if_setsendqready(ifp); 1911 } 1912 1913 /* 1914 * Specify the media types supported by this adapter and register 1915 * callbacks to update media and link information 1916 */ 1917 ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); 1918 ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); 1919 ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); 1920 ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); 1921 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 1922 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); 1923 ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_T, 0, NULL); 1924 1925 ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1926 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); 1927 return (0); 1928 } 1929 1930 static int 1931 igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) 1932 { 1933 struct igc_adapter *adapter = iflib_get_softc(ctx); 1934 if_softc_ctx_t scctx = adapter->shared; 1935 int error = IGC_SUCCESS; 1936 struct igc_tx_queue *que; 1937 int i, j; 1938 1939 MPASS(adapter->tx_num_queues > 0); 1940 MPASS(adapter->tx_num_queues == ntxqsets); 1941 1942 /* First allocate the top level queue structs */ 1943 if (!(adapter->tx_queues = 1944 (struct igc_tx_queue *) malloc(sizeof(struct igc_tx_queue) * 1945 adapter->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { 1946 device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n"); 1947 return(ENOMEM); 1948 } 1949 1950 for (i = 0, que = adapter->tx_queues; i < adapter->tx_num_queues; i++, que++) { 1951 /* Set up some basics */ 1952 1953 struct tx_ring *txr = &que->txr; 1954 txr->adapter = que->adapter = adapter; 1955 que->me = txr->me = i; 1956 1957 /* Allocate report status array */ 1958 if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) { 1959 device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n"); 1960 error = ENOMEM; 1961 goto fail; 1962 } 1963 for (j = 0; j < scctx->isc_ntxd[0]; j++) 1964 txr->tx_rsq[j] = QIDX_INVALID; 1965 /* get the virtual and physical address of the hardware queues */ 1966 txr->tx_base = (struct igc_tx_desc *)vaddrs[i*ntxqs]; 1967 txr->tx_paddr = paddrs[i*ntxqs]; 1968 } 1969 1970 if (bootverbose) 1971 device_printf(iflib_get_dev(ctx), 1972 "allocated for %d tx_queues\n", adapter->tx_num_queues); 1973 return (0); 1974 fail: 1975 igc_if_queues_free(ctx); 1976 return (error); 1977 } 1978 1979 static int 1980 igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) 1981 { 1982 struct igc_adapter *adapter = iflib_get_softc(ctx); 1983 int error = IGC_SUCCESS; 1984 struct igc_rx_queue *que; 1985 int i; 1986 1987 MPASS(adapter->rx_num_queues > 0); 1988 MPASS(adapter->rx_num_queues == nrxqsets); 1989 1990 /* First allocate the top level queue structs */ 1991 if (!(adapter->rx_queues = 1992 (struct igc_rx_queue *) malloc(sizeof(struct igc_rx_queue) * 1993 adapter->rx_num_queues, M_DEVBUF, M_NOWAIT | 
M_ZERO))) { 1994 device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n"); 1995 error = ENOMEM; 1996 goto fail; 1997 } 1998 1999 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) { 2000 /* Set up some basics */ 2001 struct rx_ring *rxr = &que->rxr; 2002 rxr->adapter = que->adapter = adapter; 2003 rxr->que = que; 2004 que->me = rxr->me = i; 2005 2006 /* get the virtual and physical address of the hardware queues */ 2007 rxr->rx_base = (union igc_rx_desc_extended *)vaddrs[i*nrxqs]; 2008 rxr->rx_paddr = paddrs[i*nrxqs]; 2009 } 2010 2011 if (bootverbose) 2012 device_printf(iflib_get_dev(ctx), 2013 "allocated for %d rx_queues\n", adapter->rx_num_queues); 2014 2015 return (0); 2016 fail: 2017 igc_if_queues_free(ctx); 2018 return (error); 2019 } 2020 2021 static void 2022 igc_if_queues_free(if_ctx_t ctx) 2023 { 2024 struct igc_adapter *adapter = iflib_get_softc(ctx); 2025 struct igc_tx_queue *tx_que = adapter->tx_queues; 2026 struct igc_rx_queue *rx_que = adapter->rx_queues; 2027 2028 if (tx_que != NULL) { 2029 for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) { 2030 struct tx_ring *txr = &tx_que->txr; 2031 if (txr->tx_rsq == NULL) 2032 break; 2033 2034 free(txr->tx_rsq, M_DEVBUF); 2035 txr->tx_rsq = NULL; 2036 } 2037 free(adapter->tx_queues, M_DEVBUF); 2038 adapter->tx_queues = NULL; 2039 } 2040 2041 if (rx_que != NULL) { 2042 free(adapter->rx_queues, M_DEVBUF); 2043 adapter->rx_queues = NULL; 2044 } 2045 2046 igc_release_hw_control(adapter); 2047 2048 if (adapter->mta != NULL) { 2049 free(adapter->mta, M_DEVBUF); 2050 } 2051 } 2052 2053 /********************************************************************* 2054 * 2055 * Enable transmit unit. 2056 * 2057 **********************************************************************/ 2058 static void 2059 igc_initialize_transmit_unit(if_ctx_t ctx) 2060 { 2061 struct igc_adapter *adapter = iflib_get_softc(ctx); 2062 if_softc_ctx_t scctx = adapter->shared; 2063 struct igc_tx_queue *que; 2064 struct tx_ring *txr; 2065 struct igc_hw *hw = &adapter->hw; 2066 u32 tctl, txdctl = 0; 2067 2068 INIT_DEBUGOUT("igc_initialize_transmit_unit: begin"); 2069 2070 for (int i = 0; i < adapter->tx_num_queues; i++, txr++) { 2071 u64 bus_addr; 2072 caddr_t offp, endp; 2073 2074 que = &adapter->tx_queues[i]; 2075 txr = &que->txr; 2076 bus_addr = txr->tx_paddr; 2077 2078 /* Clear checksum offload context. 
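 * The bzero below wipes everything from csum_flags through the end of
 * struct tx_ring, discarding any stale offload state from a previous
 * initialization before the ring registers are programmed.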
*/ 2079 offp = (caddr_t)&txr->csum_flags; 2080 endp = (caddr_t)(txr + 1); 2081 bzero(offp, endp - offp); 2082 2083 /* Base and Len of TX Ring */ 2084 IGC_WRITE_REG(hw, IGC_TDLEN(i), 2085 scctx->isc_ntxd[0] * sizeof(struct igc_tx_desc)); 2086 IGC_WRITE_REG(hw, IGC_TDBAH(i), 2087 (u32)(bus_addr >> 32)); 2088 IGC_WRITE_REG(hw, IGC_TDBAL(i), 2089 (u32)bus_addr); 2090 /* Init the HEAD/TAIL indices */ 2091 IGC_WRITE_REG(hw, IGC_TDT(i), 0); 2092 IGC_WRITE_REG(hw, IGC_TDH(i), 0); 2093 2094 HW_DEBUGOUT2("Base = %x, Length = %x\n", 2095 IGC_READ_REG(&adapter->hw, IGC_TDBAL(i)), 2096 IGC_READ_REG(&adapter->hw, IGC_TDLEN(i))); 2097 2098 txdctl = 0; /* clear txdctl */ 2099 txdctl |= 0x1f; /* PTHRESH */ 2100 txdctl |= 1 << 8; /* HTHRESH */ 2101 txdctl |= 1 << 16;/* WTHRESH */ 2102 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ 2103 txdctl |= IGC_TXDCTL_GRAN; 2104 txdctl |= 1 << 25; /* LWTHRESH */ 2105 2106 IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl); 2107 } 2108 2109 /* Program the Transmit Control Register */ 2110 tctl = IGC_READ_REG(&adapter->hw, IGC_TCTL); 2111 tctl &= ~IGC_TCTL_CT; 2112 tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN | 2113 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT)); 2114 2115 /* This write will effectively turn on the transmit unit. */ 2116 IGC_WRITE_REG(&adapter->hw, IGC_TCTL, tctl); 2117 } 2118 2119 /********************************************************************* 2120 * 2121 * Enable receive unit. 2122 * 2123 **********************************************************************/ 2124 #define BSIZEPKT_ROUNDUP ((1<<IGC_SRRCTL_BSIZEPKT_SHIFT)-1) 2125 2126 static void 2127 igc_initialize_receive_unit(if_ctx_t ctx) 2128 { 2129 struct igc_adapter *adapter = iflib_get_softc(ctx); 2130 if_softc_ctx_t scctx = adapter->shared; 2131 if_t ifp = iflib_get_ifp(ctx); 2132 struct igc_hw *hw = &adapter->hw; 2133 struct igc_rx_queue *que; 2134 int i; 2135 u32 psize, rctl, rxcsum, srrctl = 0; 2136 2137 INIT_DEBUGOUT("igc_initialize_receive_units: begin"); 2138 2139 /* 2140 * Make sure receives are disabled while setting 2141 * up the descriptor ring 2142 */ 2143 rctl = IGC_READ_REG(hw, IGC_RCTL); 2144 IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN); 2145 2146 /* Setup the Receive Control Register */ 2147 rctl &= ~(3 << IGC_RCTL_MO_SHIFT); 2148 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | 2149 IGC_RCTL_LBM_NO | IGC_RCTL_RDMTS_HALF | 2150 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); 2151 2152 /* Do not store bad packets */ 2153 rctl &= ~IGC_RCTL_SBP; 2154 2155 /* Enable Long Packet receive */ 2156 if (if_getmtu(ifp) > ETHERMTU) 2157 rctl |= IGC_RCTL_LPE; 2158 else 2159 rctl &= ~IGC_RCTL_LPE; 2160 2161 /* Strip the CRC */ 2162 if (!igc_disable_crc_stripping) 2163 rctl |= IGC_RCTL_SECRC; 2164 2165 rxcsum = IGC_READ_REG(hw, IGC_RXCSUM); 2166 if (if_getcapenable(ifp) & IFCAP_RXCSUM) { 2167 rxcsum |= IGC_RXCSUM_CRCOFL; 2168 if (adapter->tx_num_queues > 1) 2169 rxcsum |= IGC_RXCSUM_PCSD; 2170 else 2171 rxcsum |= IGC_RXCSUM_IPPCSE; 2172 } else { 2173 if (adapter->tx_num_queues > 1) 2174 rxcsum |= IGC_RXCSUM_PCSD; 2175 else 2176 rxcsum &= ~IGC_RXCSUM_TUOFL; 2177 } 2178 IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum); 2179 2180 if (adapter->rx_num_queues > 1) 2181 igc_initialize_rss_mapping(adapter); 2182 2183 if (if_getmtu(ifp) > ETHERMTU) { 2184 psize = scctx->isc_max_frame_size; 2185 /* are we on a vlan? 
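 * If a VLAN trunk is configured, allow for the VLAN tag
 * (VLAN_TAG_SIZE bytes) when programming the long packet
 * maximum length (RLPML).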
*/ 2186 if (if_vlantrunkinuse(ifp)) 2187 psize += VLAN_TAG_SIZE; 2188 IGC_WRITE_REG(&adapter->hw, IGC_RLPML, psize); 2189 } 2190 2191 /* Set maximum packet buffer len */ 2192 srrctl |= (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> 2193 IGC_SRRCTL_BSIZEPKT_SHIFT; 2194 /* srrctl above overrides this but set the register to a sane value */ 2195 rctl |= IGC_RCTL_SZ_2048; 2196 2197 /* 2198 * If TX flow control is disabled and there's >1 queue defined, 2199 * enable DROP. 2200 * 2201 * This drops frames rather than hanging the RX MAC for all queues. 2202 */ 2203 if ((adapter->rx_num_queues > 1) && 2204 (adapter->fc == igc_fc_none || 2205 adapter->fc == igc_fc_rx_pause)) { 2206 srrctl |= IGC_SRRCTL_DROP_EN; 2207 } 2208 2209 /* Setup the Base and Length of the Rx Descriptor Rings */ 2210 for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) { 2211 struct rx_ring *rxr = &que->rxr; 2212 u64 bus_addr = rxr->rx_paddr; 2213 u32 rxdctl; 2214 2215 #ifdef notyet 2216 /* Configure for header split? -- ignore for now */ 2217 rxr->hdr_split = igc_header_split; 2218 #else 2219 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; 2220 #endif 2221 2222 IGC_WRITE_REG(hw, IGC_RDLEN(i), 2223 scctx->isc_nrxd[0] * sizeof(struct igc_rx_desc)); 2224 IGC_WRITE_REG(hw, IGC_RDBAH(i), 2225 (uint32_t)(bus_addr >> 32)); 2226 IGC_WRITE_REG(hw, IGC_RDBAL(i), 2227 (uint32_t)bus_addr); 2228 IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl); 2229 /* Setup the Head and Tail Descriptor Pointers */ 2230 IGC_WRITE_REG(hw, IGC_RDH(i), 0); 2231 IGC_WRITE_REG(hw, IGC_RDT(i), 0); 2232 /* Enable this Queue */ 2233 rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i)); 2234 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; 2235 rxdctl &= 0xFFF00000; 2236 rxdctl |= IGC_RX_PTHRESH; 2237 rxdctl |= IGC_RX_HTHRESH << 8; 2238 rxdctl |= IGC_RX_WTHRESH << 16; 2239 IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl); 2240 } 2241 2242 /* Make sure VLAN Filters are off */ 2243 rctl &= ~IGC_RCTL_VFE; 2244 2245 /* Write out the settings */ 2246 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 2247 2248 return; 2249 } 2250 2251 static void 2252 igc_setup_vlan_hw_support(if_ctx_t ctx) 2253 { 2254 struct igc_adapter *adapter = iflib_get_softc(ctx); 2255 struct igc_hw *hw = &adapter->hw; 2256 struct ifnet *ifp = iflib_get_ifp(ctx); 2257 u32 reg; 2258 2259 /* igc hardware doesn't seem to implement VFTA for HWFILTER */ 2260 2261 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING && 2262 !igc_disable_crc_stripping) { 2263 reg = IGC_READ_REG(hw, IGC_CTRL); 2264 reg |= IGC_CTRL_VME; 2265 IGC_WRITE_REG(hw, IGC_CTRL, reg); 2266 } else { 2267 reg = IGC_READ_REG(hw, IGC_CTRL); 2268 reg &= ~IGC_CTRL_VME; 2269 IGC_WRITE_REG(hw, IGC_CTRL, reg); 2270 } 2271 } 2272 2273 static void 2274 igc_if_intr_enable(if_ctx_t ctx) 2275 { 2276 struct igc_adapter *adapter = iflib_get_softc(ctx); 2277 struct igc_hw *hw = &adapter->hw; 2278 u32 mask; 2279 2280 if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) { 2281 mask = (adapter->que_mask | adapter->link_mask); 2282 IGC_WRITE_REG(hw, IGC_EIAC, mask); 2283 IGC_WRITE_REG(hw, IGC_EIAM, mask); 2284 IGC_WRITE_REG(hw, IGC_EIMS, mask); 2285 IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC); 2286 } else 2287 IGC_WRITE_REG(hw, IGC_IMS, IMS_ENABLE_MASK); 2288 IGC_WRITE_FLUSH(hw); 2289 } 2290 2291 static void 2292 igc_if_intr_disable(if_ctx_t ctx) 2293 { 2294 struct igc_adapter *adapter = iflib_get_softc(ctx); 2295 struct igc_hw *hw = &adapter->hw; 2296 2297 if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) { 2298 IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff); 2299 IGC_WRITE_REG(hw, IGC_EIAC, 
0); 2300 } 2301 IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff); 2302 IGC_WRITE_FLUSH(hw); 2303 } 2304 2305 /* 2306 * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit. 2307 * For ASF and Pass Through versions of f/w this means 2308 * that the driver is loaded. For AMT version type f/w 2309 * this means that the network i/f is open. 2310 */ 2311 static void 2312 igc_get_hw_control(struct igc_adapter *adapter) 2313 { 2314 u32 ctrl_ext; 2315 2316 if (adapter->vf_ifp) 2317 return; 2318 2319 ctrl_ext = IGC_READ_REG(&adapter->hw, IGC_CTRL_EXT); 2320 IGC_WRITE_REG(&adapter->hw, IGC_CTRL_EXT, 2321 ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 2322 } 2323 2324 /* 2325 * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 2326 * For ASF and Pass Through versions of f/w this means that 2327 * the driver is no longer loaded. For AMT versions of the 2328 * f/w this means that the network i/f is closed. 2329 */ 2330 static void 2331 igc_release_hw_control(struct igc_adapter *adapter) 2332 { 2333 u32 ctrl_ext; 2334 2335 ctrl_ext = IGC_READ_REG(&adapter->hw, IGC_CTRL_EXT); 2336 IGC_WRITE_REG(&adapter->hw, IGC_CTRL_EXT, 2337 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 2338 return; 2339 } 2340 2341 static int 2342 igc_is_valid_ether_addr(u8 *addr) 2343 { 2344 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; 2345 2346 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { 2347 return (false); 2348 } 2349 2350 return (true); 2351 } 2352 2353 /* 2354 ** Parse the interface capabilities with regard 2355 ** to both system management and wake-on-lan for 2356 ** later use. 2357 */ 2358 static void 2359 igc_get_wakeup(if_ctx_t ctx) 2360 { 2361 struct igc_adapter *adapter = iflib_get_softc(ctx); 2362 u16 eeprom_data = 0, apme_mask; 2363 2364 apme_mask = IGC_WUC_APME; 2365 eeprom_data = IGC_READ_REG(&adapter->hw, IGC_WUC); 2366 2367 if (eeprom_data & apme_mask) 2368 adapter->wol = IGC_WUFC_LNKC; 2369 } 2370 2371 2372 /* 2373 * Enable PCI Wake On Lan capability 2374 */ 2375 static void 2376 igc_enable_wakeup(if_ctx_t ctx) 2377 { 2378 struct igc_adapter *adapter = iflib_get_softc(ctx); 2379 device_t dev = iflib_get_dev(ctx); 2380 if_t ifp = iflib_get_ifp(ctx); 2381 int error = 0; 2382 u32 pmc, ctrl, rctl; 2383 u16 status; 2384 2385 if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0) 2386 return; 2387 2388 /* 2389 * Determine type of Wakeup: note that wol 2390 * is set with all bits on by default. 
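 *
 * Each wake capability the administrator has left disabled (magic
 * packet, unicast, multicast) gets its corresponding WUFC filter bit
 * cleared below before the remaining mask is handed to the hardware.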
2391 */ 2392 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0) 2393 adapter->wol &= ~IGC_WUFC_MAG; 2394 2395 if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0) 2396 adapter->wol &= ~IGC_WUFC_EX; 2397 2398 if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0) 2399 adapter->wol &= ~IGC_WUFC_MC; 2400 else { 2401 rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL); 2402 rctl |= IGC_RCTL_MPE; 2403 IGC_WRITE_REG(&adapter->hw, IGC_RCTL, rctl); 2404 } 2405 2406 if (!(adapter->wol & (IGC_WUFC_EX | IGC_WUFC_MAG | IGC_WUFC_MC))) 2407 goto pme; 2408 2409 /* Advertise the wakeup capability */ 2410 ctrl = IGC_READ_REG(&adapter->hw, IGC_CTRL); 2411 ctrl |= IGC_CTRL_ADVD3WUC; 2412 IGC_WRITE_REG(&adapter->hw, IGC_CTRL, ctrl); 2413 2414 /* Enable wakeup by the MAC */ 2415 IGC_WRITE_REG(&adapter->hw, IGC_WUC, IGC_WUC_PME_EN); 2416 IGC_WRITE_REG(&adapter->hw, IGC_WUFC, adapter->wol); 2417 2418 pme: 2419 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2); 2420 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2421 if (!error && (if_getcapenable(ifp) & IFCAP_WOL)) 2422 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2423 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2); 2424 2425 return; 2426 } 2427 2428 /********************************************************************** 2429 * 2430 * Update the board statistics counters. 2431 * 2432 **********************************************************************/ 2433 static void 2434 igc_update_stats_counters(struct igc_adapter *adapter) 2435 { 2436 u64 prev_xoffrxc = adapter->stats.xoffrxc; 2437 2438 adapter->stats.crcerrs += IGC_READ_REG(&adapter->hw, IGC_CRCERRS); 2439 adapter->stats.mpc += IGC_READ_REG(&adapter->hw, IGC_MPC); 2440 adapter->stats.scc += IGC_READ_REG(&adapter->hw, IGC_SCC); 2441 adapter->stats.ecol += IGC_READ_REG(&adapter->hw, IGC_ECOL); 2442 2443 adapter->stats.mcc += IGC_READ_REG(&adapter->hw, IGC_MCC); 2444 adapter->stats.latecol += IGC_READ_REG(&adapter->hw, IGC_LATECOL); 2445 adapter->stats.colc += IGC_READ_REG(&adapter->hw, IGC_COLC); 2446 adapter->stats.colc += IGC_READ_REG(&adapter->hw, IGC_RERC); 2447 adapter->stats.dc += IGC_READ_REG(&adapter->hw, IGC_DC); 2448 adapter->stats.rlec += IGC_READ_REG(&adapter->hw, IGC_RLEC); 2449 adapter->stats.xonrxc += IGC_READ_REG(&adapter->hw, IGC_XONRXC); 2450 adapter->stats.xontxc += IGC_READ_REG(&adapter->hw, IGC_XONTXC); 2451 adapter->stats.xoffrxc += IGC_READ_REG(&adapter->hw, IGC_XOFFRXC); 2452 /* 2453 * For watchdog management we need to know if we have been 2454 * paused during the last interval, so capture that here. 
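 * iflib looks at isc_pause_frames in its timer path so that pause
 * frames received during the interval suppress a spurious TX
 * watchdog reset.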
2455 */ 2456 if (adapter->stats.xoffrxc != prev_xoffrxc) 2457 adapter->shared->isc_pause_frames = 1; 2458 adapter->stats.xofftxc += IGC_READ_REG(&adapter->hw, IGC_XOFFTXC); 2459 adapter->stats.fcruc += IGC_READ_REG(&adapter->hw, IGC_FCRUC); 2460 adapter->stats.prc64 += IGC_READ_REG(&adapter->hw, IGC_PRC64); 2461 adapter->stats.prc127 += IGC_READ_REG(&adapter->hw, IGC_PRC127); 2462 adapter->stats.prc255 += IGC_READ_REG(&adapter->hw, IGC_PRC255); 2463 adapter->stats.prc511 += IGC_READ_REG(&adapter->hw, IGC_PRC511); 2464 adapter->stats.prc1023 += IGC_READ_REG(&adapter->hw, IGC_PRC1023); 2465 adapter->stats.prc1522 += IGC_READ_REG(&adapter->hw, IGC_PRC1522); 2466 adapter->stats.tlpic += IGC_READ_REG(&adapter->hw, IGC_TLPIC); 2467 adapter->stats.rlpic += IGC_READ_REG(&adapter->hw, IGC_RLPIC); 2468 adapter->stats.gprc += IGC_READ_REG(&adapter->hw, IGC_GPRC); 2469 adapter->stats.bprc += IGC_READ_REG(&adapter->hw, IGC_BPRC); 2470 adapter->stats.mprc += IGC_READ_REG(&adapter->hw, IGC_MPRC); 2471 adapter->stats.gptc += IGC_READ_REG(&adapter->hw, IGC_GPTC); 2472 2473 /* For the 64-bit byte counters the low dword must be read first. */ 2474 /* Both registers clear on the read of the high dword */ 2475 2476 adapter->stats.gorc += IGC_READ_REG(&adapter->hw, IGC_GORCL) + 2477 ((u64)IGC_READ_REG(&adapter->hw, IGC_GORCH) << 32); 2478 adapter->stats.gotc += IGC_READ_REG(&adapter->hw, IGC_GOTCL) + 2479 ((u64)IGC_READ_REG(&adapter->hw, IGC_GOTCH) << 32); 2480 2481 adapter->stats.rnbc += IGC_READ_REG(&adapter->hw, IGC_RNBC); 2482 adapter->stats.ruc += IGC_READ_REG(&adapter->hw, IGC_RUC); 2483 adapter->stats.rfc += IGC_READ_REG(&adapter->hw, IGC_RFC); 2484 adapter->stats.roc += IGC_READ_REG(&adapter->hw, IGC_ROC); 2485 adapter->stats.rjc += IGC_READ_REG(&adapter->hw, IGC_RJC); 2486 2487 adapter->stats.tor += IGC_READ_REG(&adapter->hw, IGC_TORH); 2488 adapter->stats.tot += IGC_READ_REG(&adapter->hw, IGC_TOTH); 2489 2490 adapter->stats.tpr += IGC_READ_REG(&adapter->hw, IGC_TPR); 2491 adapter->stats.tpt += IGC_READ_REG(&adapter->hw, IGC_TPT); 2492 adapter->stats.ptc64 += IGC_READ_REG(&adapter->hw, IGC_PTC64); 2493 adapter->stats.ptc127 += IGC_READ_REG(&adapter->hw, IGC_PTC127); 2494 adapter->stats.ptc255 += IGC_READ_REG(&adapter->hw, IGC_PTC255); 2495 adapter->stats.ptc511 += IGC_READ_REG(&adapter->hw, IGC_PTC511); 2496 adapter->stats.ptc1023 += IGC_READ_REG(&adapter->hw, IGC_PTC1023); 2497 adapter->stats.ptc1522 += IGC_READ_REG(&adapter->hw, IGC_PTC1522); 2498 adapter->stats.mptc += IGC_READ_REG(&adapter->hw, IGC_MPTC); 2499 adapter->stats.bptc += IGC_READ_REG(&adapter->hw, IGC_BPTC); 2500 2501 /* Interrupt Counts */ 2502 adapter->stats.iac += IGC_READ_REG(&adapter->hw, IGC_IAC); 2503 adapter->stats.rxdmtc += IGC_READ_REG(&adapter->hw, IGC_RXDMTC); 2504 2505 adapter->stats.algnerrc += IGC_READ_REG(&adapter->hw, IGC_ALGNERRC); 2506 adapter->stats.tncrs += IGC_READ_REG(&adapter->hw, IGC_TNCRS); 2507 adapter->stats.htdpmc += IGC_READ_REG(&adapter->hw, IGC_HTDPMC); 2508 adapter->stats.tsctc += IGC_READ_REG(&adapter->hw, IGC_TSCTC); 2509 } 2510 2511 static uint64_t 2512 igc_if_get_counter(if_ctx_t ctx, ift_counter cnt) 2513 { 2514 struct igc_adapter *adapter = iflib_get_softc(ctx); 2515 if_t ifp = iflib_get_ifp(ctx); 2516 2517 switch (cnt) { 2518 case IFCOUNTER_COLLISIONS: 2519 return (adapter->stats.colc); 2520 case IFCOUNTER_IERRORS: 2521 return (adapter->dropped_pkts + adapter->stats.rxerrc + 2522 adapter->stats.crcerrs + adapter->stats.algnerrc + 2523 adapter->stats.ruc + adapter->stats.roc + 2524 
adapter->stats.mpc + adapter->stats.htdpmc); 2525 case IFCOUNTER_OERRORS: 2526 return (adapter->stats.ecol + adapter->stats.latecol + 2527 adapter->watchdog_events); 2528 default: 2529 return (if_get_counter_default(ifp, cnt)); 2530 } 2531 } 2532 2533 /* igc_if_needs_restart - Tell iflib when the driver needs to be reinitialized 2534 * @ctx: iflib context 2535 * @event: event code to check 2536 * 2537 * Defaults to returning false for unknown events. 2538 * 2539 * @returns true if iflib needs to reinit the interface 2540 */ 2541 static bool 2542 igc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) 2543 { 2544 switch (event) { 2545 case IFLIB_RESTART_VLAN_CONFIG: 2546 default: 2547 return (false); 2548 } 2549 } 2550 2551 /* Export a single 32-bit register via a read-only sysctl. */ 2552 static int 2553 igc_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) 2554 { 2555 struct igc_adapter *adapter; 2556 u_int val; 2557 2558 adapter = oidp->oid_arg1; 2559 val = IGC_READ_REG(&adapter->hw, oidp->oid_arg2); 2560 return (sysctl_handle_int(oidp, &val, 0, req)); 2561 } 2562 2563 /* Per queue holdoff interrupt rate handler */ 2564 static int 2565 igc_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2566 { 2567 struct igc_rx_queue *rque; 2568 struct igc_tx_queue *tque; 2569 struct igc_hw *hw; 2570 int error; 2571 u32 reg, usec, rate; 2572 2573 bool tx = oidp->oid_arg2; 2574 2575 if (tx) { 2576 tque = oidp->oid_arg1; 2577 hw = &tque->adapter->hw; 2578 reg = IGC_READ_REG(hw, IGC_EITR(tque->me)); 2579 } else { 2580 rque = oidp->oid_arg1; 2581 hw = &rque->adapter->hw; 2582 reg = IGC_READ_REG(hw, IGC_EITR(rque->msix)); 2583 } 2584 2585 usec = (reg & IGC_QVECTOR_MASK); 2586 if (usec > 0) 2587 rate = IGC_INTS_TO_EITR(usec); 2588 else 2589 rate = 0; 2590 2591 error = sysctl_handle_int(oidp, &rate, 0, req); 2592 if (error || !req->newptr) 2593 return error; 2594 return 0; 2595 } 2596 2597 /* 2598 * Add sysctl variables, one per statistic, to the system. 
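 *
 * The nodes are created under the device's sysctl tree, so (assuming
 * unit 0) they appear as e.g. dev.igc.0.mac_stats.good_pkts_recvd or
 * dev.igc.0.queue_rx_0.rx_irq and can be read with sysctl(8).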
2599 */ 2600 static void 2601 igc_add_hw_stats(struct igc_adapter *adapter) 2602 { 2603 device_t dev = iflib_get_dev(adapter->ctx); 2604 struct igc_tx_queue *tx_que = adapter->tx_queues; 2605 struct igc_rx_queue *rx_que = adapter->rx_queues; 2606 2607 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2608 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 2609 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 2610 struct igc_hw_stats *stats = &adapter->stats; 2611 2612 struct sysctl_oid *stat_node, *queue_node, *int_node; 2613 struct sysctl_oid_list *stat_list, *queue_list, *int_list; 2614 2615 #define QUEUE_NAME_LEN 32 2616 char namebuf[QUEUE_NAME_LEN]; 2617 2618 /* Driver Statistics */ 2619 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 2620 CTLFLAG_RD, &adapter->dropped_pkts, 2621 "Driver dropped packets"); 2622 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 2623 CTLFLAG_RD, &adapter->link_irq, 2624 "Link MSI-X IRQ Handled"); 2625 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", 2626 CTLFLAG_RD, &adapter->rx_overruns, 2627 "RX overruns"); 2628 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", 2629 CTLFLAG_RD, &adapter->watchdog_events, 2630 "Watchdog timeouts"); 2631 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control", 2632 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2633 adapter, IGC_CTRL, igc_sysctl_reg_handler, "IU", 2634 "Device Control Register"); 2635 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control", 2636 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2637 adapter, IGC_RCTL, igc_sysctl_reg_handler, "IU", 2638 "Receiver Control Register"); 2639 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", 2640 CTLFLAG_RD, &adapter->hw.fc.high_water, 0, 2641 "Flow Control High Watermark"); 2642 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 2643 CTLFLAG_RD, &adapter->hw.fc.low_water, 0, 2644 "Flow Control Low Watermark"); 2645 2646 for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) { 2647 struct tx_ring *txr = &tx_que->txr; 2648 snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i); 2649 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 2650 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Name"); 2651 queue_list = SYSCTL_CHILDREN(queue_node); 2652 2653 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 2654 CTLTYPE_UINT | CTLFLAG_RD, tx_que, 2655 true, igc_sysctl_interrupt_rate_handler, "IU", 2656 "Interrupt Rate"); 2657 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 2658 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 2659 IGC_TDH(txr->me), igc_sysctl_reg_handler, "IU", 2660 "Transmit Descriptor Head"); 2661 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 2662 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 2663 IGC_TDT(txr->me), igc_sysctl_reg_handler, "IU", 2664 "Transmit Descriptor Tail"); 2665 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", 2666 CTLFLAG_RD, &txr->tx_irq, 2667 "Queue MSI-X Transmit Interrupts"); 2668 } 2669 2670 for (int j = 0; j < adapter->rx_num_queues; j++, rx_que++) { 2671 struct rx_ring *rxr = &rx_que->rxr; 2672 snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j); 2673 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 2674 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name"); 2675 queue_list = SYSCTL_CHILDREN(queue_node); 2676 2677 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 2678 CTLTYPE_UINT | CTLFLAG_RD, rx_que, 2679 false, igc_sysctl_interrupt_rate_handler, "IU", 2680 "Interrupt Rate"); 2681 SYSCTL_ADD_PROC(ctx, queue_list, 
OID_AUTO, "rxd_head", 2682 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 2683 IGC_RDH(rxr->me), igc_sysctl_reg_handler, "IU", 2684 "Receive Descriptor Head"); 2685 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 2686 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 2687 IGC_RDT(rxr->me), igc_sysctl_reg_handler, "IU", 2688 "Receive Descriptor Tail"); 2689 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq", 2690 CTLFLAG_RD, &rxr->rx_irq, 2691 "Queue MSI-X Receive Interrupts"); 2692 } 2693 2694 /* MAC stats get their own sub node */ 2695 2696 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 2697 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics"); 2698 stat_list = SYSCTL_CHILDREN(stat_node); 2699 2700 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll", 2701 CTLFLAG_RD, &stats->ecol, 2702 "Excessive collisions"); 2703 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll", 2704 CTLFLAG_RD, &stats->scc, 2705 "Single collisions"); 2706 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll", 2707 CTLFLAG_RD, &stats->mcc, 2708 "Multiple collisions"); 2709 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll", 2710 CTLFLAG_RD, &stats->latecol, 2711 "Late collisions"); 2712 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count", 2713 CTLFLAG_RD, &stats->colc, 2714 "Collision Count"); 2715 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors", 2716 CTLFLAG_RD, &adapter->stats.symerrs, 2717 "Symbol Errors"); 2718 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors", 2719 CTLFLAG_RD, &adapter->stats.sec, 2720 "Sequence Errors"); 2721 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count", 2722 CTLFLAG_RD, &adapter->stats.dc, 2723 "Defer Count"); 2724 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets", 2725 CTLFLAG_RD, &adapter->stats.mpc, 2726 "Missed Packets"); 2727 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", 2728 CTLFLAG_RD, &adapter->stats.rnbc, 2729 "Receive No Buffers"); 2730 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize", 2731 CTLFLAG_RD, &adapter->stats.ruc, 2732 "Receive Undersize"); 2733 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 2734 CTLFLAG_RD, &adapter->stats.rfc, 2735 "Fragmented Packets Received "); 2736 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize", 2737 CTLFLAG_RD, &adapter->stats.roc, 2738 "Oversized Packets Received"); 2739 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber", 2740 CTLFLAG_RD, &adapter->stats.rjc, 2741 "Recevied Jabber"); 2742 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs", 2743 CTLFLAG_RD, &adapter->stats.rxerrc, 2744 "Receive Errors"); 2745 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 2746 CTLFLAG_RD, &adapter->stats.crcerrs, 2747 "CRC errors"); 2748 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs", 2749 CTLFLAG_RD, &adapter->stats.algnerrc, 2750 "Alignment Errors"); 2751 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 2752 CTLFLAG_RD, &adapter->stats.xonrxc, 2753 "XON Received"); 2754 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 2755 CTLFLAG_RD, &adapter->stats.xontxc, 2756 "XON Transmitted"); 2757 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 2758 CTLFLAG_RD, &adapter->stats.xoffrxc, 2759 "XOFF Received"); 2760 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 2761 CTLFLAG_RD, &adapter->stats.xofftxc, 2762 "XOFF Transmitted"); 2763 2764 /* Packet Reception Stats */ 2765 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", 2766 CTLFLAG_RD, 
&adapter->stats.tpr, 2767 "Total Packets Received "); 2768 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", 2769 CTLFLAG_RD, &adapter->stats.gprc, 2770 "Good Packets Received"); 2771 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", 2772 CTLFLAG_RD, &adapter->stats.bprc, 2773 "Broadcast Packets Received"); 2774 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", 2775 CTLFLAG_RD, &adapter->stats.mprc, 2776 "Multicast Packets Received"); 2777 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 2778 CTLFLAG_RD, &adapter->stats.prc64, 2779 "64 byte frames received "); 2780 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 2781 CTLFLAG_RD, &adapter->stats.prc127, 2782 "65-127 byte frames received"); 2783 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 2784 CTLFLAG_RD, &adapter->stats.prc255, 2785 "128-255 byte frames received"); 2786 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 2787 CTLFLAG_RD, &adapter->stats.prc511, 2788 "256-511 byte frames received"); 2789 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 2790 CTLFLAG_RD, &adapter->stats.prc1023, 2791 "512-1023 byte frames received"); 2792 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 2793 CTLFLAG_RD, &adapter->stats.prc1522, 2794 "1023-1522 byte frames received"); 2795 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 2796 CTLFLAG_RD, &adapter->stats.gorc, 2797 "Good Octets Received"); 2798 2799 /* Packet Transmission Stats */ 2800 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 2801 CTLFLAG_RD, &adapter->stats.gotc, 2802 "Good Octets Transmitted"); 2803 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 2804 CTLFLAG_RD, &adapter->stats.tpt, 2805 "Total Packets Transmitted"); 2806 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 2807 CTLFLAG_RD, &adapter->stats.gptc, 2808 "Good Packets Transmitted"); 2809 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 2810 CTLFLAG_RD, &adapter->stats.bptc, 2811 "Broadcast Packets Transmitted"); 2812 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 2813 CTLFLAG_RD, &adapter->stats.mptc, 2814 "Multicast Packets Transmitted"); 2815 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 2816 CTLFLAG_RD, &adapter->stats.ptc64, 2817 "64 byte frames transmitted "); 2818 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 2819 CTLFLAG_RD, &adapter->stats.ptc127, 2820 "65-127 byte frames transmitted"); 2821 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 2822 CTLFLAG_RD, &adapter->stats.ptc255, 2823 "128-255 byte frames transmitted"); 2824 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 2825 CTLFLAG_RD, &adapter->stats.ptc511, 2826 "256-511 byte frames transmitted"); 2827 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 2828 CTLFLAG_RD, &adapter->stats.ptc1023, 2829 "512-1023 byte frames transmitted"); 2830 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 2831 CTLFLAG_RD, &adapter->stats.ptc1522, 2832 "1024-1522 byte frames transmitted"); 2833 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd", 2834 CTLFLAG_RD, &adapter->stats.tsctc, 2835 "TSO Contexts Transmitted"); 2836 2837 /* Interrupt Stats */ 2838 2839 int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", 2840 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics"); 2841 int_list = SYSCTL_CHILDREN(int_node); 2842 2843 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, 
"asserts", 2844 CTLFLAG_RD, &adapter->stats.iac, 2845 "Interrupt Assertion Count"); 2846 2847 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", 2848 CTLFLAG_RD, &adapter->stats.rxdmtc, 2849 "Rx Desc Min Thresh Count"); 2850 } 2851 2852 static void 2853 igc_fw_version(struct igc_adapter *sc) 2854 { 2855 struct igc_hw *hw = &sc->hw; 2856 struct igc_fw_version *fw_ver = &sc->fw_ver; 2857 2858 *fw_ver = (struct igc_fw_version){0}; 2859 2860 igc_get_fw_version(hw, fw_ver); 2861 } 2862 2863 static void 2864 igc_sbuf_fw_version(struct igc_fw_version *fw_ver, struct sbuf *buf) 2865 { 2866 const char *space = ""; 2867 2868 if (fw_ver->eep_major || fw_ver->eep_minor || fw_ver->eep_build) { 2869 sbuf_printf(buf, "EEPROM V%d.%d-%d", fw_ver->eep_major, 2870 fw_ver->eep_minor, fw_ver->eep_build); 2871 space = " "; 2872 } 2873 2874 if (fw_ver->invm_major || fw_ver->invm_minor || fw_ver->invm_img_type) { 2875 sbuf_printf(buf, "%sNVM V%d.%d imgtype%d", 2876 space, fw_ver->invm_major, fw_ver->invm_minor, 2877 fw_ver->invm_img_type); 2878 space = " "; 2879 } 2880 2881 if (fw_ver->or_valid) { 2882 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d", 2883 space, fw_ver->or_major, fw_ver->or_build, 2884 fw_ver->or_patch); 2885 space = " "; 2886 } 2887 2888 if (fw_ver->etrack_id) 2889 sbuf_printf(buf, "%seTrack 0x%08x", space, fw_ver->etrack_id); 2890 } 2891 2892 static void 2893 igc_print_fw_version(struct igc_adapter *sc ) 2894 { 2895 device_t dev = sc->dev; 2896 struct sbuf *buf; 2897 int error = 0; 2898 2899 buf = sbuf_new_auto(); 2900 if (!buf) { 2901 device_printf(dev, "Could not allocate sbuf for output.\n"); 2902 return; 2903 } 2904 2905 igc_sbuf_fw_version(&sc->fw_ver, buf); 2906 2907 error = sbuf_finish(buf); 2908 if (error) 2909 device_printf(dev, "Error finishing sbuf: %d\n", error); 2910 else if (sbuf_len(buf)) 2911 device_printf(dev, "%s\n", sbuf_data(buf)); 2912 2913 sbuf_delete(buf); 2914 } 2915 2916 static int 2917 igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS) 2918 { 2919 struct igc_adapter *sc = (struct igc_adapter *)arg1; 2920 device_t dev = sc->dev; 2921 struct sbuf *buf; 2922 int error = 0; 2923 2924 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2925 if (!buf) { 2926 device_printf(dev, "Could not allocate sbuf for output.\n"); 2927 return (ENOMEM); 2928 } 2929 2930 igc_sbuf_fw_version(&sc->fw_ver, buf); 2931 2932 error = sbuf_finish(buf); 2933 if (error) 2934 device_printf(dev, "Error finishing sbuf: %d\n", error); 2935 2936 sbuf_delete(buf); 2937 2938 return (0); 2939 } 2940 2941 /********************************************************************** 2942 * 2943 * This routine provides a way to dump out the adapter eeprom, 2944 * often a useful debug/service tool. This only dumps the first 2945 * 32 words, stuff that matters is in that extent. 2946 * 2947 **********************************************************************/ 2948 static int 2949 igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) 2950 { 2951 struct igc_adapter *adapter = (struct igc_adapter *)arg1; 2952 int error; 2953 int result; 2954 2955 result = -1; 2956 error = sysctl_handle_int(oidp, &result, 0, req); 2957 2958 if (error || !req->newptr) 2959 return (error); 2960 2961 /* 2962 * This value will cause a hex dump of the 2963 * first 32 16-bit words of the EEPROM to 2964 * the screen. 
2965 */ 2966 if (result == 1) 2967 igc_print_nvm_info(adapter); 2968 2969 return (error); 2970 } 2971 2972 static void 2973 igc_print_nvm_info(struct igc_adapter *adapter) 2974 { 2975 u16 eeprom_data; 2976 int i, j, row = 0; 2977 2978 /* Its a bit crude, but it gets the job done */ 2979 printf("\nInterface EEPROM Dump:\n"); 2980 printf("Offset\n0x0000 "); 2981 for (i = 0, j = 0; i < 32; i++, j++) { 2982 if (j == 8) { /* Make the offset block */ 2983 j = 0; ++row; 2984 printf("\n0x00%x0 ",row); 2985 } 2986 igc_read_nvm(&adapter->hw, i, 1, &eeprom_data); 2987 printf("%04x ", eeprom_data); 2988 } 2989 printf("\n"); 2990 } 2991 2992 /* 2993 * Set flow control using sysctl: 2994 * Flow control values: 2995 * 0 - off 2996 * 1 - rx pause 2997 * 2 - tx pause 2998 * 3 - full 2999 */ 3000 static int 3001 igc_set_flowcntl(SYSCTL_HANDLER_ARGS) 3002 { 3003 int error; 3004 static int input = 3; /* default is full */ 3005 struct igc_adapter *adapter = (struct igc_adapter *) arg1; 3006 3007 error = sysctl_handle_int(oidp, &input, 0, req); 3008 3009 if ((error) || (req->newptr == NULL)) 3010 return (error); 3011 3012 if (input == adapter->fc) /* no change? */ 3013 return (error); 3014 3015 switch (input) { 3016 case igc_fc_rx_pause: 3017 case igc_fc_tx_pause: 3018 case igc_fc_full: 3019 case igc_fc_none: 3020 adapter->hw.fc.requested_mode = input; 3021 adapter->fc = input; 3022 break; 3023 default: 3024 /* Do nothing */ 3025 return (error); 3026 } 3027 3028 adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; 3029 igc_force_mac_fc(&adapter->hw); 3030 return (error); 3031 } 3032 3033 /* 3034 * Manage Energy Efficient Ethernet: 3035 * Control values: 3036 * 0/1 - enabled/disabled 3037 */ 3038 static int 3039 igc_sysctl_eee(SYSCTL_HANDLER_ARGS) 3040 { 3041 struct igc_adapter *adapter = (struct igc_adapter *) arg1; 3042 int error, value; 3043 3044 value = adapter->hw.dev_spec._i225.eee_disable; 3045 error = sysctl_handle_int(oidp, &value, 0, req); 3046 if (error || req->newptr == NULL) 3047 return (error); 3048 3049 adapter->hw.dev_spec._i225.eee_disable = (value != 0); 3050 igc_if_init(adapter->ctx); 3051 3052 return (0); 3053 } 3054 3055 static int 3056 igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3057 { 3058 struct igc_adapter *adapter; 3059 int error; 3060 int result; 3061 3062 result = -1; 3063 error = sysctl_handle_int(oidp, &result, 0, req); 3064 3065 if (error || !req->newptr) 3066 return (error); 3067 3068 if (result == 1) { 3069 adapter = (struct igc_adapter *) arg1; 3070 igc_print_debug_info(adapter); 3071 } 3072 3073 return (error); 3074 } 3075 3076 static int 3077 igc_get_rs(SYSCTL_HANDLER_ARGS) 3078 { 3079 struct igc_adapter *adapter = (struct igc_adapter *) arg1; 3080 int error; 3081 int result; 3082 3083 result = 0; 3084 error = sysctl_handle_int(oidp, &result, 0, req); 3085 3086 if (error || !req->newptr || result != 1) 3087 return (error); 3088 igc_dump_rs(adapter); 3089 3090 return (error); 3091 } 3092 3093 static void 3094 igc_if_debug(if_ctx_t ctx) 3095 { 3096 igc_dump_rs(iflib_get_softc(ctx)); 3097 } 3098 3099 /* 3100 * This routine is meant to be fluid, add whatever is 3101 * needed for debugging a problem. 
-jfv
3102 */
3103 static void
3104 igc_print_debug_info(struct igc_adapter *adapter)
3105 {
3106 device_t dev = iflib_get_dev(adapter->ctx);
3107 if_t ifp = iflib_get_ifp(adapter->ctx);
3108 struct tx_ring *txr = &adapter->tx_queues->txr;
3109 struct rx_ring *rxr = &adapter->rx_queues->rxr;
3110
3111 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
3112 printf("Interface is RUNNING ");
3113 else
3114 printf("Interface is NOT RUNNING ");
3115
3116 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
3117 printf("and INACTIVE\n");
3118 else
3119 printf("and ACTIVE\n");
3120
3121 for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
3122 device_printf(dev, "TX Queue %d ------\n", i);
3123 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
3124 IGC_READ_REG(&adapter->hw, IGC_TDH(i)),
3125 IGC_READ_REG(&adapter->hw, IGC_TDT(i)));
3126
3127 }
3128 for (int j = 0; j < adapter->rx_num_queues; j++, rxr++) {
3129 device_printf(dev, "RX Queue %d ------\n", j);
3130 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
3131 IGC_READ_REG(&adapter->hw, IGC_RDH(j)),
3132 IGC_READ_REG(&adapter->hw, IGC_RDT(j)));
3133 }
3134 }
3135
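
/*
 * Usage note for the flow-control sysctl handler above (a sketch,
 * assuming it is attached as dev.igc.<unit>.fc at attach time, which
 * is not shown in this file): "sysctl dev.igc.0.fc=3" requests full
 * flow control, 0 disables it, and 1/2 select RX-only or TX-only
 * pause, matching the value table documented at igc_set_flowcntl().
 */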