/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001-2024, Intel Corporation
 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
 * Copyright (c) 2021-2024 Rubicon Communications, LLC (Netgate)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "if_igc.h"
#include <sys/sbuf.h>
#include <machine/_inttypes.h>

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, String }
 *********************************************************************/

static const pci_vendor_info_t igc_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection - igc */
	PVID(0x8086, IGC_DEV_ID_I225_LM, "Intel(R) Ethernet Controller I225-LM"),
	PVID(0x8086, IGC_DEV_ID_I225_V, "Intel(R) Ethernet Controller I225-V"),
	PVID(0x8086, IGC_DEV_ID_I225_K, "Intel(R) Ethernet Controller I225-K"),
	PVID(0x8086, IGC_DEV_ID_I225_I, "Intel(R) Ethernet Controller I225-I"),
	PVID(0x8086, IGC_DEV_ID_I220_V, "Intel(R) Ethernet Controller I220-V"),
	PVID(0x8086, IGC_DEV_ID_I225_K2, "Intel(R) Ethernet Controller I225-K(2)"),
	PVID(0x8086, IGC_DEV_ID_I225_LMVP, "Intel(R) Ethernet Controller I225-LMvP(2)"),
	PVID(0x8086, IGC_DEV_ID_I226_K, "Intel(R) Ethernet Controller I226-K"),
	PVID(0x8086, IGC_DEV_ID_I226_LMVP, "Intel(R) Ethernet Controller I226-LMvP"),
	PVID(0x8086, IGC_DEV_ID_I225_IT, "Intel(R) Ethernet Controller I225-IT(2)"),
	PVID(0x8086, IGC_DEV_ID_I226_LM, "Intel(R) Ethernet Controller I226-LM"),
	PVID(0x8086, IGC_DEV_ID_I226_V, "Intel(R) Ethernet Controller I226-V"),
	PVID(0x8086, IGC_DEV_ID_I226_IT, "Intel(R) Ethernet Controller I226-IT"),
	PVID(0x8086, IGC_DEV_ID_I221_V, "Intel(R) Ethernet Controller I221-V"),
	PVID(0x8086, IGC_DEV_ID_I226_BLANK_NVM, "Intel(R) Ethernet Controller I226(blankNVM)"),
	PVID(0x8086, IGC_DEV_ID_I225_BLANK_NVM, "Intel(R) Ethernet Controller I225(blankNVM)"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static void *igc_register(device_t);
static int igc_if_attach_pre(if_ctx_t);
static int igc_if_attach_post(if_ctx_t);
static int igc_if_detach(if_ctx_t);
static int igc_if_shutdown(if_ctx_t);
static int igc_if_suspend(if_ctx_t);
static int igc_if_resume(if_ctx_t);

static int igc_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int igc_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void igc_if_queues_free(if_ctx_t);

static uint64_t igc_if_get_counter(if_ctx_t, ift_counter);
static void igc_if_init(if_ctx_t);
static void igc_if_stop(if_ctx_t);
static void igc_if_media_status(if_ctx_t, struct ifmediareq *);
static int igc_if_media_change(if_ctx_t);
static int igc_if_mtu_set(if_ctx_t, uint32_t);
static void igc_if_timer(if_ctx_t, uint16_t);
static void igc_if_watchdog_reset(if_ctx_t);
static bool igc_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static void igc_identify_hardware(if_ctx_t);
static int igc_allocate_pci_resources(if_ctx_t);
static void igc_free_pci_resources(if_ctx_t);
static void igc_reset(if_ctx_t);
static int igc_setup_interface(if_ctx_t);
static int igc_setup_msix(if_ctx_t);

static void igc_initialize_transmit_unit(if_ctx_t);
static void igc_initialize_receive_unit(if_ctx_t);

static void igc_if_intr_enable(if_ctx_t);
static void igc_if_intr_disable(if_ctx_t);
static int igc_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int igc_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
static void igc_if_multi_set(if_ctx_t);
static void igc_if_update_admin_status(if_ctx_t);
static void igc_if_debug(if_ctx_t);
static void igc_update_stats_counters(struct igc_softc *);
static void igc_add_hw_stats(struct igc_softc *);
static int igc_if_set_promisc(if_ctx_t, int);
static void igc_setup_vlan_hw_support(if_ctx_t);
static void igc_fw_version(struct igc_softc *);
static void igc_sbuf_fw_version(struct igc_fw_version *, struct sbuf *);
static void igc_print_fw_version(struct igc_softc *);
static int igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
static int igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
static void igc_print_nvm_info(struct igc_softc *);
static int igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int igc_get_rs(SYSCTL_HANDLER_ARGS);
static void igc_print_debug_info(struct igc_softc *);
static int igc_is_valid_ether_addr(u8 *);
static void igc_neweitr(struct igc_softc *, struct igc_rx_queue *,
    struct tx_ring *, struct rx_ring *);
/* Management and WOL Support */
static void igc_get_hw_control(struct igc_softc *);
static void igc_release_hw_control(struct igc_softc *);
static void igc_get_wakeup(if_ctx_t);
static void igc_enable_wakeup(if_ctx_t);

int igc_intr(void *);

/* MSI-X handlers */
static int igc_if_msix_intr_assign(if_ctx_t, int);
static int igc_msix_link(void *);
static void igc_handle_link(void *context);

static int igc_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int igc_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int igc_sysctl_eee(SYSCTL_HANDLER_ARGS);

static int igc_get_regs(SYSCTL_HANDLER_ARGS);

static void igc_configure_queues(struct igc_softc *);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/
static device_method_t igc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, igc_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};

static driver_t igc_driver = {
	"igc", igc_methods, sizeof(struct igc_softc),
};

DRIVER_MODULE(igc, pci, igc_driver, 0, 0);

MODULE_DEPEND(igc, pci, 1, 1, 1);
MODULE_DEPEND(igc, ether, 1, 1, 1);
MODULE_DEPEND(igc, iflib, 1, 1, 1);

IFLIB_PNP_INFO(pci, igc, igc_vendor_info_array);

static device_method_t igc_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, igc_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, igc_if_attach_post),
	DEVMETHOD(ifdi_detach, igc_if_detach),
	DEVMETHOD(ifdi_shutdown, igc_if_shutdown),
	DEVMETHOD(ifdi_suspend, igc_if_suspend),
	DEVMETHOD(ifdi_resume, igc_if_resume),
	DEVMETHOD(ifdi_init, igc_if_init),
	DEVMETHOD(ifdi_stop, igc_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, igc_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, igc_if_intr_enable),
	DEVMETHOD(ifdi_intr_disable, igc_if_intr_disable),
	DEVMETHOD(ifdi_tx_queues_alloc, igc_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, igc_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, igc_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, igc_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, igc_if_multi_set),
	DEVMETHOD(ifdi_media_status, igc_if_media_status),
	DEVMETHOD(ifdi_media_change, igc_if_media_change),
	DEVMETHOD(ifdi_mtu_set, igc_if_mtu_set),
	DEVMETHOD(ifdi_promisc_set, igc_if_set_promisc),
	DEVMETHOD(ifdi_timer, igc_if_timer),
	DEVMETHOD(ifdi_watchdog_reset, igc_if_watchdog_reset),
	DEVMETHOD(ifdi_get_counter, igc_if_get_counter),
	DEVMETHOD(ifdi_rx_queue_intr_enable, igc_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, igc_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_debug, igc_if_debug),
	DEVMETHOD(ifdi_needs_restart, igc_if_needs_restart),
	DEVMETHOD_END
};

static driver_t igc_if_driver = {
	"igc_if", igc_if_methods, sizeof(struct igc_softc)
};

/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO	0
#endif

static SYSCTL_NODE(_hw, OID_AUTO, igc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "igc driver parameters");

static int igc_disable_crc_stripping = 0;
SYSCTL_INT(_hw_igc, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
    &igc_disable_crc_stripping, 0, "Disable CRC Stripping");

static int igc_smart_pwr_down = false;
SYSCTL_INT(_hw_igc, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN,
    &igc_smart_pwr_down, 0,
    "Set to true to leave smart power down enabled on newer adapters");

/* Controls whether promiscuous also shows bad packets */
static int igc_debug_sbp = true;
SYSCTL_INT(_hw_igc, OID_AUTO, sbp, CTLFLAG_RDTUN, &igc_debug_sbp, 0,
    "Show bad packets in promiscuous mode");

/* Energy efficient ethernet - default to OFF */
static int igc_eee_setting = 1;
SYSCTL_INT(_hw_igc, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &igc_eee_setting, 0,
    "Disable Energy Efficient Ethernet (1 = disabled)");

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate is varied over time based on the
 * traffic for that interrupt vector
 */
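/*
 * A setting of 1 trims the 70k "lowest latency" EITR tier down to the 20k
 * tier; a setting of 2 permits it (see igc_neweitr() below).
 */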
static int igc_enable_aim = 1;
SYSCTL_INT(_hw_igc, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &igc_enable_aim,
    0, "Enable adaptive interrupt moderation (1=normal, 2=lowlatency)");

/*
** Tuneable Interrupt rate
*/
static int igc_max_interrupt_rate = IGC_INTS_DEFAULT;
SYSCTL_INT(_hw_igc, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &igc_max_interrupt_rate, 0, "Maximum interrupts per second");

extern struct if_txrx igc_txrx;

static struct if_shared_ctx igc_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IGC_TSO_SEG_SIZE,
	.isc_rx_maxsize = MAX_JUMBO_FRAME_SIZE,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM9BYTES,
	.isc_nfl = 1,
	.isc_nrxqs = 1,
	.isc_ntxqs = 1,
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = igc_vendor_info_array,
	.isc_driver_version = "1",
	.isc_driver = &igc_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,

	.isc_nrxd_min = {IGC_MIN_RXD},
	.isc_ntxd_min = {IGC_MIN_TXD},
	.isc_nrxd_max = {IGC_MAX_RXD},
	.isc_ntxd_max = {IGC_MAX_TXD},
	.isc_nrxd_default = {IGC_DEFAULT_RXD},
	.isc_ntxd_default = {IGC_DEFAULT_TXD},
};

/*****************************************************************
 *
 * Dump Registers
 *
 ****************************************************************/
#define IGC_REGS_LEN 739

static int
igc_get_regs(SYSCTL_HANDLER_ARGS)
{
	struct igc_softc *sc = (struct igc_softc *)arg1;
	struct igc_hw *hw = &sc->hw;
	struct sbuf *sb;
	u32 *regs_buff;
	int rc;

	regs_buff = malloc(sizeof(u32) * IGC_REGS_LEN, M_DEVBUF, M_WAITOK);
	memset(regs_buff, 0, IGC_REGS_LEN * sizeof(u32));

	rc = sysctl_wire_old_buffer(req, 0);
	MPASS(rc == 0);
	if (rc != 0) {
		free(regs_buff, M_DEVBUF);
		return (rc);
	}

	sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req);
	MPASS(sb != NULL);
	if (sb == NULL) {
		free(regs_buff, M_DEVBUF);
		return (ENOMEM);
	}

	/* General Registers */
	regs_buff[0] = IGC_READ_REG(hw, IGC_CTRL);
	regs_buff[1] = IGC_READ_REG(hw, IGC_STATUS);
	regs_buff[2] = IGC_READ_REG(hw, IGC_CTRL_EXT);
	regs_buff[3] = IGC_READ_REG(hw, IGC_ICR);
	regs_buff[4] = IGC_READ_REG(hw, IGC_RCTL);
	regs_buff[5] = IGC_READ_REG(hw, IGC_RDLEN(0));
	regs_buff[6] = IGC_READ_REG(hw, IGC_RDH(0));
	regs_buff[7] = IGC_READ_REG(hw, IGC_RDT(0));
	regs_buff[8] = IGC_READ_REG(hw, IGC_RXDCTL(0));
	regs_buff[9] = IGC_READ_REG(hw, IGC_RDBAL(0));
	regs_buff[10] = IGC_READ_REG(hw, IGC_RDBAH(0));
	regs_buff[11] = IGC_READ_REG(hw, IGC_TCTL);
	regs_buff[12] = IGC_READ_REG(hw, IGC_TDBAL(0));
	regs_buff[13] = IGC_READ_REG(hw, IGC_TDBAH(0));
	regs_buff[14] = IGC_READ_REG(hw, IGC_TDLEN(0));
	regs_buff[15] = IGC_READ_REG(hw, IGC_TDH(0));
	regs_buff[16] = IGC_READ_REG(hw, IGC_TDT(0));
	regs_buff[17] = IGC_READ_REG(hw, IGC_TXDCTL(0));

	sbuf_printf(sb, "General Registers\n");
	sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
	sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]);
344 sbuf_printf(sb, "\tCTRL_EXIT\t %08x\n\n", regs_buff[2]);

	sbuf_printf(sb, "Interrupt Registers\n");
	sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);

	sbuf_printf(sb, "RX Registers\n");
	sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
	sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]);
	sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]);
	sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
	sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]);
	sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]);
	sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]);

	sbuf_printf(sb, "TX Registers\n");
	sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
	sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]);
	sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]);
	sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
	sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]);
	sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]);
365 sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]);
366 sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]);
367 sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]);
368 sbuf_printf(sb, "\tTDFHS\t %08x\n", regs_buff[20]);
369 sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]);

	free(regs_buff, M_DEVBUF);

#ifdef DUMP_DESCS
	{
		if_softc_ctx_t scctx = sc->shared;
		struct rx_ring *rxr = &sc->rx_queues[0].rxr;
		struct tx_ring *txr = &sc->tx_queues[0].txr;
		int ntxd = scctx->isc_ntxd[0];
		int nrxd = scctx->isc_nrxd[0];
		int j;

		for (j = 0; j < nrxd; j++) {
			u32 staterr =
			    le32toh(rxr->rx_base[j].wb.upper.status_error);
			u16 length =
			    le16toh(rxr->rx_base[j].wb.upper.length);
			sbuf_printf(sb, "\tReceive Descriptor Address %d: %08"
			    PRIx64 " Error:%d Length:%d\n", j,
			    rxr->rx_base[j].read.pkt_addr, staterr, length);
		}

		for (j = 0; j < min(ntxd, 256); j++) {
			unsigned int *ptr = (unsigned int *)&txr->tx_base[j];

			sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x"
			    " [2]: %08x [3]: %08x DD=%d\n",
			    j, ptr[0], ptr[1], ptr[2], ptr[3],
			    !!(le32toh(txr->tx_base[j].wb.status) &
			    IGC_TXD_STAT_DD));
		}
	}
#endif

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}

static void *
igc_register(device_t dev)
{
	return (&igc_sctx_init);
}

static int
igc_set_num_queues(if_ctx_t ctx)
{
	int maxqueues;

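	/* I225/I226 class devices support up to four RX/TX queue pairs */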
	maxqueues = 4;

	return (maxqueues);
}

#define IGC_CAPS \
    IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \
    IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_TSO4 | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6 | IFCAP_TSO6

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
igc_if_attach_pre(if_ctx_t ctx)
{
	struct igc_softc *sc;
	if_softc_ctx_t scctx;
	device_t dev;
	struct igc_hw *hw;
	int error = 0;

	INIT_DEBUGOUT("igc_if_attach_pre: begin");
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);

	sc->ctx = sc->osdep.ctx = ctx;
	sc->dev = sc->osdep.dev = dev;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, igc_sysctl_nvm_info, "I", "NVM Information");

	sc->enable_aim = igc_enable_aim;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &sc->enable_aim, 0,
	    "Interrupt Moderation (1=normal, 2=lowlatency)");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    sc, 0, igc_sysctl_print_fw_version, "A",
	    "Prints FW/NVM Versions");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, igc_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, igc_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "reg_dump",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
	    igc_get_regs, "A", "Dump Registers");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rs_dump",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    igc_get_rs, "I", "Dump RS indexes");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dmac",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    igc_sysctl_dmac, "I", "DMA Coalesce");

	/* Determine hardware and mac info */
	igc_identify_hardware(ctx);

	scctx->isc_tx_nsegments = IGC_MAX_SCATTER;
	scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max =
	    igc_set_num_queues(ctx);
	if (bootverbose)
		device_printf(dev, "attach_pre capping queues at %d\n",
		    scctx->isc_ntxqsets_max);

	scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] *
	    sizeof(union igc_adv_tx_desc), IGC_DBA_ALIGN);
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] *
	    sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN);
	scctx->isc_txd_size[0] = sizeof(union igc_adv_tx_desc);
	scctx->isc_rxd_size[0] = sizeof(union igc_adv_rx_desc);
	scctx->isc_txrx = &igc_txrx;
	scctx->isc_tx_tso_segments_max = IGC_MAX_SCATTER;
	scctx->isc_tx_tso_size_max = IGC_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IGC_TSO_SEG_SIZE;
	scctx->isc_capabilities = scctx->isc_capenable = IGC_CAPS;
	scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_SCTP | CSUM_IP6_SCTP;

	/*
	** Some new devices, as with ixgbe, now may
	** use a different BAR, so we need to keep
	** track of which is used.
	*/
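	/*
	 * A BAR register that reads back as all zeros is unimplemented,
	 * in which case the MSI-X table must sit behind the next BAR.
	 */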
526 scctx->isc_msix_bar = PCIR_BAR(IGC_MSIX_BAR);
527 if (pci_read_config(dev, scctx->isc_msix_bar, 4) == 0)
528 scctx->isc_msix_bar += 4;

	/* Setup PCI resources */
	if (igc_allocate_pci_resources(ctx)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/* Do Shared Code initialization */
	error = igc_setup_init_funcs(hw, true);
	if (error) {
		device_printf(dev, "Setup of Shared code failed, error %d\n",
		    error);
		error = ENXIO;
		goto err_pci;
	}

	igc_setup_msix(ctx);
	igc_get_bus_info(hw);

	hw->mac.autoneg = DO_AUTO_NEG;
	hw->phy.autoneg_wait_to_complete = false;
	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
	}

	/*
	 * Set the frame limits assuming
	 * standard ethernet sized frames.
	 */
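	/* 1500 (ETHERMTU) + 14 (header) + 4 (FCS) = 1518 bytes */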
562 scctx->isc_max_frame_size = sc->hw.mac.max_frame_size =
563 ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
564
565 /* Allocate multicast array memory. */
566 sc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
567 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
568 if (sc->mta == NULL) {
569 device_printf(dev, "Can not allocate multicast setup array\n");
570 error = ENOMEM;
571 goto err_late;
572 }
573
574 /* Check SOL/IDER usage */
575 if (igc_check_reset_block(hw))
576 device_printf(dev, "PHY reset is blocked"
577 " due to SOL/IDER session.\n");
578
579 /* Sysctl for setting Energy Efficient Ethernet */
580 sc->hw.dev_spec._i225.eee_disable = igc_eee_setting;
581 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
582 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
583 OID_AUTO, "eee_control",
584 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
585 sc, 0, igc_sysctl_eee, "I",
586 "Disable Energy Efficient Ethernet");
587
588 /*
589 ** Start from a known state, this is
590 ** important in reading the nvm and
591 ** mac from that.
592 */
593 igc_reset_hw(hw);
594
595 /* Make sure we have a good EEPROM before we read from it */
596 if (igc_validate_nvm_checksum(hw) < 0) {
597 /*
598 ** Some PCI-E parts fail the first check due to
599 ** the link being in sleep state, call it again,
600 ** if it fails a second time its a real issue.
601 */
602 if (igc_validate_nvm_checksum(hw) < 0) {
603 device_printf(dev,
604 "The EEPROM Checksum Is Not Valid\n");
605 error = EIO;
606 goto err_late;
607 }
608 }
609
610 /* Copy the permanent MAC address out of the EEPROM */
611 if (igc_read_mac_addr(hw) < 0) {
612 device_printf(dev, "EEPROM read error while reading MAC"
613 " address\n");
614 error = EIO;
615 goto err_late;
616 }
617
618 if (!igc_is_valid_ether_addr(hw->mac.addr)) {
619 device_printf(dev, "Invalid MAC address\n");
620 error = EIO;
621 goto err_late;
622 }
623
624 /* Save the EEPROM/NVM versions */
625 igc_fw_version(sc);
626
627 igc_print_fw_version(sc);
628
629 /*
630 * Get Wake-on-Lan and Management info for later use
631 */
632 igc_get_wakeup(ctx);
633
634 /* Enable only WOL MAGIC by default */
635 scctx->isc_capenable &= ~IFCAP_WOL;
636 if (sc->wol != 0)
637 scctx->isc_capenable |= IFCAP_WOL_MAGIC;
638
639 iflib_set_mac(ctx, hw->mac.addr);
640
641 return (0);
642
643 err_late:
644 igc_release_hw_control(sc);
645 err_pci:
646 igc_free_pci_resources(ctx);
647 free(sc->mta, M_DEVBUF);
648
649 return (error);
650 }
651
652 static int
igc_if_attach_post(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_hw *hw = &sc->hw;
	int error = 0;

	/* Setup OS specific network interface */
	error = igc_setup_interface(ctx);
	if (error != 0) {
		goto err_late;
	}

	igc_reset(ctx);

	/* Initialize statistics */
	igc_update_stats_counters(sc);
	hw->mac.get_link_status = true;
	igc_if_update_admin_status(ctx);
	igc_add_hw_stats(sc);

	/* the driver can now take control from firmware */
	igc_get_hw_control(sc);

	INIT_DEBUGOUT("igc_if_attach_post: end");

	return (error);

err_late:
	igc_release_hw_control(sc);
	igc_free_pci_resources(ctx);
	igc_if_queues_free(ctx);
	free(sc->mta, M_DEVBUF);

	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
igc_if_detach(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("igc_if_detach: begin");

	igc_phy_hw_reset(&sc->hw);

	igc_release_hw_control(sc);
	igc_free_pci_resources(ctx);

	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
igc_if_shutdown(if_ctx_t ctx)
{
	return igc_if_suspend(ctx);
}

/*
 * Suspend/resume device methods.
 */
static int
igc_if_suspend(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);

	igc_release_hw_control(sc);
	igc_enable_wakeup(ctx);
	return (0);
}

static int
igc_if_resume(if_ctx_t ctx)
{
	igc_if_init(ctx);

	return (0);
}

static int
igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	int max_frame_size;
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);

	IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

	/* 9K Jumbo Frame size */
	max_frame_size = 9234;
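	/* i.e. 9216 bytes of payload plus 14-byte header and 4-byte FCS */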

	if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
		return (EINVAL);
	}

	scctx->isc_max_frame_size = sc->hw.mac.max_frame_size =
	    mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	return (0);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as an
 *  init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 **********************************************************************/
static void
igc_if_init(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	if_t ifp = iflib_get_ifp(ctx);
	struct igc_tx_queue *tx_que;
	int i;

	INIT_DEBUGOUT("igc_if_init: begin");

	/* Get the latest mac address, User can use a LAA */
	bcopy(if_getlladdr(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	igc_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/* Initialize the hardware */
	igc_reset(ctx);
	igc_if_update_admin_status(ctx);

	for (i = 0, tx_que = sc->tx_queues; i < sc->tx_num_queues;
	    i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		txr->tx_rs_cidx = txr->tx_rs_pidx;

		/* Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
	}

	/* Setup VLAN support, basic and offload if available */
	IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN);

	/* Prepare transmit descriptors and buffers */
	igc_initialize_transmit_unit(ctx);

	/* Setup Multicast table */
	igc_if_multi_set(ctx);

	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
	igc_initialize_receive_unit(ctx);

	/* Set up VLAN support */
	igc_setup_vlan_hw_support(ctx);

	/* Don't lose promiscuous settings */
	igc_if_set_promisc(ctx, if_getflags(ifp));
	igc_clear_hw_cntrs_base_generic(&sc->hw);

	if (sc->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
		igc_configure_queues(sc);

	/* this clears any pending interrupts */
	IGC_READ_REG(&sc->hw, IGC_ICR);
	IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC);

	/* the driver can now take control from firmware */
	igc_get_hw_control(sc);

	/* Set Energy Efficient Ethernet */
	igc_set_eee_i225(&sc->hw, true, true, true);
}

enum eitr_latency_target {
	eitr_latency_disabled = 0,
	eitr_latency_lowest = 1,
	eitr_latency_low = 2,
	eitr_latency_bulk = 3
};

/*********************************************************************
 *
 *  Helper to calculate next EITR value for AIM
 *
 *********************************************************************/
static void
igc_neweitr(struct igc_softc *sc, struct igc_rx_queue *que,
    struct tx_ring *txr, struct rx_ring *rxr)
{
859 struct igc_hw *hw = &sc->hw;
860 u32 neweitr;
861 u32 bytes;
862 u32 bytes_packets;
863 u32 packets;
864 u8 nextlatency;

	/* Idle, do nothing */
	if ((txr->tx_bytes == 0) && (rxr->rx_bytes == 0))
		return;

	neweitr = 0;

	if (sc->enable_aim) {
		nextlatency = rxr->rx_nextlatency;

		/* Use half default (4K) ITR if sub-gig */
		if (sc->link_speed < 1000) {
			neweitr = IGC_INTS_4K;
			goto igc_set_next_eitr;
		}
		/* Want at least enough packet buffer for two frames to AIM */
		if (sc->shared->isc_max_frame_size * 2 > (sc->pba << 10)) {
			neweitr = igc_max_interrupt_rate;
			sc->enable_aim = 0;
			goto igc_set_next_eitr;
		}

		/* Get the largest values from the associated tx and rx ring */
		if (txr->tx_bytes && txr->tx_packets) {
			bytes = txr->tx_bytes;
			bytes_packets = txr->tx_bytes / txr->tx_packets;
			packets = txr->tx_packets;
		}
		if (rxr->rx_bytes && rxr->rx_packets) {
			bytes = max(bytes, rxr->rx_bytes);
			bytes_packets = max(bytes_packets,
			    rxr->rx_bytes / rxr->rx_packets);
			packets = max(packets, rxr->rx_packets);
		}

		/* Latency state machine */
		switch (nextlatency) {
		case eitr_latency_disabled: /* Bootstrapping */
			nextlatency = eitr_latency_low;
			break;
		case eitr_latency_lowest: /* 70k ints/s */
			/* TSO and jumbo frames */
			if (bytes_packets > 8000)
				nextlatency = eitr_latency_bulk;
			else if ((packets < 5) && (bytes > 512))
				nextlatency = eitr_latency_low;
			break;
		case eitr_latency_low: /* 20k ints/s */
			if (bytes > 10000) {
				/* Handle TSO */
				if (bytes_packets > 8000)
					nextlatency = eitr_latency_bulk;
				else if ((packets < 10) ||
				    (bytes_packets > 1200))
					nextlatency = eitr_latency_bulk;
				else if (packets > 35)
					nextlatency = eitr_latency_lowest;
			} else if (bytes_packets > 2000) {
				nextlatency = eitr_latency_bulk;
			} else if (packets < 3 && bytes < 512) {
				nextlatency = eitr_latency_lowest;
			}
			break;
		case eitr_latency_bulk: /* 4k ints/s */
			if (bytes > 25000) {
				if (packets > 35)
					nextlatency = eitr_latency_low;
			} else if (bytes < 1500)
				nextlatency = eitr_latency_low;
			break;
		default:
			device_printf(sc->dev,
			    "Unexpected neweitr transition %d\n",
			    nextlatency);
			nextlatency = eitr_latency_low;
			break;
		}

		/* Trim itr_latency_lowest for default AIM setting */
		if (sc->enable_aim == 1 && nextlatency == eitr_latency_lowest)
			nextlatency = eitr_latency_low;

		/* Request new latency */
		rxr->rx_nextlatency = nextlatency;
	} else {
		/* We may have toggled to AIM disabled */
		nextlatency = eitr_latency_disabled;
		rxr->rx_nextlatency = nextlatency;
	}

	/* ITR state machine */
	switch (nextlatency) {
	case eitr_latency_lowest:
		neweitr = IGC_INTS_70K;
		break;
	case eitr_latency_low:
		neweitr = IGC_INTS_20K;
		break;
	case eitr_latency_bulk:
		neweitr = IGC_INTS_4K;
		break;
	case eitr_latency_disabled:
	default:
		neweitr = igc_max_interrupt_rate;
		break;
	}

igc_set_next_eitr:
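	/*
	 * Convert the ints/sec target into the EITR interval encoding; the
	 * CNT_IGNR bit keeps the hardware from resetting its internal
	 * interval counters when the register is rewritten.
	 */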
	neweitr = IGC_INTS_TO_EITR(neweitr);

	neweitr |= IGC_EITR_CNT_IGNR;

	if (neweitr != que->eitr_setting) {
		que->eitr_setting = neweitr;
		IGC_WRITE_REG(hw, IGC_EITR(que->msix), que->eitr_setting);
	}
}

/*********************************************************************
 *
 *  Fast Legacy/MSI Combined Interrupt Service routine
 *
 *********************************************************************/
int
igc_intr(void *arg)
{
	struct igc_softc *sc = arg;
	struct igc_hw *hw = &sc->hw;
	struct igc_rx_queue *que = &sc->rx_queues[0];
	struct tx_ring *txr = &sc->tx_queues[0].txr;
	struct rx_ring *rxr = &que->rxr;
	if_ctx_t ctx = sc->ctx;
	u32 reg_icr;

	reg_icr = IGC_READ_REG(hw, IGC_ICR);

	/* Hot eject? */
	if (reg_icr == 0xffffffff)
		return (FILTER_STRAY);

	/* Definitely not our interrupt. */
	if (reg_icr == 0x0)
		return (FILTER_STRAY);

	if ((reg_icr & IGC_ICR_INT_ASSERTED) == 0)
		return (FILTER_STRAY);

	/*
	 * Only MSI-X interrupts have one-shot behavior by taking advantage
	 * of the EIAC register. Thus, explicitly disable interrupts. This
	 * also works around the MSI message reordering errata on certain
	 * systems.
	 */
	IFDI_INTR_DISABLE(ctx);

	/* Link status change */
	if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC))
		igc_handle_link(ctx);

	if (reg_icr & IGC_ICR_RXO)
		sc->rx_overruns++;

	igc_neweitr(sc, que, txr, rxr);

	/* Reset state */
	txr->tx_bytes = 0;
	txr->tx_packets = 0;
	rxr->rx_bytes = 0;
	rxr->rx_packets = 0;

	return (FILTER_SCHEDULE_THREAD);
}

static int
igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_rx_queue *rxq = &sc->rx_queues[rxqid];

	IGC_WRITE_REG(&sc->hw, IGC_EIMS, rxq->eims);
	return (0);
}

static int
igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_tx_queue *txq = &sc->tx_queues[txqid];

	IGC_WRITE_REG(&sc->hw, IGC_EIMS, txq->eims);
	return (0);
}

/*********************************************************************
 *
 *  MSI-X RX Interrupt Service routine
 *
 **********************************************************************/
static int
igc_msix_que(void *arg)
{
	struct igc_rx_queue *que = arg;
	struct igc_softc *sc = que->sc;
	struct tx_ring *txr = &sc->tx_queues[que->msix].txr;
	struct rx_ring *rxr = &que->rxr;

	++que->irqs;

	igc_neweitr(sc, que, txr, rxr);

	/* Reset state */
	txr->tx_bytes = 0;
	txr->tx_packets = 0;
	rxr->rx_bytes = 0;
	rxr->rx_packets = 0;

	return (FILTER_SCHEDULE_THREAD);
}

/*********************************************************************
 *
 *  MSI-X Link Fast Interrupt Service routine
 *
 **********************************************************************/
static int
igc_msix_link(void *arg)
{
	struct igc_softc *sc = arg;
	u32 reg_icr;

	++sc->link_irq;
	MPASS(sc->hw.back != NULL);
	reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);

	if (reg_icr & IGC_ICR_RXO)
		sc->rx_overruns++;

	if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
		igc_handle_link(sc->ctx);
	}

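	/* Re-enable the link-change cause and re-arm the link MSI-X vector */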
1103 IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC);
1104 IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->link_mask);
1105
1106 return (FILTER_HANDLED);
1107 }
1108
1109 static void
igc_handle_link(void *context)
{
	if_ctx_t ctx = context;
	struct igc_softc *sc = iflib_get_softc(ctx);

	sc->hw.mac.get_link_status = true;
	iflib_admin_intr_deferred(ctx);
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
igc_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct igc_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("igc_if_media_status: begin");

	iflib_admin_intr_deferred(ctx);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case 100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case 1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case 2500:
		ifmr->ifm_active |= IFM_2500_T;
		break;
	}

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
static int
igc_if_media_change(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("igc_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	sc->hw.mac.autoneg = DO_AUTO_NEG;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_2500_T:
		sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
		break;
	case IFM_1000_T:
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
		else
			sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
		break;
	case IFM_10_T:
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
		else
			sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
		break;
	default:
		device_printf(sc->dev, "Unsupported media type\n");
	}

	igc_if_init(ctx);

	return (0);
}

static int
igc_if_set_promisc(if_ctx_t ctx, int flags)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	u32 reg_rctl;
	int mcnt = 0;

	reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
	reg_rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_UPE);
	if (flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else
		mcnt = min(if_llmaddr_count(ifp),
		    MAX_NUM_MULTICAST_ADDRESSES);

	/* Don't disable if in MAX groups */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg_rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);

	if (flags & IFF_PROMISC) {
		reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (igc_debug_sbp)
			reg_rctl |= IGC_RCTL_SBP;
		IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
	} else if (flags & IFF_ALLMULTI) {
		reg_rctl |= IGC_RCTL_MPE;
		reg_rctl &= ~IGC_RCTL_UPE;
		IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
	}
	return (0);
}

static u_int
igc_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
	u8 *mta = arg;

	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
		return (0);

	bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN);

	return (1);
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/

static void
igc_if_multi_set(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	u8 *mta; /* Multicast array memory */
	u32 reg_rctl = 0;
	int mcnt = 0;

1281 IOCTL_DEBUGOUT("igc_set_multi: begin");

	mta = sc->mta;
	bzero(mta, sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	mcnt = if_foreach_llmaddr(ifp, igc_copy_maddr, mta);

	reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);

	if (if_getflags(ifp) & IFF_PROMISC) {
		reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (igc_debug_sbp)
			reg_rctl |= IGC_RCTL_SBP;
	} else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    if_getflags(ifp) & IFF_ALLMULTI) {
		reg_rctl |= IGC_RCTL_MPE;
		reg_rctl &= ~IGC_RCTL_UPE;
	} else
		reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		igc_update_mc_addr_list(&sc->hw, mta, mcnt);

	IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine schedules igc_if_update_admin_status() to check for
 *  link status and to gather statistics as well as to perform some
 *  controller-specific hardware patting.
 *
 **********************************************************************/
static void
igc_if_timer(if_ctx_t ctx, uint16_t qid)
{

	if (qid != 0)
		return;

	iflib_admin_intr_deferred(ctx);
}

static void
igc_if_update_admin_status(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u32 link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;
	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case igc_media_type_copper:
		if (hw->mac.get_link_status == true) {
			/* Do the work to read phy */
			igc_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else
			link_check = true;
		break;
	case igc_media_type_unknown:
		igc_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && (sc->link_active == 0)) {
		igc_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    sc->link_speed,
			    ((sc->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		sc->link_active = 1;
		iflib_link_state_change(ctx, LINK_STATE_UP,
		    IF_Mbps(sc->link_speed));
	} else if (!link_check && (sc->link_active == 1)) {
		sc->link_speed = 0;
		sc->link_duplex = 0;
		sc->link_active = 0;
		iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
	}
	igc_update_stats_counters(sc);
}

static void
igc_if_watchdog_reset(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);

	/*
	 * Just count the event; iflib(4) will already trigger a
	 * sufficient reset of the controller.
	 */
	sc->watchdog_events++;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
igc_if_stop(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("igc_if_stop: begin");

	igc_reset_hw(&sc->hw);
	IGC_WRITE_REG(&sc->hw, IGC_WUC, 0);
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
igc_identify_hardware(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct igc_softc *sc = iflib_get_softc(ctx);

	/* Make sure our PCI config space has the necessary stuff set */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);

	/* Save off the information about this board */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Do Shared Code Init and Setup */
	if (igc_set_mac_type(&sc->hw)) {
		device_printf(dev, "Setup init failure\n");
		return;
	}
}

static int
igc_allocate_pci_resources(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	int rid;

	rid = PCIR_BAR(0);
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev,
		    "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);
	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

	sc->hw.back = &sc->osdep;

	return (0);
}

/*********************************************************************
 *
 *  Set up the MSI-X Interrupt handlers
 *
 **********************************************************************/
static int
igc_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_rx_queue *rx_que = sc->rx_queues;
	struct igc_tx_queue *tx_que = sc->tx_queues;
	int error, rid, i, vector = 0, rx_vectors;
	char buf[16];

	/* First set up ring resources */
	for (i = 0; i < sc->rx_num_queues; i++, rx_que++, vector++) {
		rid = vector + 1;
		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, igc_msix_que, rx_que, rx_que->me, buf);
		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d\n",
			    i, error);
			sc->rx_num_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;

		/*
		 * Set the bit to enable interrupt
		 * in IGC_IMS -- bits 20 and 21
		 * are for RX0 and RX1, note this has
		 * NOTHING to do with the MSI-X vector
		 */
		rx_que->eims = 1 << vector;
	}
	rx_vectors = vector;

	vector = 0;
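	/*
	 * TX queues piggyback on the RX vectors: each TX softirq is hung
	 * off the IRQ of the RX queue with the same index (modulo the
	 * number of RX queues).
	 */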
1495 for (i = 0; i < sc->tx_num_queues; i++, tx_que++, vector++) {
1496 snprintf(buf, sizeof(buf), "txq%d", i);
1497 tx_que = &sc->tx_queues[i];
1498 iflib_softirq_alloc_generic(ctx,
1499 &sc->rx_queues[i % sc->rx_num_queues].que_irq,
1500 IFLIB_INTR_TX, tx_que, tx_que->me, buf);
1501
1502 tx_que->msix = (vector % sc->rx_num_queues);
1503
1504 /*
1505 * Set the bit to enable interrupt
1506 * in IGC_IMS -- bits 22 and 23
1507 * are for TX0 and TX1, note this has
1508 * NOTHING to do with the MSI-X vector
1509 */
1510 tx_que->eims = 1 << i;
1511 }
1512
1513 /* Link interrupt */
1514 rid = rx_vectors + 1;
1515 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, IFLIB_INTR_ADMIN, igc_msix_link, sc, 0, "aq");
1516
1517 if (error) {
1518 device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
1519 goto fail;
1520 }
1521 sc->linkvec = rx_vectors;
1522 return (0);
1523 fail:
1524 iflib_irq_free(ctx, &sc->irq);
1525 rx_que = sc->rx_queues;
1526 for (int i = 0; i < sc->rx_num_queues; i++, rx_que++)
1527 iflib_irq_free(ctx, &rx_que->que_irq);
1528 return (error);
1529 }
1530
1531 static void
igc_configure_queues(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	struct igc_rx_queue *rx_que;
	struct igc_tx_queue *tx_que;
	u32 ivar = 0, newitr = 0;

	/* First turn on RSS capability */
	IGC_WRITE_REG(hw, IGC_GPIE,
	    IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME | IGC_GPIE_PBA |
	    IGC_GPIE_NSICR);

	/* Turn on MSI-X */
	/* RX entries */
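	/*
	 * Each 32-bit IVAR register maps two queue pairs: byte 0 holds
	 * RX(2n), byte 1 TX(2n), byte 2 RX(2n+1) and byte 3 TX(2n+1),
	 * with IGC_IVAR_VALID marking an entry as in use.
	 */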
1546 for (int i = 0; i < sc->rx_num_queues; i++) {
1547 u32 index = i >> 1;
1548 ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
1549 rx_que = &sc->rx_queues[i];
1550 if (i & 1) {
1551 ivar &= 0xFF00FFFF;
1552 ivar |= (rx_que->msix | IGC_IVAR_VALID) << 16;
1553 } else {
1554 ivar &= 0xFFFFFF00;
1555 ivar |= rx_que->msix | IGC_IVAR_VALID;
1556 }
1557 IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
1558 }
1559 /* TX entries */
1560 for (int i = 0; i < sc->tx_num_queues; i++) {
1561 u32 index = i >> 1;
1562 ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
1563 tx_que = &sc->tx_queues[i];
1564 if (i & 1) {
1565 ivar &= 0x00FFFFFF;
1566 ivar |= (tx_que->msix | IGC_IVAR_VALID) << 24;
1567 } else {
1568 ivar &= 0xFFFF00FF;
1569 ivar |= (tx_que->msix | IGC_IVAR_VALID) << 8;
1570 }
1571 IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
1572 sc->que_mask |= tx_que->eims;
1573 }
1574
1575 /* And for the link interrupt */
1576 ivar = (sc->linkvec | IGC_IVAR_VALID) << 8;
1577 sc->link_mask = 1 << sc->linkvec;
1578 IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);
1579
1580 /* Set the starting interrupt rate */
1581 if (igc_max_interrupt_rate > 0)
1582 newitr = IGC_INTS_TO_EITR(igc_max_interrupt_rate);
1583
1584 newitr |= IGC_EITR_CNT_IGNR;
1585
1586 for (int i = 0; i < sc->rx_num_queues; i++) {
1587 rx_que = &sc->rx_queues[i];
1588 IGC_WRITE_REG(hw, IGC_EITR(rx_que->msix), newitr);
1589 }
1590
1591 return;
1592 }
1593
1594 static void
igc_free_pci_resources(if_ctx_t ctx)
{
	struct igc_softc *sc = iflib_get_softc(ctx);
	struct igc_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	for (int i = 0; i < sc->rx_num_queues; i++, que++) {
		iflib_irq_free(ctx, &que->que_irq);
	}

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->memory), sc->memory);
		sc->memory = NULL;
	}

	if (sc->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->flash), sc->flash);
		sc->flash = NULL;
	}

	if (sc->ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT,
		    rman_get_rid(sc->ioport), sc->ioport);
		sc->ioport = NULL;
	}
}

/* Set up MSI or MSI-X */
static int
igc_setup_msix(if_ctx_t ctx)
{
	return (0);
}

/*********************************************************************
 *
 *  Initialize the DMA Coalescing feature
 *
 **********************************************************************/
static void
igc_init_dmac(struct igc_softc *sc, u32 pba)
{
	device_t dev = sc->dev;
	struct igc_hw *hw = &sc->hw;
	u32 dmac, reg = ~IGC_DMACR_DMAC_EN;
	u16 hwm;
	u16 max_frame_size;
	int status;

	max_frame_size = sc->shared->isc_max_frame_size;

	if (sc->dmac == 0) { /* Disabling it */
		IGC_WRITE_REG(hw, IGC_DMACR, reg);
		return;
	} else
		device_printf(dev, "DMA Coalescing enabled\n");

	/* Set starting threshold */
	IGC_WRITE_REG(hw, IGC_DMCTXTH, 0);

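	/*
	 * pba is in KB and the watermark is kept in 16-byte units, so
	 * 64 * pba is the whole packet buffer in that granularity; back
	 * off by one max frame, but stay within 6KB of the buffer top.
	 */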
	hwm = 64 * pba - max_frame_size / 16;
	if (hwm < 64 * (pba - 6))
		hwm = 64 * (pba - 6);
	reg = IGC_READ_REG(hw, IGC_FCRTC);
	reg &= ~IGC_FCRTC_RTH_COAL_MASK;
	reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT)
	    & IGC_FCRTC_RTH_COAL_MASK);
	IGC_WRITE_REG(hw, IGC_FCRTC, reg);

	dmac = pba - max_frame_size / 512;
	if (dmac < pba - 10)
		dmac = pba - 10;
	reg = IGC_READ_REG(hw, IGC_DMACR);
	reg &= ~IGC_DMACR_DMACTHR_MASK;
	reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT)
	    & IGC_DMACR_DMACTHR_MASK);

	/* transition to L0s or L1 if available */
	reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK);

	/* Check if status is 2.5Gb backplane connection
	 * before configuration of watchdog timer, which is
	 * in msec values in 12.8usec intervals
	 * watchdog timer = msec values in 32usec intervals
	 * for non 2.5Gb connection
	 */
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
		reg |= ((sc->dmac * 5) >> 6);
	else
		reg |= (sc->dmac >> 5);

	IGC_WRITE_REG(hw, IGC_DMACR, reg);

	IGC_WRITE_REG(hw, IGC_DMCRTRH, 0);

	/* Set the interval before transition */
	reg = IGC_READ_REG(hw, IGC_DMCTLX);
	reg |= IGC_DMCTLX_DCFLUSH_DIS;

	/*
	** On a 2.5Gb connection the TTLX unit is 0.4 usec, so a 4 usec
	** delay is encoded as 0xA (4 / 0.4 = 10); the delay itself is
	** still 4 usec either way.
	*/
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
		reg |= 0xA;
	else
		reg |= 0x4;

	IGC_WRITE_REG(hw, IGC_DMCTLX, reg);

	/* free space in tx packet buffer to wake from DMA coal */
	IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE -
	    (2 * max_frame_size)) >> 6);

	/* make low power state decision controlled by DMA coal */
	reg = IGC_READ_REG(hw, IGC_PCIEMISC);
	reg &= ~IGC_PCIEMISC_LX_DECISION;
	IGC_WRITE_REG(hw, IGC_PCIEMISC, reg);
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  softc structure.
 *
 **********************************************************************/
static void
igc_reset(if_ctx_t ctx)
1733 {
1734 device_t dev = iflib_get_dev(ctx);
1735 struct igc_softc *sc = iflib_get_softc(ctx);
1736 struct igc_hw *hw = &sc->hw;
1737 u32 rx_buffer_size;
1738 u32 pba;
1739
1740 INIT_DEBUGOUT("igc_reset: begin");
1741 /* Let the firmware know the OS is in control */
1742 igc_get_hw_control(sc);
1743
1744 /*
1745 * Packet Buffer Allocation (PBA)
1746 * Writing PBA sets the receive portion of the buffer
1747 * the remainder is used for the transmit buffer.
1748 */
1749 pba = IGC_PBA_34K;
1750
1751 INIT_DEBUGOUT1("igc_reset: pba=%dK", pba);
1752
1753 /*
1754 * These parameters control the automatic generation (Tx) and
1755 * response (Rx) to Ethernet PAUSE frames.
1756 * - High water mark should allow for at least two frames to be
1757 * received after sending an XOFF.
1758 * - Low water mark works best when it is very near the high water mark.
1759 * This allows the receiver to restart by sending XON when it has
1760 * drained a bit. Here we use an arbitrary value of 1500 which will
1761 * restart after one full frame is pulled from the buffer. There
1762 * could be several smaller frames in the buffer and if so they will
1763 * not trigger the XON until their total number reduces the buffer
1764 * by 1500.
1765 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1766 */
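/*
 * Example: pba == IGC_PBA_34K gives rx_buffer_size == 34 << 10 ==
 * 34816 bytes; with a 1522-byte max frame the high water mark is
 * 34816 - roundup2(1522, 1024) == 32768 and the low water mark 32752.
 */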
1767 rx_buffer_size = (pba & 0xffff) << 10;
1768 hw->fc.high_water = rx_buffer_size -
1769 roundup2(sc->hw.mac.max_frame_size, 1024);
1770 /* 16-byte granularity */
1771 hw->fc.low_water = hw->fc.high_water - 16;
1772
1773 if (sc->fc) /* locally set flow control value? */
1774 hw->fc.requested_mode = sc->fc;
1775 else
1776 hw->fc.requested_mode = igc_fc_full;
1777
1778 hw->fc.pause_time = IGC_FC_PAUSE_TIME;
1779
1780 hw->fc.send_xon = true;
1781
1782 /* Issue a global reset */
1783 igc_reset_hw(hw);
1784 IGC_WRITE_REG(hw, IGC_WUC, 0);
1785
1786 /* and a re-init */
1787 if (igc_init_hw(hw) < 0) {
1788 device_printf(dev, "Hardware Initialization Failed\n");
1789 return;
1790 }
1791
1792 /* Setup DMA Coalescing */
1793 igc_init_dmac(sc, pba);
1794
1795 /* Save the final PBA off if it needs to be used elsewhere i.e. AIM */
1796 sc->pba = pba;
1797
1798 IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
1799 igc_get_phy_info(hw);
1800 igc_check_for_link(hw);
1801 }
1802
1803 /*
1804 * Initialize the RSS mapping for NICs that support multiple transmit/
1805 * receive rings.
1806 */
1807
1808 #define RSSKEYLEN 10
1809 static void
1810 igc_initialize_rss_mapping(struct igc_softc *sc)
1811 {
1812 struct igc_hw *hw = &sc->hw;
1813 int i;
1814 int queue_id;
1815 u32 reta;
1816 u32 rss_key[RSSKEYLEN], mrqc, shift = 0;
1817
1818 /*
1819 * The redirection table controls which destination
1820 * queue each bucket redirects traffic to.
1821 * Each DWORD represents four queues, with the LSB
1822 * being the first queue in the DWORD.
1823 *
1824 * This just allocates buckets to queues using round-robin
1825 * allocation.
1826 *
1827 * NOTE: It Just Happens to line up with the default
1828 * RSS allocation method.
1829 */
1830
1831 /* Warning FM follows */
1832 reta = 0;
1833 for (i = 0; i < 128; i++) {
1834 #ifdef RSS
1835 queue_id = rss_get_indirection_to_bucket(i);
1836 /*
1837 * If we have more queues than buckets, we'll
1838 * end up mapping buckets to a subset of the
1839 * queues.
1840 *
1841 * If we have more buckets than queues, we'll
1842 * end up instead assigning multiple buckets
1843 * to queues.
1844 *
1845 * Both are suboptimal, but we need to handle
1846 * the case so we don't go out of bounds
1847 * indexing arrays and such.
1848 */
1849 queue_id = queue_id % sc->rx_num_queues;
1850 #else
1851 queue_id = (i % sc->rx_num_queues);
1852 #endif
1853 /* Adjust if required */
1854 queue_id = queue_id << shift;
1855
1856 /*
1857 * The low 8 bits are for hash value (n+0);
1858 * The next 8 bits are for hash value (n+1), etc.
1859 */
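/*
 * Example: with four RX queues and shift == 0, buckets 0-3 pack
 * into reta == 0x03020100, i.e. queue 0 for bucket 0 ends up in
 * the least significant byte of RETA(0).
 */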
1860 reta = reta >> 8;
1861 reta = reta | (((uint32_t)queue_id) << 24);
1862 if ((i & 3) == 3) {
1863 IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta);
1864 reta = 0;
1865 }
1866 }
1867
1868 /* Now fill in hash table */
1869
1870 /*
1871 * MRQC: Multiple Receive Queues Command
1872 * Set queuing to RSS control, number depends on the device.
1873 */
1874 mrqc = IGC_MRQC_ENABLE_RSS_4Q;
1875
1876 #ifdef RSS
1877 /* XXX ew typecasting */
1878 rss_getkey((uint8_t *) &rss_key);
1879 #else
1880 arc4rand(&rss_key, sizeof(rss_key), 0);
1881 #endif
1882 for (i = 0; i < RSSKEYLEN; i++)
1883 IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]);
1884
1885 /*
1886 * Configure the RSS fields to hash upon.
1887 */
1888 mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 |
1889 IGC_MRQC_RSS_FIELD_IPV4_TCP);
1890 mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 |
1891 IGC_MRQC_RSS_FIELD_IPV6_TCP);
1892 mrqc |= (IGC_MRQC_RSS_FIELD_IPV4_UDP |
1893 IGC_MRQC_RSS_FIELD_IPV6_UDP);
1894 mrqc |= (IGC_MRQC_RSS_FIELD_IPV6_UDP_EX |
1895 IGC_MRQC_RSS_FIELD_IPV6_TCP_EX);
1896
1897 IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
1898 }
1899
1900 /*********************************************************************
1901 *
1902 * Setup networking device structure and register interface media.
1903 *
1904 **********************************************************************/
1905 static int
1906 igc_setup_interface(if_ctx_t ctx)
1907 {
1908 if_t ifp = iflib_get_ifp(ctx);
1909 struct igc_softc *sc = iflib_get_softc(ctx);
1910 if_softc_ctx_t scctx = sc->shared;
1911
1912 INIT_DEBUGOUT("igc_setup_interface: begin");
1913
1914 /* Single Queue */
1915 if (sc->tx_num_queues == 1) {
1916 if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1);
1917 if_setsendqready(ifp);
1918 }
1919
1920 /*
1921 * Specify the media types supported by this adapter and register
1922 * callbacks to update media and link information
1923 */
1924 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1925 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
1926 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1927 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
1928 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1929 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1930 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1931
1932 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1933 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1934 return (0);
1935 }
1936
1937 static int
1938 igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
1939 {
1940 struct igc_softc *sc = iflib_get_softc(ctx);
1941 if_softc_ctx_t scctx = sc->shared;
1942 int error = IGC_SUCCESS;
1943 struct igc_tx_queue *que;
1944 int i, j;
1945
1946 MPASS(sc->tx_num_queues > 0);
1947 MPASS(sc->tx_num_queues == ntxqsets);
1948
1949 /* First allocate the top level queue structs */
1950 if (!(sc->tx_queues =
1951 (struct igc_tx_queue *) malloc(sizeof(struct igc_tx_queue) *
1952 sc->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1953 device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
1954 return(ENOMEM);
1955 }
1956
1957 for (i = 0, que = sc->tx_queues; i < sc->tx_num_queues; i++, que++) {
1958 /* Set up some basics */
1959
1960 struct tx_ring *txr = &que->txr;
1961 txr->sc = que->sc = sc;
1962 que->me = txr->me = i;
1963
1964 /* Allocate report status array */
1965 if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
1966 device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n");
1967 error = ENOMEM;
1968 goto fail;
1969 }
1970 for (j = 0; j < scctx->isc_ntxd[0]; j++)
1971 txr->tx_rsq[j] = QIDX_INVALID;
1972 /* get the virtual and physical address of the hardware queues */
1973 txr->tx_base = (struct igc_tx_desc *)vaddrs[i*ntxqs];
1974 txr->tx_paddr = paddrs[i*ntxqs];
1975 }
1976
1977 if (bootverbose)
1978 device_printf(iflib_get_dev(ctx),
1979 "allocated for %d tx_queues\n", sc->tx_num_queues);
1980 return (0);
1981 fail:
1982 igc_if_queues_free(ctx);
1983 return (error);
1984 }
1985
1986 static int
1987 igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1988 {
1989 struct igc_softc *sc = iflib_get_softc(ctx);
1990 int error = IGC_SUCCESS;
1991 struct igc_rx_queue *que;
1992 int i;
1993
1994 MPASS(sc->rx_num_queues > 0);
1995 MPASS(sc->rx_num_queues == nrxqsets);
1996
1997 /* First allocate the top level queue structs */
1998 if (!(sc->rx_queues =
1999 (struct igc_rx_queue *) malloc(sizeof(struct igc_rx_queue) *
2000 sc->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2001 device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
2002 error = ENOMEM;
2003 goto fail;
2004 }
2005
2006 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
2007 /* Set up some basics */
2008 struct rx_ring *rxr = &que->rxr;
2009 rxr->sc = que->sc = sc;
2010 rxr->que = que;
2011 que->me = rxr->me = i;
2012
2013 /* get the virtual and physical address of the hardware queues */
2014 rxr->rx_base = (union igc_rx_desc_extended *)vaddrs[i*nrxqs];
2015 rxr->rx_paddr = paddrs[i*nrxqs];
2016 }
2017
2018 if (bootverbose)
2019 device_printf(iflib_get_dev(ctx),
2020 "allocated for %d rx_queues\n", sc->rx_num_queues);
2021
2022 return (0);
2023 fail:
2024 igc_if_queues_free(ctx);
2025 return (error);
2026 }
2027
2028 static void
2029 igc_if_queues_free(if_ctx_t ctx)
2030 {
2031 struct igc_softc *sc = iflib_get_softc(ctx);
2032 struct igc_tx_queue *tx_que = sc->tx_queues;
2033 struct igc_rx_queue *rx_que = sc->rx_queues;
2034
2035 if (tx_que != NULL) {
2036 for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) {
2037 struct tx_ring *txr = &tx_que->txr;
2038 if (txr->tx_rsq == NULL)
2039 break;
2040
2041 free(txr->tx_rsq, M_DEVBUF);
2042 txr->tx_rsq = NULL;
2043 }
2044 free(sc->tx_queues, M_DEVBUF);
2045 sc->tx_queues = NULL;
2046 }
2047
2048 if (rx_que != NULL) {
2049 free(sc->rx_queues, M_DEVBUF);
2050 sc->rx_queues = NULL;
2051 }
2052
2053 if (sc->mta != NULL) {
2054 free(sc->mta, M_DEVBUF);
2055 }
2056 }
2057
2058 /*********************************************************************
2059 *
2060 * Enable transmit unit.
2061 *
2062 **********************************************************************/
2063 static void
2064 igc_initialize_transmit_unit(if_ctx_t ctx)
2065 {
2066 struct igc_softc *sc = iflib_get_softc(ctx);
2067 if_softc_ctx_t scctx = sc->shared;
2068 struct igc_tx_queue *que;
2069 struct tx_ring *txr;
2070 struct igc_hw *hw = &sc->hw;
2071 u32 tctl, txdctl = 0;
2072
2073 INIT_DEBUGOUT("igc_initialize_transmit_unit: begin");
2074
2075 for (int i = 0; i < sc->tx_num_queues; i++, txr++) {
2076 u64 bus_addr;
2077 caddr_t offp, endp;
2078
2079 que = &sc->tx_queues[i];
2080 txr = &que->txr;
2081 bus_addr = txr->tx_paddr;
2082
2083 /* Clear checksum offload context. */
2084 offp = (caddr_t)&txr->csum_flags;
2085 endp = (caddr_t)(txr + 1);
2086 bzero(offp, endp - offp);
2087
2088 /* Base and Len of TX Ring */
2089 IGC_WRITE_REG(hw, IGC_TDLEN(i),
2090 scctx->isc_ntxd[0] * sizeof(struct igc_tx_desc));
2091 IGC_WRITE_REG(hw, IGC_TDBAH(i),
2092 (u32)(bus_addr >> 32));
2093 IGC_WRITE_REG(hw, IGC_TDBAL(i),
2094 (u32)bus_addr);
2095 /* Init the HEAD/TAIL indices */
2096 IGC_WRITE_REG(hw, IGC_TDT(i), 0);
2097 IGC_WRITE_REG(hw, IGC_TDH(i), 0);
2098
2099 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2100 IGC_READ_REG(&sc->hw, IGC_TDBAL(i)),
2101 IGC_READ_REG(&sc->hw, IGC_TDLEN(i)));
2102
2103 txdctl = 0; /* clear txdctl */
2104 txdctl |= 0x1f; /* PTHRESH */
2105 txdctl |= 1 << 8; /* HTHRESH */
2106 txdctl |= 1 << 16; /* WTHRESH */
2107 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */
2108 txdctl |= IGC_TXDCTL_GRAN;
2109 txdctl |= 1 << 25; /* LWTHRESH */
2110
2111 IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl);
2112 }
2113
2114 /* Program the Transmit Control Register */
2115 tctl = IGC_READ_REG(&sc->hw, IGC_TCTL);
2116 tctl &= ~IGC_TCTL_CT;
2117 tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
2118 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));
2119
2120 /* This write will effectively turn on the transmit unit. */
2121 IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl);
2122 }
2123
2124 /*********************************************************************
2125 *
2126 * Enable receive unit.
2127 *
2128 **********************************************************************/
2129 #define BSIZEPKT_ROUNDUP ((1<<IGC_SRRCTL_BSIZEPKT_SHIFT)-1)
2130
2131 static void
2132 igc_initialize_receive_unit(if_ctx_t ctx)
2133 {
2134 struct igc_softc *sc = iflib_get_softc(ctx);
2135 if_softc_ctx_t scctx = sc->shared;
2136 if_t ifp = iflib_get_ifp(ctx);
2137 struct igc_hw *hw = &sc->hw;
2138 struct igc_rx_queue *que;
2139 int i;
2140 u32 psize, rctl, rxcsum, srrctl = 0;
2141
2142 INIT_DEBUGOUT("igc_initialize_receive_units: begin");
2143
2144 /*
2145 * Make sure receives are disabled while setting
2146 * up the descriptor ring
2147 */
2148 rctl = IGC_READ_REG(hw, IGC_RCTL);
2149 IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
2150
2151 /* Setup the Receive Control Register */
2152 rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
2153 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM |
2154 IGC_RCTL_LBM_NO | IGC_RCTL_RDMTS_HALF |
2155 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
2156
2157 /* Do not store bad packets */
2158 rctl &= ~IGC_RCTL_SBP;
2159
2160 /* Enable Long Packet receive */
2161 if (if_getmtu(ifp) > ETHERMTU)
2162 rctl |= IGC_RCTL_LPE;
2163 else
2164 rctl &= ~IGC_RCTL_LPE;
2165
2166 /* Strip the CRC */
2167 if (!igc_disable_crc_stripping)
2168 rctl |= IGC_RCTL_SECRC;
2169
2170 rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
2171 if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
2172 rxcsum |= IGC_RXCSUM_CRCOFL;
2173 if (sc->tx_num_queues > 1)
2174 rxcsum |= IGC_RXCSUM_PCSD;
2175 else
2176 rxcsum |= IGC_RXCSUM_IPPCSE;
2177 } else {
2178 if (sc->tx_num_queues > 1)
2179 rxcsum |= IGC_RXCSUM_PCSD;
2180 else
2181 rxcsum &= ~IGC_RXCSUM_TUOFL;
2182 }
2183 IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
2184
2185 if (sc->rx_num_queues > 1)
2186 igc_initialize_rss_mapping(sc);
2187
2188 if (if_getmtu(ifp) > ETHERMTU) {
2189 psize = scctx->isc_max_frame_size;
2190 /* are we on a vlan? */
2191 if (if_vlantrunkinuse(ifp))
2192 psize += VLAN_TAG_SIZE;
2193 IGC_WRITE_REG(&sc->hw, IGC_RLPML, psize);
2194 }
2195
2196 /* Set maximum packet buffer len */
2197 srrctl |= (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
2198 IGC_SRRCTL_BSIZEPKT_SHIFT;
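/*
 * Example: assuming the usual 1KB granularity (IGC_SRRCTL_BSIZEPKT_SHIFT
 * == 10), 2048-byte receive buffers pack (2048 + 1023) >> 10 == 2 into
 * the BSIZEPKT field, i.e. 2KB of packet buffer per descriptor.
 */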
2199 /* srrctl above overrides this but set the register to a sane value */
2200 rctl |= IGC_RCTL_SZ_2048;
2201
2202 /*
2203 * If TX flow control is disabled and there's >1 queue defined,
2204 * enable DROP.
2205 *
2206 * This drops frames rather than hanging the RX MAC for all queues.
2207 */
2208 if ((sc->rx_num_queues > 1) &&
2209 (sc->fc == igc_fc_none ||
2210 sc->fc == igc_fc_rx_pause)) {
2211 srrctl |= IGC_SRRCTL_DROP_EN;
2212 }
2213
2214 /* Setup the Base and Length of the Rx Descriptor Rings */
2215 for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues; i++, que++) {
2216 struct rx_ring *rxr = &que->rxr;
2217 u64 bus_addr = rxr->rx_paddr;
2218 u32 rxdctl;
2219
2220 #ifdef notyet
2221 /* Configure for header split? -- ignore for now */
2222 rxr->hdr_split = igc_header_split;
2223 #else
2224 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
2225 #endif
2226
2227 IGC_WRITE_REG(hw, IGC_RDLEN(i),
2228 scctx->isc_nrxd[0] * sizeof(struct igc_rx_desc));
2229 IGC_WRITE_REG(hw, IGC_RDBAH(i),
2230 (uint32_t)(bus_addr >> 32));
2231 IGC_WRITE_REG(hw, IGC_RDBAL(i),
2232 (uint32_t)bus_addr);
2233 IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl);
2234 /* Setup the Head and Tail Descriptor Pointers */
2235 IGC_WRITE_REG(hw, IGC_RDH(i), 0);
2236 IGC_WRITE_REG(hw, IGC_RDT(i), 0);
2237 /* Enable this Queue */
2238 rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i));
2239 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
2240 rxdctl &= 0xFFF00000;
2241 rxdctl |= IGC_RX_PTHRESH;
2242 rxdctl |= IGC_RX_HTHRESH << 8;
2243 rxdctl |= IGC_RX_WTHRESH << 16;
2244 IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl);
2245 }
2246
2247 /* Make sure VLAN Filters are off */
2248 rctl &= ~IGC_RCTL_VFE;
2249
2250 /* Write out the settings */
2251 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
2252
2253 return;
2254 }
2255
2256 static void
2257 igc_setup_vlan_hw_support(if_ctx_t ctx)
2258 {
2259 struct igc_softc *sc = iflib_get_softc(ctx);
2260 struct igc_hw *hw = &sc->hw;
2261 struct ifnet *ifp = iflib_get_ifp(ctx);
2262 u32 reg;
2263
2264 /* igc hardware doesn't seem to implement VFTA for HWFILTER */
2265
2266 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING &&
2267 !igc_disable_crc_stripping) {
2268 reg = IGC_READ_REG(hw, IGC_CTRL);
2269 reg |= IGC_CTRL_VME;
2270 IGC_WRITE_REG(hw, IGC_CTRL, reg);
2271 } else {
2272 reg = IGC_READ_REG(hw, IGC_CTRL);
2273 reg &= ~IGC_CTRL_VME;
2274 IGC_WRITE_REG(hw, IGC_CTRL, reg);
2275 }
2276 }
2277
2278 static void
2279 igc_if_intr_enable(if_ctx_t ctx)
2280 {
2281 struct igc_softc *sc = iflib_get_softc(ctx);
2282 struct igc_hw *hw = &sc->hw;
2283 u32 mask;
2284
2285 if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {
2286 mask = (sc->que_mask | sc->link_mask);
2287 IGC_WRITE_REG(hw, IGC_EIAC, mask);
2288 IGC_WRITE_REG(hw, IGC_EIAM, mask);
2289 IGC_WRITE_REG(hw, IGC_EIMS, mask);
2290 IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC);
2291 } else
2292 IGC_WRITE_REG(hw, IGC_IMS, IMS_ENABLE_MASK);
2293 IGC_WRITE_FLUSH(hw);
2294 }
2295
2296 static void
2297 igc_if_intr_disable(if_ctx_t ctx)
2298 {
2299 struct igc_softc *sc = iflib_get_softc(ctx);
2300 struct igc_hw *hw = &sc->hw;
2301
2302 if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {
2303 IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff);
2304 IGC_WRITE_REG(hw, IGC_EIAC, 0);
2305 }
2306 IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
2307 IGC_WRITE_FLUSH(hw);
2308 }
2309
2310 /*
2311 * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
2312 * For ASF and Pass Through versions of f/w this means
2313 * that the driver is loaded. For AMT versions of the f/w
2314 * this means that the network i/f is open.
2315 */
2316 static void
2317 igc_get_hw_control(struct igc_softc *sc)
2318 {
2319 u32 ctrl_ext;
2320
2321 if (sc->vf_ifp)
2322 return;
2323
2324 ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
2325 IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT,
2326 ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
2327 }
2328
2329 /*
2330 * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
2331 * For ASF and Pass Through versions of f/w this means that
2332 * the driver is no longer loaded. For AMT versions of the
2333 * f/w this means that the network i/f is closed.
2334 */
2335 static void
2336 igc_release_hw_control(struct igc_softc *sc)
2337 {
2338 u32 ctrl_ext;
2339
2340 ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
2341 IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT,
2342 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
2343 return;
2344 }
2345
2346 static int
2347 igc_is_valid_ether_addr(u8 *addr)
2348 {
2349 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2350
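/* Reject multicast/broadcast (I/G bit set) and all-zero addresses. */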
2351 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
2352 return (false);
2353 }
2354
2355 return (true);
2356 }
2357
2358 /*
2359 ** Parse the interface capabilities with regard
2360 ** to both system management and wake-on-lan for
2361 ** later use.
2362 */
2363 static void
2364 igc_get_wakeup(if_ctx_t ctx)
2365 {
2366 struct igc_softc *sc = iflib_get_softc(ctx);
2367 u16 eeprom_data = 0, apme_mask;
2368
2369 apme_mask = IGC_WUC_APME;
2370 eeprom_data = IGC_READ_REG(&sc->hw, IGC_WUC);
2371
2372 if (eeprom_data & apme_mask)
2373 sc->wol = IGC_WUFC_LNKC;
2374 }
2375
2376
2377 /*
2378 * Enable PCI Wake On Lan capability
2379 */
2380 static void
2381 igc_enable_wakeup(if_ctx_t ctx)
2382 {
2383 struct igc_softc *sc = iflib_get_softc(ctx);
2384 device_t dev = iflib_get_dev(ctx);
2385 if_t ifp = iflib_get_ifp(ctx);
2386 int error = 0;
2387 u32 pmc, ctrl, rctl;
2388 u16 status;
2389
2390 if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0)
2391 return;
2392
2393 /*
2394 * Determine type of Wakeup: note that wol
2395 * is set with all bits on by default.
2396 */
2397 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0)
2398 sc->wol &= ~IGC_WUFC_MAG;
2399
2400 if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0)
2401 sc->wol &= ~IGC_WUFC_EX;
2402
2403 if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0)
2404 sc->wol &= ~IGC_WUFC_MC;
2405 else {
2406 rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
2407 rctl |= IGC_RCTL_MPE;
2408 IGC_WRITE_REG(&sc->hw, IGC_RCTL, rctl);
2409 }
2410
2411 if (!(sc->wol & (IGC_WUFC_EX | IGC_WUFC_MAG | IGC_WUFC_MC)))
2412 goto pme;
2413
2414 /* Advertise the wakeup capability */
2415 ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL);
2416 ctrl |= IGC_CTRL_ADVD3WUC;
2417 IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl);
2418
2419 /* Enable wakeup by the MAC */
2420 IGC_WRITE_REG(&sc->hw, IGC_WUC, IGC_WUC_PME_EN);
2421 IGC_WRITE_REG(&sc->hw, IGC_WUFC, sc->wol);
2422
2423 pme:
2424 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
2425 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
2426 if (!error && (if_getcapenable(ifp) & IFCAP_WOL))
2427 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2428 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
2429
2430 return;
2431 }
2432
2433 /**********************************************************************
2434 *
2435 * Update the board statistics counters.
2436 *
2437 **********************************************************************/
2438 static void
2439 igc_update_stats_counters(struct igc_softc *sc)
2440 {
2441 u64 prev_xoffrxc = sc->stats.xoffrxc;
2442
2443 sc->stats.crcerrs += IGC_READ_REG(&sc->hw, IGC_CRCERRS);
2444 sc->stats.mpc += IGC_READ_REG(&sc->hw, IGC_MPC);
2445 sc->stats.scc += IGC_READ_REG(&sc->hw, IGC_SCC);
2446 sc->stats.ecol += IGC_READ_REG(&sc->hw, IGC_ECOL);
2447
2448 sc->stats.mcc += IGC_READ_REG(&sc->hw, IGC_MCC);
2449 sc->stats.latecol += IGC_READ_REG(&sc->hw, IGC_LATECOL);
2450 sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_COLC);
2451 sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_RERC);
2452 sc->stats.dc += IGC_READ_REG(&sc->hw, IGC_DC);
2453 sc->stats.rlec += IGC_READ_REG(&sc->hw, IGC_RLEC);
2454 sc->stats.xonrxc += IGC_READ_REG(&sc->hw, IGC_XONRXC);
2455 sc->stats.xontxc += IGC_READ_REG(&sc->hw, IGC_XONTXC);
2456 sc->stats.xoffrxc += IGC_READ_REG(&sc->hw, IGC_XOFFRXC);
2457 /*
2458 * For watchdog management we need to know if we have been
2459 * paused during the last interval, so capture that here.
2460 */
2461 if (sc->stats.xoffrxc != prev_xoffrxc)
2462 sc->shared->isc_pause_frames = 1;
2463 sc->stats.xofftxc += IGC_READ_REG(&sc->hw, IGC_XOFFTXC);
2464 sc->stats.fcruc += IGC_READ_REG(&sc->hw, IGC_FCRUC);
2465 sc->stats.prc64 += IGC_READ_REG(&sc->hw, IGC_PRC64);
2466 sc->stats.prc127 += IGC_READ_REG(&sc->hw, IGC_PRC127);
2467 sc->stats.prc255 += IGC_READ_REG(&sc->hw, IGC_PRC255);
2468 sc->stats.prc511 += IGC_READ_REG(&sc->hw, IGC_PRC511);
2469 sc->stats.prc1023 += IGC_READ_REG(&sc->hw, IGC_PRC1023);
2470 sc->stats.prc1522 += IGC_READ_REG(&sc->hw, IGC_PRC1522);
2471 sc->stats.tlpic += IGC_READ_REG(&sc->hw, IGC_TLPIC);
2472 sc->stats.rlpic += IGC_READ_REG(&sc->hw, IGC_RLPIC);
2473 sc->stats.gprc += IGC_READ_REG(&sc->hw, IGC_GPRC);
2474 sc->stats.bprc += IGC_READ_REG(&sc->hw, IGC_BPRC);
2475 sc->stats.mprc += IGC_READ_REG(&sc->hw, IGC_MPRC);
2476 sc->stats.gptc += IGC_READ_REG(&sc->hw, IGC_GPTC);
2477
2478 /* For the 64-bit byte counters the low dword must be read first. */
2479 /* Both registers clear on the read of the high dword */
2480
2481 sc->stats.gorc += IGC_READ_REG(&sc->hw, IGC_GORCL) +
2482 ((u64)IGC_READ_REG(&sc->hw, IGC_GORCH) << 32);
2483 sc->stats.gotc += IGC_READ_REG(&sc->hw, IGC_GOTCL) +
2484 ((u64)IGC_READ_REG(&sc->hw, IGC_GOTCH) << 32);
2485
2486 sc->stats.rnbc += IGC_READ_REG(&sc->hw, IGC_RNBC);
2487 sc->stats.ruc += IGC_READ_REG(&sc->hw, IGC_RUC);
2488 sc->stats.rfc += IGC_READ_REG(&sc->hw, IGC_RFC);
2489 sc->stats.roc += IGC_READ_REG(&sc->hw, IGC_ROC);
2490 sc->stats.rjc += IGC_READ_REG(&sc->hw, IGC_RJC);
2491
2492 sc->stats.mgprc += IGC_READ_REG(&sc->hw, IGC_MGTPRC);
2493 sc->stats.mgpdc += IGC_READ_REG(&sc->hw, IGC_MGTPDC);
2494 sc->stats.mgptc += IGC_READ_REG(&sc->hw, IGC_MGTPTC);
2495
2496 sc->stats.tor += IGC_READ_REG(&sc->hw, IGC_TORH);
2497 sc->stats.tot += IGC_READ_REG(&sc->hw, IGC_TOTH);
2498
2499 sc->stats.tpr += IGC_READ_REG(&sc->hw, IGC_TPR);
2500 sc->stats.tpt += IGC_READ_REG(&sc->hw, IGC_TPT);
2501 sc->stats.ptc64 += IGC_READ_REG(&sc->hw, IGC_PTC64);
2502 sc->stats.ptc127 += IGC_READ_REG(&sc->hw, IGC_PTC127);
2503 sc->stats.ptc255 += IGC_READ_REG(&sc->hw, IGC_PTC255);
2504 sc->stats.ptc511 += IGC_READ_REG(&sc->hw, IGC_PTC511);
2505 sc->stats.ptc1023 += IGC_READ_REG(&sc->hw, IGC_PTC1023);
2506 sc->stats.ptc1522 += IGC_READ_REG(&sc->hw, IGC_PTC1522);
2507 sc->stats.mptc += IGC_READ_REG(&sc->hw, IGC_MPTC);
2508 sc->stats.bptc += IGC_READ_REG(&sc->hw, IGC_BPTC);
2509
2510 /* Interrupt Counts */
2511 sc->stats.iac += IGC_READ_REG(&sc->hw, IGC_IAC);
2512 sc->stats.rxdmtc += IGC_READ_REG(&sc->hw, IGC_RXDMTC);
2513
2514 sc->stats.algnerrc += IGC_READ_REG(&sc->hw, IGC_ALGNERRC);
2515 sc->stats.tncrs += IGC_READ_REG(&sc->hw, IGC_TNCRS);
2516 sc->stats.htdpmc += IGC_READ_REG(&sc->hw, IGC_HTDPMC);
2517 sc->stats.tsctc += IGC_READ_REG(&sc->hw, IGC_TSCTC);
2518 }
2519
2520 static uint64_t
2521 igc_if_get_counter(if_ctx_t ctx, ift_counter cnt)
2522 {
2523 struct igc_softc *sc = iflib_get_softc(ctx);
2524 if_t ifp = iflib_get_ifp(ctx);
2525
2526 switch (cnt) {
2527 case IFCOUNTER_COLLISIONS:
2528 return (sc->stats.colc);
2529 case IFCOUNTER_IERRORS:
2530 return (sc->dropped_pkts + sc->stats.rxerrc +
2531 sc->stats.crcerrs + sc->stats.algnerrc +
2532 sc->stats.ruc + sc->stats.roc +
2533 sc->stats.mpc + sc->stats.htdpmc);
2534 case IFCOUNTER_OERRORS:
2535 return (sc->stats.ecol + sc->stats.latecol +
2536 sc->watchdog_events);
2537 default:
2538 return (if_get_counter_default(ifp, cnt));
2539 }
2540 }
2541
2542 /* igc_if_needs_restart - Tell iflib when the driver needs to be reinitialized
2543 * @ctx: iflib context
2544 * @event: event code to check
2545 *
2546 * Defaults to returning false for unknown events.
2547 *
2548 * @returns true if iflib needs to reinit the interface
2549 */
2550 static bool
2551 igc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
2552 {
2553 switch (event) {
2554 case IFLIB_RESTART_VLAN_CONFIG:
2555 default:
2556 return (false);
2557 }
2558 }
2559
2560 /* Export a single 32-bit register via a read-only sysctl. */
2561 static int
2562 igc_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
2563 {
2564 struct igc_softc *sc;
2565 u_int val;
2566
2567 sc = oidp->oid_arg1;
2568 val = IGC_READ_REG(&sc->hw, oidp->oid_arg2);
2569 return (sysctl_handle_int(oidp, &val, 0, req));
2570 }
2571
2572 /* Per queue holdoff interrupt rate handler */
2573 static int
2574 igc_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2575 {
2576 struct igc_rx_queue *rque;
2577 struct igc_tx_queue *tque;
2578 struct igc_hw *hw;
2579 int error;
2580 u32 reg, usec, rate;
2581
2582 bool tx = oidp->oid_arg2;
2583
2584 if (tx) {
2585 tque = oidp->oid_arg1;
2586 hw = &tque->sc->hw;
2587 reg = IGC_READ_REG(hw, IGC_EITR(tque->me));
2588 } else {
2589 rque = oidp->oid_arg1;
2590 hw = &rque->sc->hw;
2591 reg = IGC_READ_REG(hw, IGC_EITR(rque->msix));
2592 }
2593
2594 usec = (reg & IGC_QVECTOR_MASK);
2595 if (usec > 0)
2596 rate = IGC_INTS_TO_EITR(usec);
2597 else
2598 rate = 0;
2599
2600 error = sysctl_handle_int(oidp, &rate, 0, req);
2601 if (error || !req->newptr)
2602 return (error);
2603 return (0);
2604 }
2605
2606 /*
2607 * Add sysctl variables, one per statistic, to the system.
2608 */
2609 static void
2610 igc_add_hw_stats(struct igc_softc *sc)
2611 {
2612 device_t dev = iflib_get_dev(sc->ctx);
2613 struct igc_tx_queue *tx_que = sc->tx_queues;
2614 struct igc_rx_queue *rx_que = sc->rx_queues;
2615
2616 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2617 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2618 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2619 struct igc_hw_stats *stats = &sc->stats;
2620
2621 struct sysctl_oid *stat_node, *queue_node, *int_node;
2622 struct sysctl_oid_list *stat_list, *queue_list, *int_list;
2623
2624 #define QUEUE_NAME_LEN 32
2625 char namebuf[QUEUE_NAME_LEN];
2626
2627 /* Driver Statistics */
2628 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
2629 CTLFLAG_RD, &sc->dropped_pkts,
2630 "Driver dropped packets");
2631 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
2632 CTLFLAG_RD, &sc->link_irq,
2633 "Link MSI-X IRQ Handled");
2634 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
2635 CTLFLAG_RD, &sc->rx_overruns,
2636 "RX overruns");
2637 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
2638 CTLFLAG_RD, &sc->watchdog_events,
2639 "Watchdog timeouts");
2640 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
2641 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2642 sc, IGC_CTRL, igc_sysctl_reg_handler, "IU",
2643 "Device Control Register");
2644 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
2645 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2646 sc, IGC_RCTL, igc_sysctl_reg_handler, "IU",
2647 "Receiver Control Register");
2648 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
2649 CTLFLAG_RD, &sc->hw.fc.high_water, 0,
2650 "Flow Control High Watermark");
2651 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
2652 CTLFLAG_RD, &sc->hw.fc.low_water, 0,
2653 "Flow Control Low Watermark");
2654
2655 for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) {
2656 struct tx_ring *txr = &tx_que->txr;
2657 snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i);
2658 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
2659 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Name");
2660 queue_list = SYSCTL_CHILDREN(queue_node);
2661
2662 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
2663 CTLTYPE_UINT | CTLFLAG_RD, tx_que,
2664 true, igc_sysctl_interrupt_rate_handler, "IU",
2665 "Interrupt Rate");
2666 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
2667 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
2668 IGC_TDH(txr->me), igc_sysctl_reg_handler, "IU",
2669 "Transmit Descriptor Head");
2670 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
2671 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
2672 IGC_TDT(txr->me), igc_sysctl_reg_handler, "IU",
2673 "Transmit Descriptor Tail");
2674 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq",
2675 CTLFLAG_RD, &txr->tx_irq,
2676 "Queue MSI-X Transmit Interrupts");
2677 }
2678
2679 for (int j = 0; j < sc->rx_num_queues; j++, rx_que++) {
2680 struct rx_ring *rxr = &rx_que->rxr;
2681 snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j);
2682 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
2683 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name");
2684 queue_list = SYSCTL_CHILDREN(queue_node);
2685
2686 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
2687 CTLTYPE_UINT | CTLFLAG_RD, rx_que,
2688 false, igc_sysctl_interrupt_rate_handler, "IU",
2689 "Interrupt Rate");
2690 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
2691 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
2692 IGC_RDH(rxr->me), igc_sysctl_reg_handler, "IU",
2693 "Receive Descriptor Head");
2694 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
2695 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
2696 IGC_RDT(rxr->me), igc_sysctl_reg_handler, "IU",
2697 "Receive Descriptor Tail");
2698 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq",
2699 CTLFLAG_RD, &rxr->rx_irq,
2700 "Queue MSI-X Receive Interrupts");
2701 }
2702
2703 /* MAC stats get their own sub node */
2704
2705 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
2706 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
2707 stat_list = SYSCTL_CHILDREN(stat_node);
2708
2709 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
2710 CTLFLAG_RD, &stats->ecol,
2711 "Excessive collisions");
2712 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
2713 CTLFLAG_RD, &stats->scc,
2714 "Single collisions");
2715 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
2716 CTLFLAG_RD, &stats->mcc,
2717 "Multiple collisions");
2718 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
2719 CTLFLAG_RD, &stats->latecol,
2720 "Late collisions");
2721 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
2722 CTLFLAG_RD, &stats->colc,
2723 "Collision Count");
2724 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
2725 CTLFLAG_RD, &sc->stats.symerrs,
2726 "Symbol Errors");
2727 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
2728 CTLFLAG_RD, &sc->stats.sec,
2729 "Sequence Errors");
2730 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
2731 CTLFLAG_RD, &sc->stats.dc,
2732 "Defer Count");
2733 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
2734 CTLFLAG_RD, &sc->stats.mpc,
2735 "Missed Packets");
2736 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_length_errors",
2737 CTLFLAG_RD, &sc->stats.rlec,
2738 "Receive Length Errors");
2739 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
2740 CTLFLAG_RD, &sc->stats.rnbc,
2741 "Receive No Buffers");
2742 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
2743 CTLFLAG_RD, &sc->stats.ruc,
2744 "Receive Undersize");
2745 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
2746 CTLFLAG_RD, &sc->stats.rfc,
2747 "Fragmented Packets Received ");
2748 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
2749 CTLFLAG_RD, &sc->stats.roc,
2750 "Oversized Packets Received");
2751 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
2752 CTLFLAG_RD, &sc->stats.rjc,
2753 "Received Jabber");
2754 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
2755 CTLFLAG_RD, &sc->stats.rxerrc,
2756 "Receive Errors");
2757 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
2758 CTLFLAG_RD, &sc->stats.crcerrs,
2759 "CRC errors");
2760 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
2761 CTLFLAG_RD, &sc->stats.algnerrc,
2762 "Alignment Errors");
2763 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
2764 CTLFLAG_RD, &sc->stats.xonrxc,
2765 "XON Received");
2766 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
2767 CTLFLAG_RD, &sc->stats.xontxc,
2768 "XON Transmitted");
2769 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
2770 CTLFLAG_RD, &sc->stats.xoffrxc,
2771 "XOFF Received");
2772 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
2773 CTLFLAG_RD, &sc->stats.xofftxc,
2774 "XOFF Transmitted");
2775 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "unsupported_fc_recvd",
2776 CTLFLAG_RD, &sc->stats.fcruc,
2777 "Unsupported Flow Control Received");
2778 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_recvd",
2779 CTLFLAG_RD, &sc->stats.mgprc,
2780 "Management Packets Received");
2781 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_drop",
2782 CTLFLAG_RD, &sc->stats.mgpdc,
2783 "Management Packets Dropped");
2784 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_txd",
2785 CTLFLAG_RD, &sc->stats.mgptc,
2786 "Management Packets Transmitted");
2787
2788 /* Packet Reception Stats */
2789 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
2790 CTLFLAG_RD, &sc->stats.tpr,
2791 "Total Packets Received ");
2792 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
2793 CTLFLAG_RD, &sc->stats.gprc,
2794 "Good Packets Received");
2795 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
2796 CTLFLAG_RD, &sc->stats.bprc,
2797 "Broadcast Packets Received");
2798 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
2799 CTLFLAG_RD, &sc->stats.mprc,
2800 "Multicast Packets Received");
2801 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
2802 CTLFLAG_RD, &sc->stats.prc64,
2803 "64 byte frames received ");
2804 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
2805 CTLFLAG_RD, &sc->stats.prc127,
2806 "65-127 byte frames received");
2807 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
2808 CTLFLAG_RD, &sc->stats.prc255,
2809 "128-255 byte frames received");
2810 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
2811 CTLFLAG_RD, &sc->stats.prc511,
2812 "256-511 byte frames received");
2813 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
2814 CTLFLAG_RD, &sc->stats.prc1023,
2815 "512-1023 byte frames received");
2816 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
2817 CTLFLAG_RD, &sc->stats.prc1522,
2818 "1024-1522 byte frames received");
2819 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
2820 CTLFLAG_RD, &sc->stats.gorc,
2821 "Good Octets Received");
2822
2823 /* Packet Transmission Stats */
2824 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
2825 CTLFLAG_RD, &sc->stats.gotc,
2826 "Good Octets Transmitted");
2827 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
2828 CTLFLAG_RD, &sc->stats.tpt,
2829 "Total Packets Transmitted");
2830 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
2831 CTLFLAG_RD, &sc->stats.gptc,
2832 "Good Packets Transmitted");
2833 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
2834 CTLFLAG_RD, &sc->stats.bptc,
2835 "Broadcast Packets Transmitted");
2836 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
2837 CTLFLAG_RD, &sc->stats.mptc,
2838 "Multicast Packets Transmitted");
2839 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
2840 CTLFLAG_RD, &sc->stats.ptc64,
2841 "64 byte frames transmitted ");
2842 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
2843 CTLFLAG_RD, &sc->stats.ptc127,
2844 "65-127 byte frames transmitted");
2845 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
2846 CTLFLAG_RD, &sc->stats.ptc255,
2847 "128-255 byte frames transmitted");
2848 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
2849 CTLFLAG_RD, &sc->stats.ptc511,
2850 "256-511 byte frames transmitted");
2851 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
2852 CTLFLAG_RD, &sc->stats.ptc1023,
2853 "512-1023 byte frames transmitted");
2854 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
2855 CTLFLAG_RD, &sc->stats.ptc1522,
2856 "1024-1522 byte frames transmitted");
2857 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
2858 CTLFLAG_RD, &sc->stats.tsctc,
2859 "TSO Contexts Transmitted");
2860
2861 /* Interrupt Stats */
2862
2863 int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
2864 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics");
2865 int_list = SYSCTL_CHILDREN(int_node);
2866
2867 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
2868 CTLFLAG_RD, &sc->stats.iac,
2869 "Interrupt Assertion Count");
2870
2871 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
2872 CTLFLAG_RD, &sc->stats.rxdmtc,
2873 "Rx Desc Min Thresh Count");
2874 }
2875
2876 static void
2877 igc_fw_version(struct igc_softc *sc)
2878 {
2879 struct igc_hw *hw = &sc->hw;
2880 struct igc_fw_version *fw_ver = &sc->fw_ver;
2881
2882 *fw_ver = (struct igc_fw_version){0};
2883
2884 igc_get_fw_version(hw, fw_ver);
2885 }
2886
2887 static void
2888 igc_sbuf_fw_version(struct igc_fw_version *fw_ver, struct sbuf *buf)
2889 {
2890 const char *space = "";
2891
2892 if (fw_ver->eep_major || fw_ver->eep_minor || fw_ver->eep_build) {
2893 sbuf_printf(buf, "EEPROM V%d.%d-%d", fw_ver->eep_major,
2894 fw_ver->eep_minor, fw_ver->eep_build);
2895 space = " ";
2896 }
2897
2898 if (fw_ver->invm_major || fw_ver->invm_minor || fw_ver->invm_img_type) {
2899 sbuf_printf(buf, "%sNVM V%d.%d imgtype%d",
2900 space, fw_ver->invm_major, fw_ver->invm_minor,
2901 fw_ver->invm_img_type);
2902 space = " ";
2903 }
2904
2905 if (fw_ver->or_valid) {
2906 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
2907 space, fw_ver->or_major, fw_ver->or_build,
2908 fw_ver->or_patch);
2909 space = " ";
2910 }
2911
2912 if (fw_ver->etrack_id)
2913 sbuf_printf(buf, "%seTrack 0x%08x", space, fw_ver->etrack_id);
2914 }
2915
2916 static void
2917 igc_print_fw_version(struct igc_softc *sc)
2918 {
2919 device_t dev = sc->dev;
2920 struct sbuf *buf;
2921 int error = 0;
2922
2923 buf = sbuf_new_auto();
2924 if (!buf) {
2925 device_printf(dev, "Could not allocate sbuf for output.\n");
2926 return;
2927 }
2928
2929 igc_sbuf_fw_version(&sc->fw_ver, buf);
2930
2931 error = sbuf_finish(buf);
2932 if (error)
2933 device_printf(dev, "Error finishing sbuf: %d\n", error);
2934 else if (sbuf_len(buf))
2935 device_printf(dev, "%s\n", sbuf_data(buf));
2936
2937 sbuf_delete(buf);
2938 }
2939
2940 static int
2941 igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
2942 {
2943 struct igc_softc *sc = (struct igc_softc *)arg1;
2944 device_t dev = sc->dev;
2945 struct sbuf *buf;
2946 int error = 0;
2947
2948 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2949 if (!buf) {
2950 device_printf(dev, "Could not allocate sbuf for output.\n");
2951 return (ENOMEM);
2952 }
2953
2954 igc_sbuf_fw_version(&sc->fw_ver, buf);
2955
2956 error = sbuf_finish(buf);
2957 if (error)
2958 device_printf(dev, "Error finishing sbuf: %d\n", error);
2959
2960 sbuf_delete(buf);
2961
2962 return (0);
2963 }
2964
2965 /**********************************************************************
2966 *
2967 * This routine provides a way to dump out the adapter EEPROM,
2968 * often a useful debug/service tool.  It only dumps the first
2969 * 32 words, which is where the content that matters lives.
2970 *
2971 **********************************************************************/
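/*
 * Usage sketch (OID name assumed to be "nvm"): writing 1, e.g.
 * "sysctl dev.igc.0.nvm=1", triggers the hex dump done by
 * igc_print_nvm_info() below.
 */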
2972 static int
2973 igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
2974 {
2975 struct igc_softc *sc = (struct igc_softc *)arg1;
2976 int error;
2977 int result;
2978
2979 result = -1;
2980 error = sysctl_handle_int(oidp, &result, 0, req);
2981
2982 if (error || !req->newptr)
2983 return (error);
2984
2985 /*
2986 * This value will cause a hex dump of the
2987 * first 32 16-bit words of the EEPROM to
2988 * the screen.
2989 */
2990 if (result == 1)
2991 igc_print_nvm_info(sc);
2992
2993 return (error);
2994 }
2995
2996 static void
2997 igc_print_nvm_info(struct igc_softc *sc)
2998 {
2999 u16 eeprom_data;
3000 int i, j, row = 0;
3001
3002 /* It's a bit crude, but it gets the job done */
3003 printf("\nInterface EEPROM Dump:\n");
3004 printf("Offset\n0x0000 ");
3005 for (i = 0, j = 0; i < 32; i++, j++) {
3006 if (j == 8) { /* Make the offset block */
3007 j = 0; ++row;
3008 printf("\n0x00%x0 ", row);
3009 }
3010 igc_read_nvm(&sc->hw, i, 1, &eeprom_data);
3011 printf("%04x ", eeprom_data);
3012 }
3013 printf("\n");
3014 }
3015
3016 /*
3017 * Set flow control using sysctl:
3018 * Flow control values:
3019 * 0 - off
3020 * 1 - rx pause
3021 * 2 - tx pause
3022 * 3 - full
3023 */
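/*
 * Usage sketch, assuming this handler is registered under the OID
 * name "fc" as in the sibling em(4) driver:
 *
 *	sysctl dev.igc.0.fc=3
 *
 * requests full flow control and applies it immediately through
 * igc_force_mac_fc().
 */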
3024 static int
3025 igc_set_flowcntl(SYSCTL_HANDLER_ARGS)
3026 {
3027 int error;
3028 static int input = 3; /* default is full */
3029 struct igc_softc *sc = (struct igc_softc *) arg1;
3030
3031 error = sysctl_handle_int(oidp, &input, 0, req);
3032
3033 if ((error) || (req->newptr == NULL))
3034 return (error);
3035
3036 if (input == sc->fc) /* no change? */
3037 return (error);
3038
3039 switch (input) {
3040 case igc_fc_rx_pause:
3041 case igc_fc_tx_pause:
3042 case igc_fc_full:
3043 case igc_fc_none:
3044 sc->hw.fc.requested_mode = input;
3045 sc->fc = input;
3046 break;
3047 default:
3048 /* Do nothing */
3049 return (error);
3050 }
3051
3052 sc->hw.fc.current_mode = sc->hw.fc.requested_mode;
3053 igc_force_mac_fc(&sc->hw);
3054 return (error);
3055 }
3056
3057 /*
3058 * Manage DMA Coalesce:
3059 * Control values:
3060 * 0/1 - off/on
3061 * Legal timer values are:
3062 * 250, 500 and 1000-10000 in steps of 1000
3063 */
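/*
 * Usage sketch (OID name assumed to be "dmac"):
 *
 *	sysctl dev.igc.0.dmac=1000
 *
 * enables coalescing with a timer value of 1000 and reinitializes
 * the interface via igc_if_init().
 */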
3064 static int
3065 igc_sysctl_dmac(SYSCTL_HANDLER_ARGS)
3066 {
3067 struct igc_softc *sc = (struct igc_softc *) arg1;
3068 int error;
3069
3070 error = sysctl_handle_int(oidp, &sc->dmac, 0, req);
3071
3072 if ((error) || (req->newptr == NULL))
3073 return (error);
3074
3075 switch (sc->dmac) {
3076 case 0:
3077 /* Disabling */
3078 break;
3079 case 1: /* Just enable and use default */
3080 sc->dmac = 1000;
3081 break;
3082 case 250:
3083 case 500:
3084 case 1000:
3085 case 2000:
3086 case 3000:
3087 case 4000:
3088 case 5000:
3089 case 6000:
3090 case 7000:
3091 case 8000:
3092 case 9000:
3093 case 10000:
3094 /* Legal values - allow */
3095 break;
3096 default:
3097 /* Do nothing, illegal value */
3098 sc->dmac = 0;
3099 return (EINVAL);
3100 }
3101 /* Reinit the interface */
3102 igc_if_init(sc->ctx);
3103 return (error);
3104 }
3105
3106 /*
3107 * Manage Energy Efficient Ethernet:
3108 * Control values:
3109 * 0/1 - enabled/disabled
3110 */
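/*
 * Usage sketch (OID name assumed to be "eee_control"): writing 1 sets
 * eee_disable, i.e. turns EEE off; writing 0 re-enables it.  Either
 * write reinitializes the interface via igc_if_init().
 */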
3111 static int
3112 igc_sysctl_eee(SYSCTL_HANDLER_ARGS)
3113 {
3114 struct igc_softc *sc = (struct igc_softc *) arg1;
3115 int error, value;
3116
3117 value = sc->hw.dev_spec._i225.eee_disable;
3118 error = sysctl_handle_int(oidp, &value, 0, req);
3119 if (error || req->newptr == NULL)
3120 return (error);
3121
3122 sc->hw.dev_spec._i225.eee_disable = (value != 0);
3123 igc_if_init(sc->ctx);
3124
3125 return (0);
3126 }
3127
3128 static int
3129 igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3130 {
3131 struct igc_softc *sc;
3132 int error;
3133 int result;
3134
3135 result = -1;
3136 error = sysctl_handle_int(oidp, &result, 0, req);
3137
3138 if (error || !req->newptr)
3139 return (error);
3140
3141 if (result == 1) {
3142 sc = (struct igc_softc *) arg1;
3143 igc_print_debug_info(sc);
3144 }
3145
3146 return (error);
3147 }
3148
3149 static int
3150 igc_get_rs(SYSCTL_HANDLER_ARGS)
3151 {
3152 struct igc_softc *sc = (struct igc_softc *) arg1;
3153 int error;
3154 int result;
3155
3156 result = 0;
3157 error = sysctl_handle_int(oidp, &result, 0, req);
3158
3159 if (error || !req->newptr || result != 1)
3160 return (error);
3161 igc_dump_rs(sc);
3162
3163 return (error);
3164 }
3165
3166 static void
3167 igc_if_debug(if_ctx_t ctx)
3168 {
3169 igc_dump_rs(iflib_get_softc(ctx));
3170 }
3171
3172 /*
3173 * This routine is meant to be fluid, add whatever is
3174 * needed for debugging a problem. -jfv
3175 */
3176 static void
3177 igc_print_debug_info(struct igc_softc *sc)
3178 {
3179 device_t dev = iflib_get_dev(sc->ctx);
3180 if_t ifp = iflib_get_ifp(sc->ctx);
3181 struct tx_ring *txr = &sc->tx_queues->txr;
3182 struct rx_ring *rxr = &sc->rx_queues->rxr;
3183
3184 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
3185 printf("Interface is RUNNING ");
3186 else
3187 printf("Interface is NOT RUNNING\n");
3188
3189 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
3190 printf("and INACTIVE\n");
3191 else
3192 printf("and ACTIVE\n");
3193
3194 for (int i = 0; i < sc->tx_num_queues; i++, txr++) {
3195 device_printf(dev, "TX Queue %d ------\n", i);
3196 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
3197 IGC_READ_REG(&sc->hw, IGC_TDH(i)),
3198 IGC_READ_REG(&sc->hw, IGC_TDT(i)));
3199
3200 }
3201 for (int j=0; j < sc->rx_num_queues; j++, rxr++) {
3202 device_printf(dev, "RX Queue %d ------\n", j);
3203 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
3204 IGC_READ_REG(&sc->hw, IGC_RDH(j)),
3205 IGC_READ_REG(&sc->hw, IGC_RDT(j)));
3206 }
3207 }
3208