xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 1de7b4b805ddbf2429da511c053686ac4591ed89)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_rss.h"
40 #endif
41 
42 #include "ixgbe.h"
43 
/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "3.2.12-k";


/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *
 *   Note: every entry here uses String Index 0, matching the single
 *   entry in ixgbe_strings below.
 ************************************************************************/
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
115 
/************************************************************************
 * Function prototypes
 ************************************************************************/
/* Device lifecycle (newbus methods) and ifnet entry points */
static int      ixgbe_probe(device_t);
static int      ixgbe_attach(device_t);
static int      ixgbe_detach(device_t);
static int      ixgbe_shutdown(device_t);
static int      ixgbe_suspend(device_t);
static int      ixgbe_resume(device_t);
static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixgbe_init(void *);
static void     ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
#if __FreeBSD_version >= 1100036
static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
#endif
/* Hardware setup, media, and resource management */
static void     ixgbe_init_device_features(struct adapter *);
static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void     ixgbe_add_media_types(struct adapter *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static int      ixgbe_allocate_pci_resources(struct adapter *);
static void     ixgbe_get_slot_info(struct adapter *);
static int      ixgbe_allocate_msix(struct adapter *);
static int      ixgbe_allocate_legacy(struct adapter *);
static int      ixgbe_configure_interrupts(struct adapter *);
static void     ixgbe_free_pci_resources(struct adapter *);
static void     ixgbe_local_timer(void *);
static int      ixgbe_setup_interface(device_t, struct adapter *);
static void     ixgbe_config_gpie(struct adapter *);
static void     ixgbe_config_dmac(struct adapter *);
static void     ixgbe_config_delay_values(struct adapter *);
static void     ixgbe_config_link(struct adapter *);
static void     ixgbe_check_wol_support(struct adapter *);
static int      ixgbe_setup_low_power_mode(struct adapter *);
static void     ixgbe_rearm_queues(struct adapter *, u64);

/* TX/RX ring initialization and RSS */
static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void     ixgbe_enable_rx_drop(struct adapter *);
static void     ixgbe_disable_rx_drop(struct adapter *);
static void     ixgbe_initialize_rss_mapping(struct adapter *);

/* Interrupts, statistics, link, and multicast handling */
static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void     ixgbe_configure_ivars(struct adapter *);
static u8       *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* VLAN hardware support and event callbacks */
static void     ixgbe_setup_vlan_hw_support(struct adapter *);
static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static void     ixgbe_add_device_sysctls(struct adapter *);
static void     ixgbe_add_hw_stats(struct adapter *);
static int      ixgbe_set_flowcntl(struct adapter *, int);
static int      ixgbe_set_advertise(struct adapter *, int);
static int      ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void     ixgbe_set_sysctl_value(struct adapter *, const char *,
                                       const char *, int *, int);
static int      ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int      ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int      ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Support for pluggable optic modules */
static bool     ixgbe_sfp_probe(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void     ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static void     ixgbe_msix_que(void *);
static void     ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void     ixgbe_handle_que(void *, int);
static void     ixgbe_handle_link(void *, int);
static void     ixgbe_handle_msf(void *, int);
static void     ixgbe_handle_mod(void *, int);
static void     ixgbe_handle_phy(void *, int);
217 
/************************************************************************
 *  FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
#ifdef PCI_IOV
	/* SR-IOV interface (VF management), only with PCI_IOV kernels */
	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

/* Driver declaration: softc is the per-device struct adapter */
static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
249 
/*
 * TUNABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

/* Interrupt rate ceiling, expressed via the low-latency EITR interval */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.
 * This only works as a compile option
 * right now as it's applied during attach;
 * set this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

/*
 * Transmit-path hooks (legacy if_start vs. multiqueue); assigned
 * elsewhere in the driver — presumably during attach, not visible here.
 */
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
367 
368 /************************************************************************
369  * ixgbe_initialize_rss_mapping
370  ************************************************************************/
371 static void
372 ixgbe_initialize_rss_mapping(struct adapter *adapter)
373 {
374 	struct ixgbe_hw *hw = &adapter->hw;
375 	u32             reta = 0, mrqc, rss_key[10];
376 	int             queue_id, table_size, index_mult;
377 	int             i, j;
378 	u32             rss_hash_config;
379 
380 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
381 		/* Fetch the configured RSS key */
382 		rss_getkey((uint8_t *)&rss_key);
383 	} else {
384 		/* set up random bits */
385 		arc4rand(&rss_key, sizeof(rss_key), 0);
386 	}
387 
388 	/* Set multiplier for RETA setup and table size based on MAC */
389 	index_mult = 0x1;
390 	table_size = 128;
391 	switch (adapter->hw.mac.type) {
392 	case ixgbe_mac_82598EB:
393 		index_mult = 0x11;
394 		break;
395 	case ixgbe_mac_X550:
396 	case ixgbe_mac_X550EM_x:
397 	case ixgbe_mac_X550EM_a:
398 		table_size = 512;
399 		break;
400 	default:
401 		break;
402 	}
403 
404 	/* Set up the redirection table */
405 	for (i = 0, j = 0; i < table_size; i++, j++) {
406 		if (j == adapter->num_queues)
407 			j = 0;
408 
409 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
410 			/*
411 			 * Fetch the RSS bucket id for the given indirection
412 			 * entry. Cap it at the number of configured buckets
413 			 * (which is num_queues.)
414 			 */
415 			queue_id = rss_get_indirection_to_bucket(i);
416 			queue_id = queue_id % adapter->num_queues;
417 		} else
418 			queue_id = (j * index_mult);
419 
420 		/*
421 		 * The low 8 bits are for hash value (n+0);
422 		 * The next 8 bits are for hash value (n+1), etc.
423 		 */
424 		reta = reta >> 8;
425 		reta = reta | (((uint32_t)queue_id) << 24);
426 		if ((i & 3) == 3) {
427 			if (i < 128)
428 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
429 			else
430 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
431 				    reta);
432 			reta = 0;
433 		}
434 	}
435 
436 	/* Now fill our hash function seeds */
437 	for (i = 0; i < 10; i++)
438 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
439 
440 	/* Perform hash on these packet types */
441 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
442 		rss_hash_config = rss_gethashconfig();
443 	else {
444 		/*
445 		 * Disable UDP - IP fragments aren't currently being handled
446 		 * and so we end up with a mix of 2-tuple and 4-tuple
447 		 * traffic.
448 		 */
449 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
450 		                | RSS_HASHTYPE_RSS_TCP_IPV4
451 		                | RSS_HASHTYPE_RSS_IPV6
452 		                | RSS_HASHTYPE_RSS_TCP_IPV6
453 		                | RSS_HASHTYPE_RSS_IPV6_EX
454 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
455 	}
456 
457 	mrqc = IXGBE_MRQC_RSSEN;
458 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
459 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
460 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
461 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
462 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
463 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
464 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
465 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
466 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
467 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
468 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
469 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
470 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
471 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
472 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
473 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
474 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
475 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
476 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
477 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
478 } /* ixgbe_initialize_rss_mapping */
479 
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
/* Round a buffer size up to the next SRRCTL BSIZEPKT granule */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring  *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	int             i, j;
	u32             bufsz, fctrl, srrctl, rxcsum;
	u32             hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	/* 82598 additionally passes PAUSE and MAC control frames up */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Packet buffer size in SRRCTL units, rounded up */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		/* j is the hardware register index for this ring */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	/* Packet-split types; not applicable to 82598 */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixgbe_initialize_receive_units */
601 
602 /************************************************************************
603  * ixgbe_initialize_transmit_units - Enable transmit units.
604  ************************************************************************/
605 static void
606 ixgbe_initialize_transmit_units(struct adapter *adapter)
607 {
608 	struct tx_ring  *txr = adapter->tx_rings;
609 	struct ixgbe_hw *hw = &adapter->hw;
610 
611 	/* Setup the Base and Length of the Tx Descriptor Ring */
612 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
613 		u64 tdba = txr->txdma.dma_paddr;
614 		u32 txctrl = 0;
615 		int j = txr->me;
616 
617 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
618 		    (tdba & 0x00000000ffffffffULL));
619 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
620 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
621 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
622 
623 		/* Setup the HW Tx Head and Tail descriptor pointers */
624 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
625 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
626 
627 		/* Cache the tail address */
628 		txr->tail = IXGBE_TDT(j);
629 
630 		/* Disable Head Writeback */
631 		/*
632 		 * Note: for X550 series devices, these registers are actually
633 		 * prefixed with TPH_ isntead of DCA_, but the addresses and
634 		 * fields remain the same.
635 		 */
636 		switch (hw->mac.type) {
637 		case ixgbe_mac_82598EB:
638 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
639 			break;
640 		default:
641 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
642 			break;
643 		}
644 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
645 		switch (hw->mac.type) {
646 		case ixgbe_mac_82598EB:
647 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
648 			break;
649 		default:
650 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
651 			break;
652 		}
653 
654 	}
655 
656 	if (hw->mac.type != ixgbe_mac_82598EB) {
657 		u32 dmatxctl, rttdcs;
658 
659 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
660 		dmatxctl |= IXGBE_DMATXCTL_TE;
661 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
662 		/* Disable arbiter to set MTQC */
663 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
664 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
665 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
666 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
667 		    ixgbe_get_mtqc(adapter->iov_mode));
668 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
669 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
670 	}
671 
672 	return;
673 } /* ixgbe_initialize_transmit_units */
674 
675 /************************************************************************
676  * ixgbe_attach - Device initialization routine
677  *
678  *   Called when the driver is being loaded.
679  *   Identifies the type of hardware, allocates all resources
680  *   and initializes the hardware.
681  *
682  *   return 0 on success, positive on failure
683  ************************************************************************/
684 static int
685 ixgbe_attach(device_t dev)
686 {
687 	struct adapter  *adapter;
688 	struct ixgbe_hw *hw;
689 	int             error = 0;
690 	u32             ctrl_ext;
691 
692 	INIT_DEBUGOUT("ixgbe_attach: begin");
693 
694 	/* Allocate, clear, and link in our adapter structure */
695 	adapter = device_get_softc(dev);
696 	adapter->hw.back = adapter;
697 	adapter->dev = dev;
698 	hw = &adapter->hw;
699 
700 	/* Core Lock Init*/
701 	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
702 
703 	/* Set up the timer callout */
704 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
705 
706 	/* Determine hardware revision */
707 	hw->vendor_id = pci_get_vendor(dev);
708 	hw->device_id = pci_get_device(dev);
709 	hw->revision_id = pci_get_revid(dev);
710 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
711 	hw->subsystem_device_id = pci_get_subdevice(dev);
712 
713 	/*
714 	 * Make sure BUSMASTER is set
715 	 */
716 	pci_enable_busmaster(dev);
717 
718 	/* Do base PCI setup - map BAR0 */
719 	if (ixgbe_allocate_pci_resources(adapter)) {
720 		device_printf(dev, "Allocation of PCI resources failed\n");
721 		error = ENXIO;
722 		goto err_out;
723 	}
724 
725 	/* let hardware know driver is loaded */
726 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
727 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
728 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
729 
730 	/*
731 	 * Initialize the shared code
732 	 */
733 	if (ixgbe_init_shared_code(hw)) {
734 		device_printf(dev, "Unable to initialize the shared code\n");
735 		error = ENXIO;
736 		goto err_out;
737 	}
738 
739 	if (hw->mbx.ops.init_params)
740 		hw->mbx.ops.init_params(hw);
741 
742 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
743 
744 	/* Pick up the 82599 settings */
745 	if (hw->mac.type != ixgbe_mac_82598EB) {
746 		hw->phy.smart_speed = ixgbe_smart_speed;
747 		adapter->num_segs = IXGBE_82599_SCATTER;
748 	} else
749 		adapter->num_segs = IXGBE_82598_SCATTER;
750 
751 	ixgbe_init_device_features(adapter);
752 
753 	if (ixgbe_configure_interrupts(adapter)) {
754 		error = ENXIO;
755 		goto err_out;
756 	}
757 
758 	/* Allocate multicast array memory. */
759 	adapter->mta = malloc(sizeof(*adapter->mta) *
760 	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
761 	if (adapter->mta == NULL) {
762 		device_printf(dev, "Can not allocate multicast setup array\n");
763 		error = ENOMEM;
764 		goto err_out;
765 	}
766 
767 	/* Enable WoL (if supported) */
768 	ixgbe_check_wol_support(adapter);
769 
770 	/* Register for VLAN events */
771 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
772 	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
773 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
774 	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
775 
776 	/* Verify adapter fan is still functional (if applicable) */
777 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
778 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
779 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
780 	}
781 
782 	/* Ensure SW/FW semaphore is free */
783 	ixgbe_init_swfw_semaphore(hw);
784 
785 	/* Enable EEE power saving */
786 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
787 		hw->mac.ops.setup_eee(hw, TRUE);
788 
789 	/* Set an initial default flow control value */
790 	hw->fc.requested_mode = ixgbe_flow_control;
791 
792 	/* Sysctls for limiting the amount of work done in the taskqueues */
793 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
794 	    "max number of rx packets to process",
795 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
796 
797 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
798 	    "max number of tx packets to process",
799 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
800 
801 	/* Do descriptor calc and sanity checks */
802 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
803 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
804 		device_printf(dev, "TXD config issue, using default!\n");
805 		adapter->num_tx_desc = DEFAULT_TXD;
806 	} else
807 		adapter->num_tx_desc = ixgbe_txd;
808 
809 	/*
810 	 * With many RX rings it is easy to exceed the
811 	 * system mbuf allocation. Tuning nmbclusters
812 	 * can alleviate this.
813 	 */
814 	if (nmbclusters > 0) {
815 		int s;
816 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
817 		if (s > nmbclusters) {
818 			device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
819 			ixgbe_rxd = DEFAULT_RXD;
820 		}
821 	}
822 
823 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
824 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
825 		device_printf(dev, "RXD config issue, using default!\n");
826 		adapter->num_rx_desc = DEFAULT_RXD;
827 	} else
828 		adapter->num_rx_desc = ixgbe_rxd;
829 
830 	/* Allocate our TX/RX Queues */
831 	if (ixgbe_allocate_queues(adapter)) {
832 		error = ENOMEM;
833 		goto err_out;
834 	}
835 
836 	hw->phy.reset_if_overtemp = TRUE;
837 	error = ixgbe_reset_hw(hw);
838 	hw->phy.reset_if_overtemp = FALSE;
839 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
840 		/*
841 		 * No optics in this port, set up
842 		 * so the timer routine will probe
843 		 * for later insertion.
844 		 */
845 		adapter->sfp_probe = TRUE;
846 		error = IXGBE_SUCCESS;
847 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
848 		device_printf(dev, "Unsupported SFP+ module detected!\n");
849 		error = EIO;
850 		goto err_late;
851 	} else if (error) {
852 		device_printf(dev, "Hardware initialization failed\n");
853 		error = EIO;
854 		goto err_late;
855 	}
856 
857 	/* Make sure we have a good EEPROM before we read from it */
858 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
859 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
860 		error = EIO;
861 		goto err_late;
862 	}
863 
864 	/* Setup OS specific network interface */
865 	if (ixgbe_setup_interface(dev, adapter) != 0)
866 		goto err_late;
867 
868 	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
869 		error = ixgbe_allocate_msix(adapter);
870 	else
871 		error = ixgbe_allocate_legacy(adapter);
872 	if (error)
873 		goto err_late;
874 
875 	error = ixgbe_start_hw(hw);
876 	switch (error) {
877 	case IXGBE_ERR_EEPROM_VERSION:
878 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
879 		break;
880 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
881 		device_printf(dev, "Unsupported SFP+ Module\n");
882 		error = EIO;
883 		goto err_late;
884 	case IXGBE_ERR_SFP_NOT_PRESENT:
885 		device_printf(dev, "No SFP+ Module found\n");
886 		/* falls thru */
887 	default:
888 		break;
889 	}
890 
891 	/* Enable the optics for 82599 SFP+ fiber */
892 	ixgbe_enable_tx_laser(hw);
893 
894 	/* Enable power to the phy. */
895 	ixgbe_set_phy_power(hw, TRUE);
896 
897 	/* Initialize statistics */
898 	ixgbe_update_stats_counters(adapter);
899 
900 	/* Check PCIE slot type/speed/width */
901 	ixgbe_get_slot_info(adapter);
902 
903 	/*
904 	 * Do time init and sysctl init here, but
905 	 * only on the first port of a bypass adapter.
906 	 */
907 	ixgbe_bypass_init(adapter);
908 
909 	/* Set an initial dmac value */
910 	adapter->dmac = 0;
911 	/* Set initial advertised speeds (if applicable) */
912 	adapter->advertise = ixgbe_get_advertise(adapter);
913 
914 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
915 		ixgbe_define_iov_schemas(dev, &error);
916 
917 	/* Add sysctls */
918 	ixgbe_add_device_sysctls(adapter);
919 	ixgbe_add_hw_stats(adapter);
920 
921 	/* For Netmap */
922 	adapter->init_locked = ixgbe_init_locked;
923 	adapter->stop_locked = ixgbe_stop;
924 
925 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
926 		ixgbe_netmap_attach(adapter);
927 
928 	INIT_DEBUGOUT("ixgbe_attach: end");
929 
930 	return (0);
931 
932 err_late:
933 	ixgbe_free_transmit_structures(adapter);
934 	ixgbe_free_receive_structures(adapter);
935 	free(adapter->queues, M_DEVBUF);
936 err_out:
937 	if (adapter->ifp != NULL)
938 		if_free(adapter->ifp);
939 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
940 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
941 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
942 	ixgbe_free_pci_resources(adapter);
943 	free(adapter->mta, M_IXGBE);
944 	IXGBE_CORE_LOCK_DESTROY(adapter);
945 
946 	return (error);
947 } /* ixgbe_attach */
948 
949 /************************************************************************
950  * ixgbe_check_wol_support
951  *
952  *   Checks whether the adapter's ports are capable of
953  *   Wake On LAN by reading the adapter's NVM.
954  *
955  *   Sets each port's hw->wol_enabled value depending
956  *   on the value read here.
957  ************************************************************************/
958 static void
959 ixgbe_check_wol_support(struct adapter *adapter)
960 {
961 	struct ixgbe_hw *hw = &adapter->hw;
962 	u16             dev_caps = 0;
963 
964 	/* Find out WoL support for port */
965 	adapter->wol_support = hw->wol_enabled = 0;
966 	ixgbe_get_device_caps(hw, &dev_caps);
967 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
968 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
969 	     hw->bus.func == 0))
970 		adapter->wol_support = hw->wol_enabled = 1;
971 
972 	/* Save initial wake up filter configuration */
973 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
974 
975 	return;
976 } /* ixgbe_check_wol_support */
977 
978 /************************************************************************
979  * ixgbe_setup_interface
980  *
981  *   Setup networking device structure and register an interface.
982  ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* Allocate the ifnet; caller treats any nonzero return as failure */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixgbe_get_counter);
#endif
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Pick the transmit path: the legacy if_start/ifq path when
	 * IXGBE_FEATURE_LEGACY_TX is enabled, otherwise the multiqueue
	 * if_transmit/buf_ring path.  The ixgbe_start_locked and
	 * ixgbe_ring_empty function pointers are file-scope globals used
	 * elsewhere in the driver, so both modes install matching pairs.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
		ifp->if_start = ixgbe_legacy_start;
		IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
		ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
		IFQ_SET_READY(&ifp->if_snd);
		ixgbe_start_locked = ixgbe_legacy_start_locked;
		ixgbe_ring_empty = ixgbe_legacy_ring_empty;
	} else {
		ifp->if_transmit = ixgbe_mq_start;
		ifp->if_qflush = ixgbe_qflush;
		ixgbe_start_locked = ixgbe_mq_start_locked;
		ixgbe_ring_empty = drbr_empty;
	}

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	                     |  IFCAP_HWCSUM_IPV6
	                     |  IFCAP_TSO
	                     |  IFCAP_LRO
	                     |  IFCAP_VLAN_HWTAGGING
	                     |  IFCAP_VLAN_HWTSO
	                     |  IFCAP_VLAN_HWCSUM
	                     |  IFCAP_JUMBO_MTU
	                     |  IFCAP_VLAN_MTU
	                     |  IFCAP_HWSTATS;

	/* Enable the above capabilities by default */
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	/* Advertised as a capability but deliberately NOT in if_capenable */
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
1073 
1074 #if __FreeBSD_version >= 1100036
1075 /************************************************************************
1076  * ixgbe_get_counter
1077  ************************************************************************/
1078 static uint64_t
1079 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
1080 {
1081 	struct adapter *adapter;
1082 	struct tx_ring *txr;
1083 	uint64_t       rv;
1084 
1085 	adapter = if_getsoftc(ifp);
1086 
1087 	switch (cnt) {
1088 	case IFCOUNTER_IPACKETS:
1089 		return (adapter->ipackets);
1090 	case IFCOUNTER_OPACKETS:
1091 		return (adapter->opackets);
1092 	case IFCOUNTER_IBYTES:
1093 		return (adapter->ibytes);
1094 	case IFCOUNTER_OBYTES:
1095 		return (adapter->obytes);
1096 	case IFCOUNTER_IMCASTS:
1097 		return (adapter->imcasts);
1098 	case IFCOUNTER_OMCASTS:
1099 		return (adapter->omcasts);
1100 	case IFCOUNTER_COLLISIONS:
1101 		return (0);
1102 	case IFCOUNTER_IQDROPS:
1103 		return (adapter->iqdrops);
1104 	case IFCOUNTER_OQDROPS:
1105 		rv = 0;
1106 		txr = adapter->tx_rings;
1107 		for (int i = 0; i < adapter->num_queues; i++, txr++)
1108 			rv += txr->br->br_drops;
1109 		return (rv);
1110 	case IFCOUNTER_IERRORS:
1111 		return (adapter->ierrors);
1112 	default:
1113 		return (if_get_counter_default(ifp, cnt));
1114 	}
1115 } /* ixgbe_get_counter */
1116 #endif
1117 
1118 /************************************************************************
1119  * ixgbe_add_media_types
1120  ************************************************************************/
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	u64             layer;

	/* Translate the PHY-layer capability bitmask into ifmedia entries */
	layer = adapter->phy_layer;

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	/* Multispeed fiber modules additionally support the 1G rate */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	/*
	 * Older ifmedia without the extended types: map backplane media
	 * to the closest existing define and tell the operator about it.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	/* No ifmedia define exists for 1000baseBX; report it only */
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */
1203 
1204 /************************************************************************
1205  * ixgbe_is_sfp
1206  ************************************************************************/
1207 static inline bool
1208 ixgbe_is_sfp(struct ixgbe_hw *hw)
1209 {
1210 	switch (hw->mac.type) {
1211 	case ixgbe_mac_82598EB:
1212 		if (hw->phy.type == ixgbe_phy_nl)
1213 			return TRUE;
1214 		return FALSE;
1215 	case ixgbe_mac_82599EB:
1216 		switch (hw->mac.ops.get_media_type(hw)) {
1217 		case ixgbe_media_type_fiber:
1218 		case ixgbe_media_type_fiber_qsfp:
1219 			return TRUE;
1220 		default:
1221 			return FALSE;
1222 		}
1223 	case ixgbe_mac_X550EM_x:
1224 	case ixgbe_mac_X550EM_a:
1225 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1226 			return TRUE;
1227 		return FALSE;
1228 	default:
1229 		return FALSE;
1230 	}
1231 } /* ixgbe_is_sfp */
1232 
1233 /************************************************************************
1234  * ixgbe_config_link
1235  ************************************************************************/
1236 static void
1237 ixgbe_config_link(struct adapter *adapter)
1238 {
1239 	struct ixgbe_hw *hw = &adapter->hw;
1240 	u32             autoneg, err = 0;
1241 	bool            sfp, negotiate;
1242 
1243 	sfp = ixgbe_is_sfp(hw);
1244 
1245 	if (sfp) {
1246 		if (hw->phy.multispeed_fiber) {
1247 			hw->mac.ops.setup_sfp(hw);
1248 			ixgbe_enable_tx_laser(hw);
1249 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1250 		} else
1251 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1252 	} else {
1253 		if (hw->mac.ops.check_link)
1254 			err = ixgbe_check_link(hw, &adapter->link_speed,
1255 			    &adapter->link_up, FALSE);
1256 		if (err)
1257 			goto out;
1258 		autoneg = hw->phy.autoneg_advertised;
1259 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1260 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1261 			    &negotiate);
1262 		if (err)
1263 			goto out;
1264 		if (hw->mac.ops.setup_link)
1265 			err = hw->mac.ops.setup_link(hw, autoneg,
1266 			    adapter->link_up);
1267 	}
1268 out:
1269 
1270 	return;
1271 } /* ixgbe_config_link */
1272 
1273 /************************************************************************
1274  * ixgbe_update_stats_counters - Update board statistics counters.
1275  ************************************************************************/
1276 static void
1277 ixgbe_update_stats_counters(struct adapter *adapter)
1278 {
1279 	struct ixgbe_hw       *hw = &adapter->hw;
1280 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1281 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1282 	u64                   total_missed_rx = 0;
1283 
1284 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1285 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1286 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1287 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1288 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1289 
1290 	for (int i = 0; i < 16; i++) {
1291 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1292 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1293 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1294 	}
1295 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1296 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1297 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1298 
1299 	/* Hardware workaround, gprc counts missed packets */
1300 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1301 	stats->gprc -= missed_rx;
1302 
1303 	if (hw->mac.type != ixgbe_mac_82598EB) {
1304 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1305 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1306 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1307 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1308 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1309 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1310 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1311 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1312 	} else {
1313 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1314 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1315 		/* 82598 only has a counter in the high register */
1316 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1317 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1318 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1319 	}
1320 
1321 	/*
1322 	 * Workaround: mprc hardware is incorrectly counting
1323 	 * broadcasts, so for now we subtract those.
1324 	 */
1325 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1326 	stats->bprc += bprc;
1327 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1328 	if (hw->mac.type == ixgbe_mac_82598EB)
1329 		stats->mprc -= bprc;
1330 
1331 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1332 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1333 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1334 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1335 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1336 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1337 
1338 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1339 	stats->lxontxc += lxon;
1340 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1341 	stats->lxofftxc += lxoff;
1342 	total = lxon + lxoff;
1343 
1344 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1345 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1346 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1347 	stats->gptc -= total;
1348 	stats->mptc -= total;
1349 	stats->ptc64 -= total;
1350 	stats->gotc -= total * ETHER_MIN_LEN;
1351 
1352 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1353 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1354 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1355 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1356 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1357 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1358 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1359 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1360 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1361 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1362 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1363 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1364 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1365 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1366 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1367 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1368 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1369 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1370 	/* Only read FCOE on 82599 */
1371 	if (hw->mac.type != ixgbe_mac_82598EB) {
1372 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1373 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1374 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1375 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1376 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1377 	}
1378 
1379 	/* Fill out the OS statistics structure */
1380 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1381 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1382 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1383 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1384 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1385 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1386 	IXGBE_SET_COLLISIONS(adapter, 0);
1387 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1388 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1389 } /* ixgbe_update_stats_counters */
1390 
1391 /************************************************************************
1392  * ixgbe_add_hw_stats
1393  *
1394  *   Add sysctl variables, one per statistic, to the system.
1395  ************************************************************************/
static void
ixgbe_add_hw_stats(struct adapter *adapter)
{
	device_t               dev = adapter->dev;
	struct tx_ring         *txr = adapter->tx_rings;
	struct rx_ring         *rxr = adapter->rx_rings;
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
	struct sysctl_oid      *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char                   namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
	    CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");

	/* Per-queue TX nodes: dev.ix.N.queueM.* */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		/*
		 * NOTE(review): arg2 here is sizeof(&adapter->queues[i]),
		 * i.e. the size of a pointer, not of the queue struct —
		 * harmless if the handler ignores arg2, but looks
		 * unintentional; confirm against the handler.
		 */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
		    sizeof(&adapter->queues[i]),
		    ixgbe_sysctl_interrupt_rate_handler, "IU",
		    "Interrupt Rate");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(adapter->queues[i].irqs),
		    "irqs on this queue");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &txr->tso_tx, "TSO");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
		    CTLFLAG_RD, &txr->no_tx_dma_setup,
		    "Driver tx dma failure in xmit");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &txr->no_desc_avail,
		    "Queue No Descriptor Available");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &txr->total_packets,
		    "Queue Packets Transmitted");
		/*
		 * NOTE(review): txr->br is only allocated on the
		 * multiqueue transmit path; with LEGACY_TX enabled this
		 * registers a sysctl pointing into a NULL buf_ring —
		 * confirm reads are safe in that mode.
		 */
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
		    CTLFLAG_RD, &txr->br->br_drops,
		    "Packets dropped in buf_ring");
	}

	/* Per-queue RX nodes (reuse the same queueM name space) */
	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		struct lro_ctrl *lro = &rxr->lro;

		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
		SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
		SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
	}

	/* MAC stats get their own sub node */

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
	    CTLFLAG_RD, NULL, "MAC Statistics");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");

	/* Flow Control stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");

	/* Packet Reception Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
	    CTLFLAG_RD, &stats->xec, "Checksum Errors");

	/* Packet Transmission Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */
1585 
1586 /************************************************************************
1587  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1588  *
1589  *   Retrieves the TDH value from the hardware
1590  ************************************************************************/
1591 static int
1592 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1593 {
1594 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1595 	int            error;
1596 	unsigned int   val;
1597 
1598 	if (!txr)
1599 		return (0);
1600 
1601 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1602 	error = sysctl_handle_int(oidp, &val, 0, req);
1603 	if (error || !req->newptr)
1604 		return error;
1605 
1606 	return (0);
1607 } /* ixgbe_sysctl_tdh_handler */
1608 
1609 /************************************************************************
1610  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1611  *
1612  *   Retrieves the TDT value from the hardware
1613  ************************************************************************/
1614 static int
1615 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1616 {
1617 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1618 	int            error;
1619 	unsigned int   val;
1620 
1621 	if (!txr)
1622 		return (0);
1623 
1624 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1625 	error = sysctl_handle_int(oidp, &val, 0, req);
1626 	if (error || !req->newptr)
1627 		return error;
1628 
1629 	return (0);
1630 } /* ixgbe_sysctl_tdt_handler */
1631 
1632 /************************************************************************
1633  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1634  *
1635  *   Retrieves the RDH value from the hardware
1636  ************************************************************************/
1637 static int
1638 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1639 {
1640 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1641 	int            error;
1642 	unsigned int   val;
1643 
1644 	if (!rxr)
1645 		return (0);
1646 
1647 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1648 	error = sysctl_handle_int(oidp, &val, 0, req);
1649 	if (error || !req->newptr)
1650 		return error;
1651 
1652 	return (0);
1653 } /* ixgbe_sysctl_rdh_handler */
1654 
1655 /************************************************************************
1656  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1657  *
1658  *   Retrieves the RDT value from the hardware
1659  ************************************************************************/
1660 static int
1661 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1662 {
1663 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1664 	int            error;
1665 	unsigned int   val;
1666 
1667 	if (!rxr)
1668 		return (0);
1669 
1670 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1671 	error = sysctl_handle_int(oidp, &val, 0, req);
1672 	if (error || !req->newptr)
1673 		return error;
1674 
1675 	return (0);
1676 } /* ixgbe_sysctl_rdt_handler */
1677 
1678 /************************************************************************
1679  * ixgbe_register_vlan
1680  *
1681  *   Run via vlan config EVENT, it enables us to use the
1682  *   HW Filter table since we can get the vlan id. This
1683  *   just creates the entry in the soft version of the
1684  *   VFTA, init will repopulate the real table.
1685  ************************************************************************/
1686 static void
1687 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1688 {
1689 	struct adapter *adapter = ifp->if_softc;
1690 	u16            index, bit;
1691 
1692 	if (ifp->if_softc != arg)   /* Not our event */
1693 		return;
1694 
1695 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1696 		return;
1697 
1698 	IXGBE_CORE_LOCK(adapter);
1699 	index = (vtag >> 5) & 0x7F;
1700 	bit = vtag & 0x1F;
1701 	adapter->shadow_vfta[index] |= (1 << bit);
1702 	++adapter->num_vlans;
1703 	ixgbe_setup_vlan_hw_support(adapter);
1704 	IXGBE_CORE_UNLOCK(adapter);
1705 } /* ixgbe_register_vlan */
1706 
1707 /************************************************************************
1708  * ixgbe_unregister_vlan
1709  *
1710  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1711  ************************************************************************/
1712 static void
1713 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1714 {
1715 	struct adapter *adapter = ifp->if_softc;
1716 	u16            index, bit;
1717 
1718 	if (ifp->if_softc != arg)
1719 		return;
1720 
1721 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1722 		return;
1723 
1724 	IXGBE_CORE_LOCK(adapter);
1725 	index = (vtag >> 5) & 0x7F;
1726 	bit = vtag & 0x1F;
1727 	adapter->shadow_vfta[index] &= ~(1 << bit);
1728 	--adapter->num_vlans;
1729 	/* Re-init to load the changes */
1730 	ixgbe_setup_vlan_hw_support(adapter);
1731 	IXGBE_CORE_UNLOCK(adapter);
1732 } /* ixgbe_unregister_vlan */
1733 
1734 /************************************************************************
1735  * ixgbe_setup_vlan_hw_support
1736  ************************************************************************/
1737 static void
1738 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
1739 {
1740 	struct ifnet    *ifp = adapter->ifp;
1741 	struct ixgbe_hw *hw = &adapter->hw;
1742 	struct rx_ring  *rxr;
1743 	int             i;
1744 	u32             ctrl;
1745 
1746 
1747 	/*
1748 	 * We get here thru init_locked, meaning
1749 	 * a soft reset, this has already cleared
1750 	 * the VFTA and other state, so if there
1751 	 * have been no vlan's registered do nothing.
1752 	 */
1753 	if (adapter->num_vlans == 0)
1754 		return;
1755 
1756 	/* Setup the queues for vlans */
1757 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1758 		for (i = 0; i < adapter->num_queues; i++) {
1759 			rxr = &adapter->rx_rings[i];
1760 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
1761 			if (hw->mac.type != ixgbe_mac_82598EB) {
1762 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1763 				ctrl |= IXGBE_RXDCTL_VME;
1764 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1765 			}
1766 			rxr->vtag_strip = TRUE;
1767 		}
1768 	}
1769 
1770 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1771 		return;
1772 	/*
1773 	 * A soft reset zero's out the VFTA, so
1774 	 * we need to repopulate it now.
1775 	 */
1776 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1777 		if (adapter->shadow_vfta[i] != 0)
1778 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1779 			    adapter->shadow_vfta[i]);
1780 
1781 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1782 	/* Enable the Filter Table if enabled */
1783 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1784 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1785 		ctrl |= IXGBE_VLNCTRL_VFE;
1786 	}
1787 	if (hw->mac.type == ixgbe_mac_82598EB)
1788 		ctrl |= IXGBE_VLNCTRL_VME;
1789 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1790 } /* ixgbe_setup_vlan_hw_support */
1791 
/************************************************************************
 * ixgbe_get_slot_info
 *
 *   Get the width and transaction speed of
 *   the slot this adapter is plugged into.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t              dev = adapter->dev;
	struct ixgbe_hw       *hw = &adapter->hw;
	u32                   offset;
	u16                   link;
	int                   bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	/* First hop: two levels up from the NIC function */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Second hop: presumably past the internal PCIe switch — TODO confirm topology */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	     "Unknown"));

	/* Warn when the slot is too slow/narrow for the adapter */
	if (bus_info_valid) {
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
1889 
1890 /************************************************************************
1891  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
1892  ************************************************************************/
1893 static inline void
1894 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1895 {
1896 	struct ixgbe_hw *hw = &adapter->hw;
1897 	u64             queue = (u64)(1 << vector);
1898 	u32             mask;
1899 
1900 	if (hw->mac.type == ixgbe_mac_82598EB) {
1901 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1902 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1903 	} else {
1904 		mask = (queue & 0xFFFFFFFF);
1905 		if (mask)
1906 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1907 		mask = (queue >> 32);
1908 		if (mask)
1909 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1910 	}
1911 } /* ixgbe_enable_queue */
1912 
1913 /************************************************************************
1914  * ixgbe_disable_queue
1915  ************************************************************************/
1916 static inline void
1917 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1918 {
1919 	struct ixgbe_hw *hw = &adapter->hw;
1920 	u64             queue = (u64)(1 << vector);
1921 	u32             mask;
1922 
1923 	if (hw->mac.type == ixgbe_mac_82598EB) {
1924 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1925 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1926 	} else {
1927 		mask = (queue & 0xFFFFFFFF);
1928 		if (mask)
1929 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1930 		mask = (queue >> 32);
1931 		if (mask)
1932 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1933 	}
1934 } /* ixgbe_disable_queue */
1935 
/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Services TX/RX work for one queue pair, then either reschedules
 *   itself via taskqueue (more RX pending) or re-enables the vector.
 ************************************************************************/
void
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring  *txr = que->txr;
	struct rx_ring  *rxr = que->rxr;
	bool            more;
	u32             newitr = 0;


	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Mask this vector; re-enabled at the bottom unless more RX remains */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/* Drain any frames queued while the ring was busy */
	if (!ixgbe_ring_empty(ifp, txr->br))
		ixgbe_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == FALSE)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr approximates the average frame size seen this interval */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* 82598: replicate the interval into the high half; others: set WDIS */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		newitr |= newitr << 16;
	else
		newitr |= IXGBE_EITR_CNT_WDIS;

	/* save for next interrupt; written to EITR at the top of that run */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return;
} /* ixgbe_msix_que */
2023 
/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter  *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int             layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	/* Core lock held across the link refresh and the entire report */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report media valid but inactive */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Map the active physical layer + link speed to an ifmedia subtype */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
#ifndef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
#endif

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2189 
/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediopt option with ifconfig.
 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter   *adapter = ifp->if_softc;
	struct ifmedia   *ifm = &adapter->media;
	struct ixgbe_hw  *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is fixed; nothing to change */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	/* Build the link-speed mask to advertise for the chosen subtype */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
		case IFM_10G_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_10G_LRM:
		case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
		case IFM_10G_SR: /* KR, too */
		case IFM_10G_CX4: /* KX4 */
#else
		case IFM_10G_KR:
		case IFM_10G_KX4:
#endif
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
#ifndef IFM_ETH_XTYPE
		case IFM_1000_CX: /* KX */
#else
		case IFM_1000_KX:
#endif
		case IFM_1000_LX:
		case IFM_1000_SX:
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_1000_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_10G_TWINAX:
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_100_TX:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			break;
		case IFM_10_T:
			speed |= IXGBE_LINK_SPEED_10_FULL;
			break;
		default:
			goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* Record advertised speeds as a bitmask: 1=100M, 2=1G, 4=10G, 8=10M */
	adapter->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
2277 
/************************************************************************
 * ixgbe_set_promisc
 *
 *   Program FCTRL's unicast/multicast promiscuous bits (UPE/MPE) to
 *   match the interface's IFF_PROMISC / IFF_ALLMULTI flags.
 ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int          mcnt = 0;
	u32          rctl;

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	rctl &= (~IXGBE_FCTRL_UPE);
	/*
	 * Count multicast addresses (capped at the hardware filter
	 * limit) to decide whether MPE can be safely cleared.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		struct ifmultiaddr *ifma;
#if __FreeBSD_version < 800000
		IF_ADDR_LOCK(ifp);
#else
		if_maddr_rlock(ifp);
#endif
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
#if __FreeBSD_version < 800000
		IF_ADDR_UNLOCK(ifp);
#else
		if_maddr_runlock(ifp);
#endif
	}
	/* All addresses fit in the filter table: no need for MPE */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* Re-apply promiscuous / all-multicast on top of the base setting */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
2325 
/************************************************************************
 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 *
 *   Handles all non-queue interrupt causes (link, FDIR, ECC, overtemp,
 *   VF mailbox, SFP module/MSF, fan failure, external PHY), deferring
 *   the heavy lifting to taskqueue tasks.
 ************************************************************************/
static void
ixgbe_msix_link(void *arg)
{
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             eicr, eicr_mask;
	s32             retval;

	++adapter->link_irq;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/* NOTE(review): cause is read from EICS, not EICR — confirm against datasheet */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		taskqueue_enqueue(adapter->tq, &adapter->link_task);
	}

	/* Causes below only exist on 82599 and later MACs */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!!  Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via GPI SDP0 */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor (TS) bit */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
		}

		/* Multispeed-fiber link setup (82599 only) */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
} /* ixgbe_msix_link */
2443 
2444 /************************************************************************
2445  * ixgbe_sysctl_interrupt_rate_handler
2446  ************************************************************************/
2447 static int
2448 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2449 {
2450 	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
2451 	int             error;
2452 	unsigned int    reg, usec, rate;
2453 
2454 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2455 	usec = ((reg & 0x0FF8) >> 3);
2456 	if (usec > 0)
2457 		rate = 500000 / usec;
2458 	else
2459 		rate = 0;
2460 	error = sysctl_handle_int(oidp, &rate, 0, req);
2461 	if (error || !req->newptr)
2462 		return error;
2463 	reg &= ~0xfff; /* default, no limitation */
2464 	ixgbe_max_interrupt_rate = 0;
2465 	if (rate > 0 && rate < 500000) {
2466 		if (rate < 1000)
2467 			rate = 1000;
2468 		ixgbe_max_interrupt_rate = rate;
2469 		reg |= ((4000000/rate) & 0xff8);
2470 	}
2471 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2472 
2473 	return (0);
2474 } /* ixgbe_sysctl_interrupt_rate_handler */
2475 
2476 /************************************************************************
2477  * ixgbe_add_device_sysctls
2478  ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t               dev = adapter->dev;
	struct ixgbe_hw        *hw = &adapter->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;

	/* All nodes hang off the device's own sysctl tree (dev.ix.N). */
	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);

	/* Adaptive interrupt moderation, seeded from the loader tunable. */
	adapter->enable_aim = ixgbe_enable_aim;
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &adapter->enable_aim, 1, "Interrupt Moderation");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
	    IXGBE_SYSCTL_DESC_ADV_SPEED);

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
	    "I", "PCI Power State");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
		    "I", "DMA Coalesce");

	/*
	 * for WoL-capable devices
	 * NOTE(review): gated on the X550EM_X_10G_T device id only — confirm
	 * other WoL-capable parts are intentionally excluded here.
	 */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
		    "I", "Enable/Disable Wake Up Filters");
	}

	/* for X552/X557-AT devices (external PHY with a temperature sensor) */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
		    CTLFLAG_RD, NULL, "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
		    "I", "Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
		    ixgbe_sysctl_phy_overtemp_occurred, "I",
		    "External PHY High Temperature Event Occurred");
	}

	/* Energy Efficient Ethernet control, only when the MAC supports it. */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
	}
} /* ixgbe_add_device_sysctls */
2554 
2555 /************************************************************************
2556  * ixgbe_allocate_pci_resources
2557  ************************************************************************/
2558 static int
2559 ixgbe_allocate_pci_resources(struct adapter *adapter)
2560 {
2561 	device_t dev = adapter->dev;
2562 	int      rid;
2563 
2564 	rid = PCIR_BAR(0);
2565 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2566 	    RF_ACTIVE);
2567 
2568 	if (!(adapter->pci_mem)) {
2569 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2570 		return (ENXIO);
2571 	}
2572 
2573 	/* Save bus_space values for READ/WRITE_REG macros */
2574 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2575 	adapter->osdep.mem_bus_space_handle =
2576 	    rman_get_bushandle(adapter->pci_mem);
2577 	/* Set hw values for shared code */
2578 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2579 
2580 	return (0);
2581 } /* ixgbe_allocate_pci_resources */
2582 
2583 /************************************************************************
2584  * ixgbe_detach - Device removal routine
2585  *
2586  *   Called when the driver is being removed.
2587  *   Stops the adapter and deallocates all the resources
2588  *   that were allocated for driver operation.
2589  *
2590  *   return 0 on success, positive on failure
2591  ************************************************************************/
static int
ixgbe_detach(device_t dev)
{
	struct adapter  *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring  *txr = adapter->tx_rings;
	u32             ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Refuse to detach while VFs are still attached. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free each queue's taskqueue before freeing the rings. */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
			if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
				taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			taskqueue_drain(adapter->tq, &adapter->mbx_task);
		taskqueue_drain(adapter->tq, &adapter->phy_task);
		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
			taskqueue_drain(adapter->tq, &adapter->fdir_task);
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Wait for the watchdog/link timer to finish its last pass. */
	callout_drain(&adapter->timer);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	/* Release bus resources, then the software state. */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_IXGBE);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
2670 
2671 /************************************************************************
2672  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2673  *
2674  *   Prepare the adapter/port for LPLU and/or WoL
2675  ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	s32             error = 0;

	/* Caller must hold the core lock (detach/suspend/shutdown paths). */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/*
		 * X550EM baseT adapters need a special LPLU flow: stop the
		 * port with PHY resets suppressed, then ask the PHY to enter
		 * its low-power link-up state.
		 */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev, "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	/* Returns 0, or the shared-code error from enter_lplu(). */
	return error;
} /* ixgbe_setup_low_power_mode */
2722 
2723 /************************************************************************
2724  * ixgbe_shutdown - Shutdown entry point
2725  ************************************************************************/
2726 static int
2727 ixgbe_shutdown(device_t dev)
2728 {
2729 	struct adapter *adapter = device_get_softc(dev);
2730 	int            error = 0;
2731 
2732 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2733 
2734 	IXGBE_CORE_LOCK(adapter);
2735 	error = ixgbe_setup_low_power_mode(adapter);
2736 	IXGBE_CORE_UNLOCK(adapter);
2737 
2738 	return (error);
2739 } /* ixgbe_shutdown */
2740 
2741 /************************************************************************
2742  * ixgbe_suspend
2743  *
2744  *   From D0 to D3
2745  ************************************************************************/
2746 static int
2747 ixgbe_suspend(device_t dev)
2748 {
2749 	struct adapter *adapter = device_get_softc(dev);
2750 	int            error = 0;
2751 
2752 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2753 
2754 	IXGBE_CORE_LOCK(adapter);
2755 
2756 	error = ixgbe_setup_low_power_mode(adapter);
2757 
2758 	IXGBE_CORE_UNLOCK(adapter);
2759 
2760 	return (error);
2761 } /* ixgbe_suspend */
2762 
2763 /************************************************************************
2764  * ixgbe_resume
2765  *
2766  *   From D3 to D0
2767  ************************************************************************/
2768 static int
2769 ixgbe_resume(device_t dev)
2770 {
2771 	struct adapter  *adapter = device_get_softc(dev);
2772 	struct ifnet    *ifp = adapter->ifp;
2773 	struct ixgbe_hw *hw = &adapter->hw;
2774 	u32             wus;
2775 
2776 	INIT_DEBUGOUT("ixgbe_resume: begin");
2777 
2778 	IXGBE_CORE_LOCK(adapter);
2779 
2780 	/* Read & clear WUS register */
2781 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2782 	if (wus)
2783 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2784 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2785 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2786 	/* And clear WUFC until next low-power transition */
2787 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2788 
2789 	/*
2790 	 * Required after D3->D0 transition;
2791 	 * will re-advertise all previous advertised speeds
2792 	 */
2793 	if (ifp->if_flags & IFF_UP)
2794 		ixgbe_init_locked(adapter);
2795 
2796 	IXGBE_CORE_UNLOCK(adapter);
2797 
2798 	return (0);
2799 } /* ixgbe_resume */
2800 
2801 /************************************************************************
2802  * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
2803  *
2804  *   Takes the ifnet's if_capenable flags (e.g. set by the user using
2805  *   ifconfig) and indicates to the OS via the ifnet's if_hwassist
2806  *   field what mbuf offload flags the driver will understand.
2807  ************************************************************************/
2808 static void
2809 ixgbe_set_if_hwassist(struct adapter *adapter)
2810 {
2811 	struct ifnet *ifp = adapter->ifp;
2812 
2813 	ifp->if_hwassist = 0;
2814 #if __FreeBSD_version >= 1000000
2815 	if (ifp->if_capenable & IFCAP_TSO4)
2816 		ifp->if_hwassist |= CSUM_IP_TSO;
2817 	if (ifp->if_capenable & IFCAP_TSO6)
2818 		ifp->if_hwassist |= CSUM_IP6_TSO;
2819 	if (ifp->if_capenable & IFCAP_TXCSUM) {
2820 		ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
2821 		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2822 			ifp->if_hwassist |= CSUM_IP_SCTP;
2823 	}
2824 	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
2825 		ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
2826 		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2827 			ifp->if_hwassist |= CSUM_IP6_SCTP;
2828 	}
2829 #else
2830 	if (ifp->if_capenable & IFCAP_TSO)
2831 		ifp->if_hwassist |= CSUM_TSO;
2832 	if (ifp->if_capenable & IFCAP_TXCSUM) {
2833 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
2834 		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2835 			ifp->if_hwassist |= CSUM_SCTP;
2836 	}
2837 #endif
2838 } /* ixgbe_set_if_hwassist */
2839 
2840 /************************************************************************
2841  * ixgbe_init_locked - Init entry point
2842  *
2843  *   Used in two ways: It is used by the stack as an init
2844  *   entry point in network interface structure. It is also
2845  *   used by the driver as a hw/sw initialization routine to
2846  *   get to a consistent state.
2847  *
2848  *   return 0 on success, positive on failure
2849  ************************************************************************/
void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet    *ifp = adapter->ifp;
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring  *txr;
	struct rx_ring  *rxr;
	u32             txdctl, mhadd;
	u32             rxdctl, rxctrl;
	u32             ctrl_ext;
	int             err = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Force a clean stop before reprogramming the hardware. */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_iov(adapter);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/*
		 * Poll until the ring reports enabled.
		 * NOTE(review): 'j' is not reset per ring, so the 10 x 1ms
		 * poll budget is shared across all rings — confirm this is
		 * intentional and not a missing "j = 0".
		 */
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the watchdog/link timer. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
} /* ixgbe_init_locked */
3070 
3071 /************************************************************************
3072  * ixgbe_init
3073  ************************************************************************/
static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	/* Take the core lock and let the locked variant do the work. */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_init */
3085 
3086 /************************************************************************
3087  * ixgbe_set_ivar
3088  *
3089  *   Setup the correct IVAR register for a particular MSI-X interrupt
3090  *     (yes this is all very magic and confusing :)
3091  *    - entry is the register array entry
3092  *    - vector is the MSI-X vector for this queue
3093  *    - type is RX/TX/MISC
3094  ************************************************************************/
3095 static void
3096 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3097 {
3098 	struct ixgbe_hw *hw = &adapter->hw;
3099 	u32 ivar, index;
3100 
3101 	vector |= IXGBE_IVAR_ALLOC_VAL;
3102 
3103 	switch (hw->mac.type) {
3104 
3105 	case ixgbe_mac_82598EB:
3106 		if (type == -1)
3107 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3108 		else
3109 			entry += (type * 64);
3110 		index = (entry >> 2) & 0x1F;
3111 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3112 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3113 		ivar |= (vector << (8 * (entry & 0x3)));
3114 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3115 		break;
3116 
3117 	case ixgbe_mac_82599EB:
3118 	case ixgbe_mac_X540:
3119 	case ixgbe_mac_X550:
3120 	case ixgbe_mac_X550EM_x:
3121 	case ixgbe_mac_X550EM_a:
3122 		if (type == -1) { /* MISC IVAR */
3123 			index = (entry & 1) * 8;
3124 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3125 			ivar &= ~(0xFF << index);
3126 			ivar |= (vector << index);
3127 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3128 		} else {          /* RX/TX IVARS */
3129 			index = (16 * (entry & 1)) + (8 * type);
3130 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3131 			ivar &= ~(0xFF << index);
3132 			ivar |= (vector << index);
3133 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3134 		}
3135 
3136 	default:
3137 		break;
3138 	}
3139 } /* ixgbe_set_ivar */
3140 
3141 /************************************************************************
3142  * ixgbe_configure_ivars
3143  ************************************************************************/
3144 static void
3145 ixgbe_configure_ivars(struct adapter *adapter)
3146 {
3147 	struct ix_queue *que = adapter->queues;
3148 	u32             newitr;
3149 
3150 	if (ixgbe_max_interrupt_rate > 0)
3151 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3152 	else {
3153 		/*
3154 		 * Disable DMA coalescing if interrupt moderation is
3155 		 * disabled.
3156 		 */
3157 		adapter->dmac = 0;
3158 		newitr = 0;
3159 	}
3160 
3161 	for (int i = 0; i < adapter->num_queues; i++, que++) {
3162 		struct rx_ring *rxr = &adapter->rx_rings[i];
3163 		struct tx_ring *txr = &adapter->tx_rings[i];
3164 		/* First the RX queue entry */
3165 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3166 		/* ... and the TX */
3167 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3168 		/* Set an Initial EITR value */
3169 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3170 	}
3171 
3172 	/* For the Link interrupt */
3173 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3174 } /* ixgbe_configure_ivars */
3175 
3176 /************************************************************************
3177  * ixgbe_config_gpie
3178  ************************************************************************/
3179 static void
3180 ixgbe_config_gpie(struct adapter *adapter)
3181 {
3182 	struct ixgbe_hw *hw = &adapter->hw;
3183 	u32             gpie;
3184 
3185 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3186 
3187 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3188 		/* Enable Enhanced MSI-X mode */
3189 		gpie |= IXGBE_GPIE_MSIX_MODE
3190 		     |  IXGBE_GPIE_EIAME
3191 		     |  IXGBE_GPIE_PBA_SUPPORT
3192 		     |  IXGBE_GPIE_OCD;
3193 	}
3194 
3195 	/* Fan Failure Interrupt */
3196 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3197 		gpie |= IXGBE_SDP1_GPIEN;
3198 
3199 	/* Thermal Sensor Interrupt */
3200 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3201 		gpie |= IXGBE_SDP0_GPIEN_X540;
3202 
3203 	/* Link detection */
3204 	switch (hw->mac.type) {
3205 	case ixgbe_mac_82599EB:
3206 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3207 		break;
3208 	case ixgbe_mac_X550EM_x:
3209 	case ixgbe_mac_X550EM_a:
3210 		gpie |= IXGBE_SDP0_GPIEN_X540;
3211 		break;
3212 	default:
3213 		break;
3214 	}
3215 
3216 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3217 
3218 	return;
3219 } /* ixgbe_config_gpie */
3220 
3221 /************************************************************************
3222  * ixgbe_config_delay_values
3223  *
3224  *   Requires adapter->max_frame_size to be set.
3225  ************************************************************************/
3226 static void
3227 ixgbe_config_delay_values(struct adapter *adapter)
3228 {
3229 	struct ixgbe_hw *hw = &adapter->hw;
3230 	u32             rxpb, frame, size, tmp;
3231 
3232 	frame = adapter->max_frame_size;
3233 
3234 	/* Calculate High Water */
3235 	switch (hw->mac.type) {
3236 	case ixgbe_mac_X540:
3237 	case ixgbe_mac_X550:
3238 	case ixgbe_mac_X550EM_x:
3239 	case ixgbe_mac_X550EM_a:
3240 		tmp = IXGBE_DV_X540(frame, frame);
3241 		break;
3242 	default:
3243 		tmp = IXGBE_DV(frame, frame);
3244 		break;
3245 	}
3246 	size = IXGBE_BT2KB(tmp);
3247 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3248 	hw->fc.high_water[0] = rxpb - size;
3249 
3250 	/* Now calculate Low Water */
3251 	switch (hw->mac.type) {
3252 	case ixgbe_mac_X540:
3253 	case ixgbe_mac_X550:
3254 	case ixgbe_mac_X550EM_x:
3255 	case ixgbe_mac_X550EM_a:
3256 		tmp = IXGBE_LOW_DV_X540(frame);
3257 		break;
3258 	default:
3259 		tmp = IXGBE_LOW_DV(frame);
3260 		break;
3261 	}
3262 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3263 
3264 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3265 	hw->fc.send_xon = TRUE;
3266 } /* ixgbe_config_delay_values */
3267 
3268 /************************************************************************
3269  * ixgbe_set_multi - Multicast Update
3270  *
3271  *   Called whenever multicast address list is updated.
3272  ************************************************************************/
3273 static void
3274 ixgbe_set_multi(struct adapter *adapter)
3275 {
3276 	struct ifmultiaddr   *ifma;
3277 	struct ixgbe_mc_addr *mta;
3278 	struct ifnet         *ifp = adapter->ifp;
3279 	u8                   *update_ptr;
3280 	int                  mcnt = 0;
3281 	u32                  fctrl;
3282 
3283 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
3284 
3285 	mta = adapter->mta;
3286 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3287 
3288 #if __FreeBSD_version < 800000
3289 	IF_ADDR_LOCK(ifp);
3290 #else
3291 	if_maddr_rlock(ifp);
3292 #endif
3293 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3294 		if (ifma->ifma_addr->sa_family != AF_LINK)
3295 			continue;
3296 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3297 			break;
3298 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
3299 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3300 		mta[mcnt].vmdq = adapter->pool;
3301 		mcnt++;
3302 	}
3303 #if __FreeBSD_version < 800000
3304 	IF_ADDR_UNLOCK(ifp);
3305 #else
3306 	if_maddr_runlock(ifp);
3307 #endif
3308 
3309 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3310 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3311 	if (ifp->if_flags & IFF_PROMISC)
3312 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3313 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3314 	    ifp->if_flags & IFF_ALLMULTI) {
3315 		fctrl |= IXGBE_FCTRL_MPE;
3316 		fctrl &= ~IXGBE_FCTRL_UPE;
3317 	} else
3318 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3319 
3320 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3321 
3322 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3323 		update_ptr = (u8 *)mta;
3324 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3325 		    ixgbe_mc_array_itr, TRUE);
3326 	}
3327 
3328 	return;
3329 } /* ixgbe_set_multi */
3330 
3331 /************************************************************************
3332  * ixgbe_mc_array_itr
3333  *
3334  *   An iterator function needed by the multicast shared code.
3335  *   It feeds the shared code routine the addresses in the
3336  *   array of ixgbe_set_multi() one by one.
3337  ************************************************************************/
3338 static u8 *
3339 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3340 {
3341 	struct ixgbe_mc_addr *mta;
3342 
3343 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3344 	*vmdq = mta->vmdq;
3345 
3346 	*update_ptr = (u8*)(mta + 1);
3347 
3348 	return (mta->addr);
3349 } /* ixgbe_mc_array_itr */
3350 
3351 /************************************************************************
3352  * ixgbe_local_timer - Timer routine
3353  *
3354  *   Checks for link status, updates statistics,
3355  *   and runs the watchdog check.
3356  ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter  *adapter = arg;
	device_t        dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64             queues = 0;
	int             hung = 0;

	/* Callout is initialized with the core mutex, so we own it here. */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixgbe_rearm_queues(adapter, queues);
	}

out:
	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* Every queue is hung: log it, count it, and reinitialize. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
3425 
3426 /************************************************************************
3427  * ixgbe_sfp_probe
3428  *
3429  *   Determine if a port had optics inserted.
3430  ************************************************************************/
3431 static bool
3432 ixgbe_sfp_probe(struct adapter *adapter)
3433 {
3434 	struct ixgbe_hw *hw = &adapter->hw;
3435 	device_t        dev = adapter->dev;
3436 	bool            result = FALSE;
3437 
3438 	if ((hw->phy.type == ixgbe_phy_nl) &&
3439 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3440 		s32 ret = hw->phy.ops.identify_sfp(hw);
3441 		if (ret)
3442 			goto out;
3443 		ret = hw->phy.ops.reset(hw);
3444 		adapter->sfp_probe = FALSE;
3445 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3446 			device_printf(dev, "Unsupported SFP+ module detected!");
3447 			device_printf(dev,
3448 			    "Reload driver with supported module.\n");
3449 			goto out;
3450 		} else
3451 			device_printf(dev, "SFP+ module detected!\n");
3452 		/* We now have supported optics */
3453 		result = TRUE;
3454 	}
3455 out:
3456 
3457 	return (result);
3458 } /* ixgbe_sfp_probe */
3459 
3460 /************************************************************************
3461  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3462  ************************************************************************/
3463 static void
3464 ixgbe_handle_mod(void *context, int pending)
3465 {
3466 	struct adapter  *adapter = context;
3467 	struct ixgbe_hw *hw = &adapter->hw;
3468 	device_t        dev = adapter->dev;
3469 	u32             err, cage_full = 0;
3470 
3471 	if (adapter->hw.need_crosstalk_fix) {
3472 		switch (hw->mac.type) {
3473 		case ixgbe_mac_82599EB:
3474 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3475 			    IXGBE_ESDP_SDP2;
3476 			break;
3477 		case ixgbe_mac_X550EM_x:
3478 		case ixgbe_mac_X550EM_a:
3479 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3480 			    IXGBE_ESDP_SDP0;
3481 			break;
3482 		default:
3483 			break;
3484 		}
3485 
3486 		if (!cage_full)
3487 			return;
3488 	}
3489 
3490 	err = hw->phy.ops.identify_sfp(hw);
3491 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3492 		device_printf(dev,
3493 		    "Unsupported SFP+ module type was detected.\n");
3494 		return;
3495 	}
3496 
3497 	err = hw->mac.ops.setup_sfp(hw);
3498 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3499 		device_printf(dev,
3500 		    "Setup failure - unsupported SFP+ module type.\n");
3501 		return;
3502 	}
3503 	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3504 } /* ixgbe_handle_mod */
3505 
3506 
3507 /************************************************************************
3508  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3509  ************************************************************************/
3510 static void
3511 ixgbe_handle_msf(void *context, int pending)
3512 {
3513 	struct adapter  *adapter = context;
3514 	struct ixgbe_hw *hw = &adapter->hw;
3515 	u32             autoneg;
3516 	bool            negotiate;
3517 
3518 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3519 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3520 
3521 	autoneg = hw->phy.autoneg_advertised;
3522 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3523 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3524 	if (hw->mac.ops.setup_link)
3525 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3526 
3527 	/* Adjust media types shown in ifconfig */
3528 	ifmedia_removeall(&adapter->media);
3529 	ixgbe_add_media_types(adapter);
3530 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3531 } /* ixgbe_handle_msf */
3532 
3533 /************************************************************************
3534  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3535  ************************************************************************/
3536 static void
3537 ixgbe_handle_phy(void *context, int pending)
3538 {
3539 	struct adapter  *adapter = context;
3540 	struct ixgbe_hw *hw = &adapter->hw;
3541 	int             error;
3542 
3543 	error = hw->phy.ops.handle_lasi(hw);
3544 	if (error == IXGBE_ERR_OVERTEMP)
3545 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3546 	else if (error)
3547 		device_printf(adapter->dev,
3548 		    "Error handling LASI interrupt: %d\n", error);
3549 } /* ixgbe_handle_phy */
3550 
3551 /************************************************************************
3552  * ixgbe_stop - Stop the hardware
3553  *
3554  *   Disables all traffic on the adapter by issuing a
3555  *   global reset on the MAC and deallocates TX/RX buffers.
3556  ************************************************************************/
3557 static void
3558 ixgbe_stop(void *arg)
3559 {
3560 	struct ifnet    *ifp;
3561 	struct adapter  *adapter = arg;
3562 	struct ixgbe_hw *hw = &adapter->hw;
3563 
3564 	ifp = adapter->ifp;
3565 
3566 	mtx_assert(&adapter->core_mtx, MA_OWNED);
3567 
3568 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
3569 	ixgbe_disable_intr(adapter);
3570 	callout_stop(&adapter->timer);
3571 
3572 	/* Let the stack know...*/
3573 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3574 
3575 	ixgbe_reset_hw(hw);
3576 	hw->adapter_stopped = FALSE;
3577 	ixgbe_stop_adapter(hw);
3578 	if (hw->mac.type == ixgbe_mac_82599EB)
3579 		ixgbe_stop_mac_link_on_d3_82599(hw);
3580 	/* Turn off the laser - noop with no optics */
3581 	ixgbe_disable_tx_laser(hw);
3582 
3583 	/* Update the stack */
3584 	adapter->link_up = FALSE;
3585 	ixgbe_update_link_status(adapter);
3586 
3587 	/* reprogram the RAR[0] in case user changed it. */
3588 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3589 
3590 	return;
3591 } /* ixgbe_stop */
3592 
3593 /************************************************************************
3594  * ixgbe_update_link_status - Update OS on link state
3595  *
3596  * Note: Only updates the OS on the cached link state.
3597  *       The real check of the hardware only happens with
3598  *       a link interrupt.
3599  ************************************************************************/
3600 static void
3601 ixgbe_update_link_status(struct adapter *adapter)
3602 {
3603 	struct ifnet *ifp = adapter->ifp;
3604 	device_t     dev = adapter->dev;
3605 
3606 	if (adapter->link_up) {
3607 		if (adapter->link_active == FALSE) {
3608 			if (bootverbose)
3609 				device_printf(dev, "Link is up %d Gbps %s \n",
3610 				    ((adapter->link_speed == 128) ? 10 : 1),
3611 				    "Full Duplex");
3612 			adapter->link_active = TRUE;
3613 			/* Update any Flow Control changes */
3614 			ixgbe_fc_enable(&adapter->hw);
3615 			/* Update DMA coalescing config */
3616 			ixgbe_config_dmac(adapter);
3617 			if_link_state_change(ifp, LINK_STATE_UP);
3618 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3619 				ixgbe_ping_all_vfs(adapter);
3620 		}
3621 	} else { /* Link down */
3622 		if (adapter->link_active == TRUE) {
3623 			if (bootverbose)
3624 				device_printf(dev, "Link is Down\n");
3625 			if_link_state_change(ifp, LINK_STATE_DOWN);
3626 			adapter->link_active = FALSE;
3627 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3628 				ixgbe_ping_all_vfs(adapter);
3629 		}
3630 	}
3631 
3632 	return;
3633 } /* ixgbe_update_link_status */
3634 
3635 /************************************************************************
3636  * ixgbe_config_dmac - Configure DMA Coalescing
3637  ************************************************************************/
3638 static void
3639 ixgbe_config_dmac(struct adapter *adapter)
3640 {
3641 	struct ixgbe_hw          *hw = &adapter->hw;
3642 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3643 
3644 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3645 		return;
3646 
3647 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3648 	    dcfg->link_speed ^ adapter->link_speed) {
3649 		dcfg->watchdog_timer = adapter->dmac;
3650 		dcfg->fcoe_en = false;
3651 		dcfg->link_speed = adapter->link_speed;
3652 		dcfg->num_tcs = 1;
3653 
3654 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3655 		    dcfg->watchdog_timer, dcfg->link_speed);
3656 
3657 		hw->mac.ops.dmac_config(hw);
3658 	}
3659 } /* ixgbe_config_dmac */
3660 
3661 /************************************************************************
3662  * ixgbe_enable_intr
3663  ************************************************************************/
3664 static void
3665 ixgbe_enable_intr(struct adapter *adapter)
3666 {
3667 	struct ixgbe_hw *hw = &adapter->hw;
3668 	struct ix_queue *que = adapter->queues;
3669 	u32             mask, fwsm;
3670 
3671 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3672 
3673 	switch (adapter->hw.mac.type) {
3674 	case ixgbe_mac_82599EB:
3675 		mask |= IXGBE_EIMS_ECC;
3676 		/* Temperature sensor on some adapters */
3677 		mask |= IXGBE_EIMS_GPI_SDP0;
3678 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3679 		mask |= IXGBE_EIMS_GPI_SDP1;
3680 		mask |= IXGBE_EIMS_GPI_SDP2;
3681 		break;
3682 	case ixgbe_mac_X540:
3683 		/* Detect if Thermal Sensor is enabled */
3684 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3685 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3686 			mask |= IXGBE_EIMS_TS;
3687 		mask |= IXGBE_EIMS_ECC;
3688 		break;
3689 	case ixgbe_mac_X550:
3690 		/* MAC thermal sensor is automatically enabled */
3691 		mask |= IXGBE_EIMS_TS;
3692 		mask |= IXGBE_EIMS_ECC;
3693 		break;
3694 	case ixgbe_mac_X550EM_x:
3695 	case ixgbe_mac_X550EM_a:
3696 		/* Some devices use SDP0 for important information */
3697 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3698 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3699 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3700 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3701 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3702 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3703 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3704 		mask |= IXGBE_EIMS_ECC;
3705 		break;
3706 	default:
3707 		break;
3708 	}
3709 
3710 	/* Enable Fan Failure detection */
3711 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3712 		mask |= IXGBE_EIMS_GPI_SDP1;
3713 	/* Enable SR-IOV */
3714 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3715 		mask |= IXGBE_EIMS_MAILBOX;
3716 	/* Enable Flow Director */
3717 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3718 		mask |= IXGBE_EIMS_FLOW_DIR;
3719 
3720 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3721 
3722 	/* With MSI-X we use auto clear */
3723 	if (adapter->msix_mem) {
3724 		mask = IXGBE_EIMS_ENABLE_MASK;
3725 		/* Don't autoclear Link */
3726 		mask &= ~IXGBE_EIMS_OTHER;
3727 		mask &= ~IXGBE_EIMS_LSC;
3728 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3729 			mask &= ~IXGBE_EIMS_MAILBOX;
3730 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3731 	}
3732 
3733 	/*
3734 	 * Now enable all queues, this is done separately to
3735 	 * allow for handling the extended (beyond 32) MSI-X
3736 	 * vectors that can be used by 82599
3737 	 */
3738 	for (int i = 0; i < adapter->num_queues; i++, que++)
3739 		ixgbe_enable_queue(adapter, que->msix);
3740 
3741 	IXGBE_WRITE_FLUSH(hw);
3742 
3743 	return;
3744 } /* ixgbe_enable_intr */
3745 
3746 /************************************************************************
3747  * ixgbe_disable_intr
3748  ************************************************************************/
3749 static void
3750 ixgbe_disable_intr(struct adapter *adapter)
3751 {
3752 	if (adapter->msix_mem)
3753 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3754 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3755 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3756 	} else {
3757 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3758 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3759 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3760 	}
3761 	IXGBE_WRITE_FLUSH(&adapter->hw);
3762 
3763 	return;
3764 } /* ixgbe_disable_intr */
3765 
3766 /************************************************************************
3767  * ixgbe_legacy_irq - Legacy Interrupt Service routine
3768  ************************************************************************/
3769 static void
3770 ixgbe_legacy_irq(void *arg)
3771 {
3772 	struct ix_queue *que = arg;
3773 	struct adapter  *adapter = que->adapter;
3774 	struct ixgbe_hw *hw = &adapter->hw;
3775 	struct ifnet    *ifp = adapter->ifp;
3776 	struct tx_ring  *txr = adapter->tx_rings;
3777 	bool            more = false;
3778 	u32             eicr, eicr_mask;
3779 
3780 	/* Silicon errata #26 on 82598 */
3781 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3782 
3783 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3784 
3785 	++que->irqs;
3786 	if (eicr == 0) {
3787 		ixgbe_enable_intr(adapter);
3788 		return;
3789 	}
3790 
3791 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3792 		more = ixgbe_rxeof(que);
3793 
3794 		IXGBE_TX_LOCK(txr);
3795 		ixgbe_txeof(txr);
3796 		if (!ixgbe_ring_empty(ifp, txr->br))
3797 			ixgbe_start_locked(ifp, txr);
3798 		IXGBE_TX_UNLOCK(txr);
3799 	}
3800 
3801 	/* Check for fan failure */
3802 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3803 		ixgbe_check_fan_failure(adapter, eicr, true);
3804 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3805 	}
3806 
3807 	/* Link status change */
3808 	if (eicr & IXGBE_EICR_LSC)
3809 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
3810 
3811 	if (ixgbe_is_sfp(hw)) {
3812 		/* Pluggable optics-related interrupt */
3813 		if (hw->mac.type >= ixgbe_mac_X540)
3814 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3815 		else
3816 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3817 
3818 		if (eicr & eicr_mask) {
3819 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3820 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
3821 		}
3822 
3823 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3824 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3825 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3826 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3827 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3828 		}
3829 	}
3830 
3831 	/* External PHY interrupt */
3832 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3833 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3834 		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
3835 
3836 	if (more)
3837 		taskqueue_enqueue(que->tq, &que->que_task);
3838 	else
3839 		ixgbe_enable_intr(adapter);
3840 
3841 	return;
3842 } /* ixgbe_legacy_irq */
3843 
3844 /************************************************************************
3845  * ixgbe_free_pci_resources
3846  ************************************************************************/
3847 static void
3848 ixgbe_free_pci_resources(struct adapter *adapter)
3849 {
3850 	struct ix_queue *que = adapter->queues;
3851 	device_t        dev = adapter->dev;
3852 	int             rid, memrid;
3853 
3854 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3855 		memrid = PCIR_BAR(MSIX_82598_BAR);
3856 	else
3857 		memrid = PCIR_BAR(MSIX_82599_BAR);
3858 
3859 	/*
3860 	 * There is a slight possibility of a failure mode
3861 	 * in attach that will result in entering this function
3862 	 * before interrupt resources have been initialized, and
3863 	 * in that case we do not want to execute the loops below
3864 	 * We can detect this reliably by the state of the adapter
3865 	 * res pointer.
3866 	 */
3867 	if (adapter->res == NULL)
3868 		goto mem;
3869 
3870 	/*
3871 	 * Release all msix queue resources:
3872 	 */
3873 	for (int i = 0; i < adapter->num_queues; i++, que++) {
3874 		rid = que->msix + 1;
3875 		if (que->tag != NULL) {
3876 			bus_teardown_intr(dev, que->res, que->tag);
3877 			que->tag = NULL;
3878 		}
3879 		if (que->res != NULL)
3880 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
3881 	}
3882 
3883 
3884 	if (adapter->tag != NULL) {
3885 		bus_teardown_intr(dev, adapter->res, adapter->tag);
3886 		adapter->tag = NULL;
3887 	}
3888 
3889 	/* Clean the Legacy or Link interrupt last */
3890 	if (adapter->res != NULL)
3891 		bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
3892 		    adapter->res);
3893 
3894 mem:
3895 	if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
3896 	    (adapter->feat_en & IXGBE_FEATURE_MSIX))
3897 		pci_release_msi(dev);
3898 
3899 	if (adapter->msix_mem != NULL)
3900 		bus_release_resource(dev, SYS_RES_MEMORY, memrid,
3901 		    adapter->msix_mem);
3902 
3903 	if (adapter->pci_mem != NULL)
3904 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3905 		    adapter->pci_mem);
3906 
3907 	return;
3908 } /* ixgbe_free_pci_resources */
3909 
3910 /************************************************************************
3911  * ixgbe_set_sysctl_value
3912  ************************************************************************/
3913 static void
3914 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
3915     const char *description, int *limit, int value)
3916 {
3917 	*limit = value;
3918 	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3919 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3920 	    OID_AUTO, name, CTLFLAG_RW, limit, value, description);
3921 } /* ixgbe_set_sysctl_value */
3922 
3923 /************************************************************************
3924  * ixgbe_sysctl_flowcntl
3925  *
3926  *   SYSCTL wrapper around setting Flow Control
3927  ************************************************************************/
3928 static int
3929 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3930 {
3931 	struct adapter *adapter;
3932 	int            error, fc;
3933 
3934 	adapter = (struct adapter *)arg1;
3935 	fc = adapter->hw.fc.current_mode;
3936 
3937 	error = sysctl_handle_int(oidp, &fc, 0, req);
3938 	if ((error) || (req->newptr == NULL))
3939 		return (error);
3940 
3941 	/* Don't bother if it's not changed */
3942 	if (fc == adapter->hw.fc.current_mode)
3943 		return (0);
3944 
3945 	return ixgbe_set_flowcntl(adapter, fc);
3946 } /* ixgbe_sysctl_flowcntl */
3947 
3948 /************************************************************************
3949  * ixgbe_set_flowcntl - Set flow control
3950  *
3951  *   Flow control values:
3952  *     0 - off
3953  *     1 - rx pause
3954  *     2 - tx pause
3955  *     3 - full
3956  ************************************************************************/
3957 static int
3958 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3959 {
3960 	switch (fc) {
3961 	case ixgbe_fc_rx_pause:
3962 	case ixgbe_fc_tx_pause:
3963 	case ixgbe_fc_full:
3964 		adapter->hw.fc.requested_mode = fc;
3965 		if (adapter->num_queues > 1)
3966 			ixgbe_disable_rx_drop(adapter);
3967 		break;
3968 	case ixgbe_fc_none:
3969 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3970 		if (adapter->num_queues > 1)
3971 			ixgbe_enable_rx_drop(adapter);
3972 		break;
3973 	default:
3974 		return (EINVAL);
3975 	}
3976 
3977 	/* Don't autoneg if forcing a value */
3978 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3979 	ixgbe_fc_enable(&adapter->hw);
3980 
3981 	return (0);
3982 } /* ixgbe_set_flowcntl */
3983 
3984 /************************************************************************
3985  * ixgbe_enable_rx_drop
3986  *
3987  *   Enable the hardware to drop packets when the buffer is
3988  *   full. This is useful with multiqueue, so that no single
3989  *   queue being full stalls the entire RX engine. We only
3990  *   enable this when Multiqueue is enabled AND Flow Control
3991  *   is disabled.
3992  ************************************************************************/
3993 static void
3994 ixgbe_enable_rx_drop(struct adapter *adapter)
3995 {
3996 	struct ixgbe_hw *hw = &adapter->hw;
3997 	struct rx_ring  *rxr;
3998 	u32             srrctl;
3999 
4000 	for (int i = 0; i < adapter->num_queues; i++) {
4001 		rxr = &adapter->rx_rings[i];
4002 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4003 		srrctl |= IXGBE_SRRCTL_DROP_EN;
4004 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4005 	}
4006 
4007 	/* enable drop for each vf */
4008 	for (int i = 0; i < adapter->num_vfs; i++) {
4009 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4010 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4011 		    IXGBE_QDE_ENABLE));
4012 	}
4013 } /* ixgbe_enable_rx_drop */
4014 
4015 /************************************************************************
4016  * ixgbe_disable_rx_drop
4017  ************************************************************************/
4018 static void
4019 ixgbe_disable_rx_drop(struct adapter *adapter)
4020 {
4021 	struct ixgbe_hw *hw = &adapter->hw;
4022 	struct rx_ring  *rxr;
4023 	u32             srrctl;
4024 
4025 	for (int i = 0; i < adapter->num_queues; i++) {
4026 		rxr = &adapter->rx_rings[i];
4027 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4028 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4029 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4030 	}
4031 
4032 	/* disable drop for each vf */
4033 	for (int i = 0; i < adapter->num_vfs; i++) {
4034 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4035 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4036 	}
4037 } /* ixgbe_disable_rx_drop */
4038 
4039 /************************************************************************
4040  * ixgbe_sysctl_advertise
4041  *
4042  *   SYSCTL wrapper around setting advertised speed
4043  ************************************************************************/
4044 static int
4045 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4046 {
4047 	struct adapter *adapter;
4048 	int            error, advertise;
4049 
4050 	adapter = (struct adapter *)arg1;
4051 	advertise = adapter->advertise;
4052 
4053 	error = sysctl_handle_int(oidp, &advertise, 0, req);
4054 	if ((error) || (req->newptr == NULL))
4055 		return (error);
4056 
4057 	return ixgbe_set_advertise(adapter, advertise);
4058 } /* ixgbe_sysctl_advertise */
4059 
4060 /************************************************************************
4061  * ixgbe_set_advertise - Control advertised link speed
4062  *
4063  *   Flags:
4064  *     0x1 - advertise 100 Mb
4065  *     0x2 - advertise 1G
4066  *     0x4 - advertise 10G
4067  *     0x8 - advertise 10 Mb (yes, Mb)
4068  ************************************************************************/
4069 static int
4070 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4071 {
4072 	device_t         dev;
4073 	struct ixgbe_hw  *hw;
4074 	ixgbe_link_speed speed = 0;
4075 	ixgbe_link_speed link_caps = 0;
4076 	s32              err = IXGBE_NOT_IMPLEMENTED;
4077 	bool             negotiate = FALSE;
4078 
4079 	/* Checks to validate new value */
4080 	if (adapter->advertise == advertise) /* no change */
4081 		return (0);
4082 
4083 	dev = adapter->dev;
4084 	hw = &adapter->hw;
4085 
4086 	/* No speed changes for backplane media */
4087 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4088 		return (ENODEV);
4089 
4090 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4091 	      (hw->phy.multispeed_fiber))) {
4092 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4093 		return (EINVAL);
4094 	}
4095 
4096 	if (advertise < 0x1 || advertise > 0xF) {
4097 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4098 		return (EINVAL);
4099 	}
4100 
4101 	if (hw->mac.ops.get_link_capabilities) {
4102 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4103 		    &negotiate);
4104 		if (err != IXGBE_SUCCESS) {
4105 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4106 			return (ENODEV);
4107 		}
4108 	}
4109 
4110 	/* Set new value and report new advertised mode */
4111 	if (advertise & 0x1) {
4112 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4113 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4114 			return (EINVAL);
4115 		}
4116 		speed |= IXGBE_LINK_SPEED_100_FULL;
4117 	}
4118 	if (advertise & 0x2) {
4119 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4120 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4121 			return (EINVAL);
4122 		}
4123 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4124 	}
4125 	if (advertise & 0x4) {
4126 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4127 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4128 			return (EINVAL);
4129 		}
4130 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4131 	}
4132 	if (advertise & 0x8) {
4133 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4134 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4135 			return (EINVAL);
4136 		}
4137 		speed |= IXGBE_LINK_SPEED_10_FULL;
4138 	}
4139 
4140 	hw->mac.autotry_restart = TRUE;
4141 	hw->mac.ops.setup_link(hw, speed, TRUE);
4142 	adapter->advertise = advertise;
4143 
4144 	return (0);
4145 } /* ixgbe_set_advertise */
4146 
4147 /************************************************************************
4148  * ixgbe_get_advertise - Get current advertised speed settings
4149  *
4150  *   Formatted for sysctl usage.
4151  *   Flags:
4152  *     0x1 - advertise 100 Mb
4153  *     0x2 - advertise 1G
4154  *     0x4 - advertise 10G
4155  *     0x8 - advertise 10 Mb (yes, Mb)
4156  ************************************************************************/
4157 static int
4158 ixgbe_get_advertise(struct adapter *adapter)
4159 {
4160 	struct ixgbe_hw  *hw = &adapter->hw;
4161 	int              speed;
4162 	ixgbe_link_speed link_caps = 0;
4163 	s32              err;
4164 	bool             negotiate = FALSE;
4165 
4166 	/*
4167 	 * Advertised speed means nothing unless it's copper or
4168 	 * multi-speed fiber
4169 	 */
4170 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4171 	    !(hw->phy.multispeed_fiber))
4172 		return (0);
4173 
4174 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4175 	if (err != IXGBE_SUCCESS)
4176 		return (0);
4177 
4178 	speed =
4179 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4180 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4181 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4182 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4183 
4184 	return speed;
4185 } /* ixgbe_get_advertise */
4186 
4187 /************************************************************************
4188  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4189  *
4190  *   Control values:
4191  *     0/1 - off / on (use default value of 1000)
4192  *
4193  *     Legal timer values are:
4194  *     50,100,250,500,1000,2000,5000,10000
4195  *
4196  *     Turning off interrupt moderation will also turn this off.
4197  ************************************************************************/
4198 static int
4199 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4200 {
4201 	struct adapter *adapter = (struct adapter *)arg1;
4202 	struct ifnet   *ifp = adapter->ifp;
4203 	int            error;
4204 	u32            newval;
4205 
4206 	newval = adapter->dmac;
4207 	error = sysctl_handle_int(oidp, &newval, 0, req);
4208 	if ((error) || (req->newptr == NULL))
4209 		return (error);
4210 
4211 	switch (newval) {
4212 	case 0:
4213 		/* Disabled */
4214 		adapter->dmac = 0;
4215 		break;
4216 	case 1:
4217 		/* Enable and use default */
4218 		adapter->dmac = 1000;
4219 		break;
4220 	case 50:
4221 	case 100:
4222 	case 250:
4223 	case 500:
4224 	case 1000:
4225 	case 2000:
4226 	case 5000:
4227 	case 10000:
4228 		/* Legal values - allow */
4229 		adapter->dmac = newval;
4230 		break;
4231 	default:
4232 		/* Do nothing, illegal value */
4233 		return (EINVAL);
4234 	}
4235 
4236 	/* Re-initialize hardware if it's already running */
4237 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4238 		ixgbe_init(adapter);
4239 
4240 	return (0);
4241 } /* ixgbe_sysctl_dmac */
4242 
4243 #ifdef IXGBE_DEBUG
4244 /************************************************************************
4245  * ixgbe_sysctl_power_state
4246  *
4247  *   Sysctl to test power states
4248  *   Values:
4249  *     0      - set device to D0
4250  *     3      - set device to D3
4251  *     (none) - get current device power state
4252  ************************************************************************/
4253 static int
4254 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4255 {
4256 	struct adapter *adapter = (struct adapter *)arg1;
4257 	device_t       dev = adapter->dev;
4258 	int            curr_ps, new_ps, error = 0;
4259 
4260 	curr_ps = new_ps = pci_get_powerstate(dev);
4261 
4262 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4263 	if ((error) || (req->newptr == NULL))
4264 		return (error);
4265 
4266 	if (new_ps == curr_ps)
4267 		return (0);
4268 
4269 	if (new_ps == 3 && curr_ps == 0)
4270 		error = DEVICE_SUSPEND(dev);
4271 	else if (new_ps == 0 && curr_ps == 3)
4272 		error = DEVICE_RESUME(dev);
4273 	else
4274 		return (EINVAL);
4275 
4276 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4277 
4278 	return (error);
4279 } /* ixgbe_sysctl_power_state */
4280 #endif
4281 
4282 /************************************************************************
4283  * ixgbe_sysctl_wol_enable
4284  *
4285  *   Sysctl to enable/disable the WoL capability,
4286  *   if supported by the adapter.
4287  *
4288  *   Values:
4289  *     0 - disabled
4290  *     1 - enabled
4291  ************************************************************************/
4292 static int
4293 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4294 {
4295 	struct adapter  *adapter = (struct adapter *)arg1;
4296 	struct ixgbe_hw *hw = &adapter->hw;
4297 	int             new_wol_enabled;
4298 	int             error = 0;
4299 
4300 	new_wol_enabled = hw->wol_enabled;
4301 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4302 	if ((error) || (req->newptr == NULL))
4303 		return (error);
4304 	new_wol_enabled = !!(new_wol_enabled);
4305 	if (new_wol_enabled == hw->wol_enabled)
4306 		return (0);
4307 
4308 	if (new_wol_enabled > 0 && !adapter->wol_support)
4309 		return (ENODEV);
4310 	else
4311 		hw->wol_enabled = new_wol_enabled;
4312 
4313 	return (0);
4314 } /* ixgbe_sysctl_wol_enable */
4315 
4316 /************************************************************************
4317  * ixgbe_sysctl_wufc - Wake Up Filter Control
4318  *
4319  *   Sysctl to enable/disable the types of packets that the
4320  *   adapter will wake up on upon receipt.
4321  *   Flags:
4322  *     0x1  - Link Status Change
4323  *     0x2  - Magic Packet
4324  *     0x4  - Direct Exact
4325  *     0x8  - Directed Multicast
4326  *     0x10 - Broadcast
4327  *     0x20 - ARP/IPv4 Request Packet
4328  *     0x40 - Direct IPv4 Packet
4329  *     0x80 - Direct IPv6 Packet
4330  *
4331  *   Settings not listed above will cause the sysctl to return an error.
4332  ************************************************************************/
4333 static int
4334 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4335 {
4336 	struct adapter *adapter = (struct adapter *)arg1;
4337 	int            error = 0;
4338 	u32            new_wufc;
4339 
4340 	new_wufc = adapter->wufc;
4341 
4342 	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4343 	if ((error) || (req->newptr == NULL))
4344 		return (error);
4345 	if (new_wufc == adapter->wufc)
4346 		return (0);
4347 
4348 	if (new_wufc & 0xffffff00)
4349 		return (EINVAL);
4350 
4351 	new_wufc &= 0xff;
4352 	new_wufc |= (0xffffff & adapter->wufc);
4353 	adapter->wufc = new_wufc;
4354 
4355 	return (0);
4356 } /* ixgbe_sysctl_wufc */
4357 
4358 #ifdef IXGBE_DEBUG
4359 /************************************************************************
4360  * ixgbe_sysctl_print_rss_config
4361  ************************************************************************/
4362 static int
4363 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4364 {
4365 	struct adapter  *adapter = (struct adapter *)arg1;
4366 	struct ixgbe_hw *hw = &adapter->hw;
4367 	device_t        dev = adapter->dev;
4368 	struct sbuf     *buf;
4369 	int             error = 0, reta_size;
4370 	u32             reg;
4371 
4372 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4373 	if (!buf) {
4374 		device_printf(dev, "Could not allocate sbuf for output.\n");
4375 		return (ENOMEM);
4376 	}
4377 
4378 	// TODO: use sbufs to make a string to print out
4379 	/* Set multiplier for RETA setup and table size based on MAC */
4380 	switch (adapter->hw.mac.type) {
4381 	case ixgbe_mac_X550:
4382 	case ixgbe_mac_X550EM_x:
4383 	case ixgbe_mac_X550EM_a:
4384 		reta_size = 128;
4385 		break;
4386 	default:
4387 		reta_size = 32;
4388 		break;
4389 	}
4390 
4391 	/* Print out the redirection table */
4392 	sbuf_cat(buf, "\n");
4393 	for (int i = 0; i < reta_size; i++) {
4394 		if (i < 32) {
4395 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4396 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4397 		} else {
4398 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4399 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4400 		}
4401 	}
4402 
4403 	// TODO: print more config
4404 
4405 	error = sbuf_finish(buf);
4406 	if (error)
4407 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4408 
4409 	sbuf_delete(buf);
4410 
4411 	return (0);
4412 } /* ixgbe_sysctl_print_rss_config */
4413 #endif /* IXGBE_DEBUG */
4414 
4415 /************************************************************************
4416  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4417  *
4418  *   For X552/X557-AT devices using an external PHY
4419  ************************************************************************/
4420 static int
4421 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4422 {
4423 	struct adapter  *adapter = (struct adapter *)arg1;
4424 	struct ixgbe_hw *hw = &adapter->hw;
4425 	u16             reg;
4426 
4427 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4428 		device_printf(adapter->dev,
4429 		    "Device has no supported external thermal sensor.\n");
4430 		return (ENODEV);
4431 	}
4432 
4433 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4434 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4435 		device_printf(adapter->dev,
4436 		    "Error reading from PHY's current temperature register\n");
4437 		return (EAGAIN);
4438 	}
4439 
4440 	/* Shift temp for output */
4441 	reg = reg >> 8;
4442 
4443 	return (sysctl_handle_int(oidp, NULL, reg, req));
4444 } /* ixgbe_sysctl_phy_temp */
4445 
4446 /************************************************************************
4447  * ixgbe_sysctl_phy_overtemp_occurred
4448  *
4449  *   Reports (directly from the PHY) whether the current PHY
4450  *   temperature is over the overtemp threshold.
4451  ************************************************************************/
4452 static int
4453 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4454 {
4455 	struct adapter  *adapter = (struct adapter *)arg1;
4456 	struct ixgbe_hw *hw = &adapter->hw;
4457 	u16             reg;
4458 
4459 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4460 		device_printf(adapter->dev,
4461 		    "Device has no supported external thermal sensor.\n");
4462 		return (ENODEV);
4463 	}
4464 
4465 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4466 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4467 		device_printf(adapter->dev,
4468 		    "Error reading from PHY's temperature status register\n");
4469 		return (EAGAIN);
4470 	}
4471 
4472 	/* Get occurrence bit */
4473 	reg = !!(reg & 0x4000);
4474 
4475 	return (sysctl_handle_int(oidp, 0, reg, req));
4476 } /* ixgbe_sysctl_phy_overtemp_occurred */
4477 
4478 /************************************************************************
4479  * ixgbe_sysctl_eee_state
4480  *
4481  *   Sysctl to set EEE power saving feature
4482  *   Values:
4483  *     0      - disable EEE
4484  *     1      - enable EEE
4485  *     (none) - get current device EEE state
4486  ************************************************************************/
4487 static int
4488 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4489 {
4490 	struct adapter *adapter = (struct adapter *)arg1;
4491 	device_t       dev = adapter->dev;
4492 	int            curr_eee, new_eee, error = 0;
4493 	s32            retval;
4494 
4495 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4496 
4497 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4498 	if ((error) || (req->newptr == NULL))
4499 		return (error);
4500 
4501 	/* Nothing to do */
4502 	if (new_eee == curr_eee)
4503 		return (0);
4504 
4505 	/* Not supported */
4506 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4507 		return (EINVAL);
4508 
4509 	/* Bounds checking */
4510 	if ((new_eee < 0) || (new_eee > 1))
4511 		return (EINVAL);
4512 
4513 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4514 	if (retval) {
4515 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4516 		return (EINVAL);
4517 	}
4518 
4519 	/* Restart auto-neg */
4520 	ixgbe_init(adapter);
4521 
4522 	device_printf(dev, "New EEE state: %d\n", new_eee);
4523 
4524 	/* Cache new value */
4525 	if (new_eee)
4526 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4527 	else
4528 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4529 
4530 	return (error);
4531 } /* ixgbe_sysctl_eee_state */
4532 
4533 /************************************************************************
4534  * ixgbe_init_device_features
4535  ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/* Start from the capability set shared by all supported MACs */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
	                  | IXGBE_FEATURE_RSS
	                  | IXGBE_FEATURE_MSI
	                  | IXGBE_FEATURE_MSIX
	                  | IXGBE_FEATURE_LEGACY_IRQ
	                  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* Fan-failure reporting is offered only on the 82598AT SKU */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass only on the bypass SKU, and only on PCI function 0 */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* EEE capability is limited to the KR variant */
		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Legacy (INTx) interrupt capability is withdrawn here */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass only on the bypass SKU, and only on PCI function 0 */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		/* QSFP SKU: legacy interrupt capability withdrawn */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X: RSS and SRIOV both require it */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
4638 
4639 /************************************************************************
4640  * ixgbe_probe - Device identification routine
4641  *
4642  *   Determines if the driver should be loaded on
4643  *   adapter based on its PCI vendor/device ID.
4644  *
4645  *   return BUS_PROBE_DEFAULT on success, positive on failure
4646  ************************************************************************/
4647 static int
4648 ixgbe_probe(device_t dev)
4649 {
4650 	ixgbe_vendor_info_t *ent;
4651 
4652 	u16  pci_vendor_id = 0;
4653 	u16  pci_device_id = 0;
4654 	u16  pci_subvendor_id = 0;
4655 	u16  pci_subdevice_id = 0;
4656 	char adapter_name[256];
4657 
4658 	INIT_DEBUGOUT("ixgbe_probe: begin");
4659 
4660 	pci_vendor_id = pci_get_vendor(dev);
4661 	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
4662 		return (ENXIO);
4663 
4664 	pci_device_id = pci_get_device(dev);
4665 	pci_subvendor_id = pci_get_subvendor(dev);
4666 	pci_subdevice_id = pci_get_subdevice(dev);
4667 
4668 	ent = ixgbe_vendor_info_array;
4669 	while (ent->vendor_id != 0) {
4670 		if ((pci_vendor_id == ent->vendor_id) &&
4671 		    (pci_device_id == ent->device_id) &&
4672 		    ((pci_subvendor_id == ent->subvendor_id) ||
4673 		     (ent->subvendor_id == 0)) &&
4674 		    ((pci_subdevice_id == ent->subdevice_id) ||
4675 		     (ent->subdevice_id == 0))) {
4676 			sprintf(adapter_name, "%s, Version - %s",
4677 				ixgbe_strings[ent->index],
4678 				ixgbe_driver_version);
4679 			device_set_desc_copy(dev, adapter_name);
4680 			++ixgbe_total_ports;
4681 			return (BUS_PROBE_DEFAULT);
4682 		}
4683 		ent++;
4684 	}
4685 
4686 	return (ENXIO);
4687 } /* ixgbe_probe */
4688 
4689 
4690 /************************************************************************
4691  * ixgbe_ioctl - Ioctl entry point
4692  *
4693  *   Called when the user wants to configure the interface.
4694  *
4695  *   return 0 on success, positive on failure
4696  ************************************************************************/
static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq   *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr  *ifa = (struct ifaddr *)data;
#endif
	int            error = 0;
	bool           avoid_reset = FALSE;

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		 * Calling init results in link renegotiation,
		 * so we avoid doing it when possible.
		 */
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			/* Init only if the interface is not already running */
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			/* MTU change re-inits the interface under core lock */
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Already running: only reprogram the filters
				 * if PROMISC/ALLMULTI actually changed.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
				}
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		/* Remember flags so the next delta can be computed */
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Quiesce interrupts while the filter is rewritten */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		/* Bits that differ between requested and current capenable */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (!mask)
			break;

		/* HW cannot turn these on/off separately */
		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		}
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		/* Re-init so the hardware picks up the new capabilities */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP EEPROM (0xA0) and DOM (0xA2) addresses */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		/* Reject user-supplied lengths larger than the data buffer */
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
} /* ixgbe_ioctl */
4854 
4855 /************************************************************************
4856  * ixgbe_check_fan_failure
4857  ************************************************************************/
4858 static void
4859 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4860 {
4861 	u32 mask;
4862 
4863 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4864 	    IXGBE_ESDP_SDP1;
4865 
4866 	if (reg & mask)
4867 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4868 } /* ixgbe_check_fan_failure */
4869 
4870 /************************************************************************
4871  * ixgbe_handle_que
4872  ************************************************************************/
4873 static void
4874 ixgbe_handle_que(void *context, int pending)
4875 {
4876 	struct ix_queue *que = context;
4877 	struct adapter  *adapter = que->adapter;
4878 	struct tx_ring  *txr = que->txr;
4879 	struct ifnet    *ifp = adapter->ifp;
4880 
4881 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4882 		ixgbe_rxeof(que);
4883 		IXGBE_TX_LOCK(txr);
4884 		ixgbe_txeof(txr);
4885 		if (!ixgbe_ring_empty(ifp, txr->br))
4886 			ixgbe_start_locked(ifp, txr);
4887 		IXGBE_TX_UNLOCK(txr);
4888 	}
4889 
4890 	/* Re-enable this interrupt */
4891 	if (que->res != NULL)
4892 		ixgbe_enable_queue(adapter, que->msix);
4893 	else
4894 		ixgbe_enable_intr(adapter);
4895 
4896 	return;
4897 } /* ixgbe_handle_que */
4898 
4899 
4900 
4901 /************************************************************************
4902  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
4903  ************************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter)
{
	device_t        dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring  *txr = adapter->tx_rings;
	int             error;

	/* We allocate a single interrupt resource */
	adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res == NULL) {
		device_printf(dev,
		    "Unable to allocate bus resource: interrupt\n");
		return (ENXIO);
	}

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
	    taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
	    device_get_nameunit(adapter->dev));

	/* Tasklets for Link, SFP and Multispeed Fiber */
	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
	/* Flow Director re-init task only when the feature is enabled */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
	    device_get_nameunit(adapter->dev));

	/* Hook the filter routine; on failure tear down both taskqueues */
	if ((error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
	    &adapter->tag)) != 0) {
		device_printf(dev,
		    "Failed to register fast interrupt handler: %d\n", error);
		taskqueue_free(que->tq);
		taskqueue_free(adapter->tq);
		que->tq = NULL;
		adapter->tq = NULL;

		return (error);
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
4962 
4963 
4964 /************************************************************************
4965  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
4966  ************************************************************************/
4967 static int
4968 ixgbe_allocate_msix(struct adapter *adapter)
4969 {
4970 	device_t        dev = adapter->dev;
4971 	struct ix_queue *que = adapter->queues;
4972 	struct tx_ring  *txr = adapter->tx_rings;
4973 	int             error, rid, vector = 0;
4974 	int             cpu_id = 0;
4975 	unsigned int    rss_buckets = 0;
4976 	cpuset_t        cpu_mask;
4977 
4978 	/*
4979 	 * If we're doing RSS, the number of queues needs to
4980 	 * match the number of RSS buckets that are configured.
4981 	 *
4982 	 * + If there's more queues than RSS buckets, we'll end
4983 	 *   up with queues that get no traffic.
4984 	 *
4985 	 * + If there's more RSS buckets than queues, we'll end
4986 	 *   up having multiple RSS buckets map to the same queue,
4987 	 *   so there'll be some contention.
4988 	 */
4989 	rss_buckets = rss_getnumbuckets();
4990 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
4991 	    (adapter->num_queues != rss_buckets)) {
4992 		device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
4993 		    __func__, adapter->num_queues, rss_buckets);
4994 	}
4995 
4996 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
4997 		rid = vector + 1;
4998 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
4999 		    RF_SHAREABLE | RF_ACTIVE);
5000 		if (que->res == NULL) {
5001 			device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
5002 			    vector);
5003 			return (ENXIO);
5004 		}
5005 		/* Set the handler function */
5006 		error = bus_setup_intr(dev, que->res,
5007 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
5008 		    &que->tag);
5009 		if (error) {
5010 			que->res = NULL;
5011 			device_printf(dev, "Failed to register QUE handler");
5012 			return (error);
5013 		}
5014 #if __FreeBSD_version >= 800504
5015 		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
5016 #endif
5017 		que->msix = vector;
5018 		adapter->active_queues |= (u64)(1 << que->msix);
5019 
5020 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
5021 			/*
5022 			 * The queue ID is used as the RSS layer bucket ID.
5023 			 * We look up the queue ID -> RSS CPU ID and select
5024 			 * that.
5025 			 */
5026 			cpu_id = rss_getcpu(i % rss_buckets);
5027 			CPU_SETOF(cpu_id, &cpu_mask);
5028 		} else {
5029 			/*
5030 			 * Bind the MSI-X vector, and thus the
5031 			 * rings to the corresponding CPU.
5032 			 *
5033 			 * This just happens to match the default RSS
5034 			 * round-robin bucket -> queue -> CPU allocation.
5035 			 */
5036 			if (adapter->num_queues > 1)
5037 				cpu_id = i;
5038 		}
5039 		if (adapter->num_queues > 1)
5040 			bus_bind_intr(dev, que->res, cpu_id);
5041 #ifdef IXGBE_DEBUG
5042 		if (adapter->feat_en & IXGBE_FEATURE_RSS)
5043 			device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
5044 			    cpu_id);
5045 		else
5046 			device_printf(dev, "Bound queue %d to cpu %d\n", i,
5047 			    cpu_id);
5048 #endif /* IXGBE_DEBUG */
5049 
5050 
5051 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5052 			TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
5053 			    txr);
5054 		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5055 		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5056 		    taskqueue_thread_enqueue, &que->tq);
5057 #if __FreeBSD_version < 1100000
5058 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
5059 		    device_get_nameunit(adapter->dev), i);
5060 #else
5061 		if (adapter->feat_en & IXGBE_FEATURE_RSS)
5062 			taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5063 			    &cpu_mask, "%s (bucket %d)",
5064 			    device_get_nameunit(adapter->dev), cpu_id);
5065 		else
5066 			taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5067 			    NULL, "%s:q%d", device_get_nameunit(adapter->dev),
5068 			    i);
5069 #endif
5070 	}
5071 
5072 	/* and Link */
5073 	adapter->link_rid = vector + 1;
5074 	adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
5075 	    &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
5076 	if (!adapter->res) {
5077 		device_printf(dev,
5078 		    "Unable to allocate bus resource: Link interrupt [%d]\n",
5079 		    adapter->link_rid);
5080 		return (ENXIO);
5081 	}
5082 	/* Set the link handler function */
5083 	error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
5084 	    NULL, ixgbe_msix_link, adapter, &adapter->tag);
5085 	if (error) {
5086 		adapter->res = NULL;
5087 		device_printf(dev, "Failed to register LINK handler");
5088 		return (error);
5089 	}
5090 #if __FreeBSD_version >= 800504
5091 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
5092 #endif
5093 	adapter->vector = vector;
5094 	/* Tasklets for Link, SFP and Multispeed Fiber */
5095 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
5096 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
5097 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
5098 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5099 		TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
5100 	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
5101 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5102 		TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
5103 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
5104 	    taskqueue_thread_enqueue, &adapter->tq);
5105 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
5106 	    device_get_nameunit(adapter->dev));
5107 
5108 	return (0);
5109 } /* ixgbe_allocate_msix */
5110 
5111 /************************************************************************
5112  * ixgbe_configure_interrupts
5113  *
5114  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
5115  *   This will also depend on user settings.
5116  ************************************************************************/
static int
ixgbe_configure_interrupts(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int      rid, want, queues, msgs;

	/* Default to 1 queue if MSI-X setup fails */
	adapter->num_queues = 1;

	/* Override by tuneable */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
		goto msi;

	/* First try MSI-X */
	msgs = pci_msix_count(dev);
	if (msgs == 0)
		goto msi;
	rid = PCIR_BAR(MSIX_82598_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		rid += 4;  /* 82599 maps in higher BAR */
		adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
	}
	if (adapter->msix_mem == NULL) {
		/* May not be enabled */
		device_printf(adapter->dev, "Unable to map MSI-X table.\n");
		goto msi;
	}

	/* Figure out a reasonable auto config value */
	queues = min(mp_ncpus, msgs - 1);
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		queues = min(queues, rss_getnumbuckets());
	/* Clamp an over-large user-requested queue count and warn */
	if (ixgbe_num_queues > queues) {
		device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
		ixgbe_num_queues = queues;
	}

	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;
	/* Set max queues to 8 when autoconfiguring */
	else
		queues = min(queues, 8);

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	 * Want one vector (RX/TX pair) per queue
	 * plus an additional for Link.
	 */
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
		device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	/* Success only when the PCI layer grants exactly what we want */
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSI-X interrupts with %d vectors\n", msgs);
		adapter->num_queues = queues;
		adapter->feat_en |= IXGBE_FEATURE_MSIX;
		return (0);
	}
	/*
	 * MSI-X allocation failed or provided us with
	 * less vectors than needed. Free MSI-X resources
	 * and we'll try enabling MSI.
	 */
	pci_release_msi(dev);

msi:
	/* Without MSI-X, some features are no longer supported */
	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;

	/* Release the MSI-X table mapping if one was acquired above */
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid,
		    adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	msgs = 1;
	if (pci_alloc_msi(dev, &msgs) == 0) {
		adapter->feat_en |= IXGBE_FEATURE_MSI;
		adapter->link_rid = 1;
		device_printf(adapter->dev, "Using an MSI interrupt\n");
		return (0);
	}

	/* Last resort: legacy INTx, if the device supports it at all */
	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
		device_printf(adapter->dev,
		    "Device does not support legacy interrupts.\n");
		return 1;
	}

	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	adapter->link_rid = 0;
	device_printf(adapter->dev, "Using a Legacy interrupt\n");

	return (0);
} /* ixgbe_configure_interrupts */
5225 
5226 
5227 /************************************************************************
5228  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
5229  *
5230  *   Done outside of interrupt context since the driver might sleep
5231  ************************************************************************/
5232 static void
5233 ixgbe_handle_link(void *context, int pending)
5234 {
5235 	struct adapter  *adapter = context;
5236 	struct ixgbe_hw *hw = &adapter->hw;
5237 
5238 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
5239 	ixgbe_update_link_status(adapter);
5240 
5241 	/* Re-enable link interrupts */
5242 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
5243 } /* ixgbe_handle_link */
5244 
5245 /************************************************************************
5246  * ixgbe_rearm_queues
5247  ************************************************************************/
5248 static void
5249 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5250 {
5251 	u32 mask;
5252 
5253 	switch (adapter->hw.mac.type) {
5254 	case ixgbe_mac_82598EB:
5255 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5256 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5257 		break;
5258 	case ixgbe_mac_82599EB:
5259 	case ixgbe_mac_X540:
5260 	case ixgbe_mac_X550:
5261 	case ixgbe_mac_X550EM_x:
5262 	case ixgbe_mac_X550EM_a:
5263 		mask = (queues & 0xFFFFFFFF);
5264 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5265 		mask = (queues >> 32);
5266 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5267 		break;
5268 	default:
5269 		break;
5270 	}
5271 } /* ixgbe_rearm_queues */
5272 
5273