/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

#ifdef	RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "3.1.13-k";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load the driver on.
 *  Last field stores an index into ixgbe_strings.
 *  Last entry must be all 0s.
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgbe_probe(device_t);
static int      ixgbe_attach(device_t);
static int      ixgbe_detach(device_t);
static int      ixgbe_shutdown(device_t);
static int	ixgbe_suspend(device_t);
static int	ixgbe_resume(device_t);
static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixgbe_init(void *);
static void	ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
#if __FreeBSD_version >= 1100036
static uint64_t	ixgbe_get_counter(struct ifnet *, ift_counter);
#endif
static void	ixgbe_add_media_types(struct adapter *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int      ixgbe_allocate_msix(struct adapter *);
static int      ixgbe_allocate_legacy(struct adapter *);
static int	ixgbe_setup_msix(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
static void	ixgbe_rearm_queues(struct adapter *, u64);

static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static void	ixgbe_add_device_sysctls(struct adapter *);
static void     ixgbe_add_hw_stats(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		     const char *, int *, int);
static int	ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int	ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);
static void	ixgbe_setup_optics(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void	ixgbe_legacy_irq(void *);

/* The MSI/X Interrupt handlers */
static void	ixgbe_msix_que(void *);
static void	ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void	ixgbe_handle_que(void *, int);
static void	ixgbe_handle_link(void *, int);
static void	ixgbe_handle_msf(void *, int);
static void	ixgbe_handle_mod(void *, int);
static void	ixgbe_handle_phy(void *, int);

#ifdef IXGBE_FDIR
static void	ixgbe_reinit_fdir(void *, int);
#endif

#ifdef PCI_IOV
static void	ixgbe_ping_all_vfs(struct adapter *);
static void	ixgbe_handle_mbx(void *, int);
static int	ixgbe_init_iov(device_t, u16, const nvlist_t *);
static void	ixgbe_uninit_iov(device_t);
static int	ixgbe_add_vf(device_t, u16, const nvlist_t *);
static void	ixgbe_initialize_iov(struct adapter *);
static void	ixgbe_recalculate_max_frame(struct adapter *);
static void	ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
#endif /* PCI_IOV */

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
		   "IXGBE driver parameters");

/*
** AIM: Adaptive Interrupt Moderation.
** The interrupt rate is varied over
** time, based on the traffic seen on
** each interrupt vector.
*/
static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, "
    "-1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, "
    "-1 means unlimited");

/*
** Smart speed setting, default to on.
** This only works as a compile option
** right now as it's set during attach;
** set this to 'ixgbe_smart_speed_off'
** to disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of queues. When set to 0, the
 * driver autoconfigures based on the
 * number of cpus, with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
** Setting this on allows the use of
** unsupported SFP+ modules; note that
** doing so means you are on your own :)
*/
static int allow_unsupported_sfp = FALSE;
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/* Keep a running tab on ports for sanity checking */
static int ixgbe_total_ports;

#ifdef IXGBE_FDIR
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool; this variable controls
** how much it uses:
**  0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
#endif

#ifdef DEV_NETMAP
/*
 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
 * be a reference on how to implement netmap support in a driver.
 * Additional comments are in ixgbe_netmap.h .
 *
 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
 * that extend the standard driver.
 */
#include <dev/netmap/ixgbe_netmap.h>
#endif /* DEV_NETMAP */

static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on an
 *  adapter, based on the PCI vendor/device ID of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev)
{
	ixgbe_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	INIT_DEBUGOUT("ixgbe_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixgbe_vendor_info_array;
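	/* A subvendor/subdevice ID of 0 in a table entry acts as a wildcard */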
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
				ixgbe_strings[ent->index],
				ixgbe_driver_version);
			device_set_desc_copy(dev, adapter_name);
			++ixgbe_total_ports;
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	u16		csum;
	u32		ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	hw = &adapter->hw;

#ifdef DEV_NETMAP
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;
#endif

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
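	/*
	** e.g. 2 ports x 8 queues x 2048 RX descriptors works out to
	** 32768 clusters, so the product is checked against the
	** nmbclusters limit below.
	*/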
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	hw->allow_unsupported_sfp = allow_unsupported_sfp;
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port; set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter);
	else
		error = ixgbe_allocate_legacy(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/* Set an initial default flow control & dmac value */
	adapter->fc = ixgbe_fc_full;
	adapter->dmac = 0;
	adapter->eee_enabled = 0;

#ifdef PCI_IOV
	if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
		nvlist_t *pf_schema, *vf_schema;

		hw->mbx.ops.init_params(hw);
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (error != 0) {
			device_printf(dev,
			    "Error %d setting up SR-IOV\n", error);
		}
	}
#endif /* PCI_IOV */

	/* Check for certain supported features */
	ixgbe_check_wol_support(adapter);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixgbe_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANs are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "VLAN in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	if (pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}
#endif /* PCI_IOV */

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
#ifndef IXGBE_LEGACY_TX
			taskqueue_drain(que->tq, &txr->txq_task);
#endif
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
#ifdef PCI_IOV
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
#endif
		taskqueue_drain(adapter->tq, &adapter->phy_task);
#ifdef IXGBE_FDIR
		taskqueue_drain(adapter->tq, &adapter->fdir_task);
#endif
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}

/**
 * Methods for going from:
 * D0 -> D3: ixgbe_suspend
 * D3 -> D0: ixgbe_resume
 */
static int
ixgbe_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_suspend: begin");

	IXGBE_CORE_LOCK(adapter);

	error = ixgbe_setup_low_power_mode(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}

783 
784 static int
785 ixgbe_resume(device_t dev)
786 {
787 	struct adapter *adapter = device_get_softc(dev);
788 	struct ifnet *ifp = adapter->ifp;
789 	struct ixgbe_hw *hw = &adapter->hw;
790 	u32 wus;
791 
792 	INIT_DEBUGOUT("ixgbe_resume: begin");
793 
794 	IXGBE_CORE_LOCK(adapter);
795 
796 	/* Read & clear WUS register */
797 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
798 	if (wus)
799 		device_printf(dev, "Woken up by (WUS): %#010x\n",
800 		    IXGBE_READ_REG(hw, IXGBE_WUS));
801 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
802 	/* And clear WUFC until next low-power transition */
803 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
804 
805 	/*
806 	 * Required after D3->D0 transition;
807 	 * will re-advertise all previous advertised speeds
808 	 */
809 	if (ifp->if_flags & IFF_UP)
810 		ixgbe_init_locked(adapter);
811 
812 	IXGBE_CORE_UNLOCK(adapter);
813 
814 	return (0);
815 }

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int             error = 0;
	bool		avoid_reset = FALSE;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
				ifp->if_mtu + IXGBE_MTU_HDR;
			ixgbe_init_locked(adapter);
#ifdef PCI_IOV
			ixgbe_recalculate_max_frame(adapter);
#endif
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
				}
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (!mask)
			break;

		/* HW cannot turn these on/off separately */
		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		}
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;
		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
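		/* Only the SFP EEPROM (0xA0) and diagnostics (0xA2)
		   addresses defined by SFF-8472 are accepted */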
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set the various hardware offload abilities.
 *
 * This takes the ifnet's if_capenable flags (e.g. set by the user using
 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
 * mbuf offload flags the driver will understand.
 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;

	ifp->if_hwassist = 0;
#if __FreeBSD_version >= 1000000
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_IP6_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP |
		    CSUM_IP_SCTP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP |
		    CSUM_IP6_SCTP);
#else
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
		struct ixgbe_hw *hw = &adapter->hw;
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;
	}
#endif
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16
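/* (the max frame size field occupies the upper 16 bits of MHADD) */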

static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring  *txr;
	struct rx_ring  *rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	int err = 0;
#ifdef PCI_IOV
	enum ixgbe_iov_mode mode;
#endif

	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

#ifdef PCI_IOV
	mode = ixgbe_get_iov_mode(adapter);
	adapter->pool = ixgbe_max_vfs(mode);
	/* Queue indices may change with IOV mode */
	for (int i = 0; i < adapter->num_queues; i++) {
		adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
		adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
	}
#endif
	/* Reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, user can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
#ifdef PCI_IOV
	ixgbe_initialize_iov(adapter);
#endif
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSIX interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
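		/* (per the datasheet, TXDCTL holds PTHRESH in bits 6:0,
		   HTHRESH in bits 14:8 and WTHRESH in bits 22:16) */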
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
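		/* Route both RX (type 0) and TX (type 1) of queue 0
		   to vector 0 */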
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
	}
#endif

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Configure Energy Efficient Ethernet for supported devices */
	if (hw->mac.ops.setup_eee) {
		err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
		if (err)
			device_printf(dev, "Error setting up EEE: %d\n", err);
	}

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

#ifdef PCI_IOV
	/* Enable the use of the MBX by the VF's */
	{
		u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		reg |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
	}
#endif

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
}

static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return;
}

static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Fan Failure Interrupt */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		gpie |= IXGBE_SDP1_GPIEN;

	/*
	 * Module detection (SDP2)
	 * Media ready (SDP1)
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP2_GPIEN;
		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
			gpie |= IXGBE_SDP1_GPIEN;
	}

	/*
	 * Thermal Failure Detection (X540)
	 * Link Detection (X552 SFP+, X552/X557-AT)
	 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	return;
}

/*
 * Requires adapter->max_frame_size to be set.
 */
static void
ixgbe_config_delay_values(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxpb, frame, size, tmp;

	frame = adapter->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;
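	/* i.e. high water = RX packet buffer size (in KB) minus the
	   worst-case delay allowance for one frame */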

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.requested_mode = adapter->fc;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = ((u64)1 << vector);	/* shift must be 64-bit */
	u32	mask;

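	/*
	** 82598 has a single 32-bit EIMS; later MACs split the 64
	** queue bits across the EIMS_EX register pair.
	*/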
	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
}

static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = ((u64)1 << vector);	/* shift must be 64-bit */
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
}

static void
ixgbe_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifndef IXGBE_LEGACY_TX
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Reenable this interrupt */
	if (que->res != NULL)
		ixgbe_enable_queue(adapter, que->msix);
	else
		ixgbe_enable_intr(adapter);
	return;
}

/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/

static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	bool		more;
	u32		reg_eicr;

	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);
		return;
	}

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);
	return;
}

/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */
	if (ixgbe_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_EITR(que->msix), que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);
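	/*
	** Worked example (illustrative): an average of 900 bytes per
	** packet gives newitr = 900 + 24 = 924, which falls in the
	** mid range and is scaled to 924 / 3 = 308; the writeback
	** disable bit is then or'd in below for newer MACs.
	*/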

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		newitr |= newitr << 16;
	else
		newitr |= IXGBE_EITR_CNT_WDIS;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_queue(adapter, que->msix);
	return;
}

1613 static void
1614 ixgbe_msix_link(void *arg)
1615 {
1616 	struct adapter	*adapter = arg;
1617 	struct ixgbe_hw *hw = &adapter->hw;
1618 	u32		reg_eicr, mod_mask;
1619 
1620 	++adapter->link_irq;
1621 
1622 	/* Pause other interrupts */
1623 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
1624 
1625 	/* First get the cause */
1626 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1627 	/* Be sure the queue bits are not cleared */
1628 	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1629 	/* Clear interrupt with write */
1630 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1631 
1632 	/* Link status change */
1633 	if (reg_eicr & IXGBE_EICR_LSC) {
1634 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1635 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
1636 	}
1637 
1638 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1639 #ifdef IXGBE_FDIR
1640 		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1641 			/* This is probably overkill :) */
1642 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1643 				return;
1644                 	/* Disable the interrupt */
1645 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1646 			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1647 		} else
1648 #endif
1649 		if (reg_eicr & IXGBE_EICR_ECC) {
1650 			device_printf(adapter->dev, "CRITICAL: ECC ERROR!! "
1651 			    "Please Reboot!!\n");
1652 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1653 		}
1654 
1655 		/* Check for over temp condition */
1656 		if (reg_eicr & IXGBE_EICR_TS) {
1657 			device_printf(adapter->dev, "CRITICAL: OVER TEMP!! "
1658 			    "PHY IS SHUT DOWN!!\n");
1659 			device_printf(adapter->dev, "System shutdown required!\n");
1660 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1661 		}
1662 #ifdef PCI_IOV
1663 		if (reg_eicr & IXGBE_EICR_MAILBOX)
1664 			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1665 #endif
1666 	}
1667 
1668 	/* Pluggable optics-related interrupt */
1669 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1670 		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1671 	else
1672 		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1673 
1674 	if (ixgbe_is_sfp(hw)) {
1675 		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1676 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1677 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1678 		} else if (reg_eicr & mod_mask) {
1679 			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1680 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1681 		}
1682 	}
1683 
1684 	/* Check for fan failure */
1685 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1686 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1687 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1688 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1689 		    "REPLACE IMMEDIATELY!!\n");
1690 	}
1691 
1692 	/* External PHY interrupt */
1693 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1694 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1695 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1696 		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1697 	}
1698 
1699 	/* Re-enable other interrupts */
1700 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1701 	return;
1702 }
1703 
1704 /*********************************************************************
1705  *
1706  *  Media Ioctl callback
1707  *
1708  *  This routine is called whenever the user queries the status of
1709  *  the interface using ifconfig.
1710  *
1711  **********************************************************************/
1712 static void
1713 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1714 {
1715 	struct adapter *adapter = ifp->if_softc;
1716 	struct ixgbe_hw *hw = &adapter->hw;
1717 	int layer;
1718 
1719 	INIT_DEBUGOUT("ixgbe_media_status: begin");
1720 	IXGBE_CORE_LOCK(adapter);
1721 	ixgbe_update_link_status(adapter);
1722 
1723 	ifmr->ifm_status = IFM_AVALID;
1724 	ifmr->ifm_active = IFM_ETHER;
1725 
1726 	if (!adapter->link_active) {
1727 		IXGBE_CORE_UNLOCK(adapter);
1728 		return;
1729 	}
1730 
1731 	ifmr->ifm_status |= IFM_ACTIVE;
1732 	layer = adapter->phy_layer;
1733 
1734 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1735 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1736 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1737 		switch (adapter->link_speed) {
1738 		case IXGBE_LINK_SPEED_10GB_FULL:
1739 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1740 			break;
1741 		case IXGBE_LINK_SPEED_1GB_FULL:
1742 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1743 			break;
1744 		case IXGBE_LINK_SPEED_100_FULL:
1745 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1746 			break;
1747 		}
1748 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1749 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1750 		switch (adapter->link_speed) {
1751 		case IXGBE_LINK_SPEED_10GB_FULL:
1752 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1753 			break;
1754 		}
1755 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1756 		switch (adapter->link_speed) {
1757 		case IXGBE_LINK_SPEED_10GB_FULL:
1758 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1759 			break;
1760 		case IXGBE_LINK_SPEED_1GB_FULL:
1761 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1762 			break;
1763 		}
1764 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1765 		switch (adapter->link_speed) {
1766 		case IXGBE_LINK_SPEED_10GB_FULL:
1767 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1768 			break;
1769 		case IXGBE_LINK_SPEED_1GB_FULL:
1770 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1771 			break;
1772 		}
1773 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1774 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1775 		switch (adapter->link_speed) {
1776 		case IXGBE_LINK_SPEED_10GB_FULL:
1777 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1778 			break;
1779 		case IXGBE_LINK_SPEED_1GB_FULL:
1780 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1781 			break;
1782 		}
1783 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1784 		switch (adapter->link_speed) {
1785 		case IXGBE_LINK_SPEED_10GB_FULL:
1786 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1787 			break;
1788 		}
1789 	/*
1790 	** XXX: These need to use the proper media types once
1791 	** they're added.
1792 	*/
1793 #ifndef IFM_ETH_XTYPE
1794 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1795 		switch (adapter->link_speed) {
1796 		case IXGBE_LINK_SPEED_10GB_FULL:
1797 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1798 			break;
1799 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1800 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1801 			break;
1802 		case IXGBE_LINK_SPEED_1GB_FULL:
1803 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1804 			break;
1805 		}
1806 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1807 	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1808 		switch (adapter->link_speed) {
1809 		case IXGBE_LINK_SPEED_10GB_FULL:
1810 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1811 			break;
1812 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1813 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1814 			break;
1815 		case IXGBE_LINK_SPEED_1GB_FULL:
1816 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1817 			break;
1818 		}
1819 #else
1820 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1821 		switch (adapter->link_speed) {
1822 		case IXGBE_LINK_SPEED_10GB_FULL:
1823 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
1824 			break;
1825 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1826 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1827 			break;
1828 		case IXGBE_LINK_SPEED_1GB_FULL:
1829 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1830 			break;
1831 		}
1832 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1833 	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1834 		switch (adapter->link_speed) {
1835 		case IXGBE_LINK_SPEED_10GB_FULL:
1836 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
1837 			break;
1838 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1839 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1840 			break;
1841 		case IXGBE_LINK_SPEED_1GB_FULL:
1842 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1843 			break;
1844 		}
1845 #endif
1846 
1847 	/* If nothing is recognized... */
1848 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1849 		ifmr->ifm_active |= IFM_UNKNOWN;
1850 
1851 #if __FreeBSD_version >= 900025
1852 	/* Display current flow control setting used on link */
1853 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1854 	    hw->fc.current_mode == ixgbe_fc_full)
1855 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1856 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1857 	    hw->fc.current_mode == ixgbe_fc_full)
1858 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1859 #endif
1860 
1861 	IXGBE_CORE_UNLOCK(adapter);
1862 
1863 	return;
1864 }
1865 
1866 /*********************************************************************
1867  *
1868  *  Media Ioctl callback
1869  *
1870  *  This routine is called when the user changes speed/duplex using
1871  *  media/mediaopt option with ifconfig.
1872  *
1873  **********************************************************************/
1874 static int
1875 ixgbe_media_change(struct ifnet * ifp)
1876 {
1877 	struct adapter *adapter = ifp->if_softc;
1878 	struct ifmedia *ifm = &adapter->media;
1879 	struct ixgbe_hw *hw = &adapter->hw;
1880 	ixgbe_link_speed speed = 0;
1881 
1882 	INIT_DEBUGOUT("ixgbe_media_change: begin");
1883 
1884 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1885 		return (EINVAL);
1886 
1887 	if (hw->phy.media_type == ixgbe_media_type_backplane)
1888 		return (ENODEV);
1889 
1890 	/*
1891 	** We don't actually need to check against the supported
1892 	** media types of the adapter; ifmedia will take care of
1893 	** that for us.
1894 	*/
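	/*
	 * Note that the speed cases below deliberately fall through: a
	 * faster selection also advertises the slower rates the same PHY
	 * can run, e.g. IFM_10G_T accumulates 100M + 1G + 10G so that
	 * autonegotiation can fall back if the peer cannot do 10G.
	 */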
1895 #ifndef IFM_ETH_XTYPE
1896 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1897 		case IFM_AUTO:
1898 		case IFM_10G_T:
1899 			speed |= IXGBE_LINK_SPEED_100_FULL;
1900 		case IFM_10G_LRM:
1901 		case IFM_10G_SR: /* KR, too */
1902 		case IFM_10G_LR:
1903 		case IFM_10G_CX4: /* KX4 */
1904 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1905 		case IFM_10G_TWINAX:
1906 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1907 			break;
1908 		case IFM_1000_T:
1909 			speed |= IXGBE_LINK_SPEED_100_FULL;
1910 		case IFM_1000_LX:
1911 		case IFM_1000_SX:
1912 		case IFM_1000_CX: /* KX */
1913 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1914 			break;
1915 		case IFM_100_TX:
1916 			speed |= IXGBE_LINK_SPEED_100_FULL;
1917 			break;
1918 		default:
1919 			goto invalid;
1920 	}
1921 #else
1922 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1923 		case IFM_AUTO:
1924 		case IFM_10G_T:
1925 			speed |= IXGBE_LINK_SPEED_100_FULL;
1926 		case IFM_10G_LRM:
1927 		case IFM_10G_KR:
1928 		case IFM_10G_LR:
1929 		case IFM_10G_KX4:
1930 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1931 		case IFM_10G_TWINAX:
1932 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1933 			break;
1934 		case IFM_1000_T:
1935 			speed |= IXGBE_LINK_SPEED_100_FULL;
1936 		case IFM_1000_LX:
1937 		case IFM_1000_SX:
1938 		case IFM_1000_KX:
1939 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1940 			break;
1941 		case IFM_100_TX:
1942 			speed |= IXGBE_LINK_SPEED_100_FULL;
1943 			break;
1944 		default:
1945 			goto invalid;
1946 	}
1947 #endif
1948 
1949 	hw->mac.autotry_restart = TRUE;
1950 	hw->mac.ops.setup_link(hw, speed, TRUE);
1951 	adapter->advertise =
1952 		((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1953 		((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1954 		((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1955 
1956 	return (0);
1957 
1958 invalid:
1959 	device_printf(adapter->dev, "Invalid media type!\n");
1960 	return (EINVAL);
1961 }
1962 
1963 static void
1964 ixgbe_set_promisc(struct adapter *adapter)
1965 {
1966 	u_int32_t       reg_rctl;
1967 	struct ifnet   *ifp = adapter->ifp;
1968 	int		mcnt = 0;
1969 
1970 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1971 	reg_rctl &= (~IXGBE_FCTRL_UPE);
1972 	if (ifp->if_flags & IFF_ALLMULTI)
1973 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1974 	else {
1975 		struct	ifmultiaddr *ifma;
1976 #if __FreeBSD_version < 800000
1977 		IF_ADDR_LOCK(ifp);
1978 #else
1979 		if_maddr_rlock(ifp);
1980 #endif
1981 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1982 			if (ifma->ifma_addr->sa_family != AF_LINK)
1983 				continue;
1984 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1985 				break;
1986 			mcnt++;
1987 		}
1988 #if __FreeBSD_version < 800000
1989 		IF_ADDR_UNLOCK(ifp);
1990 #else
1991 		if_maddr_runlock(ifp);
1992 #endif
1993 	}
1994 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1995 		reg_rctl &= (~IXGBE_FCTRL_MPE);
1996 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1997 
1998 	if (ifp->if_flags & IFF_PROMISC) {
1999 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2000 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2001 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2002 		reg_rctl |= IXGBE_FCTRL_MPE;
2003 		reg_rctl &= ~IXGBE_FCTRL_UPE;
2004 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2005 	}
2006 	return;
2007 }
2008 
2009 
2010 /*********************************************************************
2011  *  Multicast Update
2012  *
2013  *  This routine is called whenever multicast address list is updated.
2014  *
2015  **********************************************************************/
2016 #define IXGBE_RAR_ENTRIES 16
2017 
2018 static void
2019 ixgbe_set_multi(struct adapter *adapter)
2020 {
2021 	u32			fctrl;
2022 	u8			*update_ptr;
2023 	struct ifmultiaddr	*ifma;
2024 	struct ixgbe_mc_addr	*mta;
2025 	int			mcnt = 0;
2026 	struct ifnet		*ifp = adapter->ifp;
2027 
2028 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
2029 
2030 	mta = adapter->mta;
2031 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
2032 
2033 #if __FreeBSD_version < 800000
2034 	IF_ADDR_LOCK(ifp);
2035 #else
2036 	if_maddr_rlock(ifp);
2037 #endif
2038 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2039 		if (ifma->ifma_addr->sa_family != AF_LINK)
2040 			continue;
2041 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2042 			break;
2043 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2044 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2045 		mta[mcnt].vmdq = adapter->pool;
2046 		mcnt++;
2047 	}
2048 #if __FreeBSD_version < 800000
2049 	IF_ADDR_UNLOCK(ifp);
2050 #else
2051 	if_maddr_runlock(ifp);
2052 #endif
2053 
2054 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2055 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2056 	if (ifp->if_flags & IFF_PROMISC)
2057 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2058 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
2059 	    ifp->if_flags & IFF_ALLMULTI) {
2060 		fctrl |= IXGBE_FCTRL_MPE;
2061 		fctrl &= ~IXGBE_FCTRL_UPE;
2062 	} else
2063 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
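	/*
	 * Net effect: PROMISC -> UPE|MPE (accept everything); ALLMULTI or
	 * an overflowing multicast list -> MPE only; otherwise exact-match
	 * filtering with the MTA programmed below.
	 */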
2064 
2065 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2066 
2067 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
2068 		update_ptr = (u8 *)mta;
2069 		ixgbe_update_mc_addr_list(&adapter->hw,
2070 		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
2071 	}
2072 
2073 	return;
2074 }
2075 
2076 /*
2077  * This is an iterator function now needed by the multicast
2078  * shared code. It simply feeds the shared code routine the
2079  * addresses in the array of ixgbe_set_multi() one by one.
2080  */
2081 static u8 *
2082 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
2083 {
2084 	struct ixgbe_mc_addr *mta;
2085 
2086 	mta = (struct ixgbe_mc_addr *)*update_ptr;
2087 	*vmdq = mta->vmdq;
2088 
2089 	*update_ptr = (u8 *)(mta + 1);
2090 	return (mta->addr);
2091 }
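/*
 * Conceptually the shared code consumes the iterator like this
 * (a sketch for illustration, not the actual shared-code source):
 *
 *	u8 *p = update_ptr;
 *	for (u32 i = 0; i < mc_addr_count; i++) {
 *		u32 vmdq;
 *		u8 *addr = next_mc(hw, &p, &vmdq);	// ixgbe_mc_array_itr
 *		// ...hash addr and set the matching MTA bit, using vmdq...
 *	}
 */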
2092 
2093 
2094 /*********************************************************************
2095  *  Timer routine
2096  *
2097  *  This routine checks for link status, updates statistics,
2098  *  and runs the watchdog check.
2099  *
2100  **********************************************************************/
2101 
2102 static void
2103 ixgbe_local_timer(void *arg)
2104 {
2105 	struct adapter	*adapter = arg;
2106 	device_t	dev = adapter->dev;
2107 	struct ix_queue *que = adapter->queues;
2108 	u64		queues = 0;
2109 	int		hung = 0;
2110 
2111 	mtx_assert(&adapter->core_mtx, MA_OWNED);
2112 
2113 	/* Check for pluggable optics */
2114 	if (adapter->sfp_probe)
2115 		if (!ixgbe_sfp_probe(adapter))
2116 			goto out; /* Nothing to do */
2117 
2118 	ixgbe_update_link_status(adapter);
2119 	ixgbe_update_stats_counters(adapter);
2120 
2121 	/*
2122 	** Check the TX queues status
2123 	**	- mark hung queues so we don't schedule on them
2124 	**      - watchdog only if all queues show hung
2125 	*/
2126 	for (int i = 0; i < adapter->num_queues; i++, que++) {
2127 		/* Keep track of queues with work for soft irq */
2128 		if (que->txr->busy)
2129 			queues |= ((u64)1 << que->me);
2130 		/*
2131 		** Each time txeof runs without cleaning but there
2132 		** are uncleaned descriptors, it increments busy. If
2133 		** we get to the MAX we declare it hung.
2134 		*/
2135 		if (que->busy == IXGBE_QUEUE_HUNG) {
2136 			++hung;
2137 			/* Mark the queue as inactive */
2138 			adapter->active_queues &= ~((u64)1 << que->me);
2139 			continue;
2140 		} else {
2141 			/* Check if we've come back from hung */
2142 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2143 				adapter->active_queues |= ((u64)1 << que->me);
2144 		}
2145 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
2146 			device_printf(dev, "Warning: queue %d "
2147 			    "appears to be hung!\n", i);
2148 			que->txr->busy = IXGBE_QUEUE_HUNG;
2149 			++hung;
2150 		}
2151 
2152 	}
2153 
2154 	/* Only truly watchdog if all queues show hung */
2155 	if (hung == adapter->num_queues)
2156 		goto watchdog;
2157 	else if (queues != 0) { /* Force an IRQ on queues with work */
2158 		ixgbe_rearm_queues(adapter, queues);
2159 	}
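	/*
	 * Example (illustrative): with four queues where only queues 0
	 * and 2 had TX work outstanding, queues == 0x5 and only those
	 * two vectors are re-triggered; fully idle queues are left alone.
	 */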
2160 
2161 out:
2162 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
2163 	return;
2164 
2165 watchdog:
2166 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2167 	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2168 	adapter->watchdog_events++;
2169 	ixgbe_init_locked(adapter);
2170 }
2171 
2172 
2173 /*
2174 ** Note: this routine updates the OS on the link state
2175 **	the real check of the hardware only happens with
2176 **	a link interrupt.
2177 */
2178 static void
2179 ixgbe_update_link_status(struct adapter *adapter)
2180 {
2181 	struct ifnet	*ifp = adapter->ifp;
2182 	device_t dev = adapter->dev;
2183 
2184 	if (adapter->link_up) {
2185 		if (adapter->link_active == FALSE) {
2186 			if (bootverbose)
2187 				device_printf(dev, "Link is up %d Gbps %s\n",
2188 				    ((adapter->link_speed == 128) ? 10 : 1), /* 128 == 10GB_FULL */
2189 				    "Full Duplex");
2190 			adapter->link_active = TRUE;
2191 			/* Update any Flow Control changes */
2192 			ixgbe_fc_enable(&adapter->hw);
2193 			/* Update DMA coalescing config */
2194 			ixgbe_config_dmac(adapter);
2195 			if_link_state_change(ifp, LINK_STATE_UP);
2196 #ifdef PCI_IOV
2197 			ixgbe_ping_all_vfs(adapter);
2198 #endif
2199 		}
2200 	} else { /* Link down */
2201 		if (adapter->link_active == TRUE) {
2202 			if (bootverbose)
2203 				device_printf(dev, "Link is Down\n");
2204 			if_link_state_change(ifp, LINK_STATE_DOWN);
2205 			adapter->link_active = FALSE;
2206 #ifdef PCI_IOV
2207 			ixgbe_ping_all_vfs(adapter);
2208 #endif
2209 		}
2210 	}
2211 
2212 	return;
2213 }
2214 
2215 
2216 /*********************************************************************
2217  *
2218  *  This routine disables all traffic on the adapter by issuing a
2219  *  global reset on the MAC and deallocates TX/RX buffers.
2220  *
2221  **********************************************************************/
2222 
2223 static void
2224 ixgbe_stop(void *arg)
2225 {
2226 	struct ifnet   *ifp;
2227 	struct adapter *adapter = arg;
2228 	struct ixgbe_hw *hw = &adapter->hw;
2229 	ifp = adapter->ifp;
2230 
2231 	mtx_assert(&adapter->core_mtx, MA_OWNED);
2232 
2233 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
2234 	ixgbe_disable_intr(adapter);
2235 	callout_stop(&adapter->timer);
2236 
2237 	/* Let the stack know...*/
2238 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2239 
2240 	ixgbe_reset_hw(hw);
2241 	hw->adapter_stopped = FALSE;
2242 	ixgbe_stop_adapter(hw);
2243 	if (hw->mac.type == ixgbe_mac_82599EB)
2244 		ixgbe_stop_mac_link_on_d3_82599(hw);
2245 	/* Turn off the laser - noop with no optics */
2246 	ixgbe_disable_tx_laser(hw);
2247 
2248 	/* Update the stack */
2249 	adapter->link_up = FALSE;
2250 	ixgbe_update_link_status(adapter);
2251 
2252 	/* reprogram the RAR[0] in case user changed it. */
2253 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2254 
2255 	return;
2256 }
2257 
2258 
2259 /*********************************************************************
2260  *
2261  *  Determine hardware revision.
2262  *
2263  **********************************************************************/
2264 static void
2265 ixgbe_identify_hardware(struct adapter *adapter)
2266 {
2267 	device_t        dev = adapter->dev;
2268 	struct ixgbe_hw *hw = &adapter->hw;
2269 
2270 	/* Save off the information about this board */
2271 	hw->vendor_id = pci_get_vendor(dev);
2272 	hw->device_id = pci_get_device(dev);
2273 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2274 	hw->subsystem_vendor_id =
2275 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2276 	hw->subsystem_device_id =
2277 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2278 
2279 	/*
2280 	** Make sure BUSMASTER is set
2281 	*/
2282 	pci_enable_busmaster(dev);
2283 
2284 	/* We need this here to set the num_segs below */
2285 	ixgbe_set_mac_type(hw);
2286 
2287 	/* Pick up the 82599 settings */
2288 	if (hw->mac.type != ixgbe_mac_82598EB) {
2289 		hw->phy.smart_speed = ixgbe_smart_speed;
2290 		adapter->num_segs = IXGBE_82599_SCATTER;
2291 	} else
2292 		adapter->num_segs = IXGBE_82598_SCATTER;
2293 
2294 	return;
2295 }
2296 
2297 /*********************************************************************
2298  *
2299  *  Determine optic type
2300  *
2301  **********************************************************************/
2302 static void
2303 ixgbe_setup_optics(struct adapter *adapter)
2304 {
2305 	struct ixgbe_hw *hw = &adapter->hw;
2306 	int		layer;
2307 
2308 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2309 
2310 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2311 		adapter->optics = IFM_10G_T;
2312 		return;
2313 	}
2314 
2315 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2316 		adapter->optics = IFM_1000_T;
2317 		return;
2318 	}
2319 
2320 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2321 		adapter->optics = IFM_1000_SX;
2322 		return;
2323 	}
2324 
2325 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2326 	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2327 		adapter->optics = IFM_10G_LR;
2328 		return;
2329 	}
2330 
2331 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2332 		adapter->optics = IFM_10G_SR;
2333 		return;
2334 	}
2335 
2336 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2337 		adapter->optics = IFM_10G_TWINAX;
2338 		return;
2339 	}
2340 
2341 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2342 	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2343 		adapter->optics = IFM_10G_CX4;
2344 		return;
2345 	}
2346 
2347 	/* If we get here just set the default */
2348 	adapter->optics = IFM_ETHER | IFM_AUTO;
2349 	return;
2350 }
2351 
2352 /*********************************************************************
2353  *
2354  *  Setup the Legacy or MSI Interrupt handler
2355  *
2356  **********************************************************************/
2357 static int
2358 ixgbe_allocate_legacy(struct adapter *adapter)
2359 {
2360 	device_t	dev = adapter->dev;
2361 	struct		ix_queue *que = adapter->queues;
2362 #ifndef IXGBE_LEGACY_TX
2363 	struct tx_ring		*txr = adapter->tx_rings;
2364 #endif
2365 	int		error, rid = 0;
2366 
2367 	/* MSI RID at 1 */
2368 	if (adapter->msix == 1)
2369 		rid = 1;
2370 
2371 	/* We allocate a single interrupt resource */
2372 	adapter->res = bus_alloc_resource_any(dev,
2373             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2374 	if (adapter->res == NULL) {
2375 		device_printf(dev, "Unable to allocate bus resource: "
2376 		    "interrupt\n");
2377 		return (ENXIO);
2378 	}
2379 
2380 	/*
2381 	 * Try allocating a fast interrupt and the associated deferred
2382 	 * processing contexts.
2383 	 */
2384 #ifndef IXGBE_LEGACY_TX
2385 	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2386 #endif
2387 	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2388 	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2389             taskqueue_thread_enqueue, &que->tq);
2390 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2391             device_get_nameunit(adapter->dev));
2392 
2393 	/* Tasklets for Link, SFP and Multispeed Fiber */
2394 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2395 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2396 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2397 	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2398 #ifdef IXGBE_FDIR
2399 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2400 #endif
2401 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2402 	    taskqueue_thread_enqueue, &adapter->tq);
2403 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2404 	    device_get_nameunit(adapter->dev));
2405 
2406 	if ((error = bus_setup_intr(dev, adapter->res,
2407             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2408             que, &adapter->tag)) != 0) {
2409 		device_printf(dev, "Failed to register fast interrupt "
2410 		    "handler: %d\n", error);
2411 		taskqueue_free(que->tq);
2412 		taskqueue_free(adapter->tq);
2413 		que->tq = NULL;
2414 		adapter->tq = NULL;
2415 		return (error);
2416 	}
2417 	/* For simplicity in the handlers */
2418 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2419 
2420 	return (0);
2421 }
2422 
2423 
2424 /*********************************************************************
2425  *
2426  *  Setup MSIX Interrupt resources and handlers
2427  *
2428  **********************************************************************/
2429 static int
2430 ixgbe_allocate_msix(struct adapter *adapter)
2431 {
2432 	device_t        dev = adapter->dev;
2433 	struct 		ix_queue *que = adapter->queues;
2434 	struct  	tx_ring *txr = adapter->tx_rings;
2435 	int 		error, rid, vector = 0;
2436 	int		cpu_id = 0;
2437 #ifdef	RSS
2438 	cpuset_t	cpu_mask;
2439 #endif
2440 
2441 #ifdef	RSS
2442 	/*
2443 	 * If we're doing RSS, the number of queues needs to
2444 	 * match the number of RSS buckets that are configured.
2445 	 *
2446 	 * + If there's more queues than RSS buckets, we'll end
2447 	 *   up with queues that get no traffic.
2448 	 *
2449 	 * + If there's more RSS buckets than queues, we'll end
2450 	 *   up having multiple RSS buckets map to the same queue,
2451 	 *   so there'll be some contention.
2452 	 */
2453 	if (adapter->num_queues != rss_getnumbuckets()) {
2454 		device_printf(dev,
2455 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
2456 		    "; performance will be impacted.\n",
2457 		    __func__,
2458 		    adapter->num_queues,
2459 		    rss_getnumbuckets());
2460 	}
2461 #endif
2462 
2463 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2464 		rid = vector + 1;
2465 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2466 		    RF_SHAREABLE | RF_ACTIVE);
2467 		if (que->res == NULL) {
2468 			device_printf(dev, "Unable to allocate"
2469 			    " bus resource: que interrupt [%d]\n", vector);
2470 			return (ENXIO);
2471 		}
2472 		/* Set the handler function */
2473 		error = bus_setup_intr(dev, que->res,
2474 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2475 		    ixgbe_msix_que, que, &que->tag);
2476 		if (error) {
2477 			que->res = NULL;
2478 			device_printf(dev, "Failed to register QUE handler");
2479 			return (error);
2480 		}
2481 #if __FreeBSD_version >= 800504
2482 		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2483 #endif
2484 		que->msix = vector;
2485 		adapter->active_queues |= (u64)(1 << que->msix);
2486 #ifdef	RSS
2487 		/*
2488 		 * The queue ID is used as the RSS layer bucket ID.
2489 		 * We look up the queue ID -> RSS CPU ID and select
2490 		 * that.
2491 		 */
2492 		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2493 #else
2494 		/*
2495 		 * Bind the msix vector, and thus the
2496 		 * rings to the corresponding cpu.
2497 		 *
2498 		 * This just happens to match the default RSS round-robin
2499 		 * bucket -> queue -> CPU allocation.
2500 		 */
2501 		if (adapter->num_queues > 1)
2502 			cpu_id = i;
2503 #endif
2504 		if (adapter->num_queues > 1)
2505 			bus_bind_intr(dev, que->res, cpu_id);
2506 #ifdef IXGBE_DEBUG
2507 #ifdef	RSS
2508 		device_printf(dev,
2509 		    "Bound RSS bucket %d to CPU %d\n",
2510 		    i, cpu_id);
2511 #else
2512 		device_printf(dev,
2513 		    "Bound queue %d to cpu %d\n",
2514 		    i, cpu_id);
2515 #endif
2516 #endif /* IXGBE_DEBUG */
2517 
2518 
2519 #ifndef IXGBE_LEGACY_TX
2520 		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2521 #endif
2522 		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2523 		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2524 		    taskqueue_thread_enqueue, &que->tq);
2525 #ifdef	RSS
2526 		CPU_SETOF(cpu_id, &cpu_mask);
2527 		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2528 		    &cpu_mask,
2529 		    "%s (bucket %d)",
2530 		    device_get_nameunit(adapter->dev),
2531 		    cpu_id);
2532 #else
2533 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
2534 		    device_get_nameunit(adapter->dev), i);
2535 #endif
2536 	}
2537 
2538 	/* and Link */
2539 	rid = vector + 1;
2540 	adapter->res = bus_alloc_resource_any(dev,
2541 	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2542 	if (!adapter->res) {
2543 		device_printf(dev, "Unable to allocate"
2544 		    " bus resource: Link interrupt [%d]\n", rid);
2545 		return (ENXIO);
2546 	}
2547 	/* Set the link handler function */
2548 	error = bus_setup_intr(dev, adapter->res,
2549 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2550 	    ixgbe_msix_link, adapter, &adapter->tag);
2551 	if (error) {
2552 		adapter->res = NULL;
2553 		device_printf(dev, "Failed to register LINK handler");
2554 		return (error);
2555 	}
2556 #if __FreeBSD_version >= 800504
2557 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2558 #endif
2559 	adapter->vector = vector;
2560 	/* Tasklets for Link, SFP and Multispeed Fiber */
2561 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2562 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2563 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2564 #ifdef PCI_IOV
2565 	TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2566 #endif
2567 	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2568 #ifdef IXGBE_FDIR
2569 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2570 #endif
2571 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2572 	    taskqueue_thread_enqueue, &adapter->tq);
2573 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2574 	    device_get_nameunit(adapter->dev));
2575 
2576 	return (0);
2577 }
2578 
2579 /*
2580  * Setup Either MSI/X or MSI
2581  */
2582 static int
2583 ixgbe_setup_msix(struct adapter *adapter)
2584 {
2585 	device_t dev = adapter->dev;
2586 	int rid, want, queues, msgs;
2587 
2588 	/* Override by tuneable */
2589 	if (ixgbe_enable_msix == 0)
2590 		goto msi;
2591 
2592 	/* First try MSI/X */
2593 	msgs = pci_msix_count(dev);
2594 	if (msgs == 0)
2595 		goto msi;
2596 	rid = PCIR_BAR(MSIX_82598_BAR);
2597 	adapter->msix_mem = bus_alloc_resource_any(dev,
2598 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2599 	if (adapter->msix_mem == NULL) {
2600 		rid += 4;	/* 82599 maps in higher BAR */
2601 		adapter->msix_mem = bus_alloc_resource_any(dev,
2602 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2603 	}
2604 	if (adapter->msix_mem == NULL) {
2605 		/* May not be enabled */
2606 		device_printf(adapter->dev,
2607 		    "Unable to map MSIX table\n");
2608 		goto msi;
2609 	}
2610 
2611 	/* Figure out a reasonable auto config value */
2612 	queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus;
2613 
2614 #ifdef	RSS
2615 	/* If we're doing RSS, clamp at the number of RSS buckets */
2616 	if (queues > rss_getnumbuckets())
2617 		queues = rss_getnumbuckets();
2618 #endif
2619 
2620 	if (ixgbe_num_queues != 0)
2621 		queues = ixgbe_num_queues;
2622 	/* Set max queues to 8 when autoconfiguring */
2623 	else if ((ixgbe_num_queues == 0) && (queues > 8))
2624 		queues = 8;
2625 
2626 	/* reflect correct sysctl value */
2627 	ixgbe_num_queues = queues;
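	/*
	 * Example (assumed values): with mp_ncpus == 16 and 25 MSIX
	 * messages advertised, autoconfig starts at min(16, 24) = 16
	 * queues and clamps to 8, so "want" below becomes 9 vectors:
	 * one per RX/TX queue pair plus one for link.
	 */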
2628 
2629 	/*
2630 	** Want one vector (RX/TX pair) per queue
2631 	** plus an additional for Link.
2632 	*/
2633 	want = queues + 1;
2634 	if (msgs >= want)
2635 		msgs = want;
2636 	else {
2637 		device_printf(adapter->dev,
2638 		    "MSIX Configuration Problem, "
2639 		    "%d vectors available but %d wanted!\n",
2640 		    msgs, want);
2641 		goto msi;
2642 	}
2643 	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2644 		device_printf(adapter->dev,
2645 		    "Using MSIX interrupts with %d vectors\n", msgs);
2646 		adapter->num_queues = queues;
2647 		return (msgs);
2648 	}
2649 	/*
2650 	** If MSIX alloc failed or provided us with
2651 	** less than needed, free and fall through to MSI
2652 	*/
2653 	pci_release_msi(dev);
2654 
2655 msi:
2656 	if (adapter->msix_mem != NULL) {
2657 		bus_release_resource(dev, SYS_RES_MEMORY,
2658 		    rid, adapter->msix_mem);
2659 		adapter->msix_mem = NULL;
2660 	}
2661 	msgs = 1;
2662 	if (pci_alloc_msi(dev, &msgs) == 0) {
2663 		device_printf(adapter->dev, "Using an MSI interrupt\n");
2664 		return (msgs);
2665 	}
2666 	device_printf(adapter->dev, "Using a Legacy interrupt\n");
2667 	return (0);
2668 }
2669 
2670 
2671 static int
2672 ixgbe_allocate_pci_resources(struct adapter *adapter)
2673 {
2674 	int             rid;
2675 	device_t        dev = adapter->dev;
2676 
2677 	rid = PCIR_BAR(0);
2678 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2679 	    &rid, RF_ACTIVE);
2680 
2681 	if (!(adapter->pci_mem)) {
2682 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2683 		return (ENXIO);
2684 	}
2685 
2686 	/* Save bus_space values for READ/WRITE_REG macros */
2687 	adapter->osdep.mem_bus_space_tag =
2688 		rman_get_bustag(adapter->pci_mem);
2689 	adapter->osdep.mem_bus_space_handle =
2690 		rman_get_bushandle(adapter->pci_mem);
2691 	/* Set hw values for shared code */
2692 	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2693 	adapter->hw.back = adapter;
2694 
2695 	/* Default to 1 queue if MSI-X setup fails */
2696 	adapter->num_queues = 1;
2697 
2698 	/*
2699 	** Now setup MSI or MSI-X, should
2700 	** return us the number of supported
2701 	** vectors. (Will be 1 for MSI)
2702 	*/
2703 	adapter->msix = ixgbe_setup_msix(adapter);
2704 	return (0);
2705 }
2706 
2707 static void
2708 ixgbe_free_pci_resources(struct adapter * adapter)
2709 {
2710 	struct 		ix_queue *que = adapter->queues;
2711 	device_t	dev = adapter->dev;
2712 	int		rid, memrid;
2713 
2714 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2715 		memrid = PCIR_BAR(MSIX_82598_BAR);
2716 	else
2717 		memrid = PCIR_BAR(MSIX_82599_BAR);
2718 
2719 	/*
2720 	** There is a slight possibility of a failure mode
2721 	** in attach that will result in entering this function
2722 	** before interrupt resources have been initialized, and
2723 	** in that case we do not want to execute the loops below.
2724 	** We can detect this reliably by the state of the adapter
2725 	** res pointer.
2726 	*/
2727 	if (adapter->res == NULL)
2728 		goto mem;
2729 
2730 	/*
2731 	**  Release all msix queue resources:
2732 	*/
2733 	for (int i = 0; i < adapter->num_queues; i++, que++) {
2734 		rid = que->msix + 1;
2735 		if (que->tag != NULL) {
2736 			bus_teardown_intr(dev, que->res, que->tag);
2737 			que->tag = NULL;
2738 		}
2739 		if (que->res != NULL)
2740 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2741 	}
2742 
2743 
2744 	/* Clean the Legacy or Link interrupt last */
2745 	if (adapter->vector) /* we are doing MSIX */
2746 		rid = adapter->vector + 1;
2747 	else
2748 		rid = (adapter->msix != 0) ? 1 : 0;
2749 
2750 	if (adapter->tag != NULL) {
2751 		bus_teardown_intr(dev, adapter->res, adapter->tag);
2752 		adapter->tag = NULL;
2753 	}
2754 	if (adapter->res != NULL)
2755 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2756 
2757 mem:
2758 	if (adapter->msix)
2759 		pci_release_msi(dev);
2760 
2761 	if (adapter->msix_mem != NULL)
2762 		bus_release_resource(dev, SYS_RES_MEMORY,
2763 		    memrid, adapter->msix_mem);
2764 
2765 	if (adapter->pci_mem != NULL)
2766 		bus_release_resource(dev, SYS_RES_MEMORY,
2767 		    PCIR_BAR(0), adapter->pci_mem);
2768 
2769 	return;
2770 }
2771 
2772 /*********************************************************************
2773  *
2774  *  Setup networking device structure and register an interface.
2775  *
2776  **********************************************************************/
2777 static int
2778 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2779 {
2780 	struct ifnet   *ifp;
2781 
2782 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2783 
2784 	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2785 	if (ifp == NULL) {
2786 		device_printf(dev, "can not allocate ifnet structure\n");
2787 		return (-1);
2788 	}
2789 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2790 	ifp->if_baudrate = IF_Gbps(10);
2791 	ifp->if_init = ixgbe_init;
2792 	ifp->if_softc = adapter;
2793 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2794 	ifp->if_ioctl = ixgbe_ioctl;
2795 #if __FreeBSD_version >= 1100036
2796 	if_setgetcounterfn(ifp, ixgbe_get_counter);
2797 #endif
2798 #if __FreeBSD_version >= 1100045
2799 	/* TSO parameters */
2800 	ifp->if_hw_tsomax = 65518;
2801 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2802 	ifp->if_hw_tsomaxsegsize = 2048;
2803 #endif
2804 #ifndef IXGBE_LEGACY_TX
2805 	ifp->if_transmit = ixgbe_mq_start;
2806 	ifp->if_qflush = ixgbe_qflush;
2807 #else
2808 	ifp->if_start = ixgbe_start;
2809 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2810 	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2811 	IFQ_SET_READY(&ifp->if_snd);
2812 #endif
2813 
2814 	ether_ifattach(ifp, adapter->hw.mac.addr);
2815 
2816 	adapter->max_frame_size =
2817 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2818 
2819 	/*
2820 	 * Tell the upper layer(s) we support long frames.
2821 	 */
2822 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2823 
2824 	/* Set capability flags */
2825 	ifp->if_capabilities |= IFCAP_RXCSUM
2826 			     |  IFCAP_TXCSUM
2827 			     |  IFCAP_RXCSUM_IPV6
2828 			     |  IFCAP_TXCSUM_IPV6
2829 			     |  IFCAP_TSO4
2830 			     |  IFCAP_TSO6
2831 			     |  IFCAP_LRO
2832 			     |  IFCAP_VLAN_HWTAGGING
2833 			     |  IFCAP_VLAN_HWTSO
2834 			     |  IFCAP_VLAN_HWCSUM
2835 			     |  IFCAP_JUMBO_MTU
2836 			     |  IFCAP_VLAN_MTU
2837 			     |  IFCAP_HWSTATS;
2838 
2839 	/* Enable the above capabilities by default */
2840 	ifp->if_capenable = ifp->if_capabilities;
2841 
2842 	/*
2843 	** Don't turn this on by default; if vlans are
2844 	** created on another pseudo device (e.g. lagg)
2845 	** then vlan events are not passed thru, breaking
2846 	** operation, but with HW FILTER off it works. If
2847 	** using vlans directly on the ixgbe driver you can
2848 	** enable this and get full hardware tag filtering.
2849 	*/
2850 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2851 
2852 	/*
2853 	 * Specify the media types supported by this adapter and register
2854 	 * callbacks to update media and link information
2855 	 */
2856 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2857 		    ixgbe_media_status);
2858 
2859 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
2860 	ixgbe_add_media_types(adapter);
2861 
2862 	/* Set autoselect media by default */
2863 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2864 
2865 	return (0);
2866 }
2867 
2868 static void
2869 ixgbe_add_media_types(struct adapter *adapter)
2870 {
2871 	struct ixgbe_hw *hw = &adapter->hw;
2872 	device_t dev = adapter->dev;
2873 	int layer;
2874 
2875 	layer = adapter->phy_layer;
2876 
2877 	/* Media types with matching FreeBSD media defines */
2878 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2879 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2880 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2881 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2882 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2883 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2884 
2885 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2886 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2887 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2888 
2889 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
2890 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2891 		if (hw->phy.multispeed_fiber)
2892 			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2893 	}
2894 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2895 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2896 		if (hw->phy.multispeed_fiber)
2897 			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2898 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2899 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2900 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2901 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2902 
2903 #ifdef IFM_ETH_XTYPE
2904 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2905 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2906 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
2907 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2908 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2909 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2910 #else
2911 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2912 		device_printf(dev, "Media supported: 10GbaseKR\n");
2913 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2914 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2915 	}
2916 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2917 		device_printf(dev, "Media supported: 10GbaseKX4\n");
2918 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2919 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2920 	}
2921 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2922 		device_printf(dev, "Media supported: 1000baseKX\n");
2923 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2924 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2925 	}
2926 #endif
2927 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
2928 		device_printf(dev, "Media supported: 1000baseBX\n");
2929 
2930 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2931 		ifmedia_add(&adapter->media,
2932 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2933 		ifmedia_add(&adapter->media,
2934 		    IFM_ETHER | IFM_1000_T, 0, NULL);
2935 	}
2936 
2937 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2938 }
2939 
2940 static void
2941 ixgbe_config_link(struct adapter *adapter)
2942 {
2943 	struct ixgbe_hw *hw = &adapter->hw;
2944 	u32	autoneg, err = 0;
2945 	bool	sfp, negotiate;
2946 
2947 	sfp = ixgbe_is_sfp(hw);
2948 
2949 	if (sfp) {
2950 		if (hw->phy.multispeed_fiber) {
2951 			hw->mac.ops.setup_sfp(hw);
2952 			ixgbe_enable_tx_laser(hw);
2953 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2954 		} else
2955 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2956 	} else {
2957 		if (hw->mac.ops.check_link)
2958 			err = ixgbe_check_link(hw, &adapter->link_speed,
2959 			    &adapter->link_up, FALSE);
2960 		if (err)
2961 			goto out;
2962 		autoneg = hw->phy.autoneg_advertised;
2963 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2964 			err = hw->mac.ops.get_link_capabilities(hw,
2965 			    &autoneg, &negotiate);
2966 		if (err)
2967 			goto out;
2968 		if (hw->mac.ops.setup_link)
2969 			err = hw->mac.ops.setup_link(hw,
2970 			    autoneg, adapter->link_up);
2971 	}
2972 out:
2973 	return;
2974 }
2975 
2976 
2977 /*********************************************************************
2978  *
2979  *  Enable transmit units.
2980  *
2981  **********************************************************************/
2982 static void
2983 ixgbe_initialize_transmit_units(struct adapter *adapter)
2984 {
2985 	struct tx_ring	*txr = adapter->tx_rings;
2986 	struct ixgbe_hw	*hw = &adapter->hw;
2987 
2988 	/* Setup the Base and Length of the Tx Descriptor Ring */
2989 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2990 		u64	tdba = txr->txdma.dma_paddr;
2991 		u32	txctrl = 0;
2992 		int	j = txr->me;
2993 
2994 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
2995 		       (tdba & 0x00000000ffffffffULL));
2996 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
2997 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
2998 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2999 
3000 		/* Setup the HW Tx Head and Tail descriptor pointers */
3001 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
3002 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
3003 
3004 		/* Cache the tail address */
3005 		txr->tail = IXGBE_TDT(j);
3006 
3007 		/* Disable Head Writeback */
3008 		/*
3009 		 * Note: for X550 series devices, these registers are actually
3010 		 * prefixed with TPH_ instead of DCA_, but the addresses and
3011 		 * fields remain the same.
3012 		 */
3013 		switch (hw->mac.type) {
3014 		case ixgbe_mac_82598EB:
3015 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
3016 			break;
3017 		default:
3018 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
3019 			break;
3020 		}
3021 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3022 		switch (hw->mac.type) {
3023 		case ixgbe_mac_82598EB:
3024 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
3025 			break;
3026 		default:
3027 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
3028 			break;
3029 		}
3030 
3031 	}
3032 
3033 	if (hw->mac.type != ixgbe_mac_82598EB) {
3034 		u32 dmatxctl, rttdcs;
3035 #ifdef PCI_IOV
3036 		enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
3037 #endif
3038 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3039 		dmatxctl |= IXGBE_DMATXCTL_TE;
3040 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3041 		/* Disable arbiter to set MTQC */
3042 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3043 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
3044 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3045 #ifdef PCI_IOV
3046 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
3047 #else
3048 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
3049 #endif
3050 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3051 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3052 	}
3053 
3054 	return;
3055 }
3056 
3057 static void
3058 ixgbe_initialize_rss_mapping(struct adapter *adapter)
3059 {
3060 	struct ixgbe_hw	*hw = &adapter->hw;
3061 	u32 reta = 0, mrqc, rss_key[10];
3062 	int queue_id, table_size, index_mult;
3063 #ifdef	RSS
3064 	u32 rss_hash_config;
3065 #endif
3066 #ifdef PCI_IOV
3067 	enum ixgbe_iov_mode mode;
3068 #endif
3069 
3070 #ifdef	RSS
3071 	/* Fetch the configured RSS key */
3072 	rss_getkey((uint8_t *) &rss_key);
3073 #else
3074 	/* set up random bits */
3075 	arc4rand(&rss_key, sizeof(rss_key), 0);
3076 #endif
3077 
3078 	/* Set multiplier for RETA setup and table size based on MAC */
3079 	index_mult = 0x1;
3080 	table_size = 128;
3081 	switch (adapter->hw.mac.type) {
3082 	case ixgbe_mac_82598EB:
3083 		index_mult = 0x11;
3084 		break;
3085 	case ixgbe_mac_X550:
3086 	case ixgbe_mac_X550EM_x:
3087 		table_size = 512;
3088 		break;
3089 	default:
3090 		break;
3091 	}
3092 
3093 	/* Set up the redirection table */
3094 	for (int i = 0, j = 0; i < table_size; i++, j++) {
3095 		if (j == adapter->num_queues) j = 0;
3096 #ifdef	RSS
3097 		/*
3098 		 * Fetch the RSS bucket id for the given indirection entry.
3099 		 * Cap it at the number of configured buckets (which is
3100 		 * num_queues.)
3101 		 */
3102 		queue_id = rss_get_indirection_to_bucket(i);
3103 		queue_id = queue_id % adapter->num_queues;
3104 #else
3105 		queue_id = (j * index_mult);
3106 #endif
3107 		/*
3108 		 * The low 8 bits are for hash value (n+0);
3109 		 * The next 8 bits are for hash value (n+1), etc.
3110 		 */
3111 		reta = reta >> 8;
3112 		reta = reta | (((uint32_t)queue_id) << 24);
3113 		if ((i & 3) == 3) {
3114 			if (i < 128)
3115 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3116 			else
3117 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
3118 			reta = 0;
3119 		}
3120 	}
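	/*
	 * Packing example (assuming 4 queues and no RSS option): entries
	 * 0..3 get queue ids 0,1,2,3, which shift in from the top to form
	 * reta == 0x03020100, flushed to RETA(0) when i == 3; entries
	 * 4..7 then repeat the pattern into RETA(1), and so on.
	 */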
3121 
3122 	/* Now fill our hash function seeds */
3123 	for (int i = 0; i < 10; i++)
3124 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
3125 
3126 	/* Perform hash on these packet types */
3127 #ifdef	RSS
3128 	mrqc = IXGBE_MRQC_RSSEN;
3129 	rss_hash_config = rss_gethashconfig();
3130 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3131 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3132 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3133 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3134 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3135 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3136 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3137 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3138 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3139 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3140 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
3141 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3142 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3143 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3144 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
3145 		device_printf(adapter->dev,
3146 		    "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
3147 		    "but not supported\n", __func__);
3148 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3149 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3150 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
3151 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3152 #else
3153 	/*
3154 	 * Disable UDP - IP fragments aren't currently being handled
3155 	 * and so we end up with a mix of 2-tuple and 4-tuple
3156 	 * traffic.
3157 	 */
3158 	mrqc = IXGBE_MRQC_RSSEN
3159 	     | IXGBE_MRQC_RSS_FIELD_IPV4
3160 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3161 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3162 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3163 	     | IXGBE_MRQC_RSS_FIELD_IPV6
3164 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3165 	;
3166 #endif /* RSS */
3167 #ifdef PCI_IOV
3168 	mode = ixgbe_get_iov_mode(adapter);
3169 	mrqc |= ixgbe_get_mrqc(mode);
3170 #endif
3171 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3172 }
3173 
3174 
3175 /*********************************************************************
3176  *
3177  *  Setup receive registers and features.
3178  *
3179  **********************************************************************/
3180 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3181 
3182 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
3183 
3184 static void
3185 ixgbe_initialize_receive_units(struct adapter *adapter)
3186 {
3187 	struct	rx_ring	*rxr = adapter->rx_rings;
3188 	struct ixgbe_hw	*hw = &adapter->hw;
3189 	struct ifnet   *ifp = adapter->ifp;
3190 	u32		bufsz, fctrl, srrctl, rxcsum;
3191 	u32		hlreg;
3192 
3193 	/*
3194 	 * Make sure receives are disabled while
3195 	 * setting up the descriptor ring
3196 	 */
3197 	ixgbe_disable_rx(hw);
3198 
3199 	/* Enable broadcasts */
3200 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3201 	fctrl |= IXGBE_FCTRL_BAM;
3202 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3203 		fctrl |= IXGBE_FCTRL_DPF;
3204 		fctrl |= IXGBE_FCTRL_PMCF;
3205 	}
3206 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3207 
3208 	/* Set for Jumbo Frames? */
3209 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3210 	if (ifp->if_mtu > ETHERMTU)
3211 		hlreg |= IXGBE_HLREG0_JUMBOEN;
3212 	else
3213 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3214 #ifdef DEV_NETMAP
3215 	/* CRC stripping is conditional in netmap (in RDRXCTL too?) */
3216 	if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3217 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3218 	else
3219 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3220 #endif /* DEV_NETMAP */
3221 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3222 
3223 	bufsz = (adapter->rx_mbuf_sz +
3224 	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
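	/*
	 * BSIZEPKT_ROUNDUP rounds the mbuf size up to the 1KB granularity
	 * of SRRCTL.BSIZEPKT; e.g. rx_mbuf_sz == 2048 gives
	 * (2048 + 1023) >> 10 == 2, i.e. 2KB receive buffers.
	 */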
3225 
3226 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3227 		u64 rdba = rxr->rxdma.dma_paddr;
3228 		int j = rxr->me;
3229 
3230 		/* Setup the Base and Length of the Rx Descriptor Ring */
3231 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3232 			       (rdba & 0x00000000ffffffffULL));
3233 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3234 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3235 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3236 
3237 		/* Set up the SRRCTL register */
3238 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3239 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3240 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3241 		srrctl |= bufsz;
3242 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3243 
3244 		/*
3245 		 * Set DROP_EN iff we have no flow control and >1 queue.
3246 		 * Note that srrctl was cleared shortly before during reset,
3247 		 * so we do not need to clear the bit, but do it just in case
3248 		 * this code is moved elsewhere.
3249 		 */
3250 		if (adapter->num_queues > 1 &&
3251 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3252 			srrctl |= IXGBE_SRRCTL_DROP_EN;
3253 		} else {
3254 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3255 		}
3256 
3257 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3258 
3259 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
3260 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3261 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3262 
3263 		/* Set the driver rx tail address */
3264 		rxr->tail =  IXGBE_RDT(rxr->me);
3265 	}
3266 
3267 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3268 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3269 			      IXGBE_PSRTYPE_UDPHDR |
3270 			      IXGBE_PSRTYPE_IPV4HDR |
3271 			      IXGBE_PSRTYPE_IPV6HDR;
3272 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3273 	}
3274 
3275 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3276 
3277 	ixgbe_initialize_rss_mapping(adapter);
3278 
3279 	if (adapter->num_queues > 1) {
3280 		/* RSS and RX IPP Checksum are mutually exclusive */
3281 		rxcsum |= IXGBE_RXCSUM_PCSD;
3282 	}
3283 
3284 	if (ifp->if_capenable & IFCAP_RXCSUM)
3285 		rxcsum |= IXGBE_RXCSUM_PCSD;
3286 
3287 	/* This is useful for calculating UDP/IP fragment checksums */
3288 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3289 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
3290 
3291 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3292 
3293 	return;
3294 }
3295 
3296 
3297 /*
3298 ** This routine is run via a vlan config EVENT;
3299 ** it enables us to use the HW Filter table since
3300 ** we can get the vlan id. This just creates the
3301 ** entry in the soft version of the VFTA; init will
3302 ** repopulate the real table.
3303 */
3304 static void
3305 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3306 {
3307 	struct adapter	*adapter = ifp->if_softc;
3308 	u16		index, bit;
3309 
3310 	if (ifp->if_softc !=  arg)   /* Not our event */
3311 		return;
3312 
3313 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3314 		return;
3315 
3316 	IXGBE_CORE_LOCK(adapter);
3317 	index = (vtag >> 5) & 0x7F;
3318 	bit = vtag & 0x1F;
3319 	adapter->shadow_vfta[index] |= (1 << bit);
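	/*
	 * Example: vtag 1000 -> index == (1000 >> 5) & 0x7F == 31 and
	 * bit == 1000 & 0x1F == 8, so bit 8 of shadow_vfta[31] marks it;
	 * each 32-bit VFTA word covers 32 consecutive vlan ids.
	 */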
3320 	++adapter->num_vlans;
3321 	ixgbe_setup_vlan_hw_support(adapter);
3322 	IXGBE_CORE_UNLOCK(adapter);
3323 }
3324 
3325 /*
3326 ** This routine is run via a vlan
3327 ** unconfig EVENT; it removes our entry
3328 ** in the soft vfta.
3329 */
3330 static void
3331 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3332 {
3333 	struct adapter	*adapter = ifp->if_softc;
3334 	u16		index, bit;
3335 
3336 	if (ifp->if_softc !=  arg)
3337 		return;
3338 
3339 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3340 		return;
3341 
3342 	IXGBE_CORE_LOCK(adapter);
3343 	index = (vtag >> 5) & 0x7F;
3344 	bit = vtag & 0x1F;
3345 	adapter->shadow_vfta[index] &= ~(1 << bit);
3346 	--adapter->num_vlans;
3347 	/* Re-init to load the changes */
3348 	ixgbe_setup_vlan_hw_support(adapter);
3349 	IXGBE_CORE_UNLOCK(adapter);
3350 }
3351 
3352 static void
3353 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3354 {
3355 	struct ifnet 	*ifp = adapter->ifp;
3356 	struct ixgbe_hw *hw = &adapter->hw;
3357 	struct rx_ring	*rxr;
3358 	u32		ctrl;
3359 
3360 
3361 	/*
3362 	** We get here thru init_locked, meaning
3363 	** a soft reset; this has already cleared
3364 	** the VFTA and other state, so if no
3365 	** vlans have been registered do nothing.
3366 	*/
3367 	if (adapter->num_vlans == 0)
3368 		return;
3369 
3370 	/* Setup the queues for vlans */
3371 	for (int i = 0; i < adapter->num_queues; i++) {
3372 		rxr = &adapter->rx_rings[i];
3373 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
3374 		if (hw->mac.type != ixgbe_mac_82598EB) {
3375 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3376 			ctrl |= IXGBE_RXDCTL_VME;
3377 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
3378 		}
3379 		rxr->vtag_strip = TRUE;
3380 	}
3381 
3382 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3383 		return;
3384 	/*
3385 	** A soft reset zeroes out the VFTA, so
3386 	** we need to repopulate it now.
3387 	*/
3388 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3389 		if (adapter->shadow_vfta[i] != 0)
3390 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3391 			    adapter->shadow_vfta[i]);
3392 
3393 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3394 	/* Enable the Filter Table if enabled */
3395 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3396 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3397 		ctrl |= IXGBE_VLNCTRL_VFE;
3398 	}
3399 	if (hw->mac.type == ixgbe_mac_82598EB)
3400 		ctrl |= IXGBE_VLNCTRL_VME;
3401 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3402 }
3403 
3404 static void
3405 ixgbe_enable_intr(struct adapter *adapter)
3406 {
3407 	struct ixgbe_hw	*hw = &adapter->hw;
3408 	struct ix_queue	*que = adapter->queues;
3409 	u32		mask, fwsm;
3410 
3411 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3412 	/* Enable Fan Failure detection */
3413 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3414 		mask |= IXGBE_EIMS_GPI_SDP1;
3415 
3416 	switch (adapter->hw.mac.type) {
3417 		case ixgbe_mac_82599EB:
3418 			mask |= IXGBE_EIMS_ECC;
3419 			/* Temperature sensor on some adapters */
3420 			mask |= IXGBE_EIMS_GPI_SDP0;
3421 			/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3422 			mask |= IXGBE_EIMS_GPI_SDP1;
3423 			mask |= IXGBE_EIMS_GPI_SDP2;
3424 #ifdef IXGBE_FDIR
3425 			mask |= IXGBE_EIMS_FLOW_DIR;
3426 #endif
3427 #ifdef PCI_IOV
3428 			mask |= IXGBE_EIMS_MAILBOX;
3429 #endif
3430 			break;
3431 		case ixgbe_mac_X540:
3432 			/* Detect if Thermal Sensor is enabled */
3433 			fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3434 			if (fwsm & IXGBE_FWSM_TS_ENABLED)
3435 				mask |= IXGBE_EIMS_TS;
3436 			mask |= IXGBE_EIMS_ECC;
3437 #ifdef IXGBE_FDIR
3438 			mask |= IXGBE_EIMS_FLOW_DIR;
3439 #endif
3440 			break;
3441 		case ixgbe_mac_X550:
3442 		case ixgbe_mac_X550EM_x:
3443 			/* MAC thermal sensor is automatically enabled */
3444 			mask |= IXGBE_EIMS_TS;
3445 			/* Some devices use SDP0 for important information */
3446 			if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3447 			    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3448 				mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3449 			mask |= IXGBE_EIMS_ECC;
3450 #ifdef IXGBE_FDIR
3451 			mask |= IXGBE_EIMS_FLOW_DIR;
3452 #endif
3453 #ifdef PCI_IOV
3454 			mask |= IXGBE_EIMS_MAILBOX;
3455 #endif
3456 		/* falls through */
3457 		default:
3458 			break;
3459 	}
3460 
3461 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3462 
3463 	/* With MSI-X we use auto clear */
3464 	if (adapter->msix_mem) {
3465 		mask = IXGBE_EIMS_ENABLE_MASK;
3466 		/* Don't autoclear Link; the link task re-enables it when done */
3467 		mask &= ~IXGBE_EIMS_OTHER;
3468 		mask &= ~IXGBE_EIMS_LSC;
3469 #ifdef PCI_IOV
3470 		mask &= ~IXGBE_EIMS_MAILBOX;
3471 #endif
3472 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3473 	}
3474 
3475 	/*
3476 	** Now enable all queues; this is done separately to
3477 	** allow handling of the extended (beyond 32) MSI-X
3478 	** vectors that can be used by 82599 and later.
3479 	*/
3480 	for (int i = 0; i < adapter->num_queues; i++, que++)
3481 		ixgbe_enable_queue(adapter, que->msix);
3482 
3483 	IXGBE_WRITE_FLUSH(hw);
3484 
3485 	return;
3486 }
3487 
3488 static void
3489 ixgbe_disable_intr(struct adapter *adapter)
3490 {
3491 	if (adapter->msix_mem)
3492 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
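	/*
	** 82598 has a single EIMC register; newer MACs keep the
	** queue causes in the EIMC_EX pair and the other causes
	** in the upper bits of EIMC.
	*/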
3493 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3494 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3495 	} else {
3496 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3497 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3498 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3499 	}
3500 	IXGBE_WRITE_FLUSH(&adapter->hw);
3501 	return;
3502 }
3503 
3504 /*
3505 ** Get the width and transaction speed of
3506 ** the slot this adapter is plugged into.
3507 */
3508 static void
3509 ixgbe_get_slot_info(struct adapter *adapter)
3510 {
3511 	device_t		dev = adapter->dev;
3512 	struct ixgbe_hw		*hw = &adapter->hw;
3513 	struct ixgbe_mac_info	*mac = &hw->mac;
3514 	u16			link;
3515 	u32			offset;
3516 
3517 	/* For most devices simply call the shared code routine */
3518 	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3519 		ixgbe_get_bus_info(hw);
3520 		/* These devices don't use PCI-E */
3521 		switch (hw->mac.type) {
3522 		case ixgbe_mac_X550EM_x:
3523 			return;
3524 		default:
3525 			goto display;
3526 		}
3527 	}
3528 
3529 	/*
3530 	** For the Quad port adapter we need to parse back
3531 	** up the PCI tree to find the speed of the expansion
3532 	** slot into which this adapter is plugged. A bit more work.
3533 	*/
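	/*
	** Each pair of device_get_parent() calls climbs from a
	** device through its pci bus to the bridge (pcib) above.
	*/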
3534 	dev = device_get_parent(device_get_parent(dev));
3535 #ifdef IXGBE_DEBUG
3536 	device_printf(dev, "parent pcib = %x,%x,%x\n",
3537 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3538 #endif
3539 	dev = device_get_parent(device_get_parent(dev));
3540 #ifdef IXGBE_DEBUG
3541 	device_printf(dev, "slot pcib = %x,%x,%x\n",
3542 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3543 #endif
3544 	/* Now get the PCI Express Capabilities offset */
3545 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3546 	/* ...and read the Link Status Register */
3547 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3548 	switch (link & IXGBE_PCI_LINK_WIDTH) {
3549 	case IXGBE_PCI_LINK_WIDTH_1:
3550 		hw->bus.width = ixgbe_bus_width_pcie_x1;
3551 		break;
3552 	case IXGBE_PCI_LINK_WIDTH_2:
3553 		hw->bus.width = ixgbe_bus_width_pcie_x2;
3554 		break;
3555 	case IXGBE_PCI_LINK_WIDTH_4:
3556 		hw->bus.width = ixgbe_bus_width_pcie_x4;
3557 		break;
3558 	case IXGBE_PCI_LINK_WIDTH_8:
3559 		hw->bus.width = ixgbe_bus_width_pcie_x8;
3560 		break;
3561 	default:
3562 		hw->bus.width = ixgbe_bus_width_unknown;
3563 		break;
3564 	}
3565 
3566 	switch (link & IXGBE_PCI_LINK_SPEED) {
3567 	case IXGBE_PCI_LINK_SPEED_2500:
3568 		hw->bus.speed = ixgbe_bus_speed_2500;
3569 		break;
3570 	case IXGBE_PCI_LINK_SPEED_5000:
3571 		hw->bus.speed = ixgbe_bus_speed_5000;
3572 		break;
3573 	case IXGBE_PCI_LINK_SPEED_8000:
3574 		hw->bus.speed = ixgbe_bus_speed_8000;
3575 		break;
3576 	default:
3577 		hw->bus.speed = ixgbe_bus_speed_unknown;
3578 		break;
3579 	}
3580 
3581 	mac->ops.set_lan_id(hw);
3582 
3583 display:
3584 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
3585 	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3586 	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3587 	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3588 	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3589 	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3590 	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3591 	    ("Unknown"));
3592 
3593 	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3594 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3595 	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
3596 		device_printf(dev, "PCI-Express bandwidth available"
3597 		    " for this card\n     is not sufficient for"
3598 		    " optimal performance.\n");
3599 		device_printf(dev, "For optimal performance a x8 "
3600 		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
3601 	}
3602 	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3603 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3604 	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
3605 		device_printf(dev, "PCI-Express bandwidth available"
3606 		    " for this card\n     is not sufficient for"
3607 		    " optimal performance.\n");
3608 		device_printf(dev, "For optimal performance a x8 "
3609 		    "PCIE Gen3 slot is required.\n");
3610 	}
3611 
3612 	return;
3613 }
3614 
3615 
3616 /*
3617 ** Set up the correct IVAR register for a particular MSI-X interrupt
3618 **   (yes, this is all very magic and confusing :)
3619 **  - entry is the register array entry
3620 **  - vector is the MSIX vector for this queue
3621 **  - type is RX/TX/MISC
3622 */
3623 static void
3624 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3625 {
3626 	struct ixgbe_hw *hw = &adapter->hw;
3627 	u32 ivar, index;
3628 
3629 	vector |= IXGBE_IVAR_ALLOC_VAL;
3630 
3631 	switch (hw->mac.type) {
3632 
3633 	case ixgbe_mac_82598EB:
3634 		if (type == -1)
3635 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3636 		else
3637 			entry += (type * 64);
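		/*
		** Each 32-bit IVAR register packs four 8-bit entries:
		** entry >> 2 selects the register and entry & 0x3 the
		** byte lane within it.
		*/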
3638 		index = (entry >> 2) & 0x1F;
3639 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3640 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3641 		ivar |= (vector << (8 * (entry & 0x3)));
3642 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3643 		break;
3644 
3645 	case ixgbe_mac_82599EB:
3646 	case ixgbe_mac_X540:
3647 	case ixgbe_mac_X550:
3648 	case ixgbe_mac_X550EM_x:
3649 		if (type == -1) { /* MISC IVAR */
3650 			index = (entry & 1) * 8;
3651 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3652 			ivar &= ~(0xFF << index);
3653 			ivar |= (vector << index);
3654 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3655 		} else {	/* RX/TX IVARS */
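			/*
			** Each IVAR register covers a queue pair here:
			** bit 0 of entry picks the 16-bit half and type
			** (0 = RX, 1 = TX) picks the byte within it.
			*/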
3656 			index = (16 * (entry & 1)) + (8 * type);
3657 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3658 			ivar &= ~(0xFF << index);
3659 			ivar |= (vector << index);
3660 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3661 		}
		break;
3662 
3663 	default:
3664 		break;
3665 	}
3666 }
3667 
3668 static void
3669 ixgbe_configure_ivars(struct adapter *adapter)
3670 {
3671 	struct  ix_queue	*que = adapter->queues;
3672 	u32			newitr;
3673 
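	/*
	** EITR keeps its interval field in bits [11:3], so
	** 4000000 / rate converts the requested maximum interrupt
	** rate into that encoding (0x0FF8 masks the field).
	*/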
3674 	if (ixgbe_max_interrupt_rate > 0)
3675 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3676 	else {
3677 		/*
3678 		** Disable DMA coalescing if interrupt moderation is
3679 		** disabled.
3680 		*/
3681 		adapter->dmac = 0;
3682 		newitr = 0;
3683 	}
3684 
3685 	for (int i = 0; i < adapter->num_queues; i++, que++) {
3686 		struct rx_ring *rxr = &adapter->rx_rings[i];
3687 		struct tx_ring *txr = &adapter->tx_rings[i];
3688 		/* First the RX queue entry */
3689 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3690 		/* ... and the TX */
3691 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3692 		/* Set an Initial EITR value */
3693 		IXGBE_WRITE_REG(&adapter->hw,
3694 		    IXGBE_EITR(que->msix), newitr);
3695 	}
3696 
3697 	/* For the Link interrupt */
3698 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3699 }
3700 
3701 /*
3702 ** ixgbe_sfp_probe - called from the local timer to
3703 ** determine if a port has had optics inserted.
3704 */
3705 static bool
3706 ixgbe_sfp_probe(struct adapter *adapter)
3707 {
3708 	struct ixgbe_hw	*hw = &adapter->hw;
3709 	device_t	dev = adapter->dev;
3710 	bool		result = FALSE;
3711 
3712 	if ((hw->phy.type == ixgbe_phy_nl) &&
3713 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3714 		s32 ret = hw->phy.ops.identify_sfp(hw);
3715 		if (ret)
3716                         goto out;
3717 		ret = hw->phy.ops.reset(hw);
3718 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3719 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3720 			device_printf(dev, "Reload driver with supported module.\n");
3721 			adapter->sfp_probe = FALSE;
3722                         goto out;
3723 		} else
3724 			device_printf(dev, "SFP+ module detected!\n");
3725 		/* We now have supported optics */
3726 		adapter->sfp_probe = FALSE;
3727 		/* Set the optics type so system reports correctly */
3728 		ixgbe_setup_optics(adapter);
3729 		result = TRUE;
3730 	}
3731 out:
3732 	return (result);
3733 }
3734 
3735 /*
3736 ** Tasklet handler for MSI-X link interrupts
3737 **  - done outside the interrupt handler since it might sleep
3738 */
3739 static void
3740 ixgbe_handle_link(void *context, int pending)
3741 {
3742 	struct adapter  *adapter = context;
3743 	struct ixgbe_hw *hw = &adapter->hw;
3744 
3745 	ixgbe_check_link(hw,
3746 	    &adapter->link_speed, &adapter->link_up, 0);
3747 	ixgbe_update_link_status(adapter);
3748 
3749 	/* Re-enable link interrupts */
3750 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3751 }
3752 
3753 /*
3754 ** Tasklet for handling SFP module interrupts
3755 */
3756 static void
3757 ixgbe_handle_mod(void *context, int pending)
3758 {
3759 	struct adapter  *adapter = context;
3760 	struct ixgbe_hw *hw = &adapter->hw;
3761 	device_t	dev = adapter->dev;
3762 	u32 err;
3763 
3764 	err = hw->phy.ops.identify_sfp(hw);
3765 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3766 		device_printf(dev,
3767 		    "Unsupported SFP+ module type was detected.\n");
3768 		return;
3769 	}
3770 
3771 	err = hw->mac.ops.setup_sfp(hw);
3772 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3773 		device_printf(dev,
3774 		    "Setup failure - unsupported SFP+ module type.\n");
3775 		return;
3776 	}
3777 	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3778 	return;
3779 }
3780 
3781 
3782 /*
3783 ** Tasklet for handling MSF (multispeed fiber) interrupts
3784 */
3785 static void
3786 ixgbe_handle_msf(void *context, int pending)
3787 {
3788 	struct adapter  *adapter = context;
3789 	struct ixgbe_hw *hw = &adapter->hw;
3790 	u32 autoneg;
3791 	bool negotiate;
3792 
3793 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3794 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3795 
3796 	autoneg = hw->phy.autoneg_advertised;
3797 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3798 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3799 	if (hw->mac.ops.setup_link)
3800 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3801 
3802 	/* Adjust media types shown in ifconfig */
3803 	ifmedia_removeall(&adapter->media);
3804 	ixgbe_add_media_types(adapter);
3805 	return;
3806 }
3807 
3808 /*
3809 ** Tasklet for handling interrupts from an external PHY
3810 */
3811 static void
3812 ixgbe_handle_phy(void *context, int pending)
3813 {
3814 	struct adapter  *adapter = context;
3815 	struct ixgbe_hw *hw = &adapter->hw;
3816 	int error;
3817 
3818 	error = hw->phy.ops.handle_lasi(hw);
3819 	if (error == IXGBE_ERR_OVERTEMP)
3820 		device_printf(adapter->dev,
3821 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3822 		    "PHY will downshift to lower power state!\n");
3823 	else if (error)
3824 		device_printf(adapter->dev,
3825 		    "Error handling LASI interrupt: %d\n",
3826 		    error);
3827 	return;
3828 }
3829 
3830 #ifdef IXGBE_FDIR
3831 /*
3832 ** Tasklet for reinitializing the Flow Director filter table
3833 */
3834 static void
3835 ixgbe_reinit_fdir(void *context, int pending)
3836 {
3837 	struct adapter  *adapter = context;
3838 	struct ifnet   *ifp = adapter->ifp;
3839 
3840 	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3841 		return;
3842 	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3843 	adapter->fdir_reinit = 0;
3844 	/* re-enable flow director interrupts */
3845 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3846 	/* Restart the interface */
3847 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3848 	return;
3849 }
3850 #endif
3851 
3852 /*********************************************************************
3853  *
3854  *  Configure DMA Coalescing
3855  *
3856  **********************************************************************/
3857 static void
3858 ixgbe_config_dmac(struct adapter *adapter)
3859 {
3860 	struct ixgbe_hw *hw = &adapter->hw;
3861 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3862 
3863 	if (hw->mac.type < ixgbe_mac_X550 ||
3864 	    !hw->mac.ops.dmac_config)
3865 		return;
3866 
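	/* XOR serves as a cheap "value differs" test on these fields */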
3867 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3868 	    dcfg->link_speed ^ adapter->link_speed) {
3869 		dcfg->watchdog_timer = adapter->dmac;
3870 		dcfg->fcoe_en = false;
3871 		dcfg->link_speed = adapter->link_speed;
3872 		dcfg->num_tcs = 1;
3873 
3874 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3875 		    dcfg->watchdog_timer, dcfg->link_speed);
3876 
3877 		hw->mac.ops.dmac_config(hw);
3878 	}
3879 }
3880 
3881 /*
3882  * Checks whether the adapter's ports are capable of
3883  * Wake On LAN by reading the adapter's NVM.
3884  *
3885  * Sets each port's hw->wol_enabled value depending
3886  * on the value read here.
3887  */
3888 static void
3889 ixgbe_check_wol_support(struct adapter *adapter)
3890 {
3891 	struct ixgbe_hw *hw = &adapter->hw;
3892 	u16 dev_caps = 0;
3893 
3894 	/* Find out WoL support for port */
3895 	adapter->wol_support = hw->wol_enabled = 0;
3896 	ixgbe_get_device_caps(hw, &dev_caps);
3897 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3898 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3899 	      hw->bus.func == 0))
3900 		adapter->wol_support = hw->wol_enabled = 1;
3901 
3902 	/* Save initial wake up filter configuration */
3903 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3904 
3905 	return;
3906 }
3907 
3908 /*
3909  * Prepare the adapter/port for LPLU and/or WoL
3910  */
3911 static int
3912 ixgbe_setup_low_power_mode(struct adapter *adapter)
3913 {
3914 	struct ixgbe_hw *hw = &adapter->hw;
3915 	device_t dev = adapter->dev;
3916 	s32 error = 0;
3917 
3918 	mtx_assert(&adapter->core_mtx, MA_OWNED);
3919 
3920 	/* Limit power management flow to X550EM baseT */
3921 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3922 	    && hw->phy.ops.enter_lplu) {
3923 		/* Turn off support for APM wakeup. (Using ACPI instead) */
3924 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
3925 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3926 
3927 		/*
3928 		 * Clear Wake Up Status register to prevent any previous wakeup
3929 		 * events from waking us up immediately after we suspend.
3930 		 */
3931 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3932 
3933 		/*
3934 		 * Program the Wakeup Filter Control register with user filter
3935 		 * settings
3936 		 */
3937 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3938 
3939 		/* Enable wakeups and power management in Wakeup Control */
3940 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
3941 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3942 
3943 		/* X550EM baseT adapters need a special LPLU flow */
3944 		hw->phy.reset_disable = true;
3945 		ixgbe_stop(adapter);
3946 		error = hw->phy.ops.enter_lplu(hw);
3947 		if (error)
3948 			device_printf(dev,
3949 			    "Error entering LPLU: %d\n", error);
3950 		hw->phy.reset_disable = false;
3951 	} else {
3952 		/* Just stop for other adapters */
3953 		ixgbe_stop(adapter);
3954 	}
3955 
3956 	return (error);
3957 }
3958 
3959 /**********************************************************************
3960  *
3961  *  Update the board statistics counters.
3962  *
3963  **********************************************************************/
3964 static void
3965 ixgbe_update_stats_counters(struct adapter *adapter)
3966 {
3967 	struct ixgbe_hw *hw = &adapter->hw;
3968 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
3969 	u64 total_missed_rx = 0;
3970 
3971 	adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3972 	adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3973 	adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3974 	adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3975 
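	/*
	** Tally missed-packet counts so the gprc workaround and
	** the iqdrops total below see real values; a minimal
	** sketch, assuming the standard 8-register IXGBE_MPC layout.
	*/
	for (int i = 0; i < 8; i++) {
		u32 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mp;
		adapter->stats.pf.mpc[i] += mp;
		total_missed_rx += adapter->stats.pf.mpc[i];
	}
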
3976 	for (int i = 0; i < 16; i++) {
3977 		adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3978 		adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3979 		adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3980 	}
3981 	adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3982 	adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3983 	adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3984 
3985 	/* Hardware workaround, gprc counts missed packets */
3986 	adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3987 	adapter->stats.pf.gprc -= missed_rx;
3988 
3989 	if (hw->mac.type != ixgbe_mac_82598EB) {
3990 		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3991 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3992 		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3993 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3994 		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3995 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3996 		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3997 		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3998 	} else {
3999 		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4000 		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4001 		/* 82598 only has a counter in the high register */
4002 		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4003 		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4004 		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4005 	}
4006 
4007 	/*
4008 	 * Workaround: mprc hardware is incorrectly counting
4009 	 * broadcasts, so for now we subtract those.
4010 	 */
4011 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4012 	adapter->stats.pf.bprc += bprc;
4013 	adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4014 	if (hw->mac.type == ixgbe_mac_82598EB)
4015 		adapter->stats.pf.mprc -= bprc;
4016 
4017 	adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4018 	adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4019 	adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4020 	adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4021 	adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4022 	adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4023 
4024 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4025 	adapter->stats.pf.lxontxc += lxon;
4026 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4027 	adapter->stats.pf.lxofftxc += lxoff;
4028 	total = lxon + lxoff;
4029 
4030 	adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4031 	adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4032 	adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
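	/*
	** Pause frames are 64-byte frames included in these tx
	** counters; back them out so only data traffic remains.
	*/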
4033 	adapter->stats.pf.gptc -= total;
4034 	adapter->stats.pf.mptc -= total;
4035 	adapter->stats.pf.ptc64 -= total;
4036 	adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
4037 
4038 	adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4039 	adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4040 	adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4041 	adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4042 	adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4043 	adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4044 	adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4045 	adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4046 	adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4047 	adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4048 	adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4049 	adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4050 	adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4051 	adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4052 	adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4053 	adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4054 	adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4055 	adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4056 	/* FCoE counters exist on 82599 and newer only */
4057 	if (hw->mac.type != ixgbe_mac_82598EB) {
4058 		adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4059 		adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4060 		adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4061 		adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4062 		adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4063 	}
4064 
4065 	/* Fill out the OS statistics structure */
4066 	IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
4067 	IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
4068 	IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
4069 	IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
4070 	IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
4071 	IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
4072 	IXGBE_SET_COLLISIONS(adapter, 0);
4073 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
4074 	IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
4075 	    + adapter->stats.pf.rlec);
4076 }
4077 
4078 #if __FreeBSD_version >= 1100036
4079 static uint64_t
4080 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
4081 {
4082 	struct adapter *adapter;
4083 	struct tx_ring *txr;
4084 	uint64_t rv;
4085 
4086 	adapter = if_getsoftc(ifp);
4087 
4088 	switch (cnt) {
4089 	case IFCOUNTER_IPACKETS:
4090 		return (adapter->ipackets);
4091 	case IFCOUNTER_OPACKETS:
4092 		return (adapter->opackets);
4093 	case IFCOUNTER_IBYTES:
4094 		return (adapter->ibytes);
4095 	case IFCOUNTER_OBYTES:
4096 		return (adapter->obytes);
4097 	case IFCOUNTER_IMCASTS:
4098 		return (adapter->imcasts);
4099 	case IFCOUNTER_OMCASTS:
4100 		return (adapter->omcasts);
4101 	case IFCOUNTER_COLLISIONS:
4102 		return (0);
4103 	case IFCOUNTER_IQDROPS:
4104 		return (adapter->iqdrops);
4105 	case IFCOUNTER_OQDROPS:
4106 		rv = 0;
4107 		txr = adapter->tx_rings;
4108 		for (int i = 0; i < adapter->num_queues; i++, txr++)
4109 			rv += txr->br->br_drops;
4110 		return (rv);
4111 	case IFCOUNTER_IERRORS:
4112 		return (adapter->ierrors);
4113 	default:
4114 		return (if_get_counter_default(ifp, cnt));
4115 	}
4116 }
4117 #endif
4118 
4119 /** ixgbe_sysctl_tdh_handler - Handler function
4120  *  Retrieves the TDH value from the hardware
4121  */
4122 static int
4123 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4124 {
4125 	int error;
4126 
4127 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4128 	if (!txr) return (0);
4129 
4130 	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4131 	error = sysctl_handle_int(oidp, &val, 0, req);
4132 	if (error || !req->newptr)
4133 		return (error);
4134 	return (0);
4135 }
4136 
4137 /** ixgbe_sysctl_tdt_handler - Handler function
4138  *  Retrieves the TDT value from the hardware
4139  */
4140 static int
4141 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4142 {
4143 	int error;
4144 
4145 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4146 	if (!txr) return (0);
4147 
4148 	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4149 	error = sysctl_handle_int(oidp, &val, 0, req);
4150 	if (error || !req->newptr)
4151 		return (error);
4152 	return (0);
4153 }
4154 
4155 /** ixgbe_sysctl_rdh_handler - Handler function
4156  *  Retrieves the RDH value from the hardware
4157  */
4158 static int
4159 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4160 {
4161 	int error;
4162 
4163 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4164 	if (!rxr) return (0);
4165 
4166 	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4167 	error = sysctl_handle_int(oidp, &val, 0, req);
4168 	if (error || !req->newptr)
4169 		return (error);
4170 	return (0);
4171 }
4172 
4173 /** ixgbe_sysctl_rdt_handler - Handler function
4174  *  Retrieves the RDT value from the hardware
4175  */
4176 static int
4177 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4178 {
4179 	int error;
4180 
4181 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4182 	if (!rxr) return (0);
4183 
4184 	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4185 	error = sysctl_handle_int(oidp, &val, 0, req);
4186 	if (error || !req->newptr)
4187 		return (error);
4188 	return (0);
4189 }
4190 
4191 static int
4192 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4193 {
4194 	int error;
4195 	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4196 	unsigned int reg, usec, rate;
4197 
4198 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
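	/* The EITR interval field occupies bits [11:3] */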
4199 	usec = ((reg & 0x0FF8) >> 3);
4200 	if (usec > 0)
4201 		rate = 500000 / usec;
4202 	else
4203 		rate = 0;
4204 	error = sysctl_handle_int(oidp, &rate, 0, req);
4205 	if (error || !req->newptr)
4206 		return (error);
4207 	reg &= ~0xfff; /* default, no limitation */
4208 	ixgbe_max_interrupt_rate = 0;
4209 	if (rate > 0 && rate < 500000) {
4210 		if (rate < 1000)
4211 			rate = 1000;
4212 		ixgbe_max_interrupt_rate = rate;
4213 		reg |= ((4000000 / rate) & 0xff8);
4214 	}
4215 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
4216 	return (0);
4217 }
4218 
4219 static void
4220 ixgbe_add_device_sysctls(struct adapter *adapter)
4221 {
4222 	device_t dev = adapter->dev;
4223 	struct ixgbe_hw *hw = &adapter->hw;
4224 	struct sysctl_oid_list *child;
4225 	struct sysctl_ctx_list *ctx;
4226 
4227 	ctx = device_get_sysctl_ctx(dev);
4228 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4229 
4230 	/* Sysctls for all devices */
4231 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
4232 			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4233 			ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
4234 
4235 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
4236 			CTLFLAG_RW,
4237 			&ixgbe_enable_aim, 1, "Interrupt Moderation");
4238 
4239 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
4240 			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4241 			ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
4242 
4243 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
4244 			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4245 			ixgbe_sysctl_thermal_test, "I", "Thermal Test");
4246 
4247 #ifdef IXGBE_DEBUG
4248 	/* testing sysctls (for all devices) */
4249 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
4250 			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4251 			ixgbe_sysctl_power_state, "I", "PCI Power State");
4252 
4253 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
4254 			CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
4255 			ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
4256 #endif
4257 	/* for X550 series devices */
4258 	if (hw->mac.type >= ixgbe_mac_X550)
4259 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4260 				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4261 				ixgbe_sysctl_dmac, "I", "DMA Coalesce");
4262 
4263 	/* for X552 backplane devices */
4264 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
4265 		struct sysctl_oid *eee_node;
4266 		struct sysctl_oid_list *eee_list;
4267 
4268 		eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
4269 					   CTLFLAG_RD, NULL,
4270 					   "Energy Efficient Ethernet sysctls");
4271 		eee_list = SYSCTL_CHILDREN(eee_node);
4272 
4273 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
4274 				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4275 				ixgbe_sysctl_eee_enable, "I",
4276 				"Enable or Disable EEE");
4277 
4278 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
4279 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4280 				ixgbe_sysctl_eee_negotiated, "I",
4281 				"EEE negotiated on link");
4282 
4283 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4284 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4285 				ixgbe_sysctl_eee_tx_lpi_status, "I",
4286 				"Whether or not TX link is in LPI state");
4287 
4288 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4289 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4290 				ixgbe_sysctl_eee_rx_lpi_status, "I",
4291 				"Whether or not RX link is in LPI state");
4292 
4293 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_delay",
4294 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4295 				ixgbe_sysctl_eee_tx_lpi_delay, "I",
4296 				"TX LPI entry delay in microseconds");
4297 	}
4298 
4299 	/* for WoL-capable devices */
4300 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4301 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4302 				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4303 				ixgbe_sysctl_wol_enable, "I",
4304 				"Enable/Disable Wake on LAN");
4305 
4306 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4307 				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4308 				ixgbe_sysctl_wufc, "I",
4309 				"Enable/Disable Wake Up Filters");
4310 	}
4311 
4312 	/* for X552/X557-AT devices */
4313 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4314 		struct sysctl_oid *phy_node;
4315 		struct sysctl_oid_list *phy_list;
4316 
4317 		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4318 					   CTLFLAG_RD, NULL,
4319 					   "External PHY sysctls");
4320 		phy_list = SYSCTL_CHILDREN(phy_node);
4321 
4322 		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4323 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4324 				ixgbe_sysctl_phy_temp, "I",
4325 				"Current External PHY Temperature (Celsius)");
4326 
4327 		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4328 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4329 				ixgbe_sysctl_phy_overtemp_occurred, "I",
4330 				"External PHY High Temperature Event Occurred");
4331 	}
4332 }
4333 
4334 /*
4335  * Add sysctl variables, one per statistic, to the system.
4336  */
4337 static void
4338 ixgbe_add_hw_stats(struct adapter *adapter)
4339 {
4340 	device_t dev = adapter->dev;
4341 
4342 	struct tx_ring *txr = adapter->tx_rings;
4343 	struct rx_ring *rxr = adapter->rx_rings;
4344 
4345 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4346 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4347 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4348 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4349 
4350 	struct sysctl_oid *stat_node, *queue_node;
4351 	struct sysctl_oid_list *stat_list, *queue_list;
4352 
4353 #define QUEUE_NAME_LEN 32
4354 	char namebuf[QUEUE_NAME_LEN];
4355 
4356 	/* Driver Statistics */
4357 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4358 			CTLFLAG_RD, &adapter->dropped_pkts,
4359 			"Driver dropped packets");
4360 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4361 			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4362 			"m_defrag() failed");
4363 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4364 			CTLFLAG_RD, &adapter->watchdog_events,
4365 			"Watchdog timeouts");
4366 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4367 			CTLFLAG_RD, &adapter->link_irq,
4368 			"Link MSIX IRQ Handled");
4369 
4370 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
4371 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4372 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4373 					    CTLFLAG_RD, NULL, "Queue Name");
4374 		queue_list = SYSCTL_CHILDREN(queue_node);
4375 
4376 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4377 				CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4378 				sizeof(&adapter->queues[i]),
4379 				ixgbe_sysctl_interrupt_rate_handler, "IU",
4380 				"Interrupt Rate");
4381 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4382 				CTLFLAG_RD, &(adapter->queues[i].irqs),
4383 				"irqs on this queue");
4384 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4385 				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4386 				ixgbe_sysctl_tdh_handler, "IU",
4387 				"Transmit Descriptor Head");
4388 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4389 				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4390 				ixgbe_sysctl_tdt_handler, "IU",
4391 				"Transmit Descriptor Tail");
4392 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4393 				CTLFLAG_RD, &txr->tso_tx,
4394 				"TSO");
4395 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4396 				CTLFLAG_RD, &txr->no_tx_dma_setup,
4397 				"Driver tx dma failure in xmit");
4398 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4399 				CTLFLAG_RD, &txr->no_desc_avail,
4400 				"Queue No Descriptor Available");
4401 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4402 				CTLFLAG_RD, &txr->total_packets,
4403 				"Queue Packets Transmitted");
4404 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4405 				CTLFLAG_RD, &txr->br->br_drops,
4406 				"Packets dropped in buf_ring");
4407 	}
4408 
4409 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4410 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4411 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4412 					    CTLFLAG_RD, NULL, "Queue Name");
4413 		queue_list = SYSCTL_CHILDREN(queue_node);
4414 
4415 		struct lro_ctrl *lro = &rxr->lro;
4421 
4422 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4423 				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4424 				ixgbe_sysctl_rdh_handler, "IU",
4425 				"Receive Descriptor Head");
4426 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4427 				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4428 				ixgbe_sysctl_rdt_handler, "IU",
4429 				"Receive Descriptor Tail");
4430 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4431 				CTLFLAG_RD, &rxr->rx_packets,
4432 				"Queue Packets Received");
4433 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4434 				CTLFLAG_RD, &rxr->rx_bytes,
4435 				"Queue Bytes Received");
4436 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4437 				CTLFLAG_RD, &rxr->rx_copies,
4438 				"Copied RX Frames");
4439 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4440 				CTLFLAG_RD, &lro->lro_queued, 0,
4441 				"LRO Queued");
4442 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4443 				CTLFLAG_RD, &lro->lro_flushed, 0,
4444 				"LRO Flushed");
4445 	}
4446 
4447 	/* MAC stats get their own sub node */
4448 
4449 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4450 				    CTLFLAG_RD, NULL, "MAC Statistics");
4451 	stat_list = SYSCTL_CHILDREN(stat_node);
4452 
4453 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4454 			CTLFLAG_RD, &stats->crcerrs,
4455 			"CRC Errors");
4456 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4457 			CTLFLAG_RD, &stats->illerrc,
4458 			"Illegal Byte Errors");
4459 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4460 			CTLFLAG_RD, &stats->errbc,
4461 			"Byte Errors");
4462 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4463 			CTLFLAG_RD, &stats->mspdc,
4464 			"MAC Short Packets Discarded");
4465 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4466 			CTLFLAG_RD, &stats->mlfc,
4467 			"MAC Local Faults");
4468 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4469 			CTLFLAG_RD, &stats->mrfc,
4470 			"MAC Remote Faults");
4471 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4472 			CTLFLAG_RD, &stats->rlec,
4473 			"Receive Length Errors");
4474 
4475 	/* Flow Control stats */
4476 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4477 			CTLFLAG_RD, &stats->lxontxc,
4478 			"Link XON Transmitted");
4479 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4480 			CTLFLAG_RD, &stats->lxonrxc,
4481 			"Link XON Received");
4482 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4483 			CTLFLAG_RD, &stats->lxofftxc,
4484 			"Link XOFF Transmitted");
4485 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4486 			CTLFLAG_RD, &stats->lxoffrxc,
4487 			"Link XOFF Received");
4488 
4489 	/* Packet Reception Stats */
4490 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4491 			CTLFLAG_RD, &stats->tor,
4492 			"Total Octets Received");
4493 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4494 			CTLFLAG_RD, &stats->gorc,
4495 			"Good Octets Received");
4496 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4497 			CTLFLAG_RD, &stats->tpr,
4498 			"Total Packets Received");
4499 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4500 			CTLFLAG_RD, &stats->gprc,
4501 			"Good Packets Received");
4502 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4503 			CTLFLAG_RD, &stats->mprc,
4504 			"Multicast Packets Received");
4505 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4506 			CTLFLAG_RD, &stats->bprc,
4507 			"Broadcast Packets Received");
4508 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4509 			CTLFLAG_RD, &stats->prc64,
4510 			"64 byte frames received");
4511 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4512 			CTLFLAG_RD, &stats->prc127,
4513 			"65-127 byte frames received");
4514 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4515 			CTLFLAG_RD, &stats->prc255,
4516 			"128-255 byte frames received");
4517 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4518 			CTLFLAG_RD, &stats->prc511,
4519 			"256-511 byte frames received");
4520 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4521 			CTLFLAG_RD, &stats->prc1023,
4522 			"512-1023 byte frames received");
4523 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4524 			CTLFLAG_RD, &stats->prc1522,
4525 			"1024-1522 byte frames received");
4526 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4527 			CTLFLAG_RD, &stats->ruc,
4528 			"Receive Undersized");
4529 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4530 			CTLFLAG_RD, &stats->rfc,
4531 			"Fragmented Packets Received");
4532 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4533 			CTLFLAG_RD, &stats->roc,
4534 			"Oversized Packets Received");
4535 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4536 			CTLFLAG_RD, &stats->rjc,
4537 			"Received Jabber");
4538 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4539 			CTLFLAG_RD, &stats->mngprc,
4540 			"Management Packets Received");
4541 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4542 			CTLFLAG_RD, &stats->mngpdc,
4543 			"Management Packets Dropped");
4544 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4545 			CTLFLAG_RD, &stats->xec,
4546 			"Checksum Errors");
4547 
4548 	/* Packet Transmission Stats */
4549 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4550 			CTLFLAG_RD, &stats->gotc,
4551 			"Good Octets Transmitted");
4552 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4553 			CTLFLAG_RD, &stats->tpt,
4554 			"Total Packets Transmitted");
4555 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4556 			CTLFLAG_RD, &stats->gptc,
4557 			"Good Packets Transmitted");
4558 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4559 			CTLFLAG_RD, &stats->bptc,
4560 			"Broadcast Packets Transmitted");
4561 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4562 			CTLFLAG_RD, &stats->mptc,
4563 			"Multicast Packets Transmitted");
4564 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4565 			CTLFLAG_RD, &stats->mngptc,
4566 			"Management Packets Transmitted");
4567 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4568 			CTLFLAG_RD, &stats->ptc64,
4569 			"64 byte frames transmitted");
4570 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4571 			CTLFLAG_RD, &stats->ptc127,
4572 			"65-127 byte frames transmitted");
4573 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4574 			CTLFLAG_RD, &stats->ptc255,
4575 			"128-255 byte frames transmitted");
4576 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4577 			CTLFLAG_RD, &stats->ptc511,
4578 			"256-511 byte frames transmitted");
4579 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4580 			CTLFLAG_RD, &stats->ptc1023,
4581 			"512-1023 byte frames transmitted");
4582 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4583 			CTLFLAG_RD, &stats->ptc1522,
4584 			"1024-1522 byte frames transmitted");
4585 }
4586 
4587 static void
4588 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4589     const char *description, int *limit, int value)
4590 {
4591 	*limit = value;
4592 	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4593 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4594 	    OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4595 }
4596 
4597 /*
4598 ** Set flow control using sysctl:
4599 ** Flow control values:
4600 ** 	0 - off
4601 **	1 - rx pause
4602 **	2 - tx pause
4603 **	3 - full
4604 */
4605 static int
4606 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4607 {
4608 	int error, last;
4609 	struct adapter *adapter = (struct adapter *) arg1;
4610 
4611 	last = adapter->fc;
4612 	error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4613 	if ((error) || (req->newptr == NULL))
4614 		return (error);
4615 
4616 	/* Don't bother if it's not changed */
4617 	if (adapter->fc == last)
4618 		return (0);
4619 
4620 	switch (adapter->fc) {
4621 		case ixgbe_fc_rx_pause:
4622 		case ixgbe_fc_tx_pause:
4623 		case ixgbe_fc_full:
4624 			adapter->hw.fc.requested_mode = adapter->fc;
4625 			if (adapter->num_queues > 1)
4626 				ixgbe_disable_rx_drop(adapter);
4627 			break;
4628 		case ixgbe_fc_none:
4629 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
4630 			if (adapter->num_queues > 1)
4631 				ixgbe_enable_rx_drop(adapter);
4632 			break;
4633 		default:
4634 			adapter->fc = last;
4635 			return (EINVAL);
4636 	}
4637 	/* Don't autoneg if forcing a value */
4638 	adapter->hw.fc.disable_fc_autoneg = TRUE;
4639 	ixgbe_fc_enable(&adapter->hw);
4640 	return (error);
4641 }
4642 
4643 /*
4644 ** Control advertised link speed:
4645 **	Flags:
4646 **	0x1 - advertise 100 Mb
4647 **	0x2 - advertise 1G
4648 **	0x4 - advertise 10G
4649 */
4650 static int
4651 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4652 {
4653 	int			error = 0, requested;
4654 	struct adapter		*adapter;
4655 	device_t		dev;
4656 	struct ixgbe_hw		*hw;
4657 	ixgbe_link_speed	speed = 0;
4658 
4659 	adapter = (struct adapter *) arg1;
4660 	dev = adapter->dev;
4661 	hw = &adapter->hw;
4662 
4663 	requested = adapter->advertise;
4664 	error = sysctl_handle_int(oidp, &requested, 0, req);
4665 	if ((error) || (req->newptr == NULL))
4666 		return (error);
4667 
4668 	/* No speed changes for backplane media */
4669 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4670 		return (ENODEV);
4671 
4672 	/* Checks to validate new value */
4673 	if (adapter->advertise == requested) /* no change */
4674 		return (0);
4675 
4676 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4677 	    (hw->phy.multispeed_fiber))) {
4678 		device_printf(dev,
4679 		    "Advertised speed can only be set on copper or "
4680 		    "multispeed fiber media types.\n");
4681 		return (EINVAL);
4682 	}
4683 
4684 	if (requested < 0x1 || requested > 0x7) {
4685 		device_printf(dev,
4686 		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4687 		return (EINVAL);
4688 	}
4689 
4690 	if ((requested & 0x1)
4691 	    && (hw->mac.type != ixgbe_mac_X540)
4692 	    && (hw->mac.type != ixgbe_mac_X550)) {
4693 		device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4694 		return (EINVAL);
4695 	}
4696 
4697 	/* Set new value and report new advertised mode */
4698 	if (requested & 0x1)
4699 		speed |= IXGBE_LINK_SPEED_100_FULL;
4700 	if (requested & 0x2)
4701 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4702 	if (requested & 0x4)
4703 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4704 
4705 	hw->mac.autotry_restart = TRUE;
4706 	hw->mac.ops.setup_link(hw, speed, TRUE);
4707 	adapter->advertise = requested;
4708 
4709 	return (error);
4710 }
4711 
4712 /*
4713  * The following two sysctls are for X552/X557-AT devices;
4714  * they deal with the external PHY used in them.
4715  */
4716 static int
4717 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4718 {
4719 	struct adapter	*adapter = (struct adapter *) arg1;
4720 	struct ixgbe_hw *hw = &adapter->hw;
4721 	u16 reg;
4722 
4723 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4724 		device_printf(adapter->dev,
4725 		    "Device has no supported external thermal sensor.\n");
4726 		return (ENODEV);
4727 	}
4728 
4729 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4730 				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4731 				      &reg)) {
4732 		device_printf(adapter->dev,
4733 		    "Error reading from PHY's current temperature register\n");
4734 		return (EAGAIN);
4735 	}
4736 
4737 	/* The temperature is in the high byte; shift it down for output */
4738 	reg = reg >> 8;
4739 
4740 	return (sysctl_handle_int(oidp, NULL, reg, req));
4741 }
4742 
4743 /*
4744  * Reports whether the current PHY temperature is over
4745  * the overtemp threshold.
4746  *  - This is reported directly from the PHY
4747  */
4748 static int
4749 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4750 {
4751 	struct adapter	*adapter = (struct adapter *) arg1;
4752 	struct ixgbe_hw *hw = &adapter->hw;
4753 	u16 reg;
4754 
4755 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4756 		device_printf(adapter->dev,
4757 		    "Device has no supported external thermal sensor.\n");
4758 		return (ENODEV);
4759 	}
4760 
4761 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4762 				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4763 				      &reg)) {
4764 		device_printf(adapter->dev,
4765 		    "Error reading from PHY's temperature status register\n");
4766 		return (EAGAIN);
4767 	}
4768 
4769 	/* Get occurrence bit */
4770 	reg = !!(reg & 0x4000);
4771 	return (sysctl_handle_int(oidp, NULL, reg, req));
4772 }
4773 
4774 /*
4775 ** Thermal Shutdown Trigger (internal MAC)
4776 **   - Set this to 1 to cause an overtemp event to occur
4777 */
4778 static int
4779 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4780 {
4781 	struct adapter	*adapter = (struct adapter *) arg1;
4782 	struct ixgbe_hw *hw = &adapter->hw;
4783 	int error, fire = 0;
4784 
4785 	error = sysctl_handle_int(oidp, &fire, 0, req);
4786 	if ((error) || (req->newptr == NULL))
4787 		return (error);
4788 
4789 	if (fire) {
4790 		u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4791 		reg |= IXGBE_EICR_TS;
4792 		IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4793 	}
4794 
4795 	return (0);
4796 }
4797 
4798 /*
4799 ** Manage DMA Coalescing.
4800 ** Control values:
4801 ** 	0/1 - off / on (use default value of 1000)
4802 **
4803 **	Legal timer values are:
4804 **	50,100,250,500,1000,2000,5000,10000
4805 **
4806 **	Turning off interrupt moderation will also turn this off.
4807 */
4808 static int
4809 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4810 {
4811 	struct adapter *adapter = (struct adapter *) arg1;
4812 	struct ifnet *ifp = adapter->ifp;
4813 	int		error;
4814 	u32		newval;
4815 
4816 	newval = adapter->dmac;
4817 	error = sysctl_handle_int(oidp, &newval, 0, req);
4818 	if ((error) || (req->newptr == NULL))
4819 		return (error);
4820 
4821 	switch (newval) {
4822 	case 0:
4823 		/* Disabled */
4824 		adapter->dmac = 0;
4825 		break;
4826 	case 1:
4827 		/* Enable and use default */
4828 		adapter->dmac = 1000;
4829 		break;
4830 	case 50:
4831 	case 100:
4832 	case 250:
4833 	case 500:
4834 	case 1000:
4835 	case 2000:
4836 	case 5000:
4837 	case 10000:
4838 		/* Legal values - allow */
4839 		adapter->dmac = newval;
4840 		break;
4841 	default:
4842 		/* Do nothing, illegal value */
4843 		return (EINVAL);
4844 	}
4845 
4846 	/* Re-initialize hardware if it's already running */
4847 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4848 		ixgbe_init(adapter);
4849 
4850 	return (0);
4851 }
4852 
4853 #ifdef IXGBE_DEBUG
4854 /**
4855  * Sysctl to test power states
4856  * Values:
4857  *	0      - set device to D0
4858  *	3      - set device to D3
4859  *	(none) - get current device power state
4860  */
4861 static int
4862 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4863 {
4864 	struct adapter *adapter = (struct adapter *) arg1;
4865 	device_t dev =  adapter->dev;
4866 	int curr_ps, new_ps, error = 0;
4867 
4868 	curr_ps = new_ps = pci_get_powerstate(dev);
4869 
4870 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4871 	if ((error) || (req->newptr == NULL))
4872 		return (error);
4873 
4874 	if (new_ps == curr_ps)
4875 		return (0);
4876 
4877 	if (new_ps == 3 && curr_ps == 0)
4878 		error = DEVICE_SUSPEND(dev);
4879 	else if (new_ps == 0 && curr_ps == 3)
4880 		error = DEVICE_RESUME(dev);
4881 	else
4882 		return (EINVAL);
4883 
4884 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4885 
4886 	return (error);
4887 }
4888 #endif
4889 /*
4890  * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4891  * Values:
4892  *	0 - disabled
4893  *	1 - enabled
4894  */
4895 static int
4896 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4897 {
4898 	struct adapter *adapter = (struct adapter *) arg1;
4899 	struct ixgbe_hw *hw = &adapter->hw;
4900 	int new_wol_enabled;
4901 	int error = 0;
4902 
4903 	new_wol_enabled = hw->wol_enabled;
4904 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4905 	if ((error) || (req->newptr == NULL))
4906 		return (error);
4907 	new_wol_enabled = !!(new_wol_enabled);
4908 	if (new_wol_enabled == hw->wol_enabled)
4909 		return (0);
4910 
4911 	if (new_wol_enabled > 0 && !adapter->wol_support)
4912 		return (ENODEV);
4913 	else
4914 		hw->wol_enabled = new_wol_enabled;
4915 
4916 	return (0);
4917 }
4918 
4919 /*
4920  * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4921  * if supported by the adapter.
4922  * Values:
4923  *	0 - disabled
4924  *	1 - enabled
4925  */
4926 static int
4927 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4928 {
4929 	struct adapter *adapter = (struct adapter *) arg1;
4930 	struct ixgbe_hw *hw = &adapter->hw;
4931 	struct ifnet *ifp = adapter->ifp;
4932 	int new_eee_enabled, error = 0;
4933 
4934 	new_eee_enabled = adapter->eee_enabled;
4935 	error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4936 	if ((error) || (req->newptr == NULL))
4937 		return (error);
4938 	new_eee_enabled = !!(new_eee_enabled);
4939 	if (new_eee_enabled == adapter->eee_enabled)
4940 		return (0);
4941 
4942 	if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
4943 		return (ENODEV);
4944 	else
4945 		adapter->eee_enabled = new_eee_enabled;
4946 
4947 	/* Re-initialize hardware if it's already running */
4948 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4949 		ixgbe_init(adapter);
4950 
4951 	return (0);
4952 }
4953 
4954 /*
4955  * Read-only sysctl indicating whether EEE support was negotiated
4956  * on the link.
4957  */
4958 static int
4959 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4960 {
4961 	struct adapter *adapter = (struct adapter *) arg1;
4962 	struct ixgbe_hw *hw = &adapter->hw;
4963 	bool status;
4964 
4965 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4966 
4967 	return (sysctl_handle_int(oidp, NULL, status, req));
4968 }
4969 
4970 /*
4971  * Read-only sysctl indicating whether RX Link is in LPI state.
4972  */
4973 static int
4974 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4975 {
4976 	struct adapter *adapter = (struct adapter *) arg1;
4977 	struct ixgbe_hw *hw = &adapter->hw;
4978 	bool status;
4979 
4980 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4981 	    IXGBE_EEE_RX_LPI_STATUS);
4982 
4983 	return (sysctl_handle_int(oidp, NULL, status, req));
4984 }
4985 
4986 /*
4987  * Read-only sysctl indicating whether TX Link is in LPI state.
4988  */
4989 static int
4990 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4991 {
4992 	struct adapter *adapter = (struct adapter *) arg1;
4993 	struct ixgbe_hw *hw = &adapter->hw;
4994 	bool status;
4995 
4996 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4997 	    IXGBE_EEE_TX_LPI_STATUS);
4998 
4999 	return (sysctl_handle_int(oidp, NULL, status, req));
5000 }
5001 
5002 /*
5003  * Read-only sysctl indicating TX Link LPI delay
5004  */
5005 static int
5006 ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS)
5007 {
5008 	struct adapter *adapter = (struct adapter *) arg1;
5009 	struct ixgbe_hw *hw = &adapter->hw;
5010 	u32 reg;
5011 
5012 	reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU);
5013 
5014 	return (sysctl_handle_int(oidp, NULL, reg >> 26, req));
5015 }
5016 
5017 /*
5018  * Sysctl to enable/disable the types of packets that the
5019  * adapter will wake up on upon receipt.
5020  * WUFC - Wake Up Filter Control
5021  * Flags:
5022  *	0x1  - Link Status Change
5023  *	0x2  - Magic Packet
5024  *	0x4  - Direct Exact
5025  *	0x8  - Directed Multicast
5026  *	0x10 - Broadcast
5027  *	0x20 - ARP/IPv4 Request Packet
5028  *	0x40 - Direct IPv4 Packet
5029  *	0x80 - Direct IPv6 Packet
5030  *
5031  * Setting another flag will cause the sysctl to return an
5032  * error.
5033  */
5034 static int
5035 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5036 {
5037 	struct adapter *adapter = (struct adapter *) arg1;
5038 	int error = 0;
5039 	u32 new_wufc;
5040 
5041 	new_wufc = adapter->wufc;
5042 
5043 	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
5044 	if ((error) || (req->newptr == NULL))
5045 		return (error);
5046 	if (new_wufc == adapter->wufc)
5047 		return (0);
5048 
5049 	if (new_wufc & 0xffffff00)
5050 		return (EINVAL);
5051 	else {
5052 		new_wufc &= 0xff;
5053 		new_wufc |= (0xffffff00 & adapter->wufc);	/* keep non-flag bits */
5054 		adapter->wufc = new_wufc;
5055 	}
5056 
5057 	return (0);
5058 }
5059 
5060 #ifdef IXGBE_DEBUG
5061 static int
5062 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5063 {
5064 	struct adapter *adapter = (struct adapter *)arg1;
5065 	struct ixgbe_hw *hw = &adapter->hw;
5066 	device_t dev = adapter->dev;
5067 	int error = 0, reta_size;
5068 	struct sbuf *buf;
5069 	u32 reg;
5070 
5071 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5072 	if (!buf) {
5073 		device_printf(dev, "Could not allocate sbuf for output.\n");
5074 		return (ENOMEM);
5075 	}
5076 
5077 	/* TODO: use sbufs to make a string to print out */
5078 	/* Set multiplier for RETA setup and table size based on MAC */
5079 	switch (adapter->hw.mac.type) {
5080 	case ixgbe_mac_X550:
5081 	case ixgbe_mac_X550EM_x:
5082 		reta_size = 128;
5083 		break;
5084 	default:
5085 		reta_size = 32;
5086 		break;
5087 	}
5088 
5089 	/* Print out the redirection table */
5090 	sbuf_cat(buf, "\n");
5091 	for (int i = 0; i < reta_size; i++) {
5092 		if (i < 32) {
5093 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5094 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5095 		} else {
5096 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5097 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5098 		}
5099 	}
5100 
5101 	/* TODO: print more config */
5102 
5103 	error = sbuf_finish(buf);
5104 	if (error)
5105 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5106 
5107 	sbuf_delete(buf);
5108 	return (0);
5109 }
5110 #endif /* IXGBE_DEBUG */
5111 
5112 /*
5113 ** Enable the hardware to drop packets when the buffer is
5114 ** full. This is useful with multiqueue, so that no single
5115 ** full queue stalls the entire RX engine. We only enable
5116 ** this when multiqueue is in use AND flow control is
5117 ** disabled.
5118 */
5119 static void
5120 ixgbe_enable_rx_drop(struct adapter *adapter)
5121 {
5122 	struct ixgbe_hw *hw = &adapter->hw;
5123 
5124 	for (int i = 0; i < adapter->num_queues; i++) {
5125 		struct rx_ring *rxr = &adapter->rx_rings[i];
5126 		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5127 		srrctl |= IXGBE_SRRCTL_DROP_EN;
5128 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5129 	}
5130 #ifdef PCI_IOV
5131 	/* enable drop for each vf */
5132 	for (int i = 0; i < adapter->num_vfs; i++) {
5133 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
5134 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5135 		    IXGBE_QDE_ENABLE));
5136 	}
5137 #endif
5138 }
5139 
5140 static void
5141 ixgbe_disable_rx_drop(struct adapter *adapter)
5142 {
5143 	struct ixgbe_hw *hw = &adapter->hw;
5144 
5145 	for (int i = 0; i < adapter->num_queues; i++) {
5146 		struct rx_ring *rxr = &adapter->rx_rings[i];
5147 		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5148 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5149 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5150 	}
5151 #ifdef PCI_IOV
5152 	/* disable drop for each vf */
5153 	for (int i = 0; i < adapter->num_vfs; i++) {
5154 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
5155 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5156 	}
5157 #endif
5158 }
5159 
5160 static void
5161 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5162 {
5163 	u32 mask;
5164 
5165 	switch (adapter->hw.mac.type) {
5166 	case ixgbe_mac_82598EB:
5167 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5168 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5169 		break;
5170 	case ixgbe_mac_82599EB:
5171 	case ixgbe_mac_X540:
5172 	case ixgbe_mac_X550:
5173 	case ixgbe_mac_X550EM_x:
5174 		mask = (queues & 0xFFFFFFFF);
5175 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5176 		mask = (queues >> 32);
5177 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5178 		break;
5179 	default:
5180 		break;
5181 	}
5182 }
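
/*
 * Worked example: on 82599 and newer MACs the 64-bit queue mask is
 * split across the two extended interrupt cause set registers, so
 * rearming queue 35 (queues = 1ULL << 35) writes 0 to EICS_EX(0) and
 * bit 3 (35 - 32) to EICS_EX(1).
 */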
5183 
5184 #ifdef PCI_IOV
5185 
5186 /*
5187 ** Support functions for SRIOV/VF management
5188 */
5189 
5190 static void
5191 ixgbe_ping_all_vfs(struct adapter *adapter)
5192 {
5193 	struct ixgbe_vf *vf;
5194 
5195 	for (int i = 0; i < adapter->num_vfs; i++) {
5196 		vf = &adapter->vfs[i];
5197 		if (vf->flags & IXGBE_VF_ACTIVE)
5198 			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
5199 	}
5200 }
5201 
5202 
5203 static void
5204 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
5205     uint16_t tag)
5206 {
5207 	struct ixgbe_hw *hw;
5208 	uint32_t vmolr, vmvir;
5209 
5210 	hw = &adapter->hw;
5211 
5212 	vf->vlan_tag = tag;
5213 
5214 	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
5215 
5216 	/* Do not receive packets that pass inexact filters. */
5217 	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
5218 
5219 	/* Disable Multicast Promiscuous Mode. */
5220 	vmolr &= ~IXGBE_VMOLR_MPE;
5221 
5222 	/* Accept broadcasts. */
5223 	vmolr |= IXGBE_VMOLR_BAM;
5224 
5225 	if (tag == 0) {
5226 		/* Accept non-vlan tagged traffic. */
5227 		//vmolr |= IXGBE_VMOLR_AUPE;
5228 
5229 		/* Allow VM to tag outgoing traffic; no default tag. */
5230 		vmvir = 0;
5231 	} else {
5232 		/* Require vlan-tagged traffic. */
5233 		vmolr &= ~IXGBE_VMOLR_AUPE;
5234 
5235 		/* Tag all traffic with provided vlan tag. */
5236 		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
5237 	}
5238 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
5239 	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
5240 }
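
/*
 * For example, a default tag of 100 programs VMVIR with VLAN ID 100
 * and the "use default tag" VLANA action, so every frame from this
 * VF's pool is sent tagged with VLAN 100, while a tag of 0 clears
 * VMVIR and lets the VM do its own tagging.
 */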
5241 
5242 
5243 static boolean_t
5244 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
5245 {
5246 
5247 	/*
5248 	 * Frame size compatibility between PF and VF is only a problem on
5249 	 * 82599-based cards.  X540 and later support any combination of jumbo
5250 	 * frames on PFs and VFs.
5251 	 */
5252 	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
5253 		return (TRUE);
5254 
5255 	switch (vf->api_ver) {
5256 	case IXGBE_API_VER_1_0:
5257 	case IXGBE_API_VER_UNKNOWN:
5258 		/*
5259 		 * On legacy (1.0 and older) VF versions, we don't support jumbo
5260 		 * frames on either the PF or the VF.
5261 		 */
5262 		if (adapter->max_frame_size > ETHER_MAX_LEN ||
5263 		    vf->max_frame_size > ETHER_MAX_LEN)
5264 			return (FALSE);
5265 
5266 		return (TRUE);
5267 
5269 	case IXGBE_API_VER_1_1:
5270 	default:
5271 		/*
5272 		 * 1.1 or later VF versions always work if they aren't using
5273 		 * jumbo frames.
5274 		 */
5275 		if (vf->max_frame_size <= ETHER_MAX_LEN)
5276 			return (TRUE);
5277 
5278 		/*
5279 		 * Jumbo frames only work with VFs if the PF is also using jumbo
5280 		 * frames.
5281 		 */
5282 		if (adapter->max_frame_size <= ETHER_MAX_LEN)
5283 			return (TRUE);
5284 
5285 		return (FALSE);
5286 
5287 	}
5288 }
5289 
5290 
5291 static void
5292 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
5293 {
5294 	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
5295 
5296 	// XXX clear multicast addresses
5297 
5298 	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
5299 
5300 	vf->api_ver = IXGBE_API_VER_UNKNOWN;
5301 }
5302 
5303 
5304 static void
5305 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
5306 {
5307 	struct ixgbe_hw *hw;
5308 	uint32_t vf_index, vfte;
5309 
5310 	hw = &adapter->hw;
5311 
5312 	vf_index = IXGBE_VF_INDEX(vf->pool);
5313 	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
5314 	vfte |= IXGBE_VF_BIT(vf->pool);
5315 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
5316 }
5317 
5318 
5319 static void
5320 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
5321 {
5322 	struct ixgbe_hw *hw;
5323 	uint32_t vf_index, vfre;
5324 
5325 	hw = &adapter->hw;
5326 
5327 	vf_index = IXGBE_VF_INDEX(vf->pool);
5328 	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
5329 	if (ixgbe_vf_frame_size_compatible(adapter, vf))
5330 		vfre |= IXGBE_VF_BIT(vf->pool);
5331 	else
5332 		vfre &= ~IXGBE_VF_BIT(vf->pool);
5333 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
5334 }
5335 
5336 
5337 static void
5338 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5339 {
5340 	struct ixgbe_hw *hw;
5341 	uint32_t ack;
5342 	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
5343 
5344 	hw = &adapter->hw;
5345 
5346 	ixgbe_process_vf_reset(adapter, vf);
5347 
5348 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5349 		ixgbe_set_rar(&adapter->hw, vf->rar_index,
5350 		    vf->ether_addr, vf->pool, TRUE);
5351 		ack = IXGBE_VT_MSGTYPE_ACK;
5352 	} else
5353 		ack = IXGBE_VT_MSGTYPE_NACK;
5354 
5355 	ixgbe_vf_enable_transmit(adapter, vf);
5356 	ixgbe_vf_enable_receive(adapter, vf);
5357 
5358 	vf->flags |= IXGBE_VF_CTS;
5359 
5360 	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
5361 	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
5362 	resp[3] = hw->mac.mc_filter_type;
5363 	ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
5364 }
5365 
5366 
5367 static void
5368 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5369 {
5370 	uint8_t *mac;
5371 
5372 	mac = (uint8_t*)&msg[1];
5373 
5374 	/* Check that the VF has permission to change the MAC address. */
5375 	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
5376 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5377 		return;
5378 	}
5379 
5380 	if (ixgbe_validate_mac_addr(mac) != 0) {
5381 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5382 		return;
5383 	}
5384 
5385 	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5386 
5387 	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
5388 	    vf->pool, TRUE);
5389 
5390 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5391 }
5392 
5393 
5394 /*
5395 ** VF multicast addresses are set by asserting the appropriate bit
5396 ** in one of the 128 32-bit MTA registers (4096 possible hash values).
5397 */
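/*
** For example, a 12-bit hash value of 0x0FED selects register
** MTA((0x0FED >> 5) & 0x7F) = MTA(127), bit (0x0FED & 0x1F) = 13.
*/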
5398 static void
5399 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
5400 {
5401 	u16	*list = (u16*)&msg[1];
5402 	int	entries;
5403 	u32	vmolr, vec_bit, vec_reg, mta_reg;
5404 
5405 	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5406 	entries = min(entries, IXGBE_MAX_VF_MC);
5407 
5408 	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
5409 
5410 	vf->num_mc_hashes = entries;
5411 
5412 	/* Set the appropriate MTA bit */
5413 	for (int i = 0; i < entries; i++) {
5414 		vf->mc_hash[i] = list[i];
5415 		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5416 		vec_bit = vf->mc_hash[i] & 0x1F;
5417 		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5418 		mta_reg |= (1 << vec_bit);
5419 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5420 	}
5421 
5422 	vmolr |= IXGBE_VMOLR_ROMPE;
5423 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5424 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5425 	return;
5426 }
5427 
5428 
5429 static void
5430 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5431 {
5432 	struct ixgbe_hw *hw;
5433 	int enable;
5434 	uint16_t tag;
5435 
5436 	hw = &adapter->hw;
5437 	enable = IXGBE_VT_MSGINFO(msg[0]);
5438 	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
5439 
5440 	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
5441 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5442 		return;
5443 	}
5444 
5445 	/* It is illegal to enable vlan tag 0. */
5446 	if (tag == 0 && enable != 0) {
5447 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5448 		return;
5449 	}
5450 
5451 	ixgbe_set_vfta(hw, tag, vf->pool, enable);
5452 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5453 }
5454 
5455 
5456 static void
5457 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5458 {
5459 	struct ixgbe_hw *hw;
5460 	uint32_t vf_max_size, pf_max_size, mhadd;
5461 
5462 	hw = &adapter->hw;
5463 	vf_max_size = msg[1];
5464 
5465 	if (vf_max_size < ETHER_CRC_LEN) {
5466 		/* We intentionally ACK invalid LPE requests. */
5467 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
5468 		return;
5469 	}
5470 
5471 	vf_max_size -= ETHER_CRC_LEN;
5472 
5473 	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
5474 		/* We intentionally ACK invalid LPE requests. */
5475 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
5476 		return;
5477 	}
5478 
5479 	vf->max_frame_size = vf_max_size;
5480 	ixgbe_update_max_frame(adapter, vf->max_frame_size);
5481 
5482 	/*
5483 	 * We might have to disable reception to this VF if the frame size is
5484 	 * not compatible with the config on the PF.
5485 	 */
5486 	ixgbe_vf_enable_receive(adapter, vf);
5487 
5488 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
5489 	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
5490 
5491 	if (pf_max_size < adapter->max_frame_size) {
5492 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
5493 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
5494 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
5495 	}
5496 
5497 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5498 }
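
/*
 * Size accounting example: the size in msg[1] includes the 4-byte
 * CRC, so a VF running with a 9000-byte MTU (9018 bytes on the wire)
 * ends up with vf->max_frame_size = 9014.
 */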
5499 
5500 
5501 static void
5502 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
5503     uint32_t *msg)
5504 {
5505 	//XXX implement this
5506 	ixgbe_send_vf_nack(adapter, vf, msg[0]);
5507 }
5508 
5509 
5510 static void
5511 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
5512     uint32_t *msg)
5513 {
5514 
5515 	switch (msg[1]) {
5516 	case IXGBE_API_VER_1_0:
5517 	case IXGBE_API_VER_1_1:
5518 		vf->api_ver = msg[1];
5519 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
5520 		break;
5521 	default:
5522 		vf->api_ver = IXGBE_API_VER_UNKNOWN;
5523 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5524 		break;
5525 	}
5526 }
5527 
5528 
5529 static void
5530 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
5531     uint32_t *msg)
5532 {
5533 	struct ixgbe_hw *hw;
5534 	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
5535 	int num_queues;
5536 
5537 	hw = &adapter->hw;
5538 
5539 	/* GET_QUEUES is not supported on pre-1.1 APIs. */
5540 	switch (vf->api_ver) {
5541 	case IXGBE_API_VER_1_0:
5542 	case IXGBE_API_VER_UNKNOWN:
5543 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5544 		return;
5545 	}
5546 
5547 	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
5548 	    IXGBE_VT_MSGTYPE_CTS;
5549 
5550 	num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
5551 	resp[IXGBE_VF_TX_QUEUES] = num_queues;
5552 	resp[IXGBE_VF_RX_QUEUES] = num_queues;
5553 	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
5554 	resp[IXGBE_VF_DEF_QUEUE] = 0;
5555 
5556 	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
5557 }
5558 
5559 
5560 static void
5561 ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
5562 {
5563 	struct ixgbe_hw *hw;
5564 	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
5565 	int error;
5566 
5567 	hw = &adapter->hw;
5568 
5569 	error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
5570 
5571 	if (error != 0)
5572 		return;
5573 
5574 	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
5575 	    adapter->ifp->if_xname, msg[0], vf->pool);
5576 	if (msg[0] == IXGBE_VF_RESET) {
5577 		ixgbe_vf_reset_msg(adapter, vf, msg);
5578 		return;
5579 	}
5580 
5581 	if (!(vf->flags & IXGBE_VF_CTS)) {
5582 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5583 		return;
5584 	}
5585 
5586 	switch (msg[0] & IXGBE_VT_MSG_MASK) {
5587 	case IXGBE_VF_SET_MAC_ADDR:
5588 		ixgbe_vf_set_mac(adapter, vf, msg);
5589 		break;
5590 	case IXGBE_VF_SET_MULTICAST:
5591 		ixgbe_vf_set_mc_addr(adapter, vf, msg);
5592 		break;
5593 	case IXGBE_VF_SET_VLAN:
5594 		ixgbe_vf_set_vlan(adapter, vf, msg);
5595 		break;
5596 	case IXGBE_VF_SET_LPE:
5597 		ixgbe_vf_set_lpe(adapter, vf, msg);
5598 		break;
5599 	case IXGBE_VF_SET_MACVLAN:
5600 		ixgbe_vf_set_macvlan(adapter, vf, msg);
5601 		break;
5602 	case IXGBE_VF_API_NEGOTIATE:
5603 		ixgbe_vf_api_negotiate(adapter, vf, msg);
5604 		break;
5605 	case IXGBE_VF_GET_QUEUES:
5606 		ixgbe_vf_get_queues(adapter, vf, msg);
5607 		break;
5608 	default:
5609 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5610 	}
5611 }
5612 
5613 
5614 /*
5615  * Taskqueue task for handling VF -> PF mailbox messages.
5616  */
5617 static void
5618 ixgbe_handle_mbx(void *context, int pending)
5619 {
5620 	struct adapter *adapter;
5621 	struct ixgbe_hw *hw;
5622 	struct ixgbe_vf *vf;
5623 	int i;
5624 
5625 	adapter = context;
5626 	hw = &adapter->hw;
5627 
5628 	IXGBE_CORE_LOCK(adapter);
5629 	for (i = 0; i < adapter->num_vfs; i++) {
5630 		vf = &adapter->vfs[i];
5631 
5632 		if (vf->flags & IXGBE_VF_ACTIVE) {
5633 			if (ixgbe_check_for_rst(hw, vf->pool) == 0)
5634 				ixgbe_process_vf_reset(adapter, vf);
5635 
5636 			if (ixgbe_check_for_msg(hw, vf->pool) == 0)
5637 				ixgbe_process_vf_msg(adapter, vf);
5638 
5639 			if (ixgbe_check_for_ack(hw, vf->pool) == 0)
5640 				ixgbe_process_vf_ack(adapter, vf);
5641 		}
5642 	}
5643 	IXGBE_CORE_UNLOCK(adapter);
5644 }
5645 
5646 
5647 static int
5648 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
5649 {
5650 	struct adapter *adapter;
5651 	enum ixgbe_iov_mode mode;
5652 
5653 	adapter = device_get_softc(dev);
5654 	adapter->num_vfs = num_vfs;
5655 	mode = ixgbe_get_iov_mode(adapter);
5656 
5657 	if (num_vfs > ixgbe_max_vfs(mode)) {
5658 		adapter->num_vfs = 0;
5659 		return (ENOSPC);
5660 	}
5661 
5662 	IXGBE_CORE_LOCK(adapter);
5663 
5664 	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
5665 	    M_NOWAIT | M_ZERO);
5666 
5667 	if (adapter->vfs == NULL) {
5668 		adapter->num_vfs = 0;
5669 		IXGBE_CORE_UNLOCK(adapter);
5670 		return (ENOMEM);
5671 	}
5672 
5673 	ixgbe_init_locked(adapter);
5674 
5675 	IXGBE_CORE_UNLOCK(adapter);
5676 
5677 	return (0);
5678 }
5679 
5680 
5681 static void
5682 ixgbe_uninit_iov(device_t dev)
5683 {
5684 	struct ixgbe_hw *hw;
5685 	struct adapter *adapter;
5686 	uint32_t pf_reg, vf_reg;
5687 
5688 	adapter = device_get_softc(dev);
5689 	hw = &adapter->hw;
5690 
5691 	IXGBE_CORE_LOCK(adapter);
5692 
5693 	/* Enable rx/tx for the PF and disable it for all VFs. */
5694 	pf_reg = IXGBE_VF_INDEX(adapter->pool);
5695 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
5696 	    IXGBE_VF_BIT(adapter->pool));
5697 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
5698 	    IXGBE_VF_BIT(adapter->pool));
5699 
5700 	if (pf_reg == 0)
5701 		vf_reg = 1;
5702 	else
5703 		vf_reg = 0;
5704 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
5705 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
5706 
5707 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
5708 
5709 	free(adapter->vfs, M_IXGBE);
5710 	adapter->vfs = NULL;
5711 	adapter->num_vfs = 0;
5712 
5713 	IXGBE_CORE_UNLOCK(adapter);
5714 }
5715 
5716 
5717 static void
5718 ixgbe_initialize_iov(struct adapter *adapter)
5719 {
5720 	struct ixgbe_hw *hw = &adapter->hw;
5721 	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
5722 	enum ixgbe_iov_mode mode;
5723 	int i;
5724 
5725 	mode = ixgbe_get_iov_mode(adapter);
5726 	if (mode == IXGBE_NO_VM)
5727 		return;
5728 
5729 	IXGBE_CORE_LOCK_ASSERT(adapter);
5730 
5731 	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
5732 	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
5733 
5734 	switch (mode) {
5735 	case IXGBE_64_VM:
5736 		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
5737 		break;
5738 	case IXGBE_32_VM:
5739 		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
5740 		break;
5741 	default:
5742 		panic("Unexpected SR-IOV mode %d", mode);
5743 	}
5744 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
5745 
5746 	mtqc = IXGBE_MTQC_VT_ENA;
5747 	switch (mode) {
5748 	case IXGBE_64_VM:
5749 		mtqc |= IXGBE_MTQC_64VF;
5750 		break;
5751 	case IXGBE_32_VM:
5752 		mtqc |= IXGBE_MTQC_32VF;
5753 		break;
5754 	default:
5755 		panic("Unexpected SR-IOV mode %d", mode);
5756 	}
5757 	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
5758 
5759 
5760 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
5761 	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
5762 	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
5763 	switch (mode) {
5764 	case IXGBE_64_VM:
5765 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
5766 		break;
5767 	case IXGBE_32_VM:
5768 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
5769 		break;
5770 	default:
5771 		panic("Unexpected SR-IOV mode %d", mode);
5772 	}
5773 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
5774 
5775 
5776 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5777 	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5778 	switch (mode) {
5779 	case IXGBE_64_VM:
5780 		gpie |= IXGBE_GPIE_VTMODE_64;
5781 		break;
5782 	case IXGBE_32_VM:
5783 		gpie |= IXGBE_GPIE_VTMODE_32;
5784 		break;
5785 	default:
5786 		panic("Unexpected SR-IOV mode %d", mode);
5787 	}
5788 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5789 
5790 	/* Enable rx/tx for the PF. */
5791 	vf_reg = IXGBE_VF_INDEX(adapter->pool);
5792 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
5793 	    IXGBE_VF_BIT(adapter->pool));
5794 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
5795 	    IXGBE_VF_BIT(adapter->pool));
5796 
5797 	/* Allow VM-to-VM communication. */
5798 	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
5799 
5800 	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
5801 	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
5802 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
5803 
5804 	for (i = 0; i < adapter->num_vfs; i++)
5805 		ixgbe_init_vf(adapter, &adapter->vfs[i]);
5806 }
5807 
5808 
5809 /*
5810 ** Check the max frame setting of all active VFs.
5811 */
5812 static void
5813 ixgbe_recalculate_max_frame(struct adapter *adapter)
5814 {
5815 	struct ixgbe_vf *vf;
5816 
5817 	IXGBE_CORE_LOCK_ASSERT(adapter);
5818 
5819 	for (int i = 0; i < adapter->num_vfs; i++) {
5820 		vf = &adapter->vfs[i];
5821 		if (vf->flags & IXGBE_VF_ACTIVE)
5822 			ixgbe_update_max_frame(adapter, vf->max_frame_size);
5823 	}
5824 }
5825 
5826 
5827 static void
5828 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
5829 {
5830 	struct ixgbe_hw *hw;
5831 	uint32_t vf_index, pfmbimr;
5832 
5833 	IXGBE_CORE_LOCK_ASSERT(adapter);
5834 
5835 	hw = &adapter->hw;
5836 
5837 	if (!(vf->flags & IXGBE_VF_ACTIVE))
5838 		return;
5839 
5840 	vf_index = IXGBE_VF_INDEX(vf->pool);
5841 	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
5842 	pfmbimr |= IXGBE_VF_BIT(vf->pool);
5843 	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
5844 
5845 	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
5846 
5847 	// XXX multicast addresses
5848 
5849 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5850 		ixgbe_set_rar(&adapter->hw, vf->rar_index,
5851 		    vf->ether_addr, vf->pool, TRUE);
5852 	}
5853 
5854 	ixgbe_vf_enable_transmit(adapter, vf);
5855 	ixgbe_vf_enable_receive(adapter, vf);
5856 
5857 	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
5858 }
5859 
5860 static int
5861 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
5862 {
5863 	struct adapter *adapter;
5864 	struct ixgbe_vf *vf;
5865 	const void *mac;
5866 
5867 	adapter = device_get_softc(dev);
5868 
5869 	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
5870 	    vfnum, adapter->num_vfs));
5871 
5872 	IXGBE_CORE_LOCK(adapter);
5873 	vf = &adapter->vfs[vfnum];
5874 	vf->pool = vfnum;
5875 
5876 	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
5877 	vf->rar_index = vfnum + 1;
5878 	vf->default_vlan = 0;
5879 	vf->max_frame_size = ETHER_MAX_LEN;
5880 	ixgbe_update_max_frame(adapter, vf->max_frame_size);
5881 
5882 	if (nvlist_exists_binary(config, "mac-addr")) {
5883 		mac = nvlist_get_binary(config, "mac-addr", NULL);
5884 		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5885 		if (nvlist_get_bool(config, "allow-set-mac"))
5886 			vf->flags |= IXGBE_VF_CAP_MAC;
5887 	} else
5888 		/*
5889 		 * If the administrator has not specified a MAC address then
5890 		 * we must allow the VF to choose one.
5891 		 */
5892 		vf->flags |= IXGBE_VF_CAP_MAC;
5893 
5894 	vf->flags |= IXGBE_VF_ACTIVE;
5895 
5896 	ixgbe_init_vf(adapter, vf);
5897 	IXGBE_CORE_UNLOCK(adapter);
5898 
5899 	return (0);
5900 }
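
/*
 * A minimal iovctl.conf(5) sketch exercising this path (device name
 * and MAC address are illustrative):
 *
 *	PF {
 *		device: "ix0";
 *		num_vfs: 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr: "02:00:00:00:00:01";
 *		allow-set-mac: true;
 *	}
 *
 * Omitting mac-addr lets the VF choose its own address, as handled
 * above.
 */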
5901 #endif /* PCI_IOV */
5902 
5903