/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

#ifdef	RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "3.1.13-k";


/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select which devices the driver loads on.
 *  The last field stores an index into ixgbe_strings.
 *  The last entry must be all 0s.
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgbe_probe(device_t);
static int      ixgbe_attach(device_t);
static int      ixgbe_detach(device_t);
static int      ixgbe_shutdown(device_t);
static int	ixgbe_suspend(device_t);
static int	ixgbe_resume(device_t);
static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixgbe_init(void *);
static void	ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
#if __FreeBSD_version >= 1100036
static uint64_t	ixgbe_get_counter(struct ifnet *, ift_counter);
#endif
static void	ixgbe_add_media_types(struct adapter *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int      ixgbe_allocate_msix(struct adapter *);
static int      ixgbe_allocate_legacy(struct adapter *);
static int	ixgbe_setup_msix(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
static void	ixgbe_rearm_queues(struct adapter *, u64);

static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static void	ixgbe_add_device_sysctls(struct adapter *);
static void     ixgbe_add_hw_stats(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		     const char *, int *, int);
static int	ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int	ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);
static void	ixgbe_setup_optics(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void	ixgbe_legacy_irq(void *);

/* The MSI/X Interrupt handlers */
static void	ixgbe_msix_que(void *);
static void	ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void	ixgbe_handle_que(void *, int);
static void	ixgbe_handle_link(void *, int);
static void	ixgbe_handle_msf(void *, int);
static void	ixgbe_handle_mod(void *, int);
static void	ixgbe_handle_phy(void *, int);

#ifdef IXGBE_FDIR
static void	ixgbe_reinit_fdir(void *, int);
#endif

#ifdef PCI_IOV
static void	ixgbe_ping_all_vfs(struct adapter *);
static void	ixgbe_handle_mbx(void *, int);
static int	ixgbe_init_iov(device_t, u16, const nvlist_t *);
static void	ixgbe_uninit_iov(device_t);
static int	ixgbe_add_vf(device_t, u16, const nvlist_t *);
static void	ixgbe_initialize_iov(struct adapter *);
static void	ixgbe_recalculate_max_frame(struct adapter *);
static void	ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
#endif /* PCI_IOV */


/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
		   "IXGBE driver parameters");

/*
** AIM: Adaptive Interrupt Moderation,
** which means that the interrupt rate
** is varied over time based on the
** traffic seen on that interrupt vector.
*/
static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");
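/*
** The moderation heuristic itself lives in ixgbe_msix_que() below:
** the average packet size seen over the last interval selects the
** next EITR setting for that vector.
*/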

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
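/*
** A sketch of the arithmetic behind the default above, assuming the
** stock ixgbe.h value IXGBE_LOW_LATENCY == 128: 4000000 / 128 caps
** each vector at 31250 interrupts per second.
*/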

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, "
    "-1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, "
    "-1 means unlimited");

/*
** Smart speed setting, default to on.
** This only works as a compile-time option
** right now because it is applied during attach;
** set it to 'ixgbe_smart_speed_off' to disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of queues; can be set to 0,
 * in which case it autoconfigures based
 * on the number of CPUs with a max of 8.
 * This can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
** Number of TX descriptors per ring;
** set higher than RX as this seems
** to be the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
** Setting this to TRUE allows the use
** of unsupported SFP+ modules; note that
** in doing so you are on your own :)
*/
static int allow_unsupported_sfp = FALSE;
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/* Keep a running tab of the ports for sanity checking */
static int ixgbe_total_ports;

#ifdef IXGBE_FDIR
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool; this variable controls
** how much it uses:
**  0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
#endif

#ifdef DEV_NETMAP
/*
 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
 * be a reference on how to implement netmap support in a driver.
 * Additional comments are in ixgbe_netmap.h .
 *
 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
 * that extend the standard driver.
 */
#include <dev/netmap/ixgbe_netmap.h>
#endif /* DEV_NETMAP */

static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on
 *  an adapter, based on the PCI vendor/device ID of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev)
{
	ixgbe_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	INIT_DEBUGOUT("ixgbe_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixgbe_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
				ixgbe_strings[ent->index],
				ixgbe_driver_version);
			device_set_desc_copy(dev, adapter_name);
			++ixgbe_total_ports;
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	u16		csum;
	u32		ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	hw = &adapter->hw;

#ifdef DEV_NETMAP
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;
#endif

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
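	/*
	 * A worked example of the alignment test below, assuming the
	 * stock values DBA_ALIGN == 128 and a 16-byte
	 * union ixgbe_adv_tx_desc: the ring byte size is 128-byte
	 * aligned only when the count is a multiple of 8, so 2048
	 * passes while 2052 would fall back to DEFAULT_TXD.
	 */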
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}
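	/*
	 * Worked example of the check above: 2048 RX descriptors on
	 * 8 queues across 4 ports ask for 2048 * 8 * 4 = 65536
	 * clusters; if nmbclusters is below that, the count falls
	 * back to DEFAULT_RXD instead of exhausting the mbuf pool.
	 */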

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	hw->allow_unsupported_sfp = allow_unsupported_sfp;
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port; set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter);
	else
		error = ixgbe_allocate_legacy(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Check PCIe slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/* Set an initial default flow control & dmac value */
	adapter->fc = ixgbe_fc_full;
	adapter->dmac = 0;
	adapter->eee_enabled = 0;

#ifdef PCI_IOV
	if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
		nvlist_t *pf_schema, *vf_schema;

		hw->mbx.ops.init_params(hw);
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (error != 0) {
			device_printf(dev,
			    "Error %d setting up SR-IOV\n", error);
		}
	}
#endif /* PCI_IOV */

	/* Check for certain supported features */
	ixgbe_check_wol_support(adapter);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixgbe_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANs are not using the driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "VLAN in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	if (pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}
#endif /* PCI_IOV */

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
#ifndef IXGBE_LEGACY_TX
			taskqueue_drain(que->tq, &txr->txq_task);
#endif
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
#ifdef PCI_IOV
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
#endif
		taskqueue_drain(adapter->tq, &adapter->phy_task);
#ifdef IXGBE_FDIR
		taskqueue_drain(adapter->tq, &adapter->fdir_task);
#endif
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}

/**
 * Methods for going from:
 * D0 -> D3: ixgbe_suspend
 * D3 -> D0: ixgbe_resume
 */
static int
ixgbe_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_suspend: begin");

	IXGBE_CORE_LOCK(adapter);

	error = ixgbe_setup_low_power_mode(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}

static int
ixgbe_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 wus;

	INIT_DEBUGOUT("ixgbe_resume: begin");

	IXGBE_CORE_LOCK(adapter);

	/* Read & clear WUS register */
	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
	if (wus)
		device_printf(dev, "Woken up by (WUS): %#010x\n",
		    IXGBE_READ_REG(hw, IXGBE_WUS));
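	/* WUS bits are write-1-to-clear; writing all ones clears every wake source. */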
	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
	/* And clear WUFC until next low-power transition */
	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);

	/*
	 * Required after D3->D0 transition;
	 * will re-advertise all previously advertised speeds
	 */
	if (ifp->if_flags & IFF_UP)
		ixgbe_init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (0);
}


/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int             error = 0;
	bool		avoid_reset = FALSE;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
				ifp->if_mtu + IXGBE_MTU_HDR;
			ixgbe_init_locked(adapter);
#ifdef PCI_IOV
			ixgbe_recalculate_max_frame(adapter);
#endif
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
				}
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (!mask)
			break;

		/* HW cannot turn these on/off separately */
		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		}
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;
		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
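		/*
		 * 0xA0 and 0xA2 are the standard SFP+ EEPROM and
		 * diagnostics I2C addresses defined by SFF-8472;
		 * anything else is rejected below.
		 */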
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set the various hardware offload abilities.
 *
 * This takes the ifnet's if_capenable flags (e.g. set by the user using
 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
 * mbuf offload flags the driver will understand.
 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;

	ifp->if_hwassist = 0;
#if __FreeBSD_version >= 1000000
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_IP6_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP |
		    CSUM_IP_SCTP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP |
		    CSUM_IP6_SCTP);
#else
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
		struct ixgbe_hw *hw = &adapter->hw;
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;
	}
#endif
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: it is used by the stack as the
 *  init entry point in the network interface structure, and it is
 *  also used by the driver as a hw/sw initialization routine to get
 *  the hardware and software to a consistent state.
 *
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t 	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring  *txr;
	struct rx_ring  *rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	int err = 0;
#ifdef PCI_IOV
	enum ixgbe_iov_mode mode;
#endif

	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

#ifdef PCI_IOV
	mode = ixgbe_get_iov_mode(adapter);
	adapter->pool = ixgbe_max_vfs(mode);
	/* Queue indices may change with IOV mode */
	for (int i = 0; i < adapter->num_queues; i++) {
		adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
		adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
	}
#endif
	/* Reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, user can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
#ifdef PCI_IOV
	ixgbe_initialize_iov(adapter);
#endif
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
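	/*
	 * MCLBYTES is the standard 2KB mbuf cluster; MJUMPAGESIZE is
	 * the page-sized (typically 4KB) jumbo cluster used once the
	 * frame no longer fits a regular cluster.
	 */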

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSIX interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
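		 * (Assumption, per the 82599 datasheet: PTHRESH lives
		 * in bits 6:0, HTHRESH in bits 14:8 and WTHRESH in
		 * bits 22:16 of TXDCTL, hence the shifts by 0, 8 and
		 * 16 here.)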
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
	}
#endif

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Configure Energy Efficient Ethernet for supported devices */
	if (hw->mac.ops.setup_eee) {
		err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
		if (err)
			device_printf(dev, "Error setting up EEE: %d\n", err);
	}

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

#ifdef PCI_IOV
	/* Enable the use of the MBX by the VF's */
	{
		u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		reg |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
	}
#endif

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
}

static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return;
}

static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Fan Failure Interrupt */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		gpie |= IXGBE_SDP1_GPIEN;

	/*
	 * Module detection (SDP2)
	 * Media ready (SDP1)
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP2_GPIEN;
		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
			gpie |= IXGBE_SDP1_GPIEN;
	}

	/*
	 * Thermal Failure Detection (X540)
	 * Link Detection (X552 SFP+, X552/X557-AT)
	 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	return;
}

/*
 * Requires adapter->max_frame_size to be set.
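 *
 * Sketch of the intent (as suggested by the Intel DV macros): the
 * delay value approximates the worst-case bytes that can still
 * arrive after XOFF is sent; high water is the RX packet buffer
 * size (RXPBSIZE, in KB) minus that amount, and low water is the
 * level at which XON is sent again.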
 */
static void
ixgbe_config_delay_values(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxpb, frame, size, tmp;

	frame = adapter->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.requested_mode = adapter->fc;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;
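	/*
	 * The shift is done in 64 bits ((u64)1 << vector) so vectors
	 * 32-63 reach EIMS_EX(1) below; a plain (1 << vector) would
	 * truncate at 32 bits. The 82598 has a single EIMS register,
	 * newer MACs split the mask across EIMS_EX(0)/EIMS_EX(1).
	 */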

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
}

static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
}

static void
ixgbe_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifndef IXGBE_LEGACY_TX
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Reenable this interrupt */
	if (que->res != NULL)
		ixgbe_enable_queue(adapter, que->msix);
	else
		ixgbe_enable_intr(adapter);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/

static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	bool		more;
	u32		reg_eicr;


	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);
		return;
	}

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);
	return;
}


/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;


	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixgbe_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
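	/*
	** A sketch with round numbers: 1500-byte average frames give
	** newitr = 1500 + 24 = 1524, outside the (300,1200) mid range,
	** so it is halved to 762; pure 64-byte traffic gives
	** (64 + 24) / 2 = 44, i.e. a much smaller moderation interval.
	*/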
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_EITR(que->msix), que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes / txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		newitr |= newitr << 16;
	else
		newitr |= IXGBE_EITR_CNT_WDIS;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_queue(adapter, que->msix);
	return;
}
1611 
1612 
1613 static void
1614 ixgbe_msix_link(void *arg)
1615 {
1616 	struct adapter	*adapter = arg;
1617 	struct ixgbe_hw *hw = &adapter->hw;
1618 	u32		reg_eicr, mod_mask;
1619 
1620 	++adapter->link_irq;
1621 
1622 	/* Pause other interrupts */
1623 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
1624 
1625 	/* First get the cause */
1626 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1627 	/* Be sure the queue bits are not cleared */
1628 	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1629 	/* Clear interrupt with write */
1630 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1631 
1632 	/* Link status change */
1633 	if (reg_eicr & IXGBE_EICR_LSC) {
1634 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1635 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
1636 	}
1637 
1638 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1639 #ifdef IXGBE_FDIR
1640 		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1641 			/* This is probably overkill :) */
1642 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1643 				return;
1644                 	/* Disable the interrupt */
1645 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1646 			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1647 		} else
1648 #endif
1649 		if (reg_eicr & IXGBE_EICR_ECC) {
1650 			device_printf(adapter->dev, "CRITICAL: ECC ERROR!! "
1651 			    "Please Reboot!!\n");
1652 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1653 		}
1654 
1655 		/* Check for over temp condition */
1656 		if (reg_eicr & IXGBE_EICR_TS) {
1657 			device_printf(adapter->dev, "CRITICAL: OVER TEMP!! "
1658 			    "PHY IS SHUT DOWN!!\n");
1659 			device_printf(adapter->dev, "System shutdown required!\n");
1660 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1661 		}
1662 #ifdef PCI_IOV
1663 		if (reg_eicr & IXGBE_EICR_MAILBOX)
1664 			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1665 #endif
1666 	}
1667 
1668 	/* Pluggable optics-related interrupt */
1669 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1670 		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1671 	else
1672 		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1673 
1674 	if (ixgbe_is_sfp(hw)) {
1675 		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1676 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1677 			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1678 		} else if (reg_eicr & mod_mask) {
1679 			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1680 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1681 		}
1682 	}
1683 
1684 	/* Check for fan failure */
1685 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1686 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1687 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1688 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1689 		    "REPLACE IMMEDIATELY!!\n");
1690 	}
1691 
1692 	/* External PHY interrupt */
1693 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1694 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1695 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1696 		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1697 	}
1698 
1699 	/* Re-enable other interrupts */
1700 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1701 	return;
1702 }
1703 
1704 /*********************************************************************
1705  *
1706  *  Media Ioctl callback
1707  *
1708  *  This routine is called whenever the user queries the status of
1709  *  the interface using ifconfig.
1710  *
1711  **********************************************************************/
1712 static void
1713 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1714 {
1715 	struct adapter *adapter = ifp->if_softc;
1716 	struct ixgbe_hw *hw = &adapter->hw;
1717 	int layer;
1718 
1719 	INIT_DEBUGOUT("ixgbe_media_status: begin");
1720 	IXGBE_CORE_LOCK(adapter);
1721 	ixgbe_update_link_status(adapter);
1722 
1723 	ifmr->ifm_status = IFM_AVALID;
1724 	ifmr->ifm_active = IFM_ETHER;
1725 
1726 	if (!adapter->link_active) {
1727 		IXGBE_CORE_UNLOCK(adapter);
1728 		return;
1729 	}
1730 
1731 	ifmr->ifm_status |= IFM_ACTIVE;
1732 	layer = adapter->phy_layer;
1733 
1734 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1735 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1736 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1737 		switch (adapter->link_speed) {
1738 		case IXGBE_LINK_SPEED_10GB_FULL:
1739 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1740 			break;
1741 		case IXGBE_LINK_SPEED_1GB_FULL:
1742 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1743 			break;
1744 		case IXGBE_LINK_SPEED_100_FULL:
1745 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1746 			break;
1747 		}
1748 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1749 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1750 		switch (adapter->link_speed) {
1751 		case IXGBE_LINK_SPEED_10GB_FULL:
1752 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1753 			break;
1754 		}
1755 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1756 		switch (adapter->link_speed) {
1757 		case IXGBE_LINK_SPEED_10GB_FULL:
1758 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1759 			break;
1760 		case IXGBE_LINK_SPEED_1GB_FULL:
1761 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1762 			break;
1763 		}
1764 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1765 		switch (adapter->link_speed) {
1766 		case IXGBE_LINK_SPEED_10GB_FULL:
1767 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1768 			break;
1769 		case IXGBE_LINK_SPEED_1GB_FULL:
1770 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1771 			break;
1772 		}
1773 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1774 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1775 		switch (adapter->link_speed) {
1776 		case IXGBE_LINK_SPEED_10GB_FULL:
1777 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1778 			break;
1779 		case IXGBE_LINK_SPEED_1GB_FULL:
1780 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1781 			break;
1782 		}
1783 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1784 		switch (adapter->link_speed) {
1785 		case IXGBE_LINK_SPEED_10GB_FULL:
1786 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1787 			break;
1788 		}
1789 	/*
1790 	** XXX: These need to use the proper media types once
1791 	** they're added.
1792 	*/
1793 #ifndef IFM_ETH_XTYPE
1794 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1795 		switch (adapter->link_speed) {
1796 		case IXGBE_LINK_SPEED_10GB_FULL:
1797 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1798 			break;
1799 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1800 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1801 			break;
1802 		case IXGBE_LINK_SPEED_1GB_FULL:
1803 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1804 			break;
1805 		}
1806 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1807 	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1808 		switch (adapter->link_speed) {
1809 		case IXGBE_LINK_SPEED_10GB_FULL:
1810 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1811 			break;
1812 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1813 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1814 			break;
1815 		case IXGBE_LINK_SPEED_1GB_FULL:
1816 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1817 			break;
1818 		}
1819 #else
1820 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1821 		switch (adapter->link_speed) {
1822 		case IXGBE_LINK_SPEED_10GB_FULL:
1823 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
1824 			break;
1825 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1826 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1827 			break;
1828 		case IXGBE_LINK_SPEED_1GB_FULL:
1829 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1830 			break;
1831 		}
1832 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1833 	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1834 		switch (adapter->link_speed) {
1835 		case IXGBE_LINK_SPEED_10GB_FULL:
1836 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
1837 			break;
1838 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1839 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1840 			break;
1841 		case IXGBE_LINK_SPEED_1GB_FULL:
1842 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1843 			break;
1844 		}
1845 #endif
1846 
1847 	/* If nothing is recognized... */
1848 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1849 		ifmr->ifm_active |= IFM_UNKNOWN;
1850 
1851 #if __FreeBSD_version >= 900025
1852 	/* Display current flow control setting used on link */
1853 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1854 	    hw->fc.current_mode == ixgbe_fc_full)
1855 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1856 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1857 	    hw->fc.current_mode == ixgbe_fc_full)
1858 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1859 #endif
1860 
1861 	IXGBE_CORE_UNLOCK(adapter);
1862 
1863 	return;
1864 }
1865 
1866 /*********************************************************************
1867  *
1868  *  Media Ioctl callback
1869  *
1870  *  This routine is called when the user changes speed/duplex using
1871  *  media/mediaopt option with ifconfig.
1872  *
1873  **********************************************************************/
1874 static int
1875 ixgbe_media_change(struct ifnet * ifp)
1876 {
1877 	struct adapter *adapter = ifp->if_softc;
1878 	struct ifmedia *ifm = &adapter->media;
1879 	struct ixgbe_hw *hw = &adapter->hw;
1880 	ixgbe_link_speed speed = 0;
1881 
1882 	INIT_DEBUGOUT("ixgbe_media_change: begin");
1883 
1884 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1885 		return (EINVAL);
1886 
1887 	if (hw->phy.media_type == ixgbe_media_type_backplane)
1888 		return (ENODEV);
1889 
1890 	/*
1891 	** We don't actually need to check against the supported
1892 	** media types of the adapter; ifmedia will take care of
1893 	** that for us.
1894 	*/
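	/*
	 * Note: the cases below intentionally fall through; selecting
	 * a 10G media type also advertises the lower link speeds, so
	 * each tier ORs in its speed and drops to the next.
	 */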
1895 #ifndef IFM_ETH_XTYPE
1896 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1897 		case IFM_AUTO:
1898 		case IFM_10G_T:
1899 			speed |= IXGBE_LINK_SPEED_100_FULL;
1900 		case IFM_10G_LRM:
1901 		case IFM_10G_SR: /* KR, too */
1902 		case IFM_10G_LR:
1903 		case IFM_10G_CX4: /* KX4 */
1904 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1905 		case IFM_10G_TWINAX:
1906 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1907 			break;
1908 		case IFM_1000_T:
1909 			speed |= IXGBE_LINK_SPEED_100_FULL;
1910 		case IFM_1000_LX:
1911 		case IFM_1000_SX:
1912 		case IFM_1000_CX: /* KX */
1913 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1914 			break;
1915 		case IFM_100_TX:
1916 			speed |= IXGBE_LINK_SPEED_100_FULL;
1917 			break;
1918 		default:
1919 			goto invalid;
1920 	}
1921 #else
1922 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1923 		case IFM_AUTO:
1924 		case IFM_10G_T:
1925 			speed |= IXGBE_LINK_SPEED_100_FULL;
1926 		case IFM_10G_LRM:
1927 		case IFM_10G_KR:
1928 		case IFM_10G_LR:
1929 		case IFM_10G_KX4:
1930 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1931 		case IFM_10G_TWINAX:
1932 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1933 			break;
1934 		case IFM_1000_T:
1935 			speed |= IXGBE_LINK_SPEED_100_FULL;
1936 		case IFM_1000_LX:
1937 		case IFM_1000_SX:
1938 		case IFM_1000_KX:
1939 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1940 			break;
1941 		case IFM_100_TX:
1942 			speed |= IXGBE_LINK_SPEED_100_FULL;
1943 			break;
1944 		default:
1945 			goto invalid;
1946 	}
1947 #endif
1948 
1949 	hw->mac.autotry_restart = TRUE;
1950 	hw->mac.ops.setup_link(hw, speed, TRUE);
1951 	adapter->advertise =
1952 		((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1953 		((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1954 		((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1955 
1956 	return (0);
1957 
1958 invalid:
1959 	device_printf(adapter->dev, "Invalid media type!\n");
1960 	return (EINVAL);
1961 }
1962 
1963 static void
1964 ixgbe_set_promisc(struct adapter *adapter)
1965 {
1966 	u_int32_t       reg_rctl;
1967 	struct ifnet   *ifp = adapter->ifp;
1968 	int		mcnt = 0;
1969 
1970 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
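	/* FCTRL.UPE = unicast promiscuous, FCTRL.MPE = multicast promiscuous */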
1971 	reg_rctl &= (~IXGBE_FCTRL_UPE);
1972 	if (ifp->if_flags & IFF_ALLMULTI)
1973 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1974 	else {
1975 		struct	ifmultiaddr *ifma;
1976 #if __FreeBSD_version < 800000
1977 		IF_ADDR_LOCK(ifp);
1978 #else
1979 		if_maddr_rlock(ifp);
1980 #endif
1981 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1982 			if (ifma->ifma_addr->sa_family != AF_LINK)
1983 				continue;
1984 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1985 				break;
1986 			mcnt++;
1987 		}
1988 #if __FreeBSD_version < 800000
1989 		IF_ADDR_UNLOCK(ifp);
1990 #else
1991 		if_maddr_runlock(ifp);
1992 #endif
1993 	}
1994 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1995 		reg_rctl &= (~IXGBE_FCTRL_MPE);
1996 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1997 
1998 	if (ifp->if_flags & IFF_PROMISC) {
1999 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2000 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2001 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2002 		reg_rctl |= IXGBE_FCTRL_MPE;
2003 		reg_rctl &= ~IXGBE_FCTRL_UPE;
2004 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2005 	}
2006 	return;
2007 }
2008 
2009 
2010 /*********************************************************************
2011  *  Multicast Update
2012  *
2013  *  This routine is called whenever the multicast address list is updated.
2014  *
2015  **********************************************************************/
2016 #define IXGBE_RAR_ENTRIES 16
2017 
2018 static void
2019 ixgbe_set_multi(struct adapter *adapter)
2020 {
2021 	u32			fctrl;
2022 	u8			*update_ptr;
2023 	struct ifmultiaddr	*ifma;
2024 	struct ixgbe_mc_addr	*mta;
2025 	int			mcnt = 0;
2026 	struct ifnet		*ifp = adapter->ifp;
2027 
2028 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
2029 
2030 	mta = adapter->mta;
2031 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
2032 
2033 #if __FreeBSD_version < 800000
2034 	IF_ADDR_LOCK(ifp);
2035 #else
2036 	if_maddr_rlock(ifp);
2037 #endif
2038 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2039 		if (ifma->ifma_addr->sa_family != AF_LINK)
2040 			continue;
2041 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2042 			break;
2043 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2044 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2045 		mta[mcnt].vmdq = adapter->pool;
2046 		mcnt++;
2047 	}
2048 #if __FreeBSD_version < 800000
2049 	IF_ADDR_UNLOCK(ifp);
2050 #else
2051 	if_maddr_runlock(ifp);
2052 #endif
2053 
2054 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2055 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2056 	if (ifp->if_flags & IFF_PROMISC)
2057 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2058 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
2059 	    ifp->if_flags & IFF_ALLMULTI) {
2060 		fctrl |= IXGBE_FCTRL_MPE;
2061 		fctrl &= ~IXGBE_FCTRL_UPE;
2062 	} else
2063 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2064 
2065 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2066 
2067 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
2068 		update_ptr = (u8 *)mta;
2069 		ixgbe_update_mc_addr_list(&adapter->hw,
2070 		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
2071 	}
2072 
2073 	return;
2074 }
2075 
2076 /*
2077  * This is an iterator function needed by the shared multicast
2078  * code. It simply feeds the shared code routine the addresses
2079  * built up in the mta array by ixgbe_set_multi(), one at a time.
2080  */
2081 static u8 *
2082 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
2083 {
2084 	struct ixgbe_mc_addr *mta;
2085 
2086 	mta = (struct ixgbe_mc_addr *)*update_ptr;
2087 	*vmdq = mta->vmdq;
2088 
2089 	*update_ptr = (u8*)(mta + 1);
2090 	return (mta->addr);
2091 }
2092 
2093 
2094 /*********************************************************************
2095  *  Timer routine
2096  *
2097  *  This routine checks for link status, updates statistics,
2098  *  and runs the watchdog check.
2099  *
2100  **********************************************************************/
2101 
2102 static void
2103 ixgbe_local_timer(void *arg)
2104 {
2105 	struct adapter	*adapter = arg;
2106 	device_t	dev = adapter->dev;
2107 	struct ix_queue *que = adapter->queues;
2108 	u64		queues = 0;
2109 	int		hung = 0;
2110 
2111 	mtx_assert(&adapter->core_mtx, MA_OWNED);
2112 
2113 	/* Check for pluggable optics */
2114 	if (adapter->sfp_probe)
2115 		if (!ixgbe_sfp_probe(adapter))
2116 			goto out; /* Nothing to do */
2117 
2118 	ixgbe_update_link_status(adapter);
2119 	ixgbe_update_stats_counters(adapter);
2120 
2121 	/*
2122 	** Check the TX queues status
2123 	**	- mark hung queues so we don't schedule on them
2124 	**      - watchdog only if all queues show hung
2125 	*/
2126 	for (int i = 0; i < adapter->num_queues; i++, que++) {
2127 		/* Keep track of queues with work for soft irq */
2128 		if (que->txr->busy)
2129 			queues |= ((u64)1 << que->me);
2130 		/*
2131 		** Each time txeof runs without cleaning while there
2132 		** are uncleaned descriptors, it increments busy; if
2133 		** busy reaches the MAX we declare the queue hung.
2134 		*/
2135 		if (que->busy == IXGBE_QUEUE_HUNG) {
2136 			++hung;
2137 			/* Mark the queue as inactive */
2138 			adapter->active_queues &= ~((u64)1 << que->me);
2139 			continue;
2140 		} else {
2141 			/* Check if we've come back from hung */
2142 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2143 				adapter->active_queues |= ((u64)1 << que->me);
2144 		}
2145 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
2146 			device_printf(dev, "Warning queue %d "
2147 			    "appears to be hung!\n", i);
2148 			que->txr->busy = IXGBE_QUEUE_HUNG;
2149 			++hung;
2150 		}
2151 
2152 	}
2153 
2154 	/* Only truly watchdog if all queues show hung */
2155 	if (hung == adapter->num_queues)
2156 		goto watchdog;
2157 	else if (queues != 0) { /* Force an IRQ on queues with work */
2158 		ixgbe_rearm_queues(adapter, queues);
2159 	}
2160 
2161 out:
2162 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
2163 	return;
2164 
2165 watchdog:
2166 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2167 	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2168 	adapter->watchdog_events++;
2169 	ixgbe_init_locked(adapter);
2170 }
2171 
2172 
2173 /*
2174 ** Note: this routine updates the OS on the link state;
2175 **	the real check of the hardware only happens with
2176 **	a link interrupt.
2177 */
2178 static void
2179 ixgbe_update_link_status(struct adapter *adapter)
2180 {
2181 	struct ifnet	*ifp = adapter->ifp;
2182 	device_t dev = adapter->dev;
2183 
2184 	if (adapter->link_up){
2185 		if (adapter->link_active == FALSE) {
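			/* link_speed 128 == IXGBE_LINK_SPEED_10GB_FULL (0x80) */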
2186 			if (bootverbose)
2187 				device_printf(dev, "Link is up %d Gbps %s\n",
2188 				    ((adapter->link_speed == 128)? 10:1),
2189 				    "Full Duplex");
2190 			adapter->link_active = TRUE;
2191 			/* Update any Flow Control changes */
2192 			ixgbe_fc_enable(&adapter->hw);
2193 			/* Update DMA coalescing config */
2194 			ixgbe_config_dmac(adapter);
2195 			if_link_state_change(ifp, LINK_STATE_UP);
2196 #ifdef PCI_IOV
2197 			ixgbe_ping_all_vfs(adapter);
2198 #endif
2199 		}
2200 	} else { /* Link down */
2201 		if (adapter->link_active == TRUE) {
2202 			if (bootverbose)
2203 				device_printf(dev, "Link is Down\n");
2204 			if_link_state_change(ifp, LINK_STATE_DOWN);
2205 			adapter->link_active = FALSE;
2206 #ifdef PCI_IOV
2207 			ixgbe_ping_all_vfs(adapter);
2208 #endif
2209 		}
2210 	}
2211 
2212 	return;
2213 }
2214 
2215 
2216 /*********************************************************************
2217  *
2218  *  This routine disables all traffic on the adapter by issuing a
2219  *  global reset on the MAC and deallocates TX/RX buffers.
2220  *
2221  **********************************************************************/
2222 
2223 static void
2224 ixgbe_stop(void *arg)
2225 {
2226 	struct ifnet   *ifp;
2227 	struct adapter *adapter = arg;
2228 	struct ixgbe_hw *hw = &adapter->hw;
2229 	ifp = adapter->ifp;
2230 
2231 	mtx_assert(&adapter->core_mtx, MA_OWNED);
2232 
2233 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
2234 	ixgbe_disable_intr(adapter);
2235 	callout_stop(&adapter->timer);
2236 
2237 	/* Let the stack know...*/
2238 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2239 
2240 	ixgbe_reset_hw(hw);
2241 	hw->adapter_stopped = FALSE;
2242 	ixgbe_stop_adapter(hw);
2243 	if (hw->mac.type == ixgbe_mac_82599EB)
2244 		ixgbe_stop_mac_link_on_d3_82599(hw);
2245 	/* Turn off the laser - noop with no optics */
2246 	ixgbe_disable_tx_laser(hw);
2247 
2248 	/* Update the stack */
2249 	adapter->link_up = FALSE;
2250 	ixgbe_update_link_status(adapter);
2251 
2252 	/* reprogram the RAR[0] in case user changed it. */
2253 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2254 
2255 	return;
2256 }
2257 
2258 
2259 /*********************************************************************
2260  *
2261  *  Determine hardware revision.
2262  *
2263  **********************************************************************/
2264 static void
2265 ixgbe_identify_hardware(struct adapter *adapter)
2266 {
2267 	device_t        dev = adapter->dev;
2268 	struct ixgbe_hw *hw = &adapter->hw;
2269 
2270 	/* Save off the information about this board */
2271 	hw->vendor_id = pci_get_vendor(dev);
2272 	hw->device_id = pci_get_device(dev);
2273 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2274 	hw->subsystem_vendor_id =
2275 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2276 	hw->subsystem_device_id =
2277 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2278 
2279 	/*
2280 	** Make sure BUSMASTER is set
2281 	*/
2282 	pci_enable_busmaster(dev);
2283 
2284 	/* We need this here to set the num_segs below */
2285 	ixgbe_set_mac_type(hw);
2286 
2287 	/* Pick up the 82599 settings */
2288 	if (hw->mac.type != ixgbe_mac_82598EB) {
2289 		hw->phy.smart_speed = ixgbe_smart_speed;
2290 		adapter->num_segs = IXGBE_82599_SCATTER;
2291 	} else
2292 		adapter->num_segs = IXGBE_82598_SCATTER;
2293 
2294 	return;
2295 }
2296 
2297 /*********************************************************************
2298  *
2299  *  Determine optic type
2300  *
2301  **********************************************************************/
2302 static void
2303 ixgbe_setup_optics(struct adapter *adapter)
2304 {
2305 	struct ixgbe_hw *hw = &adapter->hw;
2306 	int		layer;
2307 
2308 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2309 
2310 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2311 		adapter->optics = IFM_10G_T;
2312 		return;
2313 	}
2314 
2315 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2316 		adapter->optics = IFM_1000_T;
2317 		return;
2318 	}
2319 
2320 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2321 		adapter->optics = IFM_1000_SX;
2322 		return;
2323 	}
2324 
2325 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2326 	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2327 		adapter->optics = IFM_10G_LR;
2328 		return;
2329 	}
2330 
2331 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2332 		adapter->optics = IFM_10G_SR;
2333 		return;
2334 	}
2335 
2336 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2337 		adapter->optics = IFM_10G_TWINAX;
2338 		return;
2339 	}
2340 
2341 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2342 	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2343 		adapter->optics = IFM_10G_CX4;
2344 		return;
2345 	}
2346 
2347 	/* If we get here just set the default */
2348 	adapter->optics = IFM_ETHER | IFM_AUTO;
2349 	return;
2350 }
2351 
2352 /*********************************************************************
2353  *
2354  *  Setup the Legacy or MSI Interrupt handler
2355  *
2356  **********************************************************************/
2357 static int
2358 ixgbe_allocate_legacy(struct adapter *adapter)
2359 {
2360 	device_t	dev = adapter->dev;
2361 	struct		ix_queue *que = adapter->queues;
2362 #ifndef IXGBE_LEGACY_TX
2363 	struct tx_ring		*txr = adapter->tx_rings;
2364 #endif
2365 	int		error, rid = 0;
2366 
2367 	/* MSI RID at 1 */
2368 	if (adapter->msix == 1)
2369 		rid = 1;
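	/* rid 0 selects the legacy INTx resource; MSI is mapped at rid 1 */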
2370 
2371 	/* We allocate a single interrupt resource */
2372 	adapter->res = bus_alloc_resource_any(dev,
2373             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2374 	if (adapter->res == NULL) {
2375 		device_printf(dev, "Unable to allocate bus resource: "
2376 		    "interrupt\n");
2377 		return (ENXIO);
2378 	}
2379 
2380 	/*
2381 	 * Try allocating a fast interrupt and the associated deferred
2382 	 * processing contexts.
2383 	 */
2384 #ifndef IXGBE_LEGACY_TX
2385 	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2386 #endif
2387 	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2388 	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2389             taskqueue_thread_enqueue, &que->tq);
2390 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2391             device_get_nameunit(adapter->dev));
2392 
2393 	/* Tasklets for Link, SFP and Multispeed Fiber */
2394 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2395 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2396 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2397 	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2398 #ifdef IXGBE_FDIR
2399 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2400 #endif
2401 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2402 	    taskqueue_thread_enqueue, &adapter->tq);
2403 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2404 	    device_get_nameunit(adapter->dev));
2405 
2406 	if ((error = bus_setup_intr(dev, adapter->res,
2407             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2408             que, &adapter->tag)) != 0) {
2409 		device_printf(dev, "Failed to register fast interrupt "
2410 		    "handler: %d\n", error);
2411 		taskqueue_free(que->tq);
2412 		taskqueue_free(adapter->tq);
2413 		que->tq = NULL;
2414 		adapter->tq = NULL;
2415 		return (error);
2416 	}
2417 	/* For simplicity in the handlers */
2418 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2419 
2420 	return (0);
2421 }
2422 
2423 
2424 /*********************************************************************
2425  *
2426  *  Setup MSIX Interrupt resources and handlers
2427  *
2428  **********************************************************************/
2429 static int
2430 ixgbe_allocate_msix(struct adapter *adapter)
2431 {
2432 	device_t        dev = adapter->dev;
2433 	struct 		ix_queue *que = adapter->queues;
2434 	struct  	tx_ring *txr = adapter->tx_rings;
2435 	int 		error, rid, vector = 0;
2436 	int		cpu_id = 0;
2437 #ifdef	RSS
2438 	cpuset_t	cpu_mask;
2439 #endif
2440 
2441 #ifdef	RSS
2442 	/*
2443 	 * If we're doing RSS, the number of queues needs to
2444 	 * match the number of RSS buckets that are configured.
2445 	 *
2446 	 * + If there's more queues than RSS buckets, we'll end
2447 	 *   up with queues that get no traffic.
2448 	 *
2449 	 * + If there's more RSS buckets than queues, we'll end
2450 	 *   up having multiple RSS buckets map to the same queue,
2451 	 *   so there'll be some contention.
2452 	 */
2453 	if (adapter->num_queues != rss_getnumbuckets()) {
2454 		device_printf(dev,
2455 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
2456 		    "; performance will be impacted.\n",
2457 		    __func__,
2458 		    adapter->num_queues,
2459 		    rss_getnumbuckets());
2460 	}
2461 #endif
2462 
2463 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2464 		rid = vector + 1;
2465 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2466 		    RF_SHAREABLE | RF_ACTIVE);
2467 		if (que->res == NULL) {
2468 			device_printf(dev,"Unable to allocate"
2469 		    	    " bus resource: que interrupt [%d]\n", vector);
2470 			return (ENXIO);
2471 		}
2472 		/* Set the handler function */
2473 		error = bus_setup_intr(dev, que->res,
2474 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2475 		    ixgbe_msix_que, que, &que->tag);
2476 		if (error) {
2477 			que->res = NULL;
2478 			device_printf(dev, "Failed to register QUE handler");
2479 			return (error);
2480 		}
2481 #if __FreeBSD_version >= 800504
2482 		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2483 #endif
2484 		que->msix = vector;
2485 		adapter->active_queues |= (u64)(1 << que->msix);
2486 #ifdef	RSS
2487 		/*
2488 		 * The queue ID is used as the RSS layer bucket ID.
2489 		 * We look up the queue ID -> RSS CPU ID and select
2490 		 * that.
2491 		 */
2492 		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2493 #else
2494 		/*
2495 		 * Bind the msix vector, and thus the
2496 		 * rings to the corresponding cpu.
2497 		 *
2498 		 * This just happens to match the default RSS round-robin
2499 		 * bucket -> queue -> CPU allocation.
2500 		 */
2501 		if (adapter->num_queues > 1)
2502 			cpu_id = i;
2503 #endif
2504 		if (adapter->num_queues > 1)
2505 			bus_bind_intr(dev, que->res, cpu_id);
2506 #ifdef IXGBE_DEBUG
2507 #ifdef	RSS
2508 		device_printf(dev,
2509 		    "Bound RSS bucket %d to CPU %d\n",
2510 		    i, cpu_id);
2511 #else
2512 		device_printf(dev,
2513 		    "Bound queue %d to cpu %d\n",
2514 		    i, cpu_id);
2515 #endif
2516 #endif /* IXGBE_DEBUG */
2517 
2518 
2519 #ifndef IXGBE_LEGACY_TX
2520 		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2521 #endif
2522 		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2523 		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2524 		    taskqueue_thread_enqueue, &que->tq);
2525 #ifdef	RSS
2526 		CPU_SETOF(cpu_id, &cpu_mask);
2527 		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2528 		    &cpu_mask,
2529 		    "%s (bucket %d)",
2530 		    device_get_nameunit(adapter->dev),
2531 		    cpu_id);
2532 #else
2533 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
2534 		    device_get_nameunit(adapter->dev), i);
2535 #endif
2536 	}
2537 
2538 	/* and Link */
2539 	rid = vector + 1;
2540 	adapter->res = bus_alloc_resource_any(dev,
2541     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2542 	if (!adapter->res) {
2543 		device_printf(dev,"Unable to allocate"
2544     	    " bus resource: Link interrupt [%d]\n", rid);
2545 		return (ENXIO);
2546 	}
2547 	/* Set the link handler function */
2548 	error = bus_setup_intr(dev, adapter->res,
2549 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2550 	    ixgbe_msix_link, adapter, &adapter->tag);
2551 	if (error) {
2552 		adapter->res = NULL;
2553 		device_printf(dev, "Failed to register LINK handler");
2554 		return (error);
2555 	}
2556 #if __FreeBSD_version >= 800504
2557 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2558 #endif
2559 	adapter->vector = vector;
2560 	/* Tasklets for Link, SFP and Multispeed Fiber */
2561 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2562 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2563 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2564 #ifdef PCI_IOV
2565 	TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2566 #endif
2567 	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2568 #ifdef IXGBE_FDIR
2569 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2570 #endif
2571 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2572 	    taskqueue_thread_enqueue, &adapter->tq);
2573 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2574 	    device_get_nameunit(adapter->dev));
2575 
2576 	return (0);
2577 }
2578 
2579 /*
2580  * Setup Either MSI/X or MSI
2581  */
2582 static int
2583 ixgbe_setup_msix(struct adapter *adapter)
2584 {
2585 	device_t dev = adapter->dev;
2586 	int rid, want, queues, msgs;
2587 
2588 	/* Override by tunable */
2589 	if (ixgbe_enable_msix == 0)
2590 		goto msi;
2591 
2592 	/* First try MSI/X */
2593 	msgs = pci_msix_count(dev);
2594 	if (msgs == 0)
2595 		goto msi;
2596 	rid = PCIR_BAR(MSIX_82598_BAR);
2597 	adapter->msix_mem = bus_alloc_resource_any(dev,
2598 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2599 	if (adapter->msix_mem == NULL) {
2600 		rid += 4;	/* 82599 maps in higher BAR */
2601 		adapter->msix_mem = bus_alloc_resource_any(dev,
2602 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2603 	}
2604 	if (adapter->msix_mem == NULL) {
2605 		/* May not be enabled */
2606 		device_printf(adapter->dev,
2607 		    "Unable to map MSIX table \n");
2608 		goto msi;
2609 	}
2610 
2611 	/* Figure out a reasonable auto config value */
2612 	queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus;
2613 
2614 #ifdef	RSS
2615 	/* If we're doing RSS, clamp at the number of RSS buckets */
2616 	if (queues > rss_getnumbuckets())
2617 		queues = rss_getnumbuckets();
2618 #endif
2619 
2620 	if (ixgbe_num_queues != 0)
2621 		queues = ixgbe_num_queues;
2622 	/* Set max queues to 8 when autoconfiguring */
2623 	else if ((ixgbe_num_queues == 0) && (queues > 8))
2624 		queues = 8;
2625 
2626 	/* reflect correct sysctl value */
2627 	ixgbe_num_queues = queues;
2628 
2629 	/*
2630 	** Want one vector (RX/TX pair) per queue
2631 	** plus an additional for Link.
2632 	*/
2633 	want = queues + 1;
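	/*
	 * Example (illustrative): with 8 CPUs and 10 MSIX messages,
	 * queues = 8 and want = 9, so 9 vectors are allocated.  The
	 * fallback below can only trigger when the ixgbe_num_queues
	 * tunable asks for more queues than there are messages.
	 */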
2634 	if (msgs >= want)
2635 		msgs = want;
2636 	else {
2637 		device_printf(adapter->dev,
2638 		    "MSIX Configuration Problem, "
2639 		    "%d vectors but %d queues wanted!\n",
2640 		    msgs, want);
2641 		goto msi;
2642 	}
2643 	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2644 		device_printf(adapter->dev,
2645 		    "Using MSIX interrupts with %d vectors\n", msgs);
2646 		adapter->num_queues = queues;
2647 		return (msgs);
2648 	}
2649 	/*
2650 	** If MSIX alloc failed or provided us with
2651 	** less than needed, free and fall through to MSI
2652 	*/
2653 	pci_release_msi(dev);
2654 
2655 msi:
2656 	if (adapter->msix_mem != NULL) {
2657 		bus_release_resource(dev, SYS_RES_MEMORY,
2658 		    rid, adapter->msix_mem);
2659 		adapter->msix_mem = NULL;
2660 	}
2661 	msgs = 1;
2662 	if (pci_alloc_msi(dev, &msgs) == 0) {
2663 		device_printf(adapter->dev, "Using an MSI interrupt\n");
2664 		return (msgs);
2665 	}
2666 	device_printf(adapter->dev, "Using a Legacy interrupt\n");
2667 	return (0);
2668 }
2669 
2670 
2671 static int
2672 ixgbe_allocate_pci_resources(struct adapter *adapter)
2673 {
2674 	int             rid;
2675 	device_t        dev = adapter->dev;
2676 
2677 	rid = PCIR_BAR(0);
2678 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2679 	    &rid, RF_ACTIVE);
2680 
2681 	if (!(adapter->pci_mem)) {
2682 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2683 		return (ENXIO);
2684 	}
2685 
2686 	/* Save bus_space values for READ/WRITE_REG macros */
2687 	adapter->osdep.mem_bus_space_tag =
2688 		rman_get_bustag(adapter->pci_mem);
2689 	adapter->osdep.mem_bus_space_handle =
2690 		rman_get_bushandle(adapter->pci_mem);
2691 	/* Set hw values for shared code */
2692 	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2693 	adapter->hw.back = adapter;
2694 
2695 	/* Default to 1 queue if MSI-X setup fails */
2696 	adapter->num_queues = 1;
2697 
2698 	/*
2699 	** Now set up MSI or MSI-X; this should
2700 	** return the number of supported
2701 	** vectors (it will be 1 for MSI).
2702 	*/
2703 	adapter->msix = ixgbe_setup_msix(adapter);
2704 	return (0);
2705 }
2706 
2707 static void
2708 ixgbe_free_pci_resources(struct adapter * adapter)
2709 {
2710 	struct 		ix_queue *que = adapter->queues;
2711 	device_t	dev = adapter->dev;
2712 	int		rid, memrid;
2713 
2714 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2715 		memrid = PCIR_BAR(MSIX_82598_BAR);
2716 	else
2717 		memrid = PCIR_BAR(MSIX_82599_BAR);
2718 
2719 	/*
2720 	** There is a slight possibility of a failure mode
2721 	** in attach that will result in entering this function
2722 	** before interrupt resources have been initialized, and
2723 	** in that case we do not want to execute the loops below.
2724 	** We can detect this reliably by the state of the adapter's
2725 	** res pointer.
2726 	*/
2727 	if (adapter->res == NULL)
2728 		goto mem;
2729 
2730 	/*
2731 	**  Release all msix queue resources:
2732 	*/
2733 	for (int i = 0; i < adapter->num_queues; i++, que++) {
2734 		rid = que->msix + 1;
2735 		if (que->tag != NULL) {
2736 			bus_teardown_intr(dev, que->res, que->tag);
2737 			que->tag = NULL;
2738 		}
2739 		if (que->res != NULL)
2740 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2741 	}
2742 
2743 
2744 	/* Clean the Legacy or Link interrupt last */
2745 	if (adapter->vector) /* we are doing MSIX */
2746 		rid = adapter->vector + 1;
2747 	else
2748 		rid = (adapter->msix != 0) ? 1 : 0;
2749 
2750 	if (adapter->tag != NULL) {
2751 		bus_teardown_intr(dev, adapter->res, adapter->tag);
2752 		adapter->tag = NULL;
2753 	}
2754 	if (adapter->res != NULL)
2755 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2756 
2757 mem:
2758 	if (adapter->msix)
2759 		pci_release_msi(dev);
2760 
2761 	if (adapter->msix_mem != NULL)
2762 		bus_release_resource(dev, SYS_RES_MEMORY,
2763 		    memrid, adapter->msix_mem);
2764 
2765 	if (adapter->pci_mem != NULL)
2766 		bus_release_resource(dev, SYS_RES_MEMORY,
2767 		    PCIR_BAR(0), adapter->pci_mem);
2768 
2769 	return;
2770 }
2771 
2772 /*********************************************************************
2773  *
2774  *  Setup networking device structure and register an interface.
2775  *
2776  **********************************************************************/
2777 static int
2778 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2779 {
2780 	struct ifnet   *ifp;
2781 
2782 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2783 
2784 	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2785 	if (ifp == NULL) {
2786 		device_printf(dev, "can not allocate ifnet structure\n");
2787 		return (-1);
2788 	}
2789 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2790 	ifp->if_baudrate = IF_Gbps(10);
2791 	ifp->if_init = ixgbe_init;
2792 	ifp->if_softc = adapter;
2793 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2794 	ifp->if_ioctl = ixgbe_ioctl;
2795 #if __FreeBSD_version >= 1100036
2796 	if_setgetcounterfn(ifp, ixgbe_get_counter);
2797 #endif
2798 #if __FreeBSD_version >= 1100045
2799 	/* TSO parameters */
2800 	ifp->if_hw_tsomax = 65518;
2801 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2802 	ifp->if_hw_tsomaxsegsize = 2048;
2803 #endif
2804 #ifndef IXGBE_LEGACY_TX
2805 	ifp->if_transmit = ixgbe_mq_start;
2806 	ifp->if_qflush = ixgbe_qflush;
2807 #else
2808 	ifp->if_start = ixgbe_start;
2809 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2810 	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2811 	IFQ_SET_READY(&ifp->if_snd);
2812 #endif
2813 
2814 	ether_ifattach(ifp, adapter->hw.mac.addr);
2815 
2816 	adapter->max_frame_size =
2817 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2818 
2819 	/*
2820 	 * Tell the upper layer(s) we support long frames.
2821 	 */
2822 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2823 
2824 	/* Set capability flags */
2825 	ifp->if_capabilities |= IFCAP_RXCSUM
2826 			     |  IFCAP_TXCSUM
2827 			     |  IFCAP_RXCSUM_IPV6
2828 			     |  IFCAP_TXCSUM_IPV6
2829 			     |  IFCAP_TSO4
2830 			     |  IFCAP_TSO6
2831 			     |  IFCAP_LRO
2832 			     |  IFCAP_VLAN_HWTAGGING
2833 			     |  IFCAP_VLAN_HWTSO
2834 			     |  IFCAP_VLAN_HWCSUM
2835 			     |  IFCAP_JUMBO_MTU
2836 			     |  IFCAP_VLAN_MTU
2837 			     |  IFCAP_HWSTATS;
2838 
2839 	/* Enable the above capabilities by default */
2840 	ifp->if_capenable = ifp->if_capabilities;
2841 
2842 	/*
2843 	** Don't turn this on by default: if vlans are
2844 	** created on another pseudo device (e.g. lagg)
2845 	** then vlan events are not passed through, breaking
2846 	** operation, but with HW FILTER off it works. If
2847 	** you use vlans directly on the ixgbe driver you can
2848 	** enable this and get full hardware tag filtering.
2849 	*/
2850 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2851 
2852 	/*
2853 	 * Specify the media types supported by this adapter and register
2854 	 * callbacks to update media and link information
2855 	 */
2856 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2857 		    ixgbe_media_status);
2858 
2859 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
2860 	ixgbe_add_media_types(adapter);
2861 
2862 	/* Set autoselect media by default */
2863 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2864 
2865 	return (0);
2866 }
2867 
2868 static void
2869 ixgbe_add_media_types(struct adapter *adapter)
2870 {
2871 	struct ixgbe_hw *hw = &adapter->hw;
2872 	device_t dev = adapter->dev;
2873 	int layer;
2874 
2875 	layer = adapter->phy_layer;
2876 
2877 	/* Media types with matching FreeBSD media defines */
2878 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2879 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2880 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2881 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2882 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2883 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2884 
2885 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2886 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2887 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2888 
2889 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
2890 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2891 		if (hw->phy.multispeed_fiber)
2892 			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2893 	}
2894 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2895 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2896 		if (hw->phy.multispeed_fiber)
2897 			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2898 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2899 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2900 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2901 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2902 
2903 #ifdef IFM_ETH_XTYPE
2904 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2905 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2906 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
2907 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2908 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2909 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2910 #else
2911 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2912 		device_printf(dev, "Media supported: 10GbaseKR\n");
2913 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2914 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2915 	}
2916 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2917 		device_printf(dev, "Media supported: 10GbaseKX4\n");
2918 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2919 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2920 	}
2921 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2922 		device_printf(dev, "Media supported: 1000baseKX\n");
2923 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2924 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2925 	}
2926 #endif
2927 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
2928 		device_printf(dev, "Media supported: 1000baseBX\n");
2929 
2930 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2931 		ifmedia_add(&adapter->media,
2932 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2933 		ifmedia_add(&adapter->media,
2934 		    IFM_ETHER | IFM_1000_T, 0, NULL);
2935 	}
2936 
2937 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2938 }
2939 
2940 static void
2941 ixgbe_config_link(struct adapter *adapter)
2942 {
2943 	struct ixgbe_hw *hw = &adapter->hw;
2944 	u32	autoneg, err = 0;
2945 	bool	sfp, negotiate;
2946 
2947 	sfp = ixgbe_is_sfp(hw);
2948 
2949 	if (sfp) {
2950 		taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2951 	} else {
2952 		if (hw->mac.ops.check_link)
2953 			err = ixgbe_check_link(hw, &adapter->link_speed,
2954 			    &adapter->link_up, FALSE);
2955 		if (err)
2956 			goto out;
2957 		autoneg = hw->phy.autoneg_advertised;
2958 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2959 			err = hw->mac.ops.get_link_capabilities(hw,
2960 			    &autoneg, &negotiate);
2961 		if (err)
2962 			goto out;
2963 		if (hw->mac.ops.setup_link)
2964 			err = hw->mac.ops.setup_link(hw,
2965 			    autoneg, adapter->link_up);
2966 	}
2967 out:
2968 	return;
2969 }
2970 
2971 
2972 /*********************************************************************
2973  *
2974  *  Enable transmit units.
2975  *
2976  **********************************************************************/
2977 static void
2978 ixgbe_initialize_transmit_units(struct adapter *adapter)
2979 {
2980 	struct tx_ring	*txr = adapter->tx_rings;
2981 	struct ixgbe_hw	*hw = &adapter->hw;
2982 
2983 	/* Setup the Base and Length of the Tx Descriptor Ring */
2984 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2985 		u64	tdba = txr->txdma.dma_paddr;
2986 		u32	txctrl = 0;
2987 		int	j = txr->me;
2988 
2989 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
2990 		       (tdba & 0x00000000ffffffffULL));
2991 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
2992 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
2993 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2994 
2995 		/* Setup the HW Tx Head and Tail descriptor pointers */
2996 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
2997 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
2998 
2999 		/* Cache the tail address */
3000 		txr->tail = IXGBE_TDT(j);
3001 
3002 		/* Disable Head Writeback */
3003 		/*
3004 		 * Note: for X550 series devices, these registers are actually
3005 		 * prefixed with TPH_ instead of DCA_, but the addresses and
3006 		 * fields remain the same.
3007 		 */
3008 		switch (hw->mac.type) {
3009 		case ixgbe_mac_82598EB:
3010 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
3011 			break;
3012 		default:
3013 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
3014 			break;
3015 		}
3016 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3017 		switch (hw->mac.type) {
3018 		case ixgbe_mac_82598EB:
3019 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
3020 			break;
3021 		default:
3022 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
3023 			break;
3024 		}
3025 
3026 	}
3027 
3028 	if (hw->mac.type != ixgbe_mac_82598EB) {
3029 		u32 dmatxctl, rttdcs;
3030 #ifdef PCI_IOV
3031 		enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
3032 #endif
3033 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3034 		dmatxctl |= IXGBE_DMATXCTL_TE;
3035 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3036 		/* Disable arbiter to set MTQC */
3037 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3038 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
3039 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3040 #ifdef PCI_IOV
3041 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
3042 #else
3043 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
3044 #endif
3045 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3046 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3047 	}
3048 
3049 	return;
3050 }
3051 
3052 static void
3053 ixgbe_initialize_rss_mapping(struct adapter *adapter)
3054 {
3055 	struct ixgbe_hw	*hw = &adapter->hw;
3056 	u32 reta = 0, mrqc, rss_key[10];
3057 	int queue_id, table_size, index_mult;
3058 #ifdef	RSS
3059 	u32 rss_hash_config;
3060 #endif
3061 #ifdef PCI_IOV
3062 	enum ixgbe_iov_mode mode;
3063 #endif
3064 
3065 #ifdef	RSS
3066 	/* Fetch the configured RSS key */
3067 	rss_getkey((uint8_t *) &rss_key);
3068 #else
3069 	/* set up random bits */
3070 	arc4rand(&rss_key, sizeof(rss_key), 0);
3071 #endif
3072 
3073 	/* Set multiplier for RETA setup and table size based on MAC */
3074 	index_mult = 0x1;
3075 	table_size = 128;
3076 	switch (adapter->hw.mac.type) {
3077 	case ixgbe_mac_82598EB:
3078 		index_mult = 0x11;
3079 		break;
3080 	case ixgbe_mac_X550:
3081 	case ixgbe_mac_X550EM_x:
3082 		table_size = 512;
3083 		break;
3084 	default:
3085 		break;
3086 	}
3087 
3088 	/* Set up the redirection table */
3089 	for (int i = 0, j = 0; i < table_size; i++, j++) {
3090 		if (j == adapter->num_queues) j = 0;
3091 #ifdef	RSS
3092 		/*
3093 		 * Fetch the RSS bucket id for the given indirection entry.
3094 		 * Cap it at the number of configured buckets (which is
3095 		 * num_queues.)
3096 		 */
3097 		queue_id = rss_get_indirection_to_bucket(i);
3098 		queue_id = queue_id % adapter->num_queues;
3099 #else
3100 		queue_id = (j * index_mult);
3101 #endif
3102 		/*
3103 		 * The low 8 bits are for hash value (n+0);
3104 		 * The next 8 bits are for hash value (n+1), etc.
3105 		 */
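		/*
		 * e.g. with four queues (index_mult 1), entries 0-3
		 * pack into RETA(0) as 0x03020100.
		 */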
3106 		reta = reta >> 8;
3107 		reta = reta | ( ((uint32_t) queue_id) << 24);
3108 		if ((i & 3) == 3) {
3109 			if (i < 128)
3110 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3111 			else
3112 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
3113 			reta = 0;
3114 		}
3115 	}
3116 
3117 	/* Now fill our hash function seeds */
3118 	for (int i = 0; i < 10; i++)
3119 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
3120 
3121 	/* Perform hash on these packet types */
3122 #ifdef	RSS
3123 	mrqc = IXGBE_MRQC_RSSEN;
3124 	rss_hash_config = rss_gethashconfig();
3125 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3126 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3127 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3128 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3129 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3130 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3131 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3132 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3133 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3134 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3135 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
3136 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3137 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3138 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3139 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
3140 		device_printf(adapter->dev,
3141 		    "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
3142 		    "but not supported\n", __func__);
3143 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3144 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3145 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
3146 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3147 #else
3148 	/*
3149 	 * Disable UDP - IP fragments aren't currently being handled
3150 	 * and so we end up with a mix of 2-tuple and 4-tuple
3151 	 * traffic.
3152 	 */
3153 	mrqc = IXGBE_MRQC_RSSEN
3154 	     | IXGBE_MRQC_RSS_FIELD_IPV4
3155 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3156 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3157 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3158 	     | IXGBE_MRQC_RSS_FIELD_IPV6
3159 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3160 	;
3161 #endif /* RSS */
3162 #ifdef PCI_IOV
3163 	mode = ixgbe_get_iov_mode(adapter);
3164 	mrqc |= ixgbe_get_mrqc(mode);
3165 #endif
3166 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3167 }
3168 
3169 
3170 /*********************************************************************
3171  *
3172  *  Setup receive registers and features.
3173  *
3174  **********************************************************************/
3175 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3176 
3177 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
3178 
3179 static void
3180 ixgbe_initialize_receive_units(struct adapter *adapter)
3181 {
3182 	struct	rx_ring	*rxr = adapter->rx_rings;
3183 	struct ixgbe_hw	*hw = &adapter->hw;
3184 	struct ifnet   *ifp = adapter->ifp;
3185 	u32		bufsz, fctrl, srrctl, rxcsum;
3186 	u32		hlreg;
3187 
3188 	/*
3189 	 * Make sure receives are disabled while
3190 	 * setting up the descriptor ring
3191 	 */
3192 	ixgbe_disable_rx(hw);
3193 
3194 	/* Enable broadcasts */
3195 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3196 	fctrl |= IXGBE_FCTRL_BAM;
3197 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3198 		fctrl |= IXGBE_FCTRL_DPF;
3199 		fctrl |= IXGBE_FCTRL_PMCF;
3200 	}
3201 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3202 
3203 	/* Set for Jumbo Frames? */
3204 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3205 	if (ifp->if_mtu > ETHERMTU)
3206 		hlreg |= IXGBE_HLREG0_JUMBOEN;
3207 	else
3208 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3209 #ifdef DEV_NETMAP
3210 	/* crcstrip is conditional in netmap (in RDRXCTL too ?) */
3211 	if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3212 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3213 	else
3214 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3215 #endif /* DEV_NETMAP */
3216 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3217 
3218 	bufsz = (adapter->rx_mbuf_sz +
3219 	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
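	/* BSIZEPKT is in 1KB units (shift of 10): 2048-byte clusters -> 2 */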
3220 
3221 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3222 		u64 rdba = rxr->rxdma.dma_paddr;
3223 		int j = rxr->me;
3224 
3225 		/* Setup the Base and Length of the Rx Descriptor Ring */
3226 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3227 			       (rdba & 0x00000000ffffffffULL));
3228 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3229 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3230 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3231 
3232 		/* Set up the SRRCTL register */
3233 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3234 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3235 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3236 		srrctl |= bufsz;
3237 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3238 
3239 		/*
3240 		 * Set DROP_EN iff we have no flow control and >1 queue.
3241 		 * Note that srrctl was cleared shortly before during reset,
3242 		 * so we do not need to clear the bit, but do it just in case
3243 		 * this code is moved elsewhere.
3244 		 */
3245 		if (adapter->num_queues > 1 &&
3246 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3247 			srrctl |= IXGBE_SRRCTL_DROP_EN;
3248 		} else {
3249 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3250 		}
3251 
3252 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3253 
3254 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
3255 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3256 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3257 
3258 		/* Set the driver rx tail address */
3259 		rxr->tail =  IXGBE_RDT(rxr->me);
3260 	}
3261 
3262 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3263 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3264 			      IXGBE_PSRTYPE_UDPHDR |
3265 			      IXGBE_PSRTYPE_IPV4HDR |
3266 			      IXGBE_PSRTYPE_IPV6HDR;
3267 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3268 	}
3269 
3270 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3271 
3272 	ixgbe_initialize_rss_mapping(adapter);
3273 
3274 	if (adapter->num_queues > 1) {
3275 		/* RSS and RX IPP Checksum are mutually exclusive */
3276 		rxcsum |= IXGBE_RXCSUM_PCSD;
3277 	}
3278 
3279 	if (ifp->if_capenable & IFCAP_RXCSUM)
3280 		rxcsum |= IXGBE_RXCSUM_PCSD;
3281 
3282 	/* This is useful for calculating UDP/IP fragment checksums */
3283 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3284 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
3285 
3286 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3287 
3288 	return;
3289 }
3290 
3291 
3292 /*
3293 ** This routine is run via a vlan config EVENT;
3294 ** it enables us to use the HW Filter table since
3295 ** we can get the vlan id. This just creates the
3296 ** entry in the soft version of the VFTA; init will
3297 ** repopulate the real table.
3298 */
3299 static void
3300 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3301 {
3302 	struct adapter	*adapter = ifp->if_softc;
3303 	u16		index, bit;
3304 
3305 	if (ifp->if_softc !=  arg)   /* Not our event */
3306 		return;
3307 
3308 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3309 		return;
3310 
3311 	IXGBE_CORE_LOCK(adapter);
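	/*
	 * The shadow VFTA is 128 32-bit words: vlan N maps to word
	 * N / 32, bit N % 32 (e.g. vtag 100 -> index 3, bit 4).
	 */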
3312 	index = (vtag >> 5) & 0x7F;
3313 	bit = vtag & 0x1F;
3314 	adapter->shadow_vfta[index] |= (1 << bit);
3315 	++adapter->num_vlans;
3316 	ixgbe_setup_vlan_hw_support(adapter);
3317 	IXGBE_CORE_UNLOCK(adapter);
3318 }
3319 
3320 /*
3321 ** This routine is run via a vlan
3322 ** unconfig EVENT; it removes our entry
3323 ** from the soft vfta.
3324 */
3325 static void
3326 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3327 {
3328 	struct adapter	*adapter = ifp->if_softc;
3329 	u16		index, bit;
3330 
3331 	if (ifp->if_softc !=  arg)
3332 		return;
3333 
3334 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3335 		return;
3336 
3337 	IXGBE_CORE_LOCK(adapter);
3338 	index = (vtag >> 5) & 0x7F;
3339 	bit = vtag & 0x1F;
3340 	adapter->shadow_vfta[index] &= ~(1 << bit);
3341 	--adapter->num_vlans;
3342 	/* Re-init to load the changes */
3343 	ixgbe_setup_vlan_hw_support(adapter);
3344 	IXGBE_CORE_UNLOCK(adapter);
3345 }
3346 
3347 static void
3348 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3349 {
3350 	struct ifnet 	*ifp = adapter->ifp;
3351 	struct ixgbe_hw *hw = &adapter->hw;
3352 	struct rx_ring	*rxr;
3353 	u32		ctrl;
3354 
3355 
3356 	/*
3357 	** We get here thru init_locked, meaning
3358 	** a soft reset; this has already cleared
3359 	** the VFTA and other state, so if no
3360 	** vlans have been registered do nothing.
3361 	*/
3362 	if (adapter->num_vlans == 0)
3363 		return;
3364 
3365 	/* Setup the queues for vlans */
3366 	for (int i = 0; i < adapter->num_queues; i++) {
3367 		rxr = &adapter->rx_rings[i];
3368 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
3369 		if (hw->mac.type != ixgbe_mac_82598EB) {
3370 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3371 			ctrl |= IXGBE_RXDCTL_VME;
3372 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
3373 		}
3374 		rxr->vtag_strip = TRUE;
3375 	}
3376 
3377 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3378 		return;
3379 	/*
3380 	** A soft reset zeroes out the VFTA, so
3381 	** we need to repopulate it now.
3382 	*/
3383 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3384 		if (adapter->shadow_vfta[i] != 0)
3385 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3386 			    adapter->shadow_vfta[i]);
3387 
3388 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3389 	/* Enable the Filter Table if enabled */
3390 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3391 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3392 		ctrl |= IXGBE_VLNCTRL_VFE;
3393 	}
3394 	if (hw->mac.type == ixgbe_mac_82598EB)
3395 		ctrl |= IXGBE_VLNCTRL_VME;
3396 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3397 }
3398 
3399 static void
3400 ixgbe_enable_intr(struct adapter *adapter)
3401 {
3402 	struct ixgbe_hw	*hw = &adapter->hw;
3403 	struct ix_queue	*que = adapter->queues;
3404 	u32		mask, fwsm;
3405 
3406 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3407 	/* Enable Fan Failure detection */
3408 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3409 		mask |= IXGBE_EIMS_GPI_SDP1;
3410 
3411 	switch (adapter->hw.mac.type) {
3412 		case ixgbe_mac_82599EB:
3413 			mask |= IXGBE_EIMS_ECC;
3414 			/* Temperature sensor on some adapters */
3415 			mask |= IXGBE_EIMS_GPI_SDP0;
3416 			/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3417 			mask |= IXGBE_EIMS_GPI_SDP1;
3418 			mask |= IXGBE_EIMS_GPI_SDP2;
3419 #ifdef IXGBE_FDIR
3420 			mask |= IXGBE_EIMS_FLOW_DIR;
3421 #endif
3422 #ifdef PCI_IOV
3423 			mask |= IXGBE_EIMS_MAILBOX;
3424 #endif
3425 			break;
3426 		case ixgbe_mac_X540:
3427 			/* Detect if Thermal Sensor is enabled */
3428 			fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3429 			if (fwsm & IXGBE_FWSM_TS_ENABLED)
3430 				mask |= IXGBE_EIMS_TS;
3431 			mask |= IXGBE_EIMS_ECC;
3432 #ifdef IXGBE_FDIR
3433 			mask |= IXGBE_EIMS_FLOW_DIR;
3434 #endif
3435 			break;
3436 		case ixgbe_mac_X550:
3437 		case ixgbe_mac_X550EM_x:
3438 			/* MAC thermal sensor is automatically enabled */
3439 			mask |= IXGBE_EIMS_TS;
3440 			/* Some devices use SDP0 for important information */
3441 			if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3442 			    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3443 				mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3444 			mask |= IXGBE_EIMS_ECC;
3445 #ifdef IXGBE_FDIR
3446 			mask |= IXGBE_EIMS_FLOW_DIR;
3447 #endif
3448 #ifdef PCI_IOV
3449 			mask |= IXGBE_EIMS_MAILBOX;
3450 #endif
3451 		/* falls through */
3452 		default:
3453 			break;
3454 	}
3455 
3456 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3457 
3458 	/* With MSI-X we use auto clear */
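	/*
	** Bits set in EIAC are auto-cleared by hardware when the
	** MSI-X vector fires; link and "other" causes stay manual
	** so their handlers can re-arm them explicitly.
	*/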
3459 	if (adapter->msix_mem) {
3460 		mask = IXGBE_EIMS_ENABLE_MASK;
3461 		/* Don't autoclear Link */
3462 		mask &= ~IXGBE_EIMS_OTHER;
3463 		mask &= ~IXGBE_EIMS_LSC;
3464 #ifdef PCI_IOV
3465 		mask &= ~IXGBE_EIMS_MAILBOX;
3466 #endif
3467 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3468 	}
3469 
3470 	/*
3471 	** Now enable all queues. This is done separately to
3472 	** allow handling of the extended (beyond 32) MSIX
3473 	** vectors that can be used by the 82599.
3474 	*/
3475 	for (int i = 0; i < adapter->num_queues; i++, que++)
3476 		ixgbe_enable_queue(adapter, que->msix);
3477 
3478 	IXGBE_WRITE_FLUSH(hw);
3479 
3480 	return;
3481 }
3482 
3483 static void
3484 ixgbe_disable_intr(struct adapter *adapter)
3485 {
3486 	if (adapter->msix_mem)
3487 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3488 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3489 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3490 	} else {
3491 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3492 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3493 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3494 	}
3495 	IXGBE_WRITE_FLUSH(&adapter->hw);
3496 	return;
3497 }
3498 
3499 /*
3500 ** Get the width and transaction speed of
3501 ** the slot this adapter is plugged into.
3502 */
3503 static void
3504 ixgbe_get_slot_info(struct adapter *adapter)
3505 {
3506 	device_t		dev = adapter->dev;
3507 	struct ixgbe_hw		*hw = &adapter->hw;
3508 	struct ixgbe_mac_info	*mac = &hw->mac;
3509 	u16			link;
3510 	u32			offset;
3511 
3512 	/* For most devices simply call the shared code routine */
3513 	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3514 		ixgbe_get_bus_info(hw);
3515 		/* These devices don't use PCI-E */
3516 		switch (hw->mac.type) {
3517 		case ixgbe_mac_X550EM_x:
3518 			return;
3519 		default:
3520 			goto display;
3521 		}
3522 	}
3523 
3524 	/*
3525 	** For the Quad port adapter we need to parse back
3526 	** up the PCI tree to find the speed of the expansion
3527 	** slot into which this adapter is plugged. A bit more work.
3528 	*/
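	/*
	** Each double device_get_parent() call crosses the
	** intervening pciX bus device to reach the pcib above it.
	*/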
3529 	dev = device_get_parent(device_get_parent(dev));
3530 #ifdef IXGBE_DEBUG
3531 	device_printf(dev, "parent pcib = %x,%x,%x\n",
3532 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3533 #endif
3534 	dev = device_get_parent(device_get_parent(dev));
3535 #ifdef IXGBE_DEBUG
3536 	device_printf(dev, "slot pcib = %x,%x,%x\n",
3537 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3538 #endif
3539 	/* Now get the PCI Express Capabilities offset */
3540 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3541 	/* ...and read the Link Status Register */
3542 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3543 	switch (link & IXGBE_PCI_LINK_WIDTH) {
3544 	case IXGBE_PCI_LINK_WIDTH_1:
3545 		hw->bus.width = ixgbe_bus_width_pcie_x1;
3546 		break;
3547 	case IXGBE_PCI_LINK_WIDTH_2:
3548 		hw->bus.width = ixgbe_bus_width_pcie_x2;
3549 		break;
3550 	case IXGBE_PCI_LINK_WIDTH_4:
3551 		hw->bus.width = ixgbe_bus_width_pcie_x4;
3552 		break;
3553 	case IXGBE_PCI_LINK_WIDTH_8:
3554 		hw->bus.width = ixgbe_bus_width_pcie_x8;
3555 		break;
3556 	default:
3557 		hw->bus.width = ixgbe_bus_width_unknown;
3558 		break;
3559 	}
3560 
3561 	switch (link & IXGBE_PCI_LINK_SPEED) {
3562 	case IXGBE_PCI_LINK_SPEED_2500:
3563 		hw->bus.speed = ixgbe_bus_speed_2500;
3564 		break;
3565 	case IXGBE_PCI_LINK_SPEED_5000:
3566 		hw->bus.speed = ixgbe_bus_speed_5000;
3567 		break;
3568 	case IXGBE_PCI_LINK_SPEED_8000:
3569 		hw->bus.speed = ixgbe_bus_speed_8000;
3570 		break;
3571 	default:
3572 		hw->bus.speed = ixgbe_bus_speed_unknown;
3573 		break;
3574 	}
3575 
3576 	mac->ops.set_lan_id(hw);
3577 
3578 display:
3579 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
3580 	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3581 	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3582 	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3583 	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3584 	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3585 	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3586 	    ("Unknown"));
3587 
3588 	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3589 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3590 	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
3591 		device_printf(dev, "PCI-Express bandwidth available"
3592 		    " for this card\n     is not sufficient for"
3593 		    " optimal performance.\n");
3594 		device_printf(dev, "For optimal performance a x8 "
3595 		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
3596 	}
3597 	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3598 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3599 	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
3600 		device_printf(dev, "PCI-Express bandwidth available"
3601 		    " for this card\n     is not sufficient for"
3602 		    " optimal performance.\n");
3603 		device_printf(dev, "For optimal performance a x8 "
3604 		    "PCIE Gen3 slot is required.\n");
3605 	}
3606 
3607 	return;
3608 }
3609 
3610 
3611 /*
3612 ** Setup the correct IVAR register for a particular MSIX interrupt
3613 **   (yes this is all very magic and confusing :)
3614 **  - entry is the register array entry
3615 **  - vector is the MSIX vector for this queue
3616 **  - type is RX/TX/MISC
3617 */
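/*
** Each 32-bit IVAR register packs four 8-bit entries, with bit 7
** (IXGBE_IVAR_ALLOC_VAL) marking an entry valid.  On the 82598 the
** RX and TX entries sit in separate 64-entry ranges, hence the
** (type * 64) offset; on later MACs each register pairs two queues,
** hence the entry >> 1 indexing and the 8/16-bit shifts below.
*/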
3618 static void
3619 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3620 {
3621 	struct ixgbe_hw *hw = &adapter->hw;
3622 	u32 ivar, index;
3623 
3624 	vector |= IXGBE_IVAR_ALLOC_VAL;
3625 
3626 	switch (hw->mac.type) {
3627 
3628 	case ixgbe_mac_82598EB:
3629 		if (type == -1)
3630 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3631 		else
3632 			entry += (type * 64);
3633 		index = (entry >> 2) & 0x1F;
3634 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3635 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3636 		ivar |= (vector << (8 * (entry & 0x3)));
3637 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3638 		break;
3639 
3640 	case ixgbe_mac_82599EB:
3641 	case ixgbe_mac_X540:
3642 	case ixgbe_mac_X550:
3643 	case ixgbe_mac_X550EM_x:
3644 		if (type == -1) { /* MISC IVAR */
3645 			index = (entry & 1) * 8;
3646 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3647 			ivar &= ~(0xFF << index);
3648 			ivar |= (vector << index);
3649 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3650 		} else {	/* RX/TX IVARS */
3651 			index = (16 * (entry & 1)) + (8 * type);
3652 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3653 			ivar &= ~(0xFF << index);
3654 			ivar |= (vector << index);
3655 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3656 		}
3657 		break;
3658 	default:
3659 		break;
3660 	}
3661 }
3662 
3663 static void
3664 ixgbe_configure_ivars(struct adapter *adapter)
3665 {
3666 	struct  ix_queue	*que = adapter->queues;
3667 	u32			newitr;
3668 
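	/*
	** EITR's interval field lives in bits 11:3 and counts 2us
	** units, so a target of N interrupts/sec is (500000 / N) << 3,
	** i.e. 4000000 / N, masked to the field width.
	*/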
3669 	if (ixgbe_max_interrupt_rate > 0)
3670 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3671 	else {
3672 		/*
3673 		** Disable DMA coalescing if interrupt moderation is
3674 		** disabled.
3675 		*/
3676 		adapter->dmac = 0;
3677 		newitr = 0;
3678 	}
3679 
3680 	for (int i = 0; i < adapter->num_queues; i++, que++) {
3681 		struct rx_ring *rxr = &adapter->rx_rings[i];
3682 		struct tx_ring *txr = &adapter->tx_rings[i];
3683 		/* First the RX queue entry */
3684 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3685 		/* ... and the TX */
3686 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3687 		/* Set an Initial EITR value */
3688 		IXGBE_WRITE_REG(&adapter->hw,
3689 		    IXGBE_EITR(que->msix), newitr);
3690 	}
3691 
3692 	/* For the Link interrupt */
3693 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3694 }
3695 
3696 /*
3697 ** ixgbe_sfp_probe - called in the local timer to
3698 ** determine whether a port has had optics inserted.
3699 */
3700 static bool
3701 ixgbe_sfp_probe(struct adapter *adapter)
3702 {
3703 	struct ixgbe_hw	*hw = &adapter->hw;
3704 	device_t	dev = adapter->dev;
3705 	bool		result = FALSE;
3706 
3707 	if ((hw->phy.type == ixgbe_phy_nl) &&
3708 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3709 		s32 ret = hw->phy.ops.identify_sfp(hw);
3710 		if (ret)
3711 			goto out;
3712 		ret = hw->phy.ops.reset(hw);
3713 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3714 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3715 			device_printf(dev, "Reload driver with supported module.\n");
3716 			adapter->sfp_probe = FALSE;
3717 			goto out;
3718 		} else
3719 			device_printf(dev, "SFP+ module detected!\n");
3720 		/* We now have supported optics */
3721 		adapter->sfp_probe = FALSE;
3722 		/* Set the optics type so system reports correctly */
3723 		ixgbe_setup_optics(adapter);
3724 		result = TRUE;
3725 	}
3726 out:
3727 	return (result);
3728 }
3729 
3730 /*
3731 ** Tasklet handler for MSIX Link interrupts
3732 **  - run outside interrupt context since it might sleep
3733 */
3734 static void
3735 ixgbe_handle_link(void *context, int pending)
3736 {
3737 	struct adapter  *adapter = context;
3738 	struct ixgbe_hw *hw = &adapter->hw;
3739 
3740 	ixgbe_check_link(hw,
3741 	    &adapter->link_speed, &adapter->link_up, 0);
3742 	ixgbe_update_link_status(adapter);
3743 
3744 	/* Re-enable link interrupts */
3745 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3746 }
3747 
3748 /*
3749 ** Tasklet for handling SFP module interrupts
3750 */
3751 static void
3752 ixgbe_handle_mod(void *context, int pending)
3753 {
3754 	struct adapter  *adapter = context;
3755 	struct ixgbe_hw *hw = &adapter->hw;
3756 	enum ixgbe_phy_type orig_type = hw->phy.type;
3757 	device_t	dev = adapter->dev;
3758 	u32 err;
3759 
3760 	IXGBE_CORE_LOCK(adapter);
3761 
3762 	/* Check to see if the PHY type changed */
3763 	if (hw->phy.ops.identify) {
3764 		hw->phy.type = ixgbe_phy_unknown;
3765 		hw->phy.ops.identify(hw);
3766 	}
3767 
3768 	if (hw->phy.type != orig_type) {
3769 		device_printf(dev, "Detected phy_type %d\n", hw->phy.type);
3770 
3771 		if (hw->phy.type == ixgbe_phy_none) {
3772 			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
3773 			goto out;
3774 		}
3775 
3776 		/* Try to do the initialization that was skipped before */
3777 		if (hw->phy.ops.init)
3778 			hw->phy.ops.init(hw);
3779 		if (hw->phy.ops.reset)
3780 			hw->phy.ops.reset(hw);
3781 	}
3782 
3783 	err = hw->phy.ops.identify_sfp(hw);
3784 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3785 		device_printf(dev,
3786 		    "Unsupported SFP+ module type was detected.\n");
3787 		goto out;
3788 	}
3789 
3790 	err = hw->mac.ops.setup_sfp(hw);
3791 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3792 		device_printf(dev,
3793 		    "Setup failure - unsupported SFP+ module type.\n");
3794 		goto out;
3795 	}
3796 	if (hw->phy.multispeed_fiber)
3797 		taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3798 out:
3799 	/* Update media type */
3800 	switch (hw->mac.ops.get_media_type(hw)) {
3801 		case ixgbe_media_type_fiber:
3802 			adapter->optics = IFM_10G_SR;
3803 			break;
3804 		case ixgbe_media_type_copper:
3805 			adapter->optics = IFM_10G_TWINAX;
3806 			break;
3807 		case ixgbe_media_type_cx4:
3808 			adapter->optics = IFM_10G_CX4;
3809 			break;
3810 		default:
3811 			adapter->optics = 0;
3812 			break;
3813 	}
3814 
3815 	IXGBE_CORE_UNLOCK(adapter);
3816 	return;
3817 }
3818 
3819 
3820 /*
3821 ** Tasklet for handling MSF (multispeed fiber) interrupts
3822 */
3823 static void
3824 ixgbe_handle_msf(void *context, int pending)
3825 {
3826 	struct adapter  *adapter = context;
3827 	struct ixgbe_hw *hw = &adapter->hw;
3828 	u32 autoneg;
3829 	bool negotiate;
3830 
3831 	IXGBE_CORE_LOCK(adapter);
3832 	/* ixgbe_get_supported_physical_layer() will call hw->phy.ops.identify_sfp() */
3833 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3834 
3835 	autoneg = hw->phy.autoneg_advertised;
3836 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3837 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3838 	if (hw->mac.ops.setup_link)
3839 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3840 
3841 	/* Adjust media types shown in ifconfig */
3842 	ifmedia_removeall(&adapter->media);
3843 	ixgbe_add_media_types(adapter);
3844 	IXGBE_CORE_UNLOCK(adapter);
3845 	return;
3846 }
3847 
3848 /*
3849 ** Tasklet for handling interrupts from an external PHY
3850 */
3851 static void
3852 ixgbe_handle_phy(void *context, int pending)
3853 {
3854 	struct adapter  *adapter = context;
3855 	struct ixgbe_hw *hw = &adapter->hw;
3856 	int error;
3857 
3858 	error = hw->phy.ops.handle_lasi(hw);
3859 	if (error == IXGBE_ERR_OVERTEMP)
3860 		device_printf(adapter->dev,
3861 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3862 		    "PHY will downshift to lower power state!\n");
3863 	else if (error)
3864 		device_printf(adapter->dev,
3865 		    "Error handling LASI interrupt: %d\n",
3866 		    error);
3867 	return;
3868 }
3869 
3870 #ifdef IXGBE_FDIR
3871 /*
3872 ** Tasklet for reinitializing the Flow Director filter table
3873 */
3874 static void
3875 ixgbe_reinit_fdir(void *context, int pending)
3876 {
3877 	struct adapter  *adapter = context;
3878 	struct ifnet   *ifp = adapter->ifp;
3879 
3880 	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3881 		return;
3882 	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3883 	adapter->fdir_reinit = 0;
3884 	/* re-enable flow director interrupts */
3885 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3886 	/* Restart the interface */
3887 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3888 	return;
3889 }
3890 #endif
3891 
3892 /*********************************************************************
3893  *
3894  *  Configure DMA Coalescing
3895  *
3896  **********************************************************************/
3897 static void
3898 ixgbe_config_dmac(struct adapter *adapter)
3899 {
3900 	struct ixgbe_hw *hw = &adapter->hw;
3901 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3902 
3903 	if (hw->mac.type < ixgbe_mac_X550 ||
3904 	    !hw->mac.ops.dmac_config)
3905 		return;
3906 
3907 	if (dcfg->watchdog_timer != adapter->dmac ||
3908 	    dcfg->link_speed != adapter->link_speed) {
3909 		dcfg->watchdog_timer = adapter->dmac;
3910 		dcfg->fcoe_en = false;
3911 		dcfg->link_speed = adapter->link_speed;
3912 		dcfg->num_tcs = 1;
3913 
3914 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3915 		    dcfg->watchdog_timer, dcfg->link_speed);
3916 
3917 		hw->mac.ops.dmac_config(hw);
3918 	}
3919 }
3920 
3921 /*
3922  * Checks whether the adapter's ports are capable of
3923  * Wake On LAN by reading the adapter's NVM.
3924  *
3925  * Sets each port's hw->wol_enabled value depending
3926  * on the value read here.
3927  */
3928 static void
3929 ixgbe_check_wol_support(struct adapter *adapter)
3930 {
3931 	struct ixgbe_hw *hw = &adapter->hw;
3932 	u16 dev_caps = 0;
3933 
3934 	/* Find out WoL support for port */
3935 	adapter->wol_support = hw->wol_enabled = 0;
3936 	ixgbe_get_device_caps(hw, &dev_caps);
3937 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3938 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3939 	      hw->bus.func == 0))
3940 		adapter->wol_support = hw->wol_enabled = 1;
3941 
3942 	/* Save initial wake up filter configuration */
3943 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3944 
3945 	return;
3946 }
3947 
3948 /*
3949  * Prepare the adapter/port for LPLU and/or WoL
3950  */
3951 static int
3952 ixgbe_setup_low_power_mode(struct adapter *adapter)
3953 {
3954 	struct ixgbe_hw *hw = &adapter->hw;
3955 	device_t dev = adapter->dev;
3956 	s32 error = 0;
3957 
3958 	mtx_assert(&adapter->core_mtx, MA_OWNED);
3959 
3960 	/* Limit power management flow to X550EM baseT */
3961 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3962 	    && hw->phy.ops.enter_lplu) {
3963 		/* Turn off support for APM wakeup. (Using ACPI instead) */
3964 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
3965 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3966 
3967 		/*
3968 		 * Clear Wake Up Status register to prevent any previous wakeup
3969 		 * events from waking us up immediately after we suspend.
3970 		 */
3971 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3972 
3973 		/*
3974 		 * Program the Wakeup Filter Control register with user filter
3975 		 * settings
3976 		 */
3977 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3978 
3979 		/* Enable wakeups and power management in Wakeup Control */
3980 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
3981 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3982 
3983 		/* X550EM baseT adapters need a special LPLU flow */
3984 		hw->phy.reset_disable = true;
3985 		ixgbe_stop(adapter);
3986 		error = hw->phy.ops.enter_lplu(hw);
3987 		if (error)
3988 			device_printf(dev,
3989 			    "Error entering LPLU: %d\n", error);
3990 		hw->phy.reset_disable = false;
3991 	} else {
3992 		/* Just stop for other adapters */
3993 		ixgbe_stop(adapter);
3994 	}
3995 
3996 	return error;
3997 }
3998 
3999 /**********************************************************************
4000  *
4001  *  Update the board statistics counters.
4002  *
4003  **********************************************************************/
4004 static void
4005 ixgbe_update_stats_counters(struct adapter *adapter)
4006 {
4007 	struct ixgbe_hw *hw = &adapter->hw;
4008 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
4009 	u64 total_missed_rx = 0;
4010 
4011 	adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4012 	adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4013 	adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4014 	adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
4015 
4016 	for (int i = 0; i < 16; i++) {
4017 		adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4018 		adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4019 		adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4020 	}
4021 	adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4022 	adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4023 	adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4024 
4025 	/* Hardware workaround, gprc counts missed packets */
4026 	adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4027 	adapter->stats.pf.gprc -= missed_rx;
4028 
4029 	if (hw->mac.type != ixgbe_mac_82598EB) {
4030 		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4031 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4032 		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4033 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4034 		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4035 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4036 		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4037 		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4038 	} else {
4039 		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4040 		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4041 		/* 82598 only has a counter in the high register */
4042 		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4043 		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4044 		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4045 	}
4046 
4047 	/*
4048 	 * Workaround: mprc hardware is incorrectly counting
4049 	 * broadcasts, so for now we subtract those.
4050 	 */
4051 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4052 	adapter->stats.pf.bprc += bprc;
4053 	adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4054 	if (hw->mac.type == ixgbe_mac_82598EB)
4055 		adapter->stats.pf.mprc -= bprc;
4056 
4057 	adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4058 	adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4059 	adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4060 	adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4061 	adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4062 	adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4063 
4064 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4065 	adapter->stats.pf.lxontxc += lxon;
4066 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4067 	adapter->stats.pf.lxofftxc += lxoff;
4068 	total = lxon + lxoff;
4069 
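	/*
	 * PAUSE frames are counted in the good/total TX counters, so
	 * back them out below; each is a minimum-size (64-byte) frame.
	 */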
4070 	adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4071 	adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4072 	adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4073 	adapter->stats.pf.gptc -= total;
4074 	adapter->stats.pf.mptc -= total;
4075 	adapter->stats.pf.ptc64 -= total;
4076 	adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
4077 
4078 	adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4079 	adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4080 	adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4081 	adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4082 	adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4083 	adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4084 	adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4085 	adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4086 	adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4087 	adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4088 	adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4089 	adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4090 	adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4091 	adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4092 	adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4093 	adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4094 	adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4095 	adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4096 	/* Only read FCOE on 82599 */
4097 	if (hw->mac.type != ixgbe_mac_82598EB) {
4098 		adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4099 		adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4100 		adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4101 		adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4102 		adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4103 	}
4104 
4105 	/* Fill out the OS statistics structure */
4106 	IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
4107 	IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
4108 	IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
4109 	IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
4110 	IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
4111 	IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
4112 	IXGBE_SET_COLLISIONS(adapter, 0);
4113 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
4114 	IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
4115 	    + adapter->stats.pf.rlec);
4116 }
4117 
4118 #if __FreeBSD_version >= 1100036
4119 static uint64_t
4120 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
4121 {
4122 	struct adapter *adapter;
4123 	struct tx_ring *txr;
4124 	uint64_t rv;
4125 
4126 	adapter = if_getsoftc(ifp);
4127 
4128 	switch (cnt) {
4129 	case IFCOUNTER_IPACKETS:
4130 		return (adapter->ipackets);
4131 	case IFCOUNTER_OPACKETS:
4132 		return (adapter->opackets);
4133 	case IFCOUNTER_IBYTES:
4134 		return (adapter->ibytes);
4135 	case IFCOUNTER_OBYTES:
4136 		return (adapter->obytes);
4137 	case IFCOUNTER_IMCASTS:
4138 		return (adapter->imcasts);
4139 	case IFCOUNTER_OMCASTS:
4140 		return (adapter->omcasts);
4141 	case IFCOUNTER_COLLISIONS:
4142 		return (0);
4143 	case IFCOUNTER_IQDROPS:
4144 		return (adapter->iqdrops);
4145 	case IFCOUNTER_OQDROPS:
4146 		rv = 0;
4147 		txr = adapter->tx_rings;
4148 		for (int i = 0; i < adapter->num_queues; i++, txr++)
4149 			rv += txr->br->br_drops;
4150 		return (rv);
4151 	case IFCOUNTER_IERRORS:
4152 		return (adapter->ierrors);
4153 	default:
4154 		return (if_get_counter_default(ifp, cnt));
4155 	}
4156 }
4157 #endif
4158 
4159 /** ixgbe_sysctl_tdh_handler - Handler function
4160  *  Retrieves the TDH value from the hardware
4161  */
4162 static int
4163 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4164 {
4165 	int error;
4166 
4167 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4168 	if (!txr) return 0;
4169 
4170 	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4171 	error = sysctl_handle_int(oidp, &val, 0, req);
4172 	if (error || !req->newptr)
4173 		return error;
4174 	return 0;
4175 }
4176 
4177 /** ixgbe_sysctl_tdt_handler - Handler function
4178  *  Retrieves the TDT value from the hardware
4179  */
4180 static int
4181 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4182 {
4183 	int error;
4184 
4185 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4186 	if (!txr) return 0;
4187 
4188 	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4189 	error = sysctl_handle_int(oidp, &val, 0, req);
4190 	if (error || !req->newptr)
4191 		return error;
4192 	return 0;
4193 }
4194 
4195 /** ixgbe_sysctl_rdh_handler - Handler function
4196  *  Retrieves the RDH value from the hardware
4197  */
4198 static int
4199 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4200 {
4201 	int error;
4202 
4203 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4204 	if (!rxr) return 0;
4205 
4206 	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4207 	error = sysctl_handle_int(oidp, &val, 0, req);
4208 	if (error || !req->newptr)
4209 		return error;
4210 	return 0;
4211 }
4212 
4213 /** ixgbe_sysctl_rdt_handler - Handler function
4214  *  Retrieves the RDT value from the hardware
4215  */
4216 static int
4217 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4218 {
4219 	int error;
4220 
4221 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4222 	if (!rxr) return 0;
4223 
4224 	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4225 	error = sysctl_handle_int(oidp, &val, 0, req);
4226 	if (error || !req->newptr)
4227 		return error;
4228 	return 0;
4229 }
4230 
4231 static int
4232 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4233 {
4234 	int error;
4235 	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4236 	unsigned int reg, usec, rate;
4237 
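	/* EITR bits 11:3 hold the interval in 2us units; convert to ints/sec */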
4238 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
4239 	usec = ((reg & 0x0FF8) >> 3);
4240 	if (usec > 0)
4241 		rate = 500000 / usec;
4242 	else
4243 		rate = 0;
4244 	error = sysctl_handle_int(oidp, &rate, 0, req);
4245 	if (error || !req->newptr)
4246 		return error;
4247 	reg &= ~0xfff; /* default, no limitation */
4248 	ixgbe_max_interrupt_rate = 0;
4249 	if (rate > 0 && rate < 500000) {
4250 		if (rate < 1000)
4251 			rate = 1000;
4252 		ixgbe_max_interrupt_rate = rate;
4253 		reg |= ((4000000 / rate) & 0xff8);
4254 	}
4255 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
4256 	return 0;
4257 }
4258 
4259 static void
4260 ixgbe_add_device_sysctls(struct adapter *adapter)
4261 {
4262 	device_t dev = adapter->dev;
4263 	struct ixgbe_hw *hw = &adapter->hw;
4264 	struct sysctl_oid_list *child;
4265 	struct sysctl_ctx_list *ctx;
4266 
4267 	ctx = device_get_sysctl_ctx(dev);
4268 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4269 
4270 	/* Sysctls for all devices */
4271 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
4272 			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4273 			ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
4274 
4275 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
4276 			CTLFLAG_RW,
4277 			&ixgbe_enable_aim, 1, "Adaptive Interrupt Moderation");
4278 
4279 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
4280 			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4281 			ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
4282 
4283 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
4284 			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4285 			ixgbe_sysctl_thermal_test, "I", "Thermal Test");
4286 
4287 #ifdef IXGBE_DEBUG
4288 	/* testing sysctls (for all devices) */
4289 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
4290 			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4291 			ixgbe_sysctl_power_state, "I", "PCI Power State");
4292 
4293 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
4294 			CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
4295 			ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
4296 #endif
4297 	/* for X550 series devices */
4298 	if (hw->mac.type >= ixgbe_mac_X550)
4299 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4300 				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4301 				ixgbe_sysctl_dmac, "I", "DMA Coalesce");
4302 
4303 	/* for X552 backplane devices */
4304 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
4305 		struct sysctl_oid *eee_node;
4306 		struct sysctl_oid_list *eee_list;
4307 
4308 		eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
4309 					   CTLFLAG_RD, NULL,
4310 					   "Energy Efficient Ethernet sysctls");
4311 		eee_list = SYSCTL_CHILDREN(eee_node);
4312 
4313 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
4314 				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4315 				ixgbe_sysctl_eee_enable, "I",
4316 				"Enable or Disable EEE");
4317 
4318 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
4319 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4320 				ixgbe_sysctl_eee_negotiated, "I",
4321 				"EEE negotiated on link");
4322 
4323 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4324 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4325 				ixgbe_sysctl_eee_tx_lpi_status, "I",
4326 				"Whether or not TX link is in LPI state");
4327 
4328 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4329 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4330 				ixgbe_sysctl_eee_rx_lpi_status, "I",
4331 				"Whether or not RX link is in LPI state");
4332 
4333 		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_delay",
4334 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4335 				ixgbe_sysctl_eee_tx_lpi_delay, "I",
4336 				"TX LPI entry delay in microseconds");
4337 	}
4338 
4339 	/* for WoL-capable devices */
4340 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4341 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4342 				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4343 				ixgbe_sysctl_wol_enable, "I",
4344 				"Enable/Disable Wake on LAN");
4345 
4346 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4347 				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4348 				ixgbe_sysctl_wufc, "I",
4349 				"Enable/Disable Wake Up Filters");
4350 	}
4351 
4352 	/* for X552/X557-AT devices */
4353 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4354 		struct sysctl_oid *phy_node;
4355 		struct sysctl_oid_list *phy_list;
4356 
4357 		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4358 					   CTLFLAG_RD, NULL,
4359 					   "External PHY sysctls");
4360 		phy_list = SYSCTL_CHILDREN(phy_node);
4361 
4362 		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4363 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4364 				ixgbe_sysctl_phy_temp, "I",
4365 				"Current External PHY Temperature (Celsius)");
4366 
4367 		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4368 				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4369 				ixgbe_sysctl_phy_overtemp_occurred, "I",
4370 				"External PHY High Temperature Event Occurred");
4371 	}
4372 }
4373 
4374 /*
4375  * Add sysctl variables, one per statistic, to the system.
4376  */
4377 static void
4378 ixgbe_add_hw_stats(struct adapter *adapter)
4379 {
4380 	device_t dev = adapter->dev;
4381 
4382 	struct tx_ring *txr = adapter->tx_rings;
4383 	struct rx_ring *rxr = adapter->rx_rings;
4384 
4385 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4386 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4387 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4388 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4389 
4390 	struct sysctl_oid *stat_node, *queue_node;
4391 	struct sysctl_oid_list *stat_list, *queue_list;
4392 
4393 #define QUEUE_NAME_LEN 32
4394 	char namebuf[QUEUE_NAME_LEN];
4395 
4396 	/* Driver Statistics */
4397 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4398 			CTLFLAG_RD, &adapter->dropped_pkts,
4399 			"Driver dropped packets");
4400 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4401 			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4402 			"m_defrag() failed");
4403 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4404 			CTLFLAG_RD, &adapter->watchdog_events,
4405 			"Watchdog timeouts");
4406 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4407 			CTLFLAG_RD, &adapter->link_irq,
4408 			"Link MSIX IRQ Handled");
4409 
4410 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
4411 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4412 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4413 					    CTLFLAG_RD, NULL, "Queue Name");
4414 		queue_list = SYSCTL_CHILDREN(queue_node);
4415 
4416 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4417 				CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4418 				sizeof(&adapter->queues[i]),
4419 				ixgbe_sysctl_interrupt_rate_handler, "IU",
4420 				"Interrupt Rate");
4421 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4422 				CTLFLAG_RD, &(adapter->queues[i].irqs),
4423 				"irqs on this queue");
4424 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4425 				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4426 				ixgbe_sysctl_tdh_handler, "IU",
4427 				"Transmit Descriptor Head");
4428 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4429 				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4430 				ixgbe_sysctl_tdt_handler, "IU",
4431 				"Transmit Descriptor Tail");
4432 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4433 				CTLFLAG_RD, &txr->tso_tx,
4434 				"TSO");
4435 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4436 				CTLFLAG_RD, &txr->no_tx_dma_setup,
4437 				"Driver tx dma failure in xmit");
4438 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4439 				CTLFLAG_RD, &txr->no_desc_avail,
4440 				"Queue No Descriptor Available");
4441 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4442 				CTLFLAG_RD, &txr->total_packets,
4443 				"Queue Packets Transmitted");
4444 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4445 				CTLFLAG_RD, &txr->br->br_drops,
4446 				"Packets dropped in buf_ring");
4447 	}
4448 
4449 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4450 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4451 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4452 					    CTLFLAG_RD, NULL, "Queue Name");
4453 		queue_list = SYSCTL_CHILDREN(queue_node);
4454 
4455 		struct lro_ctrl *lro = &rxr->lro;
4456 
4457 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4462 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4463 				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4464 				ixgbe_sysctl_rdh_handler, "IU",
4465 				"Receive Descriptor Head");
4466 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4467 				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4468 				ixgbe_sysctl_rdt_handler, "IU",
4469 				"Receive Descriptor Tail");
4470 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4471 				CTLFLAG_RD, &rxr->rx_packets,
4472 				"Queue Packets Received");
4473 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4474 				CTLFLAG_RD, &rxr->rx_bytes,
4475 				"Queue Bytes Received");
4476 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4477 				CTLFLAG_RD, &rxr->rx_copies,
4478 				"Copied RX Frames");
4479 		SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
4480 				CTLFLAG_RD, &lro->lro_queued, 0,
4481 				"LRO Queued");
4482 		SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
4483 				CTLFLAG_RD, &lro->lro_flushed, 0,
4484 				"LRO Flushed");
4485 	}
4486 
4487 	/* MAC stats get the own sub node */
4488 	/* MAC stats get their own sub-node */
4489 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4490 				    CTLFLAG_RD, NULL, "MAC Statistics");
4491 	stat_list = SYSCTL_CHILDREN(stat_node);
4492 
4493 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4494 			CTLFLAG_RD, &stats->crcerrs,
4495 			"CRC Errors");
4496 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4497 			CTLFLAG_RD, &stats->illerrc,
4498 			"Illegal Byte Errors");
4499 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4500 			CTLFLAG_RD, &stats->errbc,
4501 			"Byte Errors");
4502 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4503 			CTLFLAG_RD, &stats->mspdc,
4504 			"MAC Short Packets Discarded");
4505 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4506 			CTLFLAG_RD, &stats->mlfc,
4507 			"MAC Local Faults");
4508 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4509 			CTLFLAG_RD, &stats->mrfc,
4510 			"MAC Remote Faults");
4511 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4512 			CTLFLAG_RD, &stats->rlec,
4513 			"Receive Length Errors");
4514 
4515 	/* Flow Control stats */
4516 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4517 			CTLFLAG_RD, &stats->lxontxc,
4518 			"Link XON Transmitted");
4519 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4520 			CTLFLAG_RD, &stats->lxonrxc,
4521 			"Link XON Received");
4522 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4523 			CTLFLAG_RD, &stats->lxofftxc,
4524 			"Link XOFF Transmitted");
4525 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4526 			CTLFLAG_RD, &stats->lxoffrxc,
4527 			"Link XOFF Received");
4528 
4529 	/* Packet Reception Stats */
4530 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4531 			CTLFLAG_RD, &stats->tor,
4532 			"Total Octets Received");
4533 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4534 			CTLFLAG_RD, &stats->gorc,
4535 			"Good Octets Received");
4536 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4537 			CTLFLAG_RD, &stats->tpr,
4538 			"Total Packets Received");
4539 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4540 			CTLFLAG_RD, &stats->gprc,
4541 			"Good Packets Received");
4542 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4543 			CTLFLAG_RD, &stats->mprc,
4544 			"Multicast Packets Received");
4545 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4546 			CTLFLAG_RD, &stats->bprc,
4547 			"Broadcast Packets Received");
4548 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4549 			CTLFLAG_RD, &stats->prc64,
4550 			"64 byte frames received");
4551 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4552 			CTLFLAG_RD, &stats->prc127,
4553 			"65-127 byte frames received");
4554 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4555 			CTLFLAG_RD, &stats->prc255,
4556 			"128-255 byte frames received");
4557 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4558 			CTLFLAG_RD, &stats->prc511,
4559 			"256-511 byte frames received");
4560 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4561 			CTLFLAG_RD, &stats->prc1023,
4562 			"512-1023 byte frames received");
4563 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4564 			CTLFLAG_RD, &stats->prc1522,
4565 			"1024-1522 byte frames received");
4566 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4567 			CTLFLAG_RD, &stats->ruc,
4568 			"Receive Undersized");
4569 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4570 			CTLFLAG_RD, &stats->rfc,
4571 			"Fragmented Packets Received");
4572 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4573 			CTLFLAG_RD, &stats->roc,
4574 			"Oversized Packets Received");
4575 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4576 			CTLFLAG_RD, &stats->rjc,
4577 			"Received Jabber");
4578 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4579 			CTLFLAG_RD, &stats->mngprc,
4580 			"Management Packets Received");
4581 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4582 			CTLFLAG_RD, &stats->mngpdc,
4583 			"Management Packets Dropped");
4584 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4585 			CTLFLAG_RD, &stats->xec,
4586 			"Checksum Errors");
4587 
4588 	/* Packet Transmission Stats */
4589 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4590 			CTLFLAG_RD, &stats->gotc,
4591 			"Good Octets Transmitted");
4592 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4593 			CTLFLAG_RD, &stats->tpt,
4594 			"Total Packets Transmitted");
4595 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4596 			CTLFLAG_RD, &stats->gptc,
4597 			"Good Packets Transmitted");
4598 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4599 			CTLFLAG_RD, &stats->bptc,
4600 			"Broadcast Packets Transmitted");
4601 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4602 			CTLFLAG_RD, &stats->mptc,
4603 			"Multicast Packets Transmitted");
4604 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4605 			CTLFLAG_RD, &stats->mngptc,
4606 			"Management Packets Transmitted");
4607 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4608 			CTLFLAG_RD, &stats->ptc64,
4609 			"64 byte frames transmitted");
4610 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4611 			CTLFLAG_RD, &stats->ptc127,
4612 			"65-127 byte frames transmitted");
4613 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4614 			CTLFLAG_RD, &stats->ptc255,
4615 			"128-255 byte frames transmitted");
4616 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4617 			CTLFLAG_RD, &stats->ptc511,
4618 			"256-511 byte frames transmitted");
4619 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4620 			CTLFLAG_RD, &stats->ptc1023,
4621 			"512-1023 byte frames transmitted");
4622 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4623 			CTLFLAG_RD, &stats->ptc1522,
4624 			"1024-1522 byte frames transmitted");
4625 }
4626 
4627 static void
4628 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4629     const char *description, int *limit, int value)
4630 {
4631 	*limit = value;
4632 	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4633 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4634 	    OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4635 }
4636 
4637 /*
4638 ** Set flow control using sysctl:
4639 ** Flow control values:
4640 ** 	0 - off
4641 **	1 - rx pause
4642 **	2 - tx pause
4643 **	3 - full
4644 */
4645 static int
4646 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4647 {
4648 	int error, last;
4649 	struct adapter *adapter = (struct adapter *) arg1;
4650 
4651 	last = adapter->fc;
4652 	error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4653 	if ((error) || (req->newptr == NULL))
4654 		return (error);
4655 
4656 	/* Don't bother if it's not changed */
4657 	if (adapter->fc == last)
4658 		return (0);
4659 
4660 	switch (adapter->fc) {
4661 		case ixgbe_fc_rx_pause:
4662 		case ixgbe_fc_tx_pause:
4663 		case ixgbe_fc_full:
4664 			adapter->hw.fc.requested_mode = adapter->fc;
4665 			if (adapter->num_queues > 1)
4666 				ixgbe_disable_rx_drop(adapter);
4667 			break;
4668 		case ixgbe_fc_none:
4669 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
4670 			if (adapter->num_queues > 1)
4671 				ixgbe_enable_rx_drop(adapter);
4672 			break;
4673 		default:
4674 			adapter->fc = last;
4675 			return (EINVAL);
4676 	}
4677 	/* Don't autoneg if forcing a value */
4678 	adapter->hw.fc.disable_fc_autoneg = TRUE;
4679 	ixgbe_fc_enable(&adapter->hw);
4680 	return error;
4681 }
4682 
4683 /*
4684 ** Control advertised link speed:
4685 **	Flags:
4686 **	0x1 - advertise 100 Mb
4687 **	0x2 - advertise 1G
4688 **	0x4 - advertise 10G
4689 */
4690 static int
4691 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4692 {
4693 	int			error = 0, requested;
4694 	struct adapter		*adapter;
4695 	device_t		dev;
4696 	struct ixgbe_hw		*hw;
4697 	ixgbe_link_speed	speed = 0;
4698 
4699 	adapter = (struct adapter *) arg1;
4700 	dev = adapter->dev;
4701 	hw = &adapter->hw;
4702 
4703 	requested = adapter->advertise;
4704 	error = sysctl_handle_int(oidp, &requested, 0, req);
4705 	if ((error) || (req->newptr == NULL))
4706 		return (error);
4707 
4708 	/* No speed changes for backplane media */
4709 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4710 		return (ENODEV);
4711 
4712 	/* Checks to validate new value */
4713 	if (adapter->advertise == requested) /* no change */
4714 		return (0);
4715 
4716 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4717 	    (hw->phy.multispeed_fiber))) {
4718 		device_printf(dev,
4719 		    "Advertised speed can only be set on copper or "
4720 		    "multispeed fiber media types.\n");
4721 		return (EINVAL);
4722 	}
4723 
4724 	if (requested < 0x1 || requested > 0x7) {
4725 		device_printf(dev,
4726 		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4727 		return (EINVAL);
4728 	}
4729 
4730 	if ((requested & 0x1)
4731 	    && (hw->mac.type != ixgbe_mac_X540)
4732 	    && (hw->mac.type != ixgbe_mac_X550)) {
4733 		device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4734 		return (EINVAL);
4735 	}
4736 
4737 	/* Set new value and report new advertised mode */
4738 	if (requested & 0x1)
4739 		speed |= IXGBE_LINK_SPEED_100_FULL;
4740 	if (requested & 0x2)
4741 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4742 	if (requested & 0x4)
4743 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4744 
4745 	hw->mac.autotry_restart = TRUE;
4746 	hw->mac.ops.setup_link(hw, speed, TRUE);
4747 	adapter->advertise = requested;
4748 
4749 	return (error);
4750 }
4751 
4752 /*
4753  * The following two sysctls are for X552/X557-AT devices;
4754  * they deal with the external PHY used in them.
4755  */
4756 static int
4757 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4758 {
4759 	struct adapter	*adapter = (struct adapter *) arg1;
4760 	struct ixgbe_hw *hw = &adapter->hw;
4761 	u16 reg;
4762 
4763 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4764 		device_printf(adapter->dev,
4765 		    "Device has no supported external thermal sensor.\n");
4766 		return (ENODEV);
4767 	}
4768 
4769 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4770 				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4771 				      &reg)) {
4772 		device_printf(adapter->dev,
4773 		    "Error reading from PHY's current temperature register\n");
4774 		return (EAGAIN);
4775 	}
4776 
4777 	/* The PHY reports temperature in the high byte; shift for output */
4778 	reg = reg >> 8;
4779 
4780 	return (sysctl_handle_int(oidp, NULL, reg, req));
4781 }
4782 
4783 /*
4784  * Reports whether the current PHY temperature is over
4785  * the overtemp threshold.
4786  *  - This is reported directly from the PHY
4787  */
4788 static int
4789 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4790 {
4791 	struct adapter	*adapter = (struct adapter *) arg1;
4792 	struct ixgbe_hw *hw = &adapter->hw;
4793 	u16 reg;
4794 
4795 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4796 		device_printf(adapter->dev,
4797 		    "Device has no supported external thermal sensor.\n");
4798 		return (ENODEV);
4799 	}
4800 
4801 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4802 				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4803 				      &reg)) {
4804 		device_printf(adapter->dev,
4805 		    "Error reading from PHY's temperature status register\n");
4806 		return (EAGAIN);
4807 	}
4808 
4809 	/* Get occurrence bit */
4810 	reg = !!(reg & 0x4000);
4811 	return (sysctl_handle_int(oidp, 0, reg, req));
4812 }
4813 
4814 /*
4815 ** Thermal Shutdown Trigger (internal MAC)
4816 **   - Set this to 1 to cause an overtemp event to occur
4817 */
4818 static int
4819 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4820 {
4821 	struct adapter	*adapter = (struct adapter *) arg1;
4822 	struct ixgbe_hw *hw = &adapter->hw;
4823 	int error, fire = 0;
4824 
4825 	error = sysctl_handle_int(oidp, &fire, 0, req);
4826 	if ((error) || (req->newptr == NULL))
4827 		return (error);
4828 
4829 	if (fire) {
4830 		u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4831 		reg |= IXGBE_EICR_TS;
4832 		IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4833 	}
4834 
4835 	return (0);
4836 }
4837 
4838 /*
4839 ** Manage DMA Coalescing.
4840 ** Control values:
4841 ** 	0/1 - off / on (use default value of 1000)
4842 **
4843 **	Legal timer values are:
4844 **	50,100,250,500,1000,2000,5000,10000
4845 **
4846 **	Turning off interrupt moderation will also turn this off.
4847 */
4848 static int
4849 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4850 {
4851 	struct adapter *adapter = (struct adapter *) arg1;
4852 	struct ifnet *ifp = adapter->ifp;
4853 	int		error;
4854 	u32		newval;
4855 
4856 	newval = adapter->dmac;
4857 	error = sysctl_handle_int(oidp, &newval, 0, req);
4858 	if ((error) || (req->newptr == NULL))
4859 		return (error);
4860 
4861 	switch (newval) {
4862 	case 0:
4863 		/* Disabled */
4864 		adapter->dmac = 0;
4865 		break;
4866 	case 1:
4867 		/* Enable and use default */
4868 		adapter->dmac = 1000;
4869 		break;
4870 	case 50:
4871 	case 100:
4872 	case 250:
4873 	case 500:
4874 	case 1000:
4875 	case 2000:
4876 	case 5000:
4877 	case 10000:
4878 		/* Legal values - allow */
4879 		adapter->dmac = newval;
4880 		break;
4881 	default:
4882 		/* Do nothing, illegal value */
4883 		return (EINVAL);
4884 	}
4885 
4886 	/* Re-initialize hardware if it's already running */
4887 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4888 		ixgbe_init(adapter);
4889 
4890 	return (0);
4891 }
4892 
4893 #ifdef IXGBE_DEBUG
4894 /**
4895  * Sysctl to test power states
4896  * Values:
4897  *	0      - set device to D0
4898  *	3      - set device to D3
4899  *	(none) - get current device power state
4900  */
4901 static int
4902 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4903 {
4904 	struct adapter *adapter = (struct adapter *) arg1;
4905 	device_t dev =  adapter->dev;
4906 	int curr_ps, new_ps, error = 0;
4907 
4908 	curr_ps = new_ps = pci_get_powerstate(dev);
4909 
4910 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4911 	if ((error) || (req->newptr == NULL))
4912 		return (error);
4913 
4914 	if (new_ps == curr_ps)
4915 		return (0);
4916 
4917 	if (new_ps == 3 && curr_ps == 0)
4918 		error = DEVICE_SUSPEND(dev);
4919 	else if (new_ps == 0 && curr_ps == 3)
4920 		error = DEVICE_RESUME(dev);
4921 	else
4922 		return (EINVAL);
4923 
4924 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4925 
4926 	return (error);
4927 }
4928 #endif
4929 /*
4930  * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4931  * Values:
4932  *	0 - disabled
4933  *	1 - enabled
4934  */
4935 static int
4936 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4937 {
4938 	struct adapter *adapter = (struct adapter *) arg1;
4939 	struct ixgbe_hw *hw = &adapter->hw;
4940 	int new_wol_enabled;
4941 	int error = 0;
4942 
4943 	new_wol_enabled = hw->wol_enabled;
4944 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4945 	if ((error) || (req->newptr == NULL))
4946 		return (error);
4947 	new_wol_enabled = !!(new_wol_enabled);
4948 	if (new_wol_enabled == hw->wol_enabled)
4949 		return (0);
4950 
4951 	if (new_wol_enabled > 0 && !adapter->wol_support)
4952 		return (ENODEV);
4953 	else
4954 		hw->wol_enabled = new_wol_enabled;
4955 
4956 	return (0);
4957 }
4958 
4959 /*
4960  * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4961  * if supported by the adapter.
4962  * Values:
4963  *	0 - disabled
4964  *	1 - enabled
4965  */
4966 static int
4967 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4968 {
4969 	struct adapter *adapter = (struct adapter *) arg1;
4970 	struct ixgbe_hw *hw = &adapter->hw;
4971 	struct ifnet *ifp = adapter->ifp;
4972 	int new_eee_enabled, error = 0;
4973 
4974 	new_eee_enabled = adapter->eee_enabled;
4975 	error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4976 	if ((error) || (req->newptr == NULL))
4977 		return (error);
4978 	new_eee_enabled = !!(new_eee_enabled);
4979 	if (new_eee_enabled == adapter->eee_enabled)
4980 		return (0);
4981 
4982 	if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
4983 		return (ENODEV);
4984 	else
4985 		adapter->eee_enabled = new_eee_enabled;
4986 
4987 	/* Re-initialize hardware if it's already running */
4988 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4989 		ixgbe_init(adapter);
4990 
4991 	return (0);
4992 }
4993 
4994 /*
4995  * Read-only sysctl indicating whether EEE support was negotiated
4996  * on the link.
4997  */
4998 static int
4999 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
5000 {
5001 	struct adapter *adapter = (struct adapter *) arg1;
5002 	struct ixgbe_hw *hw = &adapter->hw;
5003 	bool status;
5004 
5005 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
5006 
5007 	return (sysctl_handle_int(oidp, 0, status, req));
5008 }
5009 
5010 /*
5011  * Read-only sysctl indicating whether RX Link is in LPI state.
5012  */
5013 static int
5014 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
5015 {
5016 	struct adapter *adapter = (struct adapter *) arg1;
5017 	struct ixgbe_hw *hw = &adapter->hw;
5018 	bool status;
5019 
5020 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5021 	    IXGBE_EEE_RX_LPI_STATUS);
5022 
5023 	return (sysctl_handle_int(oidp, 0, status, req));
5024 }
5025 
5026 /*
5027  * Read-only sysctl indicating whether TX Link is in LPI state.
5028  */
5029 static int
5030 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
5031 {
5032 	struct adapter *adapter = (struct adapter *) arg1;
5033 	struct ixgbe_hw *hw = &adapter->hw;
5034 	bool status;
5035 
5036 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5037 	    IXGBE_EEE_TX_LPI_STATUS);
5038 
5039 	return (sysctl_handle_int(oidp, 0, status, req));
5040 }
5041 
5042 /*
5043  * Read-only sysctl indicating TX Link LPI delay
5044  */
5045 static int
5046 ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS)
5047 {
5048 	struct adapter *adapter = (struct adapter *) arg1;
5049 	struct ixgbe_hw *hw = &adapter->hw;
5050 	u32 reg;
5051 
5052 	reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU);
5053 
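	/* The TX LPI delay occupies the top bits (31:26) of EEE_SU */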
5054 	return (sysctl_handle_int(oidp, 0, reg >> 26, req));
5055 }
5056 
5057 /*
5058  * Sysctl to enable/disable the types of packets that the
5059  * adapter will wake up on upon receipt.
5060  * WUFC - Wake Up Filter Control
5061  * Flags:
5062  *	0x1  - Link Status Change
5063  *	0x2  - Magic Packet
5064  *	0x4  - Direct Exact
5065  *	0x8  - Directed Multicast
5066  *	0x10 - Broadcast
5067  *	0x20 - ARP/IPv4 Request Packet
5068  *	0x40 - Direct IPv4 Packet
5069  *	0x80 - Direct IPv6 Packet
5070  *
5071  * Setting another flag will cause the sysctl to return an
5072  * error.
5073  */
5074 static int
5075 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5076 {
5077 	struct adapter *adapter = (struct adapter *) arg1;
5078 	int error = 0;
5079 	u32 new_wufc;
5080 
5081 	new_wufc = adapter->wufc;
5082 
5083 	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
5084 	if ((error) || (req->newptr == NULL))
5085 		return (error);
5086 	if (new_wufc == adapter->wufc)
5087 		return (0);
5088 
5089 	if (new_wufc & 0xffffff00)
5090 		return (EINVAL);
5091 	else {
5092 		new_wufc &= 0xff;
5093 		new_wufc |= (0xffffff00 & adapter->wufc);	/* keep upper bits */
5094 		adapter->wufc = new_wufc;
5095 	}
5096 
5097 	return (0);
5098 }
5099 
5100 #ifdef IXGBE_DEBUG
5101 static int
5102 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5103 {
5104 	struct adapter *adapter = (struct adapter *)arg1;
5105 	struct ixgbe_hw *hw = &adapter->hw;
5106 	device_t dev = adapter->dev;
5107 	int error = 0, reta_size;
5108 	struct sbuf *buf;
5109 	u32 reg;
5110 
5111 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5112 	if (!buf) {
5113 		device_printf(dev, "Could not allocate sbuf for output.\n");
5114 		return (ENOMEM);
5115 	}
5116 
5117 	/* TODO: use sbufs to make a string to print out */
5118 	/* Set multiplier for RETA setup and table size based on MAC */
5119 	switch (adapter->hw.mac.type) {
5120 	case ixgbe_mac_X550:
5121 	case ixgbe_mac_X550EM_x:
5122 		reta_size = 128;
5123 		break;
5124 	default:
5125 		reta_size = 32;
5126 		break;
5127 	}
5128 
5129 	/* Print out the redirection table */
5130 	sbuf_cat(buf, "\n");
5131 	for (int i = 0; i < reta_size; i++) {
5132 		if (i < 32) {
5133 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5134 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5135 		} else {
5136 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5137 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5138 		}
5139 	}
5140 
5141 	/* TODO: print more config */
5142 
5143 	error = sbuf_finish(buf);
5144 	if (error)
5145 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5146 
5147 	sbuf_delete(buf);
5148 	return (0);
5149 }
5150 #endif /* IXGBE_DEBUG */
5151 
5152 /*
5153 ** Enable the hardware to drop packets when a buffer is
5154 ** full.  This is useful with multiqueue, so that no single
5155 ** full queue stalls the entire RX engine.  We only enable
5156 ** this when multiqueue is in use AND when flow control is
5157 ** disabled.
5158 */
5159 static void
5160 ixgbe_enable_rx_drop(struct adapter *adapter)
5161 {
5162 	struct ixgbe_hw *hw = &adapter->hw;
5163 
5164 	for (int i = 0; i < adapter->num_queues; i++) {
5165 		struct rx_ring *rxr = &adapter->rx_rings[i];
5166 		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5167 		srrctl |= IXGBE_SRRCTL_DROP_EN;
5168 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5169 	}
5170 #ifdef PCI_IOV
5171 	/* enable drop for each vf */
5172 	for (int i = 0; i < adapter->num_vfs; i++) {
5173 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
5174 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5175 		    IXGBE_QDE_ENABLE));
5176 	}
5177 #endif
5178 }
5179 
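/*
** The inverse of ixgbe_enable_rx_drop(): clear the per-queue (and,
** with PCI_IOV, per-VF) drop-enable bit so that a full ring asserts
** backpressure rather than dropping packets.
*/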
5180 static void
5181 ixgbe_disable_rx_drop(struct adapter *adapter)
5182 {
5183 	struct ixgbe_hw *hw = &adapter->hw;
5184 
5185 	for (int i = 0; i < adapter->num_queues; i++) {
5186 		struct rx_ring *rxr = &adapter->rx_rings[i];
5187 		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5188 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5189 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5190 	}
5191 #ifdef PCI_IOV
5192 	/* disable drop for each vf */
5193 	for (int i = 0; i < adapter->num_vfs; i++) {
5194 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
5195 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5196 	}
5197 #endif
5198 }
5199 
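/*
** Trigger a software interrupt for the given queues.  The 82598 has a
** single EICS register, while later MACs split the 64-bit queue mask
** across EICS_EX(0) and EICS_EX(1).
*/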
5200 static void
5201 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5202 {
5203 	u32 mask;
5204 
5205 	switch (adapter->hw.mac.type) {
5206 	case ixgbe_mac_82598EB:
5207 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5208 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5209 		break;
5210 	case ixgbe_mac_82599EB:
5211 	case ixgbe_mac_X540:
5212 	case ixgbe_mac_X550:
5213 	case ixgbe_mac_X550EM_x:
5214 		mask = (queues & 0xFFFFFFFF);
5215 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5216 		mask = (queues >> 32);
5217 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5218 		break;
5219 	default:
5220 		break;
5221 	}
5222 }
5223 
5224 #ifdef PCI_IOV
5225 
5226 /*
5227 ** Support functions for SRIOV/VF management
5228 */
5229 
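/*
** Send a PF control message ("ping") to every active VF.
*/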
5230 static void
5231 ixgbe_ping_all_vfs(struct adapter *adapter)
5232 {
5233 	struct ixgbe_vf *vf;
5234 
5235 	for (int i = 0; i < adapter->num_vfs; i++) {
5236 		vf = &adapter->vfs[i];
5237 		if (vf->flags & IXGBE_VF_ACTIVE)
5238 			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
5239 	}
5240 }
5241 
5242 
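/*
** Program the receive filtering (VMOLR) and default VLAN tagging
** (VMVIR) for a VF's pool.  With tag 0 the VF may tag its own
** traffic; otherwise all of its traffic is tagged with 'tag'.
*/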
5243 static void
5244 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
5245     uint16_t tag)
5246 {
5247 	struct ixgbe_hw *hw;
5248 	uint32_t vmolr, vmvir;
5249 
5250 	hw = &adapter->hw;
5251 
5252 	vf->vlan_tag = tag;
5253 
5254 	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
5255 
5256 	/* Do not receive packets that pass inexact filters. */
5257 	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
5258 
5259 	/* Disable Multicast Promiscuous Mode. */
5260 	vmolr &= ~IXGBE_VMOLR_MPE;
5261 
5262 	/* Accept broadcasts. */
5263 	vmolr |= IXGBE_VMOLR_BAM;
5264 
5265 	if (tag == 0) {
5266 		/* Accept non-vlan tagged traffic. */
5267 		/* vmolr |= IXGBE_VMOLR_AUPE; */
5268 
5269 		/* Allow VM to tag outgoing traffic; no default tag. */
5270 		vmvir = 0;
5271 	} else {
5272 		/* Require vlan-tagged traffic. */
5273 		vmolr &= ~IXGBE_VMOLR_AUPE;
5274 
5275 		/* Tag all traffic with provided vlan tag. */
5276 		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
5277 	}
5278 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
5279 	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
5280 }
5281 
5282 
5283 static boolean_t
5284 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
5285 {
5286 
5287 	/*
5288 	 * Frame size compatibility between PF and VF is only a problem on
5289 	 * 82599-based cards.  X540 and later support any combination of jumbo
5290 	 * frames on PFs and VFs.
5291 	 */
5292 	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
5293 		return (TRUE);
5294 
5295 	switch (vf->api_ver) {
5296 	case IXGBE_API_VER_1_0:
5297 	case IXGBE_API_VER_UNKNOWN:
5298 		/*
5299 		 * On legacy (1.0 and older) VF versions, we don't support jumbo
5300 		 * frames on either the PF or the VF.
5301 		 */
5302 		if (adapter->max_frame_size > ETHER_MAX_LEN ||
5303 		    vf->max_frame_size > ETHER_MAX_LEN)
5304 			return (FALSE);
5305 
5306 		return (TRUE);
5309 	case IXGBE_API_VER_1_1:
5310 	default:
5311 		/*
5312 		 * 1.1 or later VF versions always work if they aren't using
5313 		 * jumbo frames.
5314 		 */
5315 		if (vf->max_frame_size <= ETHER_MAX_LEN)
5316 			return (TRUE);
5317 
5318 		/*
5319 		 * Jumbo frames only work with VFs if the PF is also using jumbo
5320 		 * frames.
5321 		 */
5322 		if (adapter->max_frame_size <= ETHER_MAX_LEN)
5323 			return (TRUE);
5324 
5325 		return (FALSE);
5326 
5327 	}
5328 }
5329 
5330 
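/*
** Reset software state for a VF: restore its default VLAN, clear its
** receive-address register entry and forget the negotiated mailbox
** API version.
*/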
5331 static void
5332 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
5333 {
5334 	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
5335 
5336 	/* XXX clear multicast addresses */
5337 
5338 	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
5339 
5340 	vf->api_ver = IXGBE_API_VER_UNKNOWN;
5341 }
5342 
5343 
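/*
** Set the VF's bit in the VF Transmit Enable (VFTE) register for its
** pool.
*/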
5344 static void
5345 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
5346 {
5347 	struct ixgbe_hw *hw;
5348 	uint32_t vf_index, vfte;
5349 
5350 	hw = &adapter->hw;
5351 
5352 	vf_index = IXGBE_VF_INDEX(vf->pool);
5353 	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
5354 	vfte |= IXGBE_VF_BIT(vf->pool);
5355 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
5356 }
5357 
5358 
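/*
** Set or clear the VF's bit in the VF Receive Enable (VFRE) register,
** depending on whether its frame size is compatible with the PF's.
*/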
5359 static void
5360 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
5361 {
5362 	struct ixgbe_hw *hw;
5363 	uint32_t vf_index, vfre;
5364 
5365 	hw = &adapter->hw;
5366 
5367 	vf_index = IXGBE_VF_INDEX(vf->pool);
5368 	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
5369 	if (ixgbe_vf_frame_size_compatible(adapter, vf))
5370 		vfre |= IXGBE_VF_BIT(vf->pool);
5371 	else
5372 		vfre &= ~IXGBE_VF_BIT(vf->pool);
5373 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
5374 }
5375 
5376 
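/*
** Handle a VF_RESET mailbox message: reset the VF's state, then reply
** with an ACK carrying its permanent MAC address and the multicast
** filter type, or a NACK if no valid address is assigned.
*/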
5377 static void
5378 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5379 {
5380 	struct ixgbe_hw *hw;
5381 	uint32_t ack;
5382 	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
5383 
5384 	hw = &adapter->hw;
5385 
5386 	ixgbe_process_vf_reset(adapter, vf);
5387 
5388 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5389 		ixgbe_set_rar(&adapter->hw, vf->rar_index,
5390 		    vf->ether_addr, vf->pool, TRUE);
5391 		ack = IXGBE_VT_MSGTYPE_ACK;
5392 	} else
5393 		ack = IXGBE_VT_MSGTYPE_NACK;
5394 
5395 	ixgbe_vf_enable_transmit(adapter, vf);
5396 	ixgbe_vf_enable_receive(adapter, vf);
5397 
5398 	vf->flags |= IXGBE_VF_CTS;
5399 
5400 	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
5401 	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
5402 	resp[3] = hw->mac.mc_filter_type;
5403 	ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
5404 }
5405 
5406 
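/*
** Handle a SET_MAC_ADDR mailbox message.  NACK the request if the VF
** is not allowed to change its address or the address fails to
** validate; otherwise program it into the VF's RAR entry.
*/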
5407 static void
5408 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5409 {
5410 	uint8_t *mac;
5411 
5412 	mac = (uint8_t*)&msg[1];
5413 
5414 	/* Check that the VF has permission to change the MAC address. */
5415 	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
5416 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5417 		return;
5418 	}
5419 
5420 	if (ixgbe_validate_mac_addr(mac) != 0) {
5421 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5422 		return;
5423 	}
5424 
5425 	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5426 
5427 	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
5428 	    vf->pool, TRUE);
5429 
5430 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5431 }
5432 
5433 
5434 /*
5435 ** VF multicast addresses are set by setting the appropriate bit
5436 ** in one of 128 32-bit MTA registers (4096 possible hash values).
5437 */
5438 static void
5439 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
5440 {
5441 	u16	*list = (u16*)&msg[1];
5442 	int	entries;
5443 	u32	vmolr, vec_bit, vec_reg, mta_reg;
5444 
5445 	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5446 	entries = min(entries, IXGBE_MAX_VF_MC);
5447 
5448 	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
5449 
5450 	vf->num_mc_hashes = entries;
5451 
5452 	/* Set the appropriate MTA bit */
5453 	for (int i = 0; i < entries; i++) {
5454 		vf->mc_hash[i] = list[i];
5455 		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5456 		vec_bit = vf->mc_hash[i] & 0x1F;
5457 		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5458 		mta_reg |= (1 << vec_bit);
5459 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5460 	}
5461 
5462 	vmolr |= IXGBE_VMOLR_ROMPE;
5463 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5464 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5466 }
5467 
5468 
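/*
** Handle a SET_VLAN mailbox message: add or remove a VLAN filter
** (VFTA) entry for the VF's pool.  Requires the VLAN capability, and
** enabling VLAN 0 is rejected.
*/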
5469 static void
5470 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5471 {
5472 	struct ixgbe_hw *hw;
5473 	int enable;
5474 	uint16_t tag;
5475 
5476 	hw = &adapter->hw;
5477 	enable = IXGBE_VT_MSGINFO(msg[0]);
5478 	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
5479 
5480 	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
5481 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5482 		return;
5483 	}
5484 
5485 	/* It is illegal to enable vlan tag 0. */
5486 	if (tag == 0 && enable != 0) {
5487 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5488 		return;
5489 	}
5490 
5491 	ixgbe_set_vfta(hw, tag, vf->pool, enable);
5492 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5493 }
5494 
5495 
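/*
** Handle a SET_LPE (large packet enable) mailbox message carrying the
** VF's requested maximum frame size.  Out-of-range sizes are
** intentionally ACKed and ignored; a valid size may also grow the
** PF's MHADD max-frame setting.
*/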
5496 static void
5497 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5498 {
5499 	struct ixgbe_hw *hw;
5500 	uint32_t vf_max_size, pf_max_size, mhadd;
5501 
5502 	hw = &adapter->hw;
5503 	vf_max_size = msg[1];
5504 
5505 	if (vf_max_size < ETHER_CRC_LEN) {
5506 		/* We intentionally ACK invalid LPE requests. */
5507 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
5508 		return;
5509 	}
5510 
5511 	vf_max_size -= ETHER_CRC_LEN;
5512 
5513 	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
5514 		/* We intentionally ACK invalid LPE requests. */
5515 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
5516 		return;
5517 	}
5518 
5519 	vf->max_frame_size = vf_max_size;
5520 	ixgbe_update_max_frame(adapter, vf->max_frame_size);
5521 
5522 	/*
5523 	 * We might have to disable reception to this VF if the frame size is
5524 	 * not compatible with the config on the PF.
5525 	 */
5526 	ixgbe_vf_enable_receive(adapter, vf);
5527 
5528 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
5529 	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
5530 
5531 	if (pf_max_size < adapter->max_frame_size) {
5532 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
5533 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
5534 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
5535 	}
5536 
5537 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5538 }
5539 
5540 
5541 static void
5542 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
5543     uint32_t *msg)
5544 {
5545 	/* XXX implement this */
5546 	ixgbe_send_vf_nack(adapter, vf, msg[0]);
5547 }
5548 
5549 
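/*
** Handle an API_NEGOTIATE mailbox message: record the mailbox API
** version (1.0 or 1.1) requested by the VF, NACKing anything else.
*/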
5550 static void
5551 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
5552     uint32_t *msg)
5553 {
5554 
5555 	switch (msg[1]) {
5556 	case IXGBE_API_VER_1_0:
5557 	case IXGBE_API_VER_1_1:
5558 		vf->api_ver = msg[1];
5559 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
5560 		break;
5561 	default:
5562 		vf->api_ver = IXGBE_API_VER_UNKNOWN;
5563 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5564 		break;
5565 	}
5566 }
5567 
5568 
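/*
** Handle a GET_QUEUES mailbox message (1.1+ API only): report the
** per-VF TX/RX queue counts for the current IOV mode and whether a
** default VLAN forces transparent VLAN tagging.
*/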
5569 static void
5570 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
5571     uint32_t *msg)
5572 {
5573 	struct ixgbe_hw *hw;
5574 	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
5575 	int num_queues;
5576 
5577 	hw = &adapter->hw;
5578 
5579 	/* GET_QUEUES is not supported on pre-1.1 APIs. */
5580 	switch (vf->api_ver) {
5581 	case IXGBE_API_VER_1_0:
5582 	case IXGBE_API_VER_UNKNOWN:
5583 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5584 		return;
5585 	}
5586 
5587 	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
5588 	    IXGBE_VT_MSGTYPE_CTS;
5589 
5590 	num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
5591 	resp[IXGBE_VF_TX_QUEUES] = num_queues;
5592 	resp[IXGBE_VF_RX_QUEUES] = num_queues;
5593 	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
5594 	resp[IXGBE_VF_DEF_QUEUE] = 0;
5595 
5596 	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
5597 }
5598 
5599 
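/*
** Read and dispatch a single mailbox message from a VF.  VF_RESET is
** always honored; all other messages require the VF to have
** clear-to-send (CTS) status.
*/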
5600 static void
5601 ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
5602 {
5603 	struct ixgbe_hw *hw;
5604 	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
5605 	int error;
5606 
5607 	hw = &adapter->hw;
5608 
5609 	error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
5610 
5611 	if (error != 0)
5612 		return;
5613 
5614 	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
5615 	    adapter->ifp->if_xname, msg[0], vf->pool);
5616 	if (msg[0] == IXGBE_VF_RESET) {
5617 		ixgbe_vf_reset_msg(adapter, vf, msg);
5618 		return;
5619 	}
5620 
5621 	if (!(vf->flags & IXGBE_VF_CTS)) {
5622 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5623 		return;
5624 	}
5625 
5626 	switch (msg[0] & IXGBE_VT_MSG_MASK) {
5627 	case IXGBE_VF_SET_MAC_ADDR:
5628 		ixgbe_vf_set_mac(adapter, vf, msg);
5629 		break;
5630 	case IXGBE_VF_SET_MULTICAST:
5631 		ixgbe_vf_set_mc_addr(adapter, vf, msg);
5632 		break;
5633 	case IXGBE_VF_SET_VLAN:
5634 		ixgbe_vf_set_vlan(adapter, vf, msg);
5635 		break;
5636 	case IXGBE_VF_SET_LPE:
5637 		ixgbe_vf_set_lpe(adapter, vf, msg);
5638 		break;
5639 	case IXGBE_VF_SET_MACVLAN:
5640 		ixgbe_vf_set_macvlan(adapter, vf, msg);
5641 		break;
5642 	case IXGBE_VF_API_NEGOTIATE:
5643 		ixgbe_vf_api_negotiate(adapter, vf, msg);
5644 		break;
5645 	case IXGBE_VF_GET_QUEUES:
5646 		ixgbe_vf_get_queues(adapter, vf, msg);
5647 		break;
5648 	default:
5649 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5650 	}
5651 }
5652 
5653 
5654 /*
5655  * Taskqueue task for handling VF -> PF mailbox messages.
5656  */
5657 static void
5658 ixgbe_handle_mbx(void *context, int pending)
5659 {
5660 	struct adapter *adapter;
5661 	struct ixgbe_hw *hw;
5662 	struct ixgbe_vf *vf;
5663 	int i;
5664 
5665 	adapter = context;
5666 	hw = &adapter->hw;
5667 
5668 	IXGBE_CORE_LOCK(adapter);
5669 	for (i = 0; i < adapter->num_vfs; i++) {
5670 		vf = &adapter->vfs[i];
5671 
5672 		if (vf->flags & IXGBE_VF_ACTIVE) {
5673 			if (ixgbe_check_for_rst(hw, vf->pool) == 0)
5674 				ixgbe_process_vf_reset(adapter, vf);
5675 
5676 			if (ixgbe_check_for_msg(hw, vf->pool) == 0)
5677 				ixgbe_process_vf_msg(adapter, vf);
5678 
5679 			if (ixgbe_check_for_ack(hw, vf->pool) == 0)
5680 				ixgbe_process_vf_ack(adapter, vf);
5681 		}
5682 	}
5683 	IXGBE_CORE_UNLOCK(adapter);
5684 }
5685 
5686 
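/*
** Called by the PCI SR-IOV framework when VFs are being created:
** allocate per-VF state and re-initialize the adapter in
** virtualization mode.
*/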
5687 static int
5688 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
5689 {
5690 	struct adapter *adapter;
5691 	enum ixgbe_iov_mode mode;
5692 
5693 	adapter = device_get_softc(dev);
5694 	adapter->num_vfs = num_vfs;
5695 	mode = ixgbe_get_iov_mode(adapter);
5696 
5697 	if (num_vfs > ixgbe_max_vfs(mode)) {
5698 		adapter->num_vfs = 0;
5699 		return (ENOSPC);
5700 	}
5701 
5702 	IXGBE_CORE_LOCK(adapter);
5703 
5704 	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
5705 	    M_NOWAIT | M_ZERO);
5706 
5707 	if (adapter->vfs == NULL) {
5708 		adapter->num_vfs = 0;
5709 		IXGBE_CORE_UNLOCK(adapter);
5710 		return (ENOMEM);
5711 	}
5712 
5713 	ixgbe_init_locked(adapter);
5714 
5715 	IXGBE_CORE_UNLOCK(adapter);
5716 
5717 	return (0);
5718 }
5719 
5720 
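/*
** Called by the PCI SR-IOV framework when VFs are destroyed: restore
** rx/tx to the PF alone, disable the virtualization engine and free
** per-VF state.
*/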
5721 static void
5722 ixgbe_uninit_iov(device_t dev)
5723 {
5724 	struct ixgbe_hw *hw;
5725 	struct adapter *adapter;
5726 	uint32_t pf_reg, vf_reg;
5727 
5728 	adapter = device_get_softc(dev);
5729 	hw = &adapter->hw;
5730 
5731 	IXGBE_CORE_LOCK(adapter);
5732 
5733 	/* Enable rx/tx for the PF and disable it for all VFs. */
5734 	pf_reg = IXGBE_VF_INDEX(adapter->pool);
5735 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
5736 	    IXGBE_VF_BIT(adapter->pool));
5737 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
5738 	    IXGBE_VF_BIT(adapter->pool));
5739 
5740 	if (pf_reg == 0)
5741 		vf_reg = 1;
5742 	else
5743 		vf_reg = 0;
5744 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
5745 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
5746 
5747 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
5748 
5749 	free(adapter->vfs, M_IXGBE);
5750 	adapter->vfs = NULL;
5751 	adapter->num_vfs = 0;
5752 
5753 	IXGBE_CORE_UNLOCK(adapter);
5754 }
5755 
5756 
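/*
** Program the hardware for the chosen IOV pool layout (32 or 64
** pools): VMDq/RSS mode in MRQC, VT modes in MTQC, GCR_EXT and GPIE,
** then PF rx/tx enables, VM-to-VM loopback and per-VF init.
*/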
5757 static void
5758 ixgbe_initialize_iov(struct adapter *adapter)
5759 {
5760 	struct ixgbe_hw *hw = &adapter->hw;
5761 	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
5762 	enum ixgbe_iov_mode mode;
5763 	int i;
5764 
5765 	mode = ixgbe_get_iov_mode(adapter);
5766 	if (mode == IXGBE_NO_VM)
5767 		return;
5768 
5769 	IXGBE_CORE_LOCK_ASSERT(adapter);
5770 
5771 	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
5772 	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
5773 
5774 	switch (mode) {
5775 	case IXGBE_64_VM:
5776 		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
5777 		break;
5778 	case IXGBE_32_VM:
5779 		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
5780 		break;
5781 	default:
5782 		panic("Unexpected SR-IOV mode %d", mode);
5783 	}
5784 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
5785 
5786 	mtqc = IXGBE_MTQC_VT_ENA;
5787 	switch (mode) {
5788 	case IXGBE_64_VM:
5789 		mtqc |= IXGBE_MTQC_64VF;
5790 		break;
5791 	case IXGBE_32_VM:
5792 		mtqc |= IXGBE_MTQC_32VF;
5793 		break;
5794 	default:
5795 		panic("Unexpected SR-IOV mode %d", mode);
5796 	}
5797 	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
5798 
5800 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
5801 	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
5802 	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
5803 	switch (mode) {
5804 	case IXGBE_64_VM:
5805 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
5806 		break;
5807 	case IXGBE_32_VM:
5808 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
5809 		break;
5810 	default:
5811 		panic("Unexpected SR-IOV mode %d", mode);
5812 	}
5813 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
5814 
5816 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5817 	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5818 	switch (mode) {
5819 	case IXGBE_64_VM:
5820 		gpie |= IXGBE_GPIE_VTMODE_64;
5821 		break;
5822 	case IXGBE_32_VM:
5823 		gpie |= IXGBE_GPIE_VTMODE_32;
5824 		break;
5825 	default:
5826 		panic("Unexpected SR-IOV mode %d", mode);
5827 	}
5828 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5829 
5830 	/* Enable rx/tx for the PF. */
5831 	vf_reg = IXGBE_VF_INDEX(adapter->pool);
5832 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
5833 	    IXGBE_VF_BIT(adapter->pool));
5834 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
5835 	    IXGBE_VF_BIT(adapter->pool));
5836 
5837 	/* Allow VM-to-VM communication. */
5838 	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
5839 
5840 	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
5841 	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
5842 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
5843 
5844 	for (i = 0; i < adapter->num_vfs; i++)
5845 		ixgbe_init_vf(adapter, &adapter->vfs[i]);
5846 }
5847 
5848 
5849 /*
5850 ** Check the max frame setting of all active VFs.
5851 */
5852 static void
5853 ixgbe_recalculate_max_frame(struct adapter *adapter)
5854 {
5855 	struct ixgbe_vf *vf;
5856 
5857 	IXGBE_CORE_LOCK_ASSERT(adapter);
5858 
5859 	for (int i = 0; i < adapter->num_vfs; i++) {
5860 		vf = &adapter->vfs[i];
5861 		if (vf->flags & IXGBE_VF_ACTIVE)
5862 			ixgbe_update_max_frame(adapter, vf->max_frame_size);
5863 	}
5864 }
5865 
5866 
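/*
** Bring up a single active VF: unmask its mailbox interrupt, restore
** its default VLAN, MAC address and rx/tx enables, then ping it so it
** knows the PF has (re)started.
*/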
5867 static void
5868 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
5869 {
5870 	struct ixgbe_hw *hw;
5871 	uint32_t vf_index, pfmbimr;
5872 
5873 	IXGBE_CORE_LOCK_ASSERT(adapter);
5874 
5875 	hw = &adapter->hw;
5876 
5877 	if (!(vf->flags & IXGBE_VF_ACTIVE))
5878 		return;
5879 
5880 	vf_index = IXGBE_VF_INDEX(vf->pool);
5881 	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
5882 	pfmbimr |= IXGBE_VF_BIT(vf->pool);
5883 	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
5884 
5885 	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
5886 
5887 	/* XXX multicast addresses */
5888 
5889 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5890 		ixgbe_set_rar(&adapter->hw, vf->rar_index,
5891 		    vf->ether_addr, vf->pool, TRUE);
5892 	}
5893 
5894 	ixgbe_vf_enable_transmit(adapter, vf);
5895 	ixgbe_vf_enable_receive(adapter, vf);
5896 
5897 	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
5898 }
5899 
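/*
** Called by the PCI SR-IOV framework for each VF being created:
** initialize per-VF state from the supplied nvlist configuration.
*/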
5900 static int
5901 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
5902 {
5903 	struct adapter *adapter;
5904 	struct ixgbe_vf *vf;
5905 	const void *mac;
5906 
5907 	adapter = device_get_softc(dev);
5908 
5909 	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
5910 	    vfnum, adapter->num_vfs));
5911 
5912 	IXGBE_CORE_LOCK(adapter);
5913 	vf = &adapter->vfs[vfnum];
5914 	vf->pool = vfnum;
5915 
5916 	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
5917 	vf->rar_index = vfnum + 1;
5918 	vf->default_vlan = 0;
5919 	vf->max_frame_size = ETHER_MAX_LEN;
5920 	ixgbe_update_max_frame(adapter, vf->max_frame_size);
5921 
5922 	if (nvlist_exists_binary(config, "mac-addr")) {
5923 		mac = nvlist_get_binary(config, "mac-addr", NULL);
5924 		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5925 		if (nvlist_get_bool(config, "allow-set-mac"))
5926 			vf->flags |= IXGBE_VF_CAP_MAC;
5927 	} else
5928 		/*
5929 		 * If the administrator has not specified a MAC address then
5930 		 * we must allow the VF to choose one.
5931 		 */
5932 		vf->flags |= IXGBE_VF_CAP_MAC;
5933 
5934 	vf->flags |= IXGBE_VF_ACTIVE;
5935 
5936 	ixgbe_init_vf(adapter, vf);
5937 	IXGBE_CORE_UNLOCK(adapter);
5938 
5939 	return (0);
5940 }
5941 #endif /* PCI_IOV */
5942 
5943