xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision ec0e626bafb335b30c499d06066997f54b10c092)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2015, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_rss.h"
40 #endif
41 
42 #include "ixgbe.h"
43 
44 #ifdef	RSS
45 #include <net/rss_config.h>
46 #include <netinet/in_rss.h>
47 #endif
48 
49 /*********************************************************************
50  *  Set this to one to display debug statistics
51  *********************************************************************/
52 int             ixgbe_display_debug_stats = 0;
53 
54 /*********************************************************************
55  *  Driver version
56  *********************************************************************/
57 char ixgbe_driver_version[] = "2.7.4";
58 
59 /*********************************************************************
60  *  PCI Device ID Table
61  *
62  *  Used by probe to select devices to load on
63  *  Last field stores an index into ixgbe_strings
64  *  Last entry must be all 0s
65  *
66  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
67  *********************************************************************/
68 
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598-based devices */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599-based devices */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540-based devices */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550-based devices */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
104 
105 /*********************************************************************
106  *  Table of branding strings
107  *********************************************************************/
108 
109 static char    *ixgbe_strings[] = {
110 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
111 };
112 
113 /*********************************************************************
114  *  Function prototypes
115  *********************************************************************/
116 static int      ixgbe_probe(device_t);
117 static int      ixgbe_attach(device_t);
118 static int      ixgbe_detach(device_t);
119 static int      ixgbe_shutdown(device_t);
120 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
121 static void	ixgbe_init(void *);
122 static void	ixgbe_init_locked(struct adapter *);
123 static void     ixgbe_stop(void *);
124 #if __FreeBSD_version >= 1100036
125 static uint64_t	ixgbe_get_counter(struct ifnet *, ift_counter);
126 #endif
127 static void	ixgbe_add_media_types(struct adapter *);
128 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
129 static int      ixgbe_media_change(struct ifnet *);
130 static void     ixgbe_identify_hardware(struct adapter *);
131 static int      ixgbe_allocate_pci_resources(struct adapter *);
132 static void	ixgbe_get_slot_info(struct ixgbe_hw *);
133 static int      ixgbe_allocate_msix(struct adapter *);
134 static int      ixgbe_allocate_legacy(struct adapter *);
135 static int	ixgbe_setup_msix(struct adapter *);
136 static void	ixgbe_free_pci_resources(struct adapter *);
137 static void	ixgbe_local_timer(void *);
138 static int	ixgbe_setup_interface(device_t, struct adapter *);
139 static void	ixgbe_config_link(struct adapter *);
140 static void	ixgbe_rearm_queues(struct adapter *, u64);
141 
142 static void     ixgbe_initialize_transmit_units(struct adapter *);
143 static void     ixgbe_initialize_receive_units(struct adapter *);
144 static void	ixgbe_enable_rx_drop(struct adapter *);
145 static void	ixgbe_disable_rx_drop(struct adapter *);
146 
147 static void     ixgbe_enable_intr(struct adapter *);
148 static void     ixgbe_disable_intr(struct adapter *);
149 static void     ixgbe_update_stats_counters(struct adapter *);
150 static void     ixgbe_set_promisc(struct adapter *);
151 static void     ixgbe_set_multi(struct adapter *);
152 static void     ixgbe_update_link_status(struct adapter *);
153 static int	ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
154 static int	ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
155 static int	ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS);
156 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
157 static void	ixgbe_configure_ivars(struct adapter *);
158 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
159 
160 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
161 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
162 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
163 
164 static void     ixgbe_add_hw_stats(struct adapter *adapter);
165 
166 /* Support for pluggable optic modules */
167 static bool	ixgbe_sfp_probe(struct adapter *);
168 static void	ixgbe_setup_optics(struct adapter *);
169 
170 /* Legacy (single vector) interrupt handler */
171 static void	ixgbe_legacy_irq(void *);
172 
173 /* The MSI/X Interrupt handlers */
174 static void	ixgbe_msix_que(void *);
175 static void	ixgbe_msix_link(void *);
176 
177 /* Deferred interrupt tasklets */
178 static void	ixgbe_handle_que(void *, int);
179 static void	ixgbe_handle_link(void *, int);
180 static void	ixgbe_handle_msf(void *, int);
181 static void	ixgbe_handle_mod(void *, int);
182 
183 #ifdef IXGBE_FDIR
184 static void	ixgbe_reinit_fdir(void *, int);
185 #endif
186 
187 
188 /* Missing shared code prototype */
189 extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
190 
191 /*********************************************************************
192  *  FreeBSD Device Interface Entry Points
193  *********************************************************************/
194 
195 static device_method_t ix_methods[] = {
196 	/* Device interface */
197 	DEVMETHOD(device_probe, ixgbe_probe),
198 	DEVMETHOD(device_attach, ixgbe_attach),
199 	DEVMETHOD(device_detach, ixgbe_detach),
200 	DEVMETHOD(device_shutdown, ixgbe_shutdown),
201 	DEVMETHOD_END
202 };
203 
204 static driver_t ix_driver = {
205 	"ix", ix_methods, sizeof(struct adapter),
206 };
207 
208 devclass_t ix_devclass;
209 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
210 
211 MODULE_DEPEND(ix, pci, 1, 1, 1);
212 MODULE_DEPEND(ix, ether, 1, 1, 1);
213 
214 /*
215 ** TUNEABLE PARAMETERS:
216 */
217 
218 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
219 		   "IXGBE driver parameters");
220 
221 /*
222 ** AIM: Adaptive Interrupt Moderation
223 ** which means that the interrupt rate
224 ** is varied over time based on the
225 ** traffic for that interrupt vector
226 */
227 static int ixgbe_enable_aim = TRUE;
228 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
229     "Enable adaptive interrupt moderation");
230 
231 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
232 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
233     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
234 
235 /* How many packets rxeof tries to clean at a time */
236 static int ixgbe_rx_process_limit = 256;
237 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
238 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
239     &ixgbe_rx_process_limit, 0,
240     "Maximum number of received packets to process at a time,"
241     "-1 means unlimited");
242 
243 /* How many packets txeof tries to clean at a time */
244 static int ixgbe_tx_process_limit = 256;
245 TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
246 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
247     &ixgbe_tx_process_limit, 0,
248     "Maximum number of sent packets to process at a time,"
249     "-1 means unlimited");
250 
251 /*
252 ** Smart speed setting, default to on
253 ** this only works as a compile option
254 ** right now as its during attach, set
255 ** this to 'ixgbe_smart_speed_off' to
256 ** disable.
257 */
258 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
259 
260 /*
261  * MSIX should be the default for best performance,
262  * but this allows it to be forced off for testing.
263  */
264 static int ixgbe_enable_msix = 1;
265 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
266     "Enable MSI-X interrupts");
267 
268 /*
269  * Number of Queues, can be set to 0,
270  * it then autoconfigures based on the
271  * number of cpus with a max of 8. This
272 ** can be overridden manually here.
273  */
274 static int ixgbe_num_queues = 0;
275 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
276     "Number of queues to configure, 0 indicates autoconfigure");
277 
278 /*
279 ** Number of TX descriptors per ring,
280 ** setting higher than RX as this seems
281 ** the better performing choice.
282 */
283 static int ixgbe_txd = PERFORM_TXD;
284 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
285     "Number of transmit descriptors per queue");
286 
287 /* Number of RX descriptors per ring */
288 static int ixgbe_rxd = PERFORM_RXD;
289 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
290     "Number of receive descriptors per queue");
291 
292 /*
293 ** Defining this on will allow the use
294 ** of unsupported SFP+ modules, note that
295 ** doing so you are on your own :)
296 */
297 static int allow_unsupported_sfp = FALSE;
298 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
299 
300 /* Keep running tab on them for sanity check */
301 static int ixgbe_total_ports;
302 
303 #ifdef IXGBE_FDIR
304 /*
305 ** Flow Director actually 'steals'
306 ** part of the packet buffer as its
307 ** filter pool, this variable controls
308 ** how much it uses:
309 **  0 = 64K, 1 = 128K, 2 = 256K
310 */
311 static int fdir_pballoc = 1;
312 #endif
313 
314 #ifdef DEV_NETMAP
315 /*
316  * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
317  * be a reference on how to implement netmap support in a driver.
318  * Additional comments are in ixgbe_netmap.h .
319  *
320  * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
321  * that extend the standard driver.
322  */
323 #include <dev/netmap/ixgbe_netmap.h>
324 #endif /* DEV_NETMAP */
325 
326 /*********************************************************************
327  *  Device identification routine
328  *
329  *  ixgbe_probe determines if the driver should be loaded on
330  *  adapter based on PCI vendor/device id of the adapter.
331  *
332  *  return BUS_PROBE_DEFAULT on success, positive on failure
333  *********************************************************************/
334 
335 static int
336 ixgbe_probe(device_t dev)
337 {
338 	ixgbe_vendor_info_t *ent;
339 
340 	u16	pci_vendor_id = 0;
341 	u16	pci_device_id = 0;
342 	u16	pci_subvendor_id = 0;
343 	u16	pci_subdevice_id = 0;
344 	char	adapter_name[256];
345 
346 	INIT_DEBUGOUT("ixgbe_probe: begin");
347 
348 	pci_vendor_id = pci_get_vendor(dev);
349 	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
350 		return (ENXIO);
351 
352 	pci_device_id = pci_get_device(dev);
353 	pci_subvendor_id = pci_get_subvendor(dev);
354 	pci_subdevice_id = pci_get_subdevice(dev);
355 
356 	ent = ixgbe_vendor_info_array;
357 	while (ent->vendor_id != 0) {
358 		if ((pci_vendor_id == ent->vendor_id) &&
359 		    (pci_device_id == ent->device_id) &&
360 
361 		    ((pci_subvendor_id == ent->subvendor_id) ||
362 		     (ent->subvendor_id == 0)) &&
363 
364 		    ((pci_subdevice_id == ent->subdevice_id) ||
365 		     (ent->subdevice_id == 0))) {
366 			sprintf(adapter_name, "%s, Version - %s",
367 				ixgbe_strings[ent->index],
368 				ixgbe_driver_version);
369 			device_set_desc_copy(dev, adapter_name);
370 			++ixgbe_total_ports;
371 			return (BUS_PROBE_DEFAULT);
372 		}
373 		ent++;
374 	}
375 	return (ENXIO);
376 }
377 
378 /*********************************************************************
379  *  Device initialization routine
380  *
381  *  The attach entry point is called when the driver is being loaded.
382  *  This routine identifies the type of hardware, allocates all resources
383  *  and initializes the hardware.
384  *
385  *  return 0 on success, positive on failure
386  *********************************************************************/
387 
388 static int
389 ixgbe_attach(device_t dev)
390 {
391 	struct adapter *adapter;
392 	struct ixgbe_hw *hw;
393 	int             error = 0;
394 	u16		csum;
395 	u32		ctrl_ext;
396 
397 	INIT_DEBUGOUT("ixgbe_attach: begin");
398 
399 	/* Allocate, clear, and link in our adapter structure */
400 	adapter = device_get_softc(dev);
401 	adapter->dev = adapter->osdep.dev = dev;
402 	hw = &adapter->hw;
403 
404 	/* Core Lock Init*/
405 	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
406 
407 	/* SYSCTL APIs */
408 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
409 			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
410 			OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
411 			adapter, 0, ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
412 
413         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
414 			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
415 			OID_AUTO, "enable_aim", CTLFLAG_RW,
416 			&ixgbe_enable_aim, 1, "Interrupt Moderation");
417 
418 	/*
419 	** Allow a kind of speed control by forcing the autoneg
420 	** advertised speed list to only a certain value, this
421 	** supports 1G on 82599 devices, and 100Mb on x540.
422 	*/
423 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
424 			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
425 			OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
426 			adapter, 0, ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
427 
428 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
429 			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
430 			OID_AUTO, "ts", CTLTYPE_INT | CTLFLAG_RW, adapter,
431 			0, ixgbe_set_thermal_test, "I", "Thermal Test");
432 
433 	/* Set up the timer callout */
434 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
435 
436 	/* Determine hardware revision */
437 	ixgbe_identify_hardware(adapter);
438 
439 	/* Do base PCI setup - map BAR0 */
440 	if (ixgbe_allocate_pci_resources(adapter)) {
441 		device_printf(dev, "Allocation of PCI resources failed\n");
442 		error = ENXIO;
443 		goto err_out;
444 	}
445 
446 	/* Do descriptor calc and sanity checks */
447 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
448 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
449 		device_printf(dev, "TXD config issue, using default!\n");
450 		adapter->num_tx_desc = DEFAULT_TXD;
451 	} else
452 		adapter->num_tx_desc = ixgbe_txd;
453 
454 	/*
455 	** With many RX rings it is easy to exceed the
456 	** system mbuf allocation. Tuning nmbclusters
457 	** can alleviate this.
458 	*/
459 	if (nmbclusters > 0) {
460 		int s;
461 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
462 		if (s > nmbclusters) {
463 			device_printf(dev, "RX Descriptors exceed "
464 			    "system mbuf max, using default instead!\n");
465 			ixgbe_rxd = DEFAULT_RXD;
466 		}
467 	}
468 
469 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
470 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
471 		device_printf(dev, "RXD config issue, using default!\n");
472 		adapter->num_rx_desc = DEFAULT_RXD;
473 	} else
474 		adapter->num_rx_desc = ixgbe_rxd;
475 
476 	/* Allocate our TX/RX Queues */
477 	if (ixgbe_allocate_queues(adapter)) {
478 		error = ENOMEM;
479 		goto err_out;
480 	}
481 
482 	/* Allocate multicast array memory. */
483 	adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
484 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
485 	if (adapter->mta == NULL) {
486 		device_printf(dev, "Can not allocate multicast setup array\n");
487 		error = ENOMEM;
488 		goto err_late;
489 	}
490 
491 	/* Initialize the shared code */
492 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
493 	error = ixgbe_init_shared_code(hw);
494 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
495 		/*
496 		** No optics in this port, set up
497 		** so the timer routine will probe
498 		** for later insertion.
499 		*/
500 		adapter->sfp_probe = TRUE;
501 		error = 0;
502 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
503 		device_printf(dev,"Unsupported SFP+ module detected!\n");
504 		error = EIO;
505 		goto err_late;
506 	} else if (error) {
507 		device_printf(dev,"Unable to initialize the shared code\n");
508 		error = EIO;
509 		goto err_late;
510 	}
511 
512 	/* Make sure we have a good EEPROM before we read from it */
513 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
514 		device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
515 		error = EIO;
516 		goto err_late;
517 	}
518 
519 	error = ixgbe_init_hw(hw);
520 	switch (error) {
521 	case IXGBE_ERR_EEPROM_VERSION:
522 		device_printf(dev, "This device is a pre-production adapter/"
523 		    "LOM.  Please be aware there may be issues associated "
524 		    "with your hardware.\n If you are experiencing problems "
525 		    "please contact your Intel or hardware representative "
526 		    "who provided you with this hardware.\n");
527 		break;
528 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
529 		device_printf(dev,"Unsupported SFP+ Module\n");
530 		error = EIO;
531 		goto err_late;
532 	case IXGBE_ERR_SFP_NOT_PRESENT:
533 		device_printf(dev,"No SFP+ Module found\n");
534 		/* falls thru */
535 	default:
536 		break;
537 	}
538 
539 	/* Detect and set physical type */
540 	ixgbe_setup_optics(adapter);
541 
542 	if ((adapter->msix > 1) && (ixgbe_enable_msix))
543 		error = ixgbe_allocate_msix(adapter);
544 	else
545 		error = ixgbe_allocate_legacy(adapter);
546 	if (error)
547 		goto err_late;
548 
549 	/* Setup OS specific network interface */
550 	if (ixgbe_setup_interface(dev, adapter) != 0)
551 		goto err_late;
552 
553 	/* Initialize statistics */
554 	ixgbe_update_stats_counters(adapter);
555 
556 	/* Register for VLAN events */
557 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
558 	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
559 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
560 	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
561 
562         /*
563 	** Check PCIE slot type/speed/width
564 	*/
565 	ixgbe_get_slot_info(hw);
566 
567 
568 	/* Set an initial default flow control value */
569 	adapter->fc = ixgbe_fc_full;
570 
571 	/* let hardware know driver is loaded */
572 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
573 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
574 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
575 
576 	ixgbe_add_hw_stats(adapter);
577 
578 #ifdef DEV_NETMAP
579 	ixgbe_netmap_attach(adapter);
580 #endif /* DEV_NETMAP */
581 	INIT_DEBUGOUT("ixgbe_attach: end");
582 	return (0);
583 
584 err_late:
585 	ixgbe_free_transmit_structures(adapter);
586 	ixgbe_free_receive_structures(adapter);
587 err_out:
588 	if (adapter->ifp != NULL)
589 		if_free(adapter->ifp);
590 	ixgbe_free_pci_resources(adapter);
591 	free(adapter->mta, M_DEVBUF);
592 	return (error);
593 }
594 
595 /*********************************************************************
596  *  Device removal routine
597  *
598  *  The detach entry point is called when the driver is being removed.
599  *  This routine stops the adapter and deallocates all the resources
600  *  that were allocated for driver operation.
601  *
602  *  return 0 on success, positive on failure
603  *********************************************************************/
604 
static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Quiesce the hardware under the core lock before teardown */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free each per-queue taskqueue (TX task first) */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
#ifndef IXGBE_LEGACY_TX
			taskqueue_drain(que->tq, &txr->txq_task);
#endif
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
#ifdef IXGBE_FDIR
		taskqueue_drain(adapter->tq, &adapter->fdir_task);
#endif
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the network stack, then stop the watchdog timer */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	/* Release descriptor rings and the multicast array */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	/* Core lock is destroyed last; nothing may touch it after this */
	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}
673 
674 /*********************************************************************
675  *
676  *  Shutdown entry point
677  *
678  **********************************************************************/
679 
680 static int
681 ixgbe_shutdown(device_t dev)
682 {
683 	struct adapter *adapter = device_get_softc(dev);
684 	IXGBE_CORE_LOCK(adapter);
685 	ixgbe_stop(adapter);
686 	IXGBE_CORE_UNLOCK(adapter);
687 	return (0);
688 }
689 
690 
691 /*********************************************************************
692  *  Ioctl entry point
693  *
694  *  ixgbe_ioctl is called when the user wants to configure the
695  *  interface.
696  *
697  *  return 0 on success, positive on failure
698  **********************************************************************/
699 
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

        case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			/* Only init if not already running */
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
#endif
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		/*
		 * Only the upper bound is checked here; the new MTU is
		 * applied and the interface reinitialized under the core
		 * lock so the rings pick up the new frame size.
		 */
		if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Already running: only reprogram promisc/
				 * allmulti if those bits actually changed.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
                                }
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		/* Remember flags so the next delta can be computed */
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Reload the multicast filter with interrupts off */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		/* Toggle each requested capability, then reinit if running */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		/* Read SFP module data over I2C on behalf of userland */
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;
		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the standard SFP EEPROM/diagnostic addresses */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		/* Bound the requested length before filling i2c.data */
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		/* Everything else is handled by the generic ether layer */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
843 
844 /*********************************************************************
845  *  Init entry point
846  *
847  *  This routine is used in two ways. It is used by the stack as
848  *  init entry point in network interface structure. It is also used
849  *  by the driver as a hw/sw initialization routine to get to a
850  *  consistent state.
851  *
852  *  return 0 on success, positive on failure
853  **********************************************************************/
854 #define IXGBE_MHADD_MFS_SHIFT 16
855 
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t 	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		k, txdctl, mhadd, gpie;
	u32		rxdctl, rxctrl;

	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");
	/* Clear the stopped flag first so ixgbe_stop_adapter() really runs */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
        callout_stop(&adapter->timer);

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	      IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		/* SCTP checksum offload is not available on 82598 */
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev,"Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo frames
	*/
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else if (adapter->max_frame_size <= 9216)
		adapter->rx_mbuf_sz = MJUM9BYTES;
	else
		adapter->rx_mbuf_sz = MJUM16BYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev,"Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);

	/* Enable Fan Failure Interrupt */
	gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);

	/* Add for Module detection */
	if (hw->mac.type == ixgbe_mac_82599EB)
		gpie |= IXGBE_SDP2_GPIEN_BY_MAC(hw);

	/* Thermal Failure Detection */
	if (hw->mac.type == ixgbe_mac_X540)
		gpie |= IXGBE_SDP0_GPIEN_BY_MAC(hw);

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set MTU size (max frame field of MHADD) for jumbo frames */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */

	for (int i = 0; i < adapter->num_queues; i++) {
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
	}

	for (int i = 0; i < adapter->num_queues; i++) {
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
		/* Poll up to ~10ms for the ring-enable bit to latch */
		for (k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
		} else
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Start the 1Hz stats/watchdog timer */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix)  {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
                ixgbe_set_ivar(adapter, 0, 0, 0);
                ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
	}
#endif

	/*
	** Check on any SFP devices that
	** need to be kick-started
	*/
	if (hw->phy.type == ixgbe_phy_none) {
		int err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                	device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
        	}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/*
	 * Hardware Packet Buffer & Flow Control setup:
	 * compute high/low watermarks from the max frame size using the
	 * delay-value formulas (X540-class parts use their own variant).
	 */
	{
		u32 rxpb, frame, size, tmp;

		frame = adapter->max_frame_size;

		/* Calculate High Water */
		switch (hw->mac.type) {
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_a:
		case ixgbe_mac_X550EM_x:
			tmp = IXGBE_DV_X540(frame, frame);
			break;
		default:
			tmp = IXGBE_DV(frame, frame);
			break;
		}
		size = IXGBE_BT2KB(tmp);
		rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
		hw->fc.high_water[0] = rxpb - size;

		/* Now calculate Low Water */
		switch (hw->mac.type) {
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_a:
		case ixgbe_mac_X550EM_x:
			tmp = IXGBE_LOW_DV_X540(frame);
			break;
		default:
			tmp = IXGBE_LOW_DV(frame);
			break;
		}
		hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

		hw->fc.requested_mode = adapter->fc;
		hw->fc.pause_time = IXGBE_FC_PAUSE;
		hw->fc.send_xon = TRUE;
	}
	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
}
1132 
/*
 * Stack-facing init entry point: acquire the core lock and run
 * the real initialization in ixgbe_init_locked().
 */
static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1143 
1144 
1145 /*
1146 **
1147 ** MSIX Interrupt Handlers and Tasklets
1148 **
1149 */
1150 
1151 static inline void
1152 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1153 {
1154 	struct ixgbe_hw *hw = &adapter->hw;
1155 	u64	queue = (u64)(1 << vector);
1156 	u32	mask;
1157 
1158 	if (hw->mac.type == ixgbe_mac_82598EB) {
1159                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1160                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1161 	} else {
1162                 mask = (queue & 0xFFFFFFFF);
1163                 if (mask)
1164                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1165                 mask = (queue >> 32);
1166                 if (mask)
1167                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1168 	}
1169 }
1170 
1171 static inline void
1172 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1173 {
1174 	struct ixgbe_hw *hw = &adapter->hw;
1175 	u64	queue = (u64)(1 << vector);
1176 	u32	mask;
1177 
1178 	if (hw->mac.type == ixgbe_mac_82598EB) {
1179                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1180                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1181 	} else {
1182                 mask = (queue & 0xFFFFFFFF);
1183                 if (mask)
1184                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1185                 mask = (queue >> 32);
1186                 if (mask)
1187                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1188 	}
1189 }
1190 
1191 static void
1192 ixgbe_handle_que(void *context, int pending)
1193 {
1194 	struct ix_queue *que = context;
1195 	struct adapter  *adapter = que->adapter;
1196 	struct tx_ring  *txr = que->txr;
1197 	struct ifnet    *ifp = adapter->ifp;
1198 	bool		more;
1199 
1200 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1201 		more = ixgbe_rxeof(que);
1202 		IXGBE_TX_LOCK(txr);
1203 		ixgbe_txeof(txr);
1204 #ifndef IXGBE_LEGACY_TX
1205 		if (!drbr_empty(ifp, txr->br))
1206 			ixgbe_mq_start_locked(ifp, txr);
1207 #else
1208 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1209 			ixgbe_start_locked(txr, ifp);
1210 #endif
1211 		IXGBE_TX_UNLOCK(txr);
1212 	}
1213 
1214 	/* Reenable this interrupt */
1215 	if (que->res != NULL)
1216 		ixgbe_enable_queue(adapter, que->msix);
1217 	else
1218 		ixgbe_enable_intr(adapter);
1219 	return;
1220 }
1221 
1222 
1223 /*********************************************************************
1224  *
1225  *  Legacy Interrupt Service routine
1226  *
1227  **********************************************************************/
1228 
static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct 		tx_ring *txr = adapter->tx_rings;
	bool		more;
	u32       	reg_eicr;


	/* Fetch the pending interrupt causes */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	/* Shared line, not ours: re-enable and return */
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);
		return;
	}

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		/*
		 * NOTE(review): this writes EIMS (interrupt mask set),
		 * whereas the MSIX link handler writes EICR to ack the
		 * same cause — confirm which register is intended here.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change: defer to the link task */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* More RX pending: hand off to the queue task, else re-enable */
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);
	return;
}
1280 
1281 
1282 /*********************************************************************
1283  *
1284  *  MSIX Queue Interrupt Service routine
1285  *
1286  **********************************************************************/
1287 void
1288 ixgbe_msix_que(void *arg)
1289 {
1290 	struct ix_queue	*que = arg;
1291 	struct adapter  *adapter = que->adapter;
1292 	struct ifnet    *ifp = adapter->ifp;
1293 	struct tx_ring	*txr = que->txr;
1294 	struct rx_ring	*rxr = que->rxr;
1295 	bool		more;
1296 	u32		newitr = 0;
1297 
1298 	/* Protect against spurious interrupts */
1299 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1300 		return;
1301 
1302 	ixgbe_disable_queue(adapter, que->msix);
1303 	++que->irqs;
1304 
1305 	more = ixgbe_rxeof(que);
1306 
1307 	IXGBE_TX_LOCK(txr);
1308 	ixgbe_txeof(txr);
1309 #ifdef IXGBE_LEGACY_TX
1310 	if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1311 		ixgbe_start_locked(txr, ifp);
1312 #else
1313 	if (!drbr_empty(ifp, txr->br))
1314 		ixgbe_mq_start_locked(ifp, txr);
1315 #endif
1316 	IXGBE_TX_UNLOCK(txr);
1317 
1318 	/* Do AIM now? */
1319 
1320 	if (ixgbe_enable_aim == FALSE)
1321 		goto no_calc;
1322 	/*
1323 	** Do Adaptive Interrupt Moderation:
1324         **  - Write out last calculated setting
1325 	**  - Calculate based on average size over
1326 	**    the last interval.
1327 	*/
1328         if (que->eitr_setting)
1329                 IXGBE_WRITE_REG(&adapter->hw,
1330                     IXGBE_EITR(que->msix), que->eitr_setting);
1331 
1332         que->eitr_setting = 0;
1333 
1334         /* Idle, do nothing */
1335         if ((txr->bytes == 0) && (rxr->bytes == 0))
1336                 goto no_calc;
1337 
1338 	if ((txr->bytes) && (txr->packets))
1339                	newitr = txr->bytes/txr->packets;
1340 	if ((rxr->bytes) && (rxr->packets))
1341 		newitr = max(newitr,
1342 		    (rxr->bytes / rxr->packets));
1343 	newitr += 24; /* account for hardware frame, crc */
1344 
1345 	/* set an upper boundary */
1346 	newitr = min(newitr, 3000);
1347 
1348 	/* Be nice to the mid range */
1349 	if ((newitr > 300) && (newitr < 1200))
1350 		newitr = (newitr / 3);
1351 	else
1352 		newitr = (newitr / 2);
1353 
1354         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1355                 newitr |= newitr << 16;
1356         else
1357                 newitr |= IXGBE_EITR_CNT_WDIS;
1358 
1359         /* save for next interrupt */
1360         que->eitr_setting = newitr;
1361 
1362         /* Reset state */
1363         txr->bytes = 0;
1364         txr->packets = 0;
1365         rxr->bytes = 0;
1366         rxr->packets = 0;
1367 
1368 no_calc:
1369 	if (more)
1370 		taskqueue_enqueue(que->tq, &que->que_task);
1371 	else
1372 		ixgbe_enable_queue(adapter, que->msix);
1373 	return;
1374 }
1375 
1376 
static void
ixgbe_msix_link(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg_eicr;

	++adapter->vector_irq;

	/* First get the cause */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);

	/* Link status change: defer to the link task */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* Causes below only exist on 82599 and later */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
#ifdef IXGBE_FDIR
		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
                	/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		} else
#endif
		if (reg_eicr & IXGBE_EICR_ECC) {
                	device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
			    "Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		} else

		/* SDP1 = multispeed fiber, SDP2 = module insert/remove */
		if (ixgbe_is_sfp(hw)) {
			if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
				taskqueue_enqueue(adapter->tq, &adapter->msf_task);
			} else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));
				taskqueue_enqueue(adapter->tq, &adapter->mod_task);
			}
		}
        }

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Check for over temp condition */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_a:
		/*
		 * NOTE(review): ixgbe_mac_X550EM_x is not listed here even
		 * though it is handled in the flow-control switches in
		 * ixgbe_init_locked() — confirm whether it should also
		 * receive thermal-sensor handling.
		 */
		if (reg_eicr & IXGBE_EICR_TS) {
			device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
			    "PHY IS SHUT DOWN!!\n");
			device_printf(adapter->dev, "System shutdown required\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
		}
		break;
	default:
		/* Other MACs have no thermal sensor interrupt */
		break;
	}

	/* Re-enable the "other causes" interrupt */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return;
}
1453 
1454 /*********************************************************************
1455  *
1456  *  Media Ioctl callback
1457  *
1458  *  This routine is called whenever the user queries the status of
1459  *  the interface using ifconfig.
1460  *
1461  **********************************************************************/
static void
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report only "valid, ether" and return */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = ixgbe_get_supported_physical_layer(hw);

	/* Map the supported physical layer + current speed to an
	 * ifmedia subtype; each layer group below handles the speeds
	 * it can actually run at. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	** XXX: These need to use the proper media types once
	** they're added.
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_10_5 | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10_2 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_10_5 | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Flow control setting */
	if (adapter->fc == ixgbe_fc_rx_pause || adapter->fc == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (adapter->fc == ixgbe_fc_tx_pause || adapter->fc == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
#endif

	IXGBE_CORE_UNLOCK(adapter);

	return;
}
1579 
1580 /*********************************************************************
1581  *
1582  *  Media Ioctl callback
1583  *
1584  *  This routine is called when the user changes speed/duplex using
1585  *  media/mediopt option with ifconfig.
1586  *
1587  **********************************************************************/
static int
ixgbe_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/*
	** We don't actually need to check against the supported
	** media types of the adapter; ifmedia will take care of
	** that for us.
	** 	NOTE: this relies on falling thru the switch
	**	to get all the values set, it can be confusing.
	*/
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
		case IFM_10G_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			/* FALLTHROUGH: 10G-T also advertises 1G and 10G */
		case IFM_10G_LRM:
		case IFM_10G_SR:  /* KR, too */
		case IFM_10G_LR:
		case IFM_10G_CX4: /* KX4 for now */
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			/* FALLTHROUGH: all 10G media advertise 10G */
		case IFM_10G_TWINAX:
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_1000_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			/* FALLTHROUGH: 1000-T also advertises 1G */
		case IFM_1000_LX:
		case IFM_1000_SX:
		case IFM_1000_CX: /* KX until there's real support */
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_100_TX:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			break;
		default:
			goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* Record advertised speeds as a bitmask: bit2=10G, bit1=1G, bit0=100M */
	adapter->advertise =
		((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
		((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
		((speed & IXGBE_LINK_SPEED_100_FULL) << 0);

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type\n");
	return (EINVAL);
}
1647 
1648 static void
1649 ixgbe_set_promisc(struct adapter *adapter)
1650 {
1651 	u_int32_t       reg_rctl;
1652 	struct ifnet   *ifp = adapter->ifp;
1653 	int		mcnt = 0;
1654 
1655 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1656 	reg_rctl &= (~IXGBE_FCTRL_UPE);
1657 	if (ifp->if_flags & IFF_ALLMULTI)
1658 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1659 	else {
1660 		struct	ifmultiaddr *ifma;
1661 #if __FreeBSD_version < 800000
1662 		IF_ADDR_LOCK(ifp);
1663 #else
1664 		if_maddr_rlock(ifp);
1665 #endif
1666 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1667 			if (ifma->ifma_addr->sa_family != AF_LINK)
1668 				continue;
1669 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1670 				break;
1671 			mcnt++;
1672 		}
1673 #if __FreeBSD_version < 800000
1674 		IF_ADDR_UNLOCK(ifp);
1675 #else
1676 		if_maddr_runlock(ifp);
1677 #endif
1678 	}
1679 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1680 		reg_rctl &= (~IXGBE_FCTRL_MPE);
1681 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1682 
1683 	if (ifp->if_flags & IFF_PROMISC) {
1684 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1685 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1686 	} else if (ifp->if_flags & IFF_ALLMULTI) {
1687 		reg_rctl |= IXGBE_FCTRL_MPE;
1688 		reg_rctl &= ~IXGBE_FCTRL_UPE;
1689 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1690 	}
1691 	return;
1692 }
1693 
1694 
1695 /*********************************************************************
1696  *  Multicast Update
1697  *
1698  *  This routine is called whenever multicast address list is updated.
1699  *
1700  **********************************************************************/
1701 #define IXGBE_RAR_ENTRIES 16
1702 
static void
ixgbe_set_multi(struct adapter *adapter)
{
	u32	fctrl;
	u8	*mta;
	u8	*update_ptr;
	struct	ifmultiaddr *ifma;
	int	mcnt = 0;
	struct ifnet   *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	/* Flatten the membership list into the pre-allocated MTA buffer */
	mta = adapter->mta;
	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES);

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/*
	 * NOTE(review): this unconditional set is immediately overridden
	 * by every branch below except IFF_PROMISC; it appears redundant —
	 * confirm before removing.
	 */
	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    ifp->if_flags & IFF_ALLMULTI) {
		/* Filter overflow or allmulti: fall back to mcast promisc */
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Program exact-match filters only when the list fits */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = mta;
		ixgbe_update_mc_addr_list(&adapter->hw,
		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
	}

	return;
}
1761 
1762 /*
1763  * This is an iterator function now needed by the multicast
1764  * shared code. It simply feeds the shared code routine the
1765  * addresses in the array of ixgbe_set_multi() one by one.
1766  */
1767 static u8 *
1768 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1769 {
1770 	u8 *addr = *update_ptr;
1771 	u8 *newptr;
1772 	*vmdq = 0;
1773 
1774 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1775 	*update_ptr = newptr;
1776 	return addr;
1777 }
1778 
1779 
1780 /*********************************************************************
1781  *  Timer routine
1782  *
1783  *  This routine checks for link status,updates statistics,
1784  *  and runs the watchdog check.
1785  *
1786  **********************************************************************/
1787 
static void
ixgbe_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	int		hung = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	** Check the TX queues status
	**	- mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                                adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixgbe_rearm_queues(adapter, queues);
	}

out:
	/* Reschedule ourselves for one second from now */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log, count the event, and reinitialize */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixgbe_init_locked(adapter);
}
1857 
1858 /*
1859 ** Note: this routine updates the OS on the link state
1860 **	the real check of the hardware only happens with
1861 **	a link interrupt.
1862 */
1863 static void
1864 ixgbe_update_link_status(struct adapter *adapter)
1865 {
1866 	struct ifnet	*ifp = adapter->ifp;
1867 	device_t dev = adapter->dev;
1868 
1869 
1870 	if (adapter->link_up){
1871 		if (adapter->link_active == FALSE) {
1872 			if (bootverbose)
1873 				device_printf(dev,"Link is up %d Gbps %s \n",
1874 				    ((adapter->link_speed == 128)? 10:1),
1875 				    "Full Duplex");
1876 			adapter->link_active = TRUE;
1877 			/* Update any Flow Control changes */
1878 			ixgbe_fc_enable(&adapter->hw);
1879 			if_link_state_change(ifp, LINK_STATE_UP);
1880 		}
1881 	} else { /* Link down */
1882 		if (adapter->link_active == TRUE) {
1883 			if (bootverbose)
1884 				device_printf(dev,"Link is Down\n");
1885 			if_link_state_change(ifp, LINK_STATE_DOWN);
1886 			adapter->link_active = FALSE;
1887 		}
1888 	}
1889 
1890 	return;
1891 }
1892 
1893 
1894 /*********************************************************************
1895  *
1896  *  This routine disables all traffic on the adapter by issuing a
1897  *  global reset on the MAC and deallocates TX/RX buffers.
1898  *
1899  **********************************************************************/
1900 
static void
ixgbe_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	ixgbe_reset_hw(hw);
	/* Clear the stopped flag so ixgbe_stop_adapter() really runs */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
       	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
}
1935 
1936 
1937 /*********************************************************************
1938  *
1939  *  Determine hardware revision.
1940  *
1941  **********************************************************************/
1942 static void
1943 ixgbe_identify_hardware(struct adapter *adapter)
1944 {
1945 	device_t        dev = adapter->dev;
1946 	struct ixgbe_hw *hw = &adapter->hw;
1947 
1948 	/* Save off the information about this board */
1949 	hw->vendor_id = pci_get_vendor(dev);
1950 	hw->device_id = pci_get_device(dev);
1951 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1952 	hw->subsystem_vendor_id =
1953 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
1954 	hw->subsystem_device_id =
1955 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
1956 
1957 	/*
1958 	** Make sure BUSMASTER is set
1959 	*/
1960 	pci_enable_busmaster(dev);
1961 
1962 	/* We need this here to set the num_segs below */
1963 	ixgbe_set_mac_type(hw);
1964 
1965 	/* Pick up the 82599 and VF settings */
1966 	if (hw->mac.type != ixgbe_mac_82598EB) {
1967 		hw->phy.smart_speed = ixgbe_smart_speed;
1968 		adapter->num_segs = IXGBE_82599_SCATTER;
1969 	} else
1970 		adapter->num_segs = IXGBE_82598_SCATTER;
1971 
1972 	return;
1973 }
1974 
1975 /*********************************************************************
1976  *
1977  *  Determine optic type
1978  *
1979  **********************************************************************/
1980 static void
1981 ixgbe_setup_optics(struct adapter *adapter)
1982 {
1983 	struct ixgbe_hw *hw = &adapter->hw;
1984 	int		layer;
1985 
1986 	layer = ixgbe_get_supported_physical_layer(hw);
1987 
1988 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1989 		adapter->optics = IFM_10G_T;
1990 		return;
1991 	}
1992 
1993 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1994 		adapter->optics = IFM_1000_T;
1995 		return;
1996 	}
1997 
1998 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1999 		adapter->optics = IFM_1000_SX;
2000 		return;
2001 	}
2002 
2003 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2004 	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2005 		adapter->optics = IFM_10G_LR;
2006 		return;
2007 	}
2008 
2009 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2010 		adapter->optics = IFM_10G_SR;
2011 		return;
2012 	}
2013 
2014 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2015 		adapter->optics = IFM_10G_TWINAX;
2016 		return;
2017 	}
2018 
2019 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2020 	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2021 		adapter->optics = IFM_10G_CX4;
2022 		return;
2023 	}
2024 
2025 	/* If we get here just set the default */
2026 	adapter->optics = IFM_ETHER | IFM_AUTO;
2027 	return;
2028 }
2029 
2030 /*********************************************************************
2031  *
2032  *  Setup the Legacy or MSI Interrupt handler
2033  *
2034  **********************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct		ix_queue *que = adapter->queues;
#ifndef IXGBE_LEGACY_TX
	struct tx_ring		*txr = adapter->tx_rings;
#endif
	int		error, rid = 0;

	/* MSI RID at 1 */
	if (adapter->msix == 1)
		rid = 1;

	/* We allocate a single interrupt resource */
	adapter->res = bus_alloc_resource_any(dev,
            SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
#ifndef IXGBE_LEGACY_TX
	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
#endif
	/* Per-queue deferred work runs on its own fast taskqueue */
	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
            device_get_nameunit(adapter->dev));

	/* Tasklets for Link, SFP and Multispeed Fiber */
	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
#ifdef IXGBE_FDIR
	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
#endif
	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
	    device_get_nameunit(adapter->dev));

	/* Register the shared legacy/MSI interrupt handler */
	if ((error = bus_setup_intr(dev, adapter->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
            que, &adapter->tag)) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
		    "handler: %d\n", error);
		/* Tear down the taskqueues created above */
		taskqueue_free(que->tq);
		taskqueue_free(adapter->tq);
		que->tq = NULL;
		adapter->tq = NULL;
		return (error);
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
}
2099 
2100 
2101 /*********************************************************************
2102  *
2103  *  Setup MSIX Interrupt resources and handlers
2104  *
2105  **********************************************************************/
2106 static int
2107 ixgbe_allocate_msix(struct adapter *adapter)
2108 {
2109 	device_t        dev = adapter->dev;
2110 	struct 		ix_queue *que = adapter->queues;
2111 	struct  	tx_ring *txr = adapter->tx_rings;
2112 	int 		error, rid, vector = 0;
2113 	int		cpu_id = 0;
2114 #ifdef	RSS
2115 	cpuset_t	cpu_mask;
2116 #endif
2117 
2118 #ifdef	RSS
2119 	/*
2120 	 * If we're doing RSS, the number of queues needs to
2121 	 * match the number of RSS buckets that are configured.
2122 	 *
2123 	 * + If there's more queues than RSS buckets, we'll end
2124 	 *   up with queues that get no traffic.
2125 	 *
2126 	 * + If there's more RSS buckets than queues, we'll end
2127 	 *   up having multiple RSS buckets map to the same queue,
2128 	 *   so there'll be some contention.
2129 	 */
2130 	if (adapter->num_queues != rss_getnumbuckets()) {
2131 		device_printf(dev,
2132 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
2133 		    "; performance will be impacted.\n",
2134 		    __func__,
2135 		    adapter->num_queues,
2136 		    rss_getnumbuckets());
2137 	}
2138 #endif
2139 
2140 
2141 
2142 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2143 		rid = vector + 1;
2144 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2145 		    RF_SHAREABLE | RF_ACTIVE);
2146 		if (que->res == NULL) {
2147 			device_printf(dev,"Unable to allocate"
2148 		    	    " bus resource: que interrupt [%d]\n", vector);
2149 			return (ENXIO);
2150 		}
2151 		/* Set the handler function */
2152 		error = bus_setup_intr(dev, que->res,
2153 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2154 		    ixgbe_msix_que, que, &que->tag);
2155 		if (error) {
2156 			que->res = NULL;
2157 			device_printf(dev, "Failed to register QUE handler");
2158 			return (error);
2159 		}
2160 #if __FreeBSD_version >= 800504
2161 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2162 #endif
2163 		que->msix = vector;
2164 		adapter->active_queues |= (u64)(1 << que->msix);
2165 #ifdef	RSS
2166 		/*
2167 		 * The queue ID is used as the RSS layer bucket ID.
2168 		 * We look up the queue ID -> RSS CPU ID and select
2169 		 * that.
2170 		 */
2171 		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2172 #else
2173 		/*
2174 		 * Bind the msix vector, and thus the
2175 		 * rings to the corresponding cpu.
2176 		 *
2177 		 * This just happens to match the default RSS round-robin
2178 		 * bucket -> queue -> CPU allocation.
2179 		 */
2180 		if (adapter->num_queues > 1)
2181 			cpu_id = i;
2182 #endif
2183 		if (adapter->num_queues > 1)
2184 			bus_bind_intr(dev, que->res, cpu_id);
2185 
2186 #ifdef	RSS
2187 		device_printf(dev,
2188 		    "Bound RSS bucket %d to CPU %d\n",
2189 		    i, cpu_id);
2190 #else
2191 #if 0 // This is too noisy
2192 		device_printf(dev,
2193 		    "Bound queue %d to cpu %d\n",
2194 		    i, cpu_id);
2195 #endif
2196 #endif
2197 
2198 
2199 #ifndef IXGBE_LEGACY_TX
2200 		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2201 #endif
2202 		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2203 		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2204 		    taskqueue_thread_enqueue, &que->tq);
2205 #ifdef	RSS
2206 		CPU_SETOF(cpu_id, &cpu_mask);
2207 		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2208 		    &cpu_mask,
2209 		    "%s (bucket %d)",
2210 		    device_get_nameunit(adapter->dev),
2211 		    cpu_id);
2212 #else
2213 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2214 		    device_get_nameunit(adapter->dev));
2215 #endif
2216 	}
2217 
2218 	/* and Link */
2219 	rid = vector + 1;
2220 	adapter->res = bus_alloc_resource_any(dev,
2221     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2222 	if (!adapter->res) {
2223 		device_printf(dev,"Unable to allocate"
2224     	    " bus resource: Link interrupt [%d]\n", rid);
2225 		return (ENXIO);
2226 	}
2227 	/* Set the link handler function */
2228 	error = bus_setup_intr(dev, adapter->res,
2229 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2230 	    ixgbe_msix_link, adapter, &adapter->tag);
2231 	if (error) {
2232 		adapter->res = NULL;
2233 		device_printf(dev, "Failed to register LINK handler");
2234 		return (error);
2235 	}
2236 #if __FreeBSD_version >= 800504
2237 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2238 #endif
2239 	adapter->vector = vector;
2240 	/* Tasklets for Link, SFP and Multispeed Fiber */
2241 	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2242 	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2243 	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2244 #ifdef IXGBE_FDIR
2245 	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2246 #endif
2247 	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2248 	    taskqueue_thread_enqueue, &adapter->tq);
2249 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2250 	    device_get_nameunit(adapter->dev));
2251 
2252 	return (0);
2253 }
2254 
2255 /*
2256  * Setup Either MSI/X or MSI
2257  */
static int
ixgbe_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, want, queues, msgs;

	/* Override by tuneable */
	if (ixgbe_enable_msix == 0)
		goto msi;

	/* First try MSI/X */
	msgs = pci_msix_count(dev);
	if (msgs == 0)
		goto msi;
	/* Map the MSIX table BAR; try the 82598 location first */
	rid = PCIR_BAR(MSIX_82598_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
       	if (adapter->msix_mem == NULL) {
		rid += 4;	/* 82599 maps in higher BAR */
		adapter->msix_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	}
       	if (adapter->msix_mem == NULL) {
		/* May not be enabled */
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto msi;
	}

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;

#ifdef	RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif

	/* An explicit tuneable overrides the auto-computed count */
	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for Link.
	*/
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
               	device_printf(adapter->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	/* pci_alloc_msix may grant fewer vectors than requested */
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
               	device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", msgs);
		adapter->num_queues = queues;
		return (msgs);
	}
	/*
	** If MSIX alloc failed or provided us with
	** less than needed, free and fall through to MSI
	*/
	pci_release_msi(dev);

msi:
	/* rid is only read here when msix_mem was mapped above */
       	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
       	msgs = 1;
       	if (pci_alloc_msi(dev, &msgs) == 0) {
               	device_printf(adapter->dev,"Using an MSI interrupt\n");
		return (msgs);
	}
	/* 0 tells the caller to fall back to a legacy INTx interrupt */
	device_printf(adapter->dev,"Using a Legacy interrupt\n");
	return (0);
}
2342 
2343 
2344 static int
2345 ixgbe_allocate_pci_resources(struct adapter *adapter)
2346 {
2347 	int             rid;
2348 	device_t        dev = adapter->dev;
2349 
2350 	rid = PCIR_BAR(0);
2351 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2352 	    &rid, RF_ACTIVE);
2353 
2354 	if (!(adapter->pci_mem)) {
2355 		device_printf(dev,"Unable to allocate bus resource: memory\n");
2356 		return (ENXIO);
2357 	}
2358 
2359 	adapter->osdep.mem_bus_space_tag =
2360 		rman_get_bustag(adapter->pci_mem);
2361 	adapter->osdep.mem_bus_space_handle =
2362 		rman_get_bushandle(adapter->pci_mem);
2363 	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2364 
2365 	/* Legacy defaults */
2366 	adapter->num_queues = 1;
2367 	adapter->hw.back = &adapter->osdep;
2368 
2369 	/*
2370 	** Now setup MSI or MSI/X, should
2371 	** return us the number of supported
2372 	** vectors. (Will be 1 for MSI)
2373 	*/
2374 	adapter->msix = ixgbe_setup_msix(adapter);
2375 	return (0);
2376 }
2377 
static void
ixgbe_free_pci_resources(struct adapter * adapter)
{
	struct 		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	/* MSIX table BAR differs between 82598 and later parts */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		memrid = PCIR_BAR(MSIX_82598_BAR);
	else
		memrid = PCIR_BAR(MSIX_82599_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* rid mirrors the vector + 1 used at allocation time */
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		/* MSI uses RID 1, legacy INTx uses RID 0 */
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	/* Release the MSI/MSIX message allocation, if any */
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
2442 
2443 /*********************************************************************
2444  *
2445  *  Setup networking device structure and register an interface.
2446  *
2447  **********************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixgbe_get_counter);
#endif
	/* Transmit entry points: multiqueue by default, legacy if_start otherwise */
#ifndef IXGBE_LEGACY_TX
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	ifp->if_start = ixgbe_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
	IFQ_SET_READY(&ifp->if_snd);
#endif

	ether_ifattach(ifp, adapter->hw.mac.addr);

	/* Derived from the current MTU set by ether_ifattach */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Advertise and enable all supported offload capabilities */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_HWSTATS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixgbe driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
		    ixgbe_media_status);

	ixgbe_add_media_types(adapter);

	/* Autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
}
2522 
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	int layer;

	/* Register one ifmedia entry per supported physical layer */
	layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	/* Both passive and active DA cables report as TWINAX */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
#if 0
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_LX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
#endif

	/*
	** Other (no matching FreeBSD media type):
	** To workaround this, we'll assign these completely
	** inappropriate media types.
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10baseT\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10base2\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_2, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 10base5\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_5, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
		/* Someday, someone will care about you... */
		device_printf(dev, "Media supported: 1000baseBX\n");
	}

	/* Very old */
	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(&adapter->media,
		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		ifmedia_add(&adapter->media,
		    IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	/* Always offer autoselect */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
}
2592 
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	autoneg, err = 0;
	bool	sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * SFP link bring-up is deferred to the mod/msf tasklets
		 * (registered on adapter->tq by the interrupt setup code).
		 */
		if (hw->phy.multispeed_fiber) {
			hw->mac.ops.setup_sfp(hw);
			ixgbe_enable_tx_laser(hw);
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		} else
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
	} else {
		/* Non-SFP: negotiate and set up the link synchronously */
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			goto out;
		autoneg = hw->phy.autoneg_advertised;
		/* Nothing advertised: ask the MAC what it can do */
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
                	err  = hw->mac.ops.get_link_capabilities(hw,
			    &autoneg, &negotiate);
		if (err)
			goto out;
		if (hw->mac.ops.setup_link)
                	err = hw->mac.ops.setup_link(hw,
			    autoneg, adapter->link_up);
	}
out:
	return;
}
2628 
2629 
2630 /*********************************************************************
2631  *
2632  *  Enable transmit units.
2633  *
2634  **********************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring */

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;
		u32	txctrl = 0;

		/* Program the 64-bit ring base address in two halves */
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
		       (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		/* Set the processing limit */
		txr->process_limit = ixgbe_tx_process_limit;

		/* Disable Head Writeback */
		/* TXCTRL register location differs per MAC generation */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
			break;
                }
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;
		/* Enable the TX DMA engine */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
		/* Re-enable the arbiter now that MTQC is programmed */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
}
2704 
static void
ixgbe_initialise_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	uint32_t reta;
	int i, j, queue_id;
	uint32_t rss_key[10];
	uint32_t mrqc;
#ifdef	RSS
	uint32_t rss_hash_config;
#endif

	/* Setup RSS */
	reta = 0;

#ifdef	RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_key);
#else
	/* set up random bits */
	arc4rand(&rss_key, sizeof(rss_key), 0);
#endif

	/* Set up the redirection table */
	/* 128 one-byte entries, packed four per 32-bit RETA register */
	for (i = 0, j = 0; i < 128; i++, j++) {
		/* j cycles round-robin over the configured queues */
		if (j == adapter->num_queues) j = 0;
#ifdef	RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		queue_id = rss_get_indirection_to_bucket(i);
		queue_id = queue_id % adapter->num_queues;
#else
		/*
		 * Multiplying by 0x11 replicates the 4-bit queue index
		 * into both nibbles of the entry byte; assumes
		 * num_queues <= 16 -- TODO confirm the queue cap.
		 */
		queue_id = (j * 0x11);
#endif
		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | ( ((uint32_t) queue_id) << 24);
		/* Flush every 4th entry into its RETA register */
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (int i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
#ifdef	RSS
	mrqc = IXGBE_MRQC_RSSEN;
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
		device_printf(adapter->dev,
		    "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
		    "but not supported\n", __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
#else
	/*
	 * Disable UDP - IP fragments aren't currently being handled
	 * and so we end up with a mix of 2-tuple and 4-tuple
	 * traffic.
	 */
	mrqc = IXGBE_MRQC_RSSEN
	     | IXGBE_MRQC_RSS_FIELD_IPV4
	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
#if 0
	     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
#endif
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
	     | IXGBE_MRQC_RSS_FIELD_IPV6
	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
#if 0
	     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
#endif
	;
#endif /* RSS */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
2808 
2809 
2810 /*********************************************************************
2811  *
2812  *  Setup receive registers and features.
2813  *
2814  **********************************************************************/
2815 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2816 
2817 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
2818 
static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet   *ifp = adapter->ifp;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;


	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
#ifdef DEV_NETMAP
	/* crcstrip is conditional in netmap (in RDRXCTL too ?) */
	if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
#endif /* DEV_NETMAP */
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Buffer size in 1KB units for SRRCTL, rounded up */
	bufsz = (adapter->rx_mbuf_sz +
	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
			       (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);

		/* Set the processing limit */
		rxr->process_limit = ixgbe_rx_process_limit;

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	/* Non-82598 parts: select which header types trigger packet split */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			      IXGBE_PSRTYPE_UDPHDR |
			      IXGBE_PSRTYPE_IPV4HDR |
			      IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	/* Program RETA, RSS key and MRQC */
	ixgbe_initialise_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* No PCSD: fall back to IP payload checksum in the descriptor */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
2930 
2931 
2932 /*
2933 ** This routine is run via an vlan config EVENT,
2934 ** it enables us to use the HW Filter table since
2935 ** we can get the vlan id. This just creates the
2936 ** entry in the soft version of the VFTA, init will
2937 ** repopulate the real table.
2938 */
2939 static void
2940 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2941 {
2942 	struct adapter	*adapter = ifp->if_softc;
2943 	u16		index, bit;
2944 
2945 	if (ifp->if_softc !=  arg)   /* Not our event */
2946 		return;
2947 
2948 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
2949 		return;
2950 
2951 	IXGBE_CORE_LOCK(adapter);
2952 	index = (vtag >> 5) & 0x7F;
2953 	bit = vtag & 0x1F;
2954 	adapter->shadow_vfta[index] |= (1 << bit);
2955 	++adapter->num_vlans;
2956 	ixgbe_setup_vlan_hw_support(adapter);
2957 	IXGBE_CORE_UNLOCK(adapter);
2958 }
2959 
2960 /*
2961 ** This routine is run via an vlan
2962 ** unconfig EVENT, remove our entry
2963 ** in the soft vfta.
2964 */
2965 static void
2966 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2967 {
2968 	struct adapter	*adapter = ifp->if_softc;
2969 	u16		index, bit;
2970 
2971 	if (ifp->if_softc !=  arg)
2972 		return;
2973 
2974 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
2975 		return;
2976 
2977 	IXGBE_CORE_LOCK(adapter);
2978 	index = (vtag >> 5) & 0x7F;
2979 	bit = vtag & 0x1F;
2980 	adapter->shadow_vfta[index] &= ~(1 << bit);
2981 	--adapter->num_vlans;
2982 	/* Re-init to load the changes */
2983 	ixgbe_setup_vlan_hw_support(adapter);
2984 	IXGBE_CORE_UNLOCK(adapter);
2985 }
2986 
/*
** Program the hardware VLAN state from the driver's soft state:
** per-queue tag stripping, the VFTA filter table (from shadow_vfta),
** and the global VLNCTRL filter/strip enables.
*/
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ifnet 	*ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	u32		ctrl;


	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Setup the queues for vlans */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/* On 82599 the VLAN enable is per/queue in RXDCTL */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
		}
		/* Tell the rx path this ring strips vlan tags */
		rxr->vtag_strip = TRUE;
	}

	/* Without hardware filtering, stripping setup above is all we do */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	/* On 82598 the strip enable is global, not per-queue */
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
}
3038 
/*
** Build and program the interrupt enable mask (EIMS) for this MAC
** type, set up auto-clear for MSIX, then unmask every queue vector.
*/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from all causes except the per-queue RX/TX bits */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	/* Enable Fan Failure detection */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		    mask |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);

	switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			mask |= IXGBE_EIMS_ECC;
			/* Temperature sensor on some adapters */
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
			/* SFP+ (RX_LOS_N & MOD_ABS_N) */
			mask |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
			mask |= IXGBE_EIMS_GPI_SDP2_BY_MAC(hw);
#ifdef IXGBE_FDIR
			mask |= IXGBE_EIMS_FLOW_DIR;
#endif
			break;
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_a:
		case ixgbe_mac_X550EM_x:
			/* Detect if Thermal Sensor is enabled */
			fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
			if (fwsm & IXGBE_FWSM_TS_ENABLED)
				mask |= IXGBE_EIMS_TS;
			/* XXX: Which SFP mode line does this look at? */
			if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
				mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
			mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
			mask |= IXGBE_EIMS_FLOW_DIR;
#endif
		/* falls through */
		default:
			break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With RSS we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	** Now enable all queues, this is done separately to
	** allow for handling the extended (beyond 32) MSIX
	** vectors that can be used by 82599
	*/
        for (int i = 0; i < adapter->num_queues; i++, que++)
                ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
3106 
3107 static void
3108 ixgbe_disable_intr(struct adapter *adapter)
3109 {
3110 	if (adapter->msix_mem)
3111 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3112 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3113 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3114 	} else {
3115 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3116 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3117 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3118 	}
3119 	IXGBE_WRITE_FLUSH(&adapter->hw);
3120 	return;
3121 }
3122 
3123 /*
3124 ** Get the width and transaction speed of
3125 ** the slot this adapter is plugged into.
3126 */
3127 static void
3128 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3129 {
3130 	device_t		dev = ((struct ixgbe_osdep *)hw->back)->dev;
3131 	struct ixgbe_mac_info	*mac = &hw->mac;
3132 	u16			link;
3133 	u32			offset;
3134 
3135 	/* For most devices simply call the shared code routine */
3136 	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3137 		ixgbe_get_bus_info(hw);
3138 		/* These devices don't use PCI-E */
3139 		if (hw->mac.type == ixgbe_mac_X550EM_x
3140 		    || hw->mac.type == ixgbe_mac_X550EM_a)
3141 			return;
3142 		goto display;
3143 	}
3144 
3145 	/*
3146 	** For the Quad port adapter we need to parse back
3147 	** up the PCI tree to find the speed of the expansion
3148 	** slot into which this adapter is plugged. A bit more work.
3149 	*/
3150 	dev = device_get_parent(device_get_parent(dev));
3151 #ifdef IXGBE_DEBUG
3152 	device_printf(dev, "parent pcib = %x,%x,%x\n",
3153 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3154 #endif
3155 	dev = device_get_parent(device_get_parent(dev));
3156 #ifdef IXGBE_DEBUG
3157 	device_printf(dev, "slot pcib = %x,%x,%x\n",
3158 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3159 #endif
3160 	/* Now get the PCI Express Capabilities offset */
3161 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3162 	/* ...and read the Link Status Register */
3163 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3164 	switch (link & IXGBE_PCI_LINK_WIDTH) {
3165 	case IXGBE_PCI_LINK_WIDTH_1:
3166 		hw->bus.width = ixgbe_bus_width_pcie_x1;
3167 		break;
3168 	case IXGBE_PCI_LINK_WIDTH_2:
3169 		hw->bus.width = ixgbe_bus_width_pcie_x2;
3170 		break;
3171 	case IXGBE_PCI_LINK_WIDTH_4:
3172 		hw->bus.width = ixgbe_bus_width_pcie_x4;
3173 		break;
3174 	case IXGBE_PCI_LINK_WIDTH_8:
3175 		hw->bus.width = ixgbe_bus_width_pcie_x8;
3176 		break;
3177 	default:
3178 		hw->bus.width = ixgbe_bus_width_unknown;
3179 		break;
3180 	}
3181 
3182 	switch (link & IXGBE_PCI_LINK_SPEED) {
3183 	case IXGBE_PCI_LINK_SPEED_2500:
3184 		hw->bus.speed = ixgbe_bus_speed_2500;
3185 		break;
3186 	case IXGBE_PCI_LINK_SPEED_5000:
3187 		hw->bus.speed = ixgbe_bus_speed_5000;
3188 		break;
3189 	case IXGBE_PCI_LINK_SPEED_8000:
3190 		hw->bus.speed = ixgbe_bus_speed_8000;
3191 		break;
3192 	default:
3193 		hw->bus.speed = ixgbe_bus_speed_unknown;
3194 		break;
3195 	}
3196 
3197 	mac->ops.set_lan_id(hw);
3198 
3199 display:
3200 	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3201 	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3202 	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3203 	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3204 	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3205 	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3206 	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3207 	    ("Unknown"));
3208 
3209 	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3210 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3211 	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
3212 		device_printf(dev, "PCI-Express bandwidth available"
3213 		    " for this card\n     is not sufficient for"
3214 		    " optimal performance.\n");
3215 		device_printf(dev, "For optimal performance a x8 "
3216 		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
3217         }
3218 	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3219 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3220 	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
3221 		device_printf(dev, "PCI-Express bandwidth available"
3222 		    " for this card\n     is not sufficient for"
3223 		    " optimal performance.\n");
3224 		device_printf(dev, "For optimal performance a x8 "
3225 		    "PCIE Gen3 slot is required.\n");
3226         }
3227 
3228 	return;
3229 }
3230 
3231 
3232 /*
3233 ** Setup the correct IVAR register for a particular MSIX interrupt
3234 **   (yes this is all very magic and confusing :)
3235 **  - entry is the register array entry
3236 **  - vector is the MSIX vector for this queue
3237 **  - type is RX/TX/MISC
3238 */
3239 static void
3240 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3241 {
3242 	struct ixgbe_hw *hw = &adapter->hw;
3243 	u32 ivar, index;
3244 
3245 	vector |= IXGBE_IVAR_ALLOC_VAL;
3246 
3247 	switch (hw->mac.type) {
3248 
3249 	case ixgbe_mac_82598EB:
3250 		if (type == -1)
3251 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3252 		else
3253 			entry += (type * 64);
3254 		index = (entry >> 2) & 0x1F;
3255 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3256 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3257 		ivar |= (vector << (8 * (entry & 0x3)));
3258 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3259 		break;
3260 
3261 	case ixgbe_mac_82599EB:
3262 	case ixgbe_mac_X540:
3263 	case ixgbe_mac_X550:
3264 	case ixgbe_mac_X550EM_a:
3265 	case ixgbe_mac_X550EM_x:
3266 		if (type == -1) { /* MISC IVAR */
3267 			index = (entry & 1) * 8;
3268 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3269 			ivar &= ~(0xFF << index);
3270 			ivar |= (vector << index);
3271 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3272 		} else {	/* RX/TX IVARS */
3273 			index = (16 * (entry & 1)) + (8 * type);
3274 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3275 			ivar &= ~(0xFF << index);
3276 			ivar |= (vector << index);
3277 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3278 		}
3279 
3280 	default:
3281 		break;
3282 	}
3283 }
3284 
3285 static void
3286 ixgbe_configure_ivars(struct adapter *adapter)
3287 {
3288 	struct  ix_queue *que = adapter->queues;
3289 	u32 newitr;
3290 
3291 	if (ixgbe_max_interrupt_rate > 0)
3292 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3293 	else
3294 		newitr = 0;
3295 
3296         for (int i = 0; i < adapter->num_queues; i++, que++) {
3297 		/* First the RX queue entry */
3298                 ixgbe_set_ivar(adapter, i, que->msix, 0);
3299 		/* ... and the TX */
3300 		ixgbe_set_ivar(adapter, i, que->msix, 1);
3301 		/* Set an Initial EITR value */
3302                 IXGBE_WRITE_REG(&adapter->hw,
3303                     IXGBE_EITR(que->msix), newitr);
3304 	}
3305 
3306 	/* For the Link interrupt */
3307         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3308 }
3309 
3310 /*
3311 ** ixgbe_sfp_probe - called in the local timer to
3312 ** determine if a port had optics inserted.
3313 */
3314 static bool ixgbe_sfp_probe(struct adapter *adapter)
3315 {
3316 	struct ixgbe_hw	*hw = &adapter->hw;
3317 	device_t	dev = adapter->dev;
3318 	bool		result = FALSE;
3319 
3320 	if ((hw->phy.type == ixgbe_phy_nl) &&
3321 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3322 		s32 ret = hw->phy.ops.identify_sfp(hw);
3323 		if (ret)
3324                         goto out;
3325 		ret = hw->phy.ops.reset(hw);
3326 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3327 			device_printf(dev,"Unsupported SFP+ module detected!");
3328 			printf(" Reload driver with supported module.\n");
3329 			adapter->sfp_probe = FALSE;
3330                         goto out;
3331 		} else
3332 			device_printf(dev,"SFP+ module detected!\n");
3333 		/* We now have supported optics */
3334 		adapter->sfp_probe = FALSE;
3335 		/* Set the optics type so system reports correctly */
3336 		ixgbe_setup_optics(adapter);
3337 		result = TRUE;
3338 	}
3339 out:
3340 	return (result);
3341 }
3342 
3343 /*
3344 ** Tasklet handler for MSIX Link interrupts
3345 **  - do outside interrupt since it might sleep
3346 */
3347 static void
3348 ixgbe_handle_link(void *context, int pending)
3349 {
3350 	struct adapter  *adapter = context;
3351 
3352 	ixgbe_check_link(&adapter->hw,
3353 	    &adapter->link_speed, &adapter->link_up, 0);
3354        	ixgbe_update_link_status(adapter);
3355 }
3356 
3357 /*
3358 ** Tasklet for handling SFP module interrupts
3359 */
3360 static void
3361 ixgbe_handle_mod(void *context, int pending)
3362 {
3363 	struct adapter  *adapter = context;
3364 	struct ixgbe_hw *hw = &adapter->hw;
3365 	device_t	dev = adapter->dev;
3366 	u32 err;
3367 
3368 	err = hw->phy.ops.identify_sfp(hw);
3369 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3370 		device_printf(dev,
3371 		    "Unsupported SFP+ module type was detected.\n");
3372 		return;
3373 	}
3374 	err = hw->mac.ops.setup_sfp(hw);
3375 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3376 		device_printf(dev,
3377 		    "Setup failure - unsupported SFP+ module type.\n");
3378 		return;
3379 	}
3380 	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3381 	return;
3382 }
3383 
3384 
3385 /*
3386 ** Tasklet for handling MSF (multispeed fiber) interrupts
3387 */
3388 static void
3389 ixgbe_handle_msf(void *context, int pending)
3390 {
3391 	struct adapter  *adapter = context;
3392 	struct ixgbe_hw *hw = &adapter->hw;
3393 	u32 autoneg;
3394 	bool negotiate;
3395 	int err;
3396 
3397 	err = hw->phy.ops.identify_sfp(hw);
3398 	if (!err) {
3399 		ixgbe_setup_optics(adapter);
3400 		INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3401 	}
3402 
3403 	autoneg = hw->phy.autoneg_advertised;
3404 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3405 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3406 	if (hw->mac.ops.setup_link)
3407 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3408 
3409 	ifmedia_removeall(&adapter->media);
3410 	ixgbe_add_media_types(adapter);
3411 	return;
3412 }
3413 
3414 #ifdef IXGBE_FDIR
3415 /*
3416 ** Tasklet for reinitializing the Flow Director filter table
3417 */
3418 static void
3419 ixgbe_reinit_fdir(void *context, int pending)
3420 {
3421 	struct adapter  *adapter = context;
3422 	struct ifnet   *ifp = adapter->ifp;
3423 
3424 	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3425 		return;
3426 	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3427 	adapter->fdir_reinit = 0;
3428 	/* re-enable flow director interrupts */
3429 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3430 	/* Restart the interface */
3431 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3432 	return;
3433 }
3434 #endif
3435 
3436 /**********************************************************************
3437  *
3438  *  Update the board statistics counters.
3439  *
3440  **********************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;

	/* Basic MAC error counters */
	adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

	/*
	** Note: these are for the 8 possible traffic classes,
	**	 which in current implementation is unused,
	**	 therefore only 0 should read real data.
	*/
	for (int i = 0; i < 8; i++) {
		u32 mp;
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* missed_rx tallies misses for the gprc workaround */
		missed_rx += mp;
		/* global total per queue */
        	adapter->stats.pf.mpc[i] += mp;
		/* total for stats display */
		total_missed_rx += adapter->stats.pf.mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/* 82598 exposes these per-TC counters separately */
			adapter->stats.pf.rnbc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
			adapter->stats.pf.qbtc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_QBTC(i));
			adapter->stats.pf.qbrc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			adapter->stats.pf.pxonrxc[i] +=
		    	    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
		} else
			adapter->stats.pf.pxonrxc[i] +=
		    	    IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
		adapter->stats.pf.pxontxc[i] +=
		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		adapter->stats.pf.pxofftxc[i] +=
		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type != ixgbe_mac_X550EM_x)
			adapter->stats.pf.pxoffrxc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		adapter->stats.pf.pxon2offc[i] +=
		    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	}
	/* Per-queue packet/drop counters (first 16 queues) */
	for (int i = 0; i < 16; i++) {
		adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	adapter->stats.pf.gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit byte counters span a low/high register pair */
		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.pf.bprc += bprc;
	adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		adapter->stats.pf.mprc -= bprc;

	/* Receive size-bucket counters */
	adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* Link XON/XOFF pause-frame transmit counters */
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.pf.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.pf.lxofftxc += lxoff;
	total = lxon + lxoff;

	/* Back pause frames out of the good-transmit counts */
	adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.pf.gptc -= total;
	adapter->stats.pf.mptc -= total;
	adapter->stats.pf.ptc64 -= total;
	adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;

	adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
	IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
	IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
	IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
	IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
	IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
	IXGBE_SET_COLLISIONS(adapter, 0);
	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
	IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
	    + adapter->stats.pf.rlec);
}
3590 
#if __FreeBSD_version >= 1100036
/*
** if_get_counter method: report the per-interface statistics that
** ixgbe_update_stats_counters() maintains in the softc; anything
** we do not track is delegated to the stack's default handler.
*/
static uint64_t
ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct adapter *adapter;

	adapter = if_getsoftc(ifp);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (adapter->ipackets);
	case IFCOUNTER_OPACKETS:
		return (adapter->opackets);
	case IFCOUNTER_IBYTES:
		return (adapter->ibytes);
	case IFCOUNTER_OBYTES:
		return (adapter->obytes);
	case IFCOUNTER_IMCASTS:
		return (adapter->imcasts);
	case IFCOUNTER_OMCASTS:
		return (adapter->omcasts);
	case IFCOUNTER_COLLISIONS:
		/* Full-duplex PCIe device: no collisions are counted */
		return (0);
	case IFCOUNTER_IQDROPS:
		return (adapter->iqdrops);
	case IFCOUNTER_IERRORS:
		return (adapter->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
#endif
3623 
3624 /** ixgbe_sysctl_tdh_handler - Handler function
3625  *  Retrieves the TDH value from the hardware
3626  */
3627 static int
3628 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3629 {
3630 	int error;
3631 
3632 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3633 	if (!txr) return 0;
3634 
3635 	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3636 	error = sysctl_handle_int(oidp, &val, 0, req);
3637 	if (error || !req->newptr)
3638 		return error;
3639 	return 0;
3640 }
3641 
3642 /** ixgbe_sysctl_tdt_handler - Handler function
3643  *  Retrieves the TDT value from the hardware
3644  */
3645 static int
3646 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3647 {
3648 	int error;
3649 
3650 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3651 	if (!txr) return 0;
3652 
3653 	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3654 	error = sysctl_handle_int(oidp, &val, 0, req);
3655 	if (error || !req->newptr)
3656 		return error;
3657 	return 0;
3658 }
3659 
3660 /** ixgbe_sysctl_rdh_handler - Handler function
3661  *  Retrieves the RDH value from the hardware
3662  */
3663 static int
3664 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3665 {
3666 	int error;
3667 
3668 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3669 	if (!rxr) return 0;
3670 
3671 	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3672 	error = sysctl_handle_int(oidp, &val, 0, req);
3673 	if (error || !req->newptr)
3674 		return error;
3675 	return 0;
3676 }
3677 
3678 /** ixgbe_sysctl_rdt_handler - Handler function
3679  *  Retrieves the RDT value from the hardware
3680  */
3681 static int
3682 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3683 {
3684 	int error;
3685 
3686 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3687 	if (!rxr) return 0;
3688 
3689 	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3690 	error = sysctl_handle_int(oidp, &val, 0, req);
3691 	if (error || !req->newptr)
3692 		return error;
3693 	return 0;
3694 }
3695 
/*
** Sysctl handler for a queue's interrupt rate: a read reports the
** rate implied by the current EITR interval; a write reprograms
** EITR and updates the driver-wide ixgbe_max_interrupt_rate.
*/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
	unsigned int reg, usec, rate;

	/* Extract the interval field (bits 3-11) from EITR */
	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	error = sysctl_handle_int(oidp, &rate, 0, req);
	/* Read-only access or error: nothing further to do */
	if (error || !req->newptr)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	ixgbe_max_interrupt_rate = 0;
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;	/* floor the requested rate */
		ixgbe_max_interrupt_rate = rate;
		reg |= ((4000000/rate) & 0xff8 );
	}
	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
	return 0;
}
3723 
3724 /*
3725  * Add sysctl variables, one per statistic, to the system.
3726  */
3727 static void
3728 ixgbe_add_hw_stats(struct adapter *adapter)
3729 {
3730 	device_t dev = adapter->dev;
3731 
3732 	struct tx_ring *txr = adapter->tx_rings;
3733 	struct rx_ring *rxr = adapter->rx_rings;
3734 
3735 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3736 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3737 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3738 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3739 
3740 	struct sysctl_oid *stat_node, *queue_node;
3741 	struct sysctl_oid_list *stat_list, *queue_list;
3742 
3743 #define QUEUE_NAME_LEN 32
3744 	char namebuf[QUEUE_NAME_LEN];
3745 
3746 	/* Driver Statistics */
3747 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3748 			CTLFLAG_RD, &adapter->dropped_pkts,
3749 			"Driver dropped packets");
3750 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3751 			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3752 			"m_defrag() failed");
3753 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3754 			CTLFLAG_RD, &adapter->watchdog_events,
3755 			"Watchdog timeouts");
3756 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
3757 			CTLFLAG_RD, &adapter->vector_irq,
3758 			"Link MSIX IRQ Handled");
3759 
3760 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
3761 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3762 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3763 					    CTLFLAG_RD, NULL, "Queue Name");
3764 		queue_list = SYSCTL_CHILDREN(queue_node);
3765 
3766 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
3767 				CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
3768 				sizeof(&adapter->queues[i]),
3769 				ixgbe_sysctl_interrupt_rate_handler, "IU",
3770 				"Interrupt Rate");
3771 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3772 				CTLFLAG_RD, &(adapter->queues[i].irqs),
3773 				"irqs on this queue");
3774 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
3775 				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
3776 				ixgbe_sysctl_tdh_handler, "IU",
3777 				"Transmit Descriptor Head");
3778 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
3779 				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
3780 				ixgbe_sysctl_tdt_handler, "IU",
3781 				"Transmit Descriptor Tail");
3782 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
3783 				CTLFLAG_RD, &txr->tso_tx,
3784 				"TSO");
3785 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
3786 				CTLFLAG_RD, &txr->no_tx_dma_setup,
3787 				"Driver tx dma failure in xmit");
3788 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3789 				CTLFLAG_RD, &txr->no_desc_avail,
3790 				"Queue No Descriptor Available");
3791 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3792 				CTLFLAG_RD, &txr->total_packets,
3793 				"Queue Packets Transmitted");
3794 	}
3795 
3796 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3797 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3798 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3799 					    CTLFLAG_RD, NULL, "Queue Name");
3800 		queue_list = SYSCTL_CHILDREN(queue_node);
3801 
3802 		struct lro_ctrl *lro = &rxr->lro;
3803 
3804 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3805 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3806 					    CTLFLAG_RD, NULL, "Queue Name");
3807 		queue_list = SYSCTL_CHILDREN(queue_node);
3808 
3809 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
3810 				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
3811 				ixgbe_sysctl_rdh_handler, "IU",
3812 				"Receive Descriptor Head");
3813 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
3814 				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
3815 				ixgbe_sysctl_rdt_handler, "IU",
3816 				"Receive Descriptor Tail");
3817 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3818 				CTLFLAG_RD, &rxr->rx_packets,
3819 				"Queue Packets Received");
3820 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3821 				CTLFLAG_RD, &rxr->rx_bytes,
3822 				"Queue Bytes Received");
3823 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
3824 				CTLFLAG_RD, &rxr->rx_copies,
3825 				"Copied RX Frames");
3826 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
3827 				CTLFLAG_RD, &lro->lro_queued, 0,
3828 				"LRO Queued");
3829 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
3830 				CTLFLAG_RD, &lro->lro_flushed, 0,
3831 				"LRO Flushed");
3832 	}
3833 
3834 	/* MAC stats get the own sub node */
3835 
3836 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
3837 				    CTLFLAG_RD, NULL, "MAC Statistics");
3838 	stat_list = SYSCTL_CHILDREN(stat_node);
3839 
3840 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
3841 			CTLFLAG_RD, &stats->crcerrs,
3842 			"CRC Errors");
3843 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
3844 			CTLFLAG_RD, &stats->illerrc,
3845 			"Illegal Byte Errors");
3846 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
3847 			CTLFLAG_RD, &stats->errbc,
3848 			"Byte Errors");
3849 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
3850 			CTLFLAG_RD, &stats->mspdc,
3851 			"MAC Short Packets Discarded");
3852 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
3853 			CTLFLAG_RD, &stats->mlfc,
3854 			"MAC Local Faults");
3855 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
3856 			CTLFLAG_RD, &stats->mrfc,
3857 			"MAC Remote Faults");
3858 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
3859 			CTLFLAG_RD, &stats->rlec,
3860 			"Receive Length Errors");
3861 
3862 	/* Flow Control stats */
3863 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
3864 			CTLFLAG_RD, &stats->lxontxc,
3865 			"Link XON Transmitted");
3866 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
3867 			CTLFLAG_RD, &stats->lxonrxc,
3868 			"Link XON Received");
3869 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
3870 			CTLFLAG_RD, &stats->lxofftxc,
3871 			"Link XOFF Transmitted");
3872 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
3873 			CTLFLAG_RD, &stats->lxoffrxc,
3874 			"Link XOFF Received");
3875 
3876 	/* Packet Reception Stats */
3877 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
3878 			CTLFLAG_RD, &stats->tor,
3879 			"Total Octets Received");
3880 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
3881 			CTLFLAG_RD, &stats->gorc,
3882 			"Good Octets Received");
3883 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
3884 			CTLFLAG_RD, &stats->tpr,
3885 			"Total Packets Received");
3886 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
3887 			CTLFLAG_RD, &stats->gprc,
3888 			"Good Packets Received");
3889 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
3890 			CTLFLAG_RD, &stats->mprc,
3891 			"Multicast Packets Received");
3892 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
3893 			CTLFLAG_RD, &stats->bprc,
3894 			"Broadcast Packets Received");
3895 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
3896 			CTLFLAG_RD, &stats->prc64,
3897 			"64 byte frames received ");
3898 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
3899 			CTLFLAG_RD, &stats->prc127,
3900 			"65-127 byte frames received");
3901 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
3902 			CTLFLAG_RD, &stats->prc255,
3903 			"128-255 byte frames received");
3904 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
3905 			CTLFLAG_RD, &stats->prc511,
3906 			"256-511 byte frames received");
3907 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
3908 			CTLFLAG_RD, &stats->prc1023,
3909 			"512-1023 byte frames received");
3910 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
3911 			CTLFLAG_RD, &stats->prc1522,
3912 			"1023-1522 byte frames received");
3913 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
3914 			CTLFLAG_RD, &stats->ruc,
3915 			"Receive Undersized");
3916 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
3917 			CTLFLAG_RD, &stats->rfc,
3918 			"Fragmented Packets Received ");
3919 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
3920 			CTLFLAG_RD, &stats->roc,
3921 			"Oversized Packets Received");
3922 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
3923 			CTLFLAG_RD, &stats->rjc,
3924 			"Received Jabber");
3925 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
3926 			CTLFLAG_RD, &stats->mngprc,
3927 			"Management Packets Received");
3928 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
3929 			CTLFLAG_RD, &stats->mngptc,
3930 			"Management Packets Dropped");
3931 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
3932 			CTLFLAG_RD, &stats->xec,
3933 			"Checksum Errors");
3934 
3935 	/* Packet Transmission Stats */
3936 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
3937 			CTLFLAG_RD, &stats->gotc,
3938 			"Good Octets Transmitted");
3939 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
3940 			CTLFLAG_RD, &stats->tpt,
3941 			"Total Packets Transmitted");
3942 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
3943 			CTLFLAG_RD, &stats->gptc,
3944 			"Good Packets Transmitted");
3945 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
3946 			CTLFLAG_RD, &stats->bptc,
3947 			"Broadcast Packets Transmitted");
3948 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
3949 			CTLFLAG_RD, &stats->mptc,
3950 			"Multicast Packets Transmitted");
3951 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
3952 			CTLFLAG_RD, &stats->mngptc,
3953 			"Management Packets Transmitted");
3954 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
3955 			CTLFLAG_RD, &stats->ptc64,
3956 			"64 byte frames transmitted ");
3957 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
3958 			CTLFLAG_RD, &stats->ptc127,
3959 			"65-127 byte frames transmitted");
3960 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
3961 			CTLFLAG_RD, &stats->ptc255,
3962 			"128-255 byte frames transmitted");
3963 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
3964 			CTLFLAG_RD, &stats->ptc511,
3965 			"256-511 byte frames transmitted");
3966 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
3967 			CTLFLAG_RD, &stats->ptc1023,
3968 			"512-1023 byte frames transmitted");
3969 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
3970 			CTLFLAG_RD, &stats->ptc1522,
3971 			"1024-1522 byte frames transmitted");
3972 }
3973 
3974 /*
3975 ** Set flow control using sysctl:
3976 ** Flow control values:
3977 ** 	0 - off
3978 **	1 - rx pause
3979 **	2 - tx pause
3980 **	3 - full
3981 */
3982 static int
3983 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
3984 {
3985 	int error, last;
3986 	struct adapter *adapter = (struct adapter *) arg1;
3987 
3988 	last = adapter->fc;
3989 	error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
3990 	if ((error) || (req->newptr == NULL))
3991 		return (error);
3992 
3993 	/* Don't bother if it's not changed */
3994 	if (adapter->fc == last)
3995 		return (0);
3996 
3997 	switch (adapter->fc) {
3998 		case ixgbe_fc_rx_pause:
3999 		case ixgbe_fc_tx_pause:
4000 		case ixgbe_fc_full:
4001 			adapter->hw.fc.requested_mode = adapter->fc;
4002 			if (adapter->num_queues > 1)
4003 				ixgbe_disable_rx_drop(adapter);
4004 			break;
4005 		case ixgbe_fc_none:
4006 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
4007 			if (adapter->num_queues > 1)
4008 				ixgbe_enable_rx_drop(adapter);
4009 			break;
4010 		default:
4011 			adapter->fc = last;
4012 			return (EINVAL);
4013 	}
4014 	/* Don't autoneg if forcing a value */
4015 	adapter->hw.fc.disable_fc_autoneg = TRUE;
4016 	ixgbe_fc_enable(&adapter->hw);
4017 	return error;
4018 }
4019 
4020 /*
4021 ** Control advertised link speed:
4022 **	Flags:
4023 **	0x1 - advertise 100 Mb
4024 **	0x2 - advertise 1G
4025 **	0x4 - advertise 10G
4026 */
4027 static int
4028 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4029 {
4030 	int			error = 0, requested;
4031 	struct adapter		*adapter;
4032 	device_t		dev;
4033 	struct ixgbe_hw		*hw;
4034 	ixgbe_link_speed	speed = 0;
4035 
4036 	adapter = (struct adapter *) arg1;
4037 	dev = adapter->dev;
4038 	hw = &adapter->hw;
4039 
4040 	requested = adapter->advertise;
4041 	error = sysctl_handle_int(oidp, &requested, 0, req);
4042 	if ((error) || (req->newptr == NULL))
4043 		return (error);
4044 
4045 	/* Checks to validate new value */
4046 	if (adapter->advertise == requested) /* no change */
4047 		return (0);
4048 
4049 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4050 	    (hw->phy.multispeed_fiber))) {
4051 		device_printf(dev,
4052 		    "Advertised speed can only be set on copper or "
4053 		    "multispeed fiber media types.\n");
4054 		return (EINVAL);
4055 	}
4056 
4057 	if (requested < 0x1 || requested > 0x7) {
4058 		device_printf(dev,
4059 		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4060 		return (EINVAL);
4061 	}
4062 
4063 	if ((requested & 0x1)
4064 	    && (hw->mac.type != ixgbe_mac_X540)
4065 	    && (hw->mac.type != ixgbe_mac_X550)) {
4066 		device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4067 		return (EINVAL);
4068 	}
4069 
4070 	/* Set new value and report new advertised mode */
4071 	if (requested & 0x1)
4072 		speed |= IXGBE_LINK_SPEED_100_FULL;
4073 	if (requested & 0x2)
4074 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4075 	if (requested & 0x4)
4076 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4077 
4078 	hw->mac.autotry_restart = TRUE;
4079 	hw->mac.ops.setup_link(hw, speed, TRUE);
4080 	adapter->advertise = requested;
4081 
4082 	return (error);
4083 }
4084 
4085 /*
4086 ** Thermal Shutdown Trigger
4087 **   - cause a Thermal Overtemp IRQ
4088 **   - this now requires firmware enabling
4089 */
4090 static int
4091 ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS)
4092 {
4093 	int		error, fire = 0;
4094 	struct adapter	*adapter = (struct adapter *) arg1;
4095 	struct ixgbe_hw *hw = &adapter->hw;
4096 
4097 
4098 	if (hw->mac.type < ixgbe_mac_X540)
4099 		return (0);
4100 
4101 	error = sysctl_handle_int(oidp, &fire, 0, req);
4102 	if ((error) || (req->newptr == NULL))
4103 		return (error);
4104 
4105 	if (fire) {
4106 		u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4107 		reg |= IXGBE_EICR_TS;
4108 		IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4109 	}
4110 
4111 	return (0);
4112 }
4113 
4114 /*
4115 ** Enable the hardware to drop packets when the buffer is
4116 ** full. This is useful when multiqueue,so that no single
4117 ** queue being full stalls the entire RX engine. We only
4118 ** enable this when Multiqueue AND when Flow Control is
4119 ** disabled.
4120 */
4121 static void
4122 ixgbe_enable_rx_drop(struct adapter *adapter)
4123 {
4124         struct ixgbe_hw *hw = &adapter->hw;
4125 
4126 	for (int i = 0; i < adapter->num_queues; i++) {
4127         	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4128         	srrctl |= IXGBE_SRRCTL_DROP_EN;
4129         	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4130 	}
4131 }
4132 
4133 static void
4134 ixgbe_disable_rx_drop(struct adapter *adapter)
4135 {
4136         struct ixgbe_hw *hw = &adapter->hw;
4137 
4138 	for (int i = 0; i < adapter->num_queues; i++) {
4139         	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4140         	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4141         	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4142 	}
4143 }
4144 
4145 static void
4146 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4147 {
4148 	u32 mask;
4149 
4150 	switch (adapter->hw.mac.type) {
4151 	case ixgbe_mac_82598EB:
4152 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4153 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4154 		break;
4155 	case ixgbe_mac_82599EB:
4156 	case ixgbe_mac_X540:
4157 	case ixgbe_mac_X550:
4158 		mask = (queues & 0xFFFFFFFF);
4159 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4160 		mask = (queues >> 32);
4161 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
4162 		break;
4163 	default:
4164 		break;
4165 	}
4166 }
4167 
4168 
4169