xref: /freebsd/sys/dev/e1000/if_em.c (revision b7c60aadbbd5c846a250c05791fe7406d6d78bf4)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2011, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #if __FreeBSD_version >= 800000
44 #include <sys/buf_ring.h>
45 #endif
46 #include <sys/bus.h>
47 #include <sys/endian.h>
48 #include <sys/kernel.h>
49 #include <sys/kthread.h>
50 #include <sys/malloc.h>
51 #include <sys/mbuf.h>
52 #include <sys/module.h>
53 #include <sys/rman.h>
54 #include <sys/socket.h>
55 #include <sys/sockio.h>
56 #include <sys/sysctl.h>
57 #include <sys/taskqueue.h>
58 #include <sys/eventhandler.h>
59 #include <machine/bus.h>
60 #include <machine/resource.h>
61 
62 #include <net/bpf.h>
63 #include <net/ethernet.h>
64 #include <net/if.h>
65 #include <net/if_arp.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 
69 #include <net/if_types.h>
70 #include <net/if_vlan_var.h>
71 
72 #include <netinet/in_systm.h>
73 #include <netinet/in.h>
74 #include <netinet/if_ether.h>
75 #include <netinet/ip.h>
76 #include <netinet/ip6.h>
77 #include <netinet/tcp.h>
78 #include <netinet/udp.h>
79 
80 #include <machine/in_cksum.h>
81 #include <dev/led/led.h>
82 #include <dev/pci/pcivar.h>
83 #include <dev/pci/pcireg.h>
84 
85 #include "e1000_api.h"
86 #include "e1000_82571.h"
87 #include "if_em.h"
88 
89 /*********************************************************************
90  *  Set this to one to display debug statistics
91  *********************************************************************/
92 int	em_display_debug_stats = 0;
93 
94 /*********************************************************************
95  *  Driver version:
96  *********************************************************************/
97 char em_driver_version[] = "7.3.2";
98 
99 /*********************************************************************
100  *  PCI Device ID Table
101  *
102  *  Used by probe to select devices to load on
103  *  Last field stores an index into e1000_strings
104  *  Last entry must be all 0s
105  *
106  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
107  *********************************************************************/
108 
109 static em_vendor_info_t em_vendor_info_array[] =
110 {
111 	/* Intel(R) PRO/1000 Network Connection */
112 	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
113 	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
114 	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
115 	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
116 						PCI_ANY_ID, PCI_ANY_ID, 0},
117 	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
118 						PCI_ANY_ID, PCI_ANY_ID, 0},
119 	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
120 						PCI_ANY_ID, PCI_ANY_ID, 0},
121 	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
122 						PCI_ANY_ID, PCI_ANY_ID, 0},
123 	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
124 						PCI_ANY_ID, PCI_ANY_ID, 0},
125 	{ 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
126 						PCI_ANY_ID, PCI_ANY_ID, 0},
127 	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
128 	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
129 	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
130 	{ 0x8086, E1000_DEV_ID_82572EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
131 
132 	{ 0x8086, E1000_DEV_ID_82573E,		PCI_ANY_ID, PCI_ANY_ID, 0},
133 	{ 0x8086, E1000_DEV_ID_82573E_IAMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
134 	{ 0x8086, E1000_DEV_ID_82573L,		PCI_ANY_ID, PCI_ANY_ID, 0},
135 	{ 0x8086, E1000_DEV_ID_82583V,		PCI_ANY_ID, PCI_ANY_ID, 0},
136 	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
137 						PCI_ANY_ID, PCI_ANY_ID, 0},
138 	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
139 						PCI_ANY_ID, PCI_ANY_ID, 0},
140 	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
141 						PCI_ANY_ID, PCI_ANY_ID, 0},
142 	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
143 						PCI_ANY_ID, PCI_ANY_ID, 0},
144 	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
145 	{ 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
146 	{ 0x8086, E1000_DEV_ID_ICH8_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
147 	{ 0x8086, E1000_DEV_ID_ICH8_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
148 	{ 0x8086, E1000_DEV_ID_ICH8_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
149 	{ 0x8086, E1000_DEV_ID_ICH8_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
150 	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},
151 	{ 0x8086, E1000_DEV_ID_ICH8_82567V_3,	PCI_ANY_ID, PCI_ANY_ID, 0},
152 	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
153 	{ 0x8086, E1000_DEV_ID_ICH9_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
154 	{ 0x8086, E1000_DEV_ID_ICH9_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
155 	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},
156 	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
157 	{ 0x8086, E1000_DEV_ID_ICH9_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
158 	{ 0x8086, E1000_DEV_ID_ICH9_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
159 	{ 0x8086, E1000_DEV_ID_ICH9_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
160 	{ 0x8086, E1000_DEV_ID_ICH9_BM,		PCI_ANY_ID, PCI_ANY_ID, 0},
161 	{ 0x8086, E1000_DEV_ID_82574L,		PCI_ANY_ID, PCI_ANY_ID, 0},
162 	{ 0x8086, E1000_DEV_ID_82574LA,		PCI_ANY_ID, PCI_ANY_ID, 0},
163 	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
164 	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
165 	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
166 	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
167 	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
168 	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
169 	{ 0x8086, E1000_DEV_ID_PCH_M_HV_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
170 	{ 0x8086, E1000_DEV_ID_PCH_M_HV_LC,	PCI_ANY_ID, PCI_ANY_ID, 0},
171 	{ 0x8086, E1000_DEV_ID_PCH_D_HV_DM,	PCI_ANY_ID, PCI_ANY_ID, 0},
172 	{ 0x8086, E1000_DEV_ID_PCH_D_HV_DC,	PCI_ANY_ID, PCI_ANY_ID, 0},
173 	{ 0x8086, E1000_DEV_ID_PCH2_LV_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
174 	{ 0x8086, E1000_DEV_ID_PCH2_LV_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
175 	/* required last entry */
176 	{ 0, 0, 0, 0, 0}
177 };
178 
179 /*********************************************************************
180  *  Table of branding strings for all supported NICs.
181  *********************************************************************/
182 
183 static char *em_strings[] = {
184 	"Intel(R) PRO/1000 Network Connection"
185 };
186 
187 /*********************************************************************
188  *  Function prototypes
189  *********************************************************************/
190 static int	em_probe(device_t);
191 static int	em_attach(device_t);
192 static int	em_detach(device_t);
193 static int	em_shutdown(device_t);
194 static int	em_suspend(device_t);
195 static int	em_resume(device_t);
196 static void	em_start(struct ifnet *);
197 static void	em_start_locked(struct ifnet *, struct tx_ring *);
198 #ifdef EM_MULTIQUEUE
199 static int	em_mq_start(struct ifnet *, struct mbuf *);
200 static int	em_mq_start_locked(struct ifnet *,
201 		    struct tx_ring *, struct mbuf *);
202 static void	em_qflush(struct ifnet *);
203 #endif
204 static int	em_ioctl(struct ifnet *, u_long, caddr_t);
205 static void	em_init(void *);
206 static void	em_init_locked(struct adapter *);
207 static void	em_stop(void *);
208 static void	em_media_status(struct ifnet *, struct ifmediareq *);
209 static int	em_media_change(struct ifnet *);
210 static void	em_identify_hardware(struct adapter *);
211 static int	em_allocate_pci_resources(struct adapter *);
212 static int	em_allocate_legacy(struct adapter *);
213 static int	em_allocate_msix(struct adapter *);
214 static int	em_allocate_queues(struct adapter *);
215 static int	em_setup_msix(struct adapter *);
216 static void	em_free_pci_resources(struct adapter *);
217 static void	em_local_timer(void *);
218 static void	em_reset(struct adapter *);
219 static int	em_setup_interface(device_t, struct adapter *);
220 
221 static void	em_setup_transmit_structures(struct adapter *);
222 static void	em_initialize_transmit_unit(struct adapter *);
223 static int	em_allocate_transmit_buffers(struct tx_ring *);
224 static void	em_free_transmit_structures(struct adapter *);
225 static void	em_free_transmit_buffers(struct tx_ring *);
226 
227 static int	em_setup_receive_structures(struct adapter *);
228 static int	em_allocate_receive_buffers(struct rx_ring *);
229 static void	em_initialize_receive_unit(struct adapter *);
230 static void	em_free_receive_structures(struct adapter *);
231 static void	em_free_receive_buffers(struct rx_ring *);
232 
233 static void	em_enable_intr(struct adapter *);
234 static void	em_disable_intr(struct adapter *);
235 static void	em_update_stats_counters(struct adapter *);
236 static void	em_add_hw_stats(struct adapter *adapter);
237 static bool	em_txeof(struct tx_ring *);
238 static bool	em_rxeof(struct rx_ring *, int, int *);
239 #ifndef __NO_STRICT_ALIGNMENT
240 static int	em_fixup_rx(struct rx_ring *);
241 #endif
242 static void	em_receive_checksum(struct e1000_rx_desc *, struct mbuf *);
243 static void	em_transmit_checksum_setup(struct tx_ring *, struct mbuf *, int,
244 		    struct ip *, u32 *, u32 *);
245 static void	em_tso_setup(struct tx_ring *, struct mbuf *, int, struct ip *,
246 		    struct tcphdr *, u32 *, u32 *);
247 static void	em_set_promisc(struct adapter *);
248 static void	em_disable_promisc(struct adapter *);
249 static void	em_set_multi(struct adapter *);
250 static void	em_update_link_status(struct adapter *);
251 static void	em_refresh_mbufs(struct rx_ring *, int);
252 static void	em_register_vlan(void *, struct ifnet *, u16);
253 static void	em_unregister_vlan(void *, struct ifnet *, u16);
254 static void	em_setup_vlan_hw_support(struct adapter *);
255 static int	em_xmit(struct tx_ring *, struct mbuf **);
256 static int	em_dma_malloc(struct adapter *, bus_size_t,
257 		    struct em_dma_alloc *, int);
258 static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
259 static int	em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
260 static void	em_print_nvm_info(struct adapter *);
261 static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
262 static void	em_print_debug_info(struct adapter *);
263 static int 	em_is_valid_ether_addr(u8 *);
264 static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
265 static void	em_add_int_delay_sysctl(struct adapter *, const char *,
266 		    const char *, struct em_int_delay_info *, int, int);
267 /* Management and WOL Support */
268 static void	em_init_manageability(struct adapter *);
269 static void	em_release_manageability(struct adapter *);
270 static void     em_get_hw_control(struct adapter *);
271 static void     em_release_hw_control(struct adapter *);
272 static void	em_get_wakeup(device_t);
273 static void     em_enable_wakeup(device_t);
274 static int	em_enable_phy_wakeup(struct adapter *);
275 static void	em_led_func(void *, int);
276 static void	em_disable_aspm(struct adapter *);
277 
278 static int	em_irq_fast(void *);
279 
280 /* MSIX handlers */
281 static void	em_msix_tx(void *);
282 static void	em_msix_rx(void *);
283 static void	em_msix_link(void *);
284 static void	em_handle_tx(void *context, int pending);
285 static void	em_handle_rx(void *context, int pending);
286 static void	em_handle_link(void *context, int pending);
287 
288 static void	em_set_sysctl_value(struct adapter *, const char *,
289 		    const char *, int *, int);
290 static int	em_set_flowcntl(SYSCTL_HANDLER_ARGS);
291 
292 static __inline void em_rx_discard(struct rx_ring *, int);
293 
294 #ifdef DEVICE_POLLING
295 static poll_handler_t em_poll;
296 #endif /* POLLING */
297 
298 /*********************************************************************
299  *  FreeBSD Device Interface Entry Points
300  *********************************************************************/
301 
302 static device_method_t em_methods[] = {
303 	/* Device interface */
304 	DEVMETHOD(device_probe, em_probe),
305 	DEVMETHOD(device_attach, em_attach),
306 	DEVMETHOD(device_detach, em_detach),
307 	DEVMETHOD(device_shutdown, em_shutdown),
308 	DEVMETHOD(device_suspend, em_suspend),
309 	DEVMETHOD(device_resume, em_resume),
310 	{0, 0}
311 };
312 
313 static driver_t em_driver = {
314 	"em", em_methods, sizeof(struct adapter),
315 };
316 
317 devclass_t em_devclass;
318 DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
319 MODULE_DEPEND(em, pci, 1, 1, 1);
320 MODULE_DEPEND(em, ether, 1, 1, 1);
321 
322 /*********************************************************************
323  *  Tunable default values.
324  *********************************************************************/
325 
326 #define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
327 #define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
328 #define M_TSO_LEN			66
329 
330 /* Allow common code without TSO */
331 #ifndef CSUM_TSO
332 #define CSUM_TSO	0
333 #endif
334 
335 static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters");
336 
337 static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
338 static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
339 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
340 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
341 SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt,
342     0, "Default transmit interrupt delay in usecs");
343 SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt,
344     0, "Default receive interrupt delay in usecs");
345 
346 static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
347 static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
348 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
349 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
350 SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN,
351     &em_tx_abs_int_delay_dflt, 0,
352     "Default transmit interrupt delay limit in usecs");
353 SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN,
354     &em_rx_abs_int_delay_dflt, 0,
355     "Default receive interrupt delay limit in usecs");
356 
357 static int em_rxd = EM_DEFAULT_RXD;
358 static int em_txd = EM_DEFAULT_TXD;
359 TUNABLE_INT("hw.em.rxd", &em_rxd);
360 TUNABLE_INT("hw.em.txd", &em_txd);
361 SYSCTL_INT(_hw_em, OID_AUTO, rxd, CTLFLAG_RDTUN, &em_rxd, 0,
362     "Number of receive descriptors per queue");
363 SYSCTL_INT(_hw_em, OID_AUTO, txd, CTLFLAG_RDTUN, &em_txd, 0,
364     "Number of transmit descriptors per queue");
365 
366 static int em_smart_pwr_down = FALSE;
367 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
368 SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down,
369     0, "Set to true to leave smart power down enabled on newer adapters");
370 
371 /* Controls whether promiscuous also shows bad packets */
372 static int em_debug_sbp = FALSE;
373 TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
374 SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0,
375     "Show bad packets in promiscuous mode");
376 
377 static int em_enable_msix = TRUE;
378 TUNABLE_INT("hw.em.enable_msix", &em_enable_msix);
379 SYSCTL_INT(_hw_em, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &em_enable_msix, 0,
380     "Enable MSI-X interrupts");
381 
382 /* How many packets rxeof tries to clean at a time */
383 static int em_rx_process_limit = 100;
384 TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
385 SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
386     &em_rx_process_limit, 0,
387     "Maximum number of received packets to process "
388     "at a time, -1 means unlimited");
389 
390 /* Energy efficient ethernet - default to OFF */
391 static int eee_setting = 0;
392 TUNABLE_INT("hw.em.eee_setting", &eee_setting);
393 SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0,
394     "Enable Energy Efficient Ethernet");
395 
396 /* Global used in WOL setup with multiport cards */
397 static int global_quad_port_a = 0;
398 
399 #ifdef DEV_NETMAP	/* see ixgbe.c for details */
400 #include <dev/netmap/if_em_netmap.h>
401 #endif /* DEV_NETMAP */
402 
403 /*********************************************************************
404  *  Device identification routine
405  *
406  *  em_probe determines if the driver should be loaded on
407  *  adapter based on PCI vendor/device id of the adapter.
408  *
409  *  return BUS_PROBE_DEFAULT on success, positive on failure
410  *********************************************************************/
411 
412 static int
413 em_probe(device_t dev)
414 {
415 	char		adapter_name[60];
416 	u16		pci_vendor_id = 0;
417 	u16		pci_device_id = 0;
418 	u16		pci_subvendor_id = 0;
419 	u16		pci_subdevice_id = 0;
420 	em_vendor_info_t *ent;
421 
422 	INIT_DEBUGOUT("em_probe: begin");
423 
424 	pci_vendor_id = pci_get_vendor(dev);
425 	if (pci_vendor_id != EM_VENDOR_ID)
426 		return (ENXIO);
427 
428 	pci_device_id = pci_get_device(dev);
429 	pci_subvendor_id = pci_get_subvendor(dev);
430 	pci_subdevice_id = pci_get_subdevice(dev);
431 
432 	ent = em_vendor_info_array;
433 	while (ent->vendor_id != 0) {
434 		if ((pci_vendor_id == ent->vendor_id) &&
435 		    (pci_device_id == ent->device_id) &&
436 
437 		    ((pci_subvendor_id == ent->subvendor_id) ||
438 		    (ent->subvendor_id == PCI_ANY_ID)) &&
439 
440 		    ((pci_subdevice_id == ent->subdevice_id) ||
441 		    (ent->subdevice_id == PCI_ANY_ID))) {
442 			sprintf(adapter_name, "%s %s",
443 				em_strings[ent->index],
444 				em_driver_version);
445 			device_set_desc_copy(dev, adapter_name);
446 			return (BUS_PROBE_DEFAULT);
447 		}
448 		ent++;
449 	}
450 
451 	return (ENXIO);
452 }
453 
454 /*********************************************************************
455  *  Device initialization routine
456  *
457  *  The attach entry point is called when the driver is being loaded.
458  *  This routine identifies the type of hardware, allocates all resources
459  *  and initializes the hardware.
460  *
461  *  return 0 on success, positive on failure
462  *********************************************************************/
463 
464 static int
465 em_attach(device_t dev)
466 {
467 	struct adapter	*adapter;
468 	struct e1000_hw	*hw;
469 	int		error = 0;
470 
471 	INIT_DEBUGOUT("em_attach: begin");
472 
473 	if (resource_disabled("em", device_get_unit(dev))) {
474 		device_printf(dev, "Disabled by device hint\n");
475 		return (ENXIO);
476 	}
477 
478 	adapter = device_get_softc(dev);
479 	adapter->dev = adapter->osdep.dev = dev;
480 	hw = &adapter->hw;
481 	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
482 
483 	/* SYSCTL stuff */
484 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
485 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
486 	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
487 	    em_sysctl_nvm_info, "I", "NVM Information");
488 
489 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
490 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
491 	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
492 	    em_sysctl_debug_info, "I", "Debug Information");
493 
494 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
495 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
496 	    OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
497 	    em_set_flowcntl, "I", "Flow Control");
498 
499 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
500 
501 	/* Determine hardware and mac info */
502 	em_identify_hardware(adapter);
503 
504 	/* Setup PCI resources */
505 	if (em_allocate_pci_resources(adapter)) {
506 		device_printf(dev, "Allocation of PCI resources failed\n");
507 		error = ENXIO;
508 		goto err_pci;
509 	}
510 
511 	/*
512 	** For ICH8 and family we need to
513 	** map the flash memory, and this
514 	** must happen after the MAC is
515 	** identified
516 	*/
517 	if ((hw->mac.type == e1000_ich8lan) ||
518 	    (hw->mac.type == e1000_ich9lan) ||
519 	    (hw->mac.type == e1000_ich10lan) ||
520 	    (hw->mac.type == e1000_pchlan) ||
521 	    (hw->mac.type == e1000_pch2lan)) {
522 		int rid = EM_BAR_TYPE_FLASH;
523 		adapter->flash = bus_alloc_resource_any(dev,
524 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
525 		if (adapter->flash == NULL) {
526 			device_printf(dev, "Mapping of Flash failed\n");
527 			error = ENXIO;
528 			goto err_pci;
529 		}
530 		/* This is used in the shared code */
531 		hw->flash_address = (u8 *)adapter->flash;
532 		adapter->osdep.flash_bus_space_tag =
533 		    rman_get_bustag(adapter->flash);
534 		adapter->osdep.flash_bus_space_handle =
535 		    rman_get_bushandle(adapter->flash);
536 	}
537 
538 	/* Do Shared Code initialization */
539 	if (e1000_setup_init_funcs(hw, TRUE)) {
540 		device_printf(dev, "Setup of Shared code failed\n");
541 		error = ENXIO;
542 		goto err_pci;
543 	}
544 
545 	e1000_get_bus_info(hw);
546 
547 	/* Set up some sysctls for the tunable interrupt delays */
548 	em_add_int_delay_sysctl(adapter, "rx_int_delay",
549 	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
550 	    E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt);
551 	em_add_int_delay_sysctl(adapter, "tx_int_delay",
552 	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
553 	    E1000_REGISTER(hw, E1000_TIDV), em_tx_int_delay_dflt);
554 	em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
555 	    "receive interrupt delay limit in usecs",
556 	    &adapter->rx_abs_int_delay,
557 	    E1000_REGISTER(hw, E1000_RADV),
558 	    em_rx_abs_int_delay_dflt);
559 	em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
560 	    "transmit interrupt delay limit in usecs",
561 	    &adapter->tx_abs_int_delay,
562 	    E1000_REGISTER(hw, E1000_TADV),
563 	    em_tx_abs_int_delay_dflt);
564 
565 	/* Sysctl for limiting the amount of work done in the taskqueue */
566 	em_set_sysctl_value(adapter, "rx_processing_limit",
567 	    "max number of rx packets to process", &adapter->rx_process_limit,
568 	    em_rx_process_limit);
569 
570 	/*
571 	 * Validate number of transmit and receive descriptors. It
572 	 * must not exceed hardware maximum, and must be multiple
573 	 * of E1000_DBA_ALIGN.
574 	 */
575 	if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
576 	    (em_txd > EM_MAX_TXD) || (em_txd < EM_MIN_TXD)) {
577 		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
578 		    EM_DEFAULT_TXD, em_txd);
579 		adapter->num_tx_desc = EM_DEFAULT_TXD;
580 	} else
581 		adapter->num_tx_desc = em_txd;
582 
583 	if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
584 	    (em_rxd > EM_MAX_RXD) || (em_rxd < EM_MIN_RXD)) {
585 		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
586 		    EM_DEFAULT_RXD, em_rxd);
587 		adapter->num_rx_desc = EM_DEFAULT_RXD;
588 	} else
589 		adapter->num_rx_desc = em_rxd;
590 
591 	hw->mac.autoneg = DO_AUTO_NEG;
592 	hw->phy.autoneg_wait_to_complete = FALSE;
593 	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
594 
595 	/* Copper options */
596 	if (hw->phy.media_type == e1000_media_type_copper) {
597 		hw->phy.mdix = AUTO_ALL_MODES;
598 		hw->phy.disable_polarity_correction = FALSE;
599 		hw->phy.ms_type = EM_MASTER_SLAVE;
600 	}
601 
602 	/*
603 	 * Set the frame limits assuming
604 	 * standard ethernet sized frames.
605 	 */
606 	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
607 	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
608 
609 	/*
610 	 * This controls when hardware reports transmit completion
611 	 * status.
612 	 */
613 	hw->mac.report_tx_early = 1;
614 
615 	/*
616 	** Get queue/ring memory
617 	*/
618 	if (em_allocate_queues(adapter)) {
619 		error = ENOMEM;
620 		goto err_pci;
621 	}
622 
623 	/* Allocate multicast array memory. */
624 	adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
625 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
626 	if (adapter->mta == NULL) {
627 		device_printf(dev, "Can not allocate multicast setup array\n");
628 		error = ENOMEM;
629 		goto err_late;
630 	}
631 
632 	/* Check SOL/IDER usage */
633 	if (e1000_check_reset_block(hw))
634 		device_printf(dev, "PHY reset is blocked"
635 		    " due to SOL/IDER session.\n");
636 
637 	/* Sysctl for setting Energy Efficient Ethernet */
638 	em_set_sysctl_value(adapter, "eee_control",
639 	    "enable Energy Efficient Ethernet",
640 	    &hw->dev_spec.ich8lan.eee_disable, eee_setting);
641 
642 	/*
643 	** Start from a known state, this is
644 	** important in reading the nvm and
645 	** mac from that.
646 	*/
647 	e1000_reset_hw(hw);
648 
649 
650 	/* Make sure we have a good EEPROM before we read from it */
651 	if (e1000_validate_nvm_checksum(hw) < 0) {
652 		/*
653 		** Some PCI-E parts fail the first check due to
654 		** the link being in sleep state, call it again,
655 		** if it fails a second time its a real issue.
656 		*/
657 		if (e1000_validate_nvm_checksum(hw) < 0) {
658 			device_printf(dev,
659 			    "The EEPROM Checksum Is Not Valid\n");
660 			error = EIO;
661 			goto err_late;
662 		}
663 	}
664 
665 	/* Copy the permanent MAC address out of the EEPROM */
666 	if (e1000_read_mac_addr(hw) < 0) {
667 		device_printf(dev, "EEPROM read error while reading MAC"
668 		    " address\n");
669 		error = EIO;
670 		goto err_late;
671 	}
672 
673 	if (!em_is_valid_ether_addr(hw->mac.addr)) {
674 		device_printf(dev, "Invalid MAC address\n");
675 		error = EIO;
676 		goto err_late;
677 	}
678 
679 	/*
680 	**  Do interrupt configuration
681 	*/
682 	if (adapter->msix > 1) /* Do MSIX */
683 		error = em_allocate_msix(adapter);
684 	else  /* MSI or Legacy */
685 		error = em_allocate_legacy(adapter);
686 	if (error)
687 		goto err_late;
688 
689 	/*
690 	 * Get Wake-on-Lan and Management info for later use
691 	 */
692 	em_get_wakeup(dev);
693 
694 	/* Setup OS specific network interface */
695 	if (em_setup_interface(dev, adapter) != 0)
696 		goto err_late;
697 
698 	em_reset(adapter);
699 
700 	/* Initialize statistics */
701 	em_update_stats_counters(adapter);
702 
703 	hw->mac.get_link_status = 1;
704 	em_update_link_status(adapter);
705 
706 	/* Register for VLAN events */
707 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
708 	    em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
709 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
710 	    em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
711 
712 	em_add_hw_stats(adapter);
713 
714 	/* Non-AMT based hardware can now take control from firmware */
715 	if (adapter->has_manage && !adapter->has_amt)
716 		em_get_hw_control(adapter);
717 
718 	/* Tell the stack that the interface is not active */
719 	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
720 	adapter->ifp->if_drv_flags |= IFF_DRV_OACTIVE;
721 
722 	adapter->led_dev = led_create(em_led_func, adapter,
723 	    device_get_nameunit(dev));
724 #ifdef DEV_NETMAP
725 	em_netmap_attach(adapter);
726 #endif /* DEV_NETMAP */
727 
728 	INIT_DEBUGOUT("em_attach: end");
729 
730 	return (0);
731 
732 err_late:
733 	em_free_transmit_structures(adapter);
734 	em_free_receive_structures(adapter);
735 	em_release_hw_control(adapter);
736 	if (adapter->ifp != NULL)
737 		if_free(adapter->ifp);
738 err_pci:
739 	em_free_pci_resources(adapter);
740 	free(adapter->mta, M_DEVBUF);
741 	EM_CORE_LOCK_DESTROY(adapter);
742 
743 	return (error);
744 }
745 
746 /*********************************************************************
747  *  Device removal routine
748  *
749  *  The detach entry point is called when the driver is being removed.
750  *  This routine stops the adapter and deallocates all the resources
751  *  that were allocated for driver operation.
752  *
753  *  return 0 on success, positive on failure
754  *********************************************************************/
755 
756 static int
757 em_detach(device_t dev)
758 {
759 	struct adapter	*adapter = device_get_softc(dev);
760 	struct ifnet	*ifp = adapter->ifp;
761 
762 	INIT_DEBUGOUT("em_detach: begin");
763 
764 	/* Make sure VLANS are not using driver */
765 	if (adapter->ifp->if_vlantrunk != NULL) {
766 		device_printf(dev,"Vlan in use, detach first\n");
767 		return (EBUSY);
768 	}
769 
770 #ifdef DEVICE_POLLING
771 	if (ifp->if_capenable & IFCAP_POLLING)
772 		ether_poll_deregister(ifp);
773 #endif
774 
775 	if (adapter->led_dev != NULL)
776 		led_destroy(adapter->led_dev);
777 
778 	EM_CORE_LOCK(adapter);
779 	adapter->in_detach = 1;
780 	em_stop(adapter);
781 	EM_CORE_UNLOCK(adapter);
782 	EM_CORE_LOCK_DESTROY(adapter);
783 
784 	e1000_phy_hw_reset(&adapter->hw);
785 
786 	em_release_manageability(adapter);
787 	em_release_hw_control(adapter);
788 
789 	/* Unregister VLAN events */
790 	if (adapter->vlan_attach != NULL)
791 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
792 	if (adapter->vlan_detach != NULL)
793 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
794 
795 	ether_ifdetach(adapter->ifp);
796 	callout_drain(&adapter->timer);
797 
798 #ifdef DEV_NETMAP
799 	netmap_detach(ifp);
800 #endif /* DEV_NETMAP */
801 
802 	em_free_pci_resources(adapter);
803 	bus_generic_detach(dev);
804 	if_free(ifp);
805 
806 	em_free_transmit_structures(adapter);
807 	em_free_receive_structures(adapter);
808 
809 	em_release_hw_control(adapter);
810 	free(adapter->mta, M_DEVBUF);
811 
812 	return (0);
813 }
814 
815 /*********************************************************************
816  *
817  *  Shutdown entry point
818  *
819  **********************************************************************/
820 
821 static int
822 em_shutdown(device_t dev)
823 {
824 	return em_suspend(dev);
825 }
826 
827 /*
828  * Suspend/resume device methods.
829  */
830 static int
831 em_suspend(device_t dev)
832 {
833 	struct adapter *adapter = device_get_softc(dev);
834 
835 	EM_CORE_LOCK(adapter);
836 
837         em_release_manageability(adapter);
838 	em_release_hw_control(adapter);
839 	em_enable_wakeup(dev);
840 
841 	EM_CORE_UNLOCK(adapter);
842 
843 	return bus_generic_suspend(dev);
844 }
845 
846 static int
847 em_resume(device_t dev)
848 {
849 	struct adapter *adapter = device_get_softc(dev);
850 	struct ifnet *ifp = adapter->ifp;
851 
852 	EM_CORE_LOCK(adapter);
853 	if (adapter->hw.mac.type == e1000_pch2lan)
854 		e1000_resume_workarounds_pchlan(&adapter->hw);
855 	em_init_locked(adapter);
856 	em_init_manageability(adapter);
857 	EM_CORE_UNLOCK(adapter);
858 	em_start(ifp);
859 
860 	return bus_generic_resume(dev);
861 }
862 
863 
864 #ifdef EM_MULTIQUEUE
865 /*********************************************************************
866  *  Multiqueue Transmit routines
867  *
868  *  em_mq_start is called by the stack to initiate a transmit.
869  *  however, if busy the driver can queue the request rather
870  *  than do an immediate send. It is this that is an advantage
871  *  in this driver, rather than also having multiple tx queues.
872  **********************************************************************/
873 static int
874 em_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
875 {
876 	struct adapter  *adapter = txr->adapter;
877         struct mbuf     *next;
878         int             err = 0, enq = 0;
879 
880 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
881 	    IFF_DRV_RUNNING || adapter->link_active == 0) {
882 		if (m != NULL)
883 			err = drbr_enqueue(ifp, txr->br, m);
884 		return (err);
885 	}
886 
887 	enq = 0;
888 	if (m == NULL) {
889 		next = drbr_dequeue(ifp, txr->br);
890 	} else if (drbr_needs_enqueue(ifp, txr->br)) {
891 		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
892 			return (err);
893 		next = drbr_dequeue(ifp, txr->br);
894 	} else
895 		next = m;
896 
897 	/* Process the queue */
898 	while (next != NULL) {
899 		if ((err = em_xmit(txr, &next)) != 0) {
900                         if (next != NULL)
901                                 err = drbr_enqueue(ifp, txr->br, next);
902                         break;
903 		}
904 		enq++;
905 		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
906 		ETHER_BPF_MTAP(ifp, next);
907 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
908                         break;
909 		next = drbr_dequeue(ifp, txr->br);
910 	}
911 
912 	if (enq > 0) {
913                 /* Set the watchdog */
914                 txr->queue_status = EM_QUEUE_WORKING;
915 		txr->watchdog_time = ticks;
916 	}
917 
918 	if (txr->tx_avail < EM_MAX_SCATTER)
919 		em_txeof(txr);
920 	if (txr->tx_avail < EM_MAX_SCATTER)
921 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
922 	return (err);
923 }
924 
925 /*
926 ** Multiqueue capable stack interface
927 */
928 static int
929 em_mq_start(struct ifnet *ifp, struct mbuf *m)
930 {
931 	struct adapter	*adapter = ifp->if_softc;
932 	struct tx_ring	*txr = adapter->tx_rings;
933 	int 		error;
934 
935 	if (EM_TX_TRYLOCK(txr)) {
936 		error = em_mq_start_locked(ifp, txr, m);
937 		EM_TX_UNLOCK(txr);
938 	} else
939 		error = drbr_enqueue(ifp, txr->br, m);
940 
941 	return (error);
942 }
943 
944 /*
945 ** Flush all ring buffers
946 */
947 static void
948 em_qflush(struct ifnet *ifp)
949 {
950 	struct adapter  *adapter = ifp->if_softc;
951 	struct tx_ring  *txr = adapter->tx_rings;
952 	struct mbuf     *m;
953 
954 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
955 		EM_TX_LOCK(txr);
956 		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
957 			m_freem(m);
958 		EM_TX_UNLOCK(txr);
959 	}
960 	if_qflush(ifp);
961 }
962 #endif /* EM_MULTIQUEUE */
963 
964 static void
965 em_start_locked(struct ifnet *ifp, struct tx_ring *txr)
966 {
967 	struct adapter	*adapter = ifp->if_softc;
968 	struct mbuf	*m_head;
969 
970 	EM_TX_LOCK_ASSERT(txr);
971 
972 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
973 	    IFF_DRV_RUNNING)
974 		return;
975 
976 	if (!adapter->link_active)
977 		return;
978 
979 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
980         	/* Call cleanup if number of TX descriptors low */
981 		if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD)
982 			em_txeof(txr);
983 		if (txr->tx_avail < EM_MAX_SCATTER) {
984 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
985 			break;
986 		}
987                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
988 		if (m_head == NULL)
989 			break;
990 		/*
991 		 *  Encapsulation can modify our pointer, and or make it
992 		 *  NULL on failure.  In that event, we can't requeue.
993 		 */
994 		if (em_xmit(txr, &m_head)) {
995 			if (m_head == NULL)
996 				break;
997 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
998 			break;
999 		}
1000 
1001 		/* Send a copy of the frame to the BPF listener */
1002 		ETHER_BPF_MTAP(ifp, m_head);
1003 
1004 		/* Set timeout in case hardware has problems transmitting. */
1005 		txr->watchdog_time = ticks;
1006                 txr->queue_status = EM_QUEUE_WORKING;
1007 	}
1008 
1009 	return;
1010 }
1011 
1012 static void
1013 em_start(struct ifnet *ifp)
1014 {
1015 	struct adapter	*adapter = ifp->if_softc;
1016 	struct tx_ring	*txr = adapter->tx_rings;
1017 
1018 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1019 		EM_TX_LOCK(txr);
1020 		em_start_locked(ifp, txr);
1021 		EM_TX_UNLOCK(txr);
1022 	}
1023 	/*
1024 	** If we went inactive schedule
1025 	** a task to clean up.
1026 	*/
1027 	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
1028 		taskqueue_enqueue(txr->tq, &txr->tx_task);
1029 	return;
1030 }
1031 
1032 /*********************************************************************
1033  *  Ioctl entry point
1034  *
1035  *  em_ioctl is called when the user wants to configure the
1036  *  interface.
1037  *
1038  *  return 0 on success, positive on failure
1039  **********************************************************************/
1040 
1041 static int
1042 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1043 {
1044 	struct adapter	*adapter = ifp->if_softc;
1045 	struct ifreq	*ifr = (struct ifreq *)data;
1046 #if defined(INET) || defined(INET6)
1047 	struct ifaddr	*ifa = (struct ifaddr *)data;
1048 #endif
1049 	bool		avoid_reset = FALSE;
1050 	int		error = 0;
1051 
1052 	if (adapter->in_detach)
1053 		return (error);
1054 
1055 	switch (command) {
1056 	case SIOCSIFADDR:
1057 #ifdef INET
1058 		if (ifa->ifa_addr->sa_family == AF_INET)
1059 			avoid_reset = TRUE;
1060 #endif
1061 #ifdef INET6
1062 		if (ifa->ifa_addr->sa_family == AF_INET6)
1063 			avoid_reset = TRUE;
1064 #endif
1065 		/*
1066 		** Calling init results in link renegotiation,
1067 		** so we avoid doing it when possible.
1068 		*/
1069 		if (avoid_reset) {
1070 			ifp->if_flags |= IFF_UP;
1071 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1072 				em_init(adapter);
1073 #ifdef INET
1074 			if (!(ifp->if_flags & IFF_NOARP))
1075 				arp_ifinit(ifp, ifa);
1076 #endif
1077 		} else
1078 			error = ether_ioctl(ifp, command, data);
1079 		break;
1080 	case SIOCSIFMTU:
1081 	    {
1082 		int max_frame_size;
1083 
1084 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1085 
1086 		EM_CORE_LOCK(adapter);
1087 		switch (adapter->hw.mac.type) {
1088 		case e1000_82571:
1089 		case e1000_82572:
1090 		case e1000_ich9lan:
1091 		case e1000_ich10lan:
1092 		case e1000_pch2lan:
1093 		case e1000_82574:
1094 		case e1000_82583:
1095 		case e1000_80003es2lan:	/* 9K Jumbo Frame size */
1096 			max_frame_size = 9234;
1097 			break;
1098 		case e1000_pchlan:
1099 			max_frame_size = 4096;
1100 			break;
1101 			/* Adapters that do not support jumbo frames */
1102 		case e1000_ich8lan:
1103 			max_frame_size = ETHER_MAX_LEN;
1104 			break;
1105 		default:
1106 			max_frame_size = MAX_JUMBO_FRAME_SIZE;
1107 		}
1108 		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1109 		    ETHER_CRC_LEN) {
1110 			EM_CORE_UNLOCK(adapter);
1111 			error = EINVAL;
1112 			break;
1113 		}
1114 
1115 		ifp->if_mtu = ifr->ifr_mtu;
1116 		adapter->max_frame_size =
1117 		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1118 		em_init_locked(adapter);
1119 		EM_CORE_UNLOCK(adapter);
1120 		break;
1121 	    }
1122 	case SIOCSIFFLAGS:
1123 		IOCTL_DEBUGOUT("ioctl rcv'd:\
1124 		    SIOCSIFFLAGS (Set Interface Flags)");
1125 		EM_CORE_LOCK(adapter);
1126 		if (ifp->if_flags & IFF_UP) {
1127 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1128 				if ((ifp->if_flags ^ adapter->if_flags) &
1129 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1130 					em_disable_promisc(adapter);
1131 					em_set_promisc(adapter);
1132 				}
1133 			} else
1134 				em_init_locked(adapter);
1135 		} else
1136 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1137 				em_stop(adapter);
1138 		adapter->if_flags = ifp->if_flags;
1139 		EM_CORE_UNLOCK(adapter);
1140 		break;
1141 	case SIOCADDMULTI:
1142 	case SIOCDELMULTI:
1143 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1144 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1145 			EM_CORE_LOCK(adapter);
1146 			em_disable_intr(adapter);
1147 			em_set_multi(adapter);
1148 #ifdef DEVICE_POLLING
1149 			if (!(ifp->if_capenable & IFCAP_POLLING))
1150 #endif
1151 				em_enable_intr(adapter);
1152 			EM_CORE_UNLOCK(adapter);
1153 		}
1154 		break;
1155 	case SIOCSIFMEDIA:
1156 		/* Check SOL/IDER usage */
1157 		EM_CORE_LOCK(adapter);
1158 		if (e1000_check_reset_block(&adapter->hw)) {
1159 			EM_CORE_UNLOCK(adapter);
1160 			device_printf(adapter->dev, "Media change is"
1161 			    " blocked due to SOL/IDER session.\n");
1162 			break;
1163 		}
1164 		EM_CORE_UNLOCK(adapter);
1165 		/* falls thru */
1166 	case SIOCGIFMEDIA:
1167 		IOCTL_DEBUGOUT("ioctl rcv'd: \
1168 		    SIOCxIFMEDIA (Get/Set Interface Media)");
1169 		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1170 		break;
1171 	case SIOCSIFCAP:
1172 	    {
1173 		int mask, reinit;
1174 
1175 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1176 		reinit = 0;
1177 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1178 #ifdef DEVICE_POLLING
1179 		if (mask & IFCAP_POLLING) {
1180 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1181 				error = ether_poll_register(em_poll, ifp);
1182 				if (error)
1183 					return (error);
1184 				EM_CORE_LOCK(adapter);
1185 				em_disable_intr(adapter);
1186 				ifp->if_capenable |= IFCAP_POLLING;
1187 				EM_CORE_UNLOCK(adapter);
1188 			} else {
1189 				error = ether_poll_deregister(ifp);
1190 				/* Enable interrupt even in error case */
1191 				EM_CORE_LOCK(adapter);
1192 				em_enable_intr(adapter);
1193 				ifp->if_capenable &= ~IFCAP_POLLING;
1194 				EM_CORE_UNLOCK(adapter);
1195 			}
1196 		}
1197 #endif
1198 		if (mask & IFCAP_HWCSUM) {
1199 			ifp->if_capenable ^= IFCAP_HWCSUM;
1200 			reinit = 1;
1201 		}
1202 		if (mask & IFCAP_TSO4) {
1203 			ifp->if_capenable ^= IFCAP_TSO4;
1204 			reinit = 1;
1205 		}
1206 		if (mask & IFCAP_VLAN_HWTAGGING) {
1207 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1208 			reinit = 1;
1209 		}
1210 		if (mask & IFCAP_VLAN_HWFILTER) {
1211 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1212 			reinit = 1;
1213 		}
1214 		if (mask & IFCAP_VLAN_HWTSO) {
1215 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1216 			reinit = 1;
1217 		}
1218 		if ((mask & IFCAP_WOL) &&
1219 		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
1220 			if (mask & IFCAP_WOL_MCAST)
1221 				ifp->if_capenable ^= IFCAP_WOL_MCAST;
1222 			if (mask & IFCAP_WOL_MAGIC)
1223 				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1224 		}
1225 		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1226 			em_init(adapter);
1227 		VLAN_CAPABILITIES(ifp);
1228 		break;
1229 	    }
1230 
1231 	default:
1232 		error = ether_ioctl(ifp, command, data);
1233 		break;
1234 	}
1235 
1236 	return (error);
1237 }
1238 
1239 
1240 /*********************************************************************
1241  *  Init entry point
1242  *
1243  *  This routine is used in two ways. It is used by the stack as
1244  *  init entry point in network interface structure. It is also used
1245  *  by the driver as a hw/sw initialization routine to get to a
1246  *  consistent state.
1247  *
1248  *  return 0 on success, positive on failure
1249  **********************************************************************/
1250 
1251 static void
1252 em_init_locked(struct adapter *adapter)
1253 {
1254 	struct ifnet	*ifp = adapter->ifp;
1255 	device_t	dev = adapter->dev;
1256 
1257 	INIT_DEBUGOUT("em_init: begin");
1258 
1259 	EM_CORE_LOCK_ASSERT(adapter);
1260 
1261 	em_disable_intr(adapter);
1262 	callout_stop(&adapter->timer);
1263 
1264 	/* Get the latest mac address, User can use a LAA */
1265         bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1266               ETHER_ADDR_LEN);
1267 
1268 	/* Put the address into the Receive Address Array */
1269 	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1270 
1271 	/*
1272 	 * With the 82571 adapter, RAR[0] may be overwritten
1273 	 * when the other port is reset, we make a duplicate
1274 	 * in RAR[14] for that eventuality, this assures
1275 	 * the interface continues to function.
1276 	 */
1277 	if (adapter->hw.mac.type == e1000_82571) {
1278 		e1000_set_laa_state_82571(&adapter->hw, TRUE);
1279 		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1280 		    E1000_RAR_ENTRIES - 1);
1281 	}
1282 
1283 	/* Initialize the hardware */
1284 	em_reset(adapter);
1285 	em_update_link_status(adapter);
1286 
1287 	/* Setup VLAN support, basic and offload if available */
1288 	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1289 
1290 	/* Set hardware offload abilities */
1291 	ifp->if_hwassist = 0;
1292 	if (ifp->if_capenable & IFCAP_TXCSUM)
1293 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1294 	if (ifp->if_capenable & IFCAP_TSO4)
1295 		ifp->if_hwassist |= CSUM_TSO;
1296 
1297 	/* Configure for OS presence */
1298 	em_init_manageability(adapter);
1299 
1300 	/* Prepare transmit descriptors and buffers */
1301 	em_setup_transmit_structures(adapter);
1302 	em_initialize_transmit_unit(adapter);
1303 
1304 	/* Setup Multicast table */
1305 	em_set_multi(adapter);
1306 
1307 	/*
1308 	** Figure out the desired mbuf
1309 	** pool for doing jumbos
1310 	*/
1311 	if (adapter->max_frame_size <= 2048)
1312 		adapter->rx_mbuf_sz = MCLBYTES;
1313 	else if (adapter->max_frame_size <= 4096)
1314 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
1315 	else
1316 		adapter->rx_mbuf_sz = MJUM9BYTES;
1317 
1318 	/* Prepare receive descriptors and buffers */
1319 	if (em_setup_receive_structures(adapter)) {
1320 		device_printf(dev, "Could not setup receive structures\n");
1321 		em_stop(adapter);
1322 		return;
1323 	}
1324 	em_initialize_receive_unit(adapter);
1325 
1326 	/* Use real VLAN Filter support? */
1327 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1328 		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1329 			/* Use real VLAN Filter support */
1330 			em_setup_vlan_hw_support(adapter);
1331 		else {
1332 			u32 ctrl;
1333 			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1334 			ctrl |= E1000_CTRL_VME;
1335 			E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1336 		}
1337 	}
1338 
1339 	/* Don't lose promiscuous settings */
1340 	em_set_promisc(adapter);
1341 
1342 	/* Set the interface as ACTIVE */
1343 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1344 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1345 
1346 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1347 	e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1348 
1349 	/* MSI/X configuration for 82574 */
1350 	if (adapter->hw.mac.type == e1000_82574) {
1351 		int tmp;
1352 		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1353 		tmp |= E1000_CTRL_EXT_PBA_CLR;
1354 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1355 		/* Set the IVAR - interrupt vector routing. */
1356 		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, adapter->ivars);
1357 	}
1358 
1359 #ifdef DEVICE_POLLING
1360 	/*
1361 	 * Only enable interrupts if we are not polling, make sure
1362 	 * they are off otherwise.
1363 	 */
1364 	if (ifp->if_capenable & IFCAP_POLLING)
1365 		em_disable_intr(adapter);
1366 	else
1367 #endif /* DEVICE_POLLING */
1368 		em_enable_intr(adapter);
1369 
1370 	/* AMT based hardware can now take control from firmware */
1371 	if (adapter->has_manage && adapter->has_amt)
1372 		em_get_hw_control(adapter);
1373 }
1374 
1375 static void
1376 em_init(void *arg)
1377 {
1378 	struct adapter *adapter = arg;
1379 
1380 	EM_CORE_LOCK(adapter);
1381 	em_init_locked(adapter);
1382 	EM_CORE_UNLOCK(adapter);
1383 }
1384 
1385 
1386 #ifdef DEVICE_POLLING
1387 /*********************************************************************
1388  *
1389  *  Legacy polling routine: note this only works with single queue
1390  *
1391  *********************************************************************/
1392 static int
1393 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1394 {
1395 	struct adapter *adapter = ifp->if_softc;
1396 	struct tx_ring	*txr = adapter->tx_rings;
1397 	struct rx_ring	*rxr = adapter->rx_rings;
1398 	u32		reg_icr;
1399 	int		rx_done;
1400 
1401 	EM_CORE_LOCK(adapter);
1402 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1403 		EM_CORE_UNLOCK(adapter);
1404 		return (0);
1405 	}
1406 
1407 	if (cmd == POLL_AND_CHECK_STATUS) {
1408 		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1409 		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1410 			callout_stop(&adapter->timer);
1411 			adapter->hw.mac.get_link_status = 1;
1412 			em_update_link_status(adapter);
1413 			callout_reset(&adapter->timer, hz,
1414 			    em_local_timer, adapter);
1415 		}
1416 	}
1417 	EM_CORE_UNLOCK(adapter);
1418 
1419 	em_rxeof(rxr, count, &rx_done);
1420 
1421 	EM_TX_LOCK(txr);
1422 	em_txeof(txr);
1423 #ifdef EM_MULTIQUEUE
1424 	if (!drbr_empty(ifp, txr->br))
1425 		em_mq_start_locked(ifp, txr, NULL);
1426 #else
1427 	em_start_locked(ifp, txr);
1428 #endif
1429 	EM_TX_UNLOCK(txr);
1430 
1431 	return (rx_done);
1432 }
1433 #endif /* DEVICE_POLLING */
1434 
1435 
1436 /*********************************************************************
1437  *
1438  *  Fast Legacy/MSI Combined Interrupt Service routine
1439  *
1440  *********************************************************************/
1441 static int
1442 em_irq_fast(void *arg)
1443 {
1444 	struct adapter	*adapter = arg;
1445 	struct ifnet	*ifp;
1446 	u32		reg_icr;
1447 
1448 	ifp = adapter->ifp;
1449 
1450 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1451 
1452 	/* Hot eject?  */
1453 	if (reg_icr == 0xffffffff)
1454 		return FILTER_STRAY;
1455 
1456 	/* Definitely not our interrupt.  */
1457 	if (reg_icr == 0x0)
1458 		return FILTER_STRAY;
1459 
1460 	/*
1461 	 * Starting with the 82571 chip, bit 31 should be used to
1462 	 * determine whether the interrupt belongs to us.
1463 	 */
1464 	if (adapter->hw.mac.type >= e1000_82571 &&
1465 	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1466 		return FILTER_STRAY;
1467 
1468 	em_disable_intr(adapter);
1469 	taskqueue_enqueue(adapter->tq, &adapter->que_task);
1470 
1471 	/* Link status change */
1472 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1473 		adapter->hw.mac.get_link_status = 1;
1474 		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1475 	}
1476 
1477 	if (reg_icr & E1000_ICR_RXO)
1478 		adapter->rx_overruns++;
1479 	return FILTER_HANDLED;
1480 }
1481 
1482 /* Combined RX/TX handler, used by Legacy and MSI */
1483 static void
1484 em_handle_que(void *context, int pending)
1485 {
1486 	struct adapter	*adapter = context;
1487 	struct ifnet	*ifp = adapter->ifp;
1488 	struct tx_ring	*txr = adapter->tx_rings;
1489 	struct rx_ring	*rxr = adapter->rx_rings;
1490 
1491 
1492 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1493 		bool more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
1494 		EM_TX_LOCK(txr);
1495 		em_txeof(txr);
1496 #ifdef EM_MULTIQUEUE
1497 		if (!drbr_empty(ifp, txr->br))
1498 			em_mq_start_locked(ifp, txr, NULL);
1499 #else
1500 		em_start_locked(ifp, txr);
1501 #endif
1502 		EM_TX_UNLOCK(txr);
1503 		if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
1504 			taskqueue_enqueue(adapter->tq, &adapter->que_task);
1505 			return;
1506 		}
1507 	}
1508 
1509 	em_enable_intr(adapter);
1510 	return;
1511 }
1512 
1513 
1514 /*********************************************************************
1515  *
1516  *  MSIX Interrupt Service Routines
1517  *
1518  **********************************************************************/
1519 static void
1520 em_msix_tx(void *arg)
1521 {
1522 	struct tx_ring *txr = arg;
1523 	struct adapter *adapter = txr->adapter;
1524 	bool		more;
1525 
1526 	++txr->tx_irq;
1527 	EM_TX_LOCK(txr);
1528 	more = em_txeof(txr);
1529 	EM_TX_UNLOCK(txr);
1530 	if (more)
1531 		taskqueue_enqueue(txr->tq, &txr->tx_task);
1532 	else
1533 		/* Reenable this interrupt */
1534 		E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
1535 	return;
1536 }
1537 
1538 /*********************************************************************
1539  *
1540  *  MSIX RX Interrupt Service routine
1541  *
1542  **********************************************************************/
1543 
1544 static void
1545 em_msix_rx(void *arg)
1546 {
1547 	struct rx_ring	*rxr = arg;
1548 	struct adapter	*adapter = rxr->adapter;
1549 	bool		more;
1550 
1551 	++rxr->rx_irq;
1552 	more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
1553 	if (more)
1554 		taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1555 	else
1556 		/* Reenable this interrupt */
1557 		E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
1558 	return;
1559 }
1560 
1561 /*********************************************************************
1562  *
1563  *  MSIX Link Fast Interrupt Service routine
1564  *
1565  **********************************************************************/
1566 static void
1567 em_msix_link(void *arg)
1568 {
1569 	struct adapter	*adapter = arg;
1570 	u32		reg_icr;
1571 
1572 	++adapter->link_irq;
1573 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1574 
1575 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1576 		adapter->hw.mac.get_link_status = 1;
1577 		em_handle_link(adapter, 0);
1578 	} else
1579 		E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1580 		    EM_MSIX_LINK | E1000_IMS_LSC);
1581 	return;
1582 }
1583 
1584 static void
1585 em_handle_rx(void *context, int pending)
1586 {
1587 	struct rx_ring	*rxr = context;
1588 	struct adapter	*adapter = rxr->adapter;
1589         bool            more;
1590 
1591 	more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
1592 	if (more)
1593 		taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1594 	else
1595 		/* Reenable this interrupt */
1596 		E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
1597 }
1598 
1599 static void
1600 em_handle_tx(void *context, int pending)
1601 {
1602 	struct tx_ring	*txr = context;
1603 	struct adapter	*adapter = txr->adapter;
1604 	struct ifnet	*ifp = adapter->ifp;
1605 
1606 	EM_TX_LOCK(txr);
1607 	em_txeof(txr);
1608 #ifdef EM_MULTIQUEUE
1609 	if (!drbr_empty(ifp, txr->br))
1610 		em_mq_start_locked(ifp, txr, NULL);
1611 #else
1612 	em_start_locked(ifp, txr);
1613 #endif
1614 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
1615 	EM_TX_UNLOCK(txr);
1616 }
1617 
1618 static void
1619 em_handle_link(void *context, int pending)
1620 {
1621 	struct adapter	*adapter = context;
1622 	struct ifnet *ifp = adapter->ifp;
1623 
1624 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1625 		return;
1626 
1627 	EM_CORE_LOCK(adapter);
1628 	callout_stop(&adapter->timer);
1629 	em_update_link_status(adapter);
1630 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1631 	E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1632 	    EM_MSIX_LINK | E1000_IMS_LSC);
1633 	EM_CORE_UNLOCK(adapter);
1634 }
1635 
1636 
1637 /*********************************************************************
1638  *
1639  *  Media Ioctl callback
1640  *
1641  *  This routine is called whenever the user queries the status of
1642  *  the interface using ifconfig.
1643  *
1644  **********************************************************************/
1645 static void
1646 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1647 {
1648 	struct adapter *adapter = ifp->if_softc;
1649 	u_char fiber_type = IFM_1000_SX;
1650 
1651 	INIT_DEBUGOUT("em_media_status: begin");
1652 
1653 	EM_CORE_LOCK(adapter);
1654 	em_update_link_status(adapter);
1655 
1656 	ifmr->ifm_status = IFM_AVALID;
1657 	ifmr->ifm_active = IFM_ETHER;
1658 
1659 	if (!adapter->link_active) {
1660 		EM_CORE_UNLOCK(adapter);
1661 		return;
1662 	}
1663 
1664 	ifmr->ifm_status |= IFM_ACTIVE;
1665 
1666 	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1667 	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1668 		ifmr->ifm_active |= fiber_type | IFM_FDX;
1669 	} else {
1670 		switch (adapter->link_speed) {
1671 		case 10:
1672 			ifmr->ifm_active |= IFM_10_T;
1673 			break;
1674 		case 100:
1675 			ifmr->ifm_active |= IFM_100_TX;
1676 			break;
1677 		case 1000:
1678 			ifmr->ifm_active |= IFM_1000_T;
1679 			break;
1680 		}
1681 		if (adapter->link_duplex == FULL_DUPLEX)
1682 			ifmr->ifm_active |= IFM_FDX;
1683 		else
1684 			ifmr->ifm_active |= IFM_HDX;
1685 	}
1686 	EM_CORE_UNLOCK(adapter);
1687 }
1688 
1689 /*********************************************************************
1690  *
1691  *  Media Ioctl callback
1692  *
1693  *  This routine is called when the user changes speed/duplex using
1694  *  media/mediopt option with ifconfig.
1695  *
1696  **********************************************************************/
1697 static int
1698 em_media_change(struct ifnet *ifp)
1699 {
1700 	struct adapter *adapter = ifp->if_softc;
1701 	struct ifmedia  *ifm = &adapter->media;
1702 
1703 	INIT_DEBUGOUT("em_media_change: begin");
1704 
1705 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1706 		return (EINVAL);
1707 
1708 	EM_CORE_LOCK(adapter);
1709 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1710 	case IFM_AUTO:
1711 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1712 		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1713 		break;
1714 	case IFM_1000_LX:
1715 	case IFM_1000_SX:
1716 	case IFM_1000_T:
1717 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1718 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1719 		break;
1720 	case IFM_100_TX:
1721 		adapter->hw.mac.autoneg = FALSE;
1722 		adapter->hw.phy.autoneg_advertised = 0;
1723 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1724 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1725 		else
1726 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1727 		break;
1728 	case IFM_10_T:
1729 		adapter->hw.mac.autoneg = FALSE;
1730 		adapter->hw.phy.autoneg_advertised = 0;
1731 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1732 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1733 		else
1734 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1735 		break;
1736 	default:
1737 		device_printf(adapter->dev, "Unsupported media type\n");
1738 	}
1739 
1740 	em_init_locked(adapter);
1741 	EM_CORE_UNLOCK(adapter);
1742 
1743 	return (0);
1744 }
1745 
1746 /*********************************************************************
1747  *
1748  *  This routine maps the mbufs to tx descriptors.
1749  *
1750  *  return 0 on success, positive on failure
1751  **********************************************************************/
1752 
1753 static int
1754 em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1755 {
1756 	struct adapter		*adapter = txr->adapter;
1757 	bus_dma_segment_t	segs[EM_MAX_SCATTER];
1758 	bus_dmamap_t		map;
1759 	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
1760 	struct e1000_tx_desc	*ctxd = NULL;
1761 	struct mbuf		*m_head;
1762 	struct ether_header	*eh;
1763 	struct ip		*ip = NULL;
1764 	struct tcphdr		*tp = NULL;
1765 	u32			txd_upper, txd_lower, txd_used, txd_saved;
1766 	int			ip_off, poff;
1767 	int			nsegs, i, j, first, last = 0;
1768 	int			error, do_tso, tso_desc = 0, remap = 1;
1769 
1770 retry:
1771 	m_head = *m_headp;
1772 	txd_upper = txd_lower = txd_used = txd_saved = 0;
1773 	do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
1774 	ip_off = poff = 0;
1775 
1776 	/*
1777 	 * Intel recommends entire IP/TCP header length reside in a single
1778 	 * buffer. If multiple descriptors are used to describe the IP and
1779 	 * TCP header, each descriptor should describe one or more
1780 	 * complete headers; descriptors referencing only parts of headers
1781 	 * are not supported. If all layer headers are not coalesced into
1782 	 * a single buffer, each buffer should not cross a 4KB boundary,
1783 	 * or be larger than the maximum read request size.
1784 	 * Controller also requires modifing IP/TCP header to make TSO work
1785 	 * so we firstly get a writable mbuf chain then coalesce ethernet/
1786 	 * IP/TCP header into a single buffer to meet the requirement of
1787 	 * controller. This also simplifies IP/TCP/UDP checksum offloading
1788 	 * which also has similiar restrictions.
1789 	 */
1790 	if (do_tso || m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
1791 		if (do_tso || (m_head->m_next != NULL &&
1792 		    m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)) {
1793 			if (M_WRITABLE(*m_headp) == 0) {
1794 				m_head = m_dup(*m_headp, M_DONTWAIT);
1795 				m_freem(*m_headp);
1796 				if (m_head == NULL) {
1797 					*m_headp = NULL;
1798 					return (ENOBUFS);
1799 				}
1800 				*m_headp = m_head;
1801 			}
1802 		}
1803 		/*
1804 		 * XXX
1805 		 * Assume IPv4, we don't have TSO/checksum offload support
1806 		 * for IPv6 yet.
1807 		 */
1808 		ip_off = sizeof(struct ether_header);
1809 		m_head = m_pullup(m_head, ip_off);
1810 		if (m_head == NULL) {
1811 			*m_headp = NULL;
1812 			return (ENOBUFS);
1813 		}
1814 		eh = mtod(m_head, struct ether_header *);
1815 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1816 			ip_off = sizeof(struct ether_vlan_header);
1817 			m_head = m_pullup(m_head, ip_off);
1818 			if (m_head == NULL) {
1819 				*m_headp = NULL;
1820 				return (ENOBUFS);
1821 			}
1822 		}
1823 		m_head = m_pullup(m_head, ip_off + sizeof(struct ip));
1824 		if (m_head == NULL) {
1825 			*m_headp = NULL;
1826 			return (ENOBUFS);
1827 		}
1828 		ip = (struct ip *)(mtod(m_head, char *) + ip_off);
1829 		poff = ip_off + (ip->ip_hl << 2);
1830 		if (do_tso) {
1831 			m_head = m_pullup(m_head, poff + sizeof(struct tcphdr));
1832 			if (m_head == NULL) {
1833 				*m_headp = NULL;
1834 				return (ENOBUFS);
1835 			}
1836 			tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
1837 			/*
1838 			 * TSO workaround:
1839 			 *   pull 4 more bytes of data into it.
1840 			 */
1841 			m_head = m_pullup(m_head, poff + (tp->th_off << 2) + 4);
1842 			if (m_head == NULL) {
1843 				*m_headp = NULL;
1844 				return (ENOBUFS);
1845 			}
1846 			ip = (struct ip *)(mtod(m_head, char *) + ip_off);
1847 			ip->ip_len = 0;
1848 			ip->ip_sum = 0;
1849 			/*
1850 			 * The pseudo TCP checksum does not include TCP payload
1851 			 * length so driver should recompute the checksum here
1852 			 * what hardware expect to see. This is adherence of
1853 			 * Microsoft's Large Send specification.
1854 			 */
1855 			tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
1856 			tp->th_sum = in_pseudo(ip->ip_src.s_addr,
1857 			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1858 		} else if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
1859 			m_head = m_pullup(m_head, poff + sizeof(struct tcphdr));
1860 			if (m_head == NULL) {
1861 				*m_headp = NULL;
1862 				return (ENOBUFS);
1863 			}
1864 			tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
1865 			m_head = m_pullup(m_head, poff + (tp->th_off << 2));
1866 			if (m_head == NULL) {
1867 				*m_headp = NULL;
1868 				return (ENOBUFS);
1869 			}
1870 			ip = (struct ip *)(mtod(m_head, char *) + ip_off);
1871 			tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
1872 		} else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
1873 			m_head = m_pullup(m_head, poff + sizeof(struct udphdr));
1874 			if (m_head == NULL) {
1875 				*m_headp = NULL;
1876 				return (ENOBUFS);
1877 			}
1878 			ip = (struct ip *)(mtod(m_head, char *) + ip_off);
1879 		}
1880 		*m_headp = m_head;
1881 	}
1882 
1883 	/*
1884 	 * Map the packet for DMA
1885 	 *
1886 	 * Capture the first descriptor index,
1887 	 * this descriptor will have the index
1888 	 * of the EOP which is the only one that
1889 	 * now gets a DONE bit writeback.
1890 	 */
1891 	first = txr->next_avail_desc;
1892 	tx_buffer = &txr->tx_buffers[first];
1893 	tx_buffer_mapped = tx_buffer;
1894 	map = tx_buffer->map;
1895 
1896 	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1897 	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1898 
1899 	/*
1900 	 * There are two types of errors we can (try) to handle:
1901 	 * - EFBIG means the mbuf chain was too long and bus_dma ran
1902 	 *   out of segments.  Defragment the mbuf chain and try again.
1903 	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1904 	 *   at this point in time.  Defer sending and try again later.
1905 	 * All other errors, in particular EINVAL, are fatal and prevent the
1906 	 * mbuf chain from ever going through.  Drop it and report error.
1907 	 */
1908 	if (error == EFBIG && remap) {
1909 		struct mbuf *m;
1910 
1911 		m = m_defrag(*m_headp, M_DONTWAIT);
1912 		if (m == NULL) {
1913 			adapter->mbuf_alloc_failed++;
1914 			m_freem(*m_headp);
1915 			*m_headp = NULL;
1916 			return (ENOBUFS);
1917 		}
1918 		*m_headp = m;
1919 
1920 		/* Try it again, but only once */
1921 		remap = 0;
1922 		goto retry;
1923 	} else if (error == ENOMEM) {
1924 		adapter->no_tx_dma_setup++;
1925 		return (error);
1926 	} else if (error != 0) {
1927 		adapter->no_tx_dma_setup++;
1928 		m_freem(*m_headp);
1929 		*m_headp = NULL;
1930 		return (error);
1931 	}
1932 
1933 	/*
1934 	 * TSO Hardware workaround, if this packet is not
1935 	 * TSO, and is only a single descriptor long, and
1936 	 * it follows a TSO burst, then we need to add a
1937 	 * sentinel descriptor to prevent premature writeback.
1938 	 */
1939 	if ((do_tso == 0) && (txr->tx_tso == TRUE)) {
1940 		if (nsegs == 1)
1941 			tso_desc = TRUE;
1942 		txr->tx_tso = FALSE;
1943 	}
1944 
1945         if (nsegs > (txr->tx_avail - 2)) {
1946                 txr->no_desc_avail++;
1947 		bus_dmamap_unload(txr->txtag, map);
1948 		return (ENOBUFS);
1949         }
1950 	m_head = *m_headp;
1951 
1952 	/* Do hardware assists */
1953 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1954 		em_tso_setup(txr, m_head, ip_off, ip, tp,
1955 		    &txd_upper, &txd_lower);
1956 		/* we need to make a final sentinel transmit desc */
1957 		tso_desc = TRUE;
1958 	} else if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1959 		em_transmit_checksum_setup(txr, m_head,
1960 		    ip_off, ip, &txd_upper, &txd_lower);
1961 
1962 	if (m_head->m_flags & M_VLANTAG) {
1963 		/* Set the vlan id. */
1964 		txd_upper |=
1965 		    (htole16(m_head->m_pkthdr.ether_vtag) << 16);
1966                 /* Tell hardware to add tag */
1967                 txd_lower |= htole32(E1000_TXD_CMD_VLE);
1968         }
1969 
1970 	i = txr->next_avail_desc;
1971 
1972 	/* Set up our transmit descriptors */
1973 	for (j = 0; j < nsegs; j++) {
1974 		bus_size_t seg_len;
1975 		bus_addr_t seg_addr;
1976 
1977 		tx_buffer = &txr->tx_buffers[i];
1978 		ctxd = &txr->tx_base[i];
1979 		seg_addr = segs[j].ds_addr;
1980 		seg_len  = segs[j].ds_len;
1981 		/*
1982 		** TSO Workaround:
1983 		** If this is the last descriptor, we want to
1984 		** split it so we have a small final sentinel
1985 		*/
1986 		if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
1987 			seg_len -= 4;
1988 			ctxd->buffer_addr = htole64(seg_addr);
1989 			ctxd->lower.data = htole32(
1990 			adapter->txd_cmd | txd_lower | seg_len);
1991 			ctxd->upper.data =
1992 			    htole32(txd_upper);
1993 			if (++i == adapter->num_tx_desc)
1994 				i = 0;
1995 			/* Now make the sentinel */
1996 			++txd_used; /* using an extra txd */
1997 			ctxd = &txr->tx_base[i];
1998 			tx_buffer = &txr->tx_buffers[i];
1999 			ctxd->buffer_addr =
2000 			    htole64(seg_addr + seg_len);
2001 			ctxd->lower.data = htole32(
2002 			adapter->txd_cmd | txd_lower | 4);
2003 			ctxd->upper.data =
2004 			    htole32(txd_upper);
2005 			last = i;
2006 			if (++i == adapter->num_tx_desc)
2007 				i = 0;
2008 		} else {
2009 			ctxd->buffer_addr = htole64(seg_addr);
2010 			ctxd->lower.data = htole32(
2011 			adapter->txd_cmd | txd_lower | seg_len);
2012 			ctxd->upper.data =
2013 			    htole32(txd_upper);
2014 			last = i;
2015 			if (++i == adapter->num_tx_desc)
2016 				i = 0;
2017 		}
2018 		tx_buffer->m_head = NULL;
2019 		tx_buffer->next_eop = -1;
2020 	}
2021 
2022 	txr->next_avail_desc = i;
2023 	txr->tx_avail -= nsegs;
2024 	if (tso_desc) /* TSO used an extra for sentinel */
2025 		txr->tx_avail -= txd_used;
2026 
2027         tx_buffer->m_head = m_head;
2028 	/*
2029 	** Here we swap the map so the last descriptor,
2030 	** which gets the completion interrupt has the
2031 	** real map, and the first descriptor gets the
2032 	** unused map from this descriptor.
2033 	*/
2034 	tx_buffer_mapped->map = tx_buffer->map;
2035 	tx_buffer->map = map;
2036         bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
2037 
2038         /*
2039          * Last Descriptor of Packet
2040 	 * needs End Of Packet (EOP)
2041 	 * and Report Status (RS)
2042          */
2043         ctxd->lower.data |=
2044 	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2045 	/*
2046 	 * Keep track in the first buffer which
2047 	 * descriptor will be written back
2048 	 */
2049 	tx_buffer = &txr->tx_buffers[first];
2050 	tx_buffer->next_eop = last;
2051 	/* Update the watchdog time early and often */
2052 	txr->watchdog_time = ticks;
2053 
2054 	/*
2055 	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2056 	 * that this frame is available to transmit.
2057 	 */
2058 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2059 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2060 	E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i);
2061 
2062 	return (0);
2063 }
2064 
2065 static void
2066 em_set_promisc(struct adapter *adapter)
2067 {
2068 	struct ifnet	*ifp = adapter->ifp;
2069 	u32		reg_rctl;
2070 
2071 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2072 
2073 	if (ifp->if_flags & IFF_PROMISC) {
2074 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2075 		/* Turn this on if you want to see bad packets */
2076 		if (em_debug_sbp)
2077 			reg_rctl |= E1000_RCTL_SBP;
2078 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2079 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2080 		reg_rctl |= E1000_RCTL_MPE;
2081 		reg_rctl &= ~E1000_RCTL_UPE;
2082 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2083 	}
2084 }
2085 
2086 static void
2087 em_disable_promisc(struct adapter *adapter)
2088 {
2089 	u32	reg_rctl;
2090 
2091 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2092 
2093 	reg_rctl &=  (~E1000_RCTL_UPE);
2094 	reg_rctl &=  (~E1000_RCTL_MPE);
2095 	reg_rctl &=  (~E1000_RCTL_SBP);
2096 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2097 }
2098 
2099 
2100 /*********************************************************************
2101  *  Multicast Update
2102  *
2103  *  This routine is called whenever multicast address list is updated.
2104  *
2105  **********************************************************************/
2106 
2107 static void
2108 em_set_multi(struct adapter *adapter)
2109 {
2110 	struct ifnet	*ifp = adapter->ifp;
2111 	struct ifmultiaddr *ifma;
2112 	u32 reg_rctl = 0;
2113 	u8  *mta; /* Multicast array memory */
2114 	int mcnt = 0;
2115 
2116 	IOCTL_DEBUGOUT("em_set_multi: begin");
2117 
2118 	mta = adapter->mta;
2119 	bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
2120 
2121 	if (adapter->hw.mac.type == e1000_82542 &&
2122 	    adapter->hw.revision_id == E1000_REVISION_2) {
2123 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2124 		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2125 			e1000_pci_clear_mwi(&adapter->hw);
2126 		reg_rctl |= E1000_RCTL_RST;
2127 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2128 		msec_delay(5);
2129 	}
2130 
2131 #if __FreeBSD_version < 800000
2132 	IF_ADDR_LOCK(ifp);
2133 #else
2134 	if_maddr_rlock(ifp);
2135 #endif
2136 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2137 		if (ifma->ifma_addr->sa_family != AF_LINK)
2138 			continue;
2139 
2140 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2141 			break;
2142 
2143 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2144 		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2145 		mcnt++;
2146 	}
2147 #if __FreeBSD_version < 800000
2148 	IF_ADDR_UNLOCK(ifp);
2149 #else
2150 	if_maddr_runlock(ifp);
2151 #endif
2152 	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2153 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2154 		reg_rctl |= E1000_RCTL_MPE;
2155 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2156 	} else
2157 		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
2158 
2159 	if (adapter->hw.mac.type == e1000_82542 &&
2160 	    adapter->hw.revision_id == E1000_REVISION_2) {
2161 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2162 		reg_rctl &= ~E1000_RCTL_RST;
2163 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2164 		msec_delay(5);
2165 		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2166 			e1000_pci_set_mwi(&adapter->hw);
2167 	}
2168 }
2169 
2170 
2171 /*********************************************************************
2172  *  Timer routine
2173  *
2174  *  This routine checks for link status and updates statistics.
2175  *
2176  **********************************************************************/
2177 
2178 static void
2179 em_local_timer(void *arg)
2180 {
2181 	struct adapter	*adapter = arg;
2182 	struct ifnet	*ifp = adapter->ifp;
2183 	struct tx_ring	*txr = adapter->tx_rings;
2184 	struct rx_ring	*rxr = adapter->rx_rings;
2185 	u32		trigger;
2186 
2187 	EM_CORE_LOCK_ASSERT(adapter);
2188 
2189 	em_update_link_status(adapter);
2190 	em_update_stats_counters(adapter);
2191 
2192 	/* Reset LAA into RAR[0] on 82571 */
2193 	if ((adapter->hw.mac.type == e1000_82571) &&
2194 	    e1000_get_laa_state_82571(&adapter->hw))
2195 		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2196 
2197 	/* Mask to use in the irq trigger */
2198 	if (adapter->msix_mem)
2199 		trigger = rxr->ims; /* RX for 82574 */
2200 	else
2201 		trigger = E1000_ICS_RXDMT0;
2202 
2203 	/*
2204 	** Check on the state of the TX queue(s), this
2205 	** can be done without the lock because its RO
2206 	** and the HUNG state will be static if set.
2207 	*/
2208 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2209 		if ((txr->queue_status == EM_QUEUE_HUNG) &&
2210 		    (adapter->pause_frames == 0))
2211 			goto hung;
2212 		/* Schedule a TX tasklet if needed */
2213 		if (txr->tx_avail <= EM_MAX_SCATTER)
2214 			taskqueue_enqueue(txr->tq, &txr->tx_task);
2215 	}
2216 
2217 	adapter->pause_frames = 0;
2218 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
2219 #ifndef DEVICE_POLLING
2220 	/* Trigger an RX interrupt to guarantee mbuf refresh */
2221 	E1000_WRITE_REG(&adapter->hw, E1000_ICS, trigger);
2222 #endif
2223 	return;
2224 hung:
2225 	/* Looks like we're hung */
2226 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2227 	device_printf(adapter->dev,
2228 	    "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
2229 	    E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)),
2230 	    E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me)));
2231 	device_printf(adapter->dev,"TX(%d) desc avail = %d,"
2232 	    "Next TX to Clean = %d\n",
2233 	    txr->me, txr->tx_avail, txr->next_to_clean);
2234 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2235 	adapter->watchdog_events++;
2236 	adapter->pause_frames = 0;
2237 	em_init_locked(adapter);
2238 }
2239 
2240 
2241 static void
2242 em_update_link_status(struct adapter *adapter)
2243 {
2244 	struct e1000_hw *hw = &adapter->hw;
2245 	struct ifnet *ifp = adapter->ifp;
2246 	device_t dev = adapter->dev;
2247 	struct tx_ring *txr = adapter->tx_rings;
2248 	u32 link_check = 0;
2249 
2250 	/* Get the cached link value or read phy for real */
2251 	switch (hw->phy.media_type) {
2252 	case e1000_media_type_copper:
2253 		if (hw->mac.get_link_status) {
2254 			/* Do the work to read phy */
2255 			e1000_check_for_link(hw);
2256 			link_check = !hw->mac.get_link_status;
2257 			if (link_check) /* ESB2 fix */
2258 				e1000_cfg_on_link_up(hw);
2259 		} else
2260 			link_check = TRUE;
2261 		break;
2262 	case e1000_media_type_fiber:
2263 		e1000_check_for_link(hw);
2264 		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2265                                  E1000_STATUS_LU);
2266 		break;
2267 	case e1000_media_type_internal_serdes:
2268 		e1000_check_for_link(hw);
2269 		link_check = adapter->hw.mac.serdes_has_link;
2270 		break;
2271 	default:
2272 	case e1000_media_type_unknown:
2273 		break;
2274 	}
2275 
2276 	/* Now check for a transition */
2277 	if (link_check && (adapter->link_active == 0)) {
2278 		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2279 		    &adapter->link_duplex);
2280 		/* Check if we must disable SPEED_MODE bit on PCI-E */
2281 		if ((adapter->link_speed != SPEED_1000) &&
2282 		    ((hw->mac.type == e1000_82571) ||
2283 		    (hw->mac.type == e1000_82572))) {
2284 			int tarc0;
2285 			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2286 			tarc0 &= ~SPEED_MODE_BIT;
2287 			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
2288 		}
2289 		if (bootverbose)
2290 			device_printf(dev, "Link is up %d Mbps %s\n",
2291 			    adapter->link_speed,
2292 			    ((adapter->link_duplex == FULL_DUPLEX) ?
2293 			    "Full Duplex" : "Half Duplex"));
2294 		adapter->link_active = 1;
2295 		adapter->smartspeed = 0;
2296 		ifp->if_baudrate = adapter->link_speed * 1000000;
2297 		if_link_state_change(ifp, LINK_STATE_UP);
2298 	} else if (!link_check && (adapter->link_active == 1)) {
2299 		ifp->if_baudrate = adapter->link_speed = 0;
2300 		adapter->link_duplex = 0;
2301 		if (bootverbose)
2302 			device_printf(dev, "Link is Down\n");
2303 		adapter->link_active = 0;
2304 		/* Link down, disable watchdog */
2305 		for (int i = 0; i < adapter->num_queues; i++, txr++)
2306 			txr->queue_status = EM_QUEUE_IDLE;
2307 		if_link_state_change(ifp, LINK_STATE_DOWN);
2308 	}
2309 }
2310 
2311 /*********************************************************************
2312  *
2313  *  This routine disables all traffic on the adapter by issuing a
2314  *  global reset on the MAC and deallocates TX/RX buffers.
2315  *
2316  *  This routine should always be called with BOTH the CORE
2317  *  and TX locks.
2318  **********************************************************************/
2319 
2320 static void
2321 em_stop(void *arg)
2322 {
2323 	struct adapter	*adapter = arg;
2324 	struct ifnet	*ifp = adapter->ifp;
2325 	struct tx_ring	*txr = adapter->tx_rings;
2326 
2327 	EM_CORE_LOCK_ASSERT(adapter);
2328 
2329 	INIT_DEBUGOUT("em_stop: begin");
2330 
2331 	em_disable_intr(adapter);
2332 	callout_stop(&adapter->timer);
2333 
2334 	/* Tell the stack that the interface is no longer active */
2335 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2336 	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2337 
2338         /* Unarm watchdog timer. */
2339 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2340 		EM_TX_LOCK(txr);
2341 		txr->queue_status = EM_QUEUE_IDLE;
2342 		EM_TX_UNLOCK(txr);
2343 	}
2344 
2345 	e1000_reset_hw(&adapter->hw);
2346 	E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2347 
2348 	e1000_led_off(&adapter->hw);
2349 	e1000_cleanup_led(&adapter->hw);
2350 }
2351 
2352 
2353 /*********************************************************************
2354  *
2355  *  Determine hardware revision.
2356  *
2357  **********************************************************************/
2358 static void
2359 em_identify_hardware(struct adapter *adapter)
2360 {
2361 	device_t dev = adapter->dev;
2362 
2363 	/* Make sure our PCI config space has the necessary stuff set */
2364 	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2365 	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2366 	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2367 		device_printf(dev, "Memory Access and/or Bus Master bits "
2368 		    "were not set!\n");
2369 		adapter->hw.bus.pci_cmd_word |=
2370 		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2371 		pci_write_config(dev, PCIR_COMMAND,
2372 		    adapter->hw.bus.pci_cmd_word, 2);
2373 	}
2374 
2375 	/* Save off the information about this board */
2376 	adapter->hw.vendor_id = pci_get_vendor(dev);
2377 	adapter->hw.device_id = pci_get_device(dev);
2378 	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2379 	adapter->hw.subsystem_vendor_id =
2380 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2381 	adapter->hw.subsystem_device_id =
2382 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2383 
2384 	/* Do Shared Code Init and Setup */
2385 	if (e1000_set_mac_type(&adapter->hw)) {
2386 		device_printf(dev, "Setup init failure\n");
2387 		return;
2388 	}
2389 }
2390 
2391 static int
2392 em_allocate_pci_resources(struct adapter *adapter)
2393 {
2394 	device_t	dev = adapter->dev;
2395 	int		rid;
2396 
2397 	rid = PCIR_BAR(0);
2398 	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2399 	    &rid, RF_ACTIVE);
2400 	if (adapter->memory == NULL) {
2401 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2402 		return (ENXIO);
2403 	}
2404 	adapter->osdep.mem_bus_space_tag =
2405 	    rman_get_bustag(adapter->memory);
2406 	adapter->osdep.mem_bus_space_handle =
2407 	    rman_get_bushandle(adapter->memory);
2408 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2409 
2410 	/* Default to a single queue */
2411 	adapter->num_queues = 1;
2412 
2413 	/*
2414 	 * Setup MSI/X or MSI if PCI Express
2415 	 */
2416 	adapter->msix = em_setup_msix(adapter);
2417 
2418 	adapter->hw.back = &adapter->osdep;
2419 
2420 	return (0);
2421 }
2422 
2423 /*********************************************************************
2424  *
2425  *  Setup the Legacy or MSI Interrupt handler
2426  *
2427  **********************************************************************/
2428 int
2429 em_allocate_legacy(struct adapter *adapter)
2430 {
2431 	device_t dev = adapter->dev;
2432 	struct tx_ring	*txr = adapter->tx_rings;
2433 	int error, rid = 0;
2434 
2435 	/* Manually turn off all interrupts */
2436 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2437 
2438 	if (adapter->msix == 1) /* using MSI */
2439 		rid = 1;
2440 	/* We allocate a single interrupt resource */
2441 	adapter->res = bus_alloc_resource_any(dev,
2442 	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2443 	if (adapter->res == NULL) {
2444 		device_printf(dev, "Unable to allocate bus resource: "
2445 		    "interrupt\n");
2446 		return (ENXIO);
2447 	}
2448 
2449 	/*
2450 	 * Allocate a fast interrupt and the associated
2451 	 * deferred processing contexts.
2452 	 */
2453 	TASK_INIT(&adapter->que_task, 0, em_handle_que, adapter);
2454 	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2455 	    taskqueue_thread_enqueue, &adapter->tq);
2456 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s que",
2457 	    device_get_nameunit(adapter->dev));
2458 	/* Use a TX only tasklet for local timer */
2459 	TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr);
2460 	txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT,
2461 	    taskqueue_thread_enqueue, &txr->tq);
2462 	taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2463 	    device_get_nameunit(adapter->dev));
2464 	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2465 	if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET,
2466 	    em_irq_fast, NULL, adapter, &adapter->tag)) != 0) {
2467 		device_printf(dev, "Failed to register fast interrupt "
2468 			    "handler: %d\n", error);
2469 		taskqueue_free(adapter->tq);
2470 		adapter->tq = NULL;
2471 		return (error);
2472 	}
2473 
2474 	return (0);
2475 }
2476 
2477 /*********************************************************************
2478  *
2479  *  Setup the MSIX Interrupt handlers
2480  *   This is not really Multiqueue, rather
2481  *   its just seperate interrupt vectors
2482  *   for TX, RX, and Link.
2483  *
2484  **********************************************************************/
2485 int
2486 em_allocate_msix(struct adapter *adapter)
2487 {
2488 	device_t	dev = adapter->dev;
2489 	struct		tx_ring *txr = adapter->tx_rings;
2490 	struct		rx_ring *rxr = adapter->rx_rings;
2491 	int		error, rid, vector = 0;
2492 
2493 
2494 	/* Make sure all interrupts are disabled */
2495 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2496 
2497 	/* First set up ring resources */
2498 	for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
2499 
2500 		/* RX ring */
2501 		rid = vector + 1;
2502 
2503 		rxr->res = bus_alloc_resource_any(dev,
2504 		    SYS_RES_IRQ, &rid, RF_ACTIVE);
2505 		if (rxr->res == NULL) {
2506 			device_printf(dev,
2507 			    "Unable to allocate bus resource: "
2508 			    "RX MSIX Interrupt %d\n", i);
2509 			return (ENXIO);
2510 		}
2511 		if ((error = bus_setup_intr(dev, rxr->res,
2512 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx,
2513 		    rxr, &rxr->tag)) != 0) {
2514 			device_printf(dev, "Failed to register RX handler");
2515 			return (error);
2516 		}
2517 #if __FreeBSD_version >= 800504
2518 		bus_describe_intr(dev, rxr->res, rxr->tag, "rx %d", i);
2519 #endif
2520 		rxr->msix = vector++; /* NOTE increment vector for TX */
2521 		TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr);
2522 		rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT,
2523 		    taskqueue_thread_enqueue, &rxr->tq);
2524 		taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
2525 		    device_get_nameunit(adapter->dev));
2526 		/*
2527 		** Set the bit to enable interrupt
2528 		** in E1000_IMS -- bits 20 and 21
2529 		** are for RX0 and RX1, note this has
2530 		** NOTHING to do with the MSIX vector
2531 		*/
2532 		rxr->ims = 1 << (20 + i);
2533 		adapter->ivars |= (8 | rxr->msix) << (i * 4);
2534 
2535 		/* TX ring */
2536 		rid = vector + 1;
2537 		txr->res = bus_alloc_resource_any(dev,
2538 		    SYS_RES_IRQ, &rid, RF_ACTIVE);
2539 		if (txr->res == NULL) {
2540 			device_printf(dev,
2541 			    "Unable to allocate bus resource: "
2542 			    "TX MSIX Interrupt %d\n", i);
2543 			return (ENXIO);
2544 		}
2545 		if ((error = bus_setup_intr(dev, txr->res,
2546 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx,
2547 		    txr, &txr->tag)) != 0) {
2548 			device_printf(dev, "Failed to register TX handler");
2549 			return (error);
2550 		}
2551 #if __FreeBSD_version >= 800504
2552 		bus_describe_intr(dev, txr->res, txr->tag, "tx %d", i);
2553 #endif
2554 		txr->msix = vector++; /* Increment vector for next pass */
2555 		TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr);
2556 		txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT,
2557 		    taskqueue_thread_enqueue, &txr->tq);
2558 		taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2559 		    device_get_nameunit(adapter->dev));
2560 		/*
2561 		** Set the bit to enable interrupt
2562 		** in E1000_IMS -- bits 22 and 23
2563 		** are for TX0 and TX1, note this has
2564 		** NOTHING to do with the MSIX vector
2565 		*/
2566 		txr->ims = 1 << (22 + i);
2567 		adapter->ivars |= (8 | txr->msix) << (8 + (i * 4));
2568 	}
2569 
2570 	/* Link interrupt */
2571 	++rid;
2572 	adapter->res = bus_alloc_resource_any(dev,
2573 	    SYS_RES_IRQ, &rid, RF_ACTIVE);
2574 	if (!adapter->res) {
2575 		device_printf(dev,"Unable to allocate "
2576 		    "bus resource: Link interrupt [%d]\n", rid);
2577 		return (ENXIO);
2578         }
2579 	/* Set the link handler function */
2580 	error = bus_setup_intr(dev, adapter->res,
2581 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2582 	    em_msix_link, adapter, &adapter->tag);
2583 	if (error) {
2584 		adapter->res = NULL;
2585 		device_printf(dev, "Failed to register LINK handler");
2586 		return (error);
2587 	}
2588 #if __FreeBSD_version >= 800504
2589 		bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2590 #endif
2591 	adapter->linkvec = vector;
2592 	adapter->ivars |=  (8 | vector) << 16;
2593 	adapter->ivars |= 0x80000000;
2594 
2595 	return (0);
2596 }
2597 
2598 
2599 static void
2600 em_free_pci_resources(struct adapter *adapter)
2601 {
2602 	device_t	dev = adapter->dev;
2603 	struct tx_ring	*txr;
2604 	struct rx_ring	*rxr;
2605 	int		rid;
2606 
2607 
2608 	/*
2609 	** Release all the queue interrupt resources:
2610 	*/
2611 	for (int i = 0; i < adapter->num_queues; i++) {
2612 		txr = &adapter->tx_rings[i];
2613 		rxr = &adapter->rx_rings[i];
2614 		/* an early abort? */
2615 		if ((txr == NULL) || (rxr == NULL))
2616 			break;
2617 		rid = txr->msix +1;
2618 		if (txr->tag != NULL) {
2619 			bus_teardown_intr(dev, txr->res, txr->tag);
2620 			txr->tag = NULL;
2621 		}
2622 		if (txr->res != NULL)
2623 			bus_release_resource(dev, SYS_RES_IRQ,
2624 			    rid, txr->res);
2625 		rid = rxr->msix +1;
2626 		if (rxr->tag != NULL) {
2627 			bus_teardown_intr(dev, rxr->res, rxr->tag);
2628 			rxr->tag = NULL;
2629 		}
2630 		if (rxr->res != NULL)
2631 			bus_release_resource(dev, SYS_RES_IRQ,
2632 			    rid, rxr->res);
2633 	}
2634 
2635         if (adapter->linkvec) /* we are doing MSIX */
2636                 rid = adapter->linkvec + 1;
2637         else
2638                 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2639 
2640 	if (adapter->tag != NULL) {
2641 		bus_teardown_intr(dev, adapter->res, adapter->tag);
2642 		adapter->tag = NULL;
2643 	}
2644 
2645 	if (adapter->res != NULL)
2646 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2647 
2648 
2649 	if (adapter->msix)
2650 		pci_release_msi(dev);
2651 
2652 	if (adapter->msix_mem != NULL)
2653 		bus_release_resource(dev, SYS_RES_MEMORY,
2654 		    PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem);
2655 
2656 	if (adapter->memory != NULL)
2657 		bus_release_resource(dev, SYS_RES_MEMORY,
2658 		    PCIR_BAR(0), adapter->memory);
2659 
2660 	if (adapter->flash != NULL)
2661 		bus_release_resource(dev, SYS_RES_MEMORY,
2662 		    EM_FLASH, adapter->flash);
2663 }
2664 
2665 /*
2666  * Setup MSI or MSI/X
2667  */
2668 static int
2669 em_setup_msix(struct adapter *adapter)
2670 {
2671 	device_t dev = adapter->dev;
2672 	int val = 0;
2673 
2674 	/*
2675 	** Setup MSI/X for Hartwell: tests have shown
2676 	** use of two queues to be unstable, and to
2677 	** provide no great gain anyway, so we simply
2678 	** seperate the interrupts and use a single queue.
2679 	*/
2680 	if ((adapter->hw.mac.type == e1000_82574) &&
2681 	    (em_enable_msix == TRUE)) {
2682 		/* Map the MSIX BAR */
2683 		int rid = PCIR_BAR(EM_MSIX_BAR);
2684 		adapter->msix_mem = bus_alloc_resource_any(dev,
2685 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2686        		if (!adapter->msix_mem) {
2687 			/* May not be enabled */
2688                		device_printf(adapter->dev,
2689 			    "Unable to map MSIX table \n");
2690 			goto msi;
2691        		}
2692 		val = pci_msix_count(dev);
2693 		/* We only need 3 vectors */
2694 		if (val > 3)
2695 			val = 3;
2696 		if ((val != 3) && (val != 5)) {
2697 			bus_release_resource(dev, SYS_RES_MEMORY,
2698 			    PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem);
2699 			adapter->msix_mem = NULL;
2700                		device_printf(adapter->dev,
2701 			    "MSIX: incorrect vectors, using MSI\n");
2702 			goto msi;
2703 		}
2704 
2705 		if (pci_alloc_msix(dev, &val) == 0) {
2706 			device_printf(adapter->dev,
2707 			    "Using MSIX interrupts "
2708 			    "with %d vectors\n", val);
2709 		}
2710 
2711 		return (val);
2712 	}
2713 msi:
2714        	val = pci_msi_count(dev);
2715        	if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
2716                	adapter->msix = 1;
2717                	device_printf(adapter->dev,"Using an MSI interrupt\n");
2718 		return (val);
2719 	}
2720 	/* Should only happen due to manual configuration */
2721 	device_printf(adapter->dev,"No MSI/MSIX using a Legacy IRQ\n");
2722 	return (0);
2723 }
2724 
2725 
2726 /*********************************************************************
2727  *
2728  *  Initialize the hardware to a configuration
2729  *  as specified by the adapter structure.
2730  *
2731  **********************************************************************/
2732 static void
2733 em_reset(struct adapter *adapter)
2734 {
2735 	device_t	dev = adapter->dev;
2736 	struct ifnet	*ifp = adapter->ifp;
2737 	struct e1000_hw	*hw = &adapter->hw;
2738 	u16		rx_buffer_size;
2739 	u32		pba;
2740 
2741 	INIT_DEBUGOUT("em_reset: begin");
2742 
2743 	/* Set up smart power down as default off on newer adapters. */
2744 	if (!em_smart_pwr_down && (hw->mac.type == e1000_82571 ||
2745 	    hw->mac.type == e1000_82572)) {
2746 		u16 phy_tmp = 0;
2747 
2748 		/* Speed up time to link by disabling smart power down. */
2749 		e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
2750 		phy_tmp &= ~IGP02E1000_PM_SPD;
2751 		e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
2752 	}
2753 
2754 	/*
2755 	 * Packet Buffer Allocation (PBA)
2756 	 * Writing PBA sets the receive portion of the buffer
2757 	 * the remainder is used for the transmit buffer.
2758 	 */
2759 	switch (hw->mac.type) {
2760 	/* Total Packet Buffer on these is 48K */
2761 	case e1000_82571:
2762 	case e1000_82572:
2763 	case e1000_80003es2lan:
2764 			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
2765 		break;
2766 	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
2767 			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
2768 		break;
2769 	case e1000_82574:
2770 	case e1000_82583:
2771 			pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
2772 		break;
2773 	case e1000_ich8lan:
2774 		pba = E1000_PBA_8K;
2775 		break;
2776 	case e1000_ich9lan:
2777 	case e1000_ich10lan:
2778 		/* Boost Receive side for jumbo frames */
2779 		if (adapter->max_frame_size > 4096)
2780 			pba = E1000_PBA_14K;
2781 		else
2782 			pba = E1000_PBA_10K;
2783 		break;
2784 	case e1000_pchlan:
2785 	case e1000_pch2lan:
2786 		pba = E1000_PBA_26K;
2787 		break;
2788 	default:
2789 		if (adapter->max_frame_size > 8192)
2790 			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
2791 		else
2792 			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
2793 	}
2794 	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
2795 
2796 	/*
2797 	 * These parameters control the automatic generation (Tx) and
2798 	 * response (Rx) to Ethernet PAUSE frames.
2799 	 * - High water mark should allow for at least two frames to be
2800 	 *   received after sending an XOFF.
2801 	 * - Low water mark works best when it is very near the high water mark.
2802 	 *   This allows the receiver to restart by sending XON when it has
2803 	 *   drained a bit. Here we use an arbitary value of 1500 which will
2804 	 *   restart after one full frame is pulled from the buffer. There
2805 	 *   could be several smaller frames in the buffer and if so they will
2806 	 *   not trigger the XON until their total number reduces the buffer
2807 	 *   by 1500.
2808 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2809 	 */
2810 	rx_buffer_size = ((E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10 );
2811 	hw->fc.high_water = rx_buffer_size -
2812 	    roundup2(adapter->max_frame_size, 1024);
2813 	hw->fc.low_water = hw->fc.high_water - 1500;
2814 
2815 	if (adapter->fc) /* locally set flow control value? */
2816 		hw->fc.requested_mode = adapter->fc;
2817 	else
2818 		hw->fc.requested_mode = e1000_fc_full;
2819 
2820 	if (hw->mac.type == e1000_80003es2lan)
2821 		hw->fc.pause_time = 0xFFFF;
2822 	else
2823 		hw->fc.pause_time = EM_FC_PAUSE_TIME;
2824 
2825 	hw->fc.send_xon = TRUE;
2826 
2827 	/* Device specific overrides/settings */
2828 	switch (hw->mac.type) {
2829 	case e1000_pchlan:
2830 		/* Workaround: no TX flow ctrl for PCH */
2831                 hw->fc.requested_mode = e1000_fc_rx_pause;
2832 		hw->fc.pause_time = 0xFFFF; /* override */
2833 		if (ifp->if_mtu > ETHERMTU) {
2834 			hw->fc.high_water = 0x3500;
2835 			hw->fc.low_water = 0x1500;
2836 		} else {
2837 			hw->fc.high_water = 0x5000;
2838 			hw->fc.low_water = 0x3000;
2839 		}
2840 		hw->fc.refresh_time = 0x1000;
2841 		break;
2842 	case e1000_pch2lan:
2843 		hw->fc.high_water = 0x5C20;
2844 		hw->fc.low_water = 0x5048;
2845 		hw->fc.pause_time = 0x0650;
2846 		hw->fc.refresh_time = 0x0400;
2847 		/* Jumbos need adjusted PBA */
2848 		if (ifp->if_mtu > ETHERMTU)
2849 			E1000_WRITE_REG(hw, E1000_PBA, 12);
2850 		else
2851 			E1000_WRITE_REG(hw, E1000_PBA, 26);
2852 		break;
2853         case e1000_ich9lan:
2854         case e1000_ich10lan:
2855 		if (ifp->if_mtu > ETHERMTU) {
2856 			hw->fc.high_water = 0x2800;
2857 			hw->fc.low_water = hw->fc.high_water - 8;
2858 			break;
2859 		}
2860 		/* else fall thru */
2861 	default:
2862 		if (hw->mac.type == e1000_80003es2lan)
2863 			hw->fc.pause_time = 0xFFFF;
2864 		break;
2865 	}
2866 
2867 	/* Issue a global reset */
2868 	e1000_reset_hw(hw);
2869 	E1000_WRITE_REG(hw, E1000_WUC, 0);
2870 	em_disable_aspm(adapter);
2871 	/* and a re-init */
2872 	if (e1000_init_hw(hw) < 0) {
2873 		device_printf(dev, "Hardware Initialization Failed\n");
2874 		return;
2875 	}
2876 
2877 	E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN);
2878 	e1000_get_phy_info(hw);
2879 	e1000_check_for_link(hw);
2880 	return;
2881 }
2882 
2883 /*********************************************************************
2884  *
2885  *  Setup networking device structure and register an interface.
2886  *
2887  **********************************************************************/
2888 static int
2889 em_setup_interface(device_t dev, struct adapter *adapter)
2890 {
2891 	struct ifnet   *ifp;
2892 
2893 	INIT_DEBUGOUT("em_setup_interface: begin");
2894 
2895 	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2896 	if (ifp == NULL) {
2897 		device_printf(dev, "can not allocate ifnet structure\n");
2898 		return (-1);
2899 	}
2900 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2901 	ifp->if_init =  em_init;
2902 	ifp->if_softc = adapter;
2903 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2904 	ifp->if_ioctl = em_ioctl;
2905 	ifp->if_start = em_start;
2906 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2907 	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2908 	IFQ_SET_READY(&ifp->if_snd);
2909 
2910 	ether_ifattach(ifp, adapter->hw.mac.addr);
2911 
2912 	ifp->if_capabilities = ifp->if_capenable = 0;
2913 
2914 #ifdef EM_MULTIQUEUE
2915 	/* Multiqueue stack interface */
2916 	ifp->if_transmit = em_mq_start;
2917 	ifp->if_qflush = em_qflush;
2918 #endif
2919 
2920 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2921 	ifp->if_capabilities |= IFCAP_TSO4;
2922 	/*
2923 	 * Tell the upper layer(s) we
2924 	 * support full VLAN capability
2925 	 */
2926 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2927 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2928 			     |  IFCAP_VLAN_HWTSO
2929 			     |  IFCAP_VLAN_MTU;
2930 	ifp->if_capenable = ifp->if_capabilities;
2931 
2932 	/*
2933 	** Don't turn this on by default, if vlans are
2934 	** created on another pseudo device (eg. lagg)
2935 	** then vlan events are not passed thru, breaking
2936 	** operation, but with HW FILTER off it works. If
2937 	** using vlans directly on the em driver you can
2938 	** enable this and get full hardware tag filtering.
2939 	*/
2940 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2941 
2942 #ifdef DEVICE_POLLING
2943 	ifp->if_capabilities |= IFCAP_POLLING;
2944 #endif
2945 
2946 	/* Enable only WOL MAGIC by default */
2947 	if (adapter->wol) {
2948 		ifp->if_capabilities |= IFCAP_WOL;
2949 		ifp->if_capenable |= IFCAP_WOL_MAGIC;
2950 	}
2951 
2952 	/*
2953 	 * Specify the media types supported by this adapter and register
2954 	 * callbacks to update media and link information
2955 	 */
2956 	ifmedia_init(&adapter->media, IFM_IMASK,
2957 	    em_media_change, em_media_status);
2958 	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2959 	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2960 		u_char fiber_type = IFM_1000_SX;	/* default type */
2961 
2962 		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2963 			    0, NULL);
2964 		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2965 	} else {
2966 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2967 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2968 			    0, NULL);
2969 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2970 			    0, NULL);
2971 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2972 			    0, NULL);
2973 		if (adapter->hw.phy.type != e1000_phy_ife) {
2974 			ifmedia_add(&adapter->media,
2975 				IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2976 			ifmedia_add(&adapter->media,
2977 				IFM_ETHER | IFM_1000_T, 0, NULL);
2978 		}
2979 	}
2980 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2981 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2982 	return (0);
2983 }
2984 
2985 
2986 /*
2987  * Manage DMA'able memory.
2988  */
2989 static void
2990 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2991 {
2992 	if (error)
2993 		return;
2994 	*(bus_addr_t *) arg = segs[0].ds_addr;
2995 }
2996 
2997 static int
2998 em_dma_malloc(struct adapter *adapter, bus_size_t size,
2999         struct em_dma_alloc *dma, int mapflags)
3000 {
3001 	int error;
3002 
3003 	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3004 				EM_DBA_ALIGN, 0,	/* alignment, bounds */
3005 				BUS_SPACE_MAXADDR,	/* lowaddr */
3006 				BUS_SPACE_MAXADDR,	/* highaddr */
3007 				NULL, NULL,		/* filter, filterarg */
3008 				size,			/* maxsize */
3009 				1,			/* nsegments */
3010 				size,			/* maxsegsize */
3011 				0,			/* flags */
3012 				NULL,			/* lockfunc */
3013 				NULL,			/* lockarg */
3014 				&dma->dma_tag);
3015 	if (error) {
3016 		device_printf(adapter->dev,
3017 		    "%s: bus_dma_tag_create failed: %d\n",
3018 		    __func__, error);
3019 		goto fail_0;
3020 	}
3021 
3022 	error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3023 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3024 	if (error) {
3025 		device_printf(adapter->dev,
3026 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3027 		    __func__, (uintmax_t)size, error);
3028 		goto fail_2;
3029 	}
3030 
3031 	dma->dma_paddr = 0;
3032 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3033 	    size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3034 	if (error || dma->dma_paddr == 0) {
3035 		device_printf(adapter->dev,
3036 		    "%s: bus_dmamap_load failed: %d\n",
3037 		    __func__, error);
3038 		goto fail_3;
3039 	}
3040 
3041 	return (0);
3042 
3043 fail_3:
3044 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3045 fail_2:
3046 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3047 	bus_dma_tag_destroy(dma->dma_tag);
3048 fail_0:
3049 	dma->dma_map = NULL;
3050 	dma->dma_tag = NULL;
3051 
3052 	return (error);
3053 }
3054 
3055 static void
3056 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3057 {
3058 	if (dma->dma_tag == NULL)
3059 		return;
3060 	if (dma->dma_map != NULL) {
3061 		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3062 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3063 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3064 		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3065 		dma->dma_map = NULL;
3066 	}
3067 	bus_dma_tag_destroy(dma->dma_tag);
3068 	dma->dma_tag = NULL;
3069 }
3070 
3071 
3072 /*********************************************************************
3073  *
3074  *  Allocate memory for the transmit and receive rings, and then
3075  *  the descriptors associated with each, called only once at attach.
3076  *
3077  **********************************************************************/
3078 static int
3079 em_allocate_queues(struct adapter *adapter)
3080 {
3081 	device_t		dev = adapter->dev;
3082 	struct tx_ring		*txr = NULL;
3083 	struct rx_ring		*rxr = NULL;
3084 	int rsize, tsize, error = E1000_SUCCESS;
3085 	int txconf = 0, rxconf = 0;
3086 
3087 
3088 	/* Allocate the TX ring struct memory */
3089 	if (!(adapter->tx_rings =
3090 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
3091 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3092 		device_printf(dev, "Unable to allocate TX ring memory\n");
3093 		error = ENOMEM;
3094 		goto fail;
3095 	}
3096 
3097 	/* Now allocate the RX */
3098 	if (!(adapter->rx_rings =
3099 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
3100 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3101 		device_printf(dev, "Unable to allocate RX ring memory\n");
3102 		error = ENOMEM;
3103 		goto rx_fail;
3104 	}
3105 
3106 	tsize = roundup2(adapter->num_tx_desc *
3107 	    sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
3108 	/*
3109 	 * Now set up the TX queues, txconf is needed to handle the
3110 	 * possibility that things fail midcourse and we need to
3111 	 * undo memory gracefully
3112 	 */
3113 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
3114 		/* Set up some basics */
3115 		txr = &adapter->tx_rings[i];
3116 		txr->adapter = adapter;
3117 		txr->me = i;
3118 
3119 		/* Initialize the TX lock */
3120 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
3121 		    device_get_nameunit(dev), txr->me);
3122 		mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
3123 
3124 		if (em_dma_malloc(adapter, tsize,
3125 			&txr->txdma, BUS_DMA_NOWAIT)) {
3126 			device_printf(dev,
3127 			    "Unable to allocate TX Descriptor memory\n");
3128 			error = ENOMEM;
3129 			goto err_tx_desc;
3130 		}
3131 		txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr;
3132 		bzero((void *)txr->tx_base, tsize);
3133 
3134         	if (em_allocate_transmit_buffers(txr)) {
3135 			device_printf(dev,
3136 			    "Critical Failure setting up transmit buffers\n");
3137 			error = ENOMEM;
3138 			goto err_tx_desc;
3139         	}
3140 #if __FreeBSD_version >= 800000
3141 		/* Allocate a buf ring */
3142 		txr->br = buf_ring_alloc(4096, M_DEVBUF,
3143 		    M_WAITOK, &txr->tx_mtx);
3144 #endif
3145 	}
3146 
3147 	/*
3148 	 * Next the RX queues...
3149 	 */
3150 	rsize = roundup2(adapter->num_rx_desc *
3151 	    sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
3152 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
3153 		rxr = &adapter->rx_rings[i];
3154 		rxr->adapter = adapter;
3155 		rxr->me = i;
3156 
3157 		/* Initialize the RX lock */
3158 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3159 		    device_get_nameunit(dev), txr->me);
3160 		mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
3161 
3162 		if (em_dma_malloc(adapter, rsize,
3163 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
3164 			device_printf(dev,
3165 			    "Unable to allocate RxDescriptor memory\n");
3166 			error = ENOMEM;
3167 			goto err_rx_desc;
3168 		}
3169 		rxr->rx_base = (struct e1000_rx_desc *)rxr->rxdma.dma_vaddr;
3170 		bzero((void *)rxr->rx_base, rsize);
3171 
3172         	/* Allocate receive buffers for the ring*/
3173 		if (em_allocate_receive_buffers(rxr)) {
3174 			device_printf(dev,
3175 			    "Critical Failure setting up receive buffers\n");
3176 			error = ENOMEM;
3177 			goto err_rx_desc;
3178 		}
3179 	}
3180 
3181 	return (0);
3182 
3183 err_rx_desc:
3184 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
3185 		em_dma_free(adapter, &rxr->rxdma);
3186 err_tx_desc:
3187 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
3188 		em_dma_free(adapter, &txr->txdma);
3189 	free(adapter->rx_rings, M_DEVBUF);
3190 rx_fail:
3191 #if __FreeBSD_version >= 800000
3192 	buf_ring_free(txr->br, M_DEVBUF);
3193 #endif
3194 	free(adapter->tx_rings, M_DEVBUF);
3195 fail:
3196 	return (error);
3197 }
3198 
3199 
3200 /*********************************************************************
3201  *
3202  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
3203  *  the information needed to transmit a packet on the wire. This is
3204  *  called only once at attach, setup is done every reset.
3205  *
3206  **********************************************************************/
3207 static int
3208 em_allocate_transmit_buffers(struct tx_ring *txr)
3209 {
3210 	struct adapter *adapter = txr->adapter;
3211 	device_t dev = adapter->dev;
3212 	struct em_buffer *txbuf;
3213 	int error, i;
3214 
3215 	/*
3216 	 * Setup DMA descriptor areas.
3217 	 */
3218 	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
3219 			       1, 0,			/* alignment, bounds */
3220 			       BUS_SPACE_MAXADDR,	/* lowaddr */
3221 			       BUS_SPACE_MAXADDR,	/* highaddr */
3222 			       NULL, NULL,		/* filter, filterarg */
3223 			       EM_TSO_SIZE,		/* maxsize */
3224 			       EM_MAX_SCATTER,		/* nsegments */
3225 			       PAGE_SIZE,		/* maxsegsize */
3226 			       0,			/* flags */
3227 			       NULL,			/* lockfunc */
3228 			       NULL,			/* lockfuncarg */
3229 			       &txr->txtag))) {
3230 		device_printf(dev,"Unable to allocate TX DMA tag\n");
3231 		goto fail;
3232 	}
3233 
3234 	if (!(txr->tx_buffers =
3235 	    (struct em_buffer *) malloc(sizeof(struct em_buffer) *
3236 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3237 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
3238 		error = ENOMEM;
3239 		goto fail;
3240 	}
3241 
3242         /* Create the descriptor buffer dma maps */
3243 	txbuf = txr->tx_buffers;
3244 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
3245 		error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
3246 		if (error != 0) {
3247 			device_printf(dev, "Unable to create TX DMA map\n");
3248 			goto fail;
3249 		}
3250 	}
3251 
3252 	return 0;
3253 fail:
3254 	/* We free all, it handles case where we are in the middle */
3255 	em_free_transmit_structures(adapter);
3256 	return (error);
3257 }
3258 
3259 /*********************************************************************
3260  *
3261  *  Initialize a transmit ring.
3262  *
3263  **********************************************************************/
3264 static void
3265 em_setup_transmit_ring(struct tx_ring *txr)
3266 {
3267 	struct adapter *adapter = txr->adapter;
3268 	struct em_buffer *txbuf;
3269 	int i;
3270 #ifdef DEV_NETMAP
3271 	struct netmap_adapter *na = NA(adapter->ifp);
3272 	struct netmap_slot *slot;
3273 #endif /* DEV_NETMAP */
3274 
3275 	/* Clear the old descriptor contents */
3276 	EM_TX_LOCK(txr);
3277 #ifdef DEV_NETMAP
3278 	slot = netmap_reset(na, NR_TX, txr->me, 0);
3279 #endif /* DEV_NETMAP */
3280 
3281 	bzero((void *)txr->tx_base,
3282 	      (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3283 	/* Reset indices */
3284 	txr->next_avail_desc = 0;
3285 	txr->next_to_clean = 0;
3286 
3287 	/* Free any existing tx buffers. */
3288         txbuf = txr->tx_buffers;
3289 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
3290 		if (txbuf->m_head != NULL) {
3291 			bus_dmamap_sync(txr->txtag, txbuf->map,
3292 			    BUS_DMASYNC_POSTWRITE);
3293 			bus_dmamap_unload(txr->txtag, txbuf->map);
3294 			m_freem(txbuf->m_head);
3295 			txbuf->m_head = NULL;
3296 		}
3297 #ifdef DEV_NETMAP
3298 		if (slot) {
3299 			int si = i + na->tx_rings[txr->me].nkr_hwofs;
3300 			uint64_t paddr;
3301 			void *addr;
3302 
3303 			if (si >= na->num_tx_desc)
3304 				si -= na->num_tx_desc;
3305 			addr = PNMB(slot + si, &paddr);
3306 			txr->tx_base[i].buffer_addr = htole64(paddr);
3307 			/* reload the map for netmap mode */
3308 			netmap_load_map(txr->txtag, txbuf->map, addr);
3309 		}
3310 #endif /* DEV_NETMAP */
3311 
3312 		/* clear the watch index */
3313 		txbuf->next_eop = -1;
3314         }
3315 
3316 	/* Set number of descriptors available */
3317 	txr->tx_avail = adapter->num_tx_desc;
3318 	txr->queue_status = EM_QUEUE_IDLE;
3319 
3320 	/* Clear checksum offload context. */
3321 	txr->last_hw_offload = 0;
3322 	txr->last_hw_ipcss = 0;
3323 	txr->last_hw_ipcso = 0;
3324 	txr->last_hw_tucss = 0;
3325 	txr->last_hw_tucso = 0;
3326 
3327 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3328 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3329 	EM_TX_UNLOCK(txr);
3330 }
3331 
3332 /*********************************************************************
3333  *
3334  *  Initialize all transmit rings.
3335  *
3336  **********************************************************************/
3337 static void
3338 em_setup_transmit_structures(struct adapter *adapter)
3339 {
3340 	struct tx_ring *txr = adapter->tx_rings;
3341 
3342 	for (int i = 0; i < adapter->num_queues; i++, txr++)
3343 		em_setup_transmit_ring(txr);
3344 
3345 	return;
3346 }
3347 
3348 /*********************************************************************
3349  *
3350  *  Enable transmit unit.
3351  *
3352  **********************************************************************/
3353 static void
3354 em_initialize_transmit_unit(struct adapter *adapter)
3355 {
3356 	struct tx_ring	*txr = adapter->tx_rings;
3357 	struct e1000_hw	*hw = &adapter->hw;
3358 	u32	tctl, tarc, tipg = 0;
3359 
3360 	 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
3361 
3362 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
3363 		u64 bus_addr = txr->txdma.dma_paddr;
3364 		/* Base and Len of TX Ring */
3365 		E1000_WRITE_REG(hw, E1000_TDLEN(i),
3366 	    	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
3367 		E1000_WRITE_REG(hw, E1000_TDBAH(i),
3368 	    	    (u32)(bus_addr >> 32));
3369 		E1000_WRITE_REG(hw, E1000_TDBAL(i),
3370 	    	    (u32)bus_addr);
3371 		/* Init the HEAD/TAIL indices */
3372 		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
3373 		E1000_WRITE_REG(hw, E1000_TDH(i), 0);
3374 
3375 		HW_DEBUGOUT2("Base = %x, Length = %x\n",
3376 		    E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)),
3377 		    E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
3378 
3379 		txr->queue_status = EM_QUEUE_IDLE;
3380 	}
3381 
3382 	/* Set the default values for the Tx Inter Packet Gap timer */
3383 	switch (adapter->hw.mac.type) {
3384 	case e1000_80003es2lan:
3385 		tipg = DEFAULT_82543_TIPG_IPGR1;
3386 		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
3387 		    E1000_TIPG_IPGR2_SHIFT;
3388 		break;
3389 	default:
3390 		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3391 		    (adapter->hw.phy.media_type ==
3392 		    e1000_media_type_internal_serdes))
3393 			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3394 		else
3395 			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3396 		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3397 		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3398 	}
3399 
3400 	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
3401 	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
3402 
3403 	if(adapter->hw.mac.type >= e1000_82540)
3404 		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
3405 		    adapter->tx_abs_int_delay.value);
3406 
3407 	if ((adapter->hw.mac.type == e1000_82571) ||
3408 	    (adapter->hw.mac.type == e1000_82572)) {
3409 		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3410 		tarc |= SPEED_MODE_BIT;
3411 		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3412 	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
3413 		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3414 		tarc |= 1;
3415 		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3416 		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
3417 		tarc |= 1;
3418 		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3419 	}
3420 
3421 	adapter->txd_cmd = E1000_TXD_CMD_IFCS;
3422 	if (adapter->tx_int_delay.value > 0)
3423 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3424 
3425 	/* Program the Transmit Control Register */
3426 	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3427 	tctl &= ~E1000_TCTL_CT;
3428 	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3429 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
3430 
3431 	if (adapter->hw.mac.type >= e1000_82571)
3432 		tctl |= E1000_TCTL_MULR;
3433 
3434 	/* This write will effectively turn on the transmit unit. */
3435 	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
3436 
3437 }
3438 
3439 
3440 /*********************************************************************
3441  *
3442  *  Free all transmit rings.
3443  *
3444  **********************************************************************/
3445 static void
3446 em_free_transmit_structures(struct adapter *adapter)
3447 {
3448 	struct tx_ring *txr = adapter->tx_rings;
3449 
3450 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
3451 		EM_TX_LOCK(txr);
3452 		em_free_transmit_buffers(txr);
3453 		em_dma_free(adapter, &txr->txdma);
3454 		EM_TX_UNLOCK(txr);
3455 		EM_TX_LOCK_DESTROY(txr);
3456 	}
3457 
3458 	free(adapter->tx_rings, M_DEVBUF);
3459 }
3460 
3461 /*********************************************************************
3462  *
3463  *  Free transmit ring related data structures.
3464  *
3465  **********************************************************************/
3466 static void
3467 em_free_transmit_buffers(struct tx_ring *txr)
3468 {
3469 	struct adapter		*adapter = txr->adapter;
3470 	struct em_buffer	*txbuf;
3471 
3472 	INIT_DEBUGOUT("free_transmit_ring: begin");
3473 
3474 	if (txr->tx_buffers == NULL)
3475 		return;
3476 
3477 	for (int i = 0; i < adapter->num_tx_desc; i++) {
3478 		txbuf = &txr->tx_buffers[i];
3479 		if (txbuf->m_head != NULL) {
3480 			bus_dmamap_sync(txr->txtag, txbuf->map,
3481 			    BUS_DMASYNC_POSTWRITE);
3482 			bus_dmamap_unload(txr->txtag,
3483 			    txbuf->map);
3484 			m_freem(txbuf->m_head);
3485 			txbuf->m_head = NULL;
3486 			if (txbuf->map != NULL) {
3487 				bus_dmamap_destroy(txr->txtag,
3488 				    txbuf->map);
3489 				txbuf->map = NULL;
3490 			}
3491 		} else if (txbuf->map != NULL) {
3492 			bus_dmamap_unload(txr->txtag,
3493 			    txbuf->map);
3494 			bus_dmamap_destroy(txr->txtag,
3495 			    txbuf->map);
3496 			txbuf->map = NULL;
3497 		}
3498 	}
3499 #if __FreeBSD_version >= 800000
3500 	if (txr->br != NULL)
3501 		buf_ring_free(txr->br, M_DEVBUF);
3502 #endif
3503 	if (txr->tx_buffers != NULL) {
3504 		free(txr->tx_buffers, M_DEVBUF);
3505 		txr->tx_buffers = NULL;
3506 	}
3507 	if (txr->txtag != NULL) {
3508 		bus_dma_tag_destroy(txr->txtag);
3509 		txr->txtag = NULL;
3510 	}
3511 	return;
3512 }
3513 
3514 
3515 /*********************************************************************
3516  *  The offload context is protocol specific (TCP/UDP) and thus
3517  *  only needs to be set when the protocol changes. The occasion
3518  *  of a context change can be a performance detriment, and
3519  *  might be better just disabled. The reason arises in the way
3520  *  in which the controller supports pipelined requests from the
3521  *  Tx data DMA. Up to four requests can be pipelined, and they may
3522  *  belong to the same packet or to multiple packets. However all
3523  *  requests for one packet are issued before a request is issued
3524  *  for a subsequent packet and if a request for the next packet
3525  *  requires a context change, that request will be stalled
3526  *  until the previous request completes. This means setting up
3527  *  a new context effectively disables pipelined Tx data DMA which
3528  *  in turn greatly slow down performance to send small sized
3529  *  frames.
3530  **********************************************************************/
3531 static void
3532 em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
3533     struct ip *ip, u32 *txd_upper, u32 *txd_lower)
3534 {
3535 	struct adapter			*adapter = txr->adapter;
3536 	struct e1000_context_desc	*TXD = NULL;
3537 	struct em_buffer		*tx_buffer;
3538 	int				cur, hdr_len;
3539 	u32				cmd = 0;
3540 	u16				offload = 0;
3541 	u8				ipcso, ipcss, tucso, tucss;
3542 
3543 	ipcss = ipcso = tucss = tucso = 0;
3544 	hdr_len = ip_off + (ip->ip_hl << 2);
3545 	cur = txr->next_avail_desc;
3546 
3547 	/* Setup of IP header checksum. */
3548 	if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3549 		*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3550 		offload |= CSUM_IP;
3551 		ipcss = ip_off;
3552 		ipcso = ip_off + offsetof(struct ip, ip_sum);
3553 		/*
3554 		 * Start offset for header checksum calculation.
3555 		 * End offset for header checksum calculation.
3556 		 * Offset of place to put the checksum.
3557 		 */
3558 		TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
3559 		TXD->lower_setup.ip_fields.ipcss = ipcss;
3560 		TXD->lower_setup.ip_fields.ipcse = htole16(hdr_len);
3561 		TXD->lower_setup.ip_fields.ipcso = ipcso;
3562 		cmd |= E1000_TXD_CMD_IP;
3563 	}
3564 
3565 	if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3566  		*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3567  		*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3568  		offload |= CSUM_TCP;
3569  		tucss = hdr_len;
3570  		tucso = hdr_len + offsetof(struct tcphdr, th_sum);
3571  		/*
3572  		 * Setting up new checksum offload context for every frames
3573  		 * takes a lot of processing time for hardware. This also
3574  		 * reduces performance a lot for small sized frames so avoid
3575  		 * it if driver can use previously configured checksum
3576  		 * offload context.
3577  		 */
3578  		if (txr->last_hw_offload == offload) {
3579  			if (offload & CSUM_IP) {
3580  				if (txr->last_hw_ipcss == ipcss &&
3581  				    txr->last_hw_ipcso == ipcso &&
3582  				    txr->last_hw_tucss == tucss &&
3583  				    txr->last_hw_tucso == tucso)
3584  					return;
3585  			} else {
3586  				if (txr->last_hw_tucss == tucss &&
3587  				    txr->last_hw_tucso == tucso)
3588  					return;
3589  			}
3590   		}
3591  		txr->last_hw_offload = offload;
3592  		txr->last_hw_tucss = tucss;
3593  		txr->last_hw_tucso = tucso;
3594  		/*
3595  		 * Start offset for payload checksum calculation.
3596  		 * End offset for payload checksum calculation.
3597  		 * Offset of place to put the checksum.
3598  		 */
3599 		TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
3600  		TXD->upper_setup.tcp_fields.tucss = hdr_len;
3601  		TXD->upper_setup.tcp_fields.tucse = htole16(0);
3602  		TXD->upper_setup.tcp_fields.tucso = tucso;
3603  		cmd |= E1000_TXD_CMD_TCP;
3604  	} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3605  		*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3606  		*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3607  		tucss = hdr_len;
3608  		tucso = hdr_len + offsetof(struct udphdr, uh_sum);
3609  		/*
3610  		 * Setting up new checksum offload context for every frames
3611  		 * takes a lot of processing time for hardware. This also
3612  		 * reduces performance a lot for small sized frames so avoid
3613  		 * it if driver can use previously configured checksum
3614  		 * offload context.
3615  		 */
3616  		if (txr->last_hw_offload == offload) {
3617  			if (offload & CSUM_IP) {
3618  				if (txr->last_hw_ipcss == ipcss &&
3619  				    txr->last_hw_ipcso == ipcso &&
3620  				    txr->last_hw_tucss == tucss &&
3621  				    txr->last_hw_tucso == tucso)
3622  					return;
3623  			} else {
3624  				if (txr->last_hw_tucss == tucss &&
3625  				    txr->last_hw_tucso == tucso)
3626  					return;
3627  			}
3628  		}
3629  		txr->last_hw_offload = offload;
3630  		txr->last_hw_tucss = tucss;
3631  		txr->last_hw_tucso = tucso;
3632  		/*
3633  		 * Start offset for header checksum calculation.
3634  		 * End offset for header checksum calculation.
3635  		 * Offset of place to put the checksum.
3636  		 */
3637 		TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
3638  		TXD->upper_setup.tcp_fields.tucss = tucss;
3639  		TXD->upper_setup.tcp_fields.tucse = htole16(0);
3640  		TXD->upper_setup.tcp_fields.tucso = tucso;
3641   	}
3642 
3643  	if (offload & CSUM_IP) {
3644  		txr->last_hw_ipcss = ipcss;
3645  		txr->last_hw_ipcso = ipcso;
3646   	}
3647 
3648 	TXD->tcp_seg_setup.data = htole32(0);
3649 	TXD->cmd_and_length =
3650 	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3651 	tx_buffer = &txr->tx_buffers[cur];
3652 	tx_buffer->m_head = NULL;
3653 	tx_buffer->next_eop = -1;
3654 
3655 	if (++cur == adapter->num_tx_desc)
3656 		cur = 0;
3657 
3658 	txr->tx_avail--;
3659 	txr->next_avail_desc = cur;
3660 }
3661 
3662 
3663 /**********************************************************************
3664  *
3665  *  Setup work for hardware segmentation offload (TSO)
3666  *
3667  **********************************************************************/
3668 static void
3669 em_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
3670     struct ip *ip, struct tcphdr *tp, u32 *txd_upper, u32 *txd_lower)
3671 {
3672 	struct adapter			*adapter = txr->adapter;
3673 	struct e1000_context_desc	*TXD;
3674 	struct em_buffer		*tx_buffer;
3675 	int cur, hdr_len;
3676 
3677 	/*
3678 	 * In theory we can use the same TSO context if and only if
3679 	 * frame is the same type(IP/TCP) and the same MSS. However
3680 	 * checking whether a frame has the same IP/TCP structure is
3681 	 * hard thing so just ignore that and always restablish a
3682 	 * new TSO context.
3683 	 */
3684 	hdr_len = ip_off + (ip->ip_hl << 2) + (tp->th_off << 2);
3685 	*txd_lower = (E1000_TXD_CMD_DEXT |	/* Extended descr type */
3686 		      E1000_TXD_DTYP_D |	/* Data descr type */
3687 		      E1000_TXD_CMD_TSE);	/* Do TSE on this packet */
3688 
3689 	/* IP and/or TCP header checksum calculation and insertion. */
3690 	*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
3691 
3692 	cur = txr->next_avail_desc;
3693 	tx_buffer = &txr->tx_buffers[cur];
3694 	TXD = (struct e1000_context_desc *) &txr->tx_base[cur];
3695 
3696 	/*
3697 	 * Start offset for header checksum calculation.
3698 	 * End offset for header checksum calculation.
3699 	 * Offset of place put the checksum.
3700 	 */
3701 	TXD->lower_setup.ip_fields.ipcss = ip_off;
3702 	TXD->lower_setup.ip_fields.ipcse =
3703 	    htole16(ip_off + (ip->ip_hl << 2) - 1);
3704 	TXD->lower_setup.ip_fields.ipcso = ip_off + offsetof(struct ip, ip_sum);
3705 	/*
3706 	 * Start offset for payload checksum calculation.
3707 	 * End offset for payload checksum calculation.
3708 	 * Offset of place to put the checksum.
3709 	 */
3710 	TXD->upper_setup.tcp_fields.tucss = ip_off + (ip->ip_hl << 2);
3711 	TXD->upper_setup.tcp_fields.tucse = 0;
3712 	TXD->upper_setup.tcp_fields.tucso =
3713 	    ip_off + (ip->ip_hl << 2) + offsetof(struct tcphdr, th_sum);
3714 	/*
3715 	 * Payload size per packet w/o any headers.
3716 	 * Length of all headers up to payload.
3717 	 */
3718 	TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
3719 	TXD->tcp_seg_setup.fields.hdr_len = hdr_len;
3720 
3721 	TXD->cmd_and_length = htole32(adapter->txd_cmd |
3722 				E1000_TXD_CMD_DEXT |	/* Extended descr */
3723 				E1000_TXD_CMD_TSE |	/* TSE context */
3724 				E1000_TXD_CMD_IP |	/* Do IP csum */
3725 				E1000_TXD_CMD_TCP |	/* Do TCP checksum */
3726 				(mp->m_pkthdr.len - (hdr_len))); /* Total len */
3727 
3728 	tx_buffer->m_head = NULL;
3729 	tx_buffer->next_eop = -1;
3730 
3731 	if (++cur == adapter->num_tx_desc)
3732 		cur = 0;
3733 
3734 	txr->tx_avail--;
3735 	txr->next_avail_desc = cur;
3736 	txr->tx_tso = TRUE;
3737 }
3738 
3739 
3740 /**********************************************************************
3741  *
3742  *  Examine each tx_buffer in the used queue. If the hardware is done
3743  *  processing the packet then free associated resources. The
3744  *  tx_buffer is put back on the free queue.
3745  *
3746  **********************************************************************/
3747 static bool
3748 em_txeof(struct tx_ring *txr)
3749 {
3750 	struct adapter	*adapter = txr->adapter;
3751         int first, last, done, processed;
3752         struct em_buffer *tx_buffer;
3753         struct e1000_tx_desc   *tx_desc, *eop_desc;
3754 	struct ifnet   *ifp = adapter->ifp;
3755 
3756 	EM_TX_LOCK_ASSERT(txr);
3757 #ifdef DEV_NETMAP
3758 	if (ifp->if_capenable & IFCAP_NETMAP) {
3759 		struct netmap_adapter *na = NA(ifp);
3760 
3761 		selwakeuppri(&na->tx_rings[txr->me].si, PI_NET);
3762 		EM_TX_UNLOCK(txr);
3763 		EM_CORE_LOCK(adapter);
3764 		selwakeuppri(&na->tx_rings[na->num_queues + 1].si, PI_NET);
3765 		EM_CORE_UNLOCK(adapter);
3766 		EM_TX_LOCK(txr);
3767 		return (FALSE);
3768 	}
3769 #endif /* DEV_NETMAP */
3770 
3771 	/* No work, make sure watchdog is off */
3772         if (txr->tx_avail == adapter->num_tx_desc) {
3773 		txr->queue_status = EM_QUEUE_IDLE;
3774                 return (FALSE);
3775 	}
3776 
3777 	processed = 0;
3778         first = txr->next_to_clean;
3779         tx_desc = &txr->tx_base[first];
3780         tx_buffer = &txr->tx_buffers[first];
3781 	last = tx_buffer->next_eop;
3782         eop_desc = &txr->tx_base[last];
3783 
3784 	/*
3785 	 * What this does is get the index of the
3786 	 * first descriptor AFTER the EOP of the
3787 	 * first packet, that way we can do the
3788 	 * simple comparison on the inner while loop.
3789 	 */
3790 	if (++last == adapter->num_tx_desc)
3791  		last = 0;
3792 	done = last;
3793 
3794         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3795             BUS_DMASYNC_POSTREAD);
3796 
3797         while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3798 		/* We clean the range of the packet */
3799 		while (first != done) {
3800                 	tx_desc->upper.data = 0;
3801                 	tx_desc->lower.data = 0;
3802                 	tx_desc->buffer_addr = 0;
3803                 	++txr->tx_avail;
3804 			++processed;
3805 
3806 			if (tx_buffer->m_head) {
3807 				bus_dmamap_sync(txr->txtag,
3808 				    tx_buffer->map,
3809 				    BUS_DMASYNC_POSTWRITE);
3810 				bus_dmamap_unload(txr->txtag,
3811 				    tx_buffer->map);
3812                         	m_freem(tx_buffer->m_head);
3813                         	tx_buffer->m_head = NULL;
3814                 	}
3815 			tx_buffer->next_eop = -1;
3816 			txr->watchdog_time = ticks;
3817 
3818 	                if (++first == adapter->num_tx_desc)
3819 				first = 0;
3820 
3821 	                tx_buffer = &txr->tx_buffers[first];
3822 			tx_desc = &txr->tx_base[first];
3823 		}
3824 		++ifp->if_opackets;
3825 		/* See if we can continue to the next packet */
3826 		last = tx_buffer->next_eop;
3827 		if (last != -1) {
3828         		eop_desc = &txr->tx_base[last];
3829 			/* Get new done point */
3830 			if (++last == adapter->num_tx_desc) last = 0;
3831 			done = last;
3832 		} else
3833 			break;
3834         }
3835         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3836             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3837 
3838         txr->next_to_clean = first;
3839 
3840 	/*
3841 	** Watchdog calculation, we know there's
3842 	** work outstanding or the first return
3843 	** would have been taken, so none processed
3844 	** for too long indicates a hang. local timer
3845 	** will examine this and do a reset if needed.
3846 	*/
3847 	if ((!processed) && ((ticks - txr->watchdog_time) > EM_WATCHDOG))
3848 		txr->queue_status = EM_QUEUE_HUNG;
3849 
3850         /*
3851          * If we have a minimum free, clear IFF_DRV_OACTIVE
3852          * to tell the stack that it is OK to send packets.
3853 	 * Notice that all writes of OACTIVE happen under the
3854 	 * TX lock which, with a single queue, guarantees
3855 	 * sanity.
3856          */
3857         if (txr->tx_avail >= EM_MAX_SCATTER)
3858 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3859 
3860 	/* Disable watchdog if all clean */
3861 	if (txr->tx_avail == adapter->num_tx_desc) {
3862 		txr->queue_status = EM_QUEUE_IDLE;
3863 		return (FALSE);
3864 	}
3865 
3866 	return (TRUE);
3867 }
3868 
3869 
3870 /*********************************************************************
3871  *
3872  *  Refresh RX descriptor mbufs from system mbuf buffer pool.
3873  *
3874  **********************************************************************/
3875 static void
3876 em_refresh_mbufs(struct rx_ring *rxr, int limit)
3877 {
3878 	struct adapter		*adapter = rxr->adapter;
3879 	struct mbuf		*m;
3880 	bus_dma_segment_t	segs[1];
3881 	struct em_buffer	*rxbuf;
3882 	int			i, j, error, nsegs;
3883 	bool			cleaned = FALSE;
3884 
3885 	i = j = rxr->next_to_refresh;
3886 	/*
3887 	** Get one descriptor beyond
3888 	** our work mark to control
3889 	** the loop.
3890 	*/
3891 	if (++j == adapter->num_rx_desc)
3892 		j = 0;
3893 
3894 	while (j != limit) {
3895 		rxbuf = &rxr->rx_buffers[i];
3896 		if (rxbuf->m_head == NULL) {
3897 			m = m_getjcl(M_DONTWAIT, MT_DATA,
3898 			    M_PKTHDR, adapter->rx_mbuf_sz);
3899 			/*
3900 			** If we have a temporary resource shortage
3901 			** that causes a failure, just abort refresh
3902 			** for now, we will return to this point when
3903 			** reinvoked from em_rxeof.
3904 			*/
3905 			if (m == NULL)
3906 				goto update;
3907 		} else
3908 			m = rxbuf->m_head;
3909 
3910 		m->m_len = m->m_pkthdr.len = adapter->rx_mbuf_sz;
3911 		m->m_flags |= M_PKTHDR;
3912 		m->m_data = m->m_ext.ext_buf;
3913 
3914 		/* Use bus_dma machinery to setup the memory mapping  */
3915 		error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map,
3916 		    m, segs, &nsegs, BUS_DMA_NOWAIT);
3917 		if (error != 0) {
3918 			printf("Refresh mbufs: hdr dmamap load"
3919 			    " failure - %d\n", error);
3920 			m_free(m);
3921 			rxbuf->m_head = NULL;
3922 			goto update;
3923 		}
3924 		rxbuf->m_head = m;
3925 		bus_dmamap_sync(rxr->rxtag,
3926 		    rxbuf->map, BUS_DMASYNC_PREREAD);
3927 		rxr->rx_base[i].buffer_addr = htole64(segs[0].ds_addr);
3928 		cleaned = TRUE;
3929 
3930 		i = j; /* Next is precalulated for us */
3931 		rxr->next_to_refresh = i;
3932 		/* Calculate next controlling index */
3933 		if (++j == adapter->num_rx_desc)
3934 			j = 0;
3935 	}
3936 update:
3937 	/*
3938 	** Update the tail pointer only if,
3939 	** and as far as we have refreshed.
3940 	*/
3941 	if (cleaned)
3942 		E1000_WRITE_REG(&adapter->hw,
3943 		    E1000_RDT(rxr->me), rxr->next_to_refresh);
3944 
3945 	return;
3946 }
3947 
3948 
3949 /*********************************************************************
3950  *
3951  *  Allocate memory for rx_buffer structures. Since we use one
3952  *  rx_buffer per received packet, the maximum number of rx_buffer's
3953  *  that we'll need is equal to the number of receive descriptors
3954  *  that we've allocated.
3955  *
3956  **********************************************************************/
3957 static int
3958 em_allocate_receive_buffers(struct rx_ring *rxr)
3959 {
3960 	struct adapter		*adapter = rxr->adapter;
3961 	device_t		dev = adapter->dev;
3962 	struct em_buffer	*rxbuf;
3963 	int			error;
3964 
3965 	rxr->rx_buffers = malloc(sizeof(struct em_buffer) *
3966 	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3967 	if (rxr->rx_buffers == NULL) {
3968 		device_printf(dev, "Unable to allocate rx_buffer memory\n");
3969 		return (ENOMEM);
3970 	}
3971 
3972 	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3973 				1, 0,			/* alignment, bounds */
3974 				BUS_SPACE_MAXADDR,	/* lowaddr */
3975 				BUS_SPACE_MAXADDR,	/* highaddr */
3976 				NULL, NULL,		/* filter, filterarg */
3977 				MJUM9BYTES,		/* maxsize */
3978 				1,			/* nsegments */
3979 				MJUM9BYTES,		/* maxsegsize */
3980 				0,			/* flags */
3981 				NULL,			/* lockfunc */
3982 				NULL,			/* lockarg */
3983 				&rxr->rxtag);
3984 	if (error) {
3985 		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3986 		    __func__, error);
3987 		goto fail;
3988 	}
3989 
3990 	rxbuf = rxr->rx_buffers;
3991 	for (int i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
3992 		rxbuf = &rxr->rx_buffers[i];
3993 		error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
3994 		    &rxbuf->map);
3995 		if (error) {
3996 			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3997 			    __func__, error);
3998 			goto fail;
3999 		}
4000 	}
4001 
4002 	return (0);
4003 
4004 fail:
4005 	em_free_receive_structures(adapter);
4006 	return (error);
4007 }
4008 
4009 
4010 /*********************************************************************
4011  *
4012  *  Initialize a receive ring and its buffers.
4013  *
4014  **********************************************************************/
4015 static int
4016 em_setup_receive_ring(struct rx_ring *rxr)
4017 {
4018 	struct	adapter 	*adapter = rxr->adapter;
4019 	struct em_buffer	*rxbuf;
4020 	bus_dma_segment_t	seg[1];
4021 	int			rsize, nsegs, error;
4022 #ifdef DEV_NETMAP
4023 	struct netmap_adapter *na = NA(adapter->ifp);
4024 	struct netmap_slot *slot;
4025 #endif
4026 
4027 
4028 	/* Clear the ring contents */
4029 	EM_RX_LOCK(rxr);
4030 	rsize = roundup2(adapter->num_rx_desc *
4031 	    sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
4032 	bzero((void *)rxr->rx_base, rsize);
4033 #ifdef DEV_NETMAP
4034 	slot = netmap_reset(na, NR_RX, 0, 0);
4035 #endif
4036 
4037 	/*
4038 	** Free current RX buffer structs and their mbufs
4039 	*/
4040 	for (int i = 0; i < adapter->num_rx_desc; i++) {
4041 		rxbuf = &rxr->rx_buffers[i];
4042 		if (rxbuf->m_head != NULL) {
4043 			bus_dmamap_sync(rxr->rxtag, rxbuf->map,
4044 			    BUS_DMASYNC_POSTREAD);
4045 			bus_dmamap_unload(rxr->rxtag, rxbuf->map);
4046 			m_freem(rxbuf->m_head);
4047 			rxbuf->m_head = NULL; /* mark as freed */
4048 		}
4049 	}
4050 
4051 	/* Now replenish the mbufs */
4052         for (int j = 0; j != adapter->num_rx_desc; ++j) {
4053 		rxbuf = &rxr->rx_buffers[j];
4054 #ifdef DEV_NETMAP
4055 		if (slot) {
4056 			/* slot si is mapped to the j-th NIC-ring entry */
4057 			int si = j + na->rx_rings[0].nkr_hwofs;
4058 			uint64_t paddr;
4059 			void *addr;
4060 
4061 			if (si > na->num_rx_desc)
4062 				si -= na->num_rx_desc;
4063 			addr = PNMB(slot + si, &paddr);
4064 			netmap_load_map(rxr->rxtag, rxbuf->map, addr);
4065 			/* Update descriptor */
4066 			rxr->rx_base[j].buffer_addr = htole64(paddr);
4067 			continue;
4068 		}
4069 #endif /* DEV_NETMAP */
4070 		rxbuf->m_head = m_getjcl(M_DONTWAIT, MT_DATA,
4071 		    M_PKTHDR, adapter->rx_mbuf_sz);
4072 		if (rxbuf->m_head == NULL) {
4073 			error = ENOBUFS;
4074 			goto fail;
4075 		}
4076 		rxbuf->m_head->m_len = adapter->rx_mbuf_sz;
4077 		rxbuf->m_head->m_flags &= ~M_HASFCS; /* we strip it */
4078 		rxbuf->m_head->m_pkthdr.len = adapter->rx_mbuf_sz;
4079 
4080 		/* Get the memory mapping */
4081 		error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
4082 		    rxbuf->map, rxbuf->m_head, seg,
4083 		    &nsegs, BUS_DMA_NOWAIT);
4084 		if (error != 0) {
4085 			m_freem(rxbuf->m_head);
4086 			rxbuf->m_head = NULL;
4087 			goto fail;
4088 		}
4089 		bus_dmamap_sync(rxr->rxtag,
4090 		    rxbuf->map, BUS_DMASYNC_PREREAD);
4091 
4092 		/* Update descriptor */
4093 		rxr->rx_base[j].buffer_addr = htole64(seg[0].ds_addr);
4094 	}
4095 	rxr->next_to_check = 0;
4096 	rxr->next_to_refresh = 0;
4097 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4098 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4099 
4100 fail:
4101 	EM_RX_UNLOCK(rxr);
4102 	return (error);
4103 }
4104 
4105 /*********************************************************************
4106  *
4107  *  Initialize all receive rings.
4108  *
4109  **********************************************************************/
4110 static int
4111 em_setup_receive_structures(struct adapter *adapter)
4112 {
4113 	struct rx_ring *rxr = adapter->rx_rings;
4114 	int q;
4115 
4116 	for (q = 0; q < adapter->num_queues; q++, rxr++)
4117 		if (em_setup_receive_ring(rxr))
4118 			goto fail;
4119 
4120 	return (0);
4121 fail:
4122 	/*
4123 	 * Free RX buffers allocated so far, we will only handle
4124 	 * the rings that completed, the failing case will have
4125 	 * cleaned up for itself. 'q' failed, so its the terminus.
4126 	 */
4127 	for (int i = 0; i < q; ++i) {
4128 		rxr = &adapter->rx_rings[i];
4129 		for (int n = 0; n < adapter->num_rx_desc; n++) {
4130 			struct em_buffer *rxbuf;
4131 			rxbuf = &rxr->rx_buffers[n];
4132 			if (rxbuf->m_head != NULL) {
4133 				bus_dmamap_sync(rxr->rxtag, rxbuf->map,
4134 			  	  BUS_DMASYNC_POSTREAD);
4135 				bus_dmamap_unload(rxr->rxtag, rxbuf->map);
4136 				m_freem(rxbuf->m_head);
4137 				rxbuf->m_head = NULL;
4138 			}
4139 		}
4140 		rxr->next_to_check = 0;
4141 		rxr->next_to_refresh = 0;
4142 	}
4143 
4144 	return (ENOBUFS);
4145 }
4146 
4147 /*********************************************************************
4148  *
4149  *  Free all receive rings.
4150  *
4151  **********************************************************************/
4152 static void
4153 em_free_receive_structures(struct adapter *adapter)
4154 {
4155 	struct rx_ring *rxr = adapter->rx_rings;
4156 
4157 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4158 		em_free_receive_buffers(rxr);
4159 		/* Free the ring memory as well */
4160 		em_dma_free(adapter, &rxr->rxdma);
4161 		EM_RX_LOCK_DESTROY(rxr);
4162 	}
4163 
4164 	free(adapter->rx_rings, M_DEVBUF);
4165 }
4166 
4167 
4168 /*********************************************************************
4169  *
4170  *  Free receive ring data structures
4171  *
4172  **********************************************************************/
4173 static void
4174 em_free_receive_buffers(struct rx_ring *rxr)
4175 {
4176 	struct adapter		*adapter = rxr->adapter;
4177 	struct em_buffer	*rxbuf = NULL;
4178 
4179 	INIT_DEBUGOUT("free_receive_buffers: begin");
4180 
4181 	if (rxr->rx_buffers != NULL) {
4182 		for (int i = 0; i < adapter->num_rx_desc; i++) {
4183 			rxbuf = &rxr->rx_buffers[i];
4184 			if (rxbuf->map != NULL) {
4185 				bus_dmamap_sync(rxr->rxtag, rxbuf->map,
4186 				    BUS_DMASYNC_POSTREAD);
4187 				bus_dmamap_unload(rxr->rxtag, rxbuf->map);
4188 				bus_dmamap_destroy(rxr->rxtag, rxbuf->map);
4189 			}
4190 			if (rxbuf->m_head != NULL) {
4191 				m_freem(rxbuf->m_head);
4192 				rxbuf->m_head = NULL;
4193 			}
4194 		}
4195 		free(rxr->rx_buffers, M_DEVBUF);
4196 		rxr->rx_buffers = NULL;
4197 		rxr->next_to_check = 0;
4198 		rxr->next_to_refresh = 0;
4199 	}
4200 
4201 	if (rxr->rxtag != NULL) {
4202 		bus_dma_tag_destroy(rxr->rxtag);
4203 		rxr->rxtag = NULL;
4204 	}
4205 
4206 	return;
4207 }
4208 
4209 
4210 /*********************************************************************
4211  *
4212  *  Enable receive unit.
4213  *
4214  **********************************************************************/
4215 #define MAX_INTS_PER_SEC	8000
4216 #define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
4217 
4218 static void
4219 em_initialize_receive_unit(struct adapter *adapter)
4220 {
4221 	struct rx_ring	*rxr = adapter->rx_rings;
4222 	struct ifnet	*ifp = adapter->ifp;
4223 	struct e1000_hw	*hw = &adapter->hw;
4224 	u64	bus_addr;
4225 	u32	rctl, rxcsum;
4226 
4227 	INIT_DEBUGOUT("em_initialize_receive_units: begin");
4228 
4229 	/*
4230 	 * Make sure receives are disabled while setting
4231 	 * up the descriptor ring
4232 	 */
4233 	rctl = E1000_READ_REG(hw, E1000_RCTL);
4234 	/* Do not disable if ever enabled on this hardware */
4235 	if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583))
4236 		E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
4237 
4238 	E1000_WRITE_REG(&adapter->hw, E1000_RADV,
4239 	    adapter->rx_abs_int_delay.value);
4240 	/*
4241 	 * Set the interrupt throttling rate. Value is calculated
4242 	 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
4243 	 */
4244 	E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR);
4245 
4246 	/*
4247 	** When using MSIX interrupts we need to throttle
4248 	** using the EITR register (82574 only)
4249 	*/
4250 	if (hw->mac.type == e1000_82574) {
4251 		for (int i = 0; i < 4; i++)
4252 			E1000_WRITE_REG(hw, E1000_EITR_82574(i),
4253 			    DEFAULT_ITR);
4254 		/* Disable accelerated acknowledge */
4255 		E1000_WRITE_REG(hw, E1000_RFCTL, E1000_RFCTL_ACK_DIS);
4256 	}
4257 
4258 	if (ifp->if_capenable & IFCAP_RXCSUM) {
4259 		rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
4260 		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
4261 		E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
4262 	}
4263 
4264 	/*
4265 	** XXX TEMPORARY WORKAROUND: on some systems with 82573
4266 	** long latencies are observed, like Lenovo X60. This
4267 	** change eliminates the problem, but since having positive
4268 	** values in RDTR is a known source of problems on other
4269 	** platforms another solution is being sought.
4270 	*/
4271 	if (hw->mac.type == e1000_82573)
4272 		E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
4273 
4274 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4275 		/* Setup the Base and Length of the Rx Descriptor Ring */
4276 		bus_addr = rxr->rxdma.dma_paddr;
4277 		E1000_WRITE_REG(hw, E1000_RDLEN(i),
4278 		    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
4279 		E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32));
4280 		E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr);
4281 		/* Setup the Head and Tail Descriptor Pointers */
4282 		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
4283 #ifdef DEV_NETMAP
4284 		/*
4285 		 * an init() while a netmap client is active must
4286 		 * preserve the rx buffers passed to userspace.
4287 		 * In this driver it means we adjust RDT to
4288 		 * something different from na->num_rx_desc - 1.
4289 		 */
4290 		if (ifp->if_capenable & IFCAP_NETMAP) {
4291 			struct netmap_adapter *na = NA(adapter->ifp);
4292 			struct netmap_kring *kring = &na->rx_rings[i];
4293 			int t = na->num_rx_desc - 1 - kring->nr_hwavail;
4294 
4295 			E1000_WRITE_REG(hw, E1000_RDT(i), t);
4296 		} else
4297 #endif /* DEV_NETMAP */
4298 		E1000_WRITE_REG(hw, E1000_RDT(i), adapter->num_rx_desc - 1);
4299 	}
4300 
4301 	/* Set PTHRESH for improved jumbo performance */
4302 	if (((adapter->hw.mac.type == e1000_ich9lan) ||
4303 	    (adapter->hw.mac.type == e1000_pch2lan) ||
4304 	    (adapter->hw.mac.type == e1000_ich10lan)) &&
4305 	    (ifp->if_mtu > ETHERMTU)) {
4306 		u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
4307 		E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
4308 	}
4309 
4310 	if (adapter->hw.mac.type == e1000_pch2lan) {
4311 		if (ifp->if_mtu > ETHERMTU)
4312 			e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
4313 		else
4314 			e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
4315 	}
4316 
4317 	/* Setup the Receive Control Register */
4318 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4319 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
4320 	    E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
4321 	    (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4322 
4323         /* Strip the CRC */
4324         rctl |= E1000_RCTL_SECRC;
4325 
4326         /* Make sure VLAN Filters are off */
4327         rctl &= ~E1000_RCTL_VFE;
4328 	rctl &= ~E1000_RCTL_SBP;
4329 
4330 	if (adapter->rx_mbuf_sz == MCLBYTES)
4331 		rctl |= E1000_RCTL_SZ_2048;
4332 	else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
4333 		rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
4334 	else if (adapter->rx_mbuf_sz > MJUMPAGESIZE)
4335 		rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
4336 
4337 	if (ifp->if_mtu > ETHERMTU)
4338 		rctl |= E1000_RCTL_LPE;
4339 	else
4340 		rctl &= ~E1000_RCTL_LPE;
4341 
4342 	/* Write out the settings */
4343 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
4344 
4345 	return;
4346 }
4347 
4348 
4349 /*********************************************************************
4350  *
4351  *  This routine executes in interrupt context. It replenishes
4352  *  the mbufs in the descriptor and sends data which has been
4353  *  dma'ed into host memory to upper layer.
4354  *
4355  *  We loop at most count times if count is > 0, or until done if
4356  *  count < 0.
4357  *
4358  *  For polling we also now return the number of cleaned packets
4359  *********************************************************************/
4360 static bool
4361 em_rxeof(struct rx_ring *rxr, int count, int *done)
4362 {
4363 	struct adapter		*adapter = rxr->adapter;
4364 	struct ifnet		*ifp = adapter->ifp;
4365 	struct mbuf		*mp, *sendmp;
4366 	u8			status = 0;
4367 	u16 			len;
4368 	int			i, processed, rxdone = 0;
4369 	bool			eop;
4370 	struct e1000_rx_desc	*cur;
4371 
4372 	EM_RX_LOCK(rxr);
4373 
4374 #ifdef DEV_NETMAP
4375 	if (ifp->if_capenable & IFCAP_NETMAP) {
4376 		struct netmap_adapter *na = NA(ifp);
4377 
4378 		selwakeuppri(&na->rx_rings[rxr->me].si, PI_NET);
4379 		EM_RX_UNLOCK(rxr);
4380 		EM_CORE_LOCK(adapter);
4381 		selwakeuppri(&na->rx_rings[na->num_queues + 1].si, PI_NET);
4382 		EM_CORE_UNLOCK(adapter);
4383 		return (0);
4384 	}
4385 #endif /* DEV_NETMAP */
4386 
4387 	for (i = rxr->next_to_check, processed = 0; count != 0;) {
4388 
4389 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
4390 			break;
4391 
4392 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4393 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4394 
4395 		cur = &rxr->rx_base[i];
4396 		status = cur->status;
4397 		mp = sendmp = NULL;
4398 
4399 		if ((status & E1000_RXD_STAT_DD) == 0)
4400 			break;
4401 
4402 		len = le16toh(cur->length);
4403 		eop = (status & E1000_RXD_STAT_EOP) != 0;
4404 
4405 		if ((cur->errors & E1000_RXD_ERR_FRAME_ERR_MASK) ||
4406 		    (rxr->discard == TRUE)) {
4407 			ifp->if_ierrors++;
4408 			++rxr->rx_discarded;
4409 			if (!eop) /* Catch subsequent segs */
4410 				rxr->discard = TRUE;
4411 			else
4412 				rxr->discard = FALSE;
4413 			em_rx_discard(rxr, i);
4414 			goto next_desc;
4415 		}
4416 
4417 		/* Assign correct length to the current fragment */
4418 		mp = rxr->rx_buffers[i].m_head;
4419 		mp->m_len = len;
4420 
4421 		/* Trigger for refresh */
4422 		rxr->rx_buffers[i].m_head = NULL;
4423 
4424 		/* First segment? */
4425 		if (rxr->fmp == NULL) {
4426 			mp->m_pkthdr.len = len;
4427 			rxr->fmp = rxr->lmp = mp;
4428 		} else {
4429 			/* Chain mbuf's together */
4430 			mp->m_flags &= ~M_PKTHDR;
4431 			rxr->lmp->m_next = mp;
4432 			rxr->lmp = mp;
4433 			rxr->fmp->m_pkthdr.len += len;
4434 		}
4435 
4436 		if (eop) {
4437 			--count;
4438 			sendmp = rxr->fmp;
4439 			sendmp->m_pkthdr.rcvif = ifp;
4440 			ifp->if_ipackets++;
4441 			em_receive_checksum(cur, sendmp);
4442 #ifndef __NO_STRICT_ALIGNMENT
4443 			if (adapter->max_frame_size >
4444 			    (MCLBYTES - ETHER_ALIGN) &&
4445 			    em_fixup_rx(rxr) != 0)
4446 				goto skip;
4447 #endif
4448 			if (status & E1000_RXD_STAT_VP) {
4449 				sendmp->m_pkthdr.ether_vtag =
4450 				    le16toh(cur->special);
4451 				sendmp->m_flags |= M_VLANTAG;
4452 			}
4453 #ifndef __NO_STRICT_ALIGNMENT
4454 skip:
4455 #endif
4456 			rxr->fmp = rxr->lmp = NULL;
4457 		}
4458 next_desc:
4459 		/* Zero out the receive descriptors status. */
4460 		cur->status = 0;
4461 		++rxdone;	/* cumulative for POLL */
4462 		++processed;
4463 
4464 		/* Advance our pointers to the next descriptor. */
4465 		if (++i == adapter->num_rx_desc)
4466 			i = 0;
4467 
4468 		/* Send to the stack */
4469 		if (sendmp != NULL) {
4470 			rxr->next_to_check = i;
4471 			EM_RX_UNLOCK(rxr);
4472 			(*ifp->if_input)(ifp, sendmp);
4473 			EM_RX_LOCK(rxr);
4474 			i = rxr->next_to_check;
4475 		}
4476 
4477 		/* Only refresh mbufs every 8 descriptors */
4478 		if (processed == 8) {
4479 			em_refresh_mbufs(rxr, i);
4480 			processed = 0;
4481 		}
4482 	}
4483 
4484 	/* Catch any remaining refresh work */
4485 	if (e1000_rx_unrefreshed(rxr))
4486 		em_refresh_mbufs(rxr, i);
4487 
4488 	rxr->next_to_check = i;
4489 	if (done != NULL)
4490 		*done = rxdone;
4491 	EM_RX_UNLOCK(rxr);
4492 
4493 	return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
4494 }
4495 
4496 static __inline void
4497 em_rx_discard(struct rx_ring *rxr, int i)
4498 {
4499 	struct em_buffer	*rbuf;
4500 
4501 	rbuf = &rxr->rx_buffers[i];
4502 	/* Free any previous pieces */
4503 	if (rxr->fmp != NULL) {
4504 		rxr->fmp->m_flags |= M_PKTHDR;
4505 		m_freem(rxr->fmp);
4506 		rxr->fmp = NULL;
4507 		rxr->lmp = NULL;
4508 	}
4509 	/*
4510 	** Free buffer and allow em_refresh_mbufs()
4511 	** to clean up and recharge buffer.
4512 	*/
4513 	if (rbuf->m_head) {
4514 		m_free(rbuf->m_head);
4515 		rbuf->m_head = NULL;
4516 	}
4517 	return;
4518 }
4519 
4520 #ifndef __NO_STRICT_ALIGNMENT
4521 /*
4522  * When jumbo frames are enabled we should realign entire payload on
4523  * architecures with strict alignment. This is serious design mistake of 8254x
4524  * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4525  * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4526  * payload. On architecures without strict alignment restrictions 8254x still
4527  * performs unaligned memory access which would reduce the performance too.
4528  * To avoid copying over an entire frame to align, we allocate a new mbuf and
4529  * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4530  * existing mbuf chain.
4531  *
4532  * Be aware, best performance of the 8254x is achived only when jumbo frame is
4533  * not used at all on architectures with strict alignment.
4534  */
4535 static int
4536 em_fixup_rx(struct rx_ring *rxr)
4537 {
4538 	struct adapter *adapter = rxr->adapter;
4539 	struct mbuf *m, *n;
4540 	int error;
4541 
4542 	error = 0;
4543 	m = rxr->fmp;
4544 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4545 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4546 		m->m_data += ETHER_HDR_LEN;
4547 	} else {
4548 		MGETHDR(n, M_DONTWAIT, MT_DATA);
4549 		if (n != NULL) {
4550 			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4551 			m->m_data += ETHER_HDR_LEN;
4552 			m->m_len -= ETHER_HDR_LEN;
4553 			n->m_len = ETHER_HDR_LEN;
4554 			M_MOVE_PKTHDR(n, m);
4555 			n->m_next = m;
4556 			rxr->fmp = n;
4557 		} else {
4558 			adapter->dropped_pkts++;
4559 			m_freem(rxr->fmp);
4560 			rxr->fmp = NULL;
4561 			error = ENOMEM;
4562 		}
4563 	}
4564 
4565 	return (error);
4566 }
4567 #endif
4568 
4569 /*********************************************************************
4570  *
4571  *  Verify that the hardware indicated that the checksum is valid.
4572  *  Inform the stack about the status of checksum so that stack
4573  *  doesn't spend time verifying the checksum.
4574  *
4575  *********************************************************************/
4576 static void
4577 em_receive_checksum(struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4578 {
4579 	/* Ignore Checksum bit is set */
4580 	if (rx_desc->status & E1000_RXD_STAT_IXSM) {
4581 		mp->m_pkthdr.csum_flags = 0;
4582 		return;
4583 	}
4584 
4585 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4586 		/* Did it pass? */
4587 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4588 			/* IP Checksum Good */
4589 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4590 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4591 
4592 		} else {
4593 			mp->m_pkthdr.csum_flags = 0;
4594 		}
4595 	}
4596 
4597 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4598 		/* Did it pass? */
4599 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4600 			mp->m_pkthdr.csum_flags |=
4601 			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4602 			mp->m_pkthdr.csum_data = htons(0xffff);
4603 		}
4604 	}
4605 }
4606 
4607 /*
4608  * This routine is run via an vlan
4609  * config EVENT
4610  */
4611 static void
4612 em_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4613 {
4614 	struct adapter	*adapter = ifp->if_softc;
4615 	u32		index, bit;
4616 
4617 	if (ifp->if_softc !=  arg)   /* Not our event */
4618 		return;
4619 
4620 	if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
4621                 return;
4622 
4623 	EM_CORE_LOCK(adapter);
4624 	index = (vtag >> 5) & 0x7F;
4625 	bit = vtag & 0x1F;
4626 	adapter->shadow_vfta[index] |= (1 << bit);
4627 	++adapter->num_vlans;
4628 	/* Re-init to load the changes */
4629 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
4630 		em_init_locked(adapter);
4631 	EM_CORE_UNLOCK(adapter);
4632 }
4633 
4634 /*
4635  * This routine is run via an vlan
4636  * unconfig EVENT
4637  */
4638 static void
4639 em_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4640 {
4641 	struct adapter	*adapter = ifp->if_softc;
4642 	u32		index, bit;
4643 
4644 	if (ifp->if_softc !=  arg)
4645 		return;
4646 
4647 	if ((vtag == 0) || (vtag > 4095))       /* Invalid */
4648                 return;
4649 
4650 	EM_CORE_LOCK(adapter);
4651 	index = (vtag >> 5) & 0x7F;
4652 	bit = vtag & 0x1F;
4653 	adapter->shadow_vfta[index] &= ~(1 << bit);
4654 	--adapter->num_vlans;
4655 	/* Re-init to load the changes */
4656 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
4657 		em_init_locked(adapter);
4658 	EM_CORE_UNLOCK(adapter);
4659 }
4660 
4661 static void
4662 em_setup_vlan_hw_support(struct adapter *adapter)
4663 {
4664 	struct e1000_hw *hw = &adapter->hw;
4665 	u32             reg;
4666 
4667 	/*
4668 	** We get here thru init_locked, meaning
4669 	** a soft reset, this has already cleared
4670 	** the VFTA and other state, so if there
4671 	** have been no vlan's registered do nothing.
4672 	*/
4673 	if (adapter->num_vlans == 0)
4674                 return;
4675 
4676 	/*
4677 	** A soft reset zero's out the VFTA, so
4678 	** we need to repopulate it now.
4679 	*/
4680 	for (int i = 0; i < EM_VFTA_SIZE; i++)
4681                 if (adapter->shadow_vfta[i] != 0)
4682 			E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
4683                             i, adapter->shadow_vfta[i]);
4684 
4685 	reg = E1000_READ_REG(hw, E1000_CTRL);
4686 	reg |= E1000_CTRL_VME;
4687 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
4688 
4689 	/* Enable the Filter Table */
4690 	reg = E1000_READ_REG(hw, E1000_RCTL);
4691 	reg &= ~E1000_RCTL_CFIEN;
4692 	reg |= E1000_RCTL_VFE;
4693 	E1000_WRITE_REG(hw, E1000_RCTL, reg);
4694 }
4695 
4696 static void
4697 em_enable_intr(struct adapter *adapter)
4698 {
4699 	struct e1000_hw *hw = &adapter->hw;
4700 	u32 ims_mask = IMS_ENABLE_MASK;
4701 
4702 	if (hw->mac.type == e1000_82574) {
4703 		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4704 		ims_mask |= EM_MSIX_MASK;
4705 	}
4706 	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
4707 }
4708 
4709 static void
4710 em_disable_intr(struct adapter *adapter)
4711 {
4712 	struct e1000_hw *hw = &adapter->hw;
4713 
4714 	if (hw->mac.type == e1000_82574)
4715 		E1000_WRITE_REG(hw, EM_EIAC, 0);
4716 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4717 }
4718 
4719 /*
4720  * Bit of a misnomer, what this really means is
4721  * to enable OS management of the system... aka
4722  * to disable special hardware management features
4723  */
4724 static void
4725 em_init_manageability(struct adapter *adapter)
4726 {
4727 	/* A shared code workaround */
4728 #define E1000_82542_MANC2H E1000_MANC2H
4729 	if (adapter->has_manage) {
4730 		int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
4731 		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4732 
4733 		/* disable hardware interception of ARP */
4734 		manc &= ~(E1000_MANC_ARP_EN);
4735 
4736                 /* enable receiving management packets to the host */
4737 		manc |= E1000_MANC_EN_MNG2HOST;
4738 #define E1000_MNG2HOST_PORT_623 (1 << 5)
4739 #define E1000_MNG2HOST_PORT_664 (1 << 6)
4740 		manc2h |= E1000_MNG2HOST_PORT_623;
4741 		manc2h |= E1000_MNG2HOST_PORT_664;
4742 		E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
4743 		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4744 	}
4745 }
4746 
4747 /*
4748  * Give control back to hardware management
4749  * controller if there is one.
4750  */
4751 static void
4752 em_release_manageability(struct adapter *adapter)
4753 {
4754 	if (adapter->has_manage) {
4755 		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4756 
4757 		/* re-enable hardware interception of ARP */
4758 		manc |= E1000_MANC_ARP_EN;
4759 		manc &= ~E1000_MANC_EN_MNG2HOST;
4760 
4761 		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4762 	}
4763 }
4764 
4765 /*
4766  * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4767  * For ASF and Pass Through versions of f/w this means
4768  * that the driver is loaded. For AMT version type f/w
4769  * this means that the network i/f is open.
4770  */
4771 static void
4772 em_get_hw_control(struct adapter *adapter)
4773 {
4774 	u32 ctrl_ext, swsm;
4775 
4776 	if (adapter->hw.mac.type == e1000_82573) {
4777 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4778 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4779 		    swsm | E1000_SWSM_DRV_LOAD);
4780 		return;
4781 	}
4782 	/* else */
4783 	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4784 	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4785 	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4786 	return;
4787 }
4788 
4789 /*
4790  * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4791  * For ASF and Pass Through versions of f/w this means that
4792  * the driver is no longer loaded. For AMT versions of the
4793  * f/w this means that the network i/f is closed.
4794  */
4795 static void
4796 em_release_hw_control(struct adapter *adapter)
4797 {
4798 	u32 ctrl_ext, swsm;
4799 
4800 	if (!adapter->has_manage)
4801 		return;
4802 
4803 	if (adapter->hw.mac.type == e1000_82573) {
4804 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4805 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4806 		    swsm & ~E1000_SWSM_DRV_LOAD);
4807 		return;
4808 	}
4809 	/* else */
4810 	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4811 	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4812 	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4813 	return;
4814 }
4815 
4816 static int
4817 em_is_valid_ether_addr(u8 *addr)
4818 {
4819 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4820 
4821 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4822 		return (FALSE);
4823 	}
4824 
4825 	return (TRUE);
4826 }
4827 
4828 /*
4829 ** Parse the interface capabilities with regard
4830 ** to both system management and wake-on-lan for
4831 ** later use.
4832 */
4833 static void
4834 em_get_wakeup(device_t dev)
4835 {
4836 	struct adapter	*adapter = device_get_softc(dev);
4837 	u16		eeprom_data = 0, device_id, apme_mask;
4838 
4839 	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
4840 	apme_mask = EM_EEPROM_APME;
4841 
4842 	switch (adapter->hw.mac.type) {
4843 	case e1000_82573:
4844 	case e1000_82583:
4845 		adapter->has_amt = TRUE;
4846 		/* Falls thru */
4847 	case e1000_82571:
4848 	case e1000_82572:
4849 	case e1000_80003es2lan:
4850 		if (adapter->hw.bus.func == 1) {
4851 			e1000_read_nvm(&adapter->hw,
4852 			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4853 			break;
4854 		} else
4855 			e1000_read_nvm(&adapter->hw,
4856 			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4857 		break;
4858 	case e1000_ich8lan:
4859 	case e1000_ich9lan:
4860 	case e1000_ich10lan:
4861 	case e1000_pchlan:
4862 	case e1000_pch2lan:
4863 		apme_mask = E1000_WUC_APME;
4864 		adapter->has_amt = TRUE;
4865 		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
4866 		break;
4867 	default:
4868 		e1000_read_nvm(&adapter->hw,
4869 		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4870 		break;
4871 	}
4872 	if (eeprom_data & apme_mask)
4873 		adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4874 	/*
4875          * We have the eeprom settings, now apply the special cases
4876          * where the eeprom may be wrong or the board won't support
4877          * wake on lan on a particular port
4878 	 */
4879 	device_id = pci_get_device(dev);
4880         switch (device_id) {
4881 	case E1000_DEV_ID_82571EB_FIBER:
4882 		/* Wake events only supported on port A for dual fiber
4883 		 * regardless of eeprom setting */
4884 		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4885 		    E1000_STATUS_FUNC_1)
4886 			adapter->wol = 0;
4887 		break;
4888 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
4889 	case E1000_DEV_ID_82571EB_QUAD_FIBER:
4890 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
4891                 /* if quad port adapter, disable WoL on all but port A */
4892 		if (global_quad_port_a != 0)
4893 			adapter->wol = 0;
4894 		/* Reset for multiple quad port adapters */
4895 		if (++global_quad_port_a == 4)
4896 			global_quad_port_a = 0;
4897                 break;
4898 	}
4899 	return;
4900 }
4901 
4902 
4903 /*
4904  * Enable PCI Wake On Lan capability
4905  */
4906 static void
4907 em_enable_wakeup(device_t dev)
4908 {
4909 	struct adapter	*adapter = device_get_softc(dev);
4910 	struct ifnet	*ifp = adapter->ifp;
4911 	u32		pmc, ctrl, ctrl_ext, rctl;
4912 	u16     	status;
4913 
4914 	if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
4915 		return;
4916 
4917 	/* Advertise the wakeup capability */
4918 	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4919 	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4920 	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4921 	E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4922 
4923 	if ((adapter->hw.mac.type == e1000_ich8lan) ||
4924 	    (adapter->hw.mac.type == e1000_pchlan) ||
4925 	    (adapter->hw.mac.type == e1000_ich9lan) ||
4926 	    (adapter->hw.mac.type == e1000_ich10lan))
4927 		e1000_suspend_workarounds_ich8lan(&adapter->hw);
4928 
4929 	/* Keep the laser running on Fiber adapters */
4930 	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4931 	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4932 		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4933 		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4934 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4935 	}
4936 
4937 	/*
4938 	** Determine type of Wakeup: note that wol
4939 	** is set with all bits on by default.
4940 	*/
4941 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4942 		adapter->wol &= ~E1000_WUFC_MAG;
4943 
4944 	if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4945 		adapter->wol &= ~E1000_WUFC_MC;
4946 	else {
4947 		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4948 		rctl |= E1000_RCTL_MPE;
4949 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4950 	}
4951 
4952 	if ((adapter->hw.mac.type == e1000_pchlan) ||
4953 	    (adapter->hw.mac.type == e1000_pch2lan)) {
4954 		if (em_enable_phy_wakeup(adapter))
4955 			return;
4956 	} else {
4957 		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4958 		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
4959 	}
4960 
4961 	if (adapter->hw.phy.type == e1000_phy_igp_3)
4962 		e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
4963 
4964         /* Request PME */
4965         status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4966 	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4967 	if (ifp->if_capenable & IFCAP_WOL)
4968 		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4969         pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4970 
4971 	return;
4972 }
4973 
4974 /*
4975 ** WOL in the newer chipset interfaces (pchlan)
4976 ** require thing to be copied into the phy
4977 */
4978 static int
4979 em_enable_phy_wakeup(struct adapter *adapter)
4980 {
4981 	struct e1000_hw *hw = &adapter->hw;
4982 	u32 mreg, ret = 0;
4983 	u16 preg;
4984 
4985 	/* copy MAC RARs to PHY RARs */
4986 	e1000_copy_rx_addrs_to_phy_ich8lan(hw);
4987 
4988 	/* copy MAC MTA to PHY MTA */
4989 	for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4990 		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4991 		e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4992 		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4993 		    (u16)((mreg >> 16) & 0xFFFF));
4994 	}
4995 
4996 	/* configure PHY Rx Control register */
4997 	e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4998 	mreg = E1000_READ_REG(hw, E1000_RCTL);
4999 	if (mreg & E1000_RCTL_UPE)
5000 		preg |= BM_RCTL_UPE;
5001 	if (mreg & E1000_RCTL_MPE)
5002 		preg |= BM_RCTL_MPE;
5003 	preg &= ~(BM_RCTL_MO_MASK);
5004 	if (mreg & E1000_RCTL_MO_3)
5005 		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5006 				<< BM_RCTL_MO_SHIFT);
5007 	if (mreg & E1000_RCTL_BAM)
5008 		preg |= BM_RCTL_BAM;
5009 	if (mreg & E1000_RCTL_PMCF)
5010 		preg |= BM_RCTL_PMCF;
5011 	mreg = E1000_READ_REG(hw, E1000_CTRL);
5012 	if (mreg & E1000_CTRL_RFCE)
5013 		preg |= BM_RCTL_RFCE;
5014 	e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
5015 
5016 	/* enable PHY wakeup in MAC register */
5017 	E1000_WRITE_REG(hw, E1000_WUC,
5018 	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5019 	E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
5020 
5021 	/* configure and enable PHY wakeup in PHY registers */
5022 	e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
5023 	e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5024 
5025 	/* activate PHY wakeup */
5026 	ret = hw->phy.ops.acquire(hw);
5027 	if (ret) {
5028 		printf("Could not acquire PHY\n");
5029 		return ret;
5030 	}
5031 	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
5032 	                         (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
5033 	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
5034 	if (ret) {
5035 		printf("Could not read PHY page 769\n");
5036 		goto out;
5037 	}
5038 	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5039 	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
5040 	if (ret)
5041 		printf("Could not set PHY Host Wakeup bit\n");
5042 out:
5043 	hw->phy.ops.release(hw);
5044 
5045 	return ret;
5046 }
5047 
5048 static void
5049 em_led_func(void *arg, int onoff)
5050 {
5051 	struct adapter	*adapter = arg;
5052 
5053 	EM_CORE_LOCK(adapter);
5054 	if (onoff) {
5055 		e1000_setup_led(&adapter->hw);
5056 		e1000_led_on(&adapter->hw);
5057 	} else {
5058 		e1000_led_off(&adapter->hw);
5059 		e1000_cleanup_led(&adapter->hw);
5060 	}
5061 	EM_CORE_UNLOCK(adapter);
5062 }
5063 
5064 /*
5065 ** Disable the L0S and L1 LINK states
5066 */
5067 static void
5068 em_disable_aspm(struct adapter *adapter)
5069 {
5070 	int		base, reg;
5071 	u16		link_cap,link_ctrl;
5072 	device_t	dev = adapter->dev;
5073 
5074 	switch (adapter->hw.mac.type) {
5075 		case e1000_82573:
5076 		case e1000_82574:
5077 		case e1000_82583:
5078 			break;
5079 		default:
5080 			return;
5081 	}
5082 	if (pci_find_cap(dev, PCIY_EXPRESS, &base) != 0)
5083 		return;
5084 	reg = base + PCIR_EXPRESS_LINK_CAP;
5085 	link_cap = pci_read_config(dev, reg, 2);
5086 	if ((link_cap & PCIM_LINK_CAP_ASPM) == 0)
5087 		return;
5088 	reg = base + PCIR_EXPRESS_LINK_CTL;
5089 	link_ctrl = pci_read_config(dev, reg, 2);
5090 	link_ctrl &= 0xFFFC; /* turn off bit 1 and 2 */
5091 	pci_write_config(dev, reg, link_ctrl, 2);
5092 	return;
5093 }
5094 
5095 /**********************************************************************
5096  *
5097  *  Update the board statistics counters.
5098  *
5099  **********************************************************************/
5100 static void
5101 em_update_stats_counters(struct adapter *adapter)
5102 {
5103 	struct ifnet   *ifp;
5104 
5105 	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
5106 	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
5107 		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
5108 		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
5109 	}
5110 	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
5111 	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
5112 	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
5113 	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
5114 
5115 	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
5116 	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
5117 	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
5118 	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
5119 	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
5120 	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
5121 	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
5122 	/*
5123 	** For watchdog management we need to know if we have been
5124 	** paused during the last interval, so capture that here.
5125 	*/
5126 	adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
5127 	adapter->stats.xoffrxc += adapter->pause_frames;
5128 	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
5129 	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
5130 	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
5131 	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
5132 	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
5133 	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
5134 	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
5135 	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
5136 	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
5137 	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
5138 	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
5139 	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
5140 
5141 	/* For the 64-bit byte counters the low dword must be read first. */
5142 	/* Both registers clear on the read of the high dword */
5143 
5144 	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
5145 	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
5146 	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
5147 	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
5148 
5149 	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
5150 	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
5151 	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
5152 	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
5153 	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
5154 
5155 	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
5156 	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
5157 
5158 	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
5159 	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
5160 	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
5161 	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
5162 	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
5163 	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
5164 	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
5165 	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
5166 	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
5167 	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
5168 
5169 	/* Interrupt Counts */
5170 
5171 	adapter->stats.iac += E1000_READ_REG(&adapter->hw, E1000_IAC);
5172 	adapter->stats.icrxptc += E1000_READ_REG(&adapter->hw, E1000_ICRXPTC);
5173 	adapter->stats.icrxatc += E1000_READ_REG(&adapter->hw, E1000_ICRXATC);
5174 	adapter->stats.ictxptc += E1000_READ_REG(&adapter->hw, E1000_ICTXPTC);
5175 	adapter->stats.ictxatc += E1000_READ_REG(&adapter->hw, E1000_ICTXATC);
5176 	adapter->stats.ictxqec += E1000_READ_REG(&adapter->hw, E1000_ICTXQEC);
5177 	adapter->stats.ictxqmtc += E1000_READ_REG(&adapter->hw, E1000_ICTXQMTC);
5178 	adapter->stats.icrxdmtc += E1000_READ_REG(&adapter->hw, E1000_ICRXDMTC);
5179 	adapter->stats.icrxoc += E1000_READ_REG(&adapter->hw, E1000_ICRXOC);
5180 
5181 	if (adapter->hw.mac.type >= e1000_82543) {
5182 		adapter->stats.algnerrc +=
5183 		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
5184 		adapter->stats.rxerrc +=
5185 		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
5186 		adapter->stats.tncrs +=
5187 		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
5188 		adapter->stats.cexterr +=
5189 		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
5190 		adapter->stats.tsctc +=
5191 		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
5192 		adapter->stats.tsctfc +=
5193 		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
5194 	}
5195 	ifp = adapter->ifp;
5196 
5197 	ifp->if_collisions = adapter->stats.colc;
5198 
5199 	/* Rx Errors */
5200 	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
5201 	    adapter->stats.crcerrs + adapter->stats.algnerrc +
5202 	    adapter->stats.ruc + adapter->stats.roc +
5203 	    adapter->stats.mpc + adapter->stats.cexterr;
5204 
5205 	/* Tx Errors */
5206 	ifp->if_oerrors = adapter->stats.ecol +
5207 	    adapter->stats.latecol + adapter->watchdog_events;
5208 }
5209 
5210 /* Export a single 32-bit register via a read-only sysctl. */
5211 static int
5212 em_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
5213 {
5214 	struct adapter *adapter;
5215 	u_int val;
5216 
5217 	adapter = oidp->oid_arg1;
5218 	val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
5219 	return (sysctl_handle_int(oidp, &val, 0, req));
5220 }
5221 
5222 /*
5223  * Add sysctl variables, one per statistic, to the system.
5224  */
5225 static void
5226 em_add_hw_stats(struct adapter *adapter)
5227 {
5228 	device_t dev = adapter->dev;
5229 
5230 	struct tx_ring *txr = adapter->tx_rings;
5231 	struct rx_ring *rxr = adapter->rx_rings;
5232 
5233 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
5234 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
5235 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
5236 	struct e1000_hw_stats *stats = &adapter->stats;
5237 
5238 	struct sysctl_oid *stat_node, *queue_node, *int_node;
5239 	struct sysctl_oid_list *stat_list, *queue_list, *int_list;
5240 
5241 #define QUEUE_NAME_LEN 32
5242 	char namebuf[QUEUE_NAME_LEN];
5243 
5244 	/* Driver Statistics */
5245 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
5246 			CTLFLAG_RD, &adapter->link_irq,
5247 			"Link MSIX IRQ Handled");
5248 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
5249 			 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
5250 			 "Std mbuf failed");
5251 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
5252 			 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
5253 			 "Std mbuf cluster failed");
5254 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
5255 			CTLFLAG_RD, &adapter->dropped_pkts,
5256 			"Driver dropped packets");
5257 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
5258 			CTLFLAG_RD, &adapter->no_tx_dma_setup,
5259 			"Driver tx dma failure in xmit");
5260 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
5261 			CTLFLAG_RD, &adapter->rx_overruns,
5262 			"RX overruns");
5263 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
5264 			CTLFLAG_RD, &adapter->watchdog_events,
5265 			"Watchdog timeouts");
5266 
5267 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
5268 			CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
5269 			em_sysctl_reg_handler, "IU",
5270 			"Device Control Register");
5271 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
5272 			CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
5273 			em_sysctl_reg_handler, "IU",
5274 			"Receiver Control Register");
5275 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
5276 			CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
5277 			"Flow Control High Watermark");
5278 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
5279 			CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
5280 			"Flow Control Low Watermark");
5281 
5282 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
5283 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
5284 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5285 					    CTLFLAG_RD, NULL, "Queue Name");
5286 		queue_list = SYSCTL_CHILDREN(queue_node);
5287 
5288 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
5289 				CTLTYPE_UINT | CTLFLAG_RD, adapter,
5290 				E1000_TDH(txr->me),
5291 				em_sysctl_reg_handler, "IU",
5292  				"Transmit Descriptor Head");
5293 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
5294 				CTLTYPE_UINT | CTLFLAG_RD, adapter,
5295 				E1000_TDT(txr->me),
5296 				em_sysctl_reg_handler, "IU",
5297  				"Transmit Descriptor Tail");
5298 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq",
5299 				CTLFLAG_RD, &txr->tx_irq,
5300 				"Queue MSI-X Transmit Interrupts");
5301 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_desc_avail",
5302 				CTLFLAG_RD, &txr->no_desc_avail,
5303 				"Queue No Descriptor Available");
5304 
5305 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
5306 				CTLTYPE_UINT | CTLFLAG_RD, adapter,
5307 				E1000_RDH(rxr->me),
5308 				em_sysctl_reg_handler, "IU",
5309 				"Receive Descriptor Head");
5310 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
5311 				CTLTYPE_UINT | CTLFLAG_RD, adapter,
5312 				E1000_RDT(rxr->me),
5313 				em_sysctl_reg_handler, "IU",
5314 				"Receive Descriptor Tail");
5315 		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq",
5316 				CTLFLAG_RD, &rxr->rx_irq,
5317 				"Queue MSI-X Receive Interrupts");
5318 	}
5319 
5320 	/* MAC stats get their own sub node */
5321 
5322 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
5323 				    CTLFLAG_RD, NULL, "Statistics");
5324 	stat_list = SYSCTL_CHILDREN(stat_node);
5325 
5326 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
5327 			CTLFLAG_RD, &stats->ecol,
5328 			"Excessive collisions");
5329 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
5330 			CTLFLAG_RD, &stats->scc,
5331 			"Single collisions");
5332 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
5333 			CTLFLAG_RD, &stats->mcc,
5334 			"Multiple collisions");
5335 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
5336 			CTLFLAG_RD, &stats->latecol,
5337 			"Late collisions");
5338 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
5339 			CTLFLAG_RD, &stats->colc,
5340 			"Collision Count");
5341 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
5342 			CTLFLAG_RD, &adapter->stats.symerrs,
5343 			"Symbol Errors");
5344 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
5345 			CTLFLAG_RD, &adapter->stats.sec,
5346 			"Sequence Errors");
5347 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
5348 			CTLFLAG_RD, &adapter->stats.dc,
5349 			"Defer Count");
5350 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
5351 			CTLFLAG_RD, &adapter->stats.mpc,
5352 			"Missed Packets");
5353 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
5354 			CTLFLAG_RD, &adapter->stats.rnbc,
5355 			"Receive No Buffers");
5356 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
5357 			CTLFLAG_RD, &adapter->stats.ruc,
5358 			"Receive Undersize");
5359 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
5360 			CTLFLAG_RD, &adapter->stats.rfc,
5361 			"Fragmented Packets Received ");
5362 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
5363 			CTLFLAG_RD, &adapter->stats.roc,
5364 			"Oversized Packets Received");
5365 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
5366 			CTLFLAG_RD, &adapter->stats.rjc,
5367 			"Recevied Jabber");
5368 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
5369 			CTLFLAG_RD, &adapter->stats.rxerrc,
5370 			"Receive Errors");
5371 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
5372 			CTLFLAG_RD, &adapter->stats.crcerrs,
5373 			"CRC errors");
5374 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
5375 			CTLFLAG_RD, &adapter->stats.algnerrc,
5376 			"Alignment Errors");
5377 	/* On 82575 these are collision counts */
5378 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
5379 			CTLFLAG_RD, &adapter->stats.cexterr,
5380 			"Collision/Carrier extension errors");
5381 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
5382 			CTLFLAG_RD, &adapter->stats.xonrxc,
5383 			"XON Received");
5384 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
5385 			CTLFLAG_RD, &adapter->stats.xontxc,
5386 			"XON Transmitted");
5387 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
5388 			CTLFLAG_RD, &adapter->stats.xoffrxc,
5389 			"XOFF Received");
5390 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
5391 			CTLFLAG_RD, &adapter->stats.xofftxc,
5392 			"XOFF Transmitted");
5393 
5394 	/* Packet Reception Stats */
5395 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
5396 			CTLFLAG_RD, &adapter->stats.tpr,
5397 			"Total Packets Received ");
5398 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
5399 			CTLFLAG_RD, &adapter->stats.gprc,
5400 			"Good Packets Received");
5401 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
5402 			CTLFLAG_RD, &adapter->stats.bprc,
5403 			"Broadcast Packets Received");
5404 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
5405 			CTLFLAG_RD, &adapter->stats.mprc,
5406 			"Multicast Packets Received");
5407 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
5408 			CTLFLAG_RD, &adapter->stats.prc64,
5409 			"64 byte frames received ");
5410 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
5411 			CTLFLAG_RD, &adapter->stats.prc127,
5412 			"65-127 byte frames received");
5413 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
5414 			CTLFLAG_RD, &adapter->stats.prc255,
5415 			"128-255 byte frames received");
5416 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
5417 			CTLFLAG_RD, &adapter->stats.prc511,
5418 			"256-511 byte frames received");
5419 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
5420 			CTLFLAG_RD, &adapter->stats.prc1023,
5421 			"512-1023 byte frames received");
5422 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
5423 			CTLFLAG_RD, &adapter->stats.prc1522,
5424 			"1023-1522 byte frames received");
5425  	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
5426  			CTLFLAG_RD, &adapter->stats.gorc,
5427  			"Good Octets Received");
5428 
5429 	/* Packet Transmission Stats */
5430  	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
5431  			CTLFLAG_RD, &adapter->stats.gotc,
5432  			"Good Octets Transmitted");
5433 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
5434 			CTLFLAG_RD, &adapter->stats.tpt,
5435 			"Total Packets Transmitted");
5436 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
5437 			CTLFLAG_RD, &adapter->stats.gptc,
5438 			"Good Packets Transmitted");
5439 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
5440 			CTLFLAG_RD, &adapter->stats.bptc,
5441 			"Broadcast Packets Transmitted");
5442 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
5443 			CTLFLAG_RD, &adapter->stats.mptc,
5444 			"Multicast Packets Transmitted");
5445 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
5446 			CTLFLAG_RD, &adapter->stats.ptc64,
5447 			"64 byte frames transmitted ");
5448 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
5449 			CTLFLAG_RD, &adapter->stats.ptc127,
5450 			"65-127 byte frames transmitted");
5451 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
5452 			CTLFLAG_RD, &adapter->stats.ptc255,
5453 			"128-255 byte frames transmitted");
5454 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
5455 			CTLFLAG_RD, &adapter->stats.ptc511,
5456 			"256-511 byte frames transmitted");
5457 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
5458 			CTLFLAG_RD, &adapter->stats.ptc1023,
5459 			"512-1023 byte frames transmitted");
5460 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
5461 			CTLFLAG_RD, &adapter->stats.ptc1522,
5462 			"1024-1522 byte frames transmitted");
5463 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
5464 			CTLFLAG_RD, &adapter->stats.tsctc,
5465 			"TSO Contexts Transmitted");
5466 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
5467 			CTLFLAG_RD, &adapter->stats.tsctfc,
5468 			"TSO Contexts Failed");
5469 
5470 
5471 	/* Interrupt Stats */
5472 
5473 	int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
5474 				    CTLFLAG_RD, NULL, "Interrupt Statistics");
5475 	int_list = SYSCTL_CHILDREN(int_node);
5476 
5477 	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
5478 			CTLFLAG_RD, &adapter->stats.iac,
5479 			"Interrupt Assertion Count");
5480 
5481 	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
5482 			CTLFLAG_RD, &adapter->stats.icrxptc,
5483 			"Interrupt Cause Rx Pkt Timer Expire Count");
5484 
5485 	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
5486 			CTLFLAG_RD, &adapter->stats.icrxatc,
5487 			"Interrupt Cause Rx Abs Timer Expire Count");
5488 
5489 	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
5490 			CTLFLAG_RD, &adapter->stats.ictxptc,
5491 			"Interrupt Cause Tx Pkt Timer Expire Count");
5492 
5493 	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
5494 			CTLFLAG_RD, &adapter->stats.ictxatc,
5495 			"Interrupt Cause Tx Abs Timer Expire Count");
5496 
5497 	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
5498 			CTLFLAG_RD, &adapter->stats.ictxqec,
5499 			"Interrupt Cause Tx Queue Empty Count");
5500 
5501 	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
5502 			CTLFLAG_RD, &adapter->stats.ictxqmtc,
5503 			"Interrupt Cause Tx Queue Min Thresh Count");
5504 
5505 	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
5506 			CTLFLAG_RD, &adapter->stats.icrxdmtc,
5507 			"Interrupt Cause Rx Desc Min Thresh Count");
5508 
5509 	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun",
5510 			CTLFLAG_RD, &adapter->stats.icrxoc,
5511 			"Interrupt Cause Receiver Overrun Count");
5512 }
5513 
5514 /**********************************************************************
5515  *
5516  *  This routine provides a way to dump out the adapter eeprom,
5517  *  often a useful debug/service tool. This only dumps the first
5518  *  32 words, stuff that matters is in that extent.
5519  *
5520  **********************************************************************/
5521 static int
5522 em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
5523 {
5524 	struct adapter *adapter = (struct adapter *)arg1;
5525 	int error;
5526 	int result;
5527 
5528 	result = -1;
5529 	error = sysctl_handle_int(oidp, &result, 0, req);
5530 
5531 	if (error || !req->newptr)
5532 		return (error);
5533 
5534 	/*
5535 	 * This value will cause a hex dump of the
5536 	 * first 32 16-bit words of the EEPROM to
5537 	 * the screen.
5538 	 */
5539 	if (result == 1)
5540 		em_print_nvm_info(adapter);
5541 
5542 	return (error);
5543 }
5544 
5545 static void
5546 em_print_nvm_info(struct adapter *adapter)
5547 {
5548 	u16	eeprom_data;
5549 	int	i, j, row = 0;
5550 
5551 	/* Its a bit crude, but it gets the job done */
5552 	printf("\nInterface EEPROM Dump:\n");
5553 	printf("Offset\n0x0000  ");
5554 	for (i = 0, j = 0; i < 32; i++, j++) {
5555 		if (j == 8) { /* Make the offset block */
5556 			j = 0; ++row;
5557 			printf("\n0x00%x0  ",row);
5558 		}
5559 		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5560 		printf("%04x ", eeprom_data);
5561 	}
5562 	printf("\n");
5563 }
5564 
5565 static int
5566 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
5567 {
5568 	struct em_int_delay_info *info;
5569 	struct adapter *adapter;
5570 	u32 regval;
5571 	int error, usecs, ticks;
5572 
5573 	info = (struct em_int_delay_info *)arg1;
5574 	usecs = info->value;
5575 	error = sysctl_handle_int(oidp, &usecs, 0, req);
5576 	if (error != 0 || req->newptr == NULL)
5577 		return (error);
5578 	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
5579 		return (EINVAL);
5580 	info->value = usecs;
5581 	ticks = EM_USECS_TO_TICKS(usecs);
5582 
5583 	adapter = info->adapter;
5584 
5585 	EM_CORE_LOCK(adapter);
5586 	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
5587 	regval = (regval & ~0xffff) | (ticks & 0xffff);
5588 	/* Handle a few special cases. */
5589 	switch (info->offset) {
5590 	case E1000_RDTR:
5591 		break;
5592 	case E1000_TIDV:
5593 		if (ticks == 0) {
5594 			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
5595 			/* Don't write 0 into the TIDV register. */
5596 			regval++;
5597 		} else
5598 			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
5599 		break;
5600 	}
5601 	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
5602 	EM_CORE_UNLOCK(adapter);
5603 	return (0);
5604 }
5605 
5606 static void
5607 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5608 	const char *description, struct em_int_delay_info *info,
5609 	int offset, int value)
5610 {
5611 	info->adapter = adapter;
5612 	info->offset = offset;
5613 	info->value = value;
5614 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5615 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5616 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5617 	    info, 0, em_sysctl_int_delay, "I", description);
5618 }
5619 
5620 static void
5621 em_set_sysctl_value(struct adapter *adapter, const char *name,
5622 	const char *description, int *limit, int value)
5623 {
5624 	*limit = value;
5625 	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5626 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5627 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
5628 }
5629 
5630 
5631 /*
5632 ** Set flow control using sysctl:
5633 ** Flow control values:
5634 **      0 - off
5635 **      1 - rx pause
5636 **      2 - tx pause
5637 **      3 - full
5638 */
5639 static int
5640 em_set_flowcntl(SYSCTL_HANDLER_ARGS)
5641 {
5642         int		error;
5643 	static int	input = 3; /* default is full */
5644         struct adapter	*adapter = (struct adapter *) arg1;
5645 
5646         error = sysctl_handle_int(oidp, &input, 0, req);
5647 
5648         if ((error) || (req->newptr == NULL))
5649                 return (error);
5650 
5651 	if (input == adapter->fc) /* no change? */
5652 		return (error);
5653 
5654         switch (input) {
5655                 case e1000_fc_rx_pause:
5656                 case e1000_fc_tx_pause:
5657                 case e1000_fc_full:
5658                 case e1000_fc_none:
5659                         adapter->hw.fc.requested_mode = input;
5660 			adapter->fc = input;
5661                         break;
5662                 default:
5663 			/* Do nothing */
5664 			return (error);
5665         }
5666 
5667         adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
5668         e1000_force_mac_fc(&adapter->hw);
5669         return (error);
5670 }
5671 
5672 
5673 static int
5674 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5675 {
5676 	struct adapter *adapter;
5677 	int error;
5678 	int result;
5679 
5680 	result = -1;
5681 	error = sysctl_handle_int(oidp, &result, 0, req);
5682 
5683 	if (error || !req->newptr)
5684 		return (error);
5685 
5686 	if (result == 1) {
5687 		adapter = (struct adapter *)arg1;
5688 		em_print_debug_info(adapter);
5689         }
5690 
5691 	return (error);
5692 }
5693 
5694 /*
5695 ** This routine is meant to be fluid, add whatever is
5696 ** needed for debugging a problem.  -jfv
5697 */
5698 static void
5699 em_print_debug_info(struct adapter *adapter)
5700 {
5701 	device_t dev = adapter->dev;
5702 	struct tx_ring *txr = adapter->tx_rings;
5703 	struct rx_ring *rxr = adapter->rx_rings;
5704 
5705 	if (adapter->ifp->if_drv_flags & IFF_DRV_RUNNING)
5706 		printf("Interface is RUNNING ");
5707 	else
5708 		printf("Interface is NOT RUNNING\n");
5709 
5710 	if (adapter->ifp->if_drv_flags & IFF_DRV_OACTIVE)
5711 		printf("and INACTIVE\n");
5712 	else
5713 		printf("and ACTIVE\n");
5714 
5715 	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5716 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5717 	    E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5718 	device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5719 	    E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5720 	    E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5721 	device_printf(dev, "Tx Queue Status = %d\n", txr->queue_status);
5722 	device_printf(dev, "TX descriptors avail = %d\n",
5723 	    txr->tx_avail);
5724 	device_printf(dev, "Tx Descriptors avail failure = %ld\n",
5725 	    txr->no_desc_avail);
5726 	device_printf(dev, "RX discarded packets = %ld\n",
5727 	    rxr->rx_discarded);
5728 	device_printf(dev, "RX Next to Check = %d\n", rxr->next_to_check);
5729 	device_printf(dev, "RX Next to Refresh = %d\n", rxr->next_to_refresh);
5730 }
5731