xref: /freebsd/sys/dev/e1000/if_em.c (revision 409a390c3341fb4f162cd7de1fd595a323ebbfd8)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2009, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
37 #include "opt_inet.h"
38 #endif
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #if __FreeBSD_version >= 800000
43 #include <sys/buf_ring.h>
44 #endif
45 #include <sys/bus.h>
46 #include <sys/endian.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>
51 #include <sys/module.h>
52 #include <sys/rman.h>
53 #include <sys/socket.h>
54 #include <sys/sockio.h>
55 #include <sys/sysctl.h>
56 #include <sys/taskqueue.h>
57 #if __FreeBSD_version >= 700029
58 #include <sys/eventhandler.h>
59 #endif
60 #include <machine/bus.h>
61 #include <machine/resource.h>
62 
63 #include <net/bpf.h>
64 #include <net/ethernet.h>
65 #include <net/if.h>
66 #include <net/if_arp.h>
67 #include <net/if_dl.h>
68 #include <net/if_media.h>
69 
70 #include <net/if_types.h>
71 #include <net/if_vlan_var.h>
72 
73 #include <netinet/in_systm.h>
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 #include <netinet/ip.h>
77 #include <netinet/ip6.h>
78 #include <netinet/tcp.h>
79 #include <netinet/udp.h>
80 
81 #include <machine/in_cksum.h>
82 #include <dev/pci/pcivar.h>
83 #include <dev/pci/pcireg.h>
84 
85 #include "e1000_api.h"
86 #include "e1000_82571.h"
87 #include "if_em.h"
88 
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
/* Nonzero enables extra debug-statistics output; consumed elsewhere in
 * this driver (debug sysctl / print paths) — not referenced in this chunk. */
int	em_display_debug_stats = 0;

/*********************************************************************
 *  Driver version:
 *********************************************************************/
/* Appended to the branding string reported by em_probe(). */
char em_driver_version[] = "6.9.24";
98 
99 
100 /*********************************************************************
101  *  PCI Device ID Table
102  *
103  *  Used by probe to select devices to load on
104  *  Last field stores an index into e1000_strings
105  *  Last entry must be all 0s
106  *
107  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
108  *********************************************************************/
109 
/*
 * Probe match table, scanned linearly by em_probe().  An entry matches
 * when vendor and device IDs are equal and the subvendor/subdevice IDs
 * either match or are wildcarded with PCI_ANY_ID.  The final field is
 * an index into em_strings[].  The all-zero entry terminates the scan.
 */
static em_vendor_info_t em_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82573E,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573E_IAMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573L,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82583V,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_82567V_3,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_BM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82574L,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82574LA,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_PCH_M_HV_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_PCH_M_HV_LC,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_PCH_D_HV_DM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_PCH_D_HV_DC,	PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{ 0, 0, 0, 0, 0}
};
221 
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

/* Indexed by the last (string-index) field of em_vendor_info_array
 * entries; em_probe() formats em_strings[ent->index] into the device
 * description.  Keep in sync with that table. */
static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
229 
230 /*********************************************************************
231  *  Function prototypes
232  *********************************************************************/
233 static int	em_probe(device_t);
234 static int	em_attach(device_t);
235 static int	em_detach(device_t);
236 static int	em_shutdown(device_t);
237 static int	em_suspend(device_t);
238 static int	em_resume(device_t);
239 static void	em_start(struct ifnet *);
240 static void	em_start_locked(struct ifnet *ifp);
241 #if __FreeBSD_version >= 800000
242 static int	em_mq_start(struct ifnet *, struct mbuf *);
243 static int	em_mq_start_locked(struct ifnet *, struct mbuf *);
244 static void	em_qflush(struct ifnet *);
245 #endif
246 static int	em_ioctl(struct ifnet *, u_long, caddr_t);
247 static void	em_init(void *);
248 static void	em_init_locked(struct adapter *);
249 static void	em_stop(void *);
250 static void	em_media_status(struct ifnet *, struct ifmediareq *);
251 static int	em_media_change(struct ifnet *);
252 static void	em_identify_hardware(struct adapter *);
253 static int	em_allocate_pci_resources(struct adapter *);
254 static int	em_allocate_legacy(struct adapter *adapter);
255 static int	em_allocate_msix(struct adapter *adapter);
256 static int	em_setup_msix(struct adapter *);
257 static void	em_free_pci_resources(struct adapter *);
258 static void	em_local_timer(void *);
259 static int	em_hardware_init(struct adapter *);
260 static void	em_setup_interface(device_t, struct adapter *);
261 static void	em_setup_transmit_structures(struct adapter *);
262 static void	em_initialize_transmit_unit(struct adapter *);
263 static int	em_setup_receive_structures(struct adapter *);
264 static void	em_initialize_receive_unit(struct adapter *);
265 static void	em_enable_intr(struct adapter *);
266 static void	em_disable_intr(struct adapter *);
267 static void	em_free_transmit_structures(struct adapter *);
268 static void	em_free_receive_structures(struct adapter *);
269 static void	em_update_stats_counters(struct adapter *);
270 static void	em_txeof(struct adapter *);
271 static void	em_tx_purge(struct adapter *);
272 static int	em_allocate_receive_structures(struct adapter *);
273 static int	em_allocate_transmit_structures(struct adapter *);
274 static int	em_rxeof(struct adapter *, int);
275 #ifndef __NO_STRICT_ALIGNMENT
276 static int	em_fixup_rx(struct adapter *);
277 #endif
278 static void	em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
279 		    struct mbuf *);
280 static void	em_transmit_checksum_setup(struct adapter *, struct mbuf *,
281 		    u32 *, u32 *);
282 #if __FreeBSD_version >= 700000
283 static bool	em_tso_setup(struct adapter *, struct mbuf *,
284 		    u32 *, u32 *);
285 #endif /* FreeBSD_version >= 700000 */
286 static void	em_set_promisc(struct adapter *);
287 static void	em_disable_promisc(struct adapter *);
288 static void	em_set_multi(struct adapter *);
289 static void	em_print_hw_stats(struct adapter *);
290 static void	em_update_link_status(struct adapter *);
291 static int	em_get_buf(struct adapter *, int);
292 #if __FreeBSD_version >= 700029
293 static void	em_register_vlan(void *, struct ifnet *, u16);
294 static void	em_unregister_vlan(void *, struct ifnet *, u16);
295 static void	em_setup_vlan_hw_support(struct adapter *);
296 #endif
297 static int	em_xmit(struct adapter *, struct mbuf **);
298 static void	em_smartspeed(struct adapter *);
299 static int	em_82547_fifo_workaround(struct adapter *, int);
300 static void	em_82547_update_fifo_head(struct adapter *, int);
301 static int	em_82547_tx_fifo_reset(struct adapter *);
302 static void	em_82547_move_tail(void *);
303 static int	em_dma_malloc(struct adapter *, bus_size_t,
304 		    struct em_dma_alloc *, int);
305 static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
306 static void	em_print_debug_info(struct adapter *);
307 static void	em_print_nvm_info(struct adapter *);
308 static int 	em_is_valid_ether_addr(u8 *);
309 static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
310 static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
311 static u32	em_fill_descriptors (bus_addr_t address, u32 length,
312 		    PDESC_ARRAY desc_array);
313 static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
314 static void	em_add_int_delay_sysctl(struct adapter *, const char *,
315 		    const char *, struct em_int_delay_info *, int, int);
316 /* Management and WOL Support */
317 static void	em_init_manageability(struct adapter *);
318 static void	em_release_manageability(struct adapter *);
319 static void     em_get_hw_control(struct adapter *);
320 static void     em_release_hw_control(struct adapter *);
321 static void	em_get_wakeup(device_t);
322 static void     em_enable_wakeup(device_t);
323 static int	em_enable_phy_wakeup(struct adapter *);
324 
325 #ifdef EM_LEGACY_IRQ
326 static void	em_intr(void *);
327 #else /* FAST IRQ */
328 #if __FreeBSD_version < 700000
329 static void	em_irq_fast(void *);
330 #else
331 static int	em_irq_fast(void *);
332 #endif
333 
334 /* MSIX handlers */
335 static void	em_msix_tx(void *);
336 static void	em_msix_rx(void *);
337 static void	em_msix_link(void *);
338 static void	em_handle_rx(void *context, int pending);
339 static void	em_handle_tx(void *context, int pending);
340 
341 static void	em_handle_rxtx(void *context, int pending);
342 static void	em_handle_link(void *context, int pending);
343 static void	em_add_rx_process_limit(struct adapter *, const char *,
344 		    const char *, int *, int);
345 #endif /* ~EM_LEGACY_IRQ */
346 
347 #ifdef DEVICE_POLLING
348 static poll_handler_t em_poll;
349 #endif /* POLLING */
350 
351 /*********************************************************************
352  *  FreeBSD Device Interface Entry Points
353  *********************************************************************/
354 
/* Newbus device-interface dispatch table for the em(4) driver. */
static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	DEVMETHOD(device_suspend, em_suspend),
	DEVMETHOD(device_resume, em_resume),
	{0, 0}	/* terminator entry required by newbus */
};

/* sizeof(struct adapter) tells newbus how much softc to allocate
 * before attach; em_attach() retrieves it via device_get_softc(). */
static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter),
};

static devclass_t em_devclass;
/* Register on the pci bus; declare load-order dependencies. */
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
374 
375 /*********************************************************************
376  *  Tunable default values.
377  *********************************************************************/
378 
/* Hardware interrupt-delay registers count in 1.024 us ticks; these
 * convert tick<->usec with rounding to nearest (hence the +500/+512). */
#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
/* NOTE(review): presumably the minimum mbuf length for a TSO header
 * block — confirm against em_tso_setup() usage. */
#define M_TSO_LEN			66

/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO	0
#endif

/* Defaults for the interrupt-delay sysctls created in em_attach(). */
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
/* Requested ring sizes; validated (and possibly clamped to the
 * EM_DEFAULT_* values) in em_attach(). */
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;
static int em_smart_pwr_down = FALSE;
/* Controls whether promiscuous also shows bad packets */
static int em_debug_sbp = FALSE;
/* Local switch for MSI/MSIX */
static int em_enable_msi = TRUE;

/* Loader tunables (hw.em.*) overriding the defaults above at boot. */
TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);

#ifndef EM_LEGACY_IRQ
/* How many packets rxeof tries to clean at a time */
static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
#endif

/* Flow control setting - default to FULL */
static int em_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.em.fc_setting", &em_fc_setting);

/*
** Shadow VFTA table, this is needed because
** the real vlan filter table gets cleared during
** a soft reset and the driver needs to be able
** to repopulate it.
*/
static u32 em_shadow_vfta[EM_VFTA_SIZE];

/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
430 
431 /*********************************************************************
432  *  Device identification routine
433  *
434  *  em_probe determines if the driver should be loaded on
435  *  adapter based on PCI vendor/device id of the adapter.
436  *
437  *  return BUS_PROBE_DEFAULT on success, positive on failure
438  *********************************************************************/
439 
440 static int
441 em_probe(device_t dev)
442 {
443 	char		adapter_name[60];
444 	u16		pci_vendor_id = 0;
445 	u16		pci_device_id = 0;
446 	u16		pci_subvendor_id = 0;
447 	u16		pci_subdevice_id = 0;
448 	em_vendor_info_t *ent;
449 
450 	INIT_DEBUGOUT("em_probe: begin");
451 
452 	pci_vendor_id = pci_get_vendor(dev);
453 	if (pci_vendor_id != EM_VENDOR_ID)
454 		return (ENXIO);
455 
456 	pci_device_id = pci_get_device(dev);
457 	pci_subvendor_id = pci_get_subvendor(dev);
458 	pci_subdevice_id = pci_get_subdevice(dev);
459 
460 	ent = em_vendor_info_array;
461 	while (ent->vendor_id != 0) {
462 		if ((pci_vendor_id == ent->vendor_id) &&
463 		    (pci_device_id == ent->device_id) &&
464 
465 		    ((pci_subvendor_id == ent->subvendor_id) ||
466 		    (ent->subvendor_id == PCI_ANY_ID)) &&
467 
468 		    ((pci_subdevice_id == ent->subdevice_id) ||
469 		    (ent->subdevice_id == PCI_ANY_ID))) {
470 			sprintf(adapter_name, "%s %s",
471 				em_strings[ent->index],
472 				em_driver_version);
473 			device_set_desc_copy(dev, adapter_name);
474 			return (BUS_PROBE_DEFAULT);
475 		}
476 		ent++;
477 	}
478 
479 	return (ENXIO);
480 }
481 
482 /*********************************************************************
483  *  Device initialization routine
484  *
485  *  The attach entry point is called when the driver is being loaded.
486  *  This routine identifies the type of hardware, allocates all resources
487  *  and initializes the hardware.
488  *
489  *  return 0 on success, positive on failure
490  *********************************************************************/
491 
static int
em_attach(device_t dev)
{
	struct adapter	*adapter;
	int		tsize, rsize;	/* byte sizes of the TX/RX descriptor rings */
	int		error = 0;

	INIT_DEBUGOUT("em_attach: begin");

	/* The softc was allocated by newbus (sized via em_driver). */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    em_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    em_sysctl_stats, "I", "Statistics");

	/* Callouts are tied to the mutexes initialized above. */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);

	/* Determine hardware and mac info */
	em_identify_hardware(adapter);

	/* Setup PCI resources */
	if (em_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/*
	** For ICH8 and family we need to
	** map the flash memory, and this
	** must happen after the MAC is
	** identified
	*/
	if ((adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_pchlan) ||
	    (adapter->hw.mac.type == e1000_ich9lan) ||
	    (adapter->hw.mac.type == e1000_ich10lan)) {
		int rid = EM_BAR_TYPE_FLASH;
		adapter->flash = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
		if (adapter->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto err_pci;
		}
		/* This is used in the shared code */
		adapter->hw.flash_address = (u8 *)adapter->flash;
		adapter->osdep.flash_bus_space_tag =
		    rman_get_bustag(adapter->flash);
		adapter->osdep.flash_bus_space_handle =
		    rman_get_bushandle(adapter->flash);
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto err_pci;
	}

	e1000_get_bus_info(&adapter->hw);

	/* Set up some sysctls for the tunable interrupt delays */
	em_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
	em_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
	/* Absolute-delay (RADV/TADV) registers exist on 82540 and later. */
	if (adapter->hw.mac.type >= e1000_82540) {
		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_RADV),
		    em_rx_abs_int_delay_dflt);
		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_TADV),
		    em_tx_abs_int_delay_dflt);
	}

#ifndef EM_LEGACY_IRQ
	/* Sysctls for limiting the amount of work done in the taskqueue */
	em_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    em_rx_process_limit);
#endif

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.  Fall back to the defaults (with a
	 * console warning) rather than failing attach.
	 */
	if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
	    (em_txd < EM_MIN_TXD)) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = em_txd;
	if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    (em_rxd < EM_MIN_RXD)) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = em_rxd;

	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->rx_buffer_len = 2048;

	/* No-ops for parts other than 82541/82543 families per shared
	 * code naming — NOTE(review): confirm in e1000 shared code. */
	e1000_init_script_state_82541(&adapter->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
	}

	/*
	 * Set the frame limits assuming
	 * standard ethernet sized frames.
	 */
	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.mac.report_tx_early = 1;

	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base =
	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base =
	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;

	/*
	** Start from a known state, this is
	** important in reading the nvm and
	** mac from that.
	*/
	e1000_reset_hw(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
		/*
		** Some PCI-E parts fail the first check due to
		** the link being in sleep state, call it again,
		** if it fails a second time its a real issue.
		*/
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto err_hw_init;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto err_hw_init;
	}

	if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Allocate transmit descriptors and buffers */
	if (em_allocate_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		error = ENOMEM;
		goto err_tx_struct;
	}

	/* Allocate receive descriptors and buffers */
	if (em_allocate_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		error = ENOMEM;
		goto err_rx_struct;
	}

	/*
	**  Do interrupt configuration
	*/
	if (adapter->msi > 1) /* Do MSI/X */
		error = em_allocate_msix(adapter);
	else  /* MSI or Legacy */
		error = em_allocate_legacy(adapter);
	if (error)
		goto err_rx_struct;

	/*
	 * Get Wake-on-Lan and Management info for later use
	 */
	em_get_wakeup(dev);

	/* Setup OS specific network interface */
	em_setup_interface(dev, adapter);

	/* Initialize statistics */
	em_update_stats_counters(adapter);

	adapter->hw.mac.get_link_status = 1;
	em_update_link_status(adapter);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw))
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");

	/* Do we need workaround for 82544 PCI-X adapter? */
	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
	    adapter->hw.mac.type == e1000_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

#if __FreeBSD_version >= 700029
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Non-AMT based hardware can now take control from firmware */
	if (adapter->has_manage && !adapter->has_amt)
		em_get_hw_control(adapter);

	/* Tell the stack that the interface is not active */
	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	INIT_DEBUGOUT("em_attach: end");

	return (0);

	/*
	 * Error unwind: labels fall through so that each failure point
	 * releases everything acquired before it, in reverse order.
	 */
err_rx_struct:
	em_free_transmit_structures(adapter);
err_tx_struct:
err_hw_init:
	em_release_hw_control(adapter);
	em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	em_free_pci_resources(adapter);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (error);
}
798 
799 /*********************************************************************
800  *  Device removal routine
801  *
802  *  The detach entry point is called when the driver is being removed.
803  *  This routine stops the adapter and deallocates all the resources
804  *  that were allocated for driver operation.
805  *
806  *  return 0 on success, positive on failure
807  *********************************************************************/
808 
/*
 * em_detach() - device detach entry point.
 *
 * Refuses to detach while VLANs are still configured on the interface,
 * then stops the adapter under both locks, tears down OS glue (polling
 * registration, VLAN event handlers, ifnet, callouts) and releases
 * every resource em_attach() acquired.
 *
 * Returns 0 on success, EBUSY if VLANs are in use.
 */
static int
em_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
#if __FreeBSD_version >= 700000
	if (adapter->ifp->if_vlantrunk != NULL) {
#else
	if (adapter->ifp->if_nvlans != 0) {
#endif
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* Quiesce the hardware before any teardown */
	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	adapter->in_detach = 1;	/* makes em_ioctl() a no-op from here on */
	em_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	em_release_manageability(adapter);

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

#if __FreeBSD_version >= 700029
	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	/* Detach from the stack, then drain timers so no callout is live */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		em_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		em_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	/* Give the hardware back to firmware and destroy the locks last */
	em_release_hw_control(adapter);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
881 
882 /*********************************************************************
883  *
884  *  Shutdown entry point
885  *
886  **********************************************************************/
887 
888 static int
889 em_shutdown(device_t dev)
890 {
891 	return em_suspend(dev);
892 }
893 
894 /*
895  * Suspend/resume device methods.
896  */
897 static int
898 em_suspend(device_t dev)
899 {
900 	struct adapter *adapter = device_get_softc(dev);
901 
902 	EM_CORE_LOCK(adapter);
903 
904         em_release_manageability(adapter);
905 	em_release_hw_control(adapter);
906 	em_enable_wakeup(dev);
907 
908 	EM_CORE_UNLOCK(adapter);
909 
910 	return bus_generic_suspend(dev);
911 }
912 
913 static int
914 em_resume(device_t dev)
915 {
916 	struct adapter *adapter = device_get_softc(dev);
917 	struct ifnet *ifp = adapter->ifp;
918 
919 	EM_CORE_LOCK(adapter);
920 	em_init_locked(adapter);
921 	em_init_manageability(adapter);
922 	EM_CORE_UNLOCK(adapter);
923 	em_start(ifp);
924 
925 	return bus_generic_resume(dev);
926 }
927 
928 
929 /*********************************************************************
930  *  Transmit entry point
931  *
932  *  em_start is called by the stack to initiate a transmit.
933  *  The driver will remain in this routine as long as there are
934  *  packets to transmit and transmit resources are available.
935  *  In case resources are not available stack is notified and
936  *  the packet is requeued.
937  **********************************************************************/
938 
939 #if __FreeBSD_version >= 800000
/*
 * em_mq_start_locked() - multiqueue transmit, TX lock held.
 *
 * With m == NULL this only drains the buf_ring (tasklet path).
 * Otherwise: if the ring is empty and descriptors are plentiful the
 * frame bypasses the ring and goes straight to em_xmit(); in every
 * other case it is enqueued on the ring, which is then drained.
 */
static int
em_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*next;
	int error = E1000_SUCCESS;

	EM_TX_LOCK_ASSERT(adapter);
	/* To allow being called from a tasklet */
	if (m == NULL)
		goto process;

	/* Interface down or no link: just park the frame on the ring */
	if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
	    || (!adapter->link_active)) {
		error = drbr_enqueue(ifp, adapter->br, m);
		return (error);
	} else if (drbr_empty(ifp, adapter->br) &&
	    (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
		if ((error = em_xmit(adapter, &m)) != 0) {
			/* em_xmit may NULL the mbuf; only requeue if not */
			if (m)
				error = drbr_enqueue(ifp, adapter->br, m);
			return (error);
		} else {
			/*
			 * We've bypassed the buf ring so we need to update
			 * ifp directly
			 */
			/*
			 * NOTE(review): m is dereferenced here after a
			 * successful em_xmit(); assumes em_xmit() does not
			 * free the mbuf on success — confirm against em_xmit.
			 */
			drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
			/*
			** Send a copy of the frame to the BPF
			** listener and set the watchdog on.
			*/
			ETHER_BPF_MTAP(ifp, m);
			adapter->watchdog_check = TRUE;
		}
	} else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
		return (error);

process:
	if (drbr_empty(ifp, adapter->br))
		return(error);
        /* Drain the buf_ring until empty, stopped, or xmit fails */
        while (TRUE) {
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                        break;
                next = drbr_dequeue(ifp, adapter->br);
                if (next == NULL)
                        break;
                if ((error = em_xmit(adapter, &next)) != 0) {
			if (next != NULL)
				error = drbr_enqueue(ifp, adapter->br, next);
                        break;
		}
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
                ETHER_BPF_MTAP(ifp, next);
                /* Set the watchdog */
		adapter->watchdog_check = TRUE;
        }

	/* Running low on descriptors: throttle the stack */
        if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
                ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	return (error);
}
1005 
1006 /*
1007 ** Multiqueue capable stack interface, this is not
1008 ** yet truely multiqueue, but that is coming...
1009 */
1010 static int
1011 em_mq_start(struct ifnet *ifp, struct mbuf *m)
1012 {
1013 
1014 	struct adapter *adapter = ifp->if_softc;
1015 	int error = 0;
1016 
1017 	if (EM_TX_TRYLOCK(adapter)) {
1018 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1019 			error = em_mq_start_locked(ifp, m);
1020 		EM_TX_UNLOCK(adapter);
1021 	} else
1022 		error = drbr_enqueue(ifp, adapter->br, m);
1023 
1024 	return (error);
1025 }
1026 
1027 static void
1028 em_qflush(struct ifnet *ifp)
1029 {
1030 	struct mbuf *m;
1031 	struct adapter *adapter = (struct adapter *)ifp->if_softc;
1032 
1033 	EM_TX_LOCK(adapter);
1034 	while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
1035 		m_freem(m);
1036 	if_qflush(ifp);
1037 	EM_TX_UNLOCK(adapter);
1038 }
1039 #endif /* FreeBSD_version */
1040 
/*
 * em_start_locked() - legacy (single-queue) transmit, TX lock held.
 *
 * Dequeues frames from the if_snd queue and hands them to em_xmit()
 * until the queue is empty or resources run out, requeueing the
 * current frame (when possible) on failure.
 */
static void
em_start_locked(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	EM_TX_LOCK_ASSERT(adapter);

	/* Nothing to do if not running, already throttled, or no link */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 *  Encapsulation can modify our pointer, and or make it
		 *  NULL on failure.  In that event, we can't requeue.
		 */
		if (em_xmit(adapter, &m_head)) {
			if (m_head == NULL)
				break;
			/* Out of resources: throttle and put the frame back */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		adapter->watchdog_check = TRUE;
	}
	/* Running low on descriptors: tell the stack to back off */
	if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	return;
}
1083 
1084 static void
1085 em_start(struct ifnet *ifp)
1086 {
1087 	struct adapter *adapter = ifp->if_softc;
1088 
1089 	EM_TX_LOCK(adapter);
1090 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1091 		em_start_locked(ifp);
1092 	EM_TX_UNLOCK(adapter);
1093 }
1094 
1095 /*********************************************************************
1096  *  Ioctl entry point
1097  *
1098  *  em_ioctl is called when the user wants to configure the
1099  *  interface.
1100  *
1101  *  return 0 on success, positive on failure
1102  **********************************************************************/
1103 
/*
 * em_ioctl() - interface configuration entry point.
 *
 * Handles address/MTU/flags/multicast/media/capability requests,
 * delegating everything else to ether_ioctl().  A detach in
 * progress makes this a no-op (returns 0).
 */
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int error = 0;

	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			/*
			 * XXX
			 * Since resetting hardware takes a very long time
			 * and results in link renegotiation we only
			 * initialize the hardware only when it is absolutely
			 * required.
			 */
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				EM_CORE_LOCK(adapter);
				em_init_locked(adapter);
				EM_CORE_UNLOCK(adapter);
			}
			arp_ifinit(ifp, ifa);
		} else
#endif
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;
		u16 eeprom_data = 0;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		/* Per-MAC jumbo frame limits determine the max MTU */
		EM_CORE_LOCK(adapter);
		switch (adapter->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_3GIO_3, 1, &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames - fall thru */
		case e1000_82571:
		case e1000_82572:
		case e1000_ich9lan:
		case e1000_ich10lan:
		case e1000_82574:
		case e1000_80003es2lan:	/* Limit Jumbo Frame size */
			max_frame_size = 9234;
			break;
		case e1000_pchlan:
			max_frame_size = 4096;
			break;
			/* Adapters that do not support jumbo frames */
		case e1000_82542:
		case e1000_82583:
		case e1000_ich8lan:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		/* New MTU requires a full reinitialization */
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		em_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/* Only promisc/allmulti changed: no reinit */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					em_disable_promisc(adapter);
					em_set_promisc(adapter);
				}
			} else
				em_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				em_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			/* 82542 rev 2 needs the RX unit reprogrammed */
			if (adapter->hw.mac.type == e1000_82542 &&
	    		    adapter->hw.revision_id == E1000_REVISION_2) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH - set requests share the get handler */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			reinit = 1;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if ((mask & IFCAP_WOL) &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if (mask & IFCAP_WOL_MCAST)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if (mask & IFCAP_WOL_MAGIC)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		/* Offload changes require a reinit while running */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
#if __FreeBSD_version >= 700000
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1312 
1313 
1314 /*********************************************************************
1315  *  Init entry point
1316  *
1317  *  This routine is used in two ways. It is used by the stack as
1318  *  init entry point in network interface structure. It is also used
1319  *  by the driver as a hw/sw initialization routine to get to a
1320  *  consistent state.
1321  *
1322  *  return 0 on success, positive on failure
1323  **********************************************************************/
1324 
/*
 * em_init_locked() - full hardware/software (re)initialization.
 *
 * Stops the adapter, programs packet-buffer split, MAC address,
 * VLAN/offload settings, TX/RX rings, and re-enables interrupts
 * (unless polling).  Core lock must be held by the caller.
 */
static void
em_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("em_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	EM_TX_LOCK(adapter);
	em_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* 82547 also needs the TX FIFO workaround state reset */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case e1000_82574:
	case e1000_82583:
			pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pchlan:
		pba = E1000_PBA_10K;
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset, we make a duplicate
	 * in RAR[14] for that eventuality, this assures
	 * the interface continues to function.
	 */
	if (adapter->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&adapter->hw, TRUE);
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	em_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

#if __FreeBSD_version < 700029
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		u32 ctrl;
		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	}
#else
	/* Use real VLAN Filter support */
	em_setup_vlan_hw_support(adapter);
#endif

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 700000
		if (ifp->if_capenable & IFCAP_TSO4)
			ifp->if_hwassist |= CSUM_TSO;
#endif
	}

	/* Configure for OS presence */
	em_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	em_setup_transmit_structures(adapter);
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		em_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	/* Interface is now up and unthrottled */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;
		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		** Set the IVAR - interrupt vector routing.
		** Each nibble represents a vector, high bit
		** is enable, other 3 bits are the MSIX table
		** entry, we map RXQ0 to 0, TXQ0 to 1, and
		** Link (other) to 2, hence the magic number.
		*/
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if (adapter->has_manage && adapter->has_amt)
		em_get_hw_control(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy.reset_disable = TRUE;
}
1510 
/* Stack init entry point: take the core lock and reinitialize. */
static void
em_init(void *arg)
{
	struct adapter	*sc = arg;

	EM_CORE_LOCK(sc);
	em_init_locked(sc);
	EM_CORE_UNLOCK(sc);
}
1520 
1521 
1522 #ifdef DEVICE_POLLING
1523 /*********************************************************************
1524  *
1525  *  Legacy polling routine
1526  *
1527  *********************************************************************/
/*
 * em_poll() - DEVICE_POLLING handler.
 *
 * Checks link status when asked, then services RX (up to 'count'
 * frames) and TX completions, restarting transmission if work is
 * queued.  Returns the number of RX frames processed.
 */
static int
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u32		reg_icr, rx_done = 0;

	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return (rx_done);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			/* Link event: refresh status and restart the timer */
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    em_local_timer, adapter);
		}
	}
	EM_CORE_UNLOCK(adapter);

	/* RX cleanup runs without the core lock */
	rx_done = em_rxeof(adapter, count);

	EM_TX_LOCK(adapter);
	em_txeof(adapter);
#if __FreeBSD_version >= 800000
	if (!drbr_empty(ifp, adapter->br))
		em_mq_start_locked(ifp, NULL);
#else
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);
#endif
	EM_TX_UNLOCK(adapter);
	return (rx_done);
}
1566 #endif /* DEVICE_POLLING */
1567 
1568 #ifdef EM_LEGACY_IRQ
1569 /*********************************************************************
1570  *
1571  *  Legacy Interrupt Service routine
1572  *
1573  *********************************************************************/
1574 
/*
 * em_intr() - legacy (non-fast) interrupt service routine.
 *
 * Filters out stray/shared interrupts, handles link events, and
 * otherwise services RX and TX completions inline under the locks.
 */
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_icr;


	/* Polling mode owns the hardware; ignore interrupts */
	if (ifp->if_capenable & IFCAP_POLLING)
		return;

	EM_CORE_LOCK(adapter);
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
	/* 0xffffffff: hot-ejected; 0: not ours; >=82571: check ASSERTED */
	if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
	    (adapter->hw.mac.type >= e1000_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0))
			goto out;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			goto out;

	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		em_update_link_status(adapter);
		/* Deal with TX cruft when link lost */
		em_tx_purge(adapter);
		callout_reset(&adapter->timer, hz,
		    em_local_timer, adapter);
		goto out;
	}

	EM_TX_LOCK(adapter);
	/*
	 * NOTE(review): em_txeof() runs both before and after em_rxeof();
	 * presumably to reap descriptors freed while RX was processed —
	 * confirm before simplifying.
	 */
	em_txeof(adapter);
	em_rxeof(adapter, -1);
	em_txeof(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);
	EM_TX_UNLOCK(adapter);

out:
	EM_CORE_UNLOCK(adapter);
	return;
}
1622 
1623 #else /* EM_FAST_IRQ, then fast interrupt routines only */
1624 
1625 static void
1626 em_handle_link(void *context, int pending)
1627 {
1628 	struct adapter	*adapter = context;
1629 	struct ifnet *ifp = adapter->ifp;
1630 
1631 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1632 		return;
1633 
1634 	EM_CORE_LOCK(adapter);
1635 	callout_stop(&adapter->timer);
1636 	em_update_link_status(adapter);
1637 	/* Deal with TX cruft when link lost */
1638 	em_tx_purge(adapter);
1639 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1640 	EM_CORE_UNLOCK(adapter);
1641 }
1642 
1643 
1644 /* Combined RX/TX handler, used by Legacy and MSI */
/* Combined RX/TX handler, used by Legacy and MSI */
/*
 * Services RX up to the process limit, re-enqueueing itself when more
 * RX work remains, then reaps TX completions and restarts transmission.
 * Interrupts were masked by em_irq_fast(); re-enable them on exit.
 */
static void
em_handle_rxtx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp = adapter->ifp;


	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* More RX pending: run this task again rather than loop */
		if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
		EM_TX_LOCK(adapter);
		em_txeof(adapter);

#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, adapter->br))
			em_mq_start_locked(ifp, NULL);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			em_start_locked(ifp);
#endif
		EM_TX_UNLOCK(adapter);
	}

	em_enable_intr(adapter);
}
1670 
1671 /*********************************************************************
1672  *
1673  *  Fast Legacy/MSI Combined Interrupt Service routine
1674  *
1675  *********************************************************************/
/*
 * Pre-7.0 kernels have no filter interrupts: the handler returns void
 * there, so the FILTER_* result macros expand to nothing.
 */
#if __FreeBSD_version < 700000
#define FILTER_STRAY
#define FILTER_HANDLED
static void
#else
static int
#endif
em_irq_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	u32		reg_icr;

	ifp = adapter->ifp;

	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return FILTER_STRAY;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return FILTER_STRAY;

	/*
	 * Starting with the 82571 chip, bit 31 should be used to
	 * determine whether the interrupt belongs to us.
	 */
	if (adapter->hw.mac.type >= e1000_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return FILTER_STRAY;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	em_disable_intr(adapter);
	/* Defer the real work to em_handle_rxtx() */
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		adapter->hw.mac.get_link_status = 1;
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
	return FILTER_HANDLED;
}
1727 
1728 /*********************************************************************
1729  *
1730  *  MSIX Interrupt Service Routines
1731  *
1732  **********************************************************************/
1733 #define EM_MSIX_TX	0x00040000
1734 #define EM_MSIX_RX	0x00010000
1735 #define EM_MSIX_LINK	0x00100000
1736 
1737 static void
1738 em_msix_tx(void *arg)
1739 {
1740 	struct adapter *adapter = arg;
1741 	struct ifnet	*ifp = adapter->ifp;
1742 
1743 	++adapter->tx_irq;
1744 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1745 		EM_TX_LOCK(adapter);
1746 		em_txeof(adapter);
1747 		EM_TX_UNLOCK(adapter);
1748 		taskqueue_enqueue(adapter->tq, &adapter->tx_task);
1749 	}
1750 	/* Reenable this interrupt */
1751 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
1752 	return;
1753 }
1754 
1755 /*********************************************************************
1756  *
1757  *  MSIX RX Interrupt Service routine
1758  *
1759  **********************************************************************/
1760 
1761 static void
1762 em_msix_rx(void *arg)
1763 {
1764 	struct adapter *adapter = arg;
1765 	struct ifnet	*ifp = adapter->ifp;
1766 
1767 	++adapter->rx_irq;
1768 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1769 	    (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1770 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1771 	/* Reenable this interrupt */
1772 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
1773 	return;
1774 }
1775 
1776 /*********************************************************************
1777  *
1778  *  MSIX Link Fast Interrupt Service routine
1779  *
1780  **********************************************************************/
1781 
1782 static void
1783 em_msix_link(void *arg)
1784 {
1785 	struct adapter	*adapter = arg;
1786 	u32		reg_icr;
1787 
1788 	++adapter->link_irq;
1789 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1790 
1791 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1792 		adapter->hw.mac.get_link_status = 1;
1793 		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1794 	}
1795 	E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1796 	    EM_MSIX_LINK | E1000_IMS_LSC);
1797 	return;
1798 }
1799 
1800 static void
1801 em_handle_rx(void *context, int pending)
1802 {
1803 	struct adapter	*adapter = context;
1804 	struct ifnet	*ifp = adapter->ifp;
1805 
1806 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1807 	    (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1808 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1809 
1810 }
1811 
1812 static void
1813 em_handle_tx(void *context, int pending)
1814 {
1815 	struct adapter	*adapter = context;
1816 	struct ifnet	*ifp = adapter->ifp;
1817 
1818 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1819 		if (!EM_TX_TRYLOCK(adapter))
1820 			return;
1821 		em_txeof(adapter);
1822 #if __FreeBSD_version >= 800000
1823 		if (!drbr_empty(ifp, adapter->br))
1824 			em_mq_start_locked(ifp, NULL);
1825 #else
1826 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1827 			em_start_locked(ifp);
1828 #endif
1829 		EM_TX_UNLOCK(adapter);
1830 	}
1831 }
1832 #endif /* EM_FAST_IRQ */
1833 
1834 /*********************************************************************
1835  *
1836  *  Media Ioctl callback
1837  *
1838  *  This routine is called whenever the user queries the status of
1839  *  the interface using ifconfig.
1840  *
1841  **********************************************************************/
static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	INIT_DEBUGOUT("em_media_status: begin");

	EM_CORE_LOCK(adapter);
	/* Refresh the cached link state before reporting it. */
	em_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report "valid but not active" only. */
	if (!adapter->link_active) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		/* 82545 fiber is reported as 1000base-LX, others as SX */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		/* Copper: translate negotiated speed/duplex to ifmedia. */
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	EM_CORE_UNLOCK(adapter);
}
1887 
1888 /*********************************************************************
1889  *
1890  *  Media Ioctl callback
1891  *
1892  *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
1894  *
1895  **********************************************************************/
static int
em_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia  *ifm = &adapter->media;

	INIT_DEBUGOUT("em_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	EM_CORE_LOCK(adapter);
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Gigabit media always uses autonegotiation. */
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		/* Forced 100Mb: disable autoneg, honor FDX/HDX flag. */
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case IFM_10_T:
		/* Forced 10Mb: disable autoneg, honor FDX/HDX flag. */
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	default:
		/*
		 * NOTE(review): an unsupported subtype only warns here;
		 * the device is still re-initialized below with the
		 * previous settings, and 0 is returned.
		 */
		device_printf(adapter->dev, "Unsupported media type\n");
	}

	/* As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	adapter->hw.phy.reset_disable = FALSE;

	em_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);

	return (0);
}
1949 
1950 /*********************************************************************
1951  *
1952  *  This routine maps the mbufs to tx descriptors.
1953  *
1954  *  return 0 on success, positive on failure
1955  **********************************************************************/
1956 
1957 static int
1958 em_xmit(struct adapter *adapter, struct mbuf **m_headp)
1959 {
1960 	bus_dma_segment_t	segs[EM_MAX_SCATTER];
1961 	bus_dmamap_t		map;
1962 	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
1963 	struct e1000_tx_desc	*ctxd = NULL;
1964 	struct mbuf		*m_head;
1965 	u32			txd_upper, txd_lower, txd_used, txd_saved;
1966 	int			nsegs, i, j, first, last = 0;
1967 	int			error, do_tso, tso_desc = 0;
1968 #if __FreeBSD_version < 700000
1969 	struct m_tag		*mtag;
1970 #endif
1971 	m_head = *m_headp;
1972 	txd_upper = txd_lower = txd_used = txd_saved = 0;
1973 
1974 #if __FreeBSD_version >= 700000
1975 	do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
1976 #else
1977 	do_tso = 0;
1978 #endif
1979 
1980         /*
1981          * Force a cleanup if number of TX descriptors
1982          * available hits the threshold
1983          */
1984 	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1985 		em_txeof(adapter);
1986 		/* Now do we at least have a minimal? */
1987 		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
1988 			adapter->no_tx_desc_avail1++;
1989 			return (ENOBUFS);
1990 		}
1991 	}
1992 
1993 
1994 	/*
1995 	 * TSO workaround:
1996 	 *  If an mbuf is only header we need
1997 	 *     to pull 4 bytes of data into it.
1998 	 */
1999 	if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
2000 		m_head = m_pullup(m_head, M_TSO_LEN + 4);
2001 		*m_headp = m_head;
2002 		if (m_head == NULL)
2003 			return (ENOBUFS);
2004 	}
2005 
2006 	/*
2007 	 * Map the packet for DMA
2008 	 *
2009 	 * Capture the first descriptor index,
2010 	 * this descriptor will have the index
2011 	 * of the EOP which is the only one that
2012 	 * now gets a DONE bit writeback.
2013 	 */
2014 	first = adapter->next_avail_tx_desc;
2015 	tx_buffer = &adapter->tx_buffer_area[first];
2016 	tx_buffer_mapped = tx_buffer;
2017 	map = tx_buffer->map;
2018 
2019 	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2020 	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2021 
2022 	/*
2023 	 * There are two types of errors we can (try) to handle:
2024 	 * - EFBIG means the mbuf chain was too long and bus_dma ran
2025 	 *   out of segments.  Defragment the mbuf chain and try again.
2026 	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
2027 	 *   at this point in time.  Defer sending and try again later.
2028 	 * All other errors, in particular EINVAL, are fatal and prevent the
2029 	 * mbuf chain from ever going through.  Drop it and report error.
2030 	 */
2031 	if (error == EFBIG) {
2032 		struct mbuf *m;
2033 
2034 		m = m_defrag(*m_headp, M_DONTWAIT);
2035 		if (m == NULL) {
2036 			adapter->mbuf_alloc_failed++;
2037 			m_freem(*m_headp);
2038 			*m_headp = NULL;
2039 			return (ENOBUFS);
2040 		}
2041 		*m_headp = m;
2042 
2043 		/* Try it again */
2044 		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2045 		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2046 
2047 		if (error) {
2048 			adapter->no_tx_dma_setup++;
2049 			m_freem(*m_headp);
2050 			*m_headp = NULL;
2051 			return (error);
2052 		}
2053 	} else if (error != 0) {
2054 		adapter->no_tx_dma_setup++;
2055 		return (error);
2056 	}
2057 
2058 	/*
2059 	 * TSO Hardware workaround, if this packet is not
2060 	 * TSO, and is only a single descriptor long, and
2061 	 * it follows a TSO burst, then we need to add a
2062 	 * sentinel descriptor to prevent premature writeback.
2063 	 */
2064 	if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
2065 		if (nsegs == 1)
2066 			tso_desc = TRUE;
2067 		adapter->tx_tso = FALSE;
2068 	}
2069 
2070         if (nsegs > (adapter->num_tx_desc_avail - 2)) {
2071                 adapter->no_tx_desc_avail2++;
2072 		bus_dmamap_unload(adapter->txtag, map);
2073 		return (ENOBUFS);
2074         }
2075 	m_head = *m_headp;
2076 
2077 	/* Do hardware assists */
2078 #if __FreeBSD_version >= 700000
2079 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2080 		error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
2081 		if (error != TRUE)
2082 			return (ENXIO); /* something foobar */
2083 		/* we need to make a final sentinel transmit desc */
2084 		tso_desc = TRUE;
2085 	} else
2086 #endif
2087 	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2088 		em_transmit_checksum_setup(adapter,  m_head,
2089 		    &txd_upper, &txd_lower);
2090 
2091 	i = adapter->next_avail_tx_desc;
2092 	if (adapter->pcix_82544)
2093 		txd_saved = i;
2094 
2095 	/* Set up our transmit descriptors */
2096 	for (j = 0; j < nsegs; j++) {
2097 		bus_size_t seg_len;
2098 		bus_addr_t seg_addr;
2099 		/* If adapter is 82544 and on PCIX bus */
2100 		if(adapter->pcix_82544) {
2101 			DESC_ARRAY	desc_array;
2102 			u32		array_elements, counter;
2103 			/*
2104 			 * Check the Address and Length combination and
2105 			 * split the data accordingly
2106 			 */
2107 			array_elements = em_fill_descriptors(segs[j].ds_addr,
2108 			    segs[j].ds_len, &desc_array);
2109 			for (counter = 0; counter < array_elements; counter++) {
2110 				if (txd_used == adapter->num_tx_desc_avail) {
2111 					adapter->next_avail_tx_desc = txd_saved;
2112 					adapter->no_tx_desc_avail2++;
2113 					bus_dmamap_unload(adapter->txtag, map);
2114 					return (ENOBUFS);
2115 				}
2116 				tx_buffer = &adapter->tx_buffer_area[i];
2117 				ctxd = &adapter->tx_desc_base[i];
2118 				ctxd->buffer_addr = htole64(
2119 				    desc_array.descriptor[counter].address);
2120 				ctxd->lower.data = htole32(
2121 				    (adapter->txd_cmd | txd_lower | (u16)
2122 				    desc_array.descriptor[counter].length));
2123 				ctxd->upper.data =
2124 				    htole32((txd_upper));
2125 				last = i;
2126 				if (++i == adapter->num_tx_desc)
2127                                          i = 0;
2128 				tx_buffer->m_head = NULL;
2129 				tx_buffer->next_eop = -1;
2130 				txd_used++;
2131                         }
2132 		} else {
2133 			tx_buffer = &adapter->tx_buffer_area[i];
2134 			ctxd = &adapter->tx_desc_base[i];
2135 			seg_addr = segs[j].ds_addr;
2136 			seg_len  = segs[j].ds_len;
2137 			/*
2138 			** TSO Workaround:
2139 			** If this is the last descriptor, we want to
2140 			** split it so we have a small final sentinel
2141 			*/
2142 			if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
2143 				seg_len -= 4;
2144 				ctxd->buffer_addr = htole64(seg_addr);
2145 				ctxd->lower.data = htole32(
2146 				adapter->txd_cmd | txd_lower | seg_len);
2147 				ctxd->upper.data =
2148 				    htole32(txd_upper);
2149 				if (++i == adapter->num_tx_desc)
2150 					i = 0;
2151 				/* Now make the sentinel */
2152 				++txd_used; /* using an extra txd */
2153 				ctxd = &adapter->tx_desc_base[i];
2154 				tx_buffer = &adapter->tx_buffer_area[i];
2155 				ctxd->buffer_addr =
2156 				    htole64(seg_addr + seg_len);
2157 				ctxd->lower.data = htole32(
2158 				adapter->txd_cmd | txd_lower | 4);
2159 				ctxd->upper.data =
2160 				    htole32(txd_upper);
2161 				last = i;
2162 				if (++i == adapter->num_tx_desc)
2163 					i = 0;
2164 			} else {
2165 				ctxd->buffer_addr = htole64(seg_addr);
2166 				ctxd->lower.data = htole32(
2167 				adapter->txd_cmd | txd_lower | seg_len);
2168 				ctxd->upper.data =
2169 				    htole32(txd_upper);
2170 				last = i;
2171 				if (++i == adapter->num_tx_desc)
2172 					i = 0;
2173 			}
2174 			tx_buffer->m_head = NULL;
2175 			tx_buffer->next_eop = -1;
2176 		}
2177 	}
2178 
2179 	adapter->next_avail_tx_desc = i;
2180 	if (adapter->pcix_82544)
2181 		adapter->num_tx_desc_avail -= txd_used;
2182 	else {
2183 		adapter->num_tx_desc_avail -= nsegs;
2184 		if (tso_desc) /* TSO used an extra for sentinel */
2185 			adapter->num_tx_desc_avail -= txd_used;
2186 	}
2187 
2188         /*
2189 	** Handle VLAN tag, this is the
2190 	** biggest difference between
2191 	** 6.x and 7
2192 	*/
2193 #if __FreeBSD_version < 700000
2194         /* Find out if we are in vlan mode. */
2195         mtag = VLAN_OUTPUT_TAG(ifp, m_head);
2196         if (mtag != NULL) {
2197                 ctxd->upper.fields.special =
2198                     htole16(VLAN_TAG_VALUE(mtag));
2199 #else /* FreeBSD 7 */
2200 	if (m_head->m_flags & M_VLANTAG) {
2201 		/* Set the vlan id. */
2202 		ctxd->upper.fields.special =
2203 		    htole16(m_head->m_pkthdr.ether_vtag);
2204 #endif
2205                 /* Tell hardware to add tag */
2206                 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
2207         }
2208 
2209         tx_buffer->m_head = m_head;
2210 	tx_buffer_mapped->map = tx_buffer->map;
2211 	tx_buffer->map = map;
2212         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2213 
2214         /*
2215          * Last Descriptor of Packet
2216 	 * needs End Of Packet (EOP)
2217 	 * and Report Status (RS)
2218          */
2219         ctxd->lower.data |=
2220 	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2221 	/*
2222 	 * Keep track in the first buffer which
2223 	 * descriptor will be written back
2224 	 */
2225 	tx_buffer = &adapter->tx_buffer_area[first];
2226 	tx_buffer->next_eop = last;
2227 
2228 	/*
2229 	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2230 	 * that this frame is available to transmit.
2231 	 */
2232 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2233 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2234 	if (adapter->hw.mac.type == e1000_82547 &&
2235 	    adapter->link_duplex == HALF_DUPLEX)
2236 		em_82547_move_tail(adapter);
2237 	else {
2238 		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2239 		if (adapter->hw.mac.type == e1000_82547)
2240 			em_82547_update_fifo_head(adapter,
2241 			    m_head->m_pkthdr.len);
2242 	}
2243 
2244 	return (0);
2245 }
2246 
2247 /*********************************************************************
2248  *
2249  * 82547 workaround to avoid controller hang in half-duplex environment.
2250  * The workaround is to avoid queuing a large packet that would span
2251  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2252  * in this case. We do that only when FIFO is quiescent.
2253  *
2254  **********************************************************************/
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct e1000_tx_desc *tx_desc;
	u16	hw_tdt, sw_tdt, length = 0;
	bool	eop = 0;

	EM_TX_LOCK_ASSERT(adapter);

	/* Walk descriptors from the hardware tail to the software tail. */
	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		/* Advance the hardware tail one whole packet at a time. */
		if (eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				/*
				 * FIFO cannot accept this packet yet;
				 * retry from the callout next tick.
				 */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}
2288 
2289 static int
2290 em_82547_fifo_workaround(struct adapter *adapter, int len)
2291 {
2292 	int fifo_space, fifo_pkt_len;
2293 
2294 	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2295 
2296 	if (adapter->link_duplex == HALF_DUPLEX) {
2297 		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2298 
2299 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2300 			if (em_82547_tx_fifo_reset(adapter))
2301 				return (0);
2302 			else
2303 				return (1);
2304 		}
2305 	}
2306 
2307 	return (0);
2308 }
2309 
2310 static void
2311 em_82547_update_fifo_head(struct adapter *adapter, int len)
2312 {
2313 	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2314 
2315 	/* tx_fifo_head is always 16 byte aligned */
2316 	adapter->tx_fifo_head += fifo_pkt_len;
2317 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2318 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
2319 	}
2320 }
2321 
2322 
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	u32 tctl;

	/*
	 * The FIFO pointers may only be rewound when the TX unit is
	 * fully idle: descriptor ring head == tail, internal FIFO
	 * head == tail (both current and saved copies), and no
	 * packet bytes pending in the FIFO (TDFPC == 0).
	 */
	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
		    tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
		    adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		/* Software head tracks the hardware back to zero. */
		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	}
	else {
		return (FALSE);
	}
}
2363 
2364 static void
2365 em_set_promisc(struct adapter *adapter)
2366 {
2367 	struct ifnet	*ifp = adapter->ifp;
2368 	u32		reg_rctl;
2369 
2370 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2371 
2372 	if (ifp->if_flags & IFF_PROMISC) {
2373 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2374 		/* Turn this on if you want to see bad packets */
2375 		if (em_debug_sbp)
2376 			reg_rctl |= E1000_RCTL_SBP;
2377 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2378 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2379 		reg_rctl |= E1000_RCTL_MPE;
2380 		reg_rctl &= ~E1000_RCTL_UPE;
2381 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2382 	}
2383 }
2384 
2385 static void
2386 em_disable_promisc(struct adapter *adapter)
2387 {
2388 	u32	reg_rctl;
2389 
2390 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2391 
2392 	reg_rctl &=  (~E1000_RCTL_UPE);
2393 	reg_rctl &=  (~E1000_RCTL_MPE);
2394 	reg_rctl &=  (~E1000_RCTL_SBP);
2395 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2396 }
2397 
2398 
2399 /*********************************************************************
2400  *  Multicast Update
2401  *
2402  *  This routine is called whenever multicast address list is updated.
2403  *
2404  **********************************************************************/
2405 
static void
em_set_multi(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct ifmultiaddr *ifma;
	u32 reg_rctl = 0;
	u8  *mta; /* Multicast array memory */
	int mcnt = 0;

	IOCTL_DEBUGOUT("em_set_multi: begin");

	/*
	 * 82542 rev2: place the receiver in reset (and clear MWI)
	 * while the multicast table is being updated.
	 */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Allocate temporary memory to setup array */
	mta = malloc(sizeof(u8) *
	    (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mta == NULL)
		/* NOTE(review): panic on a transient M_NOWAIT failure
		 * is harsh; a stack buffer or early return would be
		 * friendlier -- confirm against later driver versions. */
		panic("em_set_multi memory failure\n");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	/* Copy up to the hardware limit of link-level multicast addrs. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif
	/* Too many groups: fall back to accepting all multicast. */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else
		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

	/* Take the 82542 rev2 receiver back out of reset. */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_set_mwi(&adapter->hw);
	}
	free(mta, M_DEVBUF);
}
2473 
2474 
2475 /*********************************************************************
2476  *  Timer routine
2477  *
2478  *  This routine checks for link status and updates statistics.
2479  *
2480  **********************************************************************/
2481 
static void
em_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_CORE_LOCK_ASSERT(adapter);

	/* Kick the deferred RX/TX processing task each tick. */
	taskqueue_enqueue(adapter->tq,
	    &adapter->rxtx_task);
	em_update_link_status(adapter);
	em_update_stats_counters(adapter);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
		em_print_hw_stats(adapter);

	em_smartspeed(adapter);

	/*
	 * We check the watchdog: the time since
	 * the last TX descriptor was cleaned.
	 * This implies a functional TX engine.
	 */
	if ((adapter->watchdog_check == TRUE) &&
	    (ticks - adapter->watchdog_time > EM_WATCHDOG))
		goto hung;

	/* Re-arm for the next tick. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	return;
hung:
	/* TX stalled: count the event and reinitialize the device. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	em_init_locked(adapter);
}
2521 
static void
em_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		/* Fiber link state comes from the STATUS register. */
		e1000_check_for_link(hw);
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		/* Link came up: latch speed/duplex, notify the stack. */
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		/* Check if we must disable SPEED_MODE bit on PCI-E */
		if ((adapter->link_speed != SPEED_1000) &&
		    ((hw->mac.type == e1000_82571) ||
		    (hw->mac.type == e1000_82572))) {
			int tarc0;
			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~SPEED_MODE_BIT;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Link went down: clear state, notify the stack. */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_check = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
2589 
2590 /*********************************************************************
2591  *
2592  *  This routine disables all traffic on the adapter by issuing a
2593  *  global reset on the MAC and deallocates TX/RX buffers.
2594  *
2595  *  This routine should always be called with BOTH the CORE
2596  *  and TX locks.
2597  **********************************************************************/
2598 
static void
em_stop(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_CORE_LOCK_ASSERT(adapter);
	EM_TX_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("em_stop: begin");

	/* Quiesce interrupts and the periodic timers first. */
	em_disable_intr(adapter);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	e1000_reset_hw(&adapter->hw);
	/* Clear wakeup control on parts that have the WUC register. */
	if (adapter->hw.mac.type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
}
2621 
2622 
2623 /*********************************************************************
2624  *
2625  *  Determine hardware revision.
2626  *
2627  **********************************************************************/
static void
em_identify_hardware(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
		device_printf(dev, "Memory Access and/or Bus Master bits "
		    "were not set!\n");
		/* Re-enable bus mastering and memory-space decoding. */
		adapter->hw.bus.pci_cmd_word |=
		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND,
		    adapter->hw.bus.pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Do Shared Code Init and Setup */
	if (e1000_set_mac_type(&adapter->hw)) {
		/* NOTE(review): on failure we only warn and return;
		 * presumably attach fails later -- confirm the caller
		 * checks the mac type. */
		device_printf(dev, "Setup init failure\n");
		return;
	}
}
2660 
static int
em_allocate_pci_resources(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	int		val, rid, error = E1000_SUCCESS;

	/* Map BAR(0): the memory-mapped register window. */
	rid = PCIR_BAR(0);
	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->memory);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Only older adapters use IO mapping */
	if ((adapter->hw.mac.type > e1000_82543) &&
	    (adapter->hw.mac.type < e1000_82571)) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			/*
			 * NOTE(review): the memory resource allocated
			 * above is not released on this error path --
			 * presumably detach handles it; confirm against
			 * the attach failure path.
			 */
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (adapter->ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->ioport);
	}

	/*
	** Init the resource arrays
	**  used by MSIX setup
	*/
	for (int i = 0; i < 3; i++) {
		adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
		adapter->tag[i] = NULL;
		adapter->res[i] = NULL;
	}

	/*
	 * Setup MSI/X or MSI if PCI Express
	 */
	if (em_enable_msi)
		adapter->msi = em_setup_msix(adapter);

	adapter->hw.back = &adapter->osdep;

	return (error);
}
2733 
2734 /*********************************************************************
2735  *
2736  *  Setup the Legacy or MSI Interrupt handler
2737  *
2738  **********************************************************************/
int
em_allocate_legacy(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int error;

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* Legacy RID is 0 */
	if (adapter->msi == 0)
		adapter->rid[0] = 0;

	/* We allocate a single interrupt resource */
	adapter->res[0] = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res[0] == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

#ifdef EM_LEGACY_IRQ
	/* We do Legacy setup */
	if ((error = bus_setup_intr(dev, adapter->res[0],
#if __FreeBSD_version > 700000
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
#else /* 6.X */
	    INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register interrupt handler");
		return (error);
	}

#else /* FAST_IRQ */
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
#if __FreeBSD_version < 700000
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET | INTR_FAST, em_irq_fast, adapter,
#else
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET, em_irq_fast, NULL, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
			    "handler: %d\n", error);
		/* Tear down the taskqueue created above. */
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
		return (error);
	}
#endif  /* EM_LEGACY_IRQ */

	return (0);
}
2803 
2804 /*********************************************************************
2805  *
2806  *  Setup the MSIX Interrupt handlers
2807  *   This is not really Multiqueue, rather
2808  *   its just multiple interrupt vectors.
2809  *
2810  **********************************************************************/
2811 int
2812 em_allocate_msix(struct adapter *adapter)
2813 {
2814 	device_t dev = adapter->dev;
2815 	int error;
2816 
2817 	/* Make sure all interrupts are disabled */
2818 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2819 
2820 	/* First get the resources */
2821 	for (int i = 0; i < adapter->msi; i++) {
2822 		adapter->res[i] = bus_alloc_resource_any(dev,
2823 		    SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
2824 		if (adapter->res[i] == NULL) {
2825 			device_printf(dev,
2826 			    "Unable to allocate bus resource: "
2827 			    "MSIX Interrupt\n");
2828 			return (ENXIO);
2829 		}
2830 	}
2831 
2832 	/*
2833 	 * Now allocate deferred processing contexts.
2834 	 */
2835 	TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
2836 	TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
2837 	/*
2838 	 * Handle compatibility for msi case for deferral due to
2839 	 * trylock failure
2840 	 */
2841 	TASK_INIT(&adapter->rxtx_task, 0, em_handle_tx, adapter);
2842 	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2843 	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2844 	    taskqueue_thread_enqueue, &adapter->tq);
2845 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2846 	    device_get_nameunit(adapter->dev));
2847 
2848 	/*
2849 	 * And setup the interrupt handlers
2850 	 */
2851 
2852 	/* First slot to RX */
2853 	if ((error = bus_setup_intr(dev, adapter->res[0],
2854 #if __FreeBSD_version > 700000
2855 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, adapter,
2856 #else /* 6.X */
2857 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_rx, adapter,
2858 #endif
2859 	    &adapter->tag[0])) != 0) {
2860 		device_printf(dev, "Failed to register RX handler");
2861 		return (error);
2862 	}
2863 
2864 	/* Next TX */
2865 	if ((error = bus_setup_intr(dev, adapter->res[1],
2866 #if __FreeBSD_version > 700000
2867 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, adapter,
2868 #else /* 6.X */
2869 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_tx, adapter,
2870 #endif
2871 	    &adapter->tag[1])) != 0) {
2872 		device_printf(dev, "Failed to register TX handler");
2873 		return (error);
2874 	}
2875 
2876 	/* And Link */
2877 	if ((error = bus_setup_intr(dev, adapter->res[2],
2878 #if __FreeBSD_version > 700000
2879 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter,
2880 #else /* 6.X */
2881 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_link, adapter,
2882 #endif
2883 	    &adapter->tag[2])) != 0) {
2884 		device_printf(dev, "Failed to register TX handler");
2885 		return (error);
2886 	}
2887 
2888 	return (0);
2889 }
2890 
2891 
2892 static void
2893 em_free_pci_resources(struct adapter *adapter)
2894 {
2895 	device_t dev = adapter->dev;
2896 
2897 	/* Make sure the for loop below runs once */
2898 	if (adapter->msi == 0)
2899 		adapter->msi = 1;
2900 
2901 	/*
2902 	 * First release all the interrupt resources:
2903 	 *      notice that since these are just kept
2904 	 *      in an array we can do the same logic
2905 	 *      whether its MSIX or just legacy.
2906 	 */
2907 	for (int i = 0; i < adapter->msi; i++) {
2908 		if (adapter->tag[i] != NULL) {
2909 			bus_teardown_intr(dev, adapter->res[i],
2910 			    adapter->tag[i]);
2911 			adapter->tag[i] = NULL;
2912 		}
2913 		if (adapter->res[i] != NULL) {
2914 			bus_release_resource(dev, SYS_RES_IRQ,
2915 			    adapter->rid[i], adapter->res[i]);
2916 		}
2917 	}
2918 
2919 	if (adapter->msi)
2920 		pci_release_msi(dev);
2921 
2922 	if (adapter->msix != NULL)
2923 		bus_release_resource(dev, SYS_RES_MEMORY,
2924 		    PCIR_BAR(EM_MSIX_BAR), adapter->msix);
2925 
2926 	if (adapter->memory != NULL)
2927 		bus_release_resource(dev, SYS_RES_MEMORY,
2928 		    PCIR_BAR(0), adapter->memory);
2929 
2930 	if (adapter->flash != NULL)
2931 		bus_release_resource(dev, SYS_RES_MEMORY,
2932 		    EM_FLASH, adapter->flash);
2933 
2934 	if (adapter->ioport != NULL)
2935 		bus_release_resource(dev, SYS_RES_IOPORT,
2936 		    adapter->io_rid, adapter->ioport);
2937 }
2938 
2939 /*
2940  * Setup MSI or MSI/X
2941  */
2942 static int
2943 em_setup_msix(struct adapter *adapter)
2944 {
2945 	device_t dev = adapter->dev;
2946 	int val = 0;
2947 
2948 	if (adapter->hw.mac.type < e1000_82571)
2949 		return (0);
2950 
2951 	/* Setup MSI/X for Hartwell */
2952 	if (adapter->hw.mac.type == e1000_82574) {
2953 		/* Map the MSIX BAR */
2954 		int rid = PCIR_BAR(EM_MSIX_BAR);
2955 		adapter->msix = bus_alloc_resource_any(dev,
2956 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2957        		if (!adapter->msix) {
2958 			/* May not be enabled */
2959                		device_printf(adapter->dev,
2960 			    "Unable to map MSIX table \n");
2961 			goto msi;
2962        		}
2963 		val = pci_msix_count(dev);
2964 		/*
2965 		** 82574 can be configured for 5 but
2966 		** we limit use to 3.
2967 		*/
2968 		if (val > 3) val = 3;
2969 		if ((val) && pci_alloc_msix(dev, &val) == 0) {
2970                		device_printf(adapter->dev,"Using MSIX interrupts\n");
2971 			return (val);
2972 		}
2973 	}
2974 msi:
2975        	val = pci_msi_count(dev);
2976        	if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
2977                	adapter->msi = 1;
2978                	device_printf(adapter->dev,"Using MSI interrupt\n");
2979 		return (val);
2980 	}
2981 	return (0);
2982 }
2983 
2984 /*********************************************************************
2985  *
2986  *  Initialize the hardware to a configuration
2987  *  as specified by the adapter structure.
2988  *
2989  **********************************************************************/
2990 static int
2991 em_hardware_init(struct adapter *adapter)
2992 {
2993 	device_t dev = adapter->dev;
2994 	u16 	rx_buffer_size;
2995 
2996 	INIT_DEBUGOUT("em_hardware_init: begin");
2997 
2998 	/* Issue a global reset */
2999 	e1000_reset_hw(&adapter->hw);
3000 
3001 	/* When hardware is reset, fifo_head is also reset */
3002 	adapter->tx_fifo_head = 0;
3003 
3004 	/* Set up smart power down as default off on newer adapters. */
3005 	if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
3006 	    adapter->hw.mac.type == e1000_82572)) {
3007 		u16 phy_tmp = 0;
3008 
3009 		/* Speed up time to link by disabling smart power down. */
3010 		e1000_read_phy_reg(&adapter->hw,
3011 		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
3012 		phy_tmp &= ~IGP02E1000_PM_SPD;
3013 		e1000_write_phy_reg(&adapter->hw,
3014 		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
3015 	}
3016 
3017 	/*
3018 	 * These parameters control the automatic generation (Tx) and
3019 	 * response (Rx) to Ethernet PAUSE frames.
3020 	 * - High water mark should allow for at least two frames to be
3021 	 *   received after sending an XOFF.
3022 	 * - Low water mark works best when it is very near the high water mark.
3023 	 *   This allows the receiver to restart by sending XON when it has
3024 	 *   drained a bit. Here we use an arbitary value of 1500 which will
3025 	 *   restart after one full frame is pulled from the buffer. There
3026 	 *   could be several smaller frames in the buffer and if so they will
3027 	 *   not trigger the XON until their total number reduces the buffer
3028 	 *   by 1500.
3029 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
3030 	 */
3031 	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
3032 	    0xffff) << 10 );
3033 
3034 	adapter->hw.fc.high_water = rx_buffer_size -
3035 	    roundup2(adapter->max_frame_size, 1024);
3036 	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
3037 
3038 	if (adapter->hw.mac.type == e1000_80003es2lan)
3039 		adapter->hw.fc.pause_time = 0xFFFF;
3040 	else
3041 		adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
3042 	adapter->hw.fc.send_xon = TRUE;
3043 
3044         /* Set Flow control, use the tunable location if sane */
3045         if ((em_fc_setting >= 0) || (em_fc_setting < 4))
3046                 adapter->hw.fc.requested_mode = em_fc_setting;
3047         else
3048                 adapter->hw.fc.requested_mode = e1000_fc_none;
3049 
3050 	/* Override - workaround for PCHLAN issue */
3051 	if (adapter->hw.mac.type == e1000_pchlan)
3052                 adapter->hw.fc.requested_mode = e1000_fc_rx_pause;
3053 
3054 	if (e1000_init_hw(&adapter->hw) < 0) {
3055 		device_printf(dev, "Hardware Initialization Failed\n");
3056 		return (EIO);
3057 	}
3058 
3059 	e1000_check_for_link(&adapter->hw);
3060 
3061 	return (0);
3062 }
3063 
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("em_setup_interface: begin");

	/* Allocate the ifnet and wire up the standard driver entry points. */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	/* Size the software send queue to the TX descriptor ring. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	/* Start with no capabilities; they are enabled selectively below. */
	ifp->if_capabilities = ifp->if_capenable = 0;

#if __FreeBSD_version >= 800000
	/* Multiqueue tx functions */
	ifp->if_transmit = em_mq_start;
	ifp->if_qflush = em_qflush;
	adapter->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
#endif
	/* Hardware checksum offload: available on 82543 and newer MACs. */
	if (adapter->hw.mac.type >= e1000_82543) {
		int version_cap;
#if __FreeBSD_version < 700000
		version_cap = IFCAP_HWCSUM;
#else
		version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
#endif
		ifp->if_capabilities |= version_cap;
		ifp->if_capenable |= version_cap;
	}

#if __FreeBSD_version >= 700000
	/* Identify TSO capable adapters */
	if ((adapter->hw.mac.type > e1000_82544) &&
	    (adapter->hw.mac.type != e1000_82547))
		ifp->if_capabilities |= IFCAP_TSO4;
	/*
	 * By default only enable on PCI-E, this
	 * can be overriden by ifconfig.
	 */
	if (adapter->hw.mac.type >= e1000_82571)
		ifp->if_capenable |= IFCAP_TSO4;
#endif

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Enable All WOL methods by default */
	if (adapter->wol) {
		ifp->if_capabilities |= IFCAP_WOL;
		ifp->if_capenable |= IFCAP_WOL;
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    em_media_change, em_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		u_char fiber_type = IFM_1000_SX;	/* default type */

		/* 82545 fiber parts are advertised as 1000LX instead. */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		/* Copper: full 10/100 matrix, plus gigabit where the PHY allows. */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		/* 1000T media is not offered for the ife PHY. */
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
}
3174 
3175 
/*********************************************************************
 *
 *  Workaround for SmartSpeed on 82541 and 82547 controllers
 *
 *  Called periodically while link is down: if the PHY repeatedly
 *  reports a master/slave configuration fault while advertising
 *  1000FD, toggle CR_1000T_MS_ENABLE and restart autonegotiation.
 *
 **********************************************************************/
static void
em_smartspeed(struct adapter *adapter)
{
	u16 phy_tmp;

	/* Only relevant for IGP PHYs autonegotiating 1000FD with no link. */
	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
	    adapter->hw.mac.autoneg == 0 ||
	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&adapter->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			/* Clear manual MS config and restart autoneg. */
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&adapter->hw,
				    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.mac.autoneg &&
				   !e1000_copper_link_autoneg(&adapter->hw) &&
				   !e1000_read_phy_reg(&adapter->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					e1000_write_phy_reg(&adapter->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.mac.autoneg &&
		   !e1000_copper_link_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
3235 
3236 
3237 /*
3238  * Manage DMA'able memory.
3239  */
3240 static void
3241 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3242 {
3243 	if (error)
3244 		return;
3245 	*(bus_addr_t *) arg = segs[0].ds_addr;
3246 }
3247 
3248 static int
3249 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3250         struct em_dma_alloc *dma, int mapflags)
3251 {
3252 	int error;
3253 
3254 #if __FreeBSD_version >= 700000
3255 	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3256 #else
3257 	error = bus_dma_tag_create(NULL,		 /* parent */
3258 #endif
3259 				EM_DBA_ALIGN, 0,	/* alignment, bounds */
3260 				BUS_SPACE_MAXADDR,	/* lowaddr */
3261 				BUS_SPACE_MAXADDR,	/* highaddr */
3262 				NULL, NULL,		/* filter, filterarg */
3263 				size,			/* maxsize */
3264 				1,			/* nsegments */
3265 				size,			/* maxsegsize */
3266 				0,			/* flags */
3267 				NULL,			/* lockfunc */
3268 				NULL,			/* lockarg */
3269 				&dma->dma_tag);
3270 	if (error) {
3271 		device_printf(adapter->dev,
3272 		    "%s: bus_dma_tag_create failed: %d\n",
3273 		    __func__, error);
3274 		goto fail_0;
3275 	}
3276 
3277 	error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3278 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3279 	if (error) {
3280 		device_printf(adapter->dev,
3281 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3282 		    __func__, (uintmax_t)size, error);
3283 		goto fail_2;
3284 	}
3285 
3286 	dma->dma_paddr = 0;
3287 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3288 	    size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3289 	if (error || dma->dma_paddr == 0) {
3290 		device_printf(adapter->dev,
3291 		    "%s: bus_dmamap_load failed: %d\n",
3292 		    __func__, error);
3293 		goto fail_3;
3294 	}
3295 
3296 	return (0);
3297 
3298 fail_3:
3299 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3300 fail_2:
3301 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3302 	bus_dma_tag_destroy(dma->dma_tag);
3303 fail_0:
3304 	dma->dma_map = NULL;
3305 	dma->dma_tag = NULL;
3306 
3307 	return (error);
3308 }
3309 
3310 static void
3311 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3312 {
3313 	if (dma->dma_tag == NULL)
3314 		return;
3315 	if (dma->dma_map != NULL) {
3316 		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3317 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3318 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3319 		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3320 		dma->dma_map = NULL;
3321 	}
3322 	bus_dma_tag_destroy(dma->dma_tag);
3323 	dma->dma_tag = NULL;
3324 }
3325 
3326 
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire.
 *
 *  Creates the TX mbuf DMA tag, the tx_buffer array, and one DMA map
 *  per descriptor.  Returns 0 or an errno; on failure everything is
 *  released via em_free_transmit_structures().
 *
 **********************************************************************/
static int
em_allocate_transmit_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *tx_buffer;
	int error;

	/*
	 * Create DMA tags for tx descriptors
	 */
#if __FreeBSD_version >= 700000
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
#else
	if ((error = bus_dma_tag_create(NULL,		 /* parent */
#endif
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				EM_TSO_SIZE,		/* maxsize */
				EM_MAX_SCATTER,		/* nsegments */
				EM_TSO_SEG_SIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,		/* lockfunc */
				NULL,		/* lockarg */
				&adapter->txtag)) != 0) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	/* One em_buffer per TX descriptor, zeroed. */
	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->tx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	for (int i = 0; i < adapter->num_tx_desc; i++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
		/* -1 marks "no end-of-packet descriptor recorded yet". */
		tx_buffer->next_eop = -1;
	}

	return (0);
fail:
	/* Releases the tag, buffer area and any maps created so far. */
	em_free_transmit_structures(adapter);
	return (error);
}
3387 
3388 /*********************************************************************
3389  *
3390  *  (Re)Initialize transmit structures.
3391  *
3392  **********************************************************************/
3393 static void
3394 em_setup_transmit_structures(struct adapter *adapter)
3395 {
3396 	struct em_buffer *tx_buffer;
3397 
3398 	/* Clear the old ring contents */
3399 	bzero(adapter->tx_desc_base,
3400 	    (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3401 
3402 	/* Free any existing TX buffers */
3403 	for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3404 		tx_buffer = &adapter->tx_buffer_area[i];
3405 		bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3406 		    BUS_DMASYNC_POSTWRITE);
3407 		bus_dmamap_unload(adapter->txtag, tx_buffer->map);
3408 		m_freem(tx_buffer->m_head);
3409 		tx_buffer->m_head = NULL;
3410 		tx_buffer->next_eop = -1;
3411 	}
3412 
3413 	/* Reset state */
3414 	adapter->next_avail_tx_desc = 0;
3415 	adapter->next_tx_to_clean = 0;
3416 	adapter->num_tx_desc_avail = adapter->num_tx_desc;
3417 
3418 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3419 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3420 
3421 	return;
3422 }
3423 
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 *  Programs the TX descriptor ring base/length, head/tail pointers,
 *  inter-packet gap, interrupt delays and TCTL, then records the
 *  per-descriptor command bits in adapter->txd_cmd.
 *
 **********************************************************************/
static void
em_initialize_transmit_unit(struct adapter *adapter)
{
	u32	tctl, tarc, tipg = 0;
	u64	bus_addr;

	 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (u32)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	case e1000_80003es2lan:
		tipg = DEFAULT_82543_TIPG_IPGR1;
		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
		    E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* Fiber/serdes and copper need different base IPG values. */
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	/* TX interrupt delay (and absolute delay on 82540 and newer). */
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	/* Per-MAC TARC tweaks. */
	if ((adapter->hw.mac.type == e1000_82571) ||
	    (adapter->hw.mac.type == e1000_82572)) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= SPEED_MODE_BIT;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
	}

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* Multiple request support on 82571 and newer. */
	if (adapter->hw.mac.type >= e1000_82571)
		tctl |= E1000_TCTL_MULR;

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Only request descriptor-done interrupt delay when a delay is set. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
3513 
3514 /*********************************************************************
3515  *
3516  *  Free all transmit related data structures.
3517  *
3518  **********************************************************************/
3519 static void
3520 em_free_transmit_structures(struct adapter *adapter)
3521 {
3522 	struct em_buffer *tx_buffer;
3523 
3524 	INIT_DEBUGOUT("free_transmit_structures: begin");
3525 
3526 	if (adapter->tx_buffer_area != NULL) {
3527 		for (int i = 0; i < adapter->num_tx_desc; i++) {
3528 			tx_buffer = &adapter->tx_buffer_area[i];
3529 			if (tx_buffer->m_head != NULL) {
3530 				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3531 				    BUS_DMASYNC_POSTWRITE);
3532 				bus_dmamap_unload(adapter->txtag,
3533 				    tx_buffer->map);
3534 				m_freem(tx_buffer->m_head);
3535 				tx_buffer->m_head = NULL;
3536 			} else if (tx_buffer->map != NULL)
3537 				bus_dmamap_unload(adapter->txtag,
3538 				    tx_buffer->map);
3539 			if (tx_buffer->map != NULL) {
3540 				bus_dmamap_destroy(adapter->txtag,
3541 				    tx_buffer->map);
3542 				tx_buffer->map = NULL;
3543 			}
3544 		}
3545 	}
3546 	if (adapter->tx_buffer_area != NULL) {
3547 		free(adapter->tx_buffer_area, M_DEVBUF);
3548 		adapter->tx_buffer_area = NULL;
3549 	}
3550 	if (adapter->txtag != NULL) {
3551 		bus_dma_tag_destroy(adapter->txtag);
3552 		adapter->txtag = NULL;
3553 	}
3554 #if __FreeBSD_version >= 800000
3555 	if (adapter->br != NULL)
3556         	buf_ring_free(adapter->br, M_DEVBUF);
3557 #endif
3558 }
3559 
3560 /*********************************************************************
3561  *
3562  *  The offload context needs to be set when we transfer the first
3563  *  packet of a particular protocol (TCP/UDP). This routine has been
3564  *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3565  *
3566  *  Added back the old method of keeping the current context type
3567  *  and not setting if unnecessary, as this is reported to be a
3568  *  big performance win.  -jfv
3569  **********************************************************************/
3570 static void
3571 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3572     u32 *txd_upper, u32 *txd_lower)
3573 {
3574 	struct e1000_context_desc *TXD = NULL;
3575 	struct em_buffer *tx_buffer;
3576 	struct ether_vlan_header *eh;
3577 	struct ip *ip = NULL;
3578 	struct ip6_hdr *ip6;
3579 	int curr_txd, ehdrlen;
3580 	u32 cmd, hdr_len, ip_hlen;
3581 	u16 etype;
3582 	u8 ipproto;
3583 
3584 
3585 	cmd = hdr_len = ipproto = 0;
3586 	curr_txd = adapter->next_avail_tx_desc;
3587 
3588 	/*
3589 	 * Determine where frame payload starts.
3590 	 * Jump over vlan headers if already present,
3591 	 * helpful for QinQ too.
3592 	 */
3593 	eh = mtod(mp, struct ether_vlan_header *);
3594 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3595 		etype = ntohs(eh->evl_proto);
3596 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3597 	} else {
3598 		etype = ntohs(eh->evl_encap_proto);
3599 		ehdrlen = ETHER_HDR_LEN;
3600 	}
3601 
3602 	/*
3603 	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3604 	 * TODO: Support SCTP too when it hits the tree.
3605 	 */
3606 	switch (etype) {
3607 	case ETHERTYPE_IP:
3608 		ip = (struct ip *)(mp->m_data + ehdrlen);
3609 		ip_hlen = ip->ip_hl << 2;
3610 
3611 		/* Setup of IP header checksum. */
3612 		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3613 			/*
3614 			 * Start offset for header checksum calculation.
3615 			 * End offset for header checksum calculation.
3616 			 * Offset of place to put the checksum.
3617 			 */
3618 			TXD = (struct e1000_context_desc *)
3619 			    &adapter->tx_desc_base[curr_txd];
3620 			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3621 			TXD->lower_setup.ip_fields.ipcse =
3622 			    htole16(ehdrlen + ip_hlen);
3623 			TXD->lower_setup.ip_fields.ipcso =
3624 			    ehdrlen + offsetof(struct ip, ip_sum);
3625 			cmd |= E1000_TXD_CMD_IP;
3626 			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3627 		}
3628 
3629 		if (mp->m_len < ehdrlen + ip_hlen)
3630 			return;	/* failure */
3631 
3632 		hdr_len = ehdrlen + ip_hlen;
3633 		ipproto = ip->ip_p;
3634 
3635 		break;
3636 	case ETHERTYPE_IPV6:
3637 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3638 		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3639 
3640 		if (mp->m_len < ehdrlen + ip_hlen)
3641 			return;	/* failure */
3642 
3643 		/* IPv6 doesn't have a header checksum. */
3644 
3645 		hdr_len = ehdrlen + ip_hlen;
3646 		ipproto = ip6->ip6_nxt;
3647 
3648 		break;
3649 	default:
3650 		*txd_upper = 0;
3651 		*txd_lower = 0;
3652 		return;
3653 	}
3654 
3655 	switch (ipproto) {
3656 	case IPPROTO_TCP:
3657 		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3658 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3659 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3660 			/* no need for context if already set */
3661 			if (adapter->last_hw_offload == CSUM_TCP)
3662 				return;
3663 			adapter->last_hw_offload = CSUM_TCP;
3664 			/*
3665 			 * Start offset for payload checksum calculation.
3666 			 * End offset for payload checksum calculation.
3667 			 * Offset of place to put the checksum.
3668 			 */
3669 			TXD = (struct e1000_context_desc *)
3670 			    &adapter->tx_desc_base[curr_txd];
3671 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3672 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3673 			TXD->upper_setup.tcp_fields.tucso =
3674 			    hdr_len + offsetof(struct tcphdr, th_sum);
3675 			cmd |= E1000_TXD_CMD_TCP;
3676 		}
3677 		break;
3678 	case IPPROTO_UDP:
3679 	{
3680 		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3681 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3682 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3683 			/* no need for context if already set */
3684 			if (adapter->last_hw_offload == CSUM_UDP)
3685 				return;
3686 			adapter->last_hw_offload = CSUM_UDP;
3687 			/*
3688 			 * Start offset for header checksum calculation.
3689 			 * End offset for header checksum calculation.
3690 			 * Offset of place to put the checksum.
3691 			 */
3692 			TXD = (struct e1000_context_desc *)
3693 			    &adapter->tx_desc_base[curr_txd];
3694 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3695 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3696 			TXD->upper_setup.tcp_fields.tucso =
3697 			    hdr_len + offsetof(struct udphdr, uh_sum);
3698 		}
3699 		/* Fall Thru */
3700 	}
3701 	default:
3702 		break;
3703 	}
3704 
3705 	TXD->tcp_seg_setup.data = htole32(0);
3706 	TXD->cmd_and_length =
3707 	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3708 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
3709 	tx_buffer->m_head = NULL;
3710 	tx_buffer->next_eop = -1;
3711 
3712 	if (++curr_txd == adapter->num_tx_desc)
3713 		curr_txd = 0;
3714 
3715 	adapter->num_tx_desc_avail--;
3716 	adapter->next_avail_tx_desc = curr_txd;
3717 }
3718 
3719 
3720 #if __FreeBSD_version >= 700000
3721 /**********************************************************************
3722  *
3723  *  Setup work for hardware segmentation offload (TSO)
3724  *
3725  **********************************************************************/
/*
 * Build the TSO context descriptor for one outbound TCP frame.
 *
 * On success returns TRUE, consumes one TX descriptor for the context,
 * and sets *txd_upper/*txd_lower with the option/command bits the
 * caller must put in the subsequent data descriptors.  Returns FALSE
 * when the frame cannot be offloaded (short first mbuf, non-TCP,
 * unsupported ethertype, or IPv6 which is not supported yet).
 */
static bool
em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
   u32 *txd_lower)
{
	struct e1000_context_desc *TXD;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th;
	int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
	u16 etype;

	/*
	 * This function could/should be extended to support IP/IPv6
	 * fragmentation as well.  But as they say, one step at a time.
	 */

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;	/* -1 */

	/*
	 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		isip6 = 0;
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return FALSE;	/* 0 */
		/* Zero len/sum; hardware fills them per generated segment. */
		ip->ip_len = 0;
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
#if 1
		/* Seed the TCP checksum with the pseudo-header sum. */
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	case ETHERTYPE_IPV6:
		isip6 = 1;
		return FALSE;			/* Not supported yet. */
		/* NOTE: code below is unreachable until IPv6 TSO lands. */
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return FALSE;	/* 0 */
		ip6->ip6_plen = 0;
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
#if 0
		th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
		    htons(IPPROTO_TCP));	/* XXX: function notyet. */
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	default:
		return FALSE;
	}
	/* Total header length: Ethernet + IP + TCP (with options). */
	hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);

	*txd_lower = (E1000_TXD_CMD_DEXT |	/* Extended descr type */
		      E1000_TXD_DTYP_D |	/* Data descr type */
		      E1000_TXD_CMD_TSE);	/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
		      E1000_TXD_POPTS_TXSM) << 8;

	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];

	/* IPv6 doesn't have a header checksum. */
	if (!isip6) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
		TXD->lower_setup.ip_fields.ipcse =
		    htole16(ehdrlen + ip_hlen - 1);
		TXD->lower_setup.ip_fields.ipcso =
		    ehdrlen + offsetof(struct ip, ip_sum);
	}
	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation (0 = to end of frame).
	 * Offset of place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss =
	    ehdrlen + ip_hlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
	TXD->tcp_seg_setup.fields.hdr_len = hdr_len;

	TXD->cmd_and_length = htole32(adapter->txd_cmd |
				E1000_TXD_CMD_DEXT |	/* Extended descr */
				E1000_TXD_CMD_TSE |	/* TSE context */
				(isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
				E1000_TXD_CMD_TCP |	/* Do TCP checksum */
				(mp->m_pkthdr.len - (hdr_len))); /* Total len */

	/* Context slot carries no mbuf and is not an end-of-packet. */
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	/* Account for the consumed descriptor and advance (with wrap). */
	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	adapter->tx_tso = TRUE;

	return TRUE;
}
3869 
3870 #endif /* __FreeBSD_version >= 700000 */
3871 
3872 /**********************************************************************
3873  *
3874  *  Examine each tx_buffer in the used queue. If the hardware is done
3875  *  processing the packet then free associated resources. The
3876  *  tx_buffer is put back on the free queue.
3877  *
3878  **********************************************************************/
/*
 * Reclaim completed TX descriptors.  Walks packet-by-packet from
 * next_tx_to_clean, and for each packet whose EOP descriptor has the
 * DD (descriptor done) bit set, frees the mbufs, unloads the DMA maps
 * and returns the descriptors to the free pool.  Must be called with
 * the TX lock held.
 */
static void
em_txeof(struct adapter *adapter)
{
        int first, last, done, num_avail;
        struct em_buffer *tx_buffer;
        struct e1000_tx_desc   *tx_desc, *eop_desc;
	struct ifnet   *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

	/* Ring already fully reclaimed: nothing to do. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        first = adapter->next_tx_to_clean;
        tx_desc = &adapter->tx_desc_base[first];
        tx_buffer = &adapter->tx_buffer_area[first];
	last = tx_buffer->next_eop;
        eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
 		last = 0;
	done = last;

        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);

        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
                	tx_desc->upper.data = 0;
                	tx_desc->lower.data = 0;
                	tx_desc->buffer_addr = 0;
                	++num_avail;

			/* Only the slot holding the mbuf has DMA state. */
			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

                        	m_freem(tx_buffer->m_head);
                        	tx_buffer->m_head = NULL;
                	}
			tx_buffer->next_eop = -1;
			/* Progress made; push the watchdog timestamp out. */
			adapter->watchdog_time = ticks;

	                if (++first == adapter->num_tx_desc)
				first = 0;

	                tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
        		eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
        }
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->next_tx_to_clean = first;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to
         * tell the stack that it is OK to send packets.
         * If there are no pending descriptors, clear the watchdog.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc) {
			adapter->watchdog_check = FALSE;
        		adapter->num_tx_desc_avail = num_avail;
			return;
		}
        }

        adapter->num_tx_desc_avail = num_avail;
	return;
}
3972 
3973 /*********************************************************************
3974  *
3975  *  When Link is lost sometimes there is work still in the TX ring
3976  *  which may result in a watchdog, rather than allow that we do an
3977  *  attempted cleanup and then reinit here. Note that this has been
 *  seen mostly with fiber adapters.
3979  *
3980  **********************************************************************/
3981 static void
3982 em_tx_purge(struct adapter *adapter)
3983 {
3984 	if ((!adapter->link_active) && (adapter->watchdog_check)) {
3985 		EM_TX_LOCK(adapter);
3986 		em_txeof(adapter);
3987 		EM_TX_UNLOCK(adapter);
3988 		if (adapter->watchdog_check) /* Still outstanding? */
3989 			em_init_locked(adapter);
3990 	}
3991 }
3992 
3993 /*********************************************************************
3994  *
3995  *  Get a buffer from system mbuf buffer pool.
3996  *
3997  **********************************************************************/
/*
 * Refill RX ring slot 'i' with a fresh mbuf cluster.
 *
 * The new mbuf is DMA-loaded through the spare map first; only after
 * the load succeeds are the slot's map and the spare map swapped, so
 * a failure leaves the ring slot completely untouched.  Returns 0 on
 * success or an errno (ENOBUFS on cluster shortage, or the busdma
 * load error).
 */
static int
em_get_buf(struct adapter *adapter, int i)
{
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	struct em_buffer	*rx_buffer;
	int			error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		adapter->mbuf_cluster_failed++;
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Align the IP header when the frame still fits in the cluster. */
	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
	    adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_free(m);
		return (error);
	}

	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));

	rx_buffer = &adapter->rx_buffer_area[i];
	if (rx_buffer->m_head != NULL)
		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);

	/* Swap the (now loaded) spare map into the slot; old map
	 * becomes the new spare. */
	map = rx_buffer->map;
	rx_buffer->map = adapter->rx_sparemap;
	adapter->rx_sparemap = map;
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
	rx_buffer->m_head = m;

	/* Point the hardware descriptor at the new cluster. */
	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
	return (0);
}
4044 
4045 /*********************************************************************
4046  *
4047  *  Allocate memory for rx_buffer structures. Since we use one
4048  *  rx_buffer per received packet, the maximum number of rx_buffer's
4049  *  that we'll need is equal to the number of receive descriptors
4050  *  that we've allocated.
4051  *
4052  **********************************************************************/
/*
 * Allocate the software-side RX state: the em_buffer array, the RX DMA
 * tag (single segment, MCLBYTES max), the spare map used by
 * em_get_buf(), and one DMA map per descriptor.  On any failure all
 * partially-created state is torn down via em_free_receive_structures().
 * Returns 0 or an errno.
 */
static int
em_allocate_receive_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *rx_buffer;
	int i, error;

	adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->rx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		return (ENOMEM);
	}

#if __FreeBSD_version >= 700000
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
#else
	error = bus_dma_tag_create(NULL,		 /* parent */
#endif
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MCLBYTES,		/* maxsize */
				1,			/* nsegments */
				MCLBYTES,		/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&adapter->rxtag);
	if (error) {
		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
		    __func__, error);
		goto fail;
	}

	/* Create the spare map (used by getbuf) */
	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
	     &adapter->rx_sparemap);
	if (error) {
		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
		    __func__, error);
		goto fail;
	}

	/* One DMA map per RX descriptor slot. */
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
		    &rx_buffer->map);
		if (error) {
			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
			    __func__, error);
			goto fail;
		}
	}

	return (0);

fail:
	em_free_receive_structures(adapter);
	return (error);
}
4115 
4116 /*********************************************************************
4117  *
4118  *  (Re)initialize receive structures.
4119  *
4120  **********************************************************************/
4121 static int
4122 em_setup_receive_structures(struct adapter *adapter)
4123 {
4124 	struct em_buffer *rx_buffer;
4125 	int i, error;
4126 
4127 	/* Reset descriptor ring */
4128 	bzero(adapter->rx_desc_base,
4129 	    (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
4130 
4131 	/* Free current RX buffers. */
4132 	rx_buffer = adapter->rx_buffer_area;
4133 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4134 		if (rx_buffer->m_head != NULL) {
4135 			bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4136 			    BUS_DMASYNC_POSTREAD);
4137 			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4138 			m_freem(rx_buffer->m_head);
4139 			rx_buffer->m_head = NULL;
4140 		}
4141         }
4142 
4143 	/* Allocate new ones. */
4144 	for (i = 0; i < adapter->num_rx_desc; i++) {
4145 		error = em_get_buf(adapter, i);
4146 		if (error)
4147                         return (error);
4148 	}
4149 
4150 	/* Setup our descriptor pointers */
4151 	adapter->next_rx_desc_to_check = 0;
4152 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4153 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4154 
4155 	return (0);
4156 }
4157 
4158 /*********************************************************************
4159  *
4160  *  Enable receive unit.
4161  *
4162  **********************************************************************/
4163 #define MAX_INTS_PER_SEC	8000
4164 #define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
4165 
/*
 * Program the hardware receive unit: interrupt moderation, descriptor
 * ring base/length, RCTL buffer sizing and filter bits, RX checksum
 * offload, then enable receives and publish the head/tail pointers.
 */
static void
em_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		/* Absolute RX interrupt delay. */
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/*
	** When using MSIX interrupts we need to throttle
	** using the EITR register (82574 only)
	*/
	if (adapter->msix)
		for (int i = 0; i < 4; i++)
			E1000_WRITE_REG(&adapter->hw,
			    E1000_EITR_82574(i), DEFAULT_ITR);

	/* Disable accelerated acknowledge */
	if (adapter->hw.mac.type == e1000_82574)
		E1000_WRITE_REG(&adapter->hw,
		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* 82543 TBI workaround may require accepting bad packets. */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Select RX buffer size; >2048 needs the BSEX size extension. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable tracks the configured MTU. */
	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/*
	** XXX TEMPORARY WORKAROUND: on some systems with 82573
	** long latencies are observed, like Lenovo X60. This
	** change eliminates the problem, but since having positive
	** values in RDTR is a known source of problems on other
	** platforms another solution is being sought.
	*/
	if (adapter->hw.mac.type == e1000_82573)
		E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
4283 
4284 /*********************************************************************
4285  *
4286  *  Free receive related data structures.
4287  *
4288  **********************************************************************/
/*
 * Tear down all software RX state: the spare map, every per-slot mbuf
 * and DMA map, the em_buffer array, and finally the RX DMA tag.  Safe
 * to call on partially-initialized state (used as the failure path of
 * em_allocate_receive_structures()).
 */
static void
em_free_receive_structures(struct adapter *adapter)
{
	struct em_buffer *rx_buffer;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (adapter->rx_sparemap) {
		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
		adapter->rx_sparemap = NULL;
	}

	/* Cleanup any existing buffers */
	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->m_head != NULL) {
				/* Loaded slot: sync, unload, free the mbuf. */
				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			} else if (rx_buffer->map != NULL)
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
			/* Maps must be unloaded before being destroyed. */
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}
	}

	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}

	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
}
4334 
4335 /*********************************************************************
4336  *
4337  *  This routine executes in interrupt context. It replenishes
4338  *  the mbufs in the descriptor and sends data which has been
4339  *  dma'ed into host memory to upper layer.
4340  *
4341  *  We loop at most count times if count is > 0, or until done if
4342  *  count < 0.
4343  *
4344  *  For polling we also now return the number of cleaned packets
4345  *********************************************************************/
/*
 * Harvest completed RX descriptors.  Chains multi-descriptor frames
 * through adapter->fmp/lmp, replenishes each consumed slot via
 * em_get_buf() (reusing the old cluster on allocation failure), and
 * hands completed frames to the stack with the RX lock dropped.
 * Returns the number of frames passed up (used by polling).
 */
static int
em_rxeof(struct adapter *adapter, int count)
{
	struct ifnet	*ifp = adapter->ifp;
	struct mbuf	*mp;
	u8		status, accept_frame = 0, eop = 0;
	u16 		len, desc_len, prev_len_adj;
	int		i, rx_sent = 0;
	struct e1000_rx_desc   *current_desc;

	EM_RX_LOCK(adapter);
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Nothing completed: bail early. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		EM_RX_UNLOCK(adapter);
		return (rx_sent);
	}

	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (count != 0) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		status = current_desc->status;
		if (status & E1000_RXD_STAT_EOP) {
			/* count is only consumed per completed frame. */
			count--;
			eop = 1;
			/* Strip the CRC; if the final fragment was shorter
			 * than the CRC, trim the remainder from the
			 * previous fragment via prev_len_adj. */
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u8	last_byte;
			u32	pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			/* 82543 TBI workaround: a frame ending in a lone
			 * carrier-extend error may still be acceptable. */
			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, status,
			    current_desc->errors, pkt_len, last_byte,
			    adapter->min_frame_size, adapter->max_frame_size)) {
				e1000_tbi_adjust_stats_82543(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac.addr,
				    adapter->max_frame_size);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/* Replenish the slot before taking the old mbuf;
			 * on failure drop the frame and keep the cluster. */
			if (em_get_buf(adapter, i) != 0) {
				ifp->if_iqdrops++;
				goto discard;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				em_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (adapter->max_frame_size >
				    (MCLBYTES - ETHER_ALIGN) &&
				    em_fixup_rx(adapter) != 0)
					goto skip;
#endif
				if (status & E1000_RXD_STAT_VP) {
#if __FreeBSD_version < 700000
					VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
#else
					adapter->fmp->m_pkthdr.ether_vtag =
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK);
					adapter->fmp->m_flags |= M_VLANTAG;
#endif
				}
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			ifp->if_ierrors++;
discard:
			/* Reuse loaded DMA map and just update mbuf chain */
			mp = adapter->rx_buffer_area[i].m_head;
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_next = NULL;
			if (adapter->max_frame_size <=
			    (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
			/* Drop any partial chain accumulated so far. */
			if (adapter->fmp != NULL) {
				m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			m = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		/* Call into the stack with the RX lock dropped; reload
		 * the scan index afterwards in case it moved. */
		if (m != NULL) {
			adapter->next_rx_desc_to_check = i;
			EM_RX_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_RX_LOCK(adapter);
			rx_sent++;
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
	EM_RX_UNLOCK(adapter);
	return (rx_sent);
}
4526 
4527 #ifndef __NO_STRICT_ALIGNMENT
4528 /*
4529  * When jumbo frames are enabled we should realign entire payload on
 * architectures with strict alignment. This is a serious design mistake of 8254x
4531  * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4532  * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
 * payload. On architectures without strict alignment restrictions 8254x still
4534  * performs unaligned memory access which would reduce the performance too.
4535  * To avoid copying over an entire frame to align, we allocate a new mbuf and
4536  * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4537  * existing mbuf chain.
4538  *
 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
4540  * not used at all on architectures with strict alignment.
4541  */
/*
 * Realign the frame in adapter->fmp so the payload after the Ethernet
 * header is naturally aligned (strict-alignment architectures only).
 * If the first mbuf has room, the data is shifted in place; otherwise
 * the Ethernet header is copied into a newly prepended mbuf.  Returns
 * 0 on success or ENOMEM (frame dropped, fmp cleared) when no mbuf
 * could be allocated.
 */
static int
em_fixup_rx(struct adapter *adapter)
{
	struct mbuf *m, *n;
	int error;

	error = 0;
	m = adapter->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/* Room available: shift the whole fragment up in place. */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		/* No room: prepend a new mbuf holding just the header. */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			adapter->fmp = n;
		} else {
			adapter->dropped_pkts++;
			m_freem(adapter->fmp);
			adapter->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
4573 #endif
4574 
4575 /*********************************************************************
4576  *
4577  *  Verify that the hardware indicated that the checksum is valid.
4578  *  Inform the stack about the status of checksum so that stack
4579  *  doesn't spend time verifying the checksum.
4580  *
4581  *********************************************************************/
4582 static void
4583 em_receive_checksum(struct adapter *adapter,
4584 	    struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4585 {
4586 	/* 82543 or newer only */
4587 	if ((adapter->hw.mac.type < e1000_82543) ||
4588 	    /* Ignore Checksum bit is set */
4589 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4590 		mp->m_pkthdr.csum_flags = 0;
4591 		return;
4592 	}
4593 
4594 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4595 		/* Did it pass? */
4596 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4597 			/* IP Checksum Good */
4598 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4599 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4600 
4601 		} else {
4602 			mp->m_pkthdr.csum_flags = 0;
4603 		}
4604 	}
4605 
4606 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4607 		/* Did it pass? */
4608 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4609 			mp->m_pkthdr.csum_flags |=
4610 			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4611 			mp->m_pkthdr.csum_data = htons(0xffff);
4612 		}
4613 	}
4614 }
4615 
4616 #if __FreeBSD_version >= 700029
4617 /*
4618  * This routine is run via an vlan
4619  * config EVENT
4620  */
4621 static void
4622 em_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4623 {
4624 	struct adapter	*adapter = ifp->if_softc;
4625 	u32		index, bit;
4626 
4627 	if (ifp->if_softc !=  arg)   /* Not our event */
4628 		return;
4629 
4630 	if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
4631                 return;
4632 
4633 	index = (vtag >> 5) & 0x7F;
4634 	bit = vtag & 0x1F;
4635 	em_shadow_vfta[index] |= (1 << bit);
4636 	++adapter->num_vlans;
4637 	/* Re-init to load the changes */
4638 	em_init(adapter);
4639 }
4640 
4641 /*
4642  * This routine is run via an vlan
4643  * unconfig EVENT
4644  */
4645 static void
4646 em_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4647 {
4648 	struct adapter	*adapter = ifp->if_softc;
4649 	u32		index, bit;
4650 
4651 	if (ifp->if_softc !=  arg)
4652 		return;
4653 
4654 	if ((vtag == 0) || (vtag > 4095))       /* Invalid */
4655                 return;
4656 
4657 	index = (vtag >> 5) & 0x7F;
4658 	bit = vtag & 0x1F;
4659 	em_shadow_vfta[index] &= ~(1 << bit);
4660 	--adapter->num_vlans;
4661 	/* Re-init to load the changes */
4662 	em_init(adapter);
4663 }
4664 
/*
 * Reload hardware VLAN state after a soft reset: repopulate the VFTA
 * from the shadow table, enable VLAN tag stripping (CTRL.VME) and the
 * VLAN filter (RCTL.VFE), and bump RLPML to allow for the tag.
 */
static void
em_setup_vlan_hw_support(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32             reg;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
                return;

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < EM_VFTA_SIZE; i++)
                if (em_shadow_vfta[i] != 0)
			E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
                            i, em_shadow_vfta[i]);

	/* Enable VLAN tag stripping on receive. */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	/* Enable the Filter Table */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* Update the frame size */
	E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
	    adapter->max_frame_size + VLAN_TAG_SIZE);
}
4703 #endif
4704 
4705 static void
4706 em_enable_intr(struct adapter *adapter)
4707 {
4708 	struct e1000_hw *hw = &adapter->hw;
4709 	u32 ims_mask = IMS_ENABLE_MASK;
4710 
4711 	if (adapter->msix) {
4712 		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4713 		ims_mask |= EM_MSIX_MASK;
4714 	}
4715 	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
4716 }
4717 
4718 static void
4719 em_disable_intr(struct adapter *adapter)
4720 {
4721 	struct e1000_hw *hw = &adapter->hw;
4722 
4723 	if (adapter->msix)
4724 		E1000_WRITE_REG(hw, EM_EIAC, 0);
4725 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4726 }
4727 
4728 /*
4729  * Bit of a misnomer, what this really means is
4730  * to enable OS management of the system... aka
4731  * to disable special hardware management features
4732  */
4733 static void
4734 em_init_manageability(struct adapter *adapter)
4735 {
4736 	/* A shared code workaround */
4737 #define E1000_82542_MANC2H E1000_MANC2H
4738 	if (adapter->has_manage) {
4739 		int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
4740 		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4741 
4742 		/* disable hardware interception of ARP */
4743 		manc &= ~(E1000_MANC_ARP_EN);
4744 
4745                 /* enable receiving management packets to the host */
4746                 if (adapter->hw.mac.type >= e1000_82571) {
4747 			manc |= E1000_MANC_EN_MNG2HOST;
4748 #define E1000_MNG2HOST_PORT_623 (1 << 5)
4749 #define E1000_MNG2HOST_PORT_664 (1 << 6)
4750 			manc2h |= E1000_MNG2HOST_PORT_623;
4751 			manc2h |= E1000_MNG2HOST_PORT_664;
4752 			E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
4753 		}
4754 
4755 		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4756 	}
4757 }
4758 
4759 /*
4760  * Give control back to hardware management
4761  * controller if there is one.
4762  */
4763 static void
4764 em_release_manageability(struct adapter *adapter)
4765 {
4766 	if (adapter->has_manage) {
4767 		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4768 
4769 		/* re-enable hardware interception of ARP */
4770 		manc |= E1000_MANC_ARP_EN;
4771 
4772 		if (adapter->hw.mac.type >= e1000_82571)
4773 			manc &= ~E1000_MANC_EN_MNG2HOST;
4774 
4775 		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4776 	}
4777 }
4778 
4779 /*
4780  * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4781  * For ASF and Pass Through versions of f/w this means
4782  * that the driver is loaded. For AMT version type f/w
4783  * this means that the network i/f is open.
4784  */
4785 static void
4786 em_get_hw_control(struct adapter *adapter)
4787 {
4788 	u32 ctrl_ext, swsm;
4789 
4790 	if (adapter->hw.mac.type == e1000_82573) {
4791 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4792 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4793 		    swsm | E1000_SWSM_DRV_LOAD);
4794 		return;
4795 	}
4796 	/* else */
4797 	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4798 	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4799 	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4800 	return;
4801 }
4802 
4803 /*
4804  * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4805  * For ASF and Pass Through versions of f/w this means that
4806  * the driver is no longer loaded. For AMT versions of the
4807  * f/w this means that the network i/f is closed.
4808  */
4809 static void
4810 em_release_hw_control(struct adapter *adapter)
4811 {
4812 	u32 ctrl_ext, swsm;
4813 
4814 	if (!adapter->has_manage)
4815 		return;
4816 
4817 	if (adapter->hw.mac.type == e1000_82573) {
4818 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4819 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4820 		    swsm & ~E1000_SWSM_DRV_LOAD);
4821 		return;
4822 	}
4823 	/* else */
4824 	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4825 	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4826 	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4827 	return;
4828 }
4829 
4830 static int
4831 em_is_valid_ether_addr(u8 *addr)
4832 {
4833 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4834 
4835 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4836 		return (FALSE);
4837 	}
4838 
4839 	return (TRUE);
4840 }
4841 
4842 /*
4843 ** Parse the interface capabilities with regard
4844 ** to both system management and wake-on-lan for
4845 ** later use.
4846 */
4847 static void
4848 em_get_wakeup(device_t dev)
4849 {
4850 	struct adapter	*adapter = device_get_softc(dev);
4851 	u16		eeprom_data = 0, device_id, apme_mask;
4852 
4853 	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
4854 	apme_mask = EM_EEPROM_APME;
4855 
4856 	switch (adapter->hw.mac.type) {
4857 	case e1000_82542:
4858 	case e1000_82543:
4859 		break;
4860 	case e1000_82544:
4861 		e1000_read_nvm(&adapter->hw,
4862 		    NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
4863 		apme_mask = EM_82544_APME;
4864 		break;
4865 	case e1000_82573:
4866 	case e1000_82583:
4867 		adapter->has_amt = TRUE;
4868 		/* Falls thru */
4869 	case e1000_82546:
4870 	case e1000_82546_rev_3:
4871 	case e1000_82571:
4872 	case e1000_82572:
4873 	case e1000_80003es2lan:
4874 		if (adapter->hw.bus.func == 1) {
4875 			e1000_read_nvm(&adapter->hw,
4876 			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4877 			break;
4878 		} else
4879 			e1000_read_nvm(&adapter->hw,
4880 			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4881 		break;
4882 	case e1000_ich8lan:
4883 	case e1000_ich9lan:
4884 	case e1000_ich10lan:
4885 	case e1000_pchlan:
4886 		apme_mask = E1000_WUC_APME;
4887 		adapter->has_amt = TRUE;
4888 		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
4889 		break;
4890 	default:
4891 		e1000_read_nvm(&adapter->hw,
4892 		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4893 		break;
4894 	}
4895 	if (eeprom_data & apme_mask)
4896 		adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4897 	/*
4898          * We have the eeprom settings, now apply the special cases
4899          * where the eeprom may be wrong or the board won't support
4900          * wake on lan on a particular port
4901 	 */
4902 	device_id = pci_get_device(dev);
4903         switch (device_id) {
4904 	case E1000_DEV_ID_82546GB_PCIE:
4905 		adapter->wol = 0;
4906 		break;
4907 	case E1000_DEV_ID_82546EB_FIBER:
4908 	case E1000_DEV_ID_82546GB_FIBER:
4909 	case E1000_DEV_ID_82571EB_FIBER:
4910 		/* Wake events only supported on port A for dual fiber
4911 		 * regardless of eeprom setting */
4912 		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4913 		    E1000_STATUS_FUNC_1)
4914 			adapter->wol = 0;
4915 		break;
4916 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4917 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
4918 	case E1000_DEV_ID_82571EB_QUAD_FIBER:
4919 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
4920                 /* if quad port adapter, disable WoL on all but port A */
4921 		if (global_quad_port_a != 0)
4922 			adapter->wol = 0;
4923 		/* Reset for multiple quad port adapters */
4924 		if (++global_quad_port_a == 4)
4925 			global_quad_port_a = 0;
4926                 break;
4927 	}
4928 	return;
4929 }
4930 
4931 
4932 /*
4933  * Enable PCI Wake On Lan capability
4934  */
4935 void
4936 em_enable_wakeup(device_t dev)
4937 {
4938 	struct adapter	*adapter = device_get_softc(dev);
4939 	struct ifnet	*ifp = adapter->ifp;
4940 	u32		pmc, ctrl, ctrl_ext, rctl;
4941 	u16     	status;
4942 
4943 	if ((pci_find_extcap(dev, PCIY_PMG, &pmc) != 0))
4944 		return;
4945 
4946 	/* Advertise the wakeup capability */
4947 	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4948 	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4949 	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4950 	E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4951 
4952 	/* ICH workaround code */
4953 	if ((adapter->hw.mac.type == e1000_ich8lan) ||
4954 	    (adapter->hw.mac.type == e1000_pchlan) ||
4955 	    (adapter->hw.mac.type == e1000_ich9lan) ||
4956 	    (adapter->hw.mac.type == e1000_ich10lan)) {
4957 		e1000_disable_gig_wol_ich8lan(&adapter->hw);
4958 		e1000_hv_phy_powerdown_workaround_ich8lan(&adapter->hw);
4959 	}
4960 
4961 	/* Keep the laser running on Fiber adapters */
4962 	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4963 	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4964 		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4965 		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4966 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4967 	}
4968 
4969 	/*
4970 	** Determine type of Wakeup: note that wol
4971 	** is set with all bits on by default.
4972 	*/
4973 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4974 		adapter->wol &= ~E1000_WUFC_MAG;
4975 
4976 	if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4977 		adapter->wol &= ~E1000_WUFC_MC;
4978 	else {
4979 		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4980 		rctl |= E1000_RCTL_MPE;
4981 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4982 	}
4983 
4984 	if (adapter->hw.mac.type == e1000_pchlan) {
4985 		if (em_enable_phy_wakeup(adapter))
4986 			return;
4987 	} else {
4988 		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4989 		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
4990 	}
4991 
4992 	if (adapter->hw.phy.type == e1000_phy_igp_3)
4993 		e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
4994 
4995         /* Request PME */
4996         status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4997 	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4998 	if (ifp->if_capenable & IFCAP_WOL)
4999 		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
5000         pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
5001 
5002 	return;
5003 }
5004 
5005 /*
5006 ** WOL in the newer chipset interfaces (pchlan)
5007 ** require thing to be copied into the phy
5008 */
5009 static int
5010 em_enable_phy_wakeup(struct adapter *adapter)
5011 {
5012 	struct e1000_hw *hw = &adapter->hw;
5013 	u32 mreg, ret = 0;
5014 	u16 preg;
5015 
5016 	/* copy MAC RARs to PHY RARs */
5017 	for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
5018 		mreg = E1000_READ_REG(hw, E1000_RAL(i));
5019 		e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
5020 		e1000_write_phy_reg(hw, BM_RAR_M(i),
5021 		    (u16)((mreg >> 16) & 0xFFFF));
5022 		mreg = E1000_READ_REG(hw, E1000_RAH(i));
5023 		e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
5024 		e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
5025 		    (u16)((mreg >> 16) & 0xFFFF));
5026 	}
5027 
5028 	/* copy MAC MTA to PHY MTA */
5029 	for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5030 		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5031 		e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
5032 		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
5033 		    (u16)((mreg >> 16) & 0xFFFF));
5034 	}
5035 
5036 	/* configure PHY Rx Control register */
5037 	e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
5038 	mreg = E1000_READ_REG(hw, E1000_RCTL);
5039 	if (mreg & E1000_RCTL_UPE)
5040 		preg |= BM_RCTL_UPE;
5041 	if (mreg & E1000_RCTL_MPE)
5042 		preg |= BM_RCTL_MPE;
5043 	preg &= ~(BM_RCTL_MO_MASK);
5044 	if (mreg & E1000_RCTL_MO_3)
5045 		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5046 				<< BM_RCTL_MO_SHIFT);
5047 	if (mreg & E1000_RCTL_BAM)
5048 		preg |= BM_RCTL_BAM;
5049 	if (mreg & E1000_RCTL_PMCF)
5050 		preg |= BM_RCTL_PMCF;
5051 	mreg = E1000_READ_REG(hw, E1000_CTRL);
5052 	if (mreg & E1000_CTRL_RFCE)
5053 		preg |= BM_RCTL_RFCE;
5054 	e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
5055 
5056 	/* enable PHY wakeup in MAC register */
5057 	E1000_WRITE_REG(hw, E1000_WUC,
5058 	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5059 	E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
5060 
5061 	/* configure and enable PHY wakeup in PHY registers */
5062 	e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
5063 	e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5064 
5065 	/* activate PHY wakeup */
5066 	ret = hw->phy.ops.acquire(hw);
5067 	if (ret) {
5068 		printf("Could not acquire PHY\n");
5069 		return ret;
5070 	}
5071 	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
5072 	                         (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
5073 	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
5074 	if (ret) {
5075 		printf("Could not read PHY page 769\n");
5076 		goto out;
5077 	}
5078 	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5079 	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
5080 	if (ret)
5081 		printf("Could not set PHY Host Wakeup bit\n");
5082 out:
5083 	hw->phy.ops.release(hw);
5084 
5085 	return ret;
5086 }
5087 
5088 
5089 /*********************************************************************
5090 * 82544 Coexistence issue workaround.
5091 *    There are 2 issues.
5092 *       1. Transmit Hang issue.
5093 *    To detect this issue, following equation can be used...
5094 *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5095 *	  If SUM[3:0] is in between 1 to 4, we will have this issue.
5096 *
5097 *       2. DAC issue.
5098 *    To detect this issue, following equation can be used...
5099 *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5100 *	  If SUM[3:0] is in between 9 to c, we will have this issue.
5101 *
5102 *
5103 *    WORKAROUND:
5104 *	  Make sure we do not have ending address
5105 *	  as 1,2,3,4(Hang) or 9,a,b,c (DAC)
5106 *
5107 *************************************************************************/
5108 static u32
5109 em_fill_descriptors (bus_addr_t address, u32 length,
5110 		PDESC_ARRAY desc_array)
5111 {
5112 	u32 safe_terminator;
5113 
5114 	/* Since issue is sensitive to length and address.*/
5115 	/* Let us first check the address...*/
5116 	if (length <= 4) {
5117 		desc_array->descriptor[0].address = address;
5118 		desc_array->descriptor[0].length = length;
5119 		desc_array->elements = 1;
5120 		return (desc_array->elements);
5121 	}
5122 	safe_terminator = (u32)((((u32)address & 0x7) +
5123 	    (length & 0xF)) & 0xF);
5124 	/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
5125 	if (safe_terminator == 0   ||
5126 	(safe_terminator > 4   &&
5127 	safe_terminator < 9)   ||
5128 	(safe_terminator > 0xC &&
5129 	safe_terminator <= 0xF)) {
5130 		desc_array->descriptor[0].address = address;
5131 		desc_array->descriptor[0].length = length;
5132 		desc_array->elements = 1;
5133 		return (desc_array->elements);
5134 	}
5135 
5136 	desc_array->descriptor[0].address = address;
5137 	desc_array->descriptor[0].length = length - 4;
5138 	desc_array->descriptor[1].address = address + (length - 4);
5139 	desc_array->descriptor[1].length = 4;
5140 	desc_array->elements = 2;
5141 	return (desc_array->elements);
5142 }
5143 
5144 /**********************************************************************
5145  *
5146  *  Update the board statistics counters.
5147  *
5148  **********************************************************************/
5149 static void
5150 em_update_stats_counters(struct adapter *adapter)
5151 {
5152 	struct ifnet   *ifp;
5153 
5154 	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
5155 	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
5156 		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
5157 		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
5158 	}
5159 	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
5160 	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
5161 	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
5162 	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
5163 
5164 	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
5165 	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
5166 	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
5167 	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
5168 	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
5169 	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
5170 	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
5171 	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
5172 	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
5173 	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
5174 	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
5175 	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
5176 	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
5177 	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
5178 	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
5179 	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
5180 	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
5181 	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
5182 	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
5183 	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
5184 
5185 	/* For the 64-bit byte counters the low dword must be read first. */
5186 	/* Both registers clear on the read of the high dword */
5187 
5188 	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
5189 	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
5190 
5191 	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
5192 	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
5193 	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
5194 	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
5195 	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
5196 
5197 	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
5198 	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
5199 
5200 	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
5201 	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
5202 	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
5203 	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
5204 	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
5205 	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
5206 	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
5207 	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
5208 	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
5209 	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
5210 
5211 	if (adapter->hw.mac.type >= e1000_82543) {
5212 		adapter->stats.algnerrc +=
5213 		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
5214 		adapter->stats.rxerrc +=
5215 		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
5216 		adapter->stats.tncrs +=
5217 		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
5218 		adapter->stats.cexterr +=
5219 		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
5220 		adapter->stats.tsctc +=
5221 		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
5222 		adapter->stats.tsctfc +=
5223 		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
5224 	}
5225 	ifp = adapter->ifp;
5226 
5227 	ifp->if_collisions = adapter->stats.colc;
5228 
5229 	/* Rx Errors */
5230 	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
5231 	    adapter->stats.crcerrs + adapter->stats.algnerrc +
5232 	    adapter->stats.ruc + adapter->stats.roc +
5233 	    adapter->stats.mpc + adapter->stats.cexterr;
5234 
5235 	/* Tx Errors */
5236 	ifp->if_oerrors = adapter->stats.ecol +
5237 	    adapter->stats.latecol + adapter->watchdog_events;
5238 }
5239 
5240 
5241 /**********************************************************************
5242  *
5243  *  This routine is called only when em_display_debug_stats is enabled.
5244  *  This routine provides a way to take a look at important statistics
5245  *  maintained by the driver and hardware.
5246  *
5247  **********************************************************************/
5248 static void
5249 em_print_debug_info(struct adapter *adapter)
5250 {
5251 	device_t dev = adapter->dev;
5252 	u8 *hw_addr = adapter->hw.hw_addr;
5253 
5254 	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
5255 	device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
5256 	    E1000_READ_REG(&adapter->hw, E1000_CTRL),
5257 	    E1000_READ_REG(&adapter->hw, E1000_RCTL));
5258 	device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
5259 	    ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
5260 	    (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
5261 	device_printf(dev, "Flow control watermarks high = %d low = %d\n",
5262 	    adapter->hw.fc.high_water,
5263 	    adapter->hw.fc.low_water);
5264 	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
5265 	    E1000_READ_REG(&adapter->hw, E1000_TIDV),
5266 	    E1000_READ_REG(&adapter->hw, E1000_TADV));
5267 	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
5268 	    E1000_READ_REG(&adapter->hw, E1000_RDTR),
5269 	    E1000_READ_REG(&adapter->hw, E1000_RADV));
5270 	device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
5271 	    (long long)adapter->tx_fifo_wrk_cnt,
5272 	    (long long)adapter->tx_fifo_reset_cnt);
5273 	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5274 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5275 	    E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5276 	device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5277 	    E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5278 	    E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5279 	device_printf(dev, "Num Tx descriptors avail = %d\n",
5280 	    adapter->num_tx_desc_avail);
5281 	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5282 	    adapter->no_tx_desc_avail1);
5283 	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5284 	    adapter->no_tx_desc_avail2);
5285 	device_printf(dev, "Std mbuf failed = %ld\n",
5286 	    adapter->mbuf_alloc_failed);
5287 	device_printf(dev, "Std mbuf cluster failed = %ld\n",
5288 	    adapter->mbuf_cluster_failed);
5289 	device_printf(dev, "Driver dropped packets = %ld\n",
5290 	    adapter->dropped_pkts);
5291 	device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5292 		adapter->no_tx_dma_setup);
5293 }
5294 
5295 static void
5296 em_print_hw_stats(struct adapter *adapter)
5297 {
5298 	device_t dev = adapter->dev;
5299 
5300 	device_printf(dev, "Excessive collisions = %lld\n",
5301 	    (long long)adapter->stats.ecol);
5302 #if	(DEBUG_HW > 0)  /* Dont output these errors normally */
5303 	device_printf(dev, "Symbol errors = %lld\n",
5304 	    (long long)adapter->stats.symerrs);
5305 #endif
5306 	device_printf(dev, "Sequence errors = %lld\n",
5307 	    (long long)adapter->stats.sec);
5308 	device_printf(dev, "Defer count = %lld\n",
5309 	    (long long)adapter->stats.dc);
5310 	device_printf(dev, "Missed Packets = %lld\n",
5311 	    (long long)adapter->stats.mpc);
5312 	device_printf(dev, "Receive No Buffers = %lld\n",
5313 	    (long long)adapter->stats.rnbc);
5314 	/* RLEC is inaccurate on some hardware, calculate our own. */
5315 	device_printf(dev, "Receive Length Errors = %lld\n",
5316 	    ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
5317 	device_printf(dev, "Receive errors = %lld\n",
5318 	    (long long)adapter->stats.rxerrc);
5319 	device_printf(dev, "Crc errors = %lld\n",
5320 	    (long long)adapter->stats.crcerrs);
5321 	device_printf(dev, "Alignment errors = %lld\n",
5322 	    (long long)adapter->stats.algnerrc);
5323 	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
5324 	    (long long)adapter->stats.cexterr);
5325 	device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
5326 	device_printf(dev, "watchdog timeouts = %ld\n",
5327 	    adapter->watchdog_events);
5328 	device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
5329 	    " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
5330 	    adapter->tx_irq , adapter->link_irq);
5331 	device_printf(dev, "XON Rcvd = %lld\n",
5332 	    (long long)adapter->stats.xonrxc);
5333 	device_printf(dev, "XON Xmtd = %lld\n",
5334 	    (long long)adapter->stats.xontxc);
5335 	device_printf(dev, "XOFF Rcvd = %lld\n",
5336 	    (long long)adapter->stats.xoffrxc);
5337 	device_printf(dev, "XOFF Xmtd = %lld\n",
5338 	    (long long)adapter->stats.xofftxc);
5339 	device_printf(dev, "Good Packets Rcvd = %lld\n",
5340 	    (long long)adapter->stats.gprc);
5341 	device_printf(dev, "Good Packets Xmtd = %lld\n",
5342 	    (long long)adapter->stats.gptc);
5343 	device_printf(dev, "TSO Contexts Xmtd = %lld\n",
5344 	    (long long)adapter->stats.tsctc);
5345 	device_printf(dev, "TSO Contexts Failed = %lld\n",
5346 	    (long long)adapter->stats.tsctfc);
5347 }
5348 
5349 /**********************************************************************
5350  *
5351  *  This routine provides a way to dump out the adapter eeprom,
5352  *  often a useful debug/service tool. This only dumps the first
5353  *  32 words, stuff that matters is in that extent.
5354  *
5355  **********************************************************************/
5356 static void
5357 em_print_nvm_info(struct adapter *adapter)
5358 {
5359 	u16	eeprom_data;
5360 	int	i, j, row = 0;
5361 
5362 	/* Its a bit crude, but it gets the job done */
5363 	printf("\nInterface EEPROM Dump:\n");
5364 	printf("Offset\n0x0000  ");
5365 	for (i = 0, j = 0; i < 32; i++, j++) {
5366 		if (j == 8) { /* Make the offset block */
5367 			j = 0; ++row;
5368 			printf("\n0x00%x0  ",row);
5369 		}
5370 		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5371 		printf("%04x ", eeprom_data);
5372 	}
5373 	printf("\n");
5374 }
5375 
5376 static int
5377 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5378 {
5379 	struct adapter *adapter;
5380 	int error;
5381 	int result;
5382 
5383 	result = -1;
5384 	error = sysctl_handle_int(oidp, &result, 0, req);
5385 
5386 	if (error || !req->newptr)
5387 		return (error);
5388 
5389 	if (result == 1) {
5390 		adapter = (struct adapter *)arg1;
5391 		em_print_debug_info(adapter);
5392 	}
5393 	/*
5394 	 * This value will cause a hex dump of the
5395 	 * first 32 16-bit words of the EEPROM to
5396 	 * the screen.
5397 	 */
5398 	if (result == 2) {
5399 		adapter = (struct adapter *)arg1;
5400 		em_print_nvm_info(adapter);
5401         }
5402 
5403 	return (error);
5404 }
5405 
5406 
5407 static int
5408 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5409 {
5410 	struct adapter *adapter;
5411 	int error;
5412 	int result;
5413 
5414 	result = -1;
5415 	error = sysctl_handle_int(oidp, &result, 0, req);
5416 
5417 	if (error || !req->newptr)
5418 		return (error);
5419 
5420 	if (result == 1) {
5421 		adapter = (struct adapter *)arg1;
5422 		em_print_hw_stats(adapter);
5423 	}
5424 
5425 	return (error);
5426 }
5427 
/*
 * Sysctl handler for the interrupt-delay tunables.  Converts the
 * user-supplied microsecond value to hardware ticks and writes it
 * into the low 16 bits of the register recorded in the info struct,
 * with special handling for TIDV (see below).
 */
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u32 regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Reject values that don't fit in the 16-bit tick field */
	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = EM_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_CORE_LOCK(adapter);
	/* The delay lives in the register's low 16 bits */
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
		break;
	case E1000_TIDV:
		if (ticks == 0) {
			/* A zero delay disables IDE on descriptors instead */
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_CORE_UNLOCK(adapter);
	return (0);
}
5470 
5471 static void
5472 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5473 	const char *description, struct em_int_delay_info *info,
5474 	int offset, int value)
5475 {
5476 	info->adapter = adapter;
5477 	info->offset = offset;
5478 	info->value = value;
5479 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5480 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5481 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5482 	    info, 0, em_sysctl_int_delay, "I", description);
5483 }
5484 
5485 #ifndef EM_LEGACY_IRQ
5486 static void
5487 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5488 	const char *description, int *limit, int value)
5489 {
5490 	*limit = value;
5491 	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5492 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5493 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
5494 }
5495 #endif
5496 
5497 
5498