xref: /freebsd/sys/dev/e1000/if_em.c (revision d5fc25e5d6c52b306312784663ccad85923a9c76)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2009, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
37 #include "opt_inet.h"
38 #endif
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/buf_ring.h>
43 #include <sys/bus.h>
44 #include <sys/endian.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/module.h>
50 #include <sys/rman.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/taskqueue.h>
55 #include <sys/eventhandler.h>
56 #ifdef EM_TIMESYNC
57 #include <sys/ioccom.h>
58 #include <sys/time.h>
59 #endif
60 #include <machine/bus.h>
61 #include <machine/resource.h>
62 
63 #include <net/bpf.h>
64 #include <net/ethernet.h>
65 #include <net/if.h>
66 #include <net/if_arp.h>
67 #include <net/if_dl.h>
68 #include <net/if_media.h>
69 
70 #include <net/if_types.h>
71 #include <net/if_vlan_var.h>
72 
73 #include <netinet/in_systm.h>
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 #include <netinet/ip.h>
77 #include <netinet/ip6.h>
78 #include <netinet/tcp.h>
79 #include <netinet/udp.h>
80 
81 #include <machine/in_cksum.h>
82 #include <dev/pci/pcivar.h>
83 #include <dev/pci/pcireg.h>
84 
85 #include "e1000_api.h"
86 #include "e1000_82571.h"
87 #include "if_em.h"
88 
89 /*********************************************************************
90  *  Set this to one to display debug statistics
91  *********************************************************************/
92 int	em_display_debug_stats = 0;
93 
94 /*********************************************************************
95  *  Driver version:
96  *********************************************************************/
97 char em_driver_version[] = "6.9.9";
98 
99 
100 /*********************************************************************
101  *  PCI Device ID Table
102  *
103  *  Used by probe to select devices to load on
104  *  Last field stores an index into e1000_strings
105  *  Last entry must be all 0s
106  *
107  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
108  *********************************************************************/
109 
static em_vendor_info_t em_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	/* 82540 family (PCI) */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82541 family */
	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82542 */
	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82543 family */
	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82544 family */
	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82545 family */
	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82546 family (dual/quad port) */
	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82547 family (CSA) */
	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82571/82572 family (PCIe) */
	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82573 / ES2LAN */
	{ 0x8086, E1000_DEV_ID_82573E,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573E_IAMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573L,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	/* ICH8 (mobile/desktop LAN-on-motherboard) */
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* ICH9 / 82574 / ICH10 */
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_BM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82574L,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{ 0, 0, 0, 0, 0}
};
215 
216 /*********************************************************************
217  *  Table of branding strings for all supported NICs.
218  *********************************************************************/
219 
/* Indexed by the last field of em_vendor_info_array entries. */
static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
223 
224 /*********************************************************************
225  *  Function prototypes
226  *********************************************************************/
/* newbus device interface entry points */
static int	em_probe(device_t);
static int	em_attach(device_t);
static int	em_detach(device_t);
static int	em_shutdown(device_t);
static int	em_suspend(device_t);
static int	em_resume(device_t);
/* ifnet entry points and core state transitions */
static void	em_start(struct ifnet *);
static void	em_start_locked(struct ifnet *ifp);
static int	em_ioctl(struct ifnet *, u_long, caddr_t);
static void	em_watchdog(struct adapter *);
static void	em_init(void *);
static void	em_init_locked(struct adapter *);
static void	em_stop(void *);
static void	em_media_status(struct ifnet *, struct ifmediareq *);
static int	em_media_change(struct ifnet *);
/* hardware identification and resource management */
static void	em_identify_hardware(struct adapter *);
static int	em_allocate_pci_resources(struct adapter *);
static int	em_allocate_legacy(struct adapter *adapter);
static int	em_allocate_msix(struct adapter *adapter);
static int	em_setup_msix(struct adapter *);
static void	em_free_pci_resources(struct adapter *);
static void	em_local_timer(void *);
static int	em_hardware_init(struct adapter *);
static void	em_setup_interface(device_t, struct adapter *);
/* TX/RX ring setup, teardown and interrupt plumbing */
static void	em_setup_transmit_structures(struct adapter *);
static void	em_initialize_transmit_unit(struct adapter *);
static int	em_setup_receive_structures(struct adapter *);
static void	em_initialize_receive_unit(struct adapter *);
static void	em_enable_intr(struct adapter *);
static void	em_disable_intr(struct adapter *);
static void	em_free_transmit_structures(struct adapter *);
static void	em_free_receive_structures(struct adapter *);
static void	em_update_stats_counters(struct adapter *);
static void	em_txeof(struct adapter *);
static void	em_tx_purge(struct adapter *);
static int	em_allocate_receive_structures(struct adapter *);
static int	em_allocate_transmit_structures(struct adapter *);
static int	em_rxeof(struct adapter *, int, int *);
#ifndef __NO_STRICT_ALIGNMENT
static int	em_fixup_rx(struct adapter *);
#endif
/* checksum offload and TSO context descriptor helpers */
static void	em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static void	em_transmit_checksum_setup(struct adapter *, struct mbuf *,
		    u32 *, u32 *);
#if __FreeBSD_version >= 700000
static bool	em_tso_setup(struct adapter *, struct mbuf *,
		    u32 *, u32 *);
#endif /* FreeBSD_version >= 700000 */
/* receive filter, link state, VLAN and transmit path */
static void	em_set_promisc(struct adapter *);
static void	em_disable_promisc(struct adapter *);
static void	em_set_multi(struct adapter *);
static void	em_print_hw_stats(struct adapter *);
static void	em_update_link_status(struct adapter *);
static int	em_get_buf(struct adapter *, int);
static void	em_register_vlan(void *, struct ifnet *, u16);
static void	em_unregister_vlan(void *, struct ifnet *, u16);
static int	em_xmit(struct adapter *, struct mbuf **);
static void	em_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround (shared FIFO with CSA) */
static int	em_82547_fifo_workaround(struct adapter *, int);
static void	em_82547_update_fifo_head(struct adapter *, int);
static int	em_82547_tx_fifo_reset(struct adapter *);
static void	em_82547_move_tail(void *);
/* DMA memory, diagnostics and sysctl helpers */
static int	em_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *, int);
static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
static void	em_print_debug_info(struct adapter *);
static void	em_print_nvm_info(struct adapter *);
static int 	em_is_valid_ether_addr(u8 *);
static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u32	em_fill_descriptors (bus_addr_t address, u32 length,
		    PDESC_ARRAY desc_array);
static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void	em_add_int_delay_sysctl(struct adapter *, const char *,
		    const char *, struct em_int_delay_info *, int, int);
/* Management and WOL Support */
static void	em_init_manageability(struct adapter *);
static void	em_release_manageability(struct adapter *);
static void     em_get_hw_control(struct adapter *);
static void     em_release_hw_control(struct adapter *);
static void     em_enable_wakeup(device_t);

#ifdef EM_TIMESYNC
/* Precision Time sync support */
static int	em_tsync_init(struct adapter *);
static void	em_tsync_disable(struct adapter *);
#endif

#ifdef EM_LEGACY_IRQ
static void	em_intr(void *);
#else /* FAST IRQ */
#if __FreeBSD_version < 700000
static void	em_irq_fast(void *);
#else
static int	em_irq_fast(void *);
#endif

/* MSIX handlers */
static void	em_msix_tx(void *);
static void	em_msix_rx(void *);
static void	em_msix_link(void *);
static void	em_handle_rx(void *context, int pending);
static void	em_handle_tx(void *context, int pending);

static void	em_handle_rxtx(void *context, int pending);
static void	em_handle_link(void *context, int pending);
static void	em_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
#endif /* ~EM_LEGACY_IRQ */

#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif /* POLLING */
341 
342 /*********************************************************************
343  *  FreeBSD Device Interface Entry Points
344  *********************************************************************/
345 
/* newbus method dispatch table; each entry maps a bus method to a handler */
static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	DEVMETHOD(device_suspend, em_suspend),
	DEVMETHOD(device_resume, em_resume),
	{0, 0}	/* terminator */
};

/* softc size tells newbus how much per-device state to allocate */
static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter),
};

static devclass_t em_devclass;
/* Register the driver on the pci bus and declare module dependencies. */
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
365 
366 /*********************************************************************
367  *  Tunable default values.
368  *********************************************************************/
369 
/*
 * Interrupt-delay registers count in units of 1.024 usec; these macros
 * convert between register ticks and microseconds with rounding.
 */
#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
#define M_TSO_LEN			66

/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO	0
#endif

/* Defaults for the interrupt-delay sysctls, expressed in usecs */
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
/* Ring sizes; validated against hardware limits in em_attach() */
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;
static int em_smart_pwr_down = FALSE;
/* Controls whether promiscuous also shows bad packets */
static int em_debug_sbp = FALSE;
/* Local switch for MSI/MSIX */
static int em_enable_msi = TRUE;

/* Loader tunables (hw.em.*) overriding the defaults above at boot */
TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);

#ifndef EM_LEGACY_IRQ
/* How many packets rxeof tries to clean at a time */
static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
#endif

/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
409 
410 /*********************************************************************
411  *  Device identification routine
412  *
413  *  em_probe determines if the driver should be loaded on
414  *  adapter based on PCI vendor/device id of the adapter.
415  *
416  *  return BUS_PROBE_DEFAULT on success, positive on failure
417  *********************************************************************/
418 
419 static int
420 em_probe(device_t dev)
421 {
422 	char		adapter_name[60];
423 	u16		pci_vendor_id = 0;
424 	u16		pci_device_id = 0;
425 	u16		pci_subvendor_id = 0;
426 	u16		pci_subdevice_id = 0;
427 	em_vendor_info_t *ent;
428 
429 	INIT_DEBUGOUT("em_probe: begin");
430 
431 	pci_vendor_id = pci_get_vendor(dev);
432 	if (pci_vendor_id != EM_VENDOR_ID)
433 		return (ENXIO);
434 
435 	pci_device_id = pci_get_device(dev);
436 	pci_subvendor_id = pci_get_subvendor(dev);
437 	pci_subdevice_id = pci_get_subdevice(dev);
438 
439 	ent = em_vendor_info_array;
440 	while (ent->vendor_id != 0) {
441 		if ((pci_vendor_id == ent->vendor_id) &&
442 		    (pci_device_id == ent->device_id) &&
443 
444 		    ((pci_subvendor_id == ent->subvendor_id) ||
445 		    (ent->subvendor_id == PCI_ANY_ID)) &&
446 
447 		    ((pci_subdevice_id == ent->subdevice_id) ||
448 		    (ent->subdevice_id == PCI_ANY_ID))) {
449 			sprintf(adapter_name, "%s %s",
450 				em_strings[ent->index],
451 				em_driver_version);
452 			device_set_desc_copy(dev, adapter_name);
453 			return (BUS_PROBE_DEFAULT);
454 		}
455 		ent++;
456 	}
457 
458 	return (ENXIO);
459 }
460 
461 /*********************************************************************
462  *  Device initialization routine
463  *
464  *  The attach entry point is called when the driver is being loaded.
465  *  This routine identifies the type of hardware, allocates all resources
466  *  and initializes the hardware.
467  *
468  *  return 0 on success, positive on failure
469  *********************************************************************/
470 
471 static int
472 em_attach(device_t dev)
473 {
474 	struct adapter	*adapter;
475 	int		tsize, rsize;
476 	int		error = 0;
477 	u16		eeprom_data, device_id;
478 
479 	INIT_DEBUGOUT("em_attach: begin");
480 
481 	adapter = device_get_softc(dev);
482 	adapter->dev = adapter->osdep.dev = dev;
483 	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
484 	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
485 	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
486 
487 	/* SYSCTL stuff */
488 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
489 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
490 	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
491 	    em_sysctl_debug_info, "I", "Debug Information");
492 
493 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
494 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
495 	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
496 	    em_sysctl_stats, "I", "Statistics");
497 
498 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
499 	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
500 
501 	/* Determine hardware and mac info */
502 	em_identify_hardware(adapter);
503 
504 	/* Setup PCI resources */
505 	if (em_allocate_pci_resources(adapter)) {
506 		device_printf(dev, "Allocation of PCI resources failed\n");
507 		error = ENXIO;
508 		goto err_pci;
509 	}
510 
511 	/*
512 	** For ICH8 and family we need to
513 	** map the flash memory, and this
514 	** must happen after the MAC is
515 	** identified
516 	*/
517 	if ((adapter->hw.mac.type == e1000_ich8lan) ||
518 	    (adapter->hw.mac.type == e1000_ich9lan) ||
519 	    (adapter->hw.mac.type == e1000_ich10lan)) {
520 		int rid = EM_BAR_TYPE_FLASH;
521 		adapter->flash = bus_alloc_resource_any(dev,
522 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
523 		if (adapter->flash == NULL) {
524 			device_printf(dev, "Mapping of Flash failed\n");
525 			error = ENXIO;
526 			goto err_pci;
527 		}
528 		/* This is used in the shared code */
529 		adapter->hw.flash_address = (u8 *)adapter->flash;
530 		adapter->osdep.flash_bus_space_tag =
531 		    rman_get_bustag(adapter->flash);
532 		adapter->osdep.flash_bus_space_handle =
533 		    rman_get_bushandle(adapter->flash);
534 	}
535 
536 	/* Do Shared Code initialization */
537 	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
538 		device_printf(dev, "Setup of Shared code failed\n");
539 		error = ENXIO;
540 		goto err_pci;
541 	}
542 
543 	e1000_get_bus_info(&adapter->hw);
544 
545 	/* Set up some sysctls for the tunable interrupt delays */
546 	em_add_int_delay_sysctl(adapter, "rx_int_delay",
547 	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
548 	    E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
549 	em_add_int_delay_sysctl(adapter, "tx_int_delay",
550 	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
551 	    E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
552 	if (adapter->hw.mac.type >= e1000_82540) {
553 		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
554 		    "receive interrupt delay limit in usecs",
555 		    &adapter->rx_abs_int_delay,
556 		    E1000_REGISTER(&adapter->hw, E1000_RADV),
557 		    em_rx_abs_int_delay_dflt);
558 		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
559 		    "transmit interrupt delay limit in usecs",
560 		    &adapter->tx_abs_int_delay,
561 		    E1000_REGISTER(&adapter->hw, E1000_TADV),
562 		    em_tx_abs_int_delay_dflt);
563 	}
564 
565 #ifndef EM_LEGACY_IRQ
566 	/* Sysctls for limiting the amount of work done in the taskqueue */
567 	em_add_rx_process_limit(adapter, "rx_processing_limit",
568 	    "max number of rx packets to process", &adapter->rx_process_limit,
569 	    em_rx_process_limit);
570 #endif
571 
572 	/*
573 	 * Validate number of transmit and receive descriptors. It
574 	 * must not exceed hardware maximum, and must be multiple
575 	 * of E1000_DBA_ALIGN.
576 	 */
577 	if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
578 	    (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
579 	    (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
580 	    (em_txd < EM_MIN_TXD)) {
581 		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
582 		    EM_DEFAULT_TXD, em_txd);
583 		adapter->num_tx_desc = EM_DEFAULT_TXD;
584 	} else
585 		adapter->num_tx_desc = em_txd;
586 	if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
587 	    (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
588 	    (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
589 	    (em_rxd < EM_MIN_RXD)) {
590 		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
591 		    EM_DEFAULT_RXD, em_rxd);
592 		adapter->num_rx_desc = EM_DEFAULT_RXD;
593 	} else
594 		adapter->num_rx_desc = em_rxd;
595 
596 	adapter->hw.mac.autoneg = DO_AUTO_NEG;
597 	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
598 	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
599 	adapter->rx_buffer_len = 2048;
600 
601 	e1000_init_script_state_82541(&adapter->hw, TRUE);
602 	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
603 
604 	/* Copper options */
605 	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
606 		adapter->hw.phy.mdix = AUTO_ALL_MODES;
607 		adapter->hw.phy.disable_polarity_correction = FALSE;
608 		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
609 	}
610 
611 	/*
612 	 * Set the frame limits assuming
613 	 * standard ethernet sized frames.
614 	 */
615 	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
616 	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
617 
618 	/*
619 	 * This controls when hardware reports transmit completion
620 	 * status.
621 	 */
622 	adapter->hw.mac.report_tx_early = 1;
623 
624 	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
625 	    EM_DBA_ALIGN);
626 
627 	/* Allocate Transmit Descriptor ring */
628 	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
629 		device_printf(dev, "Unable to allocate tx_desc memory\n");
630 		error = ENOMEM;
631 		goto err_tx_desc;
632 	}
633 	adapter->tx_desc_base =
634 	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
635 
636 	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
637 	    EM_DBA_ALIGN);
638 
639 	/* Allocate Receive Descriptor ring */
640 	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
641 		device_printf(dev, "Unable to allocate rx_desc memory\n");
642 		error = ENOMEM;
643 		goto err_rx_desc;
644 	}
645 	adapter->rx_desc_base =
646 	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
647 
648 	/*
649 	** Start from a known state, this is
650 	** important in reading the nvm and
651 	** mac from that.
652 	*/
653 	e1000_reset_hw(&adapter->hw);
654 
655 	/* Make sure we have a good EEPROM before we read from it */
656 	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
657 		/*
658 		** Some PCI-E parts fail the first check due to
659 		** the link being in sleep state, call it again,
660 		** if it fails a second time its a real issue.
661 		*/
662 		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
663 			device_printf(dev,
664 			    "The EEPROM Checksum Is Not Valid\n");
665 			error = EIO;
666 			goto err_hw_init;
667 		}
668 	}
669 
670 	/* Copy the permanent MAC address out of the EEPROM */
671 	if (e1000_read_mac_addr(&adapter->hw) < 0) {
672 		device_printf(dev, "EEPROM read error while reading MAC"
673 		    " address\n");
674 		error = EIO;
675 		goto err_hw_init;
676 	}
677 
678 	if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
679 		device_printf(dev, "Invalid MAC address\n");
680 		error = EIO;
681 		goto err_hw_init;
682 	}
683 
684 	/* Initialize the hardware */
685 	if (em_hardware_init(adapter)) {
686 		device_printf(dev, "Unable to initialize the hardware\n");
687 		error = EIO;
688 		goto err_hw_init;
689 	}
690 
691 	/* Allocate transmit descriptors and buffers */
692 	if (em_allocate_transmit_structures(adapter)) {
693 		device_printf(dev, "Could not setup transmit structures\n");
694 		error = ENOMEM;
695 		goto err_tx_struct;
696 	}
697 
698 	/* Allocate receive descriptors and buffers */
699 	if (em_allocate_receive_structures(adapter)) {
700 		device_printf(dev, "Could not setup receive structures\n");
701 		error = ENOMEM;
702 		goto err_rx_struct;
703 	}
704 
705 	/*
706 	**  Do interrupt configuration
707 	*/
708 	if (adapter->msi > 1) /* Do MSI/X */
709 		error = em_allocate_msix(adapter);
710 	else  /* MSI or Legacy */
711 		error = em_allocate_legacy(adapter);
712 	if (error)
713 		goto err_rx_struct;
714 
715 	/* Setup OS specific network interface */
716 	em_setup_interface(dev, adapter);
717 
718 	/* Initialize statistics */
719 	em_update_stats_counters(adapter);
720 
721 	adapter->hw.mac.get_link_status = 1;
722 	em_update_link_status(adapter);
723 
724 	/* Indicate SOL/IDER usage */
725 	if (e1000_check_reset_block(&adapter->hw))
726 		device_printf(dev,
727 		    "PHY reset is blocked due to SOL/IDER session.\n");
728 
729 	/* Determine if we have to control management hardware */
730 	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
731 
732 	/*
733 	 * Setup Wake-on-Lan
734 	 */
735 	switch (adapter->hw.mac.type) {
736 
737 	case e1000_82542:
738 	case e1000_82543:
739 		break;
740 	case e1000_82546:
741 	case e1000_82546_rev_3:
742 	case e1000_82571:
743 	case e1000_80003es2lan:
744 		if (adapter->hw.bus.func == 1)
745 			e1000_read_nvm(&adapter->hw,
746 			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
747 		else
748 			e1000_read_nvm(&adapter->hw,
749 			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
750 		eeprom_data &= EM_EEPROM_APME;
751 		break;
752 	default:
753 		/* APME bit in EEPROM is mapped to WUC.APME */
754 		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) &
755 		    E1000_WUC_APME;
756 		break;
757 	}
758 	if (eeprom_data)
759 		adapter->wol = E1000_WUFC_MAG;
760 	/*
761          * We have the eeprom settings, now apply the special cases
762          * where the eeprom may be wrong or the board won't support
763          * wake on lan on a particular port
764 	 */
765 	device_id = pci_get_device(dev);
766         switch (device_id) {
767 	case E1000_DEV_ID_82546GB_PCIE:
768 		adapter->wol = 0;
769 		break;
770 	case E1000_DEV_ID_82546EB_FIBER:
771 	case E1000_DEV_ID_82546GB_FIBER:
772 	case E1000_DEV_ID_82571EB_FIBER:
773 		/* Wake events only supported on port A for dual fiber
774 		 * regardless of eeprom setting */
775 		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
776 		    E1000_STATUS_FUNC_1)
777 			adapter->wol = 0;
778 		break;
779 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
780 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
781 	case E1000_DEV_ID_82571EB_QUAD_FIBER:
782 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
783                 /* if quad port adapter, disable WoL on all but port A */
784 		if (global_quad_port_a != 0)
785 			adapter->wol = 0;
786 		/* Reset for multiple quad port adapters */
787 		if (++global_quad_port_a == 4)
788 			global_quad_port_a = 0;
789                 break;
790 	}
791 
792 	/* Do we need workaround for 82544 PCI-X adapter? */
793 	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
794 	    adapter->hw.mac.type == e1000_82544)
795 		adapter->pcix_82544 = TRUE;
796 	else
797 		adapter->pcix_82544 = FALSE;
798 
799 	/* Register for VLAN events */
800 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
801 	    em_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
802 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
803 	    em_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
804 
805 	/* Tell the stack that the interface is not active */
806 	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
807 
808 	INIT_DEBUGOUT("em_attach: end");
809 
810 	return (0);
811 
812 err_rx_struct:
813 	em_free_transmit_structures(adapter);
814 err_tx_struct:
815 err_hw_init:
816 	em_release_hw_control(adapter);
817 	em_dma_free(adapter, &adapter->rxdma);
818 err_rx_desc:
819 	em_dma_free(adapter, &adapter->txdma);
820 err_tx_desc:
821 err_pci:
822 	em_free_pci_resources(adapter);
823 	EM_TX_LOCK_DESTROY(adapter);
824 	EM_RX_LOCK_DESTROY(adapter);
825 	EM_CORE_LOCK_DESTROY(adapter);
826 
827 	return (error);
828 }
829 
830 /*********************************************************************
831  *  Device removal routine
832  *
833  *  The detach entry point is called when the driver is being removed.
834  *  This routine stops the adapter and deallocates all the resources
835  *  that were allocated for driver operation.
836  *
837  *  return 0 on success, positive on failure
838  *********************************************************************/
839 
static int
em_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
#if __FreeBSD_version >= 700000
	if (adapter->ifp->if_vlantrunk != NULL) {
#else
	if (adapter->ifp->if_nvlans != 0) {
#endif
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	/* Flag detach so the ioctl path becomes a no-op from here on */
	adapter->in_detach = 1;
	em_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	em_release_manageability(adapter);

	/*
	 * On manageability-capable parts, hand control of the
	 * hardware back to the firmware/BMC before we go away.
	 */
	if (((adapter->hw.mac.type == e1000_82573) ||
	    (adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_ich10lan) ||
	    (adapter->hw.mac.type == e1000_ich9lan)) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_release_hw_control(adapter);

	/* Arm Wake On Lan if it was configured */
	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		em_enable_wakeup(dev);
	}

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	/* callout_drain() also waits for a callout already in progress */
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	drbr_free(adapter->br, M_DEVBUF);
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		em_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		em_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
923 
924 /*********************************************************************
925  *
926  *  Shutdown entry point
927  *
928  **********************************************************************/
929 
930 static int
931 em_shutdown(device_t dev)
932 {
933 	return em_suspend(dev);
934 }
935 
936 /*
937  * Suspend/resume device methods.
938  */
static int
em_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	EM_CORE_LOCK(adapter);

	/* Stop the hardware before powering down */
	EM_TX_LOCK(adapter);
	em_stop(adapter);
	EM_TX_UNLOCK(adapter);

        em_release_manageability(adapter);

	/*
	 * On manageability-capable parts, give control of the
	 * hardware back to firmware while we are suspended.
	 */
        if (((adapter->hw.mac.type == e1000_82573) ||
            (adapter->hw.mac.type == e1000_ich8lan) ||
            (adapter->hw.mac.type == e1000_ich10lan) ||
            (adapter->hw.mac.type == e1000_ich9lan)) &&
            e1000_check_mng_mode(&adapter->hw))
                em_release_hw_control(adapter);

	/* Arm wake-on-lan filters if WoL was enabled at attach */
        if (adapter->wol) {
                E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
                E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
                em_enable_wakeup(dev);
        }

	EM_CORE_UNLOCK(adapter);

	return bus_generic_suspend(dev);
}
969 
970 static int
971 em_resume(device_t dev)
972 {
973 	struct adapter *adapter = device_get_softc(dev);
974 	struct ifnet *ifp = adapter->ifp;
975 
976 	EM_CORE_LOCK(adapter);
977 	em_init_locked(adapter);
978 	em_init_manageability(adapter);
979 	EM_CORE_UNLOCK(adapter);
980 	em_start(ifp);
981 
982 	return bus_generic_resume(dev);
983 }
984 
985 
986 /*********************************************************************
987  *  Transmit entry point
988  *
989  *  em_start is called by the stack to initiate a transmit.
990  *  The driver will remain in this routine as long as there are
991  *  packets to transmit and transmit resources are available.
992  *  In case resources are not available stack is notified and
993  *  the packet is requeued.
994  **********************************************************************/
995 
996 #ifdef IFNET_BUF_RING
static int
em_transmit_locked(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	int error;

	EM_TX_LOCK_ASSERT(adapter);
	/*
	 * Interface not fully running, or link down: just queue the
	 * frame on the buf ring for transmission later.
	 */
	if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
	    || (!adapter->link_active)) {
		error = drbr_enqueue(ifp, adapter->br, m);
		return (error);
	} else if (ADAPTER_RING_EMPTY(adapter) &&
	    (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
		/*
		 * Fast path: nothing queued ahead of us and plenty of
		 * descriptors available -- transmit this frame directly.
		 * Note em_xmit() may modify the mbuf, or NULL it on
		 * failure, so re-check m before requeueing.
		 */
		if (em_xmit(adapter, &m)) {
			if (m && (error = drbr_enqueue(ifp, adapter->br, m)) != 0)
				return (error);
		} else {
			/*
			 * We've bypassed the buf ring so we need to update
			 * ifp directly
			 */
			drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
			/*
			** Send a copy of the frame to the BPF
			** listener and set the watchdog on.
			*/
			ETHER_BPF_MTAP(ifp, m);
			adapter->watchdog_timer = EM_TX_TIMEOUT;
		}
	} else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
		return (error);

	/* Drain anything still queued on the buf ring */
	if (!ADAPTER_RING_EMPTY(adapter))
		em_start_locked(ifp);

	return (0);
}
1035 
1036 static int
1037 em_transmit(struct ifnet *ifp, struct mbuf *m)
1038 {
1039 
1040 	struct adapter *adapter = ifp->if_softc;
1041 	int error = 0;
1042 
1043 	if(EM_TX_TRYLOCK(adapter)) {
1044 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1045 			error = em_transmit_locked(ifp, m);
1046 		EM_TX_UNLOCK(adapter);
1047 	} else
1048 		error = drbr_enqueue(ifp, adapter->br, m);
1049 
1050 	return (error);
1051 }
1052 
1053 static void
1054 em_qflush(struct ifnet *ifp)
1055 {
1056 	struct mbuf *m;
1057 	struct adapter *adapter = (struct adapter *)ifp->if_softc;
1058 
1059 	EM_TX_LOCK(adapter);
1060 	while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
1061 		m_freem(m);
1062 	if_qflush(ifp);
1063 	EM_TX_UNLOCK(adapter);
1064 }
1065 #endif
1066 
static void
em_start_locked(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	EM_TX_LOCK_ASSERT(adapter);

	/* Nothing to do if not running, flow-blocked, or link is down */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	/* Keep sending while descriptors and queued packets remain */
	while ((adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)
	    && (!ADAPTER_RING_EMPTY(adapter))) {

		m_head = em_dequeue(ifp, adapter->br);
		if (m_head == NULL)
			break;
		/*
		 *  Encapsulation can modify our pointer, and or make it
		 *  NULL on failure.  In that event, we can't requeue.
		 */
		if (em_xmit(adapter, &m_head)) {
			if (m_head == NULL)
				break;
#ifndef IFNET_BUF_RING
			/* Legacy if_snd path: requeue and mark queue full */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
#endif
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		adapter->watchdog_timer = EM_TX_TIMEOUT;
	}
	/* Flow-control the stack when descriptors run low */
	if ((adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD))
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;

}
1111 
1112 static void
1113 em_start(struct ifnet *ifp)
1114 {
1115 	struct adapter *adapter = ifp->if_softc;
1116 
1117 	EM_TX_LOCK(adapter);
1118 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1119 		em_start_locked(ifp);
1120 	EM_TX_UNLOCK(adapter);
1121 }
1122 
1123 /*********************************************************************
1124  *  Ioctl entry point
1125  *
1126  *  em_ioctl is called when the user wants to configure the
1127  *  interface.
1128  *
1129  *  return 0 on success, positive on failure
1130  **********************************************************************/
1131 
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int error = 0;

	/* Detach in progress: ignore further ioctls */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			/*
			 * XXX
			 * Since resetting hardware takes a very long time
			 * and results in link renegotiation we only
			 * initialize the hardware only when it is absolutely
			 * required.
			 */
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				EM_CORE_LOCK(adapter);
				em_init_locked(adapter);
				EM_CORE_UNLOCK(adapter);
			}
			arp_ifinit(ifp, ifa);
		} else
#endif
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;
		u16 eeprom_data = 0;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		/* Determine the largest frame this MAC variant supports */
		switch (adapter->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_3GIO_3, 1, &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames - fall thru */
		case e1000_82571:
		case e1000_82572:
		case e1000_ich9lan:
		case e1000_ich10lan:
		case e1000_82574:
		case e1000_80003es2lan:	/* Limit Jumbo Frame size */
			max_frame_size = 9234;
			break;
			/* Adapters that do not support jumbo frames */
		case e1000_82542:
		case e1000_ich8lan:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		/* Reinitialize so the new frame size takes effect */
		em_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Only promisc/allmulti changed: adjust
				 * the filters without a full reinit.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					em_disable_promisc(adapter);
					em_set_promisc(adapter);
				}
			} else
				em_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				em_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			/* 82542 rev 2 needs an RX unit reset after filter changes */
			if (adapter->hw.mac.type == e1000_82542 &&
	    		    adapter->hw.revision_id == E1000_REVISION_2) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH -- get/set share the ifmedia handler */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			reinit = 1;
		}
#endif

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		/* Offload changes require a reinit while running */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
#if __FreeBSD_version >= 700000
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	    }

#ifdef EM_TIMESYNC
	/*
	** IOCTL support for Precision Time (IEEE 1588) Support
	*/
	case EM_TIMESYNC_READTS:
	    {
		u32 rx_ctl, tx_ctl;
		struct em_tsync_read *tdata;

		tdata = (struct em_tsync_read *) ifr->ifr_data;

		IOCTL_DEBUGOUT("Reading Timestamp\n");

		if (tdata->read_current_time) {
			getnanotime(&tdata->system_time);
			tdata->network_time = E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
			tdata->network_time |=
			    (u64)E1000_READ_REG(&adapter->hw, E1000_SYSTIMH ) << 32;
		}

		rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
		tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);

		IOCTL_DEBUGOUT1("RX_CTL value = %u\n", rx_ctl);
		IOCTL_DEBUGOUT1("TX_CTL value = %u\n", tx_ctl);

		/* Bit 0 of TSYNCRXCTL flags a valid RX timestamp */
		if (rx_ctl & 0x1) {
			IOCTL_DEBUGOUT("RX timestamp is valid\n");
			u32 tmp;
			unsigned char *tmp_cp;

			tdata->rx_valid = 1;
			tdata->rx_stamp = E1000_READ_REG(&adapter->hw, E1000_RXSTMPL);
			tdata->rx_stamp |= (u64)E1000_READ_REG(&adapter->hw,
			    E1000_RXSTMPH) << 32;

			tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRL);
			tmp_cp = (unsigned char *) &tmp;
			tdata->srcid[0] = tmp_cp[0];
			tdata->srcid[1] = tmp_cp[1];
			tdata->srcid[2] = tmp_cp[2];
			tdata->srcid[3] = tmp_cp[3];
			tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRH);
			tmp_cp = (unsigned char *) &tmp;
			tdata->srcid[4] = tmp_cp[0];
			tdata->srcid[5] = tmp_cp[1];
			tdata->seqid = tmp >> 16;
			tdata->seqid = htons(tdata->seqid);
		} else
			tdata->rx_valid = 0;

		if (tx_ctl & 0x1) {
			IOCTL_DEBUGOUT("TX timestamp is valid\n");
			tdata->tx_valid = 1;
			tdata->tx_stamp = E1000_READ_REG(&adapter->hw, E1000_TXSTMPL);
			tdata->tx_stamp |= (u64) E1000_READ_REG(&adapter->hw,
			    E1000_TXSTMPH) << 32;
		} else
			tdata->tx_valid = 0;

		return (0);
	    }
#endif	/* EM_TIMESYNC */

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1394 
1395 /*********************************************************************
1396  *  Watchdog timer:
1397  *
1398  *  This routine is called from the local timer every second.
1399  *  As long as transmit descriptors are being cleaned the value
1400  *  is non-zero and we do nothing. Reaching 0 indicates a tx hang
1401  *  and we then reset the device.
1402  *
1403  **********************************************************************/
1404 
static void
em_watchdog(struct adapter *adapter)
{

	EM_CORE_LOCK_ASSERT(adapter);

	/*
	** The timer is set to 5 every time start queues a packet.
	** Then txeof keeps resetting it as long as it cleans at
	** least one descriptor.
	** Finally, anytime all descriptors are clean the timer is
	** set to 0.
	*/
	EM_TX_LOCK(adapter);
	/* Idle (0) or still counting down after decrement: no hang yet */
	if ((adapter->watchdog_timer == 0) || (--adapter->watchdog_timer)) {
		EM_TX_UNLOCK(adapter);
		return;
	}

	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
	    E1000_STATUS_TXOFF) {
		adapter->watchdog_timer = EM_TX_TIMEOUT;
		EM_TX_UNLOCK(adapter);
		return;
	}

	/* Only complain when the link itself still looks good */
	if (e1000_check_for_link(&adapter->hw) == 0)
		device_printf(adapter->dev, "watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	EM_TX_UNLOCK(adapter);

	/* Reinitialize the adapter to recover from the hang */
	em_init_locked(adapter);
}
1442 
1443 /*********************************************************************
1444  *  Init entry point
1445  *
1446  *  This routine is used in two ways. It is used by the stack as
1447  *  init entry point in network interface structure. It is also used
1448  *  by the driver as a hw/sw initialization routine to get to a
1449  *  consistent state.
1450  *
1451  *  return 0 on success, positive on failure
1452  **********************************************************************/
1453 
static void
em_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("em_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	/* Quiesce the hardware before reconfiguring it */
	EM_TX_LOCK(adapter);
	em_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case e1000_82574:
			pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;
	case e1000_ich9lan:
	case e1000_ich10lan:
#define E1000_PBA_10K	0x000A
		pba = E1000_PBA_10K;
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset, we make a duplicate
	 * in RAR[14] for that eventuality, this assures
	 * the interface continues to function.
	 */
	if (adapter->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&adapter->hw, TRUE);
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	em_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Enable HW tag stripping unless HW VLAN filtering is active */
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
	    ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) {
		u32 ctrl;
		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	}


	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 700000
		if (ifp->if_capenable & IFCAP_TSO4)
			ifp->if_hwassist |= CSUM_TSO;
#endif
	}

	/* Configure for OS presence */
	em_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	em_setup_transmit_structures(adapter);
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		em_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;
		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		** Set the IVAR - interrupt vector routing.
		** Each nibble represents a vector, high bit
		** is enable, other 3 bits are the MSIX table
		** entry, we map RXQ0 to 0, TXQ0 to 1, and
		** Link (other) to 2, hence the magic number.
		*/
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

#ifdef EM_TIMESYNC
	/* Initialize IEEE 1588 Precision Time hardware */
	if ((adapter->hw.mac.type == e1000_82574) ||
	    (adapter->hw.mac.type == e1000_ich10lan))
		em_tsync_init(adapter);
#endif

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy.reset_disable = TRUE;
}
1638 
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	/* Public init entry point: take the core lock, do the real work */
	EM_CORE_LOCK(sc);
	em_init_locked(sc);
	EM_CORE_UNLOCK(sc);
}
1648 
1649 
1650 #ifdef DEVICE_POLLING
1651 /*********************************************************************
1652  *
1653  *  Legacy polling routine
1654  *
1655  *********************************************************************/
static int
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter;
	u32		reg_icr;
	int		rx_npkts;	/* packets received this poll, returned */

	adapter = ifp->if_softc;
	rx_npkts = 0;

	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Periodic status check: refresh link state if it changed */
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    em_local_timer, adapter);
		}
	}
	EM_CORE_UNLOCK(adapter);

	/* Receive up to 'count' packets, then clean the TX ring */
	em_rxeof(adapter, count, &rx_npkts);

	EM_TX_LOCK(adapter);
	em_txeof(adapter);

	if (!ADAPTER_RING_EMPTY(adapter))
		em_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
	return (rx_npkts);
}
1694 #endif /* DEVICE_POLLING */
1695 
1696 #ifdef EM_LEGACY_IRQ
1697 /*********************************************************************
1698  *
1699  *  Legacy Interrupt Service routine
1700  *
1701  *********************************************************************/
1702 
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_icr;

	/* When polling is active it owns the device; skip interrupts */
	if (ifp->if_capenable & IFCAP_POLLING)
		return;

	EM_CORE_LOCK(adapter);
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	/*
	 * All-ones ICR means the device is gone; zero means not our
	 * interrupt.  On 82571 and later, INT_ASSERTED (bit 31) must
	 * be set for the interrupt to belong to us.
	 */
	if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
	    (adapter->hw.mac.type >= e1000_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0))
			goto out;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			goto out;

	EM_TX_LOCK(adapter);
	em_txeof(adapter);
	em_rxeof(adapter, -1, NULL);
	/* Clean again: RX processing may have completed more TX work */
	em_txeof(adapter);
	EM_TX_UNLOCK(adapter);

	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		em_update_link_status(adapter);
		/* Deal with TX cruft when link lost */
		em_tx_purge(adapter);
		callout_reset(&adapter->timer, hz,
		    em_local_timer, adapter);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
out:
	EM_CORE_UNLOCK(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !ADAPTER_RING_EMPTY(adapter))
		em_start(ifp);
}
1748 
1749 #else /* EM_FAST_IRQ, then fast interrupt routines only */
1750 
1751 static void
1752 em_handle_link(void *context, int pending)
1753 {
1754 	struct adapter	*adapter = context;
1755 	struct ifnet *ifp = adapter->ifp;
1756 
1757 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1758 		return;
1759 
1760 	EM_CORE_LOCK(adapter);
1761 	callout_stop(&adapter->timer);
1762 	em_update_link_status(adapter);
1763 	/* Deal with TX cruft when link lost */
1764 	em_tx_purge(adapter);
1765 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1766 	EM_CORE_UNLOCK(adapter);
1767 }
1768 
1769 
1770 /* Combined RX/TX handler, used by Legacy and MSI */
static void
em_handle_rxtx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp = adapter->ifp;


	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* More RX work remaining? Requeue ourselves to keep going */
		if (em_rxeof(adapter, adapter->rx_process_limit, NULL) != 0)
			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
		EM_TX_LOCK(adapter);
		em_txeof(adapter);

		if (!ADAPTER_RING_EMPTY(adapter))
			em_start_locked(ifp);
		EM_TX_UNLOCK(adapter);
	}

	/* em_irq_fast() masked interrupts; re-enable them now */
	em_enable_intr(adapter);
}
1791 
1792 /*********************************************************************
1793  *
1794  *  Fast Legacy/MSI Combined Interrupt Service routine
1795  *
1796  *********************************************************************/
#if __FreeBSD_version < 700000
#define FILTER_STRAY
#define FILTER_HANDLED
static void
#else
static int
#endif
em_irq_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	u32		reg_icr;

	ifp = adapter->ifp;

	/* Runs in filter context: minimal work, defer to taskqueues */
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return FILTER_STRAY;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return FILTER_STRAY;

	/*
	 * Starting with the 82571 chip, bit 31 should be used to
	 * determine whether the interrupt belongs to us.
	 */
	if (adapter->hw.mac.type >= e1000_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return FILTER_STRAY;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	em_disable_intr(adapter);
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		adapter->hw.mac.get_link_status = 1;
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
	return FILTER_HANDLED;
}
1848 
1849 /*********************************************************************
1850  *
1851  *  MSIX Interrupt Service Routines
1852  *
1853  **********************************************************************/
1854 #define EM_MSIX_TX	0x00040000
1855 #define EM_MSIX_RX	0x00010000
1856 #define EM_MSIX_LINK	0x00100000
1857 
1858 static void
1859 em_msix_tx(void *arg)
1860 {
1861 	struct adapter *adapter = arg;
1862 	struct ifnet	*ifp = adapter->ifp;
1863 
1864 	++adapter->tx_irq;
1865 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1866 		EM_TX_LOCK(adapter);
1867 		em_txeof(adapter);
1868 		EM_TX_UNLOCK(adapter);
1869 		taskqueue_enqueue(adapter->tq, &adapter->tx_task);
1870 	}
1871 	/* Reenable this interrupt */
1872 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
1873 	return;
1874 }
1875 
1876 /*********************************************************************
1877  *
1878  *  MSIX RX Interrupt Service routine
1879  *
1880  **********************************************************************/
1881 
1882 static void
1883 em_msix_rx(void *arg)
1884 {
1885 	struct adapter *adapter = arg;
1886 	struct ifnet	*ifp = adapter->ifp;
1887 
1888 	++adapter->rx_irq;
1889 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1890 	    (em_rxeof(adapter, adapter->rx_process_limit, NULL) != 0))
1891 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1892 	/* Reenable this interrupt */
1893 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
1894 	return;
1895 }
1896 
1897 /*********************************************************************
1898  *
1899  *  MSIX Link Fast Interrupt Service routine
1900  *
1901  **********************************************************************/
1902 
1903 static void
1904 em_msix_link(void *arg)
1905 {
1906 	struct adapter	*adapter = arg;
1907 	u32		reg_icr;
1908 
1909 	++adapter->link_irq;
1910 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1911 
1912 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1913 		adapter->hw.mac.get_link_status = 1;
1914 		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1915 	}
1916 	E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1917 	    EM_MSIX_LINK | E1000_IMS_LSC);
1918 	return;
1919 }
1920 
1921 static void
1922 em_handle_rx(void *context, int pending)
1923 {
1924 	struct adapter	*adapter = context;
1925 	struct ifnet	*ifp = adapter->ifp;
1926 
1927 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1928 	    (em_rxeof(adapter, adapter->rx_process_limit, NULL) != 0))
1929 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1930 
1931 }
1932 
1933 static void
1934 em_handle_tx(void *context, int pending)
1935 {
1936 	struct adapter	*adapter = context;
1937 	struct ifnet	*ifp = adapter->ifp;
1938 
1939 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1940 		if (!EM_TX_TRYLOCK(adapter))
1941 			return;
1942 
1943 		em_txeof(adapter);
1944 		if (!ADAPTER_RING_EMPTY(adapter))
1945 			em_start_locked(ifp);
1946 		EM_TX_UNLOCK(adapter);
1947 	}
1948 }
1949 #endif /* EM_FAST_IRQ */
1950 
1951 /*********************************************************************
1952  *
1953  *  Media Ioctl callback
1954  *
1955  *  This routine is called whenever the user queries the status of
1956  *  the interface using ifconfig.
1957  *
1958  **********************************************************************/
1959 static void
1960 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1961 {
1962 	struct adapter *adapter = ifp->if_softc;
1963 	u_char fiber_type = IFM_1000_SX;
1964 
1965 	INIT_DEBUGOUT("em_media_status: begin");
1966 
1967 	EM_CORE_LOCK(adapter);
1968 	em_update_link_status(adapter);
1969 
1970 	ifmr->ifm_status = IFM_AVALID;
1971 	ifmr->ifm_active = IFM_ETHER;
1972 
1973 	if (!adapter->link_active) {
1974 		EM_CORE_UNLOCK(adapter);
1975 		return;
1976 	}
1977 
1978 	ifmr->ifm_status |= IFM_ACTIVE;
1979 
1980 	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1981 	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1982 		if (adapter->hw.mac.type == e1000_82545)
1983 			fiber_type = IFM_1000_LX;
1984 		ifmr->ifm_active |= fiber_type | IFM_FDX;
1985 	} else {
1986 		switch (adapter->link_speed) {
1987 		case 10:
1988 			ifmr->ifm_active |= IFM_10_T;
1989 			break;
1990 		case 100:
1991 			ifmr->ifm_active |= IFM_100_TX;
1992 			break;
1993 		case 1000:
1994 			ifmr->ifm_active |= IFM_1000_T;
1995 			break;
1996 		}
1997 		if (adapter->link_duplex == FULL_DUPLEX)
1998 			ifmr->ifm_active |= IFM_FDX;
1999 		else
2000 			ifmr->ifm_active |= IFM_HDX;
2001 	}
2002 	EM_CORE_UNLOCK(adapter);
2003 }
2004 
2005 /*********************************************************************
2006  *
2007  *  Media Ioctl callback
2008  *
2009  *  This routine is called when the user changes speed/duplex using
2010  *  media/mediopt option with ifconfig.
2011  *
2012  **********************************************************************/
2013 static int
2014 em_media_change(struct ifnet *ifp)
2015 {
2016 	struct adapter *adapter = ifp->if_softc;
2017 	struct ifmedia  *ifm = &adapter->media;
2018 
2019 	INIT_DEBUGOUT("em_media_change: begin");
2020 
2021 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2022 		return (EINVAL);
2023 
2024 	EM_CORE_LOCK(adapter);
2025 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2026 	case IFM_AUTO:
2027 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
2028 		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
2029 		break;
2030 	case IFM_1000_LX:
2031 	case IFM_1000_SX:
2032 	case IFM_1000_T:
2033 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
2034 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
2035 		break;
2036 	case IFM_100_TX:
2037 		adapter->hw.mac.autoneg = FALSE;
2038 		adapter->hw.phy.autoneg_advertised = 0;
2039 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2040 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
2041 		else
2042 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
2043 		break;
2044 	case IFM_10_T:
2045 		adapter->hw.mac.autoneg = FALSE;
2046 		adapter->hw.phy.autoneg_advertised = 0;
2047 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2048 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
2049 		else
2050 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
2051 		break;
2052 	default:
2053 		device_printf(adapter->dev, "Unsupported media type\n");
2054 	}
2055 
2056 	/* As the speed/duplex settings my have changed we need to
2057 	 * reset the PHY.
2058 	 */
2059 	adapter->hw.phy.reset_disable = FALSE;
2060 
2061 	em_init_locked(adapter);
2062 	EM_CORE_UNLOCK(adapter);
2063 
2064 	return (0);
2065 }
2066 
2067 /*********************************************************************
2068  *
2069  *  This routine maps the mbufs to tx descriptors.
2070  *
2071  *  return 0 on success, positive on failure
2072  **********************************************************************/
2073 
2074 static int
2075 em_xmit(struct adapter *adapter, struct mbuf **m_headp)
2076 {
2077 	bus_dma_segment_t	segs[EM_MAX_SCATTER];
2078 	bus_dmamap_t		map;
2079 	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
2080 	struct e1000_tx_desc	*ctxd = NULL;
2081 	struct mbuf		*m_head;
2082 	u32			txd_upper, txd_lower, txd_used, txd_saved;
2083 	int			nsegs, i, j, first, last = 0;
2084 	int			error, do_tso, tso_desc = 0;
2085 #if __FreeBSD_version < 700000
2086 	struct m_tag		*mtag;
2087 #endif
2088 	m_head = *m_headp;
2089 	txd_upper = txd_lower = txd_used = txd_saved = 0;
2090 
2091 #if __FreeBSD_version >= 700000
2092 	do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
2093 #else
2094 	do_tso = 0;
2095 #endif
2096 
2097         /*
2098          * Force a cleanup if number of TX descriptors
2099          * available hits the threshold
2100          */
2101 	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
2102 		em_txeof(adapter);
2103 		/* Now do we at least have a minimal? */
2104 		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
2105 			adapter->no_tx_desc_avail1++;
2106 			return (ENOBUFS);
2107 		}
2108 	}
2109 
2110 
2111 	/*
2112 	 * TSO workaround:
2113 	 *  If an mbuf is only header we need
2114 	 *     to pull 4 bytes of data into it.
2115 	 */
2116 	if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
2117 		m_head = m_pullup(m_head, M_TSO_LEN + 4);
2118 		*m_headp = m_head;
2119 		if (m_head == NULL)
2120 			return (ENOBUFS);
2121 	}
2122 
2123 	/*
2124 	 * Map the packet for DMA
2125 	 *
2126 	 * Capture the first descriptor index,
2127 	 * this descriptor will have the index
2128 	 * of the EOP which is the only one that
2129 	 * now gets a DONE bit writeback.
2130 	 */
2131 	first = adapter->next_avail_tx_desc;
2132 	tx_buffer = &adapter->tx_buffer_area[first];
2133 	tx_buffer_mapped = tx_buffer;
2134 	map = tx_buffer->map;
2135 
2136 	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2137 	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2138 
2139 	/*
2140 	 * There are two types of errors we can (try) to handle:
2141 	 * - EFBIG means the mbuf chain was too long and bus_dma ran
2142 	 *   out of segments.  Defragment the mbuf chain and try again.
2143 	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
2144 	 *   at this point in time.  Defer sending and try again later.
2145 	 * All other errors, in particular EINVAL, are fatal and prevent the
2146 	 * mbuf chain from ever going through.  Drop it and report error.
2147 	 */
2148 	if (error == EFBIG) {
2149 		struct mbuf *m;
2150 
2151 		m = m_defrag(*m_headp, M_DONTWAIT);
2152 		if (m == NULL) {
2153 			adapter->mbuf_alloc_failed++;
2154 			m_freem(*m_headp);
2155 			*m_headp = NULL;
2156 			return (ENOBUFS);
2157 		}
2158 		*m_headp = m;
2159 
2160 		/* Try it again */
2161 		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2162 		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2163 
2164 		if (error) {
2165 			adapter->no_tx_dma_setup++;
2166 			m_freem(*m_headp);
2167 			*m_headp = NULL;
2168 			return (error);
2169 		}
2170 	} else if (error != 0) {
2171 		adapter->no_tx_dma_setup++;
2172 		return (error);
2173 	}
2174 
2175 	/*
2176 	 * TSO Hardware workaround, if this packet is not
2177 	 * TSO, and is only a single descriptor long, and
2178 	 * it follows a TSO burst, then we need to add a
2179 	 * sentinel descriptor to prevent premature writeback.
2180 	 */
2181 	if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
2182 		if (nsegs == 1)
2183 			tso_desc = TRUE;
2184 		adapter->tx_tso = FALSE;
2185 	}
2186 
2187         if (nsegs > (adapter->num_tx_desc_avail - 2)) {
2188                 adapter->no_tx_desc_avail2++;
2189 		bus_dmamap_unload(adapter->txtag, map);
2190 		return (ENOBUFS);
2191         }
2192 	m_head = *m_headp;
2193 
2194 	/* Do hardware assists */
2195 #if __FreeBSD_version >= 700000
2196 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2197 		error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
2198 		if (error != TRUE)
2199 			return (ENXIO); /* something foobar */
2200 		/* we need to make a final sentinel transmit desc */
2201 		tso_desc = TRUE;
2202 	} else
2203 #endif
2204 #ifndef EM_TIMESYNC
2205 	/*
2206 	** Timesync needs to check the packet header
2207 	** so call checksum code to do so, but don't
2208 	** penalize the code if not defined.
2209 	*/
2210 	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2211 #endif
2212 		em_transmit_checksum_setup(adapter,  m_head,
2213 		    &txd_upper, &txd_lower);
2214 
2215 	i = adapter->next_avail_tx_desc;
2216 	if (adapter->pcix_82544)
2217 		txd_saved = i;
2218 
2219 	/* Set up our transmit descriptors */
2220 	for (j = 0; j < nsegs; j++) {
2221 		bus_size_t seg_len;
2222 		bus_addr_t seg_addr;
2223 		/* If adapter is 82544 and on PCIX bus */
2224 		if(adapter->pcix_82544) {
2225 			DESC_ARRAY	desc_array;
2226 			u32		array_elements, counter;
2227 			/*
2228 			 * Check the Address and Length combination and
2229 			 * split the data accordingly
2230 			 */
2231 			array_elements = em_fill_descriptors(segs[j].ds_addr,
2232 			    segs[j].ds_len, &desc_array);
2233 			for (counter = 0; counter < array_elements; counter++) {
2234 				if (txd_used == adapter->num_tx_desc_avail) {
2235 					adapter->next_avail_tx_desc = txd_saved;
2236 					adapter->no_tx_desc_avail2++;
2237 					bus_dmamap_unload(adapter->txtag, map);
2238 					return (ENOBUFS);
2239 				}
2240 				tx_buffer = &adapter->tx_buffer_area[i];
2241 				ctxd = &adapter->tx_desc_base[i];
2242 				ctxd->buffer_addr = htole64(
2243 				    desc_array.descriptor[counter].address);
2244 				ctxd->lower.data = htole32(
2245 				    (adapter->txd_cmd | txd_lower | (u16)
2246 				    desc_array.descriptor[counter].length));
2247 				ctxd->upper.data =
2248 				    htole32((txd_upper));
2249 				last = i;
2250 				if (++i == adapter->num_tx_desc)
2251                                          i = 0;
2252 				tx_buffer->m_head = NULL;
2253 				tx_buffer->next_eop = -1;
2254 				txd_used++;
2255                         }
2256 		} else {
2257 			tx_buffer = &adapter->tx_buffer_area[i];
2258 			ctxd = &adapter->tx_desc_base[i];
2259 			seg_addr = segs[j].ds_addr;
2260 			seg_len  = segs[j].ds_len;
2261 			/*
2262 			** TSO Workaround:
2263 			** If this is the last descriptor, we want to
2264 			** split it so we have a small final sentinel
2265 			*/
2266 			if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
2267 				seg_len -= 4;
2268 				ctxd->buffer_addr = htole64(seg_addr);
2269 				ctxd->lower.data = htole32(
2270 				adapter->txd_cmd | txd_lower | seg_len);
2271 				ctxd->upper.data =
2272 				    htole32(txd_upper);
2273 				if (++i == adapter->num_tx_desc)
2274 					i = 0;
2275 				/* Now make the sentinel */
2276 				++txd_used; /* using an extra txd */
2277 				ctxd = &adapter->tx_desc_base[i];
2278 				tx_buffer = &adapter->tx_buffer_area[i];
2279 				ctxd->buffer_addr =
2280 				    htole64(seg_addr + seg_len);
2281 				ctxd->lower.data = htole32(
2282 				adapter->txd_cmd | txd_lower | 4);
2283 				ctxd->upper.data =
2284 				    htole32(txd_upper);
2285 				last = i;
2286 				if (++i == adapter->num_tx_desc)
2287 					i = 0;
2288 			} else {
2289 				ctxd->buffer_addr = htole64(seg_addr);
2290 				ctxd->lower.data = htole32(
2291 				adapter->txd_cmd | txd_lower | seg_len);
2292 				ctxd->upper.data =
2293 				    htole32(txd_upper);
2294 				last = i;
2295 				if (++i == adapter->num_tx_desc)
2296 					i = 0;
2297 			}
2298 			tx_buffer->m_head = NULL;
2299 			tx_buffer->next_eop = -1;
2300 		}
2301 	}
2302 
2303 	adapter->next_avail_tx_desc = i;
2304 	if (adapter->pcix_82544)
2305 		adapter->num_tx_desc_avail -= txd_used;
2306 	else {
2307 		adapter->num_tx_desc_avail -= nsegs;
2308 		if (tso_desc) /* TSO used an extra for sentinel */
2309 			adapter->num_tx_desc_avail -= txd_used;
2310 	}
2311 
2312         /*
2313 	** Handle VLAN tag, this is the
2314 	** biggest difference between
2315 	** 6.x and 7
2316 	*/
2317 #if __FreeBSD_version < 700000
2318         /* Find out if we are in vlan mode. */
2319         mtag = VLAN_OUTPUT_TAG(ifp, m_head);
2320         if (mtag != NULL) {
2321                 ctxd->upper.fields.special =
2322                     htole16(VLAN_TAG_VALUE(mtag));
2323 #else /* FreeBSD 7 */
2324 	if (m_head->m_flags & M_VLANTAG) {
2325 		/* Set the vlan id. */
2326 		ctxd->upper.fields.special =
2327 		    htole16(m_head->m_pkthdr.ether_vtag);
2328 #endif
2329                 /* Tell hardware to add tag */
2330                 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
2331         }
2332 
2333         tx_buffer->m_head = m_head;
2334 	tx_buffer_mapped->map = tx_buffer->map;
2335 	tx_buffer->map = map;
2336         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2337 
2338         /*
2339          * Last Descriptor of Packet
2340 	 * needs End Of Packet (EOP)
2341 	 * and Report Status (RS)
2342          */
2343         ctxd->lower.data |=
2344 	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2345 	/*
2346 	 * Keep track in the first buffer which
2347 	 * descriptor will be written back
2348 	 */
2349 	tx_buffer = &adapter->tx_buffer_area[first];
2350 	tx_buffer->next_eop = last;
2351 
2352 	/*
2353 	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2354 	 * that this frame is available to transmit.
2355 	 */
2356 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2357 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2358 	if (adapter->hw.mac.type == e1000_82547 &&
2359 	    adapter->link_duplex == HALF_DUPLEX)
2360 		em_82547_move_tail(adapter);
2361 	else {
2362 		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2363 		if (adapter->hw.mac.type == e1000_82547)
2364 			em_82547_update_fifo_head(adapter,
2365 			    m_head->m_pkthdr.len);
2366 	}
2367 
2368 #ifdef EM_TIMESYNC
2369 	if (ctxd->upper.data & E1000_TXD_EXTCMD_TSTAMP) {
2370 		HW_DEBUGOUT( "@@@ Timestamp bit is set in transmit descriptor\n" );
2371 	}
2372 #endif
2373 	return (0);
2374 }
2375 
2376 /*********************************************************************
2377  *
2378  * 82547 workaround to avoid controller hang in half-duplex environment.
2379  * The workaround is to avoid queuing a large packet that would span
2380  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2381  * in this case. We do that only when FIFO is quiescent.
2382  *
2383  **********************************************************************/
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct e1000_tx_desc *tx_desc;
	u16	hw_tdt, sw_tdt, length = 0;
	bool	eop = 0;

	/* Called from em_xmit and rescheduled from its own callout */
	EM_TX_LOCK_ASSERT(adapter);

	/* Walk descriptors from the hardware tail to the software tail */
	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		/* Accumulate the byte length of the current frame */
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			/*
			 * Frame complete.  If it would trip the FIFO
			 * boundary workaround, retry from a one-tick
			 * callout rather than exposing it now.
			 */
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			/* Safe to hand this frame to the hardware */
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}
2417 
2418 static int
2419 em_82547_fifo_workaround(struct adapter *adapter, int len)
2420 {
2421 	int fifo_space, fifo_pkt_len;
2422 
2423 	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2424 
2425 	if (adapter->link_duplex == HALF_DUPLEX) {
2426 		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2427 
2428 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2429 			if (em_82547_tx_fifo_reset(adapter))
2430 				return (0);
2431 			else
2432 				return (1);
2433 		}
2434 	}
2435 
2436 	return (0);
2437 }
2438 
2439 static void
2440 em_82547_update_fifo_head(struct adapter *adapter, int len)
2441 {
2442 	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2443 
2444 	/* tx_fifo_head is always 16 byte aligned */
2445 	adapter->tx_fifo_head += fifo_pkt_len;
2446 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2447 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
2448 	}
2449 }
2450 
2451 
2452 static int
2453 em_82547_tx_fifo_reset(struct adapter *adapter)
2454 {
2455 	u32 tctl;
2456 
2457 	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
2458 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
2459 	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
2460 	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
2461 	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
2462 	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
2463 	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
2464 		/* Disable TX unit */
2465 		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2466 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
2467 		    tctl & ~E1000_TCTL_EN);
2468 
2469 		/* Reset FIFO pointers */
2470 		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
2471 		    adapter->tx_head_addr);
2472 		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2473 		    adapter->tx_head_addr);
2474 		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2475 		    adapter->tx_head_addr);
2476 		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2477 		    adapter->tx_head_addr);
2478 
2479 		/* Re-enable TX unit */
2480 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2481 		E1000_WRITE_FLUSH(&adapter->hw);
2482 
2483 		adapter->tx_fifo_head = 0;
2484 		adapter->tx_fifo_reset_cnt++;
2485 
2486 		return (TRUE);
2487 	}
2488 	else {
2489 		return (FALSE);
2490 	}
2491 }
2492 
2493 static void
2494 em_set_promisc(struct adapter *adapter)
2495 {
2496 	struct ifnet	*ifp = adapter->ifp;
2497 	u32		reg_rctl;
2498 
2499 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2500 
2501 	if (ifp->if_flags & IFF_PROMISC) {
2502 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2503 		/* Turn this on if you want to see bad packets */
2504 		if (em_debug_sbp)
2505 			reg_rctl |= E1000_RCTL_SBP;
2506 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2507 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2508 		reg_rctl |= E1000_RCTL_MPE;
2509 		reg_rctl &= ~E1000_RCTL_UPE;
2510 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2511 	}
2512 }
2513 
2514 static void
2515 em_disable_promisc(struct adapter *adapter)
2516 {
2517 	u32	reg_rctl;
2518 
2519 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2520 
2521 	reg_rctl &=  (~E1000_RCTL_UPE);
2522 	reg_rctl &=  (~E1000_RCTL_MPE);
2523 	reg_rctl &=  (~E1000_RCTL_SBP);
2524 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2525 }
2526 
2527 
2528 /*********************************************************************
2529  *  Multicast Update
2530  *
2531  *  This routine is called whenever multicast address list is updated.
2532  *
2533  **********************************************************************/
2534 
static void
em_set_multi(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct ifmultiaddr *ifma;
	u32 reg_rctl = 0;
	u8  *mta; /* Multicast array memory */
	int mcnt = 0;

	IOCTL_DEBUGOUT("em_set_multi: begin");

	/*
	 * For 82542 rev 2: disable MWI and hold the receiver in reset
	 * while the multicast table is rewritten (restored below).
	 */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Allocate temporary memory to setup array */
	/* NOTE(review): panicking on a failed M_NOWAIT allocation is
	 * severe; a fallback to E1000_RCTL_MPE would be gentler. */
	mta = malloc(sizeof(u8) *
	    (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mta == NULL)
		panic("em_set_multi memory failure\n");

	/* Flatten the link-level multicast addresses into mta[] */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	/* Too many groups for the filter: accept all multicast instead */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else
		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

	/* Release the 82542 rev 2 receiver reset and restore MWI */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_set_mwi(&adapter->hw);
	}
	free(mta, M_DEVBUF);
}
2595 
2596 
2597 /*********************************************************************
2598  *  Timer routine
2599  *
2600  *  This routine checks for link status and updates statistics.
2601  *
2602  **********************************************************************/
2603 
2604 static void
2605 em_local_timer(void *arg)
2606 {
2607 	struct adapter	*adapter = arg;
2608 	struct ifnet	*ifp = adapter->ifp;
2609 
2610 	EM_CORE_LOCK_ASSERT(adapter);
2611 
2612 	taskqueue_enqueue(adapter->tq,
2613 	    &adapter->rxtx_task);
2614 	em_update_link_status(adapter);
2615 	em_update_stats_counters(adapter);
2616 
2617 	/* Reset LAA into RAR[0] on 82571 */
2618 	if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
2619 		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2620 
2621 	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
2622 		em_print_hw_stats(adapter);
2623 
2624 	em_smartspeed(adapter);
2625 
2626 	/*
2627 	 * Each second we check the watchdog to
2628 	 * protect against hardware hangs.
2629 	 */
2630 	em_watchdog(adapter);
2631 
2632 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
2633 
2634 }
2635 
/*
 * Refresh the cached link state and, on an up/down transition,
 * propagate it to the ifnet layer.  Callers (em_local_timer,
 * em_media_status) hold the core lock.
 */
static void
em_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			/* shared code clears get_link_status on success */
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		/* Link-up bit comes straight from the STATUS register */
		e1000_check_for_link(hw);
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		/* Link came up: record negotiated speed and duplex */
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		/* Check if we must disable SPEED_MODE bit on PCI-E */
		if ((adapter->link_speed != SPEED_1000) &&
		    ((hw->mac.type == e1000_82571) ||
		    (hw->mac.type == e1000_82572))) {
			int tarc0;
			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~SPEED_MODE_BIT;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		/* Tell the stack the carrier is up */
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Link went down: clear cached speed/duplex */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_timer = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
2703 
2704 /*********************************************************************
2705  *
2706  *  This routine disables all traffic on the adapter by issuing a
2707  *  global reset on the MAC and deallocates TX/RX buffers.
2708  *
2709  *  This routine should always be called with BOTH the CORE
2710  *  and TX locks.
2711  **********************************************************************/
2712 
static void
em_stop(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	/* Caller must hold both the core and TX locks */
	EM_CORE_LOCK_ASSERT(adapter);
	EM_TX_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("em_stop: begin");

	/* Quiesce interrupts and stop the periodic callouts */
	em_disable_intr(adapter);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

#ifdef EM_TIMESYNC
	/* Disable IEEE 1588 Time hardware */
	if ((adapter->hw.mac.type == e1000_82574) ||
	    (adapter->hw.mac.type == e1000_ich10lan))
		em_tsync_disable(adapter);
#endif

	/* Global MAC reset; then clear wakeup control on >= 82544 parts */
	e1000_reset_hw(&adapter->hw);
	if (adapter->hw.mac.type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
}
2742 
2743 
2744 /*********************************************************************
2745  *
2746  *  Determine hardware revision.
2747  *
2748  **********************************************************************/
2749 static void
2750 em_identify_hardware(struct adapter *adapter)
2751 {
2752 	device_t dev = adapter->dev;
2753 
2754 	/* Make sure our PCI config space has the necessary stuff set */
2755 	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2756 	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2757 	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2758 		device_printf(dev, "Memory Access and/or Bus Master bits "
2759 		    "were not set!\n");
2760 		adapter->hw.bus.pci_cmd_word |=
2761 		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2762 		pci_write_config(dev, PCIR_COMMAND,
2763 		    adapter->hw.bus.pci_cmd_word, 2);
2764 	}
2765 
2766 	/* Save off the information about this board */
2767 	adapter->hw.vendor_id = pci_get_vendor(dev);
2768 	adapter->hw.device_id = pci_get_device(dev);
2769 	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2770 	adapter->hw.subsystem_vendor_id =
2771 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2772 	adapter->hw.subsystem_device_id =
2773 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2774 
2775 	/* Do Shared Code Init and Setup */
2776 	if (e1000_set_mac_type(&adapter->hw)) {
2777 		device_printf(dev, "Setup init failure\n");
2778 		return;
2779 	}
2780 }
2781 
static int
em_allocate_pci_resources(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	int		val, rid, error = E1000_SUCCESS;

	/* Map BAR(0), the memory-mapped register window */
	rid = PCIR_BAR(0);
	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->memory);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Only older adapters use IO mapping */
	if ((adapter->hw.mac.type > e1000_82543) &&
	    (adapter->hw.mac.type < e1000_82571)) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		/* Ran off the end of the BAR region without a hit */
		if (rid >= PCIR_CIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (adapter->ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->ioport);
	}

	/*
	** Init the resource arrays
	**  used by MSIX setup
	*/
	for (int i = 0; i < 3; i++) {
		adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
		adapter->tag[i] = NULL;
		adapter->res[i] = NULL;
	}

	/*
	 * Setup MSI/X or MSI if PCI Express
	 */
	if (em_enable_msi)
		adapter->msi = em_setup_msix(adapter);

	/* Back-pointer used by the shared code's osdep layer */
	adapter->hw.back = &adapter->osdep;

	return (error);
}
2854 
2855 /*********************************************************************
2856  *
2857  *  Setup the Legacy or MSI Interrupt handler
2858  *
2859  **********************************************************************/
int
em_allocate_legacy(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int error;

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* Legacy RID is 0 (MSI uses RID 1 set up in allocate_pci_resources) */
	if (adapter->msi == 0)
		adapter->rid[0] = 0;

	/* We allocate a single interrupt resource */
	adapter->res[0] = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res[0] == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

#ifdef EM_LEGACY_IRQ
	/* We do Legacy setup: a plain (non-filter) ithread handler */
	if ((error = bus_setup_intr(dev, adapter->res[0],
#if __FreeBSD_version > 700000
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
#else /* 6.X */
	    INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register interrupt handler");
		return (error);
	}

#else /* FAST_IRQ */
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.  The filter (em_irq_fast) runs in interrupt
	 * context and defers real work to the taskqueue below.
	 */
	TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
#if __FreeBSD_version < 700000
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET | INTR_FAST, em_irq_fast, adapter,
#else
	/* On 7.x+ em_irq_fast is registered as a filter, no ithread handler */
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET, em_irq_fast, NULL, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
			    "handler: %d\n", error);
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
		return (error);
	}
#endif  /* EM_LEGACY_IRQ */

	return (0);
}
2924 
2925 /*********************************************************************
2926  *
2927  *  Setup the MSIX Interrupt handlers
2928  *   This is not really Multiqueue, rather
2929  *   its just multiple interrupt vectors.
2930  *
2931  **********************************************************************/
2932 int
2933 em_allocate_msix(struct adapter *adapter)
2934 {
2935 	device_t dev = adapter->dev;
2936 	int error;
2937 
2938 	/* Make sure all interrupts are disabled */
2939 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2940 
2941 	/* First get the resources */
2942 	for (int i = 0; i < adapter->msi; i++) {
2943 		adapter->res[i] = bus_alloc_resource_any(dev,
2944 		    SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
2945 		if (adapter->res[i] == NULL) {
2946 			device_printf(dev,
2947 			    "Unable to allocate bus resource: "
2948 			    "MSIX Interrupt\n");
2949 			return (ENXIO);
2950 		}
2951 	}
2952 
2953 	/*
2954 	 * Now allocate deferred processing contexts.
2955 	 */
2956 	TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
2957 	TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
2958 	/*
2959 	 * Handle compatibility for msi case for deferral due to
2960 	 * trylock failure
2961 	 */
2962 	TASK_INIT(&adapter->rxtx_task, 0, em_handle_tx, adapter);
2963 	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2964 	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2965 	    taskqueue_thread_enqueue, &adapter->tq);
2966 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2967 	    device_get_nameunit(adapter->dev));
2968 
2969 	/*
2970 	 * And setup the interrupt handlers
2971 	 */
2972 
2973 	/* First slot to RX */
2974 	if ((error = bus_setup_intr(dev, adapter->res[0],
2975 #if __FreeBSD_version > 700000
2976 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, adapter,
2977 #else /* 6.X */
2978 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_rx, adapter,
2979 #endif
2980 	    &adapter->tag[0])) != 0) {
2981 		device_printf(dev, "Failed to register RX handler");
2982 		return (error);
2983 	}
2984 
2985 	/* Next TX */
2986 	if ((error = bus_setup_intr(dev, adapter->res[1],
2987 #if __FreeBSD_version > 700000
2988 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, adapter,
2989 #else /* 6.X */
2990 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_tx, adapter,
2991 #endif
2992 	    &adapter->tag[1])) != 0) {
2993 		device_printf(dev, "Failed to register TX handler");
2994 		return (error);
2995 	}
2996 
2997 	/* And Link */
2998 	if ((error = bus_setup_intr(dev, adapter->res[2],
2999 #if __FreeBSD_version > 700000
3000 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter,
3001 #else /* 6.X */
3002 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_link, adapter,
3003 #endif
3004 	    &adapter->tag[2])) != 0) {
3005 		device_printf(dev, "Failed to register TX handler");
3006 		return (error);
3007 	}
3008 
3009 	return (0);
3010 }
3011 
3012 
/*
 * Release all PCI resources acquired in em_allocate_pci_resources() /
 * em_setup_msix(): interrupt handlers and IRQ resources first, then MSI
 * vectors, then the memory-mapped BARs (MSIX table, registers, flash)
 * and the IO port.  Safe to call with partially-allocated state since
 * every release is NULL-guarded.
 */
static void
em_free_pci_resources(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Make sure the for loop below runs once */
	if (adapter->msi == 0)
		adapter->msi = 1;

	/*
	 * First release all the interrupt resources:
	 *      notice that since these are just kept
	 *      in an array we can do the same logic
	 *      whether its MSIX or just legacy.
	 */
	for (int i = 0; i < adapter->msi; i++) {
		if (adapter->tag[i] != NULL) {
			bus_teardown_intr(dev, adapter->res[i],
			    adapter->tag[i]);
			adapter->tag[i] = NULL;
		}
		if (adapter->res[i] != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    adapter->rid[i], adapter->res[i]);
		}
	}

	/* msi was forced non-zero above, so vectors are always released */
	if (adapter->msi)
		pci_release_msi(dev);

	if (adapter->msix != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(EM_MSIX_BAR), adapter->msix);

	if (adapter->memory != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->memory);

	if (adapter->flash != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    EM_FLASH, adapter->flash);

	if (adapter->ioport != NULL)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    adapter->io_rid, adapter->ioport);
}
3059 
3060 /*
3061  * Setup MSI or MSI/X
3062  */
3063 static int
3064 em_setup_msix(struct adapter *adapter)
3065 {
3066 	device_t dev = adapter->dev;
3067 	int val = 0;
3068 
3069 	if (adapter->hw.mac.type < e1000_82571)
3070 		return (0);
3071 
3072 	/* Setup MSI/X for Hartwell */
3073 	if (adapter->hw.mac.type == e1000_82574) {
3074 		/* Map the MSIX BAR */
3075 		int rid = PCIR_BAR(EM_MSIX_BAR);
3076 		adapter->msix = bus_alloc_resource_any(dev,
3077 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
3078        		if (!adapter->msix) {
3079 			/* May not be enabled */
3080                		device_printf(adapter->dev,
3081 			    "Unable to map MSIX table \n");
3082 			goto msi;
3083        		}
3084 		val = pci_msix_count(dev);
3085 		/*
3086 		** 82574 can be configured for 5 but
3087 		** we limit use to 3.
3088 		*/
3089 		if (val > 3) val = 3;
3090 		if ((val) && pci_alloc_msix(dev, &val) == 0) {
3091                		device_printf(adapter->dev,"Using MSIX interrupts\n");
3092 			return (val);
3093 		}
3094 	}
3095 msi:
3096        	val = pci_msi_count(dev);
3097        	if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
3098                	adapter->msi = 1;
3099                	device_printf(adapter->dev,"Using MSI interrupt\n");
3100 		return (val);
3101 	}
3102 	return (0);
3103 }
3104 
3105 /*********************************************************************
3106  *
3107  *  Initialize the hardware to a configuration
3108  *  as specified by the adapter structure.
3109  *
3110  **********************************************************************/
static int
em_hardware_init(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u16 	rx_buffer_size;

	INIT_DEBUGOUT("em_hardware_init: begin");

	/* Issue a global reset */
	e1000_reset_hw(&adapter->hw);

	/* Get control from any management/hw control */
	if (((adapter->hw.mac.type == e1000_82573) ||
	    (adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_ich10lan) ||
	    (adapter->hw.mac.type == e1000_ich9lan)) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_get_hw_control(adapter);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Set up smart power down as default off on newer adapters. */
	if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
	    adapter->hw.mac.type == e1000_82572)) {
		u16 phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	/*
	 * NOTE(review): rx_buffer_size is u16 — a PBA RX allocation of
	 * 64KB or more would truncate here.  Presumably the RX PBA on
	 * supported parts is always below 64KB; confirm.
	 */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
	    0xffff) << 10 );

	adapter->hw.fc.high_water = rx_buffer_size -
	    roundup2(adapter->max_frame_size, 1024);
	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

	if (adapter->hw.mac.type == e1000_80003es2lan)
		adapter->hw.fc.pause_time = 0xFFFF;
	else
		adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
	adapter->hw.fc.send_xon = TRUE;
	adapter->hw.fc.requested_mode = e1000_fc_full;

	if (e1000_init_hw(&adapter->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	e1000_check_for_link(&adapter->hw);

	return (0);
}
3183 
3184 /*********************************************************************
3185  *
3186  *  Setup networking device structure and register an interface.
3187  *
3188  **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("em_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	/* Leave one descriptor of slack in the software send queue */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	/* Capabilities are rebuilt from scratch below */
	ifp->if_capabilities = ifp->if_capenable = 0;

#ifdef IFNET_BUF_RING
	ifp->if_transmit = em_transmit;
	ifp->if_qflush = em_qflush;
	adapter->br = buf_ring_alloc(2048, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
#endif
	/* Checksum offload: 82543 and newer */
	if (adapter->hw.mac.type >= e1000_82543) {
		int version_cap;
#if __FreeBSD_version < 700000
		version_cap = IFCAP_HWCSUM;
#else
		version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
#endif
		ifp->if_capabilities |= version_cap;
		ifp->if_capenable |= version_cap;
	}

#if __FreeBSD_version >= 700000
	/* Identify TSO capable adapters */
	if ((adapter->hw.mac.type > e1000_82544) &&
	    (adapter->hw.mac.type != e1000_82547))
		ifp->if_capabilities |= IFCAP_TSO4;
	/*
	 * By default only enable on PCI-E, this
	 * can be overriden by ifconfig.
	 */
	if (adapter->hw.mac.type >= e1000_82571)
		ifp->if_capenable |= IFCAP_TSO4;
#endif

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    em_media_change, em_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		u_char fiber_type = IFM_1000_SX;	/* default type */

		/* 82545 fiber parts are 1000baseLX */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		/* The ife (ICH8 10/100) PHY has no gigabit support */
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
}
3287 
3288 
3289 /*********************************************************************
3290  *
3291  *  Workaround for SmartSpeed on 82541 and 82547 controllers
3292  *
3293  **********************************************************************/
/*
 * SmartSpeed workaround state machine, called periodically while the
 * link is down on igp-PHY parts advertising 1000FDX.  First it clears a
 * stuck Master/Slave configuration fault; if the link is still down
 * after EM_SMARTSPEED_DOWNSHIFT polls it forces master mode (helps on
 * 2/3-pair cabling); the cycle restarts after EM_SMARTSPEED_MAX polls.
 */
static void
em_smartspeed(struct adapter *adapter)
{
	u16 phy_tmp;

	/* Only relevant: link down, igp PHY, autoneg w/ 1000FDX advertised */
	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
	    adapter->hw.mac.autoneg == 0 ||
	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&adapter->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Clear manual MS config and re-negotiate */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&adapter->hw,
				    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.mac.autoneg &&
				   !e1000_phy_setup_autoneg(&adapter->hw) &&
				   !e1000_read_phy_reg(&adapter->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					e1000_write_phy_reg(&adapter->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.mac.autoneg &&
		   !e1000_phy_setup_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
3348 
3349 
3350 /*
3351  * Manage DMA'able memory.
3352  */
3353 static void
3354 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3355 {
3356 	if (error)
3357 		return;
3358 	*(bus_addr_t *) arg = segs[0].ds_addr;
3359 }
3360 
3361 static int
3362 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3363         struct em_dma_alloc *dma, int mapflags)
3364 {
3365 	int error;
3366 
3367 #if __FreeBSD_version >= 700000
3368 	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3369 #else
3370 	error = bus_dma_tag_create(NULL,		 /* parent */
3371 #endif
3372 				EM_DBA_ALIGN, 0,	/* alignment, bounds */
3373 				BUS_SPACE_MAXADDR,	/* lowaddr */
3374 				BUS_SPACE_MAXADDR,	/* highaddr */
3375 				NULL, NULL,		/* filter, filterarg */
3376 				size,			/* maxsize */
3377 				1,			/* nsegments */
3378 				size,			/* maxsegsize */
3379 				0,			/* flags */
3380 				NULL,			/* lockfunc */
3381 				NULL,			/* lockarg */
3382 				&dma->dma_tag);
3383 	if (error) {
3384 		device_printf(adapter->dev,
3385 		    "%s: bus_dma_tag_create failed: %d\n",
3386 		    __func__, error);
3387 		goto fail_0;
3388 	}
3389 
3390 	error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3391 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3392 	if (error) {
3393 		device_printf(adapter->dev,
3394 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3395 		    __func__, (uintmax_t)size, error);
3396 		goto fail_2;
3397 	}
3398 
3399 	dma->dma_paddr = 0;
3400 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3401 	    size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3402 	if (error || dma->dma_paddr == 0) {
3403 		device_printf(adapter->dev,
3404 		    "%s: bus_dmamap_load failed: %d\n",
3405 		    __func__, error);
3406 		goto fail_3;
3407 	}
3408 
3409 	return (0);
3410 
3411 fail_3:
3412 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3413 fail_2:
3414 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3415 	bus_dma_tag_destroy(dma->dma_tag);
3416 fail_0:
3417 	dma->dma_map = NULL;
3418 	dma->dma_tag = NULL;
3419 
3420 	return (error);
3421 }
3422 
3423 static void
3424 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3425 {
3426 	if (dma->dma_tag == NULL)
3427 		return;
3428 	if (dma->dma_map != NULL) {
3429 		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3430 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3431 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3432 		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3433 		dma->dma_map = NULL;
3434 	}
3435 	bus_dma_tag_destroy(dma->dma_tag);
3436 	dma->dma_tag = NULL;
3437 }
3438 
3439 
3440 /*********************************************************************
3441  *
3442  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
3443  *  the information needed to transmit a packet on the wire.
3444  *
3445  **********************************************************************/
static int
em_allocate_transmit_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *tx_buffer;
	int error;

	/*
	 * Create DMA tags for tx descriptors
	 * (sized for TSO: up to EM_TSO_SIZE across EM_MAX_SCATTER segments)
	 */
#if __FreeBSD_version >= 700000
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
#else
	if ((error = bus_dma_tag_create(NULL,		 /* parent */
#endif
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				EM_TSO_SIZE,		/* maxsize */
				EM_MAX_SCATTER,		/* nsegments */
				EM_TSO_SEG_SIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,		/* lockfunc */
				NULL,		/* lockarg */
				&adapter->txtag)) != 0) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->tx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	for (int i = 0; i < adapter->num_tx_desc; i++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
		/* -1 marks "no end-of-packet descriptor outstanding" */
		tx_buffer->next_eop = -1;
	}

	return (0);
fail:
	/* Tears down whatever was created above; safe on partial state */
	em_free_transmit_structures(adapter);
	return (error);
}
3500 
3501 /*********************************************************************
3502  *
3503  *  (Re)Initialize transmit structures.
3504  *
3505  **********************************************************************/
3506 static void
3507 em_setup_transmit_structures(struct adapter *adapter)
3508 {
3509 	struct em_buffer *tx_buffer;
3510 
3511 	/* Clear the old ring contents */
3512 	bzero(adapter->tx_desc_base,
3513 	    (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3514 
3515 	/* Free any existing TX buffers */
3516 	for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3517 		tx_buffer = &adapter->tx_buffer_area[i];
3518 		bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3519 		    BUS_DMASYNC_POSTWRITE);
3520 		bus_dmamap_unload(adapter->txtag, tx_buffer->map);
3521 		m_freem(tx_buffer->m_head);
3522 		tx_buffer->m_head = NULL;
3523 		tx_buffer->next_eop = -1;
3524 	}
3525 
3526 	/* Reset state */
3527 	adapter->next_avail_tx_desc = 0;
3528 	adapter->next_tx_to_clean = 0;
3529 	adapter->num_tx_desc_avail = adapter->num_tx_desc;
3530 
3531 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3532 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3533 
3534 	return;
3535 }
3536 
3537 /*********************************************************************
3538  *
3539  *  Enable transmit unit.
3540  *
3541  **********************************************************************/
static void
em_initialize_transmit_unit(struct adapter *adapter)
{
	u32	tctl, tarc, tipg = 0;
	u64	bus_addr;

	 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (u32)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	case e1000_80003es2lan:
		tipg = DEFAULT_82543_TIPG_IPGR1;
		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
		    E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* Fiber/serdes parts use a different IPGT value */
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	/* Absolute TX interrupt delay only exists on 82540 and newer */
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	if ((adapter->hw.mac.type == e1000_82571) ||
	    (adapter->hw.mac.type == e1000_82572)) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= SPEED_MODE_BIT;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
	}

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* Multiple simultaneous TX requests on PCI-E parts */
	if (adapter->hw.mac.type >= e1000_82571)
		tctl |= E1000_TCTL_MULR;

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Only request TX interrupt-delay if a delay is configured */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
3626 
3627 /*********************************************************************
3628  *
3629  *  Free all transmit related data structures.
3630  *
3631  **********************************************************************/
3632 static void
3633 em_free_transmit_structures(struct adapter *adapter)
3634 {
3635 	struct em_buffer *tx_buffer;
3636 
3637 	INIT_DEBUGOUT("free_transmit_structures: begin");
3638 
3639 	if (adapter->tx_buffer_area != NULL) {
3640 		for (int i = 0; i < adapter->num_tx_desc; i++) {
3641 			tx_buffer = &adapter->tx_buffer_area[i];
3642 			if (tx_buffer->m_head != NULL) {
3643 				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3644 				    BUS_DMASYNC_POSTWRITE);
3645 				bus_dmamap_unload(adapter->txtag,
3646 				    tx_buffer->map);
3647 				m_freem(tx_buffer->m_head);
3648 				tx_buffer->m_head = NULL;
3649 			} else if (tx_buffer->map != NULL)
3650 				bus_dmamap_unload(adapter->txtag,
3651 				    tx_buffer->map);
3652 			if (tx_buffer->map != NULL) {
3653 				bus_dmamap_destroy(adapter->txtag,
3654 				    tx_buffer->map);
3655 				tx_buffer->map = NULL;
3656 			}
3657 		}
3658 	}
3659 	if (adapter->tx_buffer_area != NULL) {
3660 		free(adapter->tx_buffer_area, M_DEVBUF);
3661 		adapter->tx_buffer_area = NULL;
3662 	}
3663 	if (adapter->txtag != NULL) {
3664 		bus_dma_tag_destroy(adapter->txtag);
3665 		adapter->txtag = NULL;
3666 	}
3667 }
3668 
3669 /*********************************************************************
3670  *
3671  *  The offload context needs to be set when we transfer the first
3672  *  packet of a particular protocol (TCP/UDP). This routine has been
3673  *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3674  *
3675  **********************************************************************/
3676 static void
3677 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3678     u32 *txd_upper, u32 *txd_lower)
3679 {
3680 	struct e1000_context_desc *TXD;
3681 	struct em_buffer *tx_buffer;
3682 	struct ether_vlan_header *eh;
3683 	struct ip *ip = NULL;
3684 	struct ip6_hdr *ip6;
3685 	struct tcp_hdr *th;
3686 	int curr_txd, ehdrlen;
3687 	u32 cmd, hdr_len, ip_hlen;
3688 	u16 etype;
3689 	u8 ipproto;
3690 
3691 	cmd = hdr_len = ipproto = 0;
3692 	/* Setup checksum offload context. */
3693 	curr_txd = adapter->next_avail_tx_desc;
3694 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
3695 	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3696 
3697 	/*
3698 	 * Determine where frame payload starts.
3699 	 * Jump over vlan headers if already present,
3700 	 * helpful for QinQ too.
3701 	 */
3702 	eh = mtod(mp, struct ether_vlan_header *);
3703 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3704 		etype = ntohs(eh->evl_proto);
3705 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3706 	} else {
3707 		etype = ntohs(eh->evl_encap_proto);
3708 		ehdrlen = ETHER_HDR_LEN;
3709 	}
3710 
3711 	/*
3712 	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3713 	 * TODO: Support SCTP too when it hits the tree.
3714 	 */
3715 	switch (etype) {
3716 	case ETHERTYPE_IP:
3717 		ip = (struct ip *)(mp->m_data + ehdrlen);
3718 		ip_hlen = ip->ip_hl << 2;
3719 
3720 		/* Setup of IP header checksum. */
3721 		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3722 			/*
3723 			 * Start offset for header checksum calculation.
3724 			 * End offset for header checksum calculation.
3725 			 * Offset of place to put the checksum.
3726 			 */
3727 			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3728 			TXD->lower_setup.ip_fields.ipcse =
3729 			    htole16(ehdrlen + ip_hlen);
3730 			TXD->lower_setup.ip_fields.ipcso =
3731 			    ehdrlen + offsetof(struct ip, ip_sum);
3732 			cmd |= E1000_TXD_CMD_IP;
3733 			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3734 		}
3735 
3736 		if (mp->m_len < ehdrlen + ip_hlen)
3737 			return;	/* failure */
3738 
3739 		hdr_len = ehdrlen + ip_hlen;
3740 		ipproto = ip->ip_p;
3741 
3742 		break;
3743 	case ETHERTYPE_IPV6:
3744 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3745 		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3746 
3747 		if (mp->m_len < ehdrlen + ip_hlen)
3748 			return;	/* failure */
3749 
3750 		/* IPv6 doesn't have a header checksum. */
3751 
3752 		hdr_len = ehdrlen + ip_hlen;
3753 		ipproto = ip6->ip6_nxt;
3754 
3755 		break;
3756 #ifdef EM_TIMESYNC
3757 	case ETHERTYPE_IEEE1588:
3758 		*txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
3759 		break;
3760 #endif
3761 	default:
3762 		*txd_upper = 0;
3763 		*txd_lower = 0;
3764 		return;
3765 	}
3766 
3767 	switch (ipproto) {
3768 	case IPPROTO_TCP:
3769 		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3770 			/*
3771 			 * Start offset for payload checksum calculation.
3772 			 * End offset for payload checksum calculation.
3773 			 * Offset of place to put the checksum.
3774 			 */
3775 			th = (struct tcp_hdr *)(mp->m_data + hdr_len);
3776 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3777 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3778 			TXD->upper_setup.tcp_fields.tucso =
3779 			    hdr_len + offsetof(struct tcphdr, th_sum);
3780 			cmd |= E1000_TXD_CMD_TCP;
3781 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3782 		}
3783 		break;
3784 	case IPPROTO_UDP:
3785 	{
3786 #ifdef EM_TIMESYNC
3787 		void *hdr = (caddr_t) ip + ip_hlen;
3788 		struct udphdr *uh = (struct udphdr *)hdr;
3789 
3790 		if (uh->uh_dport == htons(TSYNC_PORT)) {
3791 			*txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
3792 			IOCTL_DEBUGOUT("@@@ Sending Event Packet\n");
3793 		}
3794 #endif
3795 		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3796 			/*
3797 			 * Start offset for header checksum calculation.
3798 			 * End offset for header checksum calculation.
3799 			 * Offset of place to put the checksum.
3800 			 */
3801 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3802 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3803 			TXD->upper_setup.tcp_fields.tucso =
3804 			    hdr_len + offsetof(struct udphdr, uh_sum);
3805 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3806 		}
3807 		/* Fall Thru */
3808 	}
3809 	default:
3810 		break;
3811 	}
3812 
3813 #ifdef EM_TIMESYNC
3814 	/*
3815 	** We might be here just for TIMESYNC
3816 	** which means we don't need the context
3817 	** descriptor.
3818 	*/
3819 	if (!mp->m_pkthdr.csum_flags & CSUM_OFFLOAD)
3820 		return;
3821 #endif
3822 	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
3823 		     E1000_TXD_DTYP_D;		/* Data descr */
3824 	TXD->tcp_seg_setup.data = htole32(0);
3825 	TXD->cmd_and_length =
3826 	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3827 	tx_buffer->m_head = NULL;
3828 	tx_buffer->next_eop = -1;
3829 
3830 	if (++curr_txd == adapter->num_tx_desc)
3831 		curr_txd = 0;
3832 
3833 	adapter->num_tx_desc_avail--;
3834 	adapter->next_avail_tx_desc = curr_txd;
3835 }
3836 
3837 
3838 #if __FreeBSD_version >= 700000
3839 /**********************************************************************
3840  *
3841  *  Setup work for hardware segmentation offload (TSO)
3842  *
3843  **********************************************************************/
static bool
em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
   u32 *txd_lower)
{
	struct e1000_context_desc *TXD;	/* TSO context descriptor we fill */
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th;
	int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
	u16 etype;

	/*
	 * This function could/should be extended to support IP/IPv6
	 * fragmentation as well.  But as they say, one step at a time.
	 */

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;	/* -1 */

	/*
	 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		isip6 = 0;
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return FALSE;	/* 0 */
		/*
		 * Zero ip_len/ip_sum so the hardware can fill in the
		 * per-segment values when it splits the frame.
		 */
		ip->ip_len = 0;
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		/* Re-check now that the true IP header length is known. */
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
#if 1
		/*
		 * Seed th_sum with the pseudo-header checksum (addresses
		 * + protocol, no length); hardware adds the per-segment
		 * length during TSO.
		 */
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	case ETHERTYPE_IPV6:
		isip6 = 1;
		/*
		 * NOTE(review): IPv6 TSO is disabled; everything below
		 * this return in the case is unreachable.  If it is ever
		 * enabled, the disabled in6_pseudo() call also references
		 * ip->ip6_dst where ip6->ip6_dst looks intended — verify.
		 */
		return FALSE;			/* Not supported yet. */
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return FALSE;	/* 0 */
		ip6->ip6_plen = 0;
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
#if 0
		th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
		    htons(IPPROTO_TCP));	/* XXX: function notyet. */
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	default:
		return FALSE;
	}
	/* Total length of all headers, up to the start of TCP payload. */
	hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);

	*txd_lower = (E1000_TXD_CMD_DEXT |	/* Extended descr type */
		      E1000_TXD_DTYP_D |	/* Data descr type */
		      E1000_TXD_CMD_TSE);	/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
		      E1000_TXD_POPTS_TXSM) << 8;

	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];

	/* IPv6 doesn't have a header checksum. */
	if (!isip6) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
		TXD->lower_setup.ip_fields.ipcse =
		    htole16(ehdrlen + ip_hlen - 1);
		TXD->lower_setup.ip_fields.ipcso =
		    ehdrlen + offsetof(struct ip, ip_sum);
	}
	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss =
	    ehdrlen + ip_hlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
	TXD->tcp_seg_setup.fields.hdr_len = hdr_len;

	TXD->cmd_and_length = htole32(adapter->txd_cmd |
				E1000_TXD_CMD_DEXT |	/* Extended descr */
				E1000_TXD_CMD_TSE |	/* TSE context */
				(isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
				E1000_TXD_CMD_TCP |	/* Do TCP checksum */
				(mp->m_pkthdr.len - (hdr_len))); /* Total len */

	/* The context descriptor carries no mbuf of its own. */
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	/* Consume one descriptor slot for the context descriptor. */
	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	adapter->tx_tso = TRUE;

	return TRUE;
}
3987 
3988 #endif /* __FreeBSD_version >= 700000 */
3989 
3990 /**********************************************************************
3991  *
3992  *  Examine each tx_buffer in the used queue. If the hardware is done
3993  *  processing the packet then free associated resources. The
3994  *  tx_buffer is put back on the free queue.
3995  *
3996  **********************************************************************/
static void
em_txeof(struct adapter *adapter)
{
        int first, last, done, num_avail;
	u32 cleaned = 0;	/* count of descriptors retired this call */
        struct em_buffer *tx_buffer;
        struct e1000_tx_desc   *tx_desc, *eop_desc;
	struct ifnet   *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

	/* Ring completely free: nothing to clean. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        first = adapter->next_tx_to_clean;
        tx_desc = &adapter->tx_desc_base[first];
        tx_buffer = &adapter->tx_buffer_area[first];
	last = tx_buffer->next_eop;
        eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
 		last = 0;
	done = last;

        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);

	/* Each outer iteration retires one complete packet (up to its EOP). */
        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
                	tx_desc->upper.data = 0;
                	tx_desc->lower.data = 0;
                	tx_desc->buffer_addr = 0;
                	++num_avail; ++cleaned;

			/* Only the descriptor holding the mbuf unmaps/frees. */
			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

                        	m_freem(tx_buffer->m_head);
                        	tx_buffer->m_head = NULL;
                	}
			tx_buffer->next_eop = -1;

	                if (++first == adapter->num_tx_desc)
				first = 0;

	                tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
        		eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
        }
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->next_tx_to_clean = first;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to
         * tell the stack that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc) {
			adapter->watchdog_timer = 0;
        		adapter->num_tx_desc_avail = num_avail;
			return;
		}
        }

	/* If any descriptors cleaned, reset the watchdog */
	if (cleaned)
		adapter->watchdog_timer = EM_TX_TIMEOUT;
        adapter->num_tx_desc_avail = num_avail;
	return;
}
4093 
4094 /*********************************************************************
4095  *
4096  *  When Link is lost sometimes there is work still in the TX ring
4097  *  which will result in a watchdog, rather than allow that do an
4098  *  attempted cleanup and then reinit here. Note that this has been
4099  *  seens mostly with fiber adapters.
4100  *
4101  **********************************************************************/
4102 static void
4103 em_tx_purge(struct adapter *adapter)
4104 {
4105 	if ((!adapter->link_active) && (adapter->watchdog_timer)) {
4106 		EM_TX_LOCK(adapter);
4107 		em_txeof(adapter);
4108 		EM_TX_UNLOCK(adapter);
4109 		if (adapter->watchdog_timer) { /* Still not clean? */
4110 			adapter->watchdog_timer = 0;
4111 			em_init_locked(adapter);
4112 		}
4113 	}
4114 }
4115 
4116 /*********************************************************************
4117  *
4118  *  Get a buffer from system mbuf buffer pool.
4119  *
4120  **********************************************************************/
4121 static int
4122 em_get_buf(struct adapter *adapter, int i)
4123 {
4124 	struct mbuf		*m;
4125 	bus_dma_segment_t	segs[1];
4126 	bus_dmamap_t		map;
4127 	struct em_buffer	*rx_buffer;
4128 	int			error, nsegs;
4129 
4130 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
4131 	if (m == NULL) {
4132 		adapter->mbuf_cluster_failed++;
4133 		return (ENOBUFS);
4134 	}
4135 	m->m_len = m->m_pkthdr.len = MCLBYTES;
4136 
4137 	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
4138 		m_adj(m, ETHER_ALIGN);
4139 
4140 	/*
4141 	 * Using memory from the mbuf cluster pool, invoke the
4142 	 * bus_dma machinery to arrange the memory mapping.
4143 	 */
4144 	error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
4145 	    adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
4146 	if (error != 0) {
4147 		m_free(m);
4148 		return (error);
4149 	}
4150 
4151 	/* If nsegs is wrong then the stack is corrupt. */
4152 	KASSERT(nsegs == 1, ("Too many segments returned!"));
4153 
4154 	rx_buffer = &adapter->rx_buffer_area[i];
4155 	if (rx_buffer->m_head != NULL)
4156 		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4157 
4158 	map = rx_buffer->map;
4159 	rx_buffer->map = adapter->rx_sparemap;
4160 	adapter->rx_sparemap = map;
4161 	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
4162 	rx_buffer->m_head = m;
4163 
4164 	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
4165 	return (0);
4166 }
4167 
4168 /*********************************************************************
4169  *
4170  *  Allocate memory for rx_buffer structures. Since we use one
4171  *  rx_buffer per received packet, the maximum number of rx_buffer's
4172  *  that we'll need is equal to the number of receive descriptors
4173  *  that we've allocated.
4174  *
4175  **********************************************************************/
4176 static int
4177 em_allocate_receive_structures(struct adapter *adapter)
4178 {
4179 	device_t dev = adapter->dev;
4180 	struct em_buffer *rx_buffer;
4181 	int i, error;
4182 
4183 	adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
4184 	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
4185 	if (adapter->rx_buffer_area == NULL) {
4186 		device_printf(dev, "Unable to allocate rx_buffer memory\n");
4187 		return (ENOMEM);
4188 	}
4189 
4190 #if __FreeBSD_version >= 700000
4191 	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
4192 #else
4193 	error = bus_dma_tag_create(NULL,		 /* parent */
4194 #endif
4195 				1, 0,			/* alignment, bounds */
4196 				BUS_SPACE_MAXADDR,	/* lowaddr */
4197 				BUS_SPACE_MAXADDR,	/* highaddr */
4198 				NULL, NULL,		/* filter, filterarg */
4199 				MCLBYTES,		/* maxsize */
4200 				1,			/* nsegments */
4201 				MCLBYTES,		/* maxsegsize */
4202 				0,			/* flags */
4203 				NULL,			/* lockfunc */
4204 				NULL,			/* lockarg */
4205 				&adapter->rxtag);
4206 	if (error) {
4207 		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
4208 		    __func__, error);
4209 		goto fail;
4210 	}
4211 
4212 	/* Create the spare map (used by getbuf) */
4213 	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4214 	     &adapter->rx_sparemap);
4215 	if (error) {
4216 		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
4217 		    __func__, error);
4218 		goto fail;
4219 	}
4220 
4221 	rx_buffer = adapter->rx_buffer_area;
4222 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4223 		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4224 		    &rx_buffer->map);
4225 		if (error) {
4226 			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
4227 			    __func__, error);
4228 			goto fail;
4229 		}
4230 	}
4231 
4232 	return (0);
4233 
4234 fail:
4235 	em_free_receive_structures(adapter);
4236 	return (error);
4237 }
4238 
4239 /*********************************************************************
4240  *
4241  *  (Re)initialize receive structures.
4242  *
4243  **********************************************************************/
4244 static int
4245 em_setup_receive_structures(struct adapter *adapter)
4246 {
4247 	struct em_buffer *rx_buffer;
4248 	int i, error;
4249 
4250 	/* Reset descriptor ring */
4251 	bzero(adapter->rx_desc_base,
4252 	    (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
4253 
4254 	/* Free current RX buffers. */
4255 	rx_buffer = adapter->rx_buffer_area;
4256 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4257 		if (rx_buffer->m_head != NULL) {
4258 			bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4259 			    BUS_DMASYNC_POSTREAD);
4260 			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4261 			m_freem(rx_buffer->m_head);
4262 			rx_buffer->m_head = NULL;
4263 		}
4264         }
4265 
4266 	/* Allocate new ones. */
4267 	for (i = 0; i < adapter->num_rx_desc; i++) {
4268 		error = em_get_buf(adapter, i);
4269 		if (error)
4270                         return (error);
4271 	}
4272 
4273 	/* Setup our descriptor pointers */
4274 	adapter->next_rx_desc_to_check = 0;
4275 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4276 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4277 
4278 	return (0);
4279 }
4280 
4281 /*********************************************************************
4282  *
4283  *  Enable receive unit.
4284  *
4285  **********************************************************************/
4286 #define MAX_INTS_PER_SEC	8000
4287 #define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
4288 
static void
em_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/* Interrupt moderation registers exist on 82540 and later. */
	if (adapter->hw.mac.type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/*
	** When using MSIX interrupts we need to throttle
	** using the EITR register (82574 only)
	*/
	if (adapter->msix)
		for (int i = 0; i < 4; i++)
			E1000_WRITE_REG(&adapter->hw,
			    E1000_EITR_82574(i), DEFAULT_ITR);

	/* Disable accelerated acknowledge */
	if (adapter->hw.mac.type == e1000_82574)
		E1000_WRITE_REG(&adapter->hw,
		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* Store-bad-packets is needed only for the 82543 TBI workaround. */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Program the RX buffer size; sizes above 2048 need BSEX+LPE. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long-packet enable tracks the configured MTU. */
	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/*
	** XXX TEMPORARY WORKAROUND: on some systems with 82573
	** long latencies are observed, like Lenovo X60. This
	** change eliminates the problem, but since having positive
	** values in RDTR is a known source of problems on other
	** platforms another solution is being sought.
	*/
	if (adapter->hw.mac.type == e1000_82573)
		E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
4406 
4407 /*********************************************************************
4408  *
4409  *  Free receive related data structures.
4410  *
4411  **********************************************************************/
4412 static void
4413 em_free_receive_structures(struct adapter *adapter)
4414 {
4415 	struct em_buffer *rx_buffer;
4416 	int i;
4417 
4418 	INIT_DEBUGOUT("free_receive_structures: begin");
4419 
4420 	if (adapter->rx_sparemap) {
4421 		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
4422 		adapter->rx_sparemap = NULL;
4423 	}
4424 
4425 	/* Cleanup any existing buffers */
4426 	if (adapter->rx_buffer_area != NULL) {
4427 		rx_buffer = adapter->rx_buffer_area;
4428 		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4429 			if (rx_buffer->m_head != NULL) {
4430 				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4431 				    BUS_DMASYNC_POSTREAD);
4432 				bus_dmamap_unload(adapter->rxtag,
4433 				    rx_buffer->map);
4434 				m_freem(rx_buffer->m_head);
4435 				rx_buffer->m_head = NULL;
4436 			} else if (rx_buffer->map != NULL)
4437 				bus_dmamap_unload(adapter->rxtag,
4438 				    rx_buffer->map);
4439 			if (rx_buffer->map != NULL) {
4440 				bus_dmamap_destroy(adapter->rxtag,
4441 				    rx_buffer->map);
4442 				rx_buffer->map = NULL;
4443 			}
4444 		}
4445 	}
4446 
4447 	if (adapter->rx_buffer_area != NULL) {
4448 		free(adapter->rx_buffer_area, M_DEVBUF);
4449 		adapter->rx_buffer_area = NULL;
4450 	}
4451 
4452 	if (adapter->rxtag != NULL) {
4453 		bus_dma_tag_destroy(adapter->rxtag);
4454 		adapter->rxtag = NULL;
4455 	}
4456 }
4457 
4458 /*********************************************************************
4459  *
4460  *  This routine executes in interrupt context. It replenishes
4461  *  the mbufs in the descriptor and sends data which has been
4462  *  dma'ed into host memory to upper layer.
4463  *
4464  *  We loop at most count times if count is > 0, or until done if
4465  *  count < 0.
4466  *
4467  *********************************************************************/
4468 static int
4469 em_rxeof(struct adapter *adapter, int count, int *rx_npktsp)
4470 {
4471 	struct ifnet	*ifp = adapter->ifp;;
4472 	struct mbuf	*mp;
4473 	u8		status, accept_frame = 0, eop = 0;
4474 	u16 		len, desc_len, prev_len_adj;
4475 	int		i, rx_npkts;
4476 	struct e1000_rx_desc   *current_desc;
4477 
4478 	EM_RX_LOCK(adapter);
4479 	i = adapter->next_rx_desc_to_check;
4480 	rx_npkts = 0;
4481 	current_desc = &adapter->rx_desc_base[i];
4482 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4483 	    BUS_DMASYNC_POSTREAD);
4484 
4485 	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
4486 		EM_RX_UNLOCK(adapter);
4487 		if (rx_npktsp != NULL)
4488 			*rx_npktsp = rx_npkts;
4489 		return (0);
4490 	}
4491 
4492 	while ((current_desc->status & E1000_RXD_STAT_DD) &&
4493 	    (count != 0) &&
4494 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4495 		struct mbuf *m = NULL;
4496 
4497 		mp = adapter->rx_buffer_area[i].m_head;
4498 		/*
4499 		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
4500 		 * needs to access the last received byte in the mbuf.
4501 		 */
4502 		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
4503 		    BUS_DMASYNC_POSTREAD);
4504 
4505 		accept_frame = 1;
4506 		prev_len_adj = 0;
4507 		desc_len = le16toh(current_desc->length);
4508 		status = current_desc->status;
4509 		if (status & E1000_RXD_STAT_EOP) {
4510 			count--;
4511 			eop = 1;
4512 			if (desc_len < ETHER_CRC_LEN) {
4513 				len = 0;
4514 				prev_len_adj = ETHER_CRC_LEN - desc_len;
4515 			} else
4516 				len = desc_len - ETHER_CRC_LEN;
4517 		} else {
4518 			eop = 0;
4519 			len = desc_len;
4520 		}
4521 
4522 		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
4523 			u8	last_byte;
4524 			u32	pkt_len = desc_len;
4525 
4526 			if (adapter->fmp != NULL)
4527 				pkt_len += adapter->fmp->m_pkthdr.len;
4528 
4529 			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
4530 			if (TBI_ACCEPT(&adapter->hw, status,
4531 			    current_desc->errors, pkt_len, last_byte,
4532 			    adapter->min_frame_size, adapter->max_frame_size)) {
4533 				e1000_tbi_adjust_stats_82543(&adapter->hw,
4534 				    &adapter->stats, pkt_len,
4535 				    adapter->hw.mac.addr,
4536 				    adapter->max_frame_size);
4537 				if (len > 0)
4538 					len--;
4539 			} else
4540 				accept_frame = 0;
4541 		}
4542 
4543 		if (accept_frame) {
4544 			if (em_get_buf(adapter, i) != 0) {
4545 				ifp->if_iqdrops++;
4546 				goto discard;
4547 			}
4548 
4549 			/* Assign correct length to the current fragment */
4550 			mp->m_len = len;
4551 
4552 			if (adapter->fmp == NULL) {
4553 				mp->m_pkthdr.len = len;
4554 				adapter->fmp = mp; /* Store the first mbuf */
4555 				adapter->lmp = mp;
4556 			} else {
4557 				/* Chain mbuf's together */
4558 				mp->m_flags &= ~M_PKTHDR;
4559 				/*
4560 				 * Adjust length of previous mbuf in chain if
4561 				 * we received less than 4 bytes in the last
4562 				 * descriptor.
4563 				 */
4564 				if (prev_len_adj > 0) {
4565 					adapter->lmp->m_len -= prev_len_adj;
4566 					adapter->fmp->m_pkthdr.len -=
4567 					    prev_len_adj;
4568 				}
4569 				adapter->lmp->m_next = mp;
4570 				adapter->lmp = adapter->lmp->m_next;
4571 				adapter->fmp->m_pkthdr.len += len;
4572 			}
4573 
4574 			if (eop) {
4575 				adapter->fmp->m_pkthdr.rcvif = ifp;
4576 				ifp->if_ipackets++;
4577 				em_receive_checksum(adapter, current_desc,
4578 				    adapter->fmp);
4579 #ifndef __NO_STRICT_ALIGNMENT
4580 				if (adapter->max_frame_size >
4581 				    (MCLBYTES - ETHER_ALIGN) &&
4582 				    em_fixup_rx(adapter) != 0)
4583 					goto skip;
4584 #endif
4585 				if (status & E1000_RXD_STAT_VP) {
4586 #if __FreeBSD_version < 700000
4587 					VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
4588 					    (le16toh(current_desc->special) &
4589 					    E1000_RXD_SPC_VLAN_MASK));
4590 #else
4591 					adapter->fmp->m_pkthdr.ether_vtag =
4592 					    (le16toh(current_desc->special) &
4593 					    E1000_RXD_SPC_VLAN_MASK);
4594 					adapter->fmp->m_flags |= M_VLANTAG;
4595 #endif
4596 				}
4597 #ifndef __NO_STRICT_ALIGNMENT
4598 skip:
4599 #endif
4600 				m = adapter->fmp;
4601 				adapter->fmp = NULL;
4602 				adapter->lmp = NULL;
4603 			}
4604 		} else {
4605 			ifp->if_ierrors++;
4606 discard:
4607 			/* Reuse loaded DMA map and just update mbuf chain */
4608 			mp = adapter->rx_buffer_area[i].m_head;
4609 			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
4610 			mp->m_data = mp->m_ext.ext_buf;
4611 			mp->m_next = NULL;
4612 			if (adapter->max_frame_size <=
4613 			    (MCLBYTES - ETHER_ALIGN))
4614 				m_adj(mp, ETHER_ALIGN);
4615 			if (adapter->fmp != NULL) {
4616 				m_freem(adapter->fmp);
4617 				adapter->fmp = NULL;
4618 				adapter->lmp = NULL;
4619 			}
4620 			m = NULL;
4621 		}
4622 
4623 		/* Zero out the receive descriptors status. */
4624 		current_desc->status = 0;
4625 		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4626 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4627 
4628 		/* Advance our pointers to the next descriptor. */
4629 		if (++i == adapter->num_rx_desc)
4630 			i = 0;
4631 		if (m != NULL) {
4632 			adapter->next_rx_desc_to_check = i;
4633 			/* Unlock for call into stack */
4634 			EM_RX_UNLOCK(adapter);
4635 			(*ifp->if_input)(ifp, m);
4636 			EM_RX_LOCK(adapter);
4637 			rx_npkts++;
4638 			i = adapter->next_rx_desc_to_check;
4639 		}
4640 		current_desc = &adapter->rx_desc_base[i];
4641 	}
4642 	adapter->next_rx_desc_to_check = i;
4643 
4644 	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
4645 	if (--i < 0)
4646 		i = adapter->num_rx_desc - 1;
4647 	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
4648 	EM_RX_UNLOCK(adapter);
4649 	if (rx_npktsp != NULL)
4650 		*rx_npktsp = rx_npkts;
4651 	if (!((current_desc->status) & E1000_RXD_STAT_DD))
4652 		return (0);
4653 
4654 	return (1);
4655 }
4656 
4657 #ifndef __NO_STRICT_ALIGNMENT
4658 /*
4659  * When jumbo frames are enabled we should realign entire payload on
4660  * architecures with strict alignment. This is serious design mistake of 8254x
4661  * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4662  * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4663  * payload. On architecures without strict alignment restrictions 8254x still
4664  * performs unaligned memory access which would reduce the performance too.
4665  * To avoid copying over an entire frame to align, we allocate a new mbuf and
4666  * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4667  * existing mbuf chain.
4668  *
4669  * Be aware, best performance of the 8254x is achived only when jumbo frame is
4670  * not used at all on architectures with strict alignment.
4671  */
4672 static int
4673 em_fixup_rx(struct adapter *adapter)
4674 {
4675 	struct mbuf *m, *n;
4676 	int error;
4677 
4678 	error = 0;
4679 	m = adapter->fmp;
4680 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4681 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4682 		m->m_data += ETHER_HDR_LEN;
4683 	} else {
4684 		MGETHDR(n, M_DONTWAIT, MT_DATA);
4685 		if (n != NULL) {
4686 			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4687 			m->m_data += ETHER_HDR_LEN;
4688 			m->m_len -= ETHER_HDR_LEN;
4689 			n->m_len = ETHER_HDR_LEN;
4690 			M_MOVE_PKTHDR(n, m);
4691 			n->m_next = m;
4692 			adapter->fmp = n;
4693 		} else {
4694 			adapter->dropped_pkts++;
4695 			m_freem(adapter->fmp);
4696 			adapter->fmp = NULL;
4697 			error = ENOMEM;
4698 		}
4699 	}
4700 
4701 	return (error);
4702 }
4703 #endif
4704 
4705 /*********************************************************************
4706  *
4707  *  Verify that the hardware indicated that the checksum is valid.
4708  *  Inform the stack about the status of checksum so that stack
4709  *  doesn't spend time verifying the checksum.
4710  *
4711  *********************************************************************/
4712 static void
4713 em_receive_checksum(struct adapter *adapter,
4714 	    struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4715 {
4716 	/* 82543 or newer only */
4717 	if ((adapter->hw.mac.type < e1000_82543) ||
4718 	    /* Ignore Checksum bit is set */
4719 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4720 		mp->m_pkthdr.csum_flags = 0;
4721 		return;
4722 	}
4723 
4724 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4725 		/* Did it pass? */
4726 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4727 			/* IP Checksum Good */
4728 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4729 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4730 
4731 		} else {
4732 			mp->m_pkthdr.csum_flags = 0;
4733 		}
4734 	}
4735 
4736 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4737 		/* Did it pass? */
4738 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4739 			mp->m_pkthdr.csum_flags |=
4740 			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4741 			mp->m_pkthdr.csum_data = htons(0xffff);
4742 		}
4743 	}
4744 }
4745 
4746 /*
 * This routine is run via a vlan
4748  * config EVENT
4749  */
4750 static void
4751 em_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
4752 {
4753 	struct adapter	*adapter = ifp->if_softc;
4754 	u32		ctrl, rctl, index, vfta;
4755 
4756 	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4757 	ctrl |= E1000_CTRL_VME;
4758 	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4759 
4760 	/* Setup for Hardware Filter */
4761 	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4762 	rctl |= E1000_RCTL_VFE;
4763 	rctl &= ~E1000_RCTL_CFIEN;
4764 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4765 
4766 	/* Make entry in the hardware filter table */
4767 	index = ((vtag >> 5) & 0x7F);
4768 	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
4769 	vfta |= (1 << (vtag & 0x1F));
4770 	E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);
4771 
4772 	/* Update the frame size */
4773 	E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
4774 	    adapter->max_frame_size + VLAN_TAG_SIZE);
4775 
4776 }
4777 
4778 /*
 * This routine is run via a vlan
4780  * unconfig EVENT
4781  */
4782 static void
4783 em_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
4784 {
4785 	struct adapter	*adapter = ifp->if_softc;
4786 	u32		index, vfta;
4787 
4788 	/* Remove entry in the hardware filter table */
4789 	index = ((vtag >> 5) & 0x7F);
4790 	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
4791 	vfta &= ~(1 << (vtag & 0x1F));
4792 	E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);
4793 	/* Have all vlans unregistered? */
4794 	if (adapter->ifp->if_vlantrunk == NULL) {
4795 		u32 rctl;
4796 		/* Turn off the filter table */
4797 		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4798 		rctl &= ~E1000_RCTL_VFE;
4799 		rctl |= E1000_RCTL_CFIEN;
4800 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4801 		/* Reset the frame size */
4802 		E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
4803 		    adapter->max_frame_size);
4804 	}
4805 }
4806 
4807 static void
4808 em_enable_intr(struct adapter *adapter)
4809 {
4810 	struct e1000_hw *hw = &adapter->hw;
4811 	u32 ims_mask = IMS_ENABLE_MASK;
4812 
4813 	if (adapter->msix) {
4814 		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4815 		ims_mask |= EM_MSIX_MASK;
4816 	}
4817 	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
4818 }
4819 
4820 static void
4821 em_disable_intr(struct adapter *adapter)
4822 {
4823 	struct e1000_hw *hw = &adapter->hw;
4824 
4825 	if (adapter->msix)
4826 		E1000_WRITE_REG(hw, EM_EIAC, 0);
4827 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4828 }
4829 
4830 /*
4831  * Bit of a misnomer, what this really means is
4832  * to enable OS management of the system... aka
4833  * to disable special hardware management features
4834  */
4835 static void
4836 em_init_manageability(struct adapter *adapter)
4837 {
4838 	/* A shared code workaround */
4839 #define E1000_82542_MANC2H E1000_MANC2H
4840 	if (adapter->has_manage) {
4841 		int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
4842 		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4843 
4844 		/* disable hardware interception of ARP */
4845 		manc &= ~(E1000_MANC_ARP_EN);
4846 
4847                 /* enable receiving management packets to the host */
4848                 if (adapter->hw.mac.type >= e1000_82571) {
4849 			manc |= E1000_MANC_EN_MNG2HOST;
4850 #define E1000_MNG2HOST_PORT_623 (1 << 5)
4851 #define E1000_MNG2HOST_PORT_664 (1 << 6)
4852 			manc2h |= E1000_MNG2HOST_PORT_623;
4853 			manc2h |= E1000_MNG2HOST_PORT_664;
4854 			E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
4855 		}
4856 
4857 		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4858 	}
4859 }
4860 
4861 /*
4862  * Give control back to hardware management
4863  * controller if there is one.
4864  */
4865 static void
4866 em_release_manageability(struct adapter *adapter)
4867 {
4868 	if (adapter->has_manage) {
4869 		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4870 
4871 		/* re-enable hardware interception of ARP */
4872 		manc |= E1000_MANC_ARP_EN;
4873 
4874 		if (adapter->hw.mac.type >= e1000_82571)
4875 			manc &= ~E1000_MANC_EN_MNG2HOST;
4876 
4877 		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4878 	}
4879 }
4880 
4881 /*
4882  * em_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4883  * For ASF and Pass Through versions of f/w this means that
4884  * the driver is loaded. For AMT version (only with 82573)
4885  * of the f/w this means that the network i/f is open.
4886  *
4887  */
4888 static void
4889 em_get_hw_control(struct adapter *adapter)
4890 {
4891 	u32 ctrl_ext, swsm;
4892 
4893 	/* Let firmware know the driver has taken over */
4894 	switch (adapter->hw.mac.type) {
4895 	case e1000_82573:
4896 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4897 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4898 		    swsm | E1000_SWSM_DRV_LOAD);
4899 		break;
4900 	case e1000_82571:
4901 	case e1000_82572:
4902 	case e1000_80003es2lan:
4903 	case e1000_ich8lan:
4904 	case e1000_ich9lan:
4905 	case e1000_ich10lan:
4906 		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4907 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4908 		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4909 		break;
4910 	default:
4911 		break;
4912 	}
4913 }
4914 
4915 /*
4916  * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4917  * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
4919  * of the f/w this means that the network i/f is closed.
4920  *
4921  */
4922 static void
4923 em_release_hw_control(struct adapter *adapter)
4924 {
4925 	u32 ctrl_ext, swsm;
4926 
4927 	/* Let firmware taken over control of h/w */
4928 	switch (adapter->hw.mac.type) {
4929 	case e1000_82573:
4930 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4931 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4932 		    swsm & ~E1000_SWSM_DRV_LOAD);
4933 		break;
4934 	case e1000_82571:
4935 	case e1000_82572:
4936 	case e1000_80003es2lan:
4937 	case e1000_ich8lan:
4938 	case e1000_ich9lan:
4939 	case e1000_ich10lan:
4940 		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4941 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4942 		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4943 		break;
4944 	default:
4945 		break;
4946 
4947 	}
4948 }
4949 
4950 static int
4951 em_is_valid_ether_addr(u8 *addr)
4952 {
4953 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4954 
4955 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4956 		return (FALSE);
4957 	}
4958 
4959 	return (TRUE);
4960 }
4961 
4962 /*
4963  * Enable PCI Wake On Lan capability
4964  */
4965 void
4966 em_enable_wakeup(device_t dev)
4967 {
4968 	u16     cap, status;
4969 	u8      id;
4970 
4971 	/* First find the capabilities pointer*/
4972 	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
4973 	/* Read the PM Capabilities */
4974 	id = pci_read_config(dev, cap, 1);
4975 	if (id != PCIY_PMG)     /* Something wrong */
4976 		return;
4977 	/* OK, we have the power capabilities, so
4978 	   now get the status register */
4979 	cap += PCIR_POWER_STATUS;
4980 	status = pci_read_config(dev, cap, 2);
4981 	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4982 	pci_write_config(dev, cap, status, 2);
4983 	return;
4984 }
4985 
4986 
4987 /*********************************************************************
4988 * 82544 Coexistence issue workaround.
4989 *    There are 2 issues.
4990 *       1. Transmit Hang issue.
4991 *    To detect this issue, following equation can be used...
4992 *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4993 *	  If SUM[3:0] is in between 1 to 4, we will have this issue.
4994 *
4995 *       2. DAC issue.
4996 *    To detect this issue, following equation can be used...
4997 *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4998 *	  If SUM[3:0] is in between 9 to c, we will have this issue.
4999 *
5000 *
5001 *    WORKAROUND:
5002 *	  Make sure we do not have ending address
5003 *	  as 1,2,3,4(Hang) or 9,a,b,c (DAC)
5004 *
5005 *************************************************************************/
5006 static u32
5007 em_fill_descriptors (bus_addr_t address, u32 length,
5008 		PDESC_ARRAY desc_array)
5009 {
5010 	u32 safe_terminator;
5011 
5012 	/* Since issue is sensitive to length and address.*/
5013 	/* Let us first check the address...*/
5014 	if (length <= 4) {
5015 		desc_array->descriptor[0].address = address;
5016 		desc_array->descriptor[0].length = length;
5017 		desc_array->elements = 1;
5018 		return (desc_array->elements);
5019 	}
5020 	safe_terminator = (u32)((((u32)address & 0x7) +
5021 	    (length & 0xF)) & 0xF);
5022 	/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
5023 	if (safe_terminator == 0   ||
5024 	(safe_terminator > 4   &&
5025 	safe_terminator < 9)   ||
5026 	(safe_terminator > 0xC &&
5027 	safe_terminator <= 0xF)) {
5028 		desc_array->descriptor[0].address = address;
5029 		desc_array->descriptor[0].length = length;
5030 		desc_array->elements = 1;
5031 		return (desc_array->elements);
5032 	}
5033 
5034 	desc_array->descriptor[0].address = address;
5035 	desc_array->descriptor[0].length = length - 4;
5036 	desc_array->descriptor[1].address = address + (length - 4);
5037 	desc_array->descriptor[1].length = 4;
5038 	desc_array->elements = 2;
5039 	return (desc_array->elements);
5040 }
5041 
5042 /**********************************************************************
5043  *
5044  *  Update the board statistics counters.
5045  *
5046  **********************************************************************/
5047 static void
5048 em_update_stats_counters(struct adapter *adapter)
5049 {
5050 	struct ifnet   *ifp;
5051 
5052 	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
5053 	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
5054 		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
5055 		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
5056 	}
5057 	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
5058 	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
5059 	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
5060 	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
5061 
5062 	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
5063 	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
5064 	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
5065 	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
5066 	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
5067 	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
5068 	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
5069 	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
5070 	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
5071 	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
5072 	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
5073 	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
5074 	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
5075 	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
5076 	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
5077 	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
5078 	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
5079 	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
5080 	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
5081 	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
5082 
5083 	/* For the 64-bit byte counters the low dword must be read first. */
5084 	/* Both registers clear on the read of the high dword */
5085 
5086 	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
5087 	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
5088 
5089 	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
5090 	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
5091 	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
5092 	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
5093 	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
5094 
5095 	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
5096 	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
5097 
5098 	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
5099 	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
5100 	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
5101 	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
5102 	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
5103 	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
5104 	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
5105 	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
5106 	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
5107 	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
5108 
5109 	if (adapter->hw.mac.type >= e1000_82543) {
5110 		adapter->stats.algnerrc +=
5111 		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
5112 		adapter->stats.rxerrc +=
5113 		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
5114 		adapter->stats.tncrs +=
5115 		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
5116 		adapter->stats.cexterr +=
5117 		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
5118 		adapter->stats.tsctc +=
5119 		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
5120 		adapter->stats.tsctfc +=
5121 		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
5122 	}
5123 	ifp = adapter->ifp;
5124 
5125 	ifp->if_collisions = adapter->stats.colc;
5126 
5127 	/* Rx Errors */
5128 	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
5129 	    adapter->stats.crcerrs + adapter->stats.algnerrc +
5130 	    adapter->stats.ruc + adapter->stats.roc +
5131 	    adapter->stats.mpc + adapter->stats.cexterr;
5132 
5133 	/* Tx Errors */
5134 	ifp->if_oerrors = adapter->stats.ecol +
5135 	    adapter->stats.latecol + adapter->watchdog_events;
5136 }
5137 
5138 
5139 /**********************************************************************
5140  *
5141  *  This routine is called only when em_display_debug_stats is enabled.
5142  *  This routine provides a way to take a look at important statistics
5143  *  maintained by the driver and hardware.
5144  *
5145  **********************************************************************/
5146 static void
5147 em_print_debug_info(struct adapter *adapter)
5148 {
5149 	device_t dev = adapter->dev;
5150 	u8 *hw_addr = adapter->hw.hw_addr;
5151 
5152 	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
5153 	device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
5154 	    E1000_READ_REG(&adapter->hw, E1000_CTRL),
5155 	    E1000_READ_REG(&adapter->hw, E1000_RCTL));
5156 	device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
5157 	    ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
5158 	    (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
5159 	device_printf(dev, "Flow control watermarks high = %d low = %d\n",
5160 	    adapter->hw.fc.high_water,
5161 	    adapter->hw.fc.low_water);
5162 	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
5163 	    E1000_READ_REG(&adapter->hw, E1000_TIDV),
5164 	    E1000_READ_REG(&adapter->hw, E1000_TADV));
5165 	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
5166 	    E1000_READ_REG(&adapter->hw, E1000_RDTR),
5167 	    E1000_READ_REG(&adapter->hw, E1000_RADV));
5168 	device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
5169 	    (long long)adapter->tx_fifo_wrk_cnt,
5170 	    (long long)adapter->tx_fifo_reset_cnt);
5171 	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5172 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5173 	    E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5174 	device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5175 	    E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5176 	    E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5177 	device_printf(dev, "Num Tx descriptors avail = %d\n",
5178 	    adapter->num_tx_desc_avail);
5179 	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5180 	    adapter->no_tx_desc_avail1);
5181 	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5182 	    adapter->no_tx_desc_avail2);
5183 	device_printf(dev, "Std mbuf failed = %ld\n",
5184 	    adapter->mbuf_alloc_failed);
5185 	device_printf(dev, "Std mbuf cluster failed = %ld\n",
5186 	    adapter->mbuf_cluster_failed);
5187 	device_printf(dev, "Driver dropped packets = %ld\n",
5188 	    adapter->dropped_pkts);
5189 	device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5190 		adapter->no_tx_dma_setup);
5191 }
5192 
5193 static void
5194 em_print_hw_stats(struct adapter *adapter)
5195 {
5196 	device_t dev = adapter->dev;
5197 
5198 	device_printf(dev, "Excessive collisions = %lld\n",
5199 	    (long long)adapter->stats.ecol);
5200 #if	(DEBUG_HW > 0)  /* Dont output these errors normally */
5201 	device_printf(dev, "Symbol errors = %lld\n",
5202 	    (long long)adapter->stats.symerrs);
5203 #endif
5204 	device_printf(dev, "Sequence errors = %lld\n",
5205 	    (long long)adapter->stats.sec);
5206 	device_printf(dev, "Defer count = %lld\n",
5207 	    (long long)adapter->stats.dc);
5208 	device_printf(dev, "Missed Packets = %lld\n",
5209 	    (long long)adapter->stats.mpc);
5210 	device_printf(dev, "Receive No Buffers = %lld\n",
5211 	    (long long)adapter->stats.rnbc);
5212 	/* RLEC is inaccurate on some hardware, calculate our own. */
5213 	device_printf(dev, "Receive Length Errors = %lld\n",
5214 	    ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
5215 	device_printf(dev, "Receive errors = %lld\n",
5216 	    (long long)adapter->stats.rxerrc);
5217 	device_printf(dev, "Crc errors = %lld\n",
5218 	    (long long)adapter->stats.crcerrs);
5219 	device_printf(dev, "Alignment errors = %lld\n",
5220 	    (long long)adapter->stats.algnerrc);
5221 	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
5222 	    (long long)adapter->stats.cexterr);
5223 	device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
5224 	device_printf(dev, "watchdog timeouts = %ld\n",
5225 	    adapter->watchdog_events);
5226 	device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
5227 	    " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
5228 	    adapter->tx_irq , adapter->link_irq);
5229 	device_printf(dev, "XON Rcvd = %lld\n",
5230 	    (long long)adapter->stats.xonrxc);
5231 	device_printf(dev, "XON Xmtd = %lld\n",
5232 	    (long long)adapter->stats.xontxc);
5233 	device_printf(dev, "XOFF Rcvd = %lld\n",
5234 	    (long long)adapter->stats.xoffrxc);
5235 	device_printf(dev, "XOFF Xmtd = %lld\n",
5236 	    (long long)adapter->stats.xofftxc);
5237 	device_printf(dev, "Good Packets Rcvd = %lld\n",
5238 	    (long long)adapter->stats.gprc);
5239 	device_printf(dev, "Good Packets Xmtd = %lld\n",
5240 	    (long long)adapter->stats.gptc);
5241 	device_printf(dev, "TSO Contexts Xmtd = %lld\n",
5242 	    (long long)adapter->stats.tsctc);
5243 	device_printf(dev, "TSO Contexts Failed = %lld\n",
5244 	    (long long)adapter->stats.tsctfc);
5245 }
5246 
5247 /**********************************************************************
5248  *
5249  *  This routine provides a way to dump out the adapter eeprom,
5250  *  often a useful debug/service tool. This only dumps the first
5251  *  32 words, stuff that matters is in that extent.
5252  *
5253  **********************************************************************/
5254 static void
5255 em_print_nvm_info(struct adapter *adapter)
5256 {
5257 	u16	eeprom_data;
5258 	int	i, j, row = 0;
5259 
5260 	/* Its a bit crude, but it gets the job done */
5261 	printf("\nInterface EEPROM Dump:\n");
5262 	printf("Offset\n0x0000  ");
5263 	for (i = 0, j = 0; i < 32; i++, j++) {
5264 		if (j == 8) { /* Make the offset block */
5265 			j = 0; ++row;
5266 			printf("\n0x00%x0  ",row);
5267 		}
5268 		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5269 		printf("%04x ", eeprom_data);
5270 	}
5271 	printf("\n");
5272 }
5273 
5274 static int
5275 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5276 {
5277 	struct adapter *adapter;
5278 	int error;
5279 	int result;
5280 
5281 	result = -1;
5282 	error = sysctl_handle_int(oidp, &result, 0, req);
5283 
5284 	if (error || !req->newptr)
5285 		return (error);
5286 
5287 	if (result == 1) {
5288 		adapter = (struct adapter *)arg1;
5289 		em_print_debug_info(adapter);
5290 	}
5291 	/*
5292 	 * This value will cause a hex dump of the
5293 	 * first 32 16-bit words of the EEPROM to
5294 	 * the screen.
5295 	 */
5296 	if (result == 2) {
5297 		adapter = (struct adapter *)arg1;
5298 		em_print_nvm_info(adapter);
5299         }
5300 
5301 	return (error);
5302 }
5303 
5304 
5305 static int
5306 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5307 {
5308 	struct adapter *adapter;
5309 	int error;
5310 	int result;
5311 
5312 	result = -1;
5313 	error = sysctl_handle_int(oidp, &result, 0, req);
5314 
5315 	if (error || !req->newptr)
5316 		return (error);
5317 
5318 	if (result == 1) {
5319 		adapter = (struct adapter *)arg1;
5320 		em_print_hw_stats(adapter);
5321 	}
5322 
5323 	return (error);
5324 }
5325 
5326 static int
5327 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
5328 {
5329 	struct em_int_delay_info *info;
5330 	struct adapter *adapter;
5331 	u32 regval;
5332 	int error;
5333 	int usecs;
5334 	int ticks;
5335 
5336 	info = (struct em_int_delay_info *)arg1;
5337 	usecs = info->value;
5338 	error = sysctl_handle_int(oidp, &usecs, 0, req);
5339 	if (error != 0 || req->newptr == NULL)
5340 		return (error);
5341 	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
5342 		return (EINVAL);
5343 	info->value = usecs;
5344 	ticks = EM_USECS_TO_TICKS(usecs);
5345 
5346 	adapter = info->adapter;
5347 
5348 	EM_CORE_LOCK(adapter);
5349 	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
5350 	regval = (regval & ~0xffff) | (ticks & 0xffff);
5351 	/* Handle a few special cases. */
5352 	switch (info->offset) {
5353 	case E1000_RDTR:
5354 		break;
5355 	case E1000_TIDV:
5356 		if (ticks == 0) {
5357 			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
5358 			/* Don't write 0 into the TIDV register. */
5359 			regval++;
5360 		} else
5361 			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
5362 		break;
5363 	}
5364 	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
5365 	EM_CORE_UNLOCK(adapter);
5366 	return (0);
5367 }
5368 
5369 static void
5370 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5371 	const char *description, struct em_int_delay_info *info,
5372 	int offset, int value)
5373 {
5374 	info->adapter = adapter;
5375 	info->offset = offset;
5376 	info->value = value;
5377 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5378 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5379 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5380 	    info, 0, em_sysctl_int_delay, "I", description);
5381 }
5382 
5383 #ifndef EM_LEGACY_IRQ
5384 static void
5385 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5386 	const char *description, int *limit, int value)
5387 {
5388 	*limit = value;
5389 	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5390 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5391 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
5392 }
5393 #endif
5394 
5395 #ifdef EM_TIMESYNC
5396 /*
5397  * Initialize the Time Sync Feature
5398  */
5399 static int
5400 em_tsync_init(struct adapter *adapter)
5401 {
5402 	device_t	dev = adapter->dev;
5403 	u32		tx_ctl, rx_ctl;
5404 
5405 
5406 	E1000_WRITE_REG(&adapter->hw, E1000_TIMINCA, (1<<24) |
5407 	    20833/PICOSECS_PER_TICK);
5408 
5409 	adapter->last_stamp =  E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
5410 	adapter->last_stamp |= (u64)E1000_READ_REG(&adapter->hw,
5411 	    E1000_SYSTIMH) << 32ULL;
5412 
5413 	/* Enable the TX side */
5414 	tx_ctl =  E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
5415 	tx_ctl |= 0x10;
5416 	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
5417 	E1000_WRITE_FLUSH(&adapter->hw);
5418 
5419 	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
5420 	if ((tx_ctl & 0x10) == 0) {
5421      		device_printf(dev, "Failed to enable TX timestamping\n");
5422 		return (ENXIO);
5423 	}
5424 
5425 	/* Enable RX */
5426 	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
5427 	rx_ctl |= 0x10; /* Enable the feature */
5428 	rx_ctl |= 0x0a; /* This value turns on Ver 1 and 2 */
5429 	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);
5430 
5431 	/*
5432 	 * Ethertype Stamping (Ethertype = 0x88F7)
5433 	 */
5434 	E1000_WRITE_REG(&adapter->hw, E1000_RXMTRL, htonl(0x440088f7));
5435 
5436 	/*
5437 	 * Source Port Queue Filter Setup:
5438 	 *  this is for UDP port filtering
5439 	 */
5440 	E1000_WRITE_REG(&adapter->hw, E1000_RXUDP, htons(TSYNC_PORT));
5441 	/* Protocol = UDP, enable Timestamp, and filter on source/protocol */
5442 
5443 	E1000_WRITE_FLUSH(&adapter->hw);
5444 
5445 	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
5446 	if ((rx_ctl & 0x10) == 0) {
5447      		device_printf(dev, "Failed to enable RX timestamping\n");
5448 		return (ENXIO);
5449 	}
5450 
5451 	device_printf(dev, "IEEE 1588 Precision Time Protocol enabled\n");
5452 
5453 	return (0);
5454 }
5455 
5456 /*
5457  * Disable the Time Sync Feature
5458  */
5459 static void
5460 em_tsync_disable(struct adapter *adapter)
5461 {
5462 	u32		tx_ctl, rx_ctl;
5463 
5464 	tx_ctl =  E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
5465 	tx_ctl &= ~0x10;
5466 	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
5467 	E1000_WRITE_FLUSH(&adapter->hw);
5468 
5469 	/* Invalidate TX Timestamp */
5470 	E1000_READ_REG(&adapter->hw, E1000_TXSTMPH);
5471 
5472 	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
5473 	if (tx_ctl & 0x10)
5474      		HW_DEBUGOUT("Failed to disable TX timestamping\n");
5475 
5476 	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
5477 	rx_ctl &= ~0x10;
5478 
5479 	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);
5480 	E1000_WRITE_FLUSH(&adapter->hw);
5481 
5482 	/* Invalidate RX Timestamp */
5483 	E1000_READ_REG(&adapter->hw, E1000_RXSATRH);
5484 
5485 	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
5486 	if (rx_ctl & 0x10)
5487 		HW_DEBUGOUT("Failed to disable RX timestamping\n");
5488 
5489 	return;
5490 }
5491 #endif /* EM_TIMESYNC */
5492