xref: /freebsd/sys/dev/e1000/if_em.c (revision c0020399a650364d0134f79f3fa319f84064372d)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2009, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
37 #include "opt_inet.h"
38 #endif
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/buf_ring.h>
43 #include <sys/bus.h>
44 #include <sys/endian.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/module.h>
50 #include <sys/rman.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/taskqueue.h>
55 #include <sys/eventhandler.h>
56 #ifdef EM_TIMESYNC
57 #include <sys/ioccom.h>
58 #include <sys/time.h>
59 #endif
60 #include <machine/bus.h>
61 #include <machine/resource.h>
62 
63 #include <net/bpf.h>
64 #include <net/ethernet.h>
65 #include <net/if.h>
66 #include <net/if_arp.h>
67 #include <net/if_dl.h>
68 #include <net/if_media.h>
69 
70 #include <net/if_types.h>
71 #include <net/if_vlan_var.h>
72 
73 #include <netinet/in_systm.h>
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 #include <netinet/ip.h>
77 #include <netinet/ip6.h>
78 #include <netinet/tcp.h>
79 #include <netinet/udp.h>
80 
81 #include <machine/in_cksum.h>
82 #include <dev/pci/pcivar.h>
83 #include <dev/pci/pcireg.h>
84 
85 #include "e1000_api.h"
86 #include "e1000_82571.h"
87 #include "if_em.h"
88 
89 /*********************************************************************
90  *  Set this to one to display debug statistics
91  *********************************************************************/
92 int	em_display_debug_stats = 0;
93 
94 /*********************************************************************
95  *  Driver version:
96  *********************************************************************/
97 char em_driver_version[] = "6.9.9";
98 
99 
100 /*********************************************************************
101  *  PCI Device ID Table
102  *
103  *  Used by probe to select devices to load on
104  *  Last field stores an index into e1000_strings
105  *  Last entry must be all 0s
106  *
107  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
108  *********************************************************************/
109 
110 static em_vendor_info_t em_vendor_info_array[] =
111 {
112 	/* Intel(R) PRO/1000 Network Connection */
113 	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
114 	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
115 	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
116 	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
117 	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},
118 
119 	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
120 	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
121 	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
122 	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
123 	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
124 	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
125 	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
126 
127 	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},
128 
129 	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
130 	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
131 
132 	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
133 	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
134 	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
135 	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
136 
137 	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
138 	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
139 	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
140 	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
141 	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
142 
143 	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
144 	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
145 	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
147 	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
148 	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
149 	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
150 	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
151 	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
152 						PCI_ANY_ID, PCI_ANY_ID, 0},
153 
154 	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
155 	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
156 	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
157 
158 	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
159 	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
160 	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
161 	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
162 						PCI_ANY_ID, PCI_ANY_ID, 0},
163 	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
164 						PCI_ANY_ID, PCI_ANY_ID, 0},
165 	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
166 						PCI_ANY_ID, PCI_ANY_ID, 0},
167 	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
168 						PCI_ANY_ID, PCI_ANY_ID, 0},
169 	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
170 						PCI_ANY_ID, PCI_ANY_ID, 0},
171 	{ 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
172 						PCI_ANY_ID, PCI_ANY_ID, 0},
173 	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
174 	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
175 	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
176 	{ 0x8086, E1000_DEV_ID_82572EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
177 
178 	{ 0x8086, E1000_DEV_ID_82573E,		PCI_ANY_ID, PCI_ANY_ID, 0},
179 	{ 0x8086, E1000_DEV_ID_82573E_IAMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
180 	{ 0x8086, E1000_DEV_ID_82573L,		PCI_ANY_ID, PCI_ANY_ID, 0},
181 	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
182 						PCI_ANY_ID, PCI_ANY_ID, 0},
183 	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
184 						PCI_ANY_ID, PCI_ANY_ID, 0},
185 	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
186 						PCI_ANY_ID, PCI_ANY_ID, 0},
187 	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
188 						PCI_ANY_ID, PCI_ANY_ID, 0},
189 	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
190 	{ 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
191 	{ 0x8086, E1000_DEV_ID_ICH8_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
192 	{ 0x8086, E1000_DEV_ID_ICH8_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
193 	{ 0x8086, E1000_DEV_ID_ICH8_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
194 	{ 0x8086, E1000_DEV_ID_ICH8_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
195 	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},
196 
197 	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
198 	{ 0x8086, E1000_DEV_ID_ICH9_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
199 	{ 0x8086, E1000_DEV_ID_ICH9_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
200 	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},
201 	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
202 	{ 0x8086, E1000_DEV_ID_ICH9_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
203 	{ 0x8086, E1000_DEV_ID_ICH9_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
204 	{ 0x8086, E1000_DEV_ID_ICH9_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
205 	{ 0x8086, E1000_DEV_ID_ICH9_BM,		PCI_ANY_ID, PCI_ANY_ID, 0},
206 	{ 0x8086, E1000_DEV_ID_82574L,		PCI_ANY_ID, PCI_ANY_ID, 0},
207 	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
208 	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
209 	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
210 	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
211 	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
212 	/* required last entry */
213 	{ 0, 0, 0, 0, 0}
214 };
215 
216 /*********************************************************************
217  *  Table of branding strings for all supported NICs.
218  *********************************************************************/
219 
220 static char *em_strings[] = {
221 	"Intel(R) PRO/1000 Network Connection"
222 };
223 
224 /*********************************************************************
225  *  Function prototypes
226  *********************************************************************/
227 static int	em_probe(device_t);
228 static int	em_attach(device_t);
229 static int	em_detach(device_t);
230 static int	em_shutdown(device_t);
231 static int	em_suspend(device_t);
232 static int	em_resume(device_t);
233 static void	em_start(struct ifnet *);
234 static void	em_start_locked(struct ifnet *ifp);
235 static int	em_ioctl(struct ifnet *, u_long, caddr_t);
236 static void	em_watchdog(struct adapter *);
237 static void	em_init(void *);
238 static void	em_init_locked(struct adapter *);
239 static void	em_stop(void *);
240 static void	em_media_status(struct ifnet *, struct ifmediareq *);
241 static int	em_media_change(struct ifnet *);
242 static void	em_identify_hardware(struct adapter *);
243 static int	em_allocate_pci_resources(struct adapter *);
244 static int	em_allocate_legacy(struct adapter *adapter);
245 static int	em_allocate_msix(struct adapter *adapter);
246 static int	em_setup_msix(struct adapter *);
247 static void	em_free_pci_resources(struct adapter *);
248 static void	em_local_timer(void *);
249 static int	em_hardware_init(struct adapter *);
250 static void	em_setup_interface(device_t, struct adapter *);
251 static void	em_setup_transmit_structures(struct adapter *);
252 static void	em_initialize_transmit_unit(struct adapter *);
253 static int	em_setup_receive_structures(struct adapter *);
254 static void	em_initialize_receive_unit(struct adapter *);
255 static void	em_enable_intr(struct adapter *);
256 static void	em_disable_intr(struct adapter *);
257 static void	em_free_transmit_structures(struct adapter *);
258 static void	em_free_receive_structures(struct adapter *);
259 static void	em_update_stats_counters(struct adapter *);
260 static void	em_txeof(struct adapter *);
261 static void	em_tx_purge(struct adapter *);
262 static int	em_allocate_receive_structures(struct adapter *);
263 static int	em_allocate_transmit_structures(struct adapter *);
264 static int	em_rxeof(struct adapter *, int);
265 #ifndef __NO_STRICT_ALIGNMENT
266 static int	em_fixup_rx(struct adapter *);
267 #endif
268 static void	em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
269 		    struct mbuf *);
270 static void	em_transmit_checksum_setup(struct adapter *, struct mbuf *,
271 		    u32 *, u32 *);
272 #if __FreeBSD_version >= 700000
273 static bool	em_tso_setup(struct adapter *, struct mbuf *,
274 		    u32 *, u32 *);
275 #endif /* FreeBSD_version >= 700000 */
276 static void	em_set_promisc(struct adapter *);
277 static void	em_disable_promisc(struct adapter *);
278 static void	em_set_multi(struct adapter *);
279 static void	em_print_hw_stats(struct adapter *);
280 static void	em_update_link_status(struct adapter *);
281 static int	em_get_buf(struct adapter *, int);
282 static void	em_register_vlan(void *, struct ifnet *, u16);
283 static void	em_unregister_vlan(void *, struct ifnet *, u16);
284 static int	em_xmit(struct adapter *, struct mbuf **);
285 static void	em_smartspeed(struct adapter *);
286 static int	em_82547_fifo_workaround(struct adapter *, int);
287 static void	em_82547_update_fifo_head(struct adapter *, int);
288 static int	em_82547_tx_fifo_reset(struct adapter *);
289 static void	em_82547_move_tail(void *);
290 static int	em_dma_malloc(struct adapter *, bus_size_t,
291 		    struct em_dma_alloc *, int);
292 static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
293 static void	em_print_debug_info(struct adapter *);
294 static void	em_print_nvm_info(struct adapter *);
295 static int 	em_is_valid_ether_addr(u8 *);
296 static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
297 static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
298 static u32	em_fill_descriptors (bus_addr_t address, u32 length,
299 		    PDESC_ARRAY desc_array);
300 static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
301 static void	em_add_int_delay_sysctl(struct adapter *, const char *,
302 		    const char *, struct em_int_delay_info *, int, int);
303 /* Management and WOL Support */
304 static void	em_init_manageability(struct adapter *);
305 static void	em_release_manageability(struct adapter *);
306 static void     em_get_hw_control(struct adapter *);
307 static void     em_release_hw_control(struct adapter *);
308 static void     em_enable_wakeup(device_t);
309 
310 #ifdef EM_TIMESYNC
311 /* Precision Time sync support */
312 static int	em_tsync_init(struct adapter *);
313 static void	em_tsync_disable(struct adapter *);
314 #endif
315 
316 #ifdef EM_LEGACY_IRQ
317 static void	em_intr(void *);
318 #else /* FAST IRQ */
319 #if __FreeBSD_version < 700000
320 static void	em_irq_fast(void *);
321 #else
322 static int	em_irq_fast(void *);
323 #endif
324 
325 /* MSIX handlers */
326 static void	em_msix_tx(void *);
327 static void	em_msix_rx(void *);
328 static void	em_msix_link(void *);
329 static void	em_handle_rx(void *context, int pending);
330 static void	em_handle_tx(void *context, int pending);
331 
332 static void	em_handle_rxtx(void *context, int pending);
333 static void	em_handle_link(void *context, int pending);
334 static void	em_add_rx_process_limit(struct adapter *, const char *,
335 		    const char *, int *, int);
336 #endif /* ~EM_LEGACY_IRQ */
337 
338 #ifdef DEVICE_POLLING
339 static poll_handler_t em_poll;
340 #endif /* POLLING */
341 
342 /*********************************************************************
343  *  FreeBSD Device Interface Entry Points
344  *********************************************************************/
345 
346 static device_method_t em_methods[] = {
347 	/* Device interface */
348 	DEVMETHOD(device_probe, em_probe),
349 	DEVMETHOD(device_attach, em_attach),
350 	DEVMETHOD(device_detach, em_detach),
351 	DEVMETHOD(device_shutdown, em_shutdown),
352 	DEVMETHOD(device_suspend, em_suspend),
353 	DEVMETHOD(device_resume, em_resume),
354 	{0, 0}
355 };
356 
357 static driver_t em_driver = {
358 	"em", em_methods, sizeof(struct adapter),
359 };
360 
361 static devclass_t em_devclass;
362 DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
363 MODULE_DEPEND(em, pci, 1, 1, 1);
364 MODULE_DEPEND(em, ether, 1, 1, 1);
365 
366 /*********************************************************************
367  *  Tunable default values.
368  *********************************************************************/
369 
370 #define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
371 #define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
372 #define M_TSO_LEN			66
373 
374 /* Allow common code without TSO */
375 #ifndef CSUM_TSO
376 #define CSUM_TSO	0
377 #endif
378 
379 static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
380 static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
381 static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
382 static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
383 static int em_rxd = EM_DEFAULT_RXD;
384 static int em_txd = EM_DEFAULT_TXD;
385 static int em_smart_pwr_down = FALSE;
386 /* Controls whether promiscuous also shows bad packets */
387 static int em_debug_sbp = FALSE;
388 /* Local switch for MSI/MSIX */
389 static int em_enable_msi = TRUE;
390 
391 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
392 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
393 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
394 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
395 TUNABLE_INT("hw.em.rxd", &em_rxd);
396 TUNABLE_INT("hw.em.txd", &em_txd);
397 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
398 TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
399 TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);
400 
401 #ifndef EM_LEGACY_IRQ
402 /* How many packets rxeof tries to clean at a time */
403 static int em_rx_process_limit = 100;
404 TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
405 #endif
406 
407 /* Global used in WOL setup with multiport cards */
408 static int global_quad_port_a = 0;
409 
410 /*********************************************************************
411  *  Device identification routine
412  *
413  *  em_probe determines if the driver should be loaded on
414  *  adapter based on PCI vendor/device id of the adapter.
415  *
416  *  return BUS_PROBE_DEFAULT on success, positive on failure
417  *********************************************************************/
418 
419 static int
420 em_probe(device_t dev)
421 {
422 	char		adapter_name[60];
423 	u16		pci_vendor_id = 0;
424 	u16		pci_device_id = 0;
425 	u16		pci_subvendor_id = 0;
426 	u16		pci_subdevice_id = 0;
427 	em_vendor_info_t *ent;
428 
429 	INIT_DEBUGOUT("em_probe: begin");
430 
431 	pci_vendor_id = pci_get_vendor(dev);
432 	if (pci_vendor_id != EM_VENDOR_ID)
433 		return (ENXIO);
434 
435 	pci_device_id = pci_get_device(dev);
436 	pci_subvendor_id = pci_get_subvendor(dev);
437 	pci_subdevice_id = pci_get_subdevice(dev);
438 
439 	ent = em_vendor_info_array;
440 	while (ent->vendor_id != 0) {
441 		if ((pci_vendor_id == ent->vendor_id) &&
442 		    (pci_device_id == ent->device_id) &&
443 
444 		    ((pci_subvendor_id == ent->subvendor_id) ||
445 		    (ent->subvendor_id == PCI_ANY_ID)) &&
446 
447 		    ((pci_subdevice_id == ent->subdevice_id) ||
448 		    (ent->subdevice_id == PCI_ANY_ID))) {
449 			sprintf(adapter_name, "%s %s",
450 				em_strings[ent->index],
451 				em_driver_version);
452 			device_set_desc_copy(dev, adapter_name);
453 			return (BUS_PROBE_DEFAULT);
454 		}
455 		ent++;
456 	}
457 
458 	return (ENXIO);
459 }
460 
461 /*********************************************************************
462  *  Device initialization routine
463  *
464  *  The attach entry point is called when the driver is being loaded.
465  *  This routine identifies the type of hardware, allocates all resources
466  *  and initializes the hardware.
467  *
468  *  return 0 on success, positive on failure
469  *********************************************************************/
470 
471 static int
472 em_attach(device_t dev)
473 {
474 	struct adapter	*adapter;
475 	int		tsize, rsize;
476 	int		error = 0;
477 	u16		eeprom_data, device_id;
478 
479 	INIT_DEBUGOUT("em_attach: begin");
480 
481 	adapter = device_get_softc(dev);
482 	adapter->dev = adapter->osdep.dev = dev;
483 	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
484 	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
485 	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
486 
487 	/* SYSCTL stuff */
488 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
489 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
490 	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
491 	    em_sysctl_debug_info, "I", "Debug Information");
492 
493 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
494 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
495 	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
496 	    em_sysctl_stats, "I", "Statistics");
497 
498 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
499 	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
500 
501 	/* Determine hardware and mac info */
502 	em_identify_hardware(adapter);
503 
504 	/* Setup PCI resources */
505 	if (em_allocate_pci_resources(adapter)) {
506 		device_printf(dev, "Allocation of PCI resources failed\n");
507 		error = ENXIO;
508 		goto err_pci;
509 	}
510 
511 	/*
512 	** For ICH8 and family we need to
513 	** map the flash memory, and this
514 	** must happen after the MAC is
515 	** identified
516 	*/
517 	if ((adapter->hw.mac.type == e1000_ich8lan) ||
518 	    (adapter->hw.mac.type == e1000_ich9lan) ||
519 	    (adapter->hw.mac.type == e1000_ich10lan)) {
520 		int rid = EM_BAR_TYPE_FLASH;
521 		adapter->flash = bus_alloc_resource_any(dev,
522 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
523 		if (adapter->flash == NULL) {
524 			device_printf(dev, "Mapping of Flash failed\n");
525 			error = ENXIO;
526 			goto err_pci;
527 		}
528 		/* This is used in the shared code */
529 		adapter->hw.flash_address = (u8 *)adapter->flash;
530 		adapter->osdep.flash_bus_space_tag =
531 		    rman_get_bustag(adapter->flash);
532 		adapter->osdep.flash_bus_space_handle =
533 		    rman_get_bushandle(adapter->flash);
534 	}
535 
536 	/* Do Shared Code initialization */
537 	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
538 		device_printf(dev, "Setup of Shared code failed\n");
539 		error = ENXIO;
540 		goto err_pci;
541 	}
542 
543 	e1000_get_bus_info(&adapter->hw);
544 
545 	/* Set up some sysctls for the tunable interrupt delays */
546 	em_add_int_delay_sysctl(adapter, "rx_int_delay",
547 	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
548 	    E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
549 	em_add_int_delay_sysctl(adapter, "tx_int_delay",
550 	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
551 	    E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
552 	if (adapter->hw.mac.type >= e1000_82540) {
553 		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
554 		    "receive interrupt delay limit in usecs",
555 		    &adapter->rx_abs_int_delay,
556 		    E1000_REGISTER(&adapter->hw, E1000_RADV),
557 		    em_rx_abs_int_delay_dflt);
558 		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
559 		    "transmit interrupt delay limit in usecs",
560 		    &adapter->tx_abs_int_delay,
561 		    E1000_REGISTER(&adapter->hw, E1000_TADV),
562 		    em_tx_abs_int_delay_dflt);
563 	}
564 
565 #ifndef EM_LEGACY_IRQ
566 	/* Sysctls for limiting the amount of work done in the taskqueue */
567 	em_add_rx_process_limit(adapter, "rx_processing_limit",
568 	    "max number of rx packets to process", &adapter->rx_process_limit,
569 	    em_rx_process_limit);
570 #endif
571 
572 	/*
573 	 * Validate number of transmit and receive descriptors. It
574 	 * must not exceed hardware maximum, and must be multiple
575 	 * of E1000_DBA_ALIGN.
576 	 */
577 	if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
578 	    (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
579 	    (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
580 	    (em_txd < EM_MIN_TXD)) {
581 		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
582 		    EM_DEFAULT_TXD, em_txd);
583 		adapter->num_tx_desc = EM_DEFAULT_TXD;
584 	} else
585 		adapter->num_tx_desc = em_txd;
586 	if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
587 	    (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
588 	    (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
589 	    (em_rxd < EM_MIN_RXD)) {
590 		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
591 		    EM_DEFAULT_RXD, em_rxd);
592 		adapter->num_rx_desc = EM_DEFAULT_RXD;
593 	} else
594 		adapter->num_rx_desc = em_rxd;
595 
596 	adapter->hw.mac.autoneg = DO_AUTO_NEG;
597 	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
598 	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
599 	adapter->rx_buffer_len = 2048;
600 
601 	e1000_init_script_state_82541(&adapter->hw, TRUE);
602 	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
603 
604 	/* Copper options */
605 	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
606 		adapter->hw.phy.mdix = AUTO_ALL_MODES;
607 		adapter->hw.phy.disable_polarity_correction = FALSE;
608 		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
609 	}
610 
611 	/*
612 	 * Set the frame limits assuming
613 	 * standard ethernet sized frames.
614 	 */
615 	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
616 	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
617 
618 	/*
619 	 * This controls when hardware reports transmit completion
620 	 * status.
621 	 */
622 	adapter->hw.mac.report_tx_early = 1;
623 
624 	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
625 	    EM_DBA_ALIGN);
626 
627 	/* Allocate Transmit Descriptor ring */
628 	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
629 		device_printf(dev, "Unable to allocate tx_desc memory\n");
630 		error = ENOMEM;
631 		goto err_tx_desc;
632 	}
633 	adapter->tx_desc_base =
634 	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
635 
636 	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
637 	    EM_DBA_ALIGN);
638 
639 	/* Allocate Receive Descriptor ring */
640 	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
641 		device_printf(dev, "Unable to allocate rx_desc memory\n");
642 		error = ENOMEM;
643 		goto err_rx_desc;
644 	}
645 	adapter->rx_desc_base =
646 	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
647 
648 	/*
649 	** Start from a known state, this is
650 	** important in reading the nvm and
651 	** mac from that.
652 	*/
653 	e1000_reset_hw(&adapter->hw);
654 
655 	/* Make sure we have a good EEPROM before we read from it */
656 	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
657 		/*
658 		** Some PCI-E parts fail the first check due to
659 		** the link being in sleep state, call it again,
660 		** if it fails a second time its a real issue.
661 		*/
662 		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
663 			device_printf(dev,
664 			    "The EEPROM Checksum Is Not Valid\n");
665 			error = EIO;
666 			goto err_hw_init;
667 		}
668 	}
669 
670 	/* Copy the permanent MAC address out of the EEPROM */
671 	if (e1000_read_mac_addr(&adapter->hw) < 0) {
672 		device_printf(dev, "EEPROM read error while reading MAC"
673 		    " address\n");
674 		error = EIO;
675 		goto err_hw_init;
676 	}
677 
678 	if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
679 		device_printf(dev, "Invalid MAC address\n");
680 		error = EIO;
681 		goto err_hw_init;
682 	}
683 
684 	/* Initialize the hardware */
685 	if (em_hardware_init(adapter)) {
686 		device_printf(dev, "Unable to initialize the hardware\n");
687 		error = EIO;
688 		goto err_hw_init;
689 	}
690 
691 	/* Allocate transmit descriptors and buffers */
692 	if (em_allocate_transmit_structures(adapter)) {
693 		device_printf(dev, "Could not setup transmit structures\n");
694 		error = ENOMEM;
695 		goto err_tx_struct;
696 	}
697 
698 	/* Allocate receive descriptors and buffers */
699 	if (em_allocate_receive_structures(adapter)) {
700 		device_printf(dev, "Could not setup receive structures\n");
701 		error = ENOMEM;
702 		goto err_rx_struct;
703 	}
704 
705 	/*
706 	**  Do interrupt configuration
707 	*/
708 	if (adapter->msi > 1) /* Do MSI/X */
709 		error = em_allocate_msix(adapter);
710 	else  /* MSI or Legacy */
711 		error = em_allocate_legacy(adapter);
712 	if (error)
713 		goto err_rx_struct;
714 
715 	/* Setup OS specific network interface */
716 	em_setup_interface(dev, adapter);
717 
718 	/* Initialize statistics */
719 	em_update_stats_counters(adapter);
720 
721 	adapter->hw.mac.get_link_status = 1;
722 	em_update_link_status(adapter);
723 
724 	/* Indicate SOL/IDER usage */
725 	if (e1000_check_reset_block(&adapter->hw))
726 		device_printf(dev,
727 		    "PHY reset is blocked due to SOL/IDER session.\n");
728 
729 	/* Determine if we have to control management hardware */
730 	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
731 
732 	/*
733 	 * Setup Wake-on-Lan
734 	 */
735 	switch (adapter->hw.mac.type) {
736 
737 	case e1000_82542:
738 	case e1000_82543:
739 		break;
740 	case e1000_82546:
741 	case e1000_82546_rev_3:
742 	case e1000_82571:
743 	case e1000_80003es2lan:
744 		if (adapter->hw.bus.func == 1)
745 			e1000_read_nvm(&adapter->hw,
746 			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
747 		else
748 			e1000_read_nvm(&adapter->hw,
749 			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
750 		eeprom_data &= EM_EEPROM_APME;
751 		break;
752 	default:
753 		/* APME bit in EEPROM is mapped to WUC.APME */
754 		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) &
755 		    E1000_WUC_APME;
756 		break;
757 	}
758 	if (eeprom_data)
759 		adapter->wol = E1000_WUFC_MAG;
760 	/*
761          * We have the eeprom settings, now apply the special cases
762          * where the eeprom may be wrong or the board won't support
763          * wake on lan on a particular port
764 	 */
765 	device_id = pci_get_device(dev);
766         switch (device_id) {
767 	case E1000_DEV_ID_82546GB_PCIE:
768 		adapter->wol = 0;
769 		break;
770 	case E1000_DEV_ID_82546EB_FIBER:
771 	case E1000_DEV_ID_82546GB_FIBER:
772 	case E1000_DEV_ID_82571EB_FIBER:
773 		/* Wake events only supported on port A for dual fiber
774 		 * regardless of eeprom setting */
775 		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
776 		    E1000_STATUS_FUNC_1)
777 			adapter->wol = 0;
778 		break;
779 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
780 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
781 	case E1000_DEV_ID_82571EB_QUAD_FIBER:
782 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
783                 /* if quad port adapter, disable WoL on all but port A */
784 		if (global_quad_port_a != 0)
785 			adapter->wol = 0;
786 		/* Reset for multiple quad port adapters */
787 		if (++global_quad_port_a == 4)
788 			global_quad_port_a = 0;
789                 break;
790 	}
791 
792 	/* Do we need workaround for 82544 PCI-X adapter? */
793 	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
794 	    adapter->hw.mac.type == e1000_82544)
795 		adapter->pcix_82544 = TRUE;
796 	else
797 		adapter->pcix_82544 = FALSE;
798 
799 	/* Register for VLAN events */
800 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
801 	    em_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
802 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
803 	    em_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
804 
805 	/* Tell the stack that the interface is not active */
806 	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
807 
808 	INIT_DEBUGOUT("em_attach: end");
809 
810 	return (0);
811 
812 err_rx_struct:
813 	em_free_transmit_structures(adapter);
814 err_tx_struct:
815 err_hw_init:
816 	em_release_hw_control(adapter);
817 	em_dma_free(adapter, &adapter->rxdma);
818 err_rx_desc:
819 	em_dma_free(adapter, &adapter->txdma);
820 err_tx_desc:
821 err_pci:
822 	em_free_pci_resources(adapter);
823 	EM_TX_LOCK_DESTROY(adapter);
824 	EM_RX_LOCK_DESTROY(adapter);
825 	EM_CORE_LOCK_DESTROY(adapter);
826 
827 	return (error);
828 }
829 
830 /*********************************************************************
831  *  Device removal routine
832  *
833  *  The detach entry point is called when the driver is being removed.
834  *  This routine stops the adapter and deallocates all the resources
835  *  that were allocated for driver operation.
836  *
837  *  return 0 on success, positive on failure
838  *********************************************************************/
839 
static int
em_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
#if __FreeBSD_version >= 700000
	if (adapter->ifp->if_vlantrunk != NULL) {
#else
	if (adapter->ifp->if_nvlans != 0) {
#endif
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/*
	 * Quiesce the hardware under both locks; in_detach makes
	 * em_ioctl() refuse further requests during teardown.
	 */
	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	adapter->in_detach = 1;
	em_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	em_release_manageability(adapter);

	/*
	 * On parts that may have an active management engine, hand
	 * control of the hardware back to the firmware.
	 */
	if (((adapter->hw.mac.type == e1000_82573) ||
	    (adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_ich10lan) ||
	    (adapter->hw.mac.type == e1000_ich9lan)) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_release_hw_control(adapter);

	/* Arm Wake On Lan if it was configured. */
	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		em_enable_wakeup(dev);
	}

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the stack, then drain timers before freeing. */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

#ifdef IFNET_BUF_RING
	drbr_free(adapter->br, M_DEVBUF);
#endif
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		em_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		em_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	/* Locks go last, after all users are gone. */
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
925 
926 /*********************************************************************
927  *
928  *  Shutdown entry point
929  *
930  **********************************************************************/
931 
932 static int
933 em_shutdown(device_t dev)
934 {
935 	return em_suspend(dev);
936 }
937 
938 /*
939  * Suspend/resume device methods.
940  */
static int
em_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	EM_CORE_LOCK(adapter);

	/* Halt transmit/receive and the local timer. */
	EM_TX_LOCK(adapter);
	em_stop(adapter);
	EM_TX_UNLOCK(adapter);

        em_release_manageability(adapter);

	/*
	 * On parts that may have an active management engine, hand
	 * control of the hardware back to the firmware before sleeping.
	 */
        if (((adapter->hw.mac.type == e1000_82573) ||
            (adapter->hw.mac.type == e1000_ich8lan) ||
            (adapter->hw.mac.type == e1000_ich10lan) ||
            (adapter->hw.mac.type == e1000_ich9lan)) &&
            e1000_check_mng_mode(&adapter->hw))
                em_release_hw_control(adapter);

	/* Arm Wake On Lan if it was configured. */
        if (adapter->wol) {
                E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
                E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
                em_enable_wakeup(dev);
        }

	EM_CORE_UNLOCK(adapter);

	return bus_generic_suspend(dev);
}
971 
static int
em_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	/* Fully reprogram the hardware, then re-establish OS presence. */
	EM_CORE_LOCK(adapter);
	em_init_locked(adapter);
	em_init_manageability(adapter);
	EM_CORE_UNLOCK(adapter);
	/* Kick transmit in case frames queued up while suspended. */
	em_start(ifp);

	return bus_generic_resume(dev);
}
986 
987 
988 /*********************************************************************
989  *  Transmit entry point
990  *
991  *  em_start is called by the stack to initiate a transmit.
992  *  The driver will remain in this routine as long as there are
993  *  packets to transmit and transmit resources are available.
994  *  In case resources are not available stack is notified and
995  *  the packet is requeued.
996  **********************************************************************/
997 
998 #ifdef IFNET_BUF_RING
static int
em_transmit_locked(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	int error;

	EM_TX_LOCK_ASSERT(adapter);
	/* Not running, blocked, or no link: just stage on the buf_ring. */
	if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
	    || (!adapter->link_active)) {
		error = drbr_enqueue(ifp, adapter->br, m);
		return (error);
	}

	/*
	 * Fast path: ring is empty and descriptors are available, so
	 * hand the frame straight to the hardware.
	 */
	if (ADAPTER_RING_EMPTY(adapter) &&
	    (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
		if (em_xmit(adapter, &m)) {
			/* Encap failed; requeue if em_xmit left us the mbuf. */
			if (m && (error = drbr_enqueue(ifp, adapter->br, m)) != 0) {
				return (error);
			}
		} else{
			/* Send a copy of the frame to the BPF listener */
			ETHER_BPF_MTAP(ifp, m);
		}
	} else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
		return (error);

	/* Drain anything still staged on the ring. */
	if (!ADAPTER_RING_EMPTY(adapter))
		em_start_locked(ifp);

	return (0);
}
1031 
1032 static int
1033 em_transmit(struct ifnet *ifp, struct mbuf *m)
1034 {
1035 
1036 	struct adapter *adapter = ifp->if_softc;
1037 	int error = 0;
1038 
1039 	if(EM_TX_TRYLOCK(adapter)) {
1040 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1041 			error = em_transmit_locked(ifp, m);
1042 		EM_TX_UNLOCK(adapter);
1043 	} else
1044 		error = drbr_enqueue(ifp, adapter->br, m);
1045 
1046 	return (error);
1047 }
1048 
1049 static void
1050 em_qflush(struct ifnet *ifp)
1051 {
1052 	struct mbuf *m;
1053 	struct adapter *adapter = (struct adapter *)ifp->if_softc;
1054 
1055 	EM_TX_LOCK(adapter);
1056 	while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
1057 		m_freem(m);
1058 	if_qflush(ifp);
1059 	EM_TX_UNLOCK(adapter);
1060 }
1061 
1062 static void
1063 em_start_locked(struct ifnet *ifp)
1064 {
1065 	struct adapter	*adapter = ifp->if_softc;
1066 	struct mbuf	*m_head;
1067 
1068 	EM_TX_LOCK_ASSERT(adapter);
1069 
1070 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1071 	    IFF_DRV_RUNNING)
1072 		return;
1073 	if (!adapter->link_active)
1074 		return;
1075 
1076 	while ((adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)
1077 	    && (!ADAPTER_RING_EMPTY(adapter))) {
1078 
1079 		m_head = buf_ring_dequeue_sc(adapter->br);
1080 		if (m_head == NULL)
1081 			break;
1082 		/*
1083 		 *  Encapsulation can modify our pointer, and or make it
1084 		 *  NULL on failure.  In that event, we can't requeue.
1085 		 */
1086 		if (em_xmit(adapter, &m_head)) {
1087 			if (m_head == NULL)
1088 				break;
1089 			break;
1090 		}
1091 
1092 		/* Send a copy of the frame to the BPF listener */
1093 		ETHER_BPF_MTAP(ifp, m_head);
1094 
1095 		/* Set timeout in case hardware has problems transmitting. */
1096 		adapter->watchdog_timer = EM_TX_TIMEOUT;
1097 	}
1098 	if ((adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD))
1099 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1100 
1101 }
1102 #else
static void
em_start_locked(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	EM_TX_LOCK_ASSERT(adapter);

	/* Nothing to do unless we are running, unblocked, and have link. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	/* Drain the interface send queue into the TX descriptor ring. */
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 *  Encapsulation can modify our pointer, and or make it
		 *  NULL on failure.  In that event, we can't requeue.
		 */
		if (em_xmit(adapter, &m_head)) {
			if (m_head == NULL)
				break;
			/* Out of resources: push the frame back, mark busy. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		adapter->watchdog_timer = EM_TX_TIMEOUT;
	}
}
1141 
1142 #endif
1143 
1144 static void
1145 em_start(struct ifnet *ifp)
1146 {
1147 	struct adapter *adapter = ifp->if_softc;
1148 
1149 	EM_TX_LOCK(adapter);
1150 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1151 		em_start_locked(ifp);
1152 	EM_TX_UNLOCK(adapter);
1153 }
1154 
1155 /*********************************************************************
1156  *  Ioctl entry point
1157  *
1158  *  em_ioctl is called when the user wants to configure the
1159  *  interface.
1160  *
1161  *  return 0 on success, positive on failure
1162  **********************************************************************/
1163 
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int error = 0;

	/* Refuse new work once detach has started tearing us down. */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			/*
			 * XXX
			 * Since resetting hardware takes a very long time
			 * and results in link renegotiation we only
			 * initialize the hardware only when it is absolutely
			 * required.
			 */
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				EM_CORE_LOCK(adapter);
				em_init_locked(adapter);
				EM_CORE_UNLOCK(adapter);
			}
			arp_ifinit(ifp, ifa);
		} else
#endif
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		/* The maximum frame size is MAC-dependent; validate first. */
		int max_frame_size;
		u16 eeprom_data = 0;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		switch (adapter->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_3GIO_3, 1, &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames - fall thru */
		case e1000_82571:
		case e1000_82572:
		case e1000_ich9lan:
		case e1000_ich10lan:
		case e1000_82574:
		case e1000_80003es2lan:	/* Limit Jumbo Frame size */
			max_frame_size = 9234;
			break;
			/* Adapters that do not support jumbo frames */
		case e1000_82542:
		case e1000_ich8lan:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		/* Re-init so buffers are resized for the new frame size. */
		em_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Only promisc/allmulti changed: adjust the
				 * receive filters without a full re-init.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					em_disable_promisc(adapter);
					em_set_promisc(adapter);
				}
			} else
				em_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				em_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			/* Early 82542 needs its receive unit reprogrammed. */
			if (adapter->hw.mac.type == e1000_82542 &&
	    		    adapter->hw.revision_id == E1000_REVISION_2) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask holds only the capability bits that changed. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			reinit = 1;
		}
#endif

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
#if __FreeBSD_version >= 700000
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	    }

#ifdef EM_TIMESYNC
	/*
	** IOCTL support for Precision Time (IEEE 1588) Support
	*/
	case EM_TIMESYNC_READTS:
	    {
		u32 rx_ctl, tx_ctl;
		struct em_tsync_read *tdata;

		tdata = (struct em_tsync_read *) ifr->ifr_data;

		IOCTL_DEBUGOUT("Reading Timestamp\n");

		if (tdata->read_current_time) {
			getnanotime(&tdata->system_time);
			tdata->network_time = E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
			tdata->network_time |=
			    (u64)E1000_READ_REG(&adapter->hw, E1000_SYSTIMH ) << 32;
		}

		rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
		tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);

		IOCTL_DEBUGOUT1("RX_CTL value = %u\n", rx_ctl);
		IOCTL_DEBUGOUT1("TX_CTL value = %u\n", tx_ctl);

		if (rx_ctl & 0x1) {
			IOCTL_DEBUGOUT("RX timestamp is valid\n");
			u32 tmp;
			unsigned char *tmp_cp;

			tdata->rx_valid = 1;
			tdata->rx_stamp = E1000_READ_REG(&adapter->hw, E1000_RXSTMPL);
			tdata->rx_stamp |= (u64)E1000_READ_REG(&adapter->hw,
			    E1000_RXSTMPH) << 32;

			/* Source-id registers carry the peer identity. */
			tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRL);
			tmp_cp = (unsigned char *) &tmp;
			tdata->srcid[0] = tmp_cp[0];
			tdata->srcid[1] = tmp_cp[1];
			tdata->srcid[2] = tmp_cp[2];
			tdata->srcid[3] = tmp_cp[3];
			tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRH);
			tmp_cp = (unsigned char *) &tmp;
			tdata->srcid[4] = tmp_cp[0];
			tdata->srcid[5] = tmp_cp[1];
			tdata->seqid = tmp >> 16;
			tdata->seqid = htons(tdata->seqid);
		} else
			tdata->rx_valid = 0;

		if (tx_ctl & 0x1) {
			IOCTL_DEBUGOUT("TX timestamp is valid\n");
			tdata->tx_valid = 1;
			tdata->tx_stamp = E1000_READ_REG(&adapter->hw, E1000_TXSTMPL);
			tdata->tx_stamp |= (u64) E1000_READ_REG(&adapter->hw,
			    E1000_TXSTMPH) << 32;
		} else
			tdata->tx_valid = 0;

		return (0);
	    }
#endif	/* EM_TIMESYNC */

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1426 
1427 /*********************************************************************
1428  *  Watchdog timer:
1429  *
1430  *  This routine is called from the local timer every second.
1431  *  As long as transmit descriptors are being cleaned the value
1432  *  is non-zero and we do nothing. Reaching 0 indicates a tx hang
1433  *  and we then reset the device.
1434  *
1435  **********************************************************************/
1436 
static void
em_watchdog(struct adapter *adapter)
{

	EM_CORE_LOCK_ASSERT(adapter);

	/*
	** The timer is set to 5 every time start queues a packet.
	** Then txeof keeps resetting it as long as it cleans at
	** least one descriptor.
	** Finally, anytime all descriptors are clean the timer is
	** set to 0.
	*/
	EM_TX_LOCK(adapter);
	if ((adapter->watchdog_timer == 0) || (--adapter->watchdog_timer)) {
		EM_TX_UNLOCK(adapter);
		return;
	}

	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
	    E1000_STATUS_TXOFF) {
		adapter->watchdog_timer = EM_TX_TIMEOUT;
		EM_TX_UNLOCK(adapter);
		return;
	}

	/* Timer expired with no pause in effect: treat as a TX hang. */
	if (e1000_check_for_link(&adapter->hw) == 0)
		device_printf(adapter->dev, "watchdog timeout -- resetting\n");
	/* Clear RUNNING so the re-init below performs a full restart. */
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	EM_TX_UNLOCK(adapter);

	em_init_locked(adapter);
}
1474 
1475 /*********************************************************************
1476  *  Init entry point
1477  *
1478  *  This routine is used in two ways. It is used by the stack as
1479  *  init entry point in network interface structure. It is also used
1480  *  by the driver as a hw/sw initialization routine to get to a
1481  *  consistent state.
1482  *
 *  This routine returns nothing; failures are reported via device_printf.
1484  **********************************************************************/
1485 
static void
em_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("em_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	/* Quiesce the hardware before reprogramming it. */
	EM_TX_LOCK(adapter);
	em_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case e1000_82574:
			pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;
	case e1000_ich9lan:
	case e1000_ich10lan:
#define E1000_PBA_10K	0x000A
		pba = E1000_PBA_10K;
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset, we make a duplicate
	 * in RAR[14] for that eventuality, this assures
	 * the interface continues to function.
	 */
	if (adapter->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&adapter->hw, TRUE);
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	em_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* HW tagging without HW filtering: just enable VLAN strip/insert. */
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
	    ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) {
		u32 ctrl;
		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	}


	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 700000
		if (ifp->if_capenable & IFCAP_TSO4)
			ifp->if_hwassist |= CSUM_TSO;
#endif
	}

	/* Configure for OS presence */
	em_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	em_setup_transmit_structures(adapter);
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		em_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;
		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		** Set the IVAR - interrupt vector routing.
		** Each nibble represents a vector, high bit
		** is enable, other 3 bits are the MSIX table
		** entry, we map RXQ0 to 0, TXQ0 to 1, and
		** Link (other) to 2, hence the magic number.
		*/
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

#ifdef EM_TIMESYNC
	/* Initialize IEEE 1588 Precision Time hardware */
	if ((adapter->hw.mac.type == e1000_82574) ||
	    (adapter->hw.mac.type == e1000_ich10lan))
		em_tsync_init(adapter);
#endif

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy.reset_disable = TRUE;
}
1670 
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	/* Locked wrapper around em_init_locked() for the if_init hook. */
	EM_CORE_LOCK(sc);
	em_init_locked(sc);
	EM_CORE_UNLOCK(sc);
}
1680 
1681 
1682 #ifdef DEVICE_POLLING
1683 /*********************************************************************
1684  *
1685  *  Legacy polling routine
1686  *
1687  *********************************************************************/
static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u32		reg_icr;

	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	/* On a status poll, also look for link-state changes. */
	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    em_local_timer, adapter);
		}
	}
	EM_CORE_UNLOCK(adapter);

	/* Process up to "count" received frames. */
	em_rxeof(adapter, count);

	/* Reclaim completed transmits and restart if work is pending. */
	EM_TX_LOCK(adapter);
	em_txeof(adapter);

	if (!ADAPTER_RING_EMPTY(adapter))
		em_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
}
1721 #endif /* DEVICE_POLLING */
1722 
1723 #ifdef EM_LEGACY_IRQ
1724 /*********************************************************************
1725  *
1726  *  Legacy Interrupt Service routine
1727  *
1728  *********************************************************************/
1729 
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_icr;


	/* Polling owns the device; the legacy ISR stands down. */
	if (ifp->if_capenable & IFCAP_POLLING)
		return;

	EM_CORE_LOCK(adapter);
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	/*
	 * 0xffffffff means the card was hot-ejected, 0 means the
	 * interrupt is not ours; on 82571 and later, INT_ASSERTED
	 * indicates whether this device actually interrupted.
	 */
	if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
	    (adapter->hw.mac.type >= e1000_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0))
			goto out;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			goto out;

	EM_TX_LOCK(adapter);
	/*
	 * NOTE(review): em_txeof() runs both before and after
	 * em_rxeof(); presumably the second pass reclaims descriptors
	 * completed while receive processing ran — confirm intent.
	 */
	em_txeof(adapter);
	em_rxeof(adapter, -1);
	em_txeof(adapter);
	EM_TX_UNLOCK(adapter);

	/* Link-state change: refresh status, restart the local timer. */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		em_update_link_status(adapter);
		/* Deal with TX cruft when link lost */
		em_tx_purge(adapter);
		callout_reset(&adapter->timer, hz,
		    em_local_timer, adapter);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
out:
	EM_CORE_UNLOCK(adapter);
	/* Restart transmit, outside the core lock, if work is pending. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !ADAPTER_RING_EMPTY(adapter))
		em_start(ifp);
}
1775 
1776 #else /* EM_FAST_IRQ, then fast interrupt routines only */
1777 
static void
em_handle_link(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet *ifp = adapter->ifp;

	/* Taskqueue context: process a deferred link-status change. */
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	EM_CORE_LOCK(adapter);
	callout_stop(&adapter->timer);
	em_update_link_status(adapter);
	/* Deal with TX cruft when link lost */
	em_tx_purge(adapter);
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	EM_CORE_UNLOCK(adapter);
}
1795 
1796 
1797 /* Combined RX/TX handler, used by Legacy and MSI */
static void
em_handle_rxtx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp = adapter->ifp;


	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* More rx work remains: requeue ourselves and continue. */
		if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
		EM_TX_LOCK(adapter);
		em_txeof(adapter);

		if (!ADAPTER_RING_EMPTY(adapter))
			em_start_locked(ifp);
		EM_TX_UNLOCK(adapter);
	}

	/* Interrupts were masked in em_irq_fast(); unmask them now. */
	em_enable_intr(adapter);
}
1818 
1819 /*********************************************************************
1820  *
1821  *  Fast Legacy/MSI Combined Interrupt Service routine
1822  *
1823  *********************************************************************/
#if __FreeBSD_version < 700000
#define FILTER_STRAY
#define FILTER_HANDLED
static void
#else
static int
#endif
em_irq_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	u32		reg_icr;

	/* Runs in primary interrupt context; defer real work to tasks. */
	ifp = adapter->ifp;

	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return FILTER_STRAY;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return FILTER_STRAY;

	/*
	 * Starting with the 82571 chip, bit 31 should be used to
	 * determine whether the interrupt belongs to us.
	 */
	if (adapter->hw.mac.type >= e1000_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return FILTER_STRAY;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	em_disable_intr(adapter);
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		adapter->hw.mac.get_link_status = 1;
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
	return FILTER_HANDLED;
}
1875 
1876 /*********************************************************************
1877  *
1878  *  MSIX Interrupt Service Routines
1879  *
1880  **********************************************************************/
1881 #define EM_MSIX_TX	0x00040000
1882 #define EM_MSIX_RX	0x00010000
1883 #define EM_MSIX_LINK	0x00100000
1884 
1885 static void
1886 em_msix_tx(void *arg)
1887 {
1888 	struct adapter *adapter = arg;
1889 	struct ifnet	*ifp = adapter->ifp;
1890 
1891 	++adapter->tx_irq;
1892 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1893 		EM_TX_LOCK(adapter);
1894 		em_txeof(adapter);
1895 		EM_TX_UNLOCK(adapter);
1896 		taskqueue_enqueue(adapter->tq, &adapter->tx_task);
1897 	}
1898 	/* Reenable this interrupt */
1899 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
1900 	return;
1901 }
1902 
1903 /*********************************************************************
1904  *
1905  *  MSIX RX Interrupt Service routine
1906  *
1907  **********************************************************************/
1908 
1909 static void
1910 em_msix_rx(void *arg)
1911 {
1912 	struct adapter *adapter = arg;
1913 	struct ifnet	*ifp = adapter->ifp;
1914 
1915 	++adapter->rx_irq;
1916 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1917 	    (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1918 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1919 	/* Reenable this interrupt */
1920 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
1921 	return;
1922 }
1923 
1924 /*********************************************************************
1925  *
1926  *  MSIX Link Fast Interrupt Service routine
1927  *
1928  **********************************************************************/
1929 
1930 static void
1931 em_msix_link(void *arg)
1932 {
1933 	struct adapter	*adapter = arg;
1934 	u32		reg_icr;
1935 
1936 	++adapter->link_irq;
1937 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1938 
1939 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1940 		adapter->hw.mac.get_link_status = 1;
1941 		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1942 	}
1943 	E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1944 	    EM_MSIX_LINK | E1000_IMS_LSC);
1945 	return;
1946 }
1947 
1948 static void
1949 em_handle_rx(void *context, int pending)
1950 {
1951 	struct adapter	*adapter = context;
1952 	struct ifnet	*ifp = adapter->ifp;
1953 
1954 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1955 	    (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1956 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1957 
1958 }
1959 
/*
 * Deferred TX handler for the MSIX TX vector: reclaim completed
 * descriptors and restart transmission if packets are queued.
 */
static void
em_handle_tx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
#ifdef IFNET_BUF_RING
		/*
		 * With buf_ring TX the lock may be held by a concurrent
		 * transmit path; just bail, the task will be run again.
		 */
		if (!EM_TX_TRYLOCK(adapter))
			return;
#else
		EM_TX_LOCK(adapter);
#endif

		em_txeof(adapter);
		if (!ADAPTER_RING_EMPTY(adapter))
			em_start_locked(ifp);
		EM_TX_UNLOCK(adapter);
	}
}
1980 #endif /* EM_FAST_IRQ */
1981 
1982 /*********************************************************************
1983  *
1984  *  Media Ioctl callback
1985  *
1986  *  This routine is called whenever the user queries the status of
1987  *  the interface using ifconfig.
1988  *
1989  **********************************************************************/
1990 static void
1991 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1992 {
1993 	struct adapter *adapter = ifp->if_softc;
1994 	u_char fiber_type = IFM_1000_SX;
1995 
1996 	INIT_DEBUGOUT("em_media_status: begin");
1997 
1998 	EM_CORE_LOCK(adapter);
1999 	em_update_link_status(adapter);
2000 
2001 	ifmr->ifm_status = IFM_AVALID;
2002 	ifmr->ifm_active = IFM_ETHER;
2003 
2004 	if (!adapter->link_active) {
2005 		EM_CORE_UNLOCK(adapter);
2006 		return;
2007 	}
2008 
2009 	ifmr->ifm_status |= IFM_ACTIVE;
2010 
2011 	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2012 	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2013 		if (adapter->hw.mac.type == e1000_82545)
2014 			fiber_type = IFM_1000_LX;
2015 		ifmr->ifm_active |= fiber_type | IFM_FDX;
2016 	} else {
2017 		switch (adapter->link_speed) {
2018 		case 10:
2019 			ifmr->ifm_active |= IFM_10_T;
2020 			break;
2021 		case 100:
2022 			ifmr->ifm_active |= IFM_100_TX;
2023 			break;
2024 		case 1000:
2025 			ifmr->ifm_active |= IFM_1000_T;
2026 			break;
2027 		}
2028 		if (adapter->link_duplex == FULL_DUPLEX)
2029 			ifmr->ifm_active |= IFM_FDX;
2030 		else
2031 			ifmr->ifm_active |= IFM_HDX;
2032 	}
2033 	EM_CORE_UNLOCK(adapter);
2034 }
2035 
2036 /*********************************************************************
2037  *
2038  *  Media Ioctl callback
2039  *
2040  *  This routine is called when the user changes speed/duplex using
2041  *  media/mediopt option with ifconfig.
2042  *
2043  **********************************************************************/
2044 static int
2045 em_media_change(struct ifnet *ifp)
2046 {
2047 	struct adapter *adapter = ifp->if_softc;
2048 	struct ifmedia  *ifm = &adapter->media;
2049 
2050 	INIT_DEBUGOUT("em_media_change: begin");
2051 
2052 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2053 		return (EINVAL);
2054 
2055 	EM_CORE_LOCK(adapter);
2056 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2057 	case IFM_AUTO:
2058 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
2059 		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
2060 		break;
2061 	case IFM_1000_LX:
2062 	case IFM_1000_SX:
2063 	case IFM_1000_T:
2064 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
2065 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
2066 		break;
2067 	case IFM_100_TX:
2068 		adapter->hw.mac.autoneg = FALSE;
2069 		adapter->hw.phy.autoneg_advertised = 0;
2070 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2071 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
2072 		else
2073 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
2074 		break;
2075 	case IFM_10_T:
2076 		adapter->hw.mac.autoneg = FALSE;
2077 		adapter->hw.phy.autoneg_advertised = 0;
2078 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2079 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
2080 		else
2081 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
2082 		break;
2083 	default:
2084 		device_printf(adapter->dev, "Unsupported media type\n");
2085 	}
2086 
2087 	/* As the speed/duplex settings my have changed we need to
2088 	 * reset the PHY.
2089 	 */
2090 	adapter->hw.phy.reset_disable = FALSE;
2091 
2092 	em_init_locked(adapter);
2093 	EM_CORE_UNLOCK(adapter);
2094 
2095 	return (0);
2096 }
2097 
2098 /*********************************************************************
2099  *
2100  *  This routine maps the mbufs to tx descriptors.
2101  *
2102  *  return 0 on success, positive on failure
2103  **********************************************************************/
2104 
/*
 * Map the mbuf chain *m_headp into TX descriptors and post it to
 * the hardware.  The chain may be defragmented or pulled up, in
 * which case *m_headp is updated; on some failure paths the chain
 * is freed and *m_headp set to NULL, on others the caller retains
 * ownership.  Returns 0 on success or an errno on failure.
 * Must be called with the TX lock held (it touches the TX ring).
 */
static int
em_xmit(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t		map;
	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc	*ctxd = NULL;
	struct mbuf		*m_head;
	u32			txd_upper, txd_lower, txd_used, txd_saved;
	int			nsegs, i, j, first, last = 0;
	int			error, do_tso, tso_desc = 0;
#if __FreeBSD_version < 700000
	struct m_tag		*mtag;
#endif
	m_head = *m_headp;
	txd_upper = txd_lower = txd_used = txd_saved = 0;

#if __FreeBSD_version >= 700000
	do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
#else
	do_tso = 0;
#endif

        /*
         * Force a cleanup if number of TX descriptors
         * available hits the threshold
         */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_txeof(adapter);
		/* Now do we at least have a minimal? */
		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}


	/*
	 * TSO workaround:
	 *  If an mbuf is only header we need
	 *     to pull 4 bytes of data into it.
	 */
	if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
		m_head = m_pullup(m_head, M_TSO_LEN + 4);
		*m_headp = m_head;
		/* m_pullup frees the chain on failure. */
		if (m_head == NULL)
			return (ENOBUFS);
	}

	/*
	 * Map the packet for DMA
	 *
	 * Capture the first descriptor index,
	 * this descriptor will have the index
	 * of the EOP which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * There are two types of errors we can (try) to handle:
	 * - EFBIG means the mbuf chain was too long and bus_dma ran
	 *   out of segments.  Defragment the mbuf chain and try again.
	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
	 *   at this point in time.  Defer sending and try again later.
	 * All other errors, in particular EINVAL, are fatal and prevent the
	 * mbuf chain from ever going through.  Drop it and report error.
	 */
	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_alloc_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error != 0) {
		/*
		 * NOTE(review): unlike the EFBIG retry path above, the
		 * mbuf is NOT freed here; the caller retains ownership.
		 * Verify callers handle both conventions.
		 */
		adapter->no_tx_dma_setup++;
		return (error);
	}

	/*
	 * TSO Hardware workaround, if this packet is not
	 * TSO, and is only a single descriptor long, and
	 * it follows a TSO burst, then we need to add a
	 * sentinel descriptor to prevent premature writeback.
	 */
	if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
		if (nsegs == 1)
			tso_desc = TRUE;
		adapter->tx_tso = FALSE;
	}

	/* Need nsegs descriptors plus headroom; otherwise back off. */
        if (nsegs > (adapter->num_tx_desc_avail - 2)) {
                adapter->no_tx_desc_avail2++;
		bus_dmamap_unload(adapter->txtag, map);
		return (ENOBUFS);
        }
	m_head = *m_headp;

	/* Do hardware assists */
#if __FreeBSD_version >= 700000
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
		/*
		 * NOTE(review): this failure path returns with the DMA
		 * map still loaded and the mbuf not freed -- confirm
		 * callers recover, or whether an unload is needed here.
		 */
		if (error != TRUE)
			return (ENXIO); /* something foobar */
		/* we need to make a final sentinel transmit desc */
		tso_desc = TRUE;
	} else
#endif
#ifndef EM_TIMESYNC
	/*
	** Timesync needs to check the packet header
	** so call checksum code to do so, but don't
	** penalize the code if not defined.
	*/
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
#endif
		em_transmit_checksum_setup(adapter,  m_head,
		    &txd_upper, &txd_lower);

	i = adapter->next_avail_tx_desc;
	/* Remember the start index so 82544/PCIX can roll back. */
	if (adapter->pcix_82544)
		txd_saved = i;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			DESC_ARRAY	desc_array;
			u32		array_elements, counter;
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Out of room: roll back and bail. */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_unload(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];
				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    (adapter->txd_cmd | txd_lower | (u16)
				    desc_array.descriptor[counter].length));
				ctxd->upper.data =
				    htole32((txd_upper));
				last = i;
				if (++i == adapter->num_tx_desc)
                                         i = 0;
				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
                        }
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];
			seg_addr = segs[j].ds_addr;
			seg_len  = segs[j].ds_len;
			/*
			** TSO Workaround:
			** If this is the last descriptor, we want to
			** split it so we have a small final sentinel
			*/
			if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
				seg_len -= 4;
				ctxd->buffer_addr = htole64(seg_addr);
				ctxd->lower.data = htole32(
				adapter->txd_cmd | txd_lower | seg_len);
				ctxd->upper.data =
				    htole32(txd_upper);
				if (++i == adapter->num_tx_desc)
					i = 0;
				/* Now make the sentinel */
				++txd_used; /* using an extra txd */
				ctxd = &adapter->tx_desc_base[i];
				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd->buffer_addr =
				    htole64(seg_addr + seg_len);
				ctxd->lower.data = htole32(
				adapter->txd_cmd | txd_lower | 4);
				ctxd->upper.data =
				    htole32(txd_upper);
				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;
			} else {
				ctxd->buffer_addr = htole64(seg_addr);
				ctxd->lower.data = htole32(
				adapter->txd_cmd | txd_lower | seg_len);
				ctxd->upper.data =
				    htole32(txd_upper);
				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;
			}
			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else {
		adapter->num_tx_desc_avail -= nsegs;
		if (tso_desc) /* TSO used an extra for sentinel */
			adapter->num_tx_desc_avail -= txd_used;
	}

        /*
	** Handle VLAN tag, this is the
	** biggest difference between
	** 6.x and 7
	*/
#if __FreeBSD_version < 700000
        /* Find out if we are in vlan mode. */
        mtag = VLAN_OUTPUT_TAG(ifp, m_head);
        if (mtag != NULL) {
                ctxd->upper.fields.special =
                    htole16(VLAN_TAG_VALUE(mtag));
#else /* FreeBSD 7 */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vtag);
#endif
                /* Tell hardware to add tag */
                ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
        }

	/* The mbuf stays attached to the last buffer used. */
        tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

        /*
         * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
         */
        ctxd->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer->next_eop = last;

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* 82547 half-duplex needs the FIFO-wrap workaround path. */
	if (adapter->hw.mac.type == e1000_82547 &&
	    adapter->link_duplex == HALF_DUPLEX)
		em_82547_move_tail(adapter);
	else {
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
		if (adapter->hw.mac.type == e1000_82547)
			em_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
	}

#ifdef EM_TIMESYNC
	if (ctxd->upper.data & E1000_TXD_EXTCMD_TSTAMP) {
		HW_DEBUGOUT( "@@@ Timestamp bit is set in transmit descriptor\n" );
	}
#endif
	return (0);
}
2406 
2407 /*********************************************************************
2408  *
2409  * 82547 workaround to avoid controller hang in half-duplex environment.
2410  * The workaround is to avoid queuing a large packet that would span
2411  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2412  * in this case. We do that only when FIFO is quiescent.
2413  *
2414  **********************************************************************/
/*
 * Advance the hardware TDT toward the software tail one packet at a
 * time, applying the 82547 half-duplex FIFO workaround.  If a packet
 * would wrap the internal TX FIFO, stop and retry from a 1-tick
 * callout (this function is also the callout handler).
 */
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct e1000_tx_desc *tx_desc;
	u16	hw_tdt, sw_tdt, length = 0;
	bool	eop = 0;

	EM_TX_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		/* Accumulate descriptor lengths until we see EOP. */
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				/* FIFO busy: retry shortly via callout. */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}
2448 
2449 static int
2450 em_82547_fifo_workaround(struct adapter *adapter, int len)
2451 {
2452 	int fifo_space, fifo_pkt_len;
2453 
2454 	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2455 
2456 	if (adapter->link_duplex == HALF_DUPLEX) {
2457 		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2458 
2459 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2460 			if (em_82547_tx_fifo_reset(adapter))
2461 				return (0);
2462 			else
2463 				return (1);
2464 		}
2465 	}
2466 
2467 	return (0);
2468 }
2469 
2470 static void
2471 em_82547_update_fifo_head(struct adapter *adapter, int len)
2472 {
2473 	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2474 
2475 	/* tx_fifo_head is always 16 byte aligned */
2476 	adapter->tx_fifo_head += fifo_pkt_len;
2477 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2478 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
2479 	}
2480 }
2481 
2482 
2483 static int
2484 em_82547_tx_fifo_reset(struct adapter *adapter)
2485 {
2486 	u32 tctl;
2487 
2488 	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
2489 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
2490 	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
2491 	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
2492 	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
2493 	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
2494 	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
2495 		/* Disable TX unit */
2496 		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2497 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
2498 		    tctl & ~E1000_TCTL_EN);
2499 
2500 		/* Reset FIFO pointers */
2501 		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
2502 		    adapter->tx_head_addr);
2503 		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2504 		    adapter->tx_head_addr);
2505 		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2506 		    adapter->tx_head_addr);
2507 		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2508 		    adapter->tx_head_addr);
2509 
2510 		/* Re-enable TX unit */
2511 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2512 		E1000_WRITE_FLUSH(&adapter->hw);
2513 
2514 		adapter->tx_fifo_head = 0;
2515 		adapter->tx_fifo_reset_cnt++;
2516 
2517 		return (TRUE);
2518 	}
2519 	else {
2520 		return (FALSE);
2521 	}
2522 }
2523 
2524 static void
2525 em_set_promisc(struct adapter *adapter)
2526 {
2527 	struct ifnet	*ifp = adapter->ifp;
2528 	u32		reg_rctl;
2529 
2530 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2531 
2532 	if (ifp->if_flags & IFF_PROMISC) {
2533 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2534 		/* Turn this on if you want to see bad packets */
2535 		if (em_debug_sbp)
2536 			reg_rctl |= E1000_RCTL_SBP;
2537 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2538 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2539 		reg_rctl |= E1000_RCTL_MPE;
2540 		reg_rctl &= ~E1000_RCTL_UPE;
2541 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2542 	}
2543 }
2544 
2545 static void
2546 em_disable_promisc(struct adapter *adapter)
2547 {
2548 	u32	reg_rctl;
2549 
2550 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2551 
2552 	reg_rctl &=  (~E1000_RCTL_UPE);
2553 	reg_rctl &=  (~E1000_RCTL_MPE);
2554 	reg_rctl &=  (~E1000_RCTL_SBP);
2555 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2556 }
2557 
2558 
2559 /*********************************************************************
2560  *  Multicast Update
2561  *
2562  *  This routine is called whenever multicast address list is updated.
2563  *
2564  **********************************************************************/
2565 
/*
 * Rebuild the hardware multicast filter from the interface's
 * multicast address list.  Falls back to multicast-promiscuous
 * mode when the list exceeds the hardware table size.
 */
static void
em_set_multi(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct ifmultiaddr *ifma;
	u32 reg_rctl = 0;
	u8  *mta; /* Multicast array memory */
	int mcnt = 0;

	IOCTL_DEBUGOUT("em_set_multi: begin");

	/*
	 * 82542 rev2: place the receiver in reset (and disable MWI)
	 * while the multicast table is being rewritten.
	 */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Allocate temporary memory to setup array */
	mta = malloc(sizeof(u8) *
	    (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	/*
	 * NOTE(review): panicking on an M_NOWAIT allocation failure is
	 * harsh for a recoverable path -- consider a graceful bail-out.
	 */
	if (mta == NULL)
		panic("em_set_multi memory failure\n");

	/* Collect up to the table limit of link-layer addresses. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	/* Too many groups to filter: go multicast-promiscuous instead. */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else
		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

	/* 82542 rev2: take the receiver back out of reset. */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_set_mwi(&adapter->hw);
	}
	free(mta, M_DEVBUF);
}
2626 
2627 
2628 /*********************************************************************
2629  *  Timer routine
2630  *
2631  *  This routine checks for link status and updates statistics.
2632  *
2633  **********************************************************************/
2634 
/*
 * Periodic (once per second) timer, run as a callout with the CORE
 * lock held: refreshes link state and statistics, services the
 * watchdog, and reschedules itself.
 */
static void
em_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_CORE_LOCK_ASSERT(adapter);

	/* Run the combined RX/TX cleanup handler via the taskqueue. */
	taskqueue_enqueue(adapter->tq,
	    &adapter->rxtx_task);
	em_update_link_status(adapter);
	em_update_stats_counters(adapter);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
		em_print_hw_stats(adapter);

	em_smartspeed(adapter);

	/*
	 * Each second we check the watchdog to
	 * protect against hardware hangs.
	 */
	em_watchdog(adapter);

	/* Re-arm for the next tick. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);

}
2666 
/*
 * Refresh the cached link state from the hardware/PHY and, on a
 * transition, update the ifnet (baudrate, link-state event), apply
 * the 82571/82572 SPEED_MODE adjustment for sub-gigabit links, and
 * disable the TX watchdog when the link goes down.
 */
static void
em_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		/* Link came up: latch the negotiated speed/duplex. */
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		/* Check if we must disable SPEED_MODE bit on PCI-E */
		if ((adapter->link_speed != SPEED_1000) &&
		    ((hw->mac.type == e1000_82571) ||
		    (hw->mac.type == e1000_82572))) {
			int tarc0;
			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~SPEED_MODE_BIT;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Link went away: zero speed/duplex and notify the stack. */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_timer = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
2734 
2735 /*********************************************************************
2736  *
2737  *  This routine disables all traffic on the adapter by issuing a
2738  *  global reset on the MAC and deallocates TX/RX buffers.
2739  *
2740  *  This routine should always be called with BOTH the CORE
2741  *  and TX locks.
2742  **********************************************************************/
2743 
/*
 * Disable all traffic: mask interrupts, stop the callouts, mark the
 * interface down and issue a global MAC reset.  Requires both the
 * CORE and TX locks (asserted below).
 */
static void
em_stop(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_CORE_LOCK_ASSERT(adapter);
	EM_TX_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("em_stop: begin");

	/* Quiesce: mask interrupts and stop the periodic callouts. */
	em_disable_intr(adapter);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

#ifdef EM_TIMESYNC
	/* Disable IEEE 1588 Time hardware */
	if ((adapter->hw.mac.type == e1000_82574) ||
	    (adapter->hw.mac.type == e1000_ich10lan))
		em_tsync_disable(adapter);
#endif

	/* Global reset; clear wakeup control on 82544 and newer. */
	e1000_reset_hw(&adapter->hw);
	if (adapter->hw.mac.type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
}
2773 
2774 
2775 /*********************************************************************
2776  *
2777  *  Determine hardware revision.
2778  *
2779  **********************************************************************/
/*
 * Read PCI config space to identify the adapter (vendor/device/
 * subsystem/revision IDs) and let the e1000 shared code derive the
 * MAC type.  Also repairs the memory-access and bus-master command
 * bits if they were left cleared.
 */
static void
em_identify_hardware(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
		device_printf(dev, "Memory Access and/or Bus Master bits "
		    "were not set!\n");
		adapter->hw.bus.pci_cmd_word |=
		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND,
		    adapter->hw.bus.pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Do Shared Code Init and Setup */
	if (e1000_set_mac_type(&adapter->hw)) {
		device_printf(dev, "Setup init failure\n");
		return;
	}
}
2812 
/*
 * Allocate the memory BAR (and, on 82544..82573-era parts, the IO
 * BAR), initialize the MSIX resource arrays, and optionally set up
 * MSI/MSIX.  Returns E1000_SUCCESS or ENXIO on allocation failure.
 *
 * NOTE(review): the early ENXIO returns do not release resources
 * already allocated here -- presumably the detach path cleans up;
 * confirm against the caller.
 */
static int
em_allocate_pci_resources(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	int		val, rid, error = E1000_SUCCESS;

	rid = PCIR_BAR(0);
	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->memory);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Only older adapters use IO mapping */
	if ((adapter->hw.mac.type > e1000_82543) &&
	    (adapter->hw.mac.type < e1000_82571)) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (adapter->ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->ioport);
	}

	/*
	** Init the resource arrays
	**  used by MSIX setup
	*/
	for (int i = 0; i < 3; i++) {
		adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
		adapter->tag[i] = NULL;
		adapter->res[i] = NULL;
	}

	/*
	 * Setup MSI/X or MSI if PCI Express
	 */
	if (em_enable_msi)
		adapter->msi = em_setup_msix(adapter);

	adapter->hw.back = &adapter->osdep;

	return (error);
}
2885 
2886 /*********************************************************************
2887  *
2888  *  Setup the Legacy or MSI Interrupt handler
2889  *
2890  **********************************************************************/
/*
 * Allocate and wire up the single legacy/MSI interrupt.  With
 * EM_LEGACY_IRQ defined the handler runs as a normal ithread
 * (em_intr); otherwise em_irq_fast runs as a filter and defers work
 * to the rxtx/link taskqueue created here.
 */
int
em_allocate_legacy(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int error;

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* Legacy RID is 0 (MSI keeps the RID set up by em_setup_msix) */
	if (adapter->msi == 0)
		adapter->rid[0] = 0;

	/* We allocate a single interrupt resource */
	adapter->res[0] = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res[0] == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

#ifdef EM_LEGACY_IRQ
	/* We do Legacy setup */
	if ((error = bus_setup_intr(dev, adapter->res[0],
#if __FreeBSD_version > 700000
	    /* 7.x+: filter slot is NULL, em_intr is the ithread handler */
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
#else /* 6.X */
	    INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register interrupt handler");
		return (error);
	}

#else /* FAST_IRQ */
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
#if __FreeBSD_version < 700000
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET | INTR_FAST, em_irq_fast, adapter,
#else
	/* 7.x+: em_irq_fast runs in the filter slot; no ithread handler */
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET, em_irq_fast, NULL, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
			    "handler: %d\n", error);
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
		return (error);
	}
#endif  /* EM_LEGACY_IRQ */

	return (0);
}
2955 
2956 /*********************************************************************
2957  *
2958  *  Setup the MSIX Interrupt handlers
2959  *   This is not really Multiqueue, rather
2960  *   its just multiple interrupt vectors.
2961  *
2962  **********************************************************************/
2963 int
2964 em_allocate_msix(struct adapter *adapter)
2965 {
2966 	device_t dev = adapter->dev;
2967 	int error;
2968 
2969 	/* Make sure all interrupts are disabled */
2970 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2971 
2972 	/* First get the resources */
2973 	for (int i = 0; i < adapter->msi; i++) {
2974 		adapter->res[i] = bus_alloc_resource_any(dev,
2975 		    SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
2976 		if (adapter->res[i] == NULL) {
2977 			device_printf(dev,
2978 			    "Unable to allocate bus resource: "
2979 			    "MSIX Interrupt\n");
2980 			return (ENXIO);
2981 		}
2982 	}
2983 
2984 	/*
2985 	 * Now allocate deferred processing contexts.
2986 	 */
2987 	TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
2988 	TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
2989 	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2990 	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2991 	    taskqueue_thread_enqueue, &adapter->tq);
2992 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2993 	    device_get_nameunit(adapter->dev));
2994 
2995 	/*
2996 	 * And setup the interrupt handlers
2997 	 */
2998 
2999 	/* First slot to RX */
3000 	if ((error = bus_setup_intr(dev, adapter->res[0],
3001 #if __FreeBSD_version > 700000
3002 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, adapter,
3003 #else /* 6.X */
3004 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_rx, adapter,
3005 #endif
3006 	    &adapter->tag[0])) != 0) {
3007 		device_printf(dev, "Failed to register RX handler");
3008 		return (error);
3009 	}
3010 
3011 	/* Next TX */
3012 	if ((error = bus_setup_intr(dev, adapter->res[1],
3013 #if __FreeBSD_version > 700000
3014 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, adapter,
3015 #else /* 6.X */
3016 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_tx, adapter,
3017 #endif
3018 	    &adapter->tag[1])) != 0) {
3019 		device_printf(dev, "Failed to register TX handler");
3020 		return (error);
3021 	}
3022 
3023 	/* And Link */
3024 	if ((error = bus_setup_intr(dev, adapter->res[2],
3025 #if __FreeBSD_version > 700000
3026 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter,
3027 #else /* 6.X */
3028 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_link, adapter,
3029 #endif
3030 	    &adapter->tag[2])) != 0) {
3031 		device_printf(dev, "Failed to register TX handler");
3032 		return (error);
3033 	}
3034 
3035 	return (0);
3036 }
3037 
3038 
/*
 * Release everything em_allocate_pci_resources(), em_setup_msix() and
 * the interrupt-setup routines may have acquired.  Safe against a
 * partially initialized adapter: each resource is NULL-checked, and
 * interrupts are torn down before their IRQ resources are released.
 */
static void
em_free_pci_resources(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Make sure the for loop below runs once */
	if (adapter->msi == 0)
		adapter->msi = 1;

	/*
	 * First release all the interrupt resources:
	 *      notice that since these are just kept
	 *      in an array we can do the same logic
	 *      whether its MSIX or just legacy.
	 */
	for (int i = 0; i < adapter->msi; i++) {
		if (adapter->tag[i] != NULL) {
			bus_teardown_intr(dev, adapter->res[i],
			    adapter->tag[i]);
			adapter->tag[i] = NULL;
		}
		if (adapter->res[i] != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    adapter->rid[i], adapter->res[i]);
		}
	}

	/* msi was forced non-zero above, so this always executes. */
	if (adapter->msi)
		pci_release_msi(dev);

	/* The 82574 MSIX table BAR, if em_setup_msix() mapped it. */
	if (adapter->msix != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(EM_MSIX_BAR), adapter->msix);

	if (adapter->memory != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->memory);

	if (adapter->flash != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    EM_FLASH, adapter->flash);

	if (adapter->ioport != NULL)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    adapter->io_rid, adapter->ioport);
}
3085 
3086 /*
3087  * Setup MSI or MSI/X
3088  */
3089 static int
3090 em_setup_msix(struct adapter *adapter)
3091 {
3092 	device_t dev = adapter->dev;
3093 	int val = 0;
3094 
3095 	if (adapter->hw.mac.type < e1000_82571)
3096 		return (0);
3097 
3098 	/* Setup MSI/X for Hartwell */
3099 	if (adapter->hw.mac.type == e1000_82574) {
3100 		/* Map the MSIX BAR */
3101 		int rid = PCIR_BAR(EM_MSIX_BAR);
3102 		adapter->msix = bus_alloc_resource_any(dev,
3103 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
3104        		if (!adapter->msix) {
3105 			/* May not be enabled */
3106                		device_printf(adapter->dev,
3107 			    "Unable to map MSIX table \n");
3108 			goto msi;
3109        		}
3110 		val = pci_msix_count(dev);
3111 		/*
3112 		** 82574 can be configured for 5 but
3113 		** we limit use to 3.
3114 		*/
3115 		if (val > 3) val = 3;
3116 		if ((val) && pci_alloc_msix(dev, &val) == 0) {
3117                		device_printf(adapter->dev,"Using MSIX interrupts\n");
3118 			return (val);
3119 		}
3120 	}
3121 msi:
3122        	val = pci_msi_count(dev);
3123        	if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
3124                	adapter->msi = 1;
3125                	device_printf(adapter->dev,"Using MSI interrupt\n");
3126 		return (val);
3127 	}
3128 	return (0);
3129 }
3130 
3131 /*********************************************************************
3132  *
3133  *  Initialize the hardware to a configuration
3134  *  as specified by the adapter structure.
3135  *
3136  **********************************************************************/
/*
 * Reset the MAC and bring it to the configuration described by the
 * adapter structure: firmware handoff, smart-power-down override,
 * flow-control watermarks and the shared-code hardware init.
 * Returns 0 or EIO if e1000_init_hw() fails.
 */
static int
em_hardware_init(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u16 	rx_buffer_size;

	INIT_DEBUGOUT("em_hardware_init: begin");

	/* Issue a global reset */
	e1000_reset_hw(&adapter->hw);

	/* Get control from any management/hw control */
	if (((adapter->hw.mac.type == e1000_82573) ||
	    (adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_ich10lan) ||
	    (adapter->hw.mac.type == e1000_ich9lan)) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_get_hw_control(adapter);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Set up smart power down as default off on newer adapters. */
	if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
	    adapter->hw.mac.type == e1000_82572)) {
		u16 phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	/*
	 * NOTE(review): rx_buffer_size is u16 while (PBA & 0xffff) << 10
	 * converts a KB count to bytes; packet-buffer sizes of 64KB or
	 * more would truncate here — confirm PBA range for these MACs.
	 */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
	    0xffff) << 10 );

	adapter->hw.fc.high_water = rx_buffer_size -
	    roundup2(adapter->max_frame_size, 1024);
	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

	if (adapter->hw.mac.type == e1000_80003es2lan)
		adapter->hw.fc.pause_time = 0xFFFF;
	else
		adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
	adapter->hw.fc.send_xon = TRUE;
	adapter->hw.fc.requested_mode = e1000_fc_full;

	if (e1000_init_hw(&adapter->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	e1000_check_for_link(&adapter->hw);

	return (0);
}
3209 
3210 /*********************************************************************
3211  *
3212  *  Setup networking device structure and register an interface.
3213  *
3214  **********************************************************************/
/*
 * Allocate and initialize the ifnet, advertise checksum/TSO/VLAN
 * capabilities appropriate for this MAC, and register the supported
 * media types with ifmedia.
 */
static void
em_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("em_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	/* Leave one descriptor of headroom in the software send queue. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	/*
	 * NOTE(review): capabilities are populated after ether_ifattach();
	 * verify no consumer inspects if_capabilities during attach.
	 */
	ifp->if_capabilities = ifp->if_capenable = 0;

#ifdef IFNET_BUF_RING
	ifp->if_transmit = em_transmit;
	ifp->if_qflush = em_qflush;
	adapter->br = buf_ring_alloc(2048, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
#endif
	/* Hardware checksum offload (plus VLAN checksum on 7.x+). */
	if (adapter->hw.mac.type >= e1000_82543) {
		int version_cap;
#if __FreeBSD_version < 700000
		version_cap = IFCAP_HWCSUM;
#else
		version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
#endif
		ifp->if_capabilities |= version_cap;
		ifp->if_capenable |= version_cap;
	}

#if __FreeBSD_version >= 700000
	/* Identify TSO capable adapters */
	if ((adapter->hw.mac.type > e1000_82544) &&
	    (adapter->hw.mac.type != e1000_82547))
		ifp->if_capabilities |= IFCAP_TSO4;
	/*
	 * By default only enable on PCI-E, this
	 * can be overriden by ifconfig.
	 */
	if (adapter->hw.mac.type >= e1000_82571)
		ifp->if_capenable |= IFCAP_TSO4;
#endif

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    em_media_change, em_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		u_char fiber_type = IFM_1000_SX;	/* default type */

		/* The 82545 fiber part uses 1000BASE-LX. */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		/* The IFE PHY (10/100 only) cannot do gigabit. */
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
}
3313 
3314 
3315 /*********************************************************************
3316  *
3317  *  Workaround for SmartSpeed on 82541 and 82547 controllers
3318  *
3319  **********************************************************************/
/*
 * SmartSpeed workaround for 82541/82547 (IGP PHY): when 1000FULL is
 * being advertised but link repeatedly fails with a Master/Slave
 * configuration fault, temporarily give up manual M/S configuration
 * and restart autonegotiation, then periodically re-enable it.
 * Called repeatedly from the watchdog path while link is down.
 */
static void
em_smartspeed(struct adapter *adapter)
{
	u16 phy_tmp;

	/* Only relevant: link down, IGP PHY, autoneg with 1000FULL. */
	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
	    adapter->hw.mac.autoneg == 0 ||
	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&adapter->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			/* Drop manual M/S config and restart autoneg. */
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&adapter->hw,
				    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.mac.autoneg &&
				   !e1000_phy_setup_autoneg(&adapter->hw) &&
				   !e1000_read_phy_reg(&adapter->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					e1000_write_phy_reg(&adapter->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.mac.autoneg &&
		   !e1000_phy_setup_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
3374 
3375 
3376 /*
3377  * Manage DMA'able memory.
3378  */
3379 static void
3380 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3381 {
3382 	if (error)
3383 		return;
3384 	*(bus_addr_t *) arg = segs[0].ds_addr;
3385 }
3386 
3387 static int
3388 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3389         struct em_dma_alloc *dma, int mapflags)
3390 {
3391 	int error;
3392 
3393 #if __FreeBSD_version >= 700000
3394 	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3395 #else
3396 	error = bus_dma_tag_create(NULL,		 /* parent */
3397 #endif
3398 				EM_DBA_ALIGN, 0,	/* alignment, bounds */
3399 				BUS_SPACE_MAXADDR,	/* lowaddr */
3400 				BUS_SPACE_MAXADDR,	/* highaddr */
3401 				NULL, NULL,		/* filter, filterarg */
3402 				size,			/* maxsize */
3403 				1,			/* nsegments */
3404 				size,			/* maxsegsize */
3405 				0,			/* flags */
3406 				NULL,			/* lockfunc */
3407 				NULL,			/* lockarg */
3408 				&dma->dma_tag);
3409 	if (error) {
3410 		device_printf(adapter->dev,
3411 		    "%s: bus_dma_tag_create failed: %d\n",
3412 		    __func__, error);
3413 		goto fail_0;
3414 	}
3415 
3416 	error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3417 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3418 	if (error) {
3419 		device_printf(adapter->dev,
3420 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3421 		    __func__, (uintmax_t)size, error);
3422 		goto fail_2;
3423 	}
3424 
3425 	dma->dma_paddr = 0;
3426 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3427 	    size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3428 	if (error || dma->dma_paddr == 0) {
3429 		device_printf(adapter->dev,
3430 		    "%s: bus_dmamap_load failed: %d\n",
3431 		    __func__, error);
3432 		goto fail_3;
3433 	}
3434 
3435 	return (0);
3436 
3437 fail_3:
3438 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3439 fail_2:
3440 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3441 	bus_dma_tag_destroy(dma->dma_tag);
3442 fail_0:
3443 	dma->dma_map = NULL;
3444 	dma->dma_tag = NULL;
3445 
3446 	return (error);
3447 }
3448 
3449 static void
3450 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3451 {
3452 	if (dma->dma_tag == NULL)
3453 		return;
3454 	if (dma->dma_map != NULL) {
3455 		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3456 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3457 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3458 		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3459 		dma->dma_map = NULL;
3460 	}
3461 	bus_dma_tag_destroy(dma->dma_tag);
3462 	dma->dma_tag = NULL;
3463 }
3464 
3465 
3466 /*********************************************************************
3467  *
3468  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
3469  *  the information needed to transmit a packet on the wire.
3470  *
3471  **********************************************************************/
/*
 * Allocate memory for tx_buffer structures and create the TX DMA tag
 * and a per-descriptor DMA map.  On any failure all partially
 * allocated state is torn down via em_free_transmit_structures().
 * Returns 0, ENOMEM, or a bus_dma errno.
 */
static int
em_allocate_transmit_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *tx_buffer;
	int error;

	/*
	 * Create DMA tags for tx descriptors
	 * (sized for TSO: up to EM_TSO_SIZE across EM_MAX_SCATTER segments)
	 */
#if __FreeBSD_version >= 700000
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
#else
	if ((error = bus_dma_tag_create(NULL,		 /* parent */
#endif
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				EM_TSO_SIZE,		/* maxsize */
				EM_MAX_SCATTER,		/* nsegments */
				EM_TSO_SEG_SIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,		/* lockfunc */
				NULL,		/* lockarg */
				&adapter->txtag)) != 0) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->tx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	for (int i = 0; i < adapter->num_tx_desc; i++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
		/* -1 marks "no end-of-packet descriptor recorded yet". */
		tx_buffer->next_eop = -1;
	}

	return (0);
fail:
	em_free_transmit_structures(adapter);
	return (error);
}
3526 
3527 /*********************************************************************
3528  *
3529  *  (Re)Initialize transmit structures.
3530  *
3531  **********************************************************************/
3532 static void
3533 em_setup_transmit_structures(struct adapter *adapter)
3534 {
3535 	struct em_buffer *tx_buffer;
3536 
3537 	/* Clear the old ring contents */
3538 	bzero(adapter->tx_desc_base,
3539 	    (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3540 
3541 	/* Free any existing TX buffers */
3542 	for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3543 		tx_buffer = &adapter->tx_buffer_area[i];
3544 		bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3545 		    BUS_DMASYNC_POSTWRITE);
3546 		bus_dmamap_unload(adapter->txtag, tx_buffer->map);
3547 		m_freem(tx_buffer->m_head);
3548 		tx_buffer->m_head = NULL;
3549 		tx_buffer->next_eop = -1;
3550 	}
3551 
3552 	/* Reset state */
3553 	adapter->next_avail_tx_desc = 0;
3554 	adapter->next_tx_to_clean = 0;
3555 	adapter->num_tx_desc_avail = adapter->num_tx_desc;
3556 
3557 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3558 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3559 
3560 	return;
3561 }
3562 
3563 /*********************************************************************
3564  *
3565  *  Enable transmit unit.
3566  *
3567  **********************************************************************/
/*
 * Program the hardware transmit unit: descriptor ring base/length,
 * head/tail pointers, inter-packet gap, interrupt-delay timers,
 * TARC tweaks for specific MACs, and finally TCTL to enable TX.
 */
static void
em_initialize_transmit_unit(struct adapter *adapter)
{
	u32	tctl, tarc, tipg = 0;
	u64	bus_addr;

	 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (u32)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	case e1000_80003es2lan:
		tipg = DEFAULT_82543_TIPG_IPGR1;
		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
		    E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* Fiber/serdes links use a different base IPG value. */
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	/* Absolute TX interrupt-delay (TADV) exists from 82540 on. */
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	if ((adapter->hw.mac.type == e1000_82571) ||
	    (adapter->hw.mac.type == e1000_82572)) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= SPEED_MODE_BIT;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
	}

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* Multiple-request support on 82571 and later. */
	if (adapter->hw.mac.type >= e1000_82571)
		tctl |= E1000_TCTL_MULR;

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Only request descriptor-done interrupt delay when configured. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
3652 
3653 /*********************************************************************
3654  *
3655  *  Free all transmit related data structures.
3656  *
3657  **********************************************************************/
3658 static void
3659 em_free_transmit_structures(struct adapter *adapter)
3660 {
3661 	struct em_buffer *tx_buffer;
3662 
3663 	INIT_DEBUGOUT("free_transmit_structures: begin");
3664 
3665 	if (adapter->tx_buffer_area != NULL) {
3666 		for (int i = 0; i < adapter->num_tx_desc; i++) {
3667 			tx_buffer = &adapter->tx_buffer_area[i];
3668 			if (tx_buffer->m_head != NULL) {
3669 				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3670 				    BUS_DMASYNC_POSTWRITE);
3671 				bus_dmamap_unload(adapter->txtag,
3672 				    tx_buffer->map);
3673 				m_freem(tx_buffer->m_head);
3674 				tx_buffer->m_head = NULL;
3675 			} else if (tx_buffer->map != NULL)
3676 				bus_dmamap_unload(adapter->txtag,
3677 				    tx_buffer->map);
3678 			if (tx_buffer->map != NULL) {
3679 				bus_dmamap_destroy(adapter->txtag,
3680 				    tx_buffer->map);
3681 				tx_buffer->map = NULL;
3682 			}
3683 		}
3684 	}
3685 	if (adapter->tx_buffer_area != NULL) {
3686 		free(adapter->tx_buffer_area, M_DEVBUF);
3687 		adapter->tx_buffer_area = NULL;
3688 	}
3689 	if (adapter->txtag != NULL) {
3690 		bus_dma_tag_destroy(adapter->txtag);
3691 		adapter->txtag = NULL;
3692 	}
3693 }
3694 
3695 /*********************************************************************
3696  *
3697  *  The offload context needs to be set when we transfer the first
3698  *  packet of a particular protocol (TCP/UDP). This routine has been
3699  *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3700  *
3701  **********************************************************************/
3702 static void
3703 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3704     u32 *txd_upper, u32 *txd_lower)
3705 {
3706 	struct e1000_context_desc *TXD;
3707 	struct em_buffer *tx_buffer;
3708 	struct ether_vlan_header *eh;
3709 	struct ip *ip = NULL;
3710 	struct ip6_hdr *ip6;
3711 	struct tcp_hdr *th;
3712 	int curr_txd, ehdrlen;
3713 	u32 cmd, hdr_len, ip_hlen;
3714 	u16 etype;
3715 	u8 ipproto;
3716 
3717 	cmd = hdr_len = ipproto = 0;
3718 	/* Setup checksum offload context. */
3719 	curr_txd = adapter->next_avail_tx_desc;
3720 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
3721 	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3722 
3723 	/*
3724 	 * Determine where frame payload starts.
3725 	 * Jump over vlan headers if already present,
3726 	 * helpful for QinQ too.
3727 	 */
3728 	eh = mtod(mp, struct ether_vlan_header *);
3729 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3730 		etype = ntohs(eh->evl_proto);
3731 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3732 	} else {
3733 		etype = ntohs(eh->evl_encap_proto);
3734 		ehdrlen = ETHER_HDR_LEN;
3735 	}
3736 
3737 	/*
3738 	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3739 	 * TODO: Support SCTP too when it hits the tree.
3740 	 */
3741 	switch (etype) {
3742 	case ETHERTYPE_IP:
3743 		ip = (struct ip *)(mp->m_data + ehdrlen);
3744 		ip_hlen = ip->ip_hl << 2;
3745 
3746 		/* Setup of IP header checksum. */
3747 		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3748 			/*
3749 			 * Start offset for header checksum calculation.
3750 			 * End offset for header checksum calculation.
3751 			 * Offset of place to put the checksum.
3752 			 */
3753 			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3754 			TXD->lower_setup.ip_fields.ipcse =
3755 			    htole16(ehdrlen + ip_hlen);
3756 			TXD->lower_setup.ip_fields.ipcso =
3757 			    ehdrlen + offsetof(struct ip, ip_sum);
3758 			cmd |= E1000_TXD_CMD_IP;
3759 			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3760 		}
3761 
3762 		if (mp->m_len < ehdrlen + ip_hlen)
3763 			return;	/* failure */
3764 
3765 		hdr_len = ehdrlen + ip_hlen;
3766 		ipproto = ip->ip_p;
3767 
3768 		break;
3769 	case ETHERTYPE_IPV6:
3770 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3771 		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3772 
3773 		if (mp->m_len < ehdrlen + ip_hlen)
3774 			return;	/* failure */
3775 
3776 		/* IPv6 doesn't have a header checksum. */
3777 
3778 		hdr_len = ehdrlen + ip_hlen;
3779 		ipproto = ip6->ip6_nxt;
3780 
3781 		break;
3782 #ifdef EM_TIMESYNC
3783 	case ETHERTYPE_IEEE1588:
3784 		*txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
3785 		break;
3786 #endif
3787 	default:
3788 		*txd_upper = 0;
3789 		*txd_lower = 0;
3790 		return;
3791 	}
3792 
3793 	switch (ipproto) {
3794 	case IPPROTO_TCP:
3795 		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3796 			/*
3797 			 * Start offset for payload checksum calculation.
3798 			 * End offset for payload checksum calculation.
3799 			 * Offset of place to put the checksum.
3800 			 */
3801 			th = (struct tcp_hdr *)(mp->m_data + hdr_len);
3802 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3803 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3804 			TXD->upper_setup.tcp_fields.tucso =
3805 			    hdr_len + offsetof(struct tcphdr, th_sum);
3806 			cmd |= E1000_TXD_CMD_TCP;
3807 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3808 		}
3809 		break;
3810 	case IPPROTO_UDP:
3811 	{
3812 #ifdef EM_TIMESYNC
3813 		void *hdr = (caddr_t) ip + ip_hlen;
3814 		struct udphdr *uh = (struct udphdr *)hdr;
3815 
3816 		if (uh->uh_dport == htons(TSYNC_PORT)) {
3817 			*txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
3818 			IOCTL_DEBUGOUT("@@@ Sending Event Packet\n");
3819 		}
3820 #endif
3821 		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3822 			/*
3823 			 * Start offset for header checksum calculation.
3824 			 * End offset for header checksum calculation.
3825 			 * Offset of place to put the checksum.
3826 			 */
3827 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3828 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3829 			TXD->upper_setup.tcp_fields.tucso =
3830 			    hdr_len + offsetof(struct udphdr, uh_sum);
3831 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3832 		}
3833 		/* Fall Thru */
3834 	}
3835 	default:
3836 		break;
3837 	}
3838 
3839 #ifdef EM_TIMESYNC
3840 	/*
3841 	** We might be here just for TIMESYNC
3842 	** which means we don't need the context
3843 	** descriptor.
3844 	*/
3845 	if (!mp->m_pkthdr.csum_flags & CSUM_OFFLOAD)
3846 		return;
3847 #endif
3848 	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
3849 		     E1000_TXD_DTYP_D;		/* Data descr */
3850 	TXD->tcp_seg_setup.data = htole32(0);
3851 	TXD->cmd_and_length =
3852 	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3853 	tx_buffer->m_head = NULL;
3854 	tx_buffer->next_eop = -1;
3855 
3856 	if (++curr_txd == adapter->num_tx_desc)
3857 		curr_txd = 0;
3858 
3859 	adapter->num_tx_desc_avail--;
3860 	adapter->next_avail_tx_desc = curr_txd;
3861 }
3862 
3863 
3864 #if __FreeBSD_version >= 700000
3865 /**********************************************************************
3866  *
3867  *  Setup work for hardware segmentation offload (TSO)
3868  *
3869  **********************************************************************/
/*
 * Program a single TSO context descriptor for mbuf chain `mp' and set
 * the option bits the caller must OR into the subsequent data
 * descriptors via *txd_upper / *txd_lower.
 *
 * Returns TRUE when the context descriptor was written (consumes one
 * TX descriptor), FALSE when the packet is not eligible for TSO
 * (non-TCP payload, IPv6, or headers not contiguous in the first mbuf).
 */
static bool
em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
   u32 *txd_lower)
{
	struct e1000_context_desc *TXD;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th;
	int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
	u16 etype;

	/*
	 * This function could/should be extended to support IP/IPv6
	 * fragmentation as well.  But as they say, one step at a time.
	 */

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;	/* -1 */

	/*
	 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		isip6 = 0;
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return FALSE;	/* 0 */
		/*
		 * Zero ip_len/ip_sum: the hardware recomputes both for
		 * every segment it emits.
		 */
		ip->ip_len = 0;
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
#if 1
		/*
		 * Seed the TCP checksum with the pseudo-header sum over
		 * addresses + protocol (no length); the hardware folds in
		 * the per-segment length itself.
		 */
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	case ETHERTYPE_IPV6:
		isip6 = 1;
		return FALSE;			/* Not supported yet. */
		/*
		 * NOTE: everything below in this case is dead code until
		 * IPv6 TSO is enabled by removing the return above.
		 */
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return FALSE;	/* 0 */
		ip6->ip6_plen = 0;
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
#if 0
		th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
		    htons(IPPROTO_TCP));	/* XXX: function notyet. */
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	default:
		return FALSE;
	}
	/* Total length of all headers, up to and including TCP options. */
	hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);

	*txd_lower = (E1000_TXD_CMD_DEXT |	/* Extended descr type */
		      E1000_TXD_DTYP_D |	/* Data descr type */
		      E1000_TXD_CMD_TSE);	/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
		      E1000_TXD_POPTS_TXSM) << 8;

	/* Claim the next free descriptor slot for the context descriptor. */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];

	/* IPv6 doesn't have a header checksum. */
	if (!isip6) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
		TXD->lower_setup.ip_fields.ipcse =
		    htole16(ehdrlen + ip_hlen - 1);
		TXD->lower_setup.ip_fields.ipcso =
		    ehdrlen + offsetof(struct ip, ip_sum);
	}
	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss =
	    ehdrlen + ip_hlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
	TXD->tcp_seg_setup.fields.hdr_len = hdr_len;

	TXD->cmd_and_length = htole32(adapter->txd_cmd |
				E1000_TXD_CMD_DEXT |	/* Extended descr */
				E1000_TXD_CMD_TSE |	/* TSE context */
				(isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
				E1000_TXD_CMD_TCP |	/* Do TCP checksum */
				(mp->m_pkthdr.len - (hdr_len))); /* Total len */

	/* Context descriptors carry no mbuf and are never an EOP. */
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	adapter->tx_tso = TRUE;

	return TRUE;
}
4013 
4014 #endif /* __FreeBSD_version >= 700000 */
4015 
4016 /**********************************************************************
4017  *
4018  *  Examine each tx_buffer in the used queue. If the hardware is done
4019  *  processing the packet then free associated resources. The
4020  *  tx_buffer is put back on the free queue.
4021  *
4022  **********************************************************************/
/*
 * Reclaim completed transmit descriptors.  Walks packet-by-packet: for
 * each packet whose EOP descriptor has the DD (descriptor done) bit
 * set, every descriptor in that packet's range is cleared and its mbuf
 * (if any) unloaded and freed.  Also manages the OACTIVE flag and the
 * watchdog timer based on how much of the ring is now free.
 * Caller must hold the TX lock.
 */
static void
em_txeof(struct adapter *adapter)
{
        int first, last, done, num_avail;
        struct em_buffer *tx_buffer;
        struct e1000_tx_desc   *tx_desc, *eop_desc;
	struct ifnet   *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

	/* Ring already completely clean: nothing to do. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        first = adapter->next_tx_to_clean;
        tx_desc = &adapter->tx_desc_base[first];
        tx_buffer = &adapter->tx_buffer_area[first];
	/* next_eop was recorded at transmit time for the packet's last desc. */
	last = tx_buffer->next_eop;
        eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
 		last = 0;
	done = last;

        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);

	/* Outer loop: one iteration per completed packet. */
        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
                	tx_desc->upper.data = 0;
                	tx_desc->lower.data = 0;
                	tx_desc->buffer_addr = 0;
                	num_avail++;

			/* Only the head descriptor of a packet owns an mbuf. */
			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

                        	m_freem(tx_buffer->m_head);
                        	tx_buffer->m_head = NULL;
                	}
			tx_buffer->next_eop = -1;

	                if (++first == adapter->num_tx_desc)
				first = 0;

	                tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
        		eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
        }
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->next_tx_to_clean = first;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
         * that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout. Otherwise,
         * if some descriptors have been freed, restart the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* All clean, turn off the timer */
                if (num_avail == adapter->num_tx_desc) {
			adapter->watchdog_timer = 0;
		} else
		/* Some cleaned, reset the timer */
                if (num_avail != adapter->num_tx_desc_avail)
			adapter->watchdog_timer = EM_TX_TIMEOUT;
        }
        adapter->num_tx_desc_avail = num_avail;
	return;
}
4117 
4118 /*********************************************************************
4119  *
4120  *  When Link is lost sometimes there is work still in the TX ring
4121  *  which will result in a watchdog, rather than allow that do an
4122  *  attempted cleanup and then reinit here. Note that this has been
4123  *  seens mostly with fiber adapters.
4124  *
4125  **********************************************************************/
4126 static void
4127 em_tx_purge(struct adapter *adapter)
4128 {
4129 	if ((!adapter->link_active) && (adapter->watchdog_timer)) {
4130 		EM_TX_LOCK(adapter);
4131 		em_txeof(adapter);
4132 		EM_TX_UNLOCK(adapter);
4133 		if (adapter->watchdog_timer) { /* Still not clean? */
4134 			adapter->watchdog_timer = 0;
4135 			em_init_locked(adapter);
4136 		}
4137 	}
4138 }
4139 
4140 /*********************************************************************
4141  *
4142  *  Get a buffer from system mbuf buffer pool.
4143  *
4144  **********************************************************************/
/*
 * Allocate a fresh mbuf cluster for RX slot `i', DMA-load it via the
 * spare map, and write its physical address into the RX descriptor.
 * Returns 0 on success or an errno (ENOBUFS / bus_dma error); on
 * failure the slot's existing buffer and map are left untouched.
 */
static int
em_get_buf(struct adapter *adapter, int i)
{
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	struct em_buffer	*rx_buffer;
	int			error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		adapter->mbuf_cluster_failed++;
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * When the frame fits with slack, shift the payload so the IP
	 * header lands on an aligned boundary.
	 */
	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 * Load into the spare map first so a failure leaves the
	 * slot's current mapping intact.
	 */
	error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
	    adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_free(m);
		return (error);
	}

	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));

	rx_buffer = &adapter->rx_buffer_area[i];
	if (rx_buffer->m_head != NULL)
		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);

	/* Swap the freshly loaded spare map into the slot; the slot's
	 * old (now unloaded) map becomes the new spare. */
	map = rx_buffer->map;
	rx_buffer->map = adapter->rx_sparemap;
	adapter->rx_sparemap = map;
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
	rx_buffer->m_head = m;

	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
	return (0);
}
4191 
4192 /*********************************************************************
4193  *
4194  *  Allocate memory for rx_buffer structures. Since we use one
4195  *  rx_buffer per received packet, the maximum number of rx_buffer's
4196  *  that we'll need is equal to the number of receive descriptors
4197  *  that we've allocated.
4198  *
4199  **********************************************************************/
/*
 * One-time allocation of RX bookkeeping: the rx_buffer array, the RX
 * DMA tag, one spare DMA map (used by em_get_buf for failure-safe
 * buffer replacement), and one DMA map per descriptor.  On any failure
 * everything allocated so far is torn down via
 * em_free_receive_structures and the error is returned.
 */
static int
em_allocate_receive_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *rx_buffer;
	int i, error;

	adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->rx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		return (ENOMEM);
	}

	/* Tag for single-segment, cluster-sized RX buffers. */
#if __FreeBSD_version >= 700000
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
#else
	error = bus_dma_tag_create(NULL,		 /* parent */
#endif
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MCLBYTES,		/* maxsize */
				1,			/* nsegments */
				MCLBYTES,		/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&adapter->rxtag);
	if (error) {
		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
		    __func__, error);
		goto fail;
	}

	/* Create the spare map (used by getbuf) */
	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
	     &adapter->rx_sparemap);
	if (error) {
		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
		    __func__, error);
		goto fail;
	}

	/* One DMA map per RX descriptor slot. */
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
		    &rx_buffer->map);
		if (error) {
			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
			    __func__, error);
			goto fail;
		}
	}

	return (0);

fail:
	em_free_receive_structures(adapter);
	return (error);
}
4262 
4263 /*********************************************************************
4264  *
4265  *  (Re)initialize receive structures.
4266  *
4267  **********************************************************************/
/*
 * (Re)initialize the RX ring: zero the descriptors, release any mbufs
 * still attached to the slots, then populate every slot with a fresh
 * buffer via em_get_buf.
 *
 * NOTE(review): if em_get_buf fails partway through, this returns with
 * the ring only partially populated; presumably the caller is expected
 * to tear down via em_free_receive_structures — confirm against callers.
 */
static int
em_setup_receive_structures(struct adapter *adapter)
{
	struct em_buffer *rx_buffer;
	int i, error;

	/* Reset descriptor ring */
	bzero(adapter->rx_desc_base,
	    (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);

	/* Free current RX buffers. */
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		if (rx_buffer->m_head != NULL) {
			bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
			m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
        }

	/* Allocate new ones. */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		error = em_get_buf(adapter, i);
		if (error)
                        return (error);
	}

	/* Setup our descriptor pointers */
	adapter->next_rx_desc_to_check = 0;
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
4304 
4305 /*********************************************************************
4306  *
4307  *  Enable receive unit.
4308  *
4309  **********************************************************************/
4310 #define MAX_INTS_PER_SEC	8000
4311 #define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
4312 
/*
 * Program the hardware receive unit: interrupt moderation, ring base
 * address and length, RCTL (buffer size, promiscuity of bad frames,
 * long-packet enable), RX checksum offload, and finally the head/tail
 * pointers.  Receives are disabled for the duration of the setup and
 * re-enabled by the final RCTL write.
 */
static void
em_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/*
	** When using MSIX interrupts we need to throttle
	** using the EITR register (82574 only)
	*/
	if (adapter->msix)
		for (int i = 0; i < 4; i++)
			E1000_WRITE_REG(&adapter->hw,
			    E1000_EITR_82574(i), DEFAULT_ITR);

	/* Disable accelerated acknowledge */
	if (adapter->hw.mac.type == e1000_82574)
		E1000_WRITE_REG(&adapter->hw,
		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* Store-bad-packets follows the TBI workaround state (82543). */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Buffer sizes >2048 require the buffer-size-extension bit. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/*
	** XXX TEMPORARY WORKAROUND: on some systems with 82573
	** long latencies are observed, like Lenovo X60. This
	** change eliminates the problem, but since having positive
	** values in RDTR is a known source of problems on other
	** platforms another solution is being sought.
	*/
	if (adapter->hw.mac.type == e1000_82573)
		E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
4430 
4431 /*********************************************************************
4432  *
4433  *  Free receive related data structures.
4434  *
4435  **********************************************************************/
/*
 * Tear down everything em_allocate_receive_structures built: the spare
 * map, every per-slot mbuf and DMA map, the rx_buffer array, and the
 * RX DMA tag.  Safe to call on a partially-constructed state (every
 * step is NULL-checked), which is why the allocation path can use it
 * as its failure handler.
 */
static void
em_free_receive_structures(struct adapter *adapter)
{
	struct em_buffer *rx_buffer;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (adapter->rx_sparemap) {
		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
		adapter->rx_sparemap = NULL;
	}

	/* Cleanup any existing buffers */
	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->m_head != NULL) {
				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			} else if (rx_buffer->map != NULL)
				/* Defensive: unload even with no mbuf. */
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}
	}

	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}

	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
}
4481 
4482 /*********************************************************************
4483  *
4484  *  This routine executes in interrupt context. It replenishes
4485  *  the mbufs in the descriptor and sends data which has been
4486  *  dma'ed into host memory to upper layer.
4487  *
4488  *  We loop at most count times if count is > 0, or until done if
4489  *  count < 0.
4490  *
4491  *********************************************************************/
4492 static int
4493 em_rxeof(struct adapter *adapter, int count)
4494 {
4495 	struct ifnet	*ifp = adapter->ifp;;
4496 	struct mbuf	*mp;
4497 	u8		status, accept_frame = 0, eop = 0;
4498 	u16 		len, desc_len, prev_len_adj;
4499 	int		i;
4500 	struct e1000_rx_desc   *current_desc;
4501 
4502 	EM_RX_LOCK(adapter);
4503 	i = adapter->next_rx_desc_to_check;
4504 	current_desc = &adapter->rx_desc_base[i];
4505 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4506 	    BUS_DMASYNC_POSTREAD);
4507 
4508 	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
4509 		EM_RX_UNLOCK(adapter);
4510 		return (0);
4511 	}
4512 
4513 	while ((current_desc->status & E1000_RXD_STAT_DD) &&
4514 	    (count != 0) &&
4515 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4516 		struct mbuf *m = NULL;
4517 
4518 		mp = adapter->rx_buffer_area[i].m_head;
4519 		/*
4520 		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
4521 		 * needs to access the last received byte in the mbuf.
4522 		 */
4523 		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
4524 		    BUS_DMASYNC_POSTREAD);
4525 
4526 		accept_frame = 1;
4527 		prev_len_adj = 0;
4528 		desc_len = le16toh(current_desc->length);
4529 		status = current_desc->status;
4530 		if (status & E1000_RXD_STAT_EOP) {
4531 			count--;
4532 			eop = 1;
4533 			if (desc_len < ETHER_CRC_LEN) {
4534 				len = 0;
4535 				prev_len_adj = ETHER_CRC_LEN - desc_len;
4536 			} else
4537 				len = desc_len - ETHER_CRC_LEN;
4538 		} else {
4539 			eop = 0;
4540 			len = desc_len;
4541 		}
4542 
4543 		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
4544 			u8	last_byte;
4545 			u32	pkt_len = desc_len;
4546 
4547 			if (adapter->fmp != NULL)
4548 				pkt_len += adapter->fmp->m_pkthdr.len;
4549 
4550 			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
4551 			if (TBI_ACCEPT(&adapter->hw, status,
4552 			    current_desc->errors, pkt_len, last_byte,
4553 			    adapter->min_frame_size, adapter->max_frame_size)) {
4554 				e1000_tbi_adjust_stats_82543(&adapter->hw,
4555 				    &adapter->stats, pkt_len,
4556 				    adapter->hw.mac.addr,
4557 				    adapter->max_frame_size);
4558 				if (len > 0)
4559 					len--;
4560 			} else
4561 				accept_frame = 0;
4562 		}
4563 
4564 		if (accept_frame) {
4565 			if (em_get_buf(adapter, i) != 0) {
4566 				ifp->if_iqdrops++;
4567 				goto discard;
4568 			}
4569 
4570 			/* Assign correct length to the current fragment */
4571 			mp->m_len = len;
4572 
4573 			if (adapter->fmp == NULL) {
4574 				mp->m_pkthdr.len = len;
4575 				adapter->fmp = mp; /* Store the first mbuf */
4576 				adapter->lmp = mp;
4577 			} else {
4578 				/* Chain mbuf's together */
4579 				mp->m_flags &= ~M_PKTHDR;
4580 				/*
4581 				 * Adjust length of previous mbuf in chain if
4582 				 * we received less than 4 bytes in the last
4583 				 * descriptor.
4584 				 */
4585 				if (prev_len_adj > 0) {
4586 					adapter->lmp->m_len -= prev_len_adj;
4587 					adapter->fmp->m_pkthdr.len -=
4588 					    prev_len_adj;
4589 				}
4590 				adapter->lmp->m_next = mp;
4591 				adapter->lmp = adapter->lmp->m_next;
4592 				adapter->fmp->m_pkthdr.len += len;
4593 			}
4594 
4595 			if (eop) {
4596 				adapter->fmp->m_pkthdr.rcvif = ifp;
4597 				ifp->if_ipackets++;
4598 				em_receive_checksum(adapter, current_desc,
4599 				    adapter->fmp);
4600 #ifndef __NO_STRICT_ALIGNMENT
4601 				if (adapter->max_frame_size >
4602 				    (MCLBYTES - ETHER_ALIGN) &&
4603 				    em_fixup_rx(adapter) != 0)
4604 					goto skip;
4605 #endif
4606 				if (status & E1000_RXD_STAT_VP) {
4607 #if __FreeBSD_version < 700000
4608 					VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
4609 					    (le16toh(current_desc->special) &
4610 					    E1000_RXD_SPC_VLAN_MASK));
4611 #else
4612 					adapter->fmp->m_pkthdr.ether_vtag =
4613 					    (le16toh(current_desc->special) &
4614 					    E1000_RXD_SPC_VLAN_MASK);
4615 					adapter->fmp->m_flags |= M_VLANTAG;
4616 #endif
4617 				}
4618 #ifndef __NO_STRICT_ALIGNMENT
4619 skip:
4620 #endif
4621 				m = adapter->fmp;
4622 				adapter->fmp = NULL;
4623 				adapter->lmp = NULL;
4624 			}
4625 		} else {
4626 			ifp->if_ierrors++;
4627 discard:
4628 			/* Reuse loaded DMA map and just update mbuf chain */
4629 			mp = adapter->rx_buffer_area[i].m_head;
4630 			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
4631 			mp->m_data = mp->m_ext.ext_buf;
4632 			mp->m_next = NULL;
4633 			if (adapter->max_frame_size <=
4634 			    (MCLBYTES - ETHER_ALIGN))
4635 				m_adj(mp, ETHER_ALIGN);
4636 			if (adapter->fmp != NULL) {
4637 				m_freem(adapter->fmp);
4638 				adapter->fmp = NULL;
4639 				adapter->lmp = NULL;
4640 			}
4641 			m = NULL;
4642 		}
4643 
4644 		/* Zero out the receive descriptors status. */
4645 		current_desc->status = 0;
4646 		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4647 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4648 
4649 		/* Advance our pointers to the next descriptor. */
4650 		if (++i == adapter->num_rx_desc)
4651 			i = 0;
4652 		if (m != NULL) {
4653 			adapter->next_rx_desc_to_check = i;
4654 			/* Unlock for call into stack */
4655 			EM_RX_UNLOCK(adapter);
4656 			(*ifp->if_input)(ifp, m);
4657 			EM_RX_LOCK(adapter);
4658 			i = adapter->next_rx_desc_to_check;
4659 		}
4660 		current_desc = &adapter->rx_desc_base[i];
4661 	}
4662 	adapter->next_rx_desc_to_check = i;
4663 
4664 	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
4665 	if (--i < 0)
4666 		i = adapter->num_rx_desc - 1;
4667 	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
4668 	EM_RX_UNLOCK(adapter);
4669 	if (!((current_desc->status) & E1000_RXD_STAT_DD))
4670 		return (0);
4671 
4672 	return (1);
4673 }
4674 
4675 #ifndef __NO_STRICT_ALIGNMENT
4676 /*
4677  * When jumbo frames are enabled we should realign entire payload on
4678  * architecures with strict alignment. This is serious design mistake of 8254x
4679  * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4680  * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4681  * payload. On architecures without strict alignment restrictions 8254x still
4682  * performs unaligned memory access which would reduce the performance too.
4683  * To avoid copying over an entire frame to align, we allocate a new mbuf and
4684  * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4685  * existing mbuf chain.
4686  *
4687  * Be aware, best performance of the 8254x is achived only when jumbo frame is
4688  * not used at all on architectures with strict alignment.
4689  */
4690 static int
4691 em_fixup_rx(struct adapter *adapter)
4692 {
4693 	struct mbuf *m, *n;
4694 	int error;
4695 
4696 	error = 0;
4697 	m = adapter->fmp;
4698 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4699 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4700 		m->m_data += ETHER_HDR_LEN;
4701 	} else {
4702 		MGETHDR(n, M_DONTWAIT, MT_DATA);
4703 		if (n != NULL) {
4704 			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4705 			m->m_data += ETHER_HDR_LEN;
4706 			m->m_len -= ETHER_HDR_LEN;
4707 			n->m_len = ETHER_HDR_LEN;
4708 			M_MOVE_PKTHDR(n, m);
4709 			n->m_next = m;
4710 			adapter->fmp = n;
4711 		} else {
4712 			adapter->dropped_pkts++;
4713 			m_freem(adapter->fmp);
4714 			adapter->fmp = NULL;
4715 			error = ENOMEM;
4716 		}
4717 	}
4718 
4719 	return (error);
4720 }
4721 #endif
4722 
4723 /*********************************************************************
4724  *
4725  *  Verify that the hardware indicated that the checksum is valid.
4726  *  Inform the stack about the status of checksum so that stack
4727  *  doesn't spend time verifying the checksum.
4728  *
4729  *********************************************************************/
4730 static void
4731 em_receive_checksum(struct adapter *adapter,
4732 	    struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4733 {
4734 	/* 82543 or newer only */
4735 	if ((adapter->hw.mac.type < e1000_82543) ||
4736 	    /* Ignore Checksum bit is set */
4737 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4738 		mp->m_pkthdr.csum_flags = 0;
4739 		return;
4740 	}
4741 
4742 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4743 		/* Did it pass? */
4744 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4745 			/* IP Checksum Good */
4746 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4747 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4748 
4749 		} else {
4750 			mp->m_pkthdr.csum_flags = 0;
4751 		}
4752 	}
4753 
4754 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4755 		/* Did it pass? */
4756 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4757 			mp->m_pkthdr.csum_flags |=
4758 			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4759 			mp->m_pkthdr.csum_data = htons(0xffff);
4760 		}
4761 	}
4762 }
4763 
4764 /*
4765  * This routine is run via an vlan
4766  * config EVENT
4767  */
4768 static void
4769 em_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
4770 {
4771 	struct adapter	*adapter = ifp->if_softc;
4772 	u32		ctrl, rctl, index, vfta;
4773 
4774 	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4775 	ctrl |= E1000_CTRL_VME;
4776 	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4777 
4778 	/* Setup for Hardware Filter */
4779 	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4780 	rctl |= E1000_RCTL_VFE;
4781 	rctl &= ~E1000_RCTL_CFIEN;
4782 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4783 
4784 	/* Make entry in the hardware filter table */
4785 	index = ((vtag >> 5) & 0x7F);
4786 	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
4787 	vfta |= (1 << (vtag & 0x1F));
4788 	E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);
4789 
4790 	/* Update the frame size */
4791 	E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
4792 	    adapter->max_frame_size + VLAN_TAG_SIZE);
4793 
4794 }
4795 
4796 /*
4797  * This routine is run via an vlan
4798  * unconfig EVENT
4799  */
4800 static void
4801 em_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
4802 {
4803 	struct adapter	*adapter = ifp->if_softc;
4804 	u32		index, vfta;
4805 
4806 	/* Remove entry in the hardware filter table */
4807 	index = ((vtag >> 5) & 0x7F);
4808 	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
4809 	vfta &= ~(1 << (vtag & 0x1F));
4810 	E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);
4811 	/* Have all vlans unregistered? */
4812 	if (adapter->ifp->if_vlantrunk == NULL) {
4813 		u32 rctl;
4814 		/* Turn off the filter table */
4815 		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4816 		rctl &= ~E1000_RCTL_VFE;
4817 		rctl |= E1000_RCTL_CFIEN;
4818 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4819 		/* Reset the frame size */
4820 		E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
4821 		    adapter->max_frame_size);
4822 	}
4823 }
4824 
4825 static void
4826 em_enable_intr(struct adapter *adapter)
4827 {
4828 	struct e1000_hw *hw = &adapter->hw;
4829 	u32 ims_mask = IMS_ENABLE_MASK;
4830 
4831 	if (adapter->msix) {
4832 		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4833 		ims_mask |= EM_MSIX_MASK;
4834 	}
4835 	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
4836 }
4837 
4838 static void
4839 em_disable_intr(struct adapter *adapter)
4840 {
4841 	struct e1000_hw *hw = &adapter->hw;
4842 
4843 	if (adapter->msix)
4844 		E1000_WRITE_REG(hw, EM_EIAC, 0);
4845 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4846 }
4847 
4848 /*
4849  * Bit of a misnomer, what this really means is
4850  * to enable OS management of the system... aka
4851  * to disable special hardware management features
4852  */
4853 static void
4854 em_init_manageability(struct adapter *adapter)
4855 {
4856 	/* A shared code workaround */
4857 #define E1000_82542_MANC2H E1000_MANC2H
4858 	if (adapter->has_manage) {
4859 		int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
4860 		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4861 
4862 		/* disable hardware interception of ARP */
4863 		manc &= ~(E1000_MANC_ARP_EN);
4864 
4865                 /* enable receiving management packets to the host */
4866                 if (adapter->hw.mac.type >= e1000_82571) {
4867 			manc |= E1000_MANC_EN_MNG2HOST;
4868 #define E1000_MNG2HOST_PORT_623 (1 << 5)
4869 #define E1000_MNG2HOST_PORT_664 (1 << 6)
4870 			manc2h |= E1000_MNG2HOST_PORT_623;
4871 			manc2h |= E1000_MNG2HOST_PORT_664;
4872 			E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
4873 		}
4874 
4875 		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4876 	}
4877 }
4878 
4879 /*
4880  * Give control back to hardware management
4881  * controller if there is one.
4882  */
4883 static void
4884 em_release_manageability(struct adapter *adapter)
4885 {
4886 	if (adapter->has_manage) {
4887 		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4888 
4889 		/* re-enable hardware interception of ARP */
4890 		manc |= E1000_MANC_ARP_EN;
4891 
4892 		if (adapter->hw.mac.type >= e1000_82571)
4893 			manc &= ~E1000_MANC_EN_MNG2HOST;
4894 
4895 		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4896 	}
4897 }
4898 
4899 /*
4900  * em_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4901  * For ASF and Pass Through versions of f/w this means that
4902  * the driver is loaded. For AMT version (only with 82573)
4903  * of the f/w this means that the network i/f is open.
4904  *
4905  */
4906 static void
4907 em_get_hw_control(struct adapter *adapter)
4908 {
4909 	u32 ctrl_ext, swsm;
4910 
4911 	/* Let firmware know the driver has taken over */
4912 	switch (adapter->hw.mac.type) {
4913 	case e1000_82573:
4914 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4915 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4916 		    swsm | E1000_SWSM_DRV_LOAD);
4917 		break;
4918 	case e1000_82571:
4919 	case e1000_82572:
4920 	case e1000_80003es2lan:
4921 	case e1000_ich8lan:
4922 	case e1000_ich9lan:
4923 	case e1000_ich10lan:
4924 		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4925 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4926 		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4927 		break;
4928 	default:
4929 		break;
4930 	}
4931 }
4932 
4933 /*
4934  * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4935  * For ASF and Pass Through versions of f/w this means that the
4936  * driver is no longer loaded. For AMT version (only with 82573) i
4937  * of the f/w this means that the network i/f is closed.
4938  *
4939  */
4940 static void
4941 em_release_hw_control(struct adapter *adapter)
4942 {
4943 	u32 ctrl_ext, swsm;
4944 
4945 	/* Let firmware taken over control of h/w */
4946 	switch (adapter->hw.mac.type) {
4947 	case e1000_82573:
4948 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4949 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4950 		    swsm & ~E1000_SWSM_DRV_LOAD);
4951 		break;
4952 	case e1000_82571:
4953 	case e1000_82572:
4954 	case e1000_80003es2lan:
4955 	case e1000_ich8lan:
4956 	case e1000_ich9lan:
4957 	case e1000_ich10lan:
4958 		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4959 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4960 		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4961 		break;
4962 	default:
4963 		break;
4964 
4965 	}
4966 }
4967 
4968 static int
4969 em_is_valid_ether_addr(u8 *addr)
4970 {
4971 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4972 
4973 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4974 		return (FALSE);
4975 	}
4976 
4977 	return (TRUE);
4978 }
4979 
4980 /*
4981  * Enable PCI Wake On Lan capability
4982  */
4983 void
4984 em_enable_wakeup(device_t dev)
4985 {
4986 	u16     cap, status;
4987 	u8      id;
4988 
4989 	/* First find the capabilities pointer*/
4990 	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
4991 	/* Read the PM Capabilities */
4992 	id = pci_read_config(dev, cap, 1);
4993 	if (id != PCIY_PMG)     /* Something wrong */
4994 		return;
4995 	/* OK, we have the power capabilities, so
4996 	   now get the status register */
4997 	cap += PCIR_POWER_STATUS;
4998 	status = pci_read_config(dev, cap, 2);
4999 	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
5000 	pci_write_config(dev, cap, status, 2);
5001 	return;
5002 }
5003 
5004 
5005 /*********************************************************************
5006 * 82544 Coexistence issue workaround.
5007 *    There are 2 issues.
5008 *       1. Transmit Hang issue.
5009 *    To detect this issue, following equation can be used...
5010 *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5011 *	  If SUM[3:0] is in between 1 to 4, we will have this issue.
5012 *
5013 *       2. DAC issue.
5014 *    To detect this issue, following equation can be used...
5015 *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5016 *	  If SUM[3:0] is in between 9 to c, we will have this issue.
5017 *
5018 *
5019 *    WORKAROUND:
5020 *	  Make sure we do not have ending address
5021 *	  as 1,2,3,4(Hang) or 9,a,b,c (DAC)
5022 *
5023 *************************************************************************/
5024 static u32
5025 em_fill_descriptors (bus_addr_t address, u32 length,
5026 		PDESC_ARRAY desc_array)
5027 {
5028 	u32 safe_terminator;
5029 
5030 	/* Since issue is sensitive to length and address.*/
5031 	/* Let us first check the address...*/
5032 	if (length <= 4) {
5033 		desc_array->descriptor[0].address = address;
5034 		desc_array->descriptor[0].length = length;
5035 		desc_array->elements = 1;
5036 		return (desc_array->elements);
5037 	}
5038 	safe_terminator = (u32)((((u32)address & 0x7) +
5039 	    (length & 0xF)) & 0xF);
5040 	/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
5041 	if (safe_terminator == 0   ||
5042 	(safe_terminator > 4   &&
5043 	safe_terminator < 9)   ||
5044 	(safe_terminator > 0xC &&
5045 	safe_terminator <= 0xF)) {
5046 		desc_array->descriptor[0].address = address;
5047 		desc_array->descriptor[0].length = length;
5048 		desc_array->elements = 1;
5049 		return (desc_array->elements);
5050 	}
5051 
5052 	desc_array->descriptor[0].address = address;
5053 	desc_array->descriptor[0].length = length - 4;
5054 	desc_array->descriptor[1].address = address + (length - 4);
5055 	desc_array->descriptor[1].length = 4;
5056 	desc_array->elements = 2;
5057 	return (desc_array->elements);
5058 }
5059 
5060 /**********************************************************************
5061  *
5062  *  Update the board statistics counters.
5063  *
5064  **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/*
	 * Symbol and sequence error counters are only read on copper
	 * media or when the link is up.
	 */
	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */
	/*
	 * NOTE(review): only the high dwords (GORCH/GOTCH, and TORH/TOTH
	 * below) are read here; the low dwords are never read despite
	 * the comment above — confirm against GORCL/GOTCL handling.
	 */

	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

	/* These additional counters exist only on 82543 and newer. */
	if (adapter->hw.mac.type >= e1000_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
	}
	/* Fold the accumulated hardware counters into the ifnet stats. */
	ifp = adapter->ifp;

	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol +
	    adapter->stats.latecol + adapter->watchdog_events;
}
5155 
5156 
5157 /**********************************************************************
5158  *
5159  *  This routine is called only when em_display_debug_stats is enabled.
5160  *  This routine provides a way to take a look at important statistics
5161  *  maintained by the driver and hardware.
5162  *
5163  **********************************************************************/
static void
em_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u8 *hw_addr = adapter->hw.hw_addr;

	/* Raw register state and configuration */
	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
	device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
	    E1000_READ_REG(&adapter->hw, E1000_CTRL),
	    E1000_READ_REG(&adapter->hw, E1000_RCTL));
	/* PBA: high 16 bits = TX packet buffer KB, low 16 = RX */
	device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
	    ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
	    (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
	device_printf(dev, "Flow control watermarks high = %d low = %d\n",
	    adapter->hw.fc.high_water,
	    adapter->hw.fc.low_water);
	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_TIDV),
	    E1000_READ_REG(&adapter->hw, E1000_TADV));
	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_RDTR),
	    E1000_READ_REG(&adapter->hw, E1000_RADV));
	device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
	    (long long)adapter->tx_fifo_wrk_cnt,
	    (long long)adapter->tx_fifo_reset_cnt);
	/* Hardware descriptor ring head/tail positions */
	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
	device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
	    E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
	/* Driver-maintained soft counters */
	device_printf(dev, "Num Tx descriptors avail = %d\n",
	    adapter->num_tx_desc_avail);
	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
	    adapter->no_tx_desc_avail1);
	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
	    adapter->no_tx_desc_avail2);
	device_printf(dev, "Std mbuf failed = %ld\n",
	    adapter->mbuf_alloc_failed);
	device_printf(dev, "Std mbuf cluster failed = %ld\n",
	    adapter->mbuf_cluster_failed);
	device_printf(dev, "Driver dropped packets = %ld\n",
	    adapter->dropped_pkts);
	device_printf(dev, "Driver tx dma failure in encap = %ld\n",
		adapter->no_tx_dma_setup);
}
5210 
static void
em_print_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Dump the accumulated hardware statistics counters. */
	device_printf(dev, "Excessive collisions = %lld\n",
	    (long long)adapter->stats.ecol);
#if	(DEBUG_HW > 0)  /* Dont output these errors normally */
	device_printf(dev, "Symbol errors = %lld\n",
	    (long long)adapter->stats.symerrs);
#endif
	device_printf(dev, "Sequence errors = %lld\n",
	    (long long)adapter->stats.sec);
	device_printf(dev, "Defer count = %lld\n",
	    (long long)adapter->stats.dc);
	device_printf(dev, "Missed Packets = %lld\n",
	    (long long)adapter->stats.mpc);
	device_printf(dev, "Receive No Buffers = %lld\n",
	    (long long)adapter->stats.rnbc);
	/* RLEC is inaccurate on some hardware, calculate our own. */
	device_printf(dev, "Receive Length Errors = %lld\n",
	    ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
	device_printf(dev, "Receive errors = %lld\n",
	    (long long)adapter->stats.rxerrc);
	device_printf(dev, "Crc errors = %lld\n",
	    (long long)adapter->stats.crcerrs);
	device_printf(dev, "Alignment errors = %lld\n",
	    (long long)adapter->stats.algnerrc);
	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
	    (long long)adapter->stats.cexterr);
	device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);
	/* Per-vector interrupt counts (only nonzero under MSIX) */
	device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
	    " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
	    adapter->tx_irq , adapter->link_irq);
	/* Flow control pause frames received/transmitted */
	device_printf(dev, "XON Rcvd = %lld\n",
	    (long long)adapter->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n",
	    (long long)adapter->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n",
	    (long long)adapter->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n",
	    (long long)adapter->stats.xofftxc);
	device_printf(dev, "Good Packets Rcvd = %lld\n",
	    (long long)adapter->stats.gprc);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
	    (long long)adapter->stats.gptc);
	device_printf(dev, "TSO Contexts Xmtd = %lld\n",
	    (long long)adapter->stats.tsctc);
	device_printf(dev, "TSO Contexts Failed = %lld\n",
	    (long long)adapter->stats.tsctfc);
}
5264 
5265 /**********************************************************************
5266  *
5267  *  This routine provides a way to dump out the adapter eeprom,
5268  *  often a useful debug/service tool. This only dumps the first
5269  *  32 words, stuff that matters is in that extent.
5270  *
5271  **********************************************************************/
5272 static void
5273 em_print_nvm_info(struct adapter *adapter)
5274 {
5275 	u16	eeprom_data;
5276 	int	i, j, row = 0;
5277 
5278 	/* Its a bit crude, but it gets the job done */
5279 	printf("\nInterface EEPROM Dump:\n");
5280 	printf("Offset\n0x0000  ");
5281 	for (i = 0, j = 0; i < 32; i++, j++) {
5282 		if (j == 8) { /* Make the offset block */
5283 			j = 0; ++row;
5284 			printf("\n0x00%x0  ",row);
5285 		}
5286 		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5287 		printf("%04x ", eeprom_data);
5288 	}
5289 	printf("\n");
5290 }
5291 
5292 static int
5293 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5294 {
5295 	struct adapter *adapter;
5296 	int error;
5297 	int result;
5298 
5299 	result = -1;
5300 	error = sysctl_handle_int(oidp, &result, 0, req);
5301 
5302 	if (error || !req->newptr)
5303 		return (error);
5304 
5305 	if (result == 1) {
5306 		adapter = (struct adapter *)arg1;
5307 		em_print_debug_info(adapter);
5308 	}
5309 	/*
5310 	 * This value will cause a hex dump of the
5311 	 * first 32 16-bit words of the EEPROM to
5312 	 * the screen.
5313 	 */
5314 	if (result == 2) {
5315 		adapter = (struct adapter *)arg1;
5316 		em_print_nvm_info(adapter);
5317         }
5318 
5319 	return (error);
5320 }
5321 
5322 
5323 static int
5324 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5325 {
5326 	struct adapter *adapter;
5327 	int error;
5328 	int result;
5329 
5330 	result = -1;
5331 	error = sysctl_handle_int(oidp, &result, 0, req);
5332 
5333 	if (error || !req->newptr)
5334 		return (error);
5335 
5336 	if (result == 1) {
5337 		adapter = (struct adapter *)arg1;
5338 		em_print_hw_stats(adapter);
5339 	}
5340 
5341 	return (error);
5342 }
5343 
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u32 regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Reject values that would not fit the 16-bit register field. */
	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = EM_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_CORE_LOCK(adapter);
	/* Read-modify-write: only the low 16 bits carry the delay. */
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
		break;
	case E1000_TIDV:
		if (ticks == 0) {
			/* Delay disabled: stop requesting delayed interrupts
			 * on transmit descriptors. */
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_CORE_UNLOCK(adapter);
	return (0);
}
5386 
5387 static void
5388 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5389 	const char *description, struct em_int_delay_info *info,
5390 	int offset, int value)
5391 {
5392 	info->adapter = adapter;
5393 	info->offset = offset;
5394 	info->value = value;
5395 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5396 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5397 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5398 	    info, 0, em_sysctl_int_delay, "I", description);
5399 }
5400 
5401 #ifndef EM_LEGACY_IRQ
5402 static void
5403 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5404 	const char *description, int *limit, int value)
5405 {
5406 	*limit = value;
5407 	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5408 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5409 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
5410 }
5411 #endif
5412 
5413 #ifdef EM_TIMESYNC
5414 /*
5415  * Initialize the Time Sync Feature
5416  */
static int
em_tsync_init(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	u32		tx_ctl, rx_ctl;


	/* Program the SYSTIM increment: period in the low 24 bits. */
	E1000_WRITE_REG(&adapter->hw, E1000_TIMINCA, (1<<24) |
	    20833/PICOSECS_PER_TICK);

	/* Capture the current 64-bit hardware time (low then high). */
	adapter->last_stamp =  E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
	adapter->last_stamp |= (u64)E1000_READ_REG(&adapter->hw,
	    E1000_SYSTIMH) << 32ULL;

	/* Enable the TX side */
	tx_ctl =  E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	tx_ctl |= 0x10;		/* bit 4: TX timestamping enable */
	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
	E1000_WRITE_FLUSH(&adapter->hw);

	/* Read back to confirm the hardware accepted the enable bit. */
	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	if ((tx_ctl & 0x10) == 0) {
     		device_printf(dev, "Failed to enable TX timestamping\n");
		return (ENXIO);
	}

	/* Enable RX */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	rx_ctl |= 0x10; /* Enable the feature */
	rx_ctl |= 0x0a; /* This value turns on Ver 1 and 2 */
	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);

	/*
	 * Ethertype Stamping (Ethertype = 0x88F7)
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RXMTRL, htonl(0x440088f7));

	/*
	 * Source Port Queue Filter Setup:
	 *  this is for UDP port filtering
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RXUDP, htons(TSYNC_PORT));
	/* Protocol = UDP, enable Timestamp, and filter on source/protocol */

	E1000_WRITE_FLUSH(&adapter->hw);

	/* Read back to confirm the hardware accepted the enable bit. */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	if ((rx_ctl & 0x10) == 0) {
     		device_printf(dev, "Failed to enable RX timestamping\n");
		return (ENXIO);
	}

	device_printf(dev, "IEEE 1588 Precision Time Protocol enabled\n");

	return (0);
}
5473 
5474 /*
5475  * Disable the Time Sync Feature
5476  */
static void
em_tsync_disable(struct adapter *adapter)
{
	u32		tx_ctl, rx_ctl;

	/* Clear the TX timestamping enable bit (bit 4). */
	tx_ctl =  E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	tx_ctl &= ~0x10;
	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
	E1000_WRITE_FLUSH(&adapter->hw);

	/* Invalidate TX Timestamp */
	E1000_READ_REG(&adapter->hw, E1000_TXSTMPH);

	/* Verify the enable bit actually cleared; debug-log if not. */
	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	if (tx_ctl & 0x10)
     		HW_DEBUGOUT("Failed to disable TX timestamping\n");

	/* Clear the RX timestamping enable bit (bit 4). */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	rx_ctl &= ~0x10;

	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);
	E1000_WRITE_FLUSH(&adapter->hw);

	/* Invalidate RX Timestamp */
	E1000_READ_REG(&adapter->hw, E1000_RXSATRH);

	/* Verify the enable bit actually cleared; debug-log if not. */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	if (rx_ctl & 0x10)
		HW_DEBUGOUT("Failed to disable RX timestamping\n");

	return;
}
5509 #endif /* EM_TIMESYNC */
5510