xref: /freebsd/sys/dev/e1000/if_em.c (revision 00a5db46de56179184c0f000eaacad695e2b0859)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2009, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
37 #include "opt_inet.h"
38 #endif
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/buf_ring.h>
43 #include <sys/bus.h>
44 #include <sys/endian.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/module.h>
50 #include <sys/rman.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/taskqueue.h>
55 #include <sys/eventhandler.h>
56 #ifdef EM_TIMESYNC
57 #include <sys/ioccom.h>
58 #include <sys/time.h>
59 #endif
60 #include <machine/bus.h>
61 #include <machine/resource.h>
62 
63 #include <net/bpf.h>
64 #include <net/ethernet.h>
65 #include <net/if.h>
66 #include <net/if_arp.h>
67 #include <net/if_dl.h>
68 #include <net/if_media.h>
69 
70 #include <net/if_types.h>
71 #include <net/if_vlan_var.h>
72 
73 #include <netinet/in_systm.h>
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 #include <netinet/ip.h>
77 #include <netinet/ip6.h>
78 #include <netinet/tcp.h>
79 #include <netinet/udp.h>
80 
81 #include <machine/in_cksum.h>
82 #include <dev/pci/pcivar.h>
83 #include <dev/pci/pcireg.h>
84 
85 #include "e1000_api.h"
86 #include "e1000_82571.h"
87 #include "if_em.h"
88 
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
/* NOTE(review): readers of this flag live outside this chunk —
 * presumably it gates debug statistics printing; confirm at call sites. */
int	em_display_debug_stats = 0;

/*********************************************************************
 *  Driver version:
 *********************************************************************/
/* Appended to the branding string reported by em_probe(). */
char em_driver_version[] = "6.9.9";
99 
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into e1000_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

/* Scanned linearly by em_probe(); PCI_ANY_ID entries act as wildcards
 * for the subvendor/subdevice match.  The final index selects the
 * branding string in em_strings[]. */
static em_vendor_info_t em_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82573E,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573E_IAMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573L,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_BM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82574L,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry: vendor_id == 0 terminates the probe loop */
	{ 0, 0, 0, 0, 0}
};
215 
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

/* Indexed by the last field of em_vendor_info_array entries (ent->index
 * in em_probe); currently a single shared branding string. */
static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
223 
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	em_probe(device_t);
static int	em_attach(device_t);
static int	em_detach(device_t);
static int	em_shutdown(device_t);
static int	em_suspend(device_t);
static int	em_resume(device_t);
static void	em_start(struct ifnet *);
static void	em_start_locked(struct ifnet *ifp);
static int	em_ioctl(struct ifnet *, u_long, caddr_t);
static void	em_watchdog(struct adapter *);
static void	em_init(void *);
static void	em_init_locked(struct adapter *);
static void	em_stop(void *);
static void	em_media_status(struct ifnet *, struct ifmediareq *);
static int	em_media_change(struct ifnet *);
static void	em_identify_hardware(struct adapter *);
static int	em_allocate_pci_resources(struct adapter *);
static int	em_allocate_legacy(struct adapter *adapter);
static int	em_allocate_msix(struct adapter *adapter);
static int	em_setup_msix(struct adapter *);
static void	em_free_pci_resources(struct adapter *);
static void	em_local_timer(void *);
static int	em_hardware_init(struct adapter *);
static void	em_setup_interface(device_t, struct adapter *);
static void	em_setup_transmit_structures(struct adapter *);
static void	em_initialize_transmit_unit(struct adapter *);
static int	em_setup_receive_structures(struct adapter *);
static void	em_initialize_receive_unit(struct adapter *);
static void	em_enable_intr(struct adapter *);
static void	em_disable_intr(struct adapter *);
static void	em_free_transmit_structures(struct adapter *);
static void	em_free_receive_structures(struct adapter *);
static void	em_update_stats_counters(struct adapter *);
static void	em_txeof(struct adapter *);
static void	em_tx_purge(struct adapter *);
static int	em_allocate_receive_structures(struct adapter *);
static int	em_allocate_transmit_structures(struct adapter *);
static int	em_rxeof(struct adapter *, int);
#ifndef __NO_STRICT_ALIGNMENT
static int	em_fixup_rx(struct adapter *);
#endif
static void	em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static void	em_transmit_checksum_setup(struct adapter *, struct mbuf *,
		    u32 *, u32 *);
#if __FreeBSD_version >= 700000
static bool	em_tso_setup(struct adapter *, struct mbuf *,
		    u32 *, u32 *);
#endif /* FreeBSD_version >= 700000 */
static void	em_set_promisc(struct adapter *);
static void	em_disable_promisc(struct adapter *);
static void	em_set_multi(struct adapter *);
static void	em_print_hw_stats(struct adapter *);
static void	em_update_link_status(struct adapter *);
static int	em_get_buf(struct adapter *, int);
static void	em_register_vlan(void *, struct ifnet *, u16);
static void	em_unregister_vlan(void *, struct ifnet *, u16);
static int	em_xmit(struct adapter *, struct mbuf **);
static void	em_smartspeed(struct adapter *);
/* 82547-specific TX FIFO workaround helpers */
static int	em_82547_fifo_workaround(struct adapter *, int);
static void	em_82547_update_fifo_head(struct adapter *, int);
static int	em_82547_tx_fifo_reset(struct adapter *);
static void	em_82547_move_tail(void *);
static int	em_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *, int);
static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
static void	em_print_debug_info(struct adapter *);
static void	em_print_nvm_info(struct adapter *);
static int 	em_is_valid_ether_addr(u8 *);
static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u32	em_fill_descriptors (bus_addr_t address, u32 length,
		    PDESC_ARRAY desc_array);
static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void	em_add_int_delay_sysctl(struct adapter *, const char *,
		    const char *, struct em_int_delay_info *, int, int);
/* Management and WOL Support */
static void	em_init_manageability(struct adapter *);
static void	em_release_manageability(struct adapter *);
static void     em_get_hw_control(struct adapter *);
static void     em_release_hw_control(struct adapter *);
static void     em_enable_wakeup(device_t);

#ifdef EM_TIMESYNC
/* Precision Time sync support */
static int	em_tsync_init(struct adapter *);
static void	em_tsync_disable(struct adapter *);
#endif

/* Interrupt handling: a single legacy handler, or fast/filtered
 * handlers plus taskqueue deferrals (and MSI-X per-queue handlers). */
#ifdef EM_LEGACY_IRQ
static void	em_intr(void *);
#else /* FAST IRQ */
#if __FreeBSD_version < 700000
static void	em_irq_fast(void *);
#else
static int	em_irq_fast(void *);
#endif

/* MSIX handlers */
static void	em_msix_tx(void *);
static void	em_msix_rx(void *);
static void	em_msix_link(void *);
static void	em_handle_rx(void *context, int pending);
static void	em_handle_tx(void *context, int pending);

static void	em_handle_rxtx(void *context, int pending);
static void	em_handle_link(void *context, int pending);
static void	em_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
#endif /* ~EM_LEGACY_IRQ */

#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif /* POLLING */
341 
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	DEVMETHOD(device_suspend, em_suspend),
	DEVMETHOD(device_resume, em_resume),
	{0, 0}	/* terminator */
};

static driver_t em_driver = {
	/* softc is the per-device struct adapter, allocated by newbus */
	"em", em_methods, sizeof(struct adapter),
};

static devclass_t em_devclass;
/* Attach the driver to the pci bus and declare module dependencies. */
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
365 
/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/* Convert between hardware delay-timer ticks (1.024 usec units) and
 * microseconds, rounding to nearest. */
#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
#define M_TSO_LEN			66

/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO	0
#endif

/* Defaults for the interrupt-delay sysctls, overridable via loader
 * tunables below. */
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;
static int em_smart_pwr_down = FALSE;
/* Controls whether promiscuous also shows bad packets */
static int em_debug_sbp = FALSE;
/* Local switch for MSI/MSIX */
static int em_enable_msi = TRUE;

TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);

#ifndef EM_LEGACY_IRQ
/* How many packets rxeof tries to clean at a time */
static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
#endif

/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
409 
410 /*********************************************************************
411  *  Device identification routine
412  *
413  *  em_probe determines if the driver should be loaded on
414  *  adapter based on PCI vendor/device id of the adapter.
415  *
416  *  return BUS_PROBE_DEFAULT on success, positive on failure
417  *********************************************************************/
418 
419 static int
420 em_probe(device_t dev)
421 {
422 	char		adapter_name[60];
423 	u16		pci_vendor_id = 0;
424 	u16		pci_device_id = 0;
425 	u16		pci_subvendor_id = 0;
426 	u16		pci_subdevice_id = 0;
427 	em_vendor_info_t *ent;
428 
429 	INIT_DEBUGOUT("em_probe: begin");
430 
431 	pci_vendor_id = pci_get_vendor(dev);
432 	if (pci_vendor_id != EM_VENDOR_ID)
433 		return (ENXIO);
434 
435 	pci_device_id = pci_get_device(dev);
436 	pci_subvendor_id = pci_get_subvendor(dev);
437 	pci_subdevice_id = pci_get_subdevice(dev);
438 
439 	ent = em_vendor_info_array;
440 	while (ent->vendor_id != 0) {
441 		if ((pci_vendor_id == ent->vendor_id) &&
442 		    (pci_device_id == ent->device_id) &&
443 
444 		    ((pci_subvendor_id == ent->subvendor_id) ||
445 		    (ent->subvendor_id == PCI_ANY_ID)) &&
446 
447 		    ((pci_subdevice_id == ent->subdevice_id) ||
448 		    (ent->subdevice_id == PCI_ANY_ID))) {
449 			sprintf(adapter_name, "%s %s",
450 				em_strings[ent->index],
451 				em_driver_version);
452 			device_set_desc_copy(dev, adapter_name);
453 			return (BUS_PROBE_DEFAULT);
454 		}
455 		ent++;
456 	}
457 
458 	return (ENXIO);
459 }
460 
461 /*********************************************************************
462  *  Device initialization routine
463  *
464  *  The attach entry point is called when the driver is being loaded.
465  *  This routine identifies the type of hardware, allocates all resources
466  *  and initializes the hardware.
467  *
468  *  return 0 on success, positive on failure
469  *********************************************************************/
470 
471 static int
472 em_attach(device_t dev)
473 {
474 	struct adapter	*adapter;
475 	int		tsize, rsize;
476 	int		error = 0;
477 	u16		eeprom_data, device_id;
478 
479 	INIT_DEBUGOUT("em_attach: begin");
480 
481 	adapter = device_get_softc(dev);
482 	adapter->dev = adapter->osdep.dev = dev;
483 	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
484 	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
485 	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
486 
487 	/* SYSCTL stuff */
488 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
489 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
490 	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
491 	    em_sysctl_debug_info, "I", "Debug Information");
492 
493 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
494 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
495 	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
496 	    em_sysctl_stats, "I", "Statistics");
497 
498 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
499 	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
500 
501 	/* Determine hardware and mac info */
502 	em_identify_hardware(adapter);
503 
504 	/* Setup PCI resources */
505 	if (em_allocate_pci_resources(adapter)) {
506 		device_printf(dev, "Allocation of PCI resources failed\n");
507 		error = ENXIO;
508 		goto err_pci;
509 	}
510 
511 	/*
512 	** For ICH8 and family we need to
513 	** map the flash memory, and this
514 	** must happen after the MAC is
515 	** identified
516 	*/
517 	if ((adapter->hw.mac.type == e1000_ich8lan) ||
518 	    (adapter->hw.mac.type == e1000_ich9lan) ||
519 	    (adapter->hw.mac.type == e1000_ich10lan)) {
520 		int rid = EM_BAR_TYPE_FLASH;
521 		adapter->flash = bus_alloc_resource_any(dev,
522 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
523 		if (adapter->flash == NULL) {
524 			device_printf(dev, "Mapping of Flash failed\n");
525 			error = ENXIO;
526 			goto err_pci;
527 		}
528 		/* This is used in the shared code */
529 		adapter->hw.flash_address = (u8 *)adapter->flash;
530 		adapter->osdep.flash_bus_space_tag =
531 		    rman_get_bustag(adapter->flash);
532 		adapter->osdep.flash_bus_space_handle =
533 		    rman_get_bushandle(adapter->flash);
534 	}
535 
536 	/* Do Shared Code initialization */
537 	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
538 		device_printf(dev, "Setup of Shared code failed\n");
539 		error = ENXIO;
540 		goto err_pci;
541 	}
542 
543 	e1000_get_bus_info(&adapter->hw);
544 
545 	/* Set up some sysctls for the tunable interrupt delays */
546 	em_add_int_delay_sysctl(adapter, "rx_int_delay",
547 	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
548 	    E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
549 	em_add_int_delay_sysctl(adapter, "tx_int_delay",
550 	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
551 	    E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
552 	if (adapter->hw.mac.type >= e1000_82540) {
553 		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
554 		    "receive interrupt delay limit in usecs",
555 		    &adapter->rx_abs_int_delay,
556 		    E1000_REGISTER(&adapter->hw, E1000_RADV),
557 		    em_rx_abs_int_delay_dflt);
558 		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
559 		    "transmit interrupt delay limit in usecs",
560 		    &adapter->tx_abs_int_delay,
561 		    E1000_REGISTER(&adapter->hw, E1000_TADV),
562 		    em_tx_abs_int_delay_dflt);
563 	}
564 
565 #ifndef EM_LEGACY_IRQ
566 	/* Sysctls for limiting the amount of work done in the taskqueue */
567 	em_add_rx_process_limit(adapter, "rx_processing_limit",
568 	    "max number of rx packets to process", &adapter->rx_process_limit,
569 	    em_rx_process_limit);
570 #endif
571 
572 	/*
573 	 * Validate number of transmit and receive descriptors. It
574 	 * must not exceed hardware maximum, and must be multiple
575 	 * of E1000_DBA_ALIGN.
576 	 */
577 	if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
578 	    (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
579 	    (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
580 	    (em_txd < EM_MIN_TXD)) {
581 		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
582 		    EM_DEFAULT_TXD, em_txd);
583 		adapter->num_tx_desc = EM_DEFAULT_TXD;
584 	} else
585 		adapter->num_tx_desc = em_txd;
586 	if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
587 	    (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
588 	    (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
589 	    (em_rxd < EM_MIN_RXD)) {
590 		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
591 		    EM_DEFAULT_RXD, em_rxd);
592 		adapter->num_rx_desc = EM_DEFAULT_RXD;
593 	} else
594 		adapter->num_rx_desc = em_rxd;
595 
596 	adapter->hw.mac.autoneg = DO_AUTO_NEG;
597 	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
598 	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
599 	adapter->rx_buffer_len = 2048;
600 
601 	e1000_init_script_state_82541(&adapter->hw, TRUE);
602 	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
603 
604 	/* Copper options */
605 	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
606 		adapter->hw.phy.mdix = AUTO_ALL_MODES;
607 		adapter->hw.phy.disable_polarity_correction = FALSE;
608 		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
609 	}
610 
611 	/*
612 	 * Set the frame limits assuming
613 	 * standard ethernet sized frames.
614 	 */
615 	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
616 	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
617 
618 	/*
619 	 * This controls when hardware reports transmit completion
620 	 * status.
621 	 */
622 	adapter->hw.mac.report_tx_early = 1;
623 
624 	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
625 	    EM_DBA_ALIGN);
626 
627 	/* Allocate Transmit Descriptor ring */
628 	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
629 		device_printf(dev, "Unable to allocate tx_desc memory\n");
630 		error = ENOMEM;
631 		goto err_tx_desc;
632 	}
633 	adapter->tx_desc_base =
634 	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
635 
636 	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
637 	    EM_DBA_ALIGN);
638 
639 	/* Allocate Receive Descriptor ring */
640 	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
641 		device_printf(dev, "Unable to allocate rx_desc memory\n");
642 		error = ENOMEM;
643 		goto err_rx_desc;
644 	}
645 	adapter->rx_desc_base =
646 	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
647 
648 	/*
649 	** Start from a known state, this is
650 	** important in reading the nvm and
651 	** mac from that.
652 	*/
653 	e1000_reset_hw(&adapter->hw);
654 
655 	/* Make sure we have a good EEPROM before we read from it */
656 	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
657 		/*
658 		** Some PCI-E parts fail the first check due to
659 		** the link being in sleep state, call it again,
660 		** if it fails a second time its a real issue.
661 		*/
662 		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
663 			device_printf(dev,
664 			    "The EEPROM Checksum Is Not Valid\n");
665 			error = EIO;
666 			goto err_hw_init;
667 		}
668 	}
669 
670 	/* Copy the permanent MAC address out of the EEPROM */
671 	if (e1000_read_mac_addr(&adapter->hw) < 0) {
672 		device_printf(dev, "EEPROM read error while reading MAC"
673 		    " address\n");
674 		error = EIO;
675 		goto err_hw_init;
676 	}
677 
678 	if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
679 		device_printf(dev, "Invalid MAC address\n");
680 		error = EIO;
681 		goto err_hw_init;
682 	}
683 
684 	/* Initialize the hardware */
685 	if (em_hardware_init(adapter)) {
686 		device_printf(dev, "Unable to initialize the hardware\n");
687 		error = EIO;
688 		goto err_hw_init;
689 	}
690 
691 	/* Allocate transmit descriptors and buffers */
692 	if (em_allocate_transmit_structures(adapter)) {
693 		device_printf(dev, "Could not setup transmit structures\n");
694 		error = ENOMEM;
695 		goto err_tx_struct;
696 	}
697 
698 	/* Allocate receive descriptors and buffers */
699 	if (em_allocate_receive_structures(adapter)) {
700 		device_printf(dev, "Could not setup receive structures\n");
701 		error = ENOMEM;
702 		goto err_rx_struct;
703 	}
704 
705 	/*
706 	**  Do interrupt configuration
707 	*/
708 	if (adapter->msi > 1) /* Do MSI/X */
709 		error = em_allocate_msix(adapter);
710 	else  /* MSI or Legacy */
711 		error = em_allocate_legacy(adapter);
712 	if (error)
713 		goto err_rx_struct;
714 
715 	/* Setup OS specific network interface */
716 	em_setup_interface(dev, adapter);
717 
718 	/* Initialize statistics */
719 	em_update_stats_counters(adapter);
720 
721 	adapter->hw.mac.get_link_status = 1;
722 	em_update_link_status(adapter);
723 
724 	/* Indicate SOL/IDER usage */
725 	if (e1000_check_reset_block(&adapter->hw))
726 		device_printf(dev,
727 		    "PHY reset is blocked due to SOL/IDER session.\n");
728 
729 	/* Determine if we have to control management hardware */
730 	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
731 
732 	/*
733 	 * Setup Wake-on-Lan
734 	 */
735 	switch (adapter->hw.mac.type) {
736 
737 	case e1000_82542:
738 	case e1000_82543:
739 		break;
740 	case e1000_82546:
741 	case e1000_82546_rev_3:
742 	case e1000_82571:
743 	case e1000_80003es2lan:
744 		if (adapter->hw.bus.func == 1)
745 			e1000_read_nvm(&adapter->hw,
746 			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
747 		else
748 			e1000_read_nvm(&adapter->hw,
749 			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
750 		eeprom_data &= EM_EEPROM_APME;
751 		break;
752 	default:
753 		/* APME bit in EEPROM is mapped to WUC.APME */
754 		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) &
755 		    E1000_WUC_APME;
756 		break;
757 	}
758 	if (eeprom_data)
759 		adapter->wol = E1000_WUFC_MAG;
760 	/*
761          * We have the eeprom settings, now apply the special cases
762          * where the eeprom may be wrong or the board won't support
763          * wake on lan on a particular port
764 	 */
765 	device_id = pci_get_device(dev);
766         switch (device_id) {
767 	case E1000_DEV_ID_82546GB_PCIE:
768 		adapter->wol = 0;
769 		break;
770 	case E1000_DEV_ID_82546EB_FIBER:
771 	case E1000_DEV_ID_82546GB_FIBER:
772 	case E1000_DEV_ID_82571EB_FIBER:
773 		/* Wake events only supported on port A for dual fiber
774 		 * regardless of eeprom setting */
775 		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
776 		    E1000_STATUS_FUNC_1)
777 			adapter->wol = 0;
778 		break;
779 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
780 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
781 	case E1000_DEV_ID_82571EB_QUAD_FIBER:
782 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
783                 /* if quad port adapter, disable WoL on all but port A */
784 		if (global_quad_port_a != 0)
785 			adapter->wol = 0;
786 		/* Reset for multiple quad port adapters */
787 		if (++global_quad_port_a == 4)
788 			global_quad_port_a = 0;
789                 break;
790 	}
791 
792 	/* Do we need workaround for 82544 PCI-X adapter? */
793 	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
794 	    adapter->hw.mac.type == e1000_82544)
795 		adapter->pcix_82544 = TRUE;
796 	else
797 		adapter->pcix_82544 = FALSE;
798 
799 	/* Register for VLAN events */
800 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
801 	    em_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
802 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
803 	    em_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
804 
805 	/* Tell the stack that the interface is not active */
806 	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
807 
808 	INIT_DEBUGOUT("em_attach: end");
809 
810 	return (0);
811 
812 err_rx_struct:
813 	em_free_transmit_structures(adapter);
814 err_tx_struct:
815 err_hw_init:
816 	em_release_hw_control(adapter);
817 	em_dma_free(adapter, &adapter->rxdma);
818 err_rx_desc:
819 	em_dma_free(adapter, &adapter->txdma);
820 err_tx_desc:
821 err_pci:
822 	em_free_pci_resources(adapter);
823 	EM_TX_LOCK_DESTROY(adapter);
824 	EM_RX_LOCK_DESTROY(adapter);
825 	EM_CORE_LOCK_DESTROY(adapter);
826 
827 	return (error);
828 }
829 
830 /*********************************************************************
831  *  Device removal routine
832  *
833  *  The detach entry point is called when the driver is being removed.
834  *  This routine stops the adapter and deallocates all the resources
835  *  that were allocated for driver operation.
836  *
837  *  return 0 on success, positive on failure
838  *********************************************************************/
839 
static int
em_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
#if __FreeBSD_version >= 700000
	if (adapter->ifp->if_vlantrunk != NULL) {
#else
	if (adapter->ifp->if_nvlans != 0) {
#endif
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/*
	 * Quiesce the adapter: set in_detach so the ioctl path becomes a
	 * no-op, stop the transmit/receive units, and reset the PHY, all
	 * under both the core and TX locks.
	 */
	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	adapter->in_detach = 1;
	em_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	em_release_manageability(adapter);

	/*
	 * On parts where management firmware can share the NIC, give
	 * control of the hardware back to the firmware when a
	 * manageability session is active.
	 */
	if (((adapter->hw.mac.type == e1000_82573) ||
	    (adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_ich10lan) ||
	    (adapter->hw.mac.type == e1000_ich9lan)) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_release_hw_control(adapter);

	/* Arm wake-on-LAN if it was configured during attach. */
	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		em_enable_wakeup(dev);
	}

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the stack, then wait out any pending callouts. */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	/* Release software queues and descriptor/buffer structures. */
	drbr_free(adapter->br, M_DEVBUF);
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		em_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		em_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
923 
924 /*********************************************************************
925  *
926  *  Shutdown entry point
927  *
928  **********************************************************************/
929 
930 static int
931 em_shutdown(device_t dev)
932 {
933 	return em_suspend(dev);
934 }
935 
936 /*
937  * Suspend/resume device methods.
938  */
static int
em_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	EM_CORE_LOCK(adapter);

	/* Halt TX/RX under the TX lock before touching wake state. */
	EM_TX_LOCK(adapter);
	em_stop(adapter);
	EM_TX_UNLOCK(adapter);

        em_release_manageability(adapter);

	/*
	 * Hand the hardware back to management firmware on parts that
	 * share the NIC with it, when a manageability session is active.
	 */
        if (((adapter->hw.mac.type == e1000_82573) ||
            (adapter->hw.mac.type == e1000_ich8lan) ||
            (adapter->hw.mac.type == e1000_ich10lan) ||
            (adapter->hw.mac.type == e1000_ich9lan)) &&
            e1000_check_mng_mode(&adapter->hw))
                em_release_hw_control(adapter);

	/* Program wake-up control/filter registers and enable PME. */
        if (adapter->wol) {
                E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
                E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
                em_enable_wakeup(dev);
        }

	EM_CORE_UNLOCK(adapter);

	return bus_generic_suspend(dev);
}
969 
970 static int
971 em_resume(device_t dev)
972 {
973 	struct adapter *adapter = device_get_softc(dev);
974 	struct ifnet *ifp = adapter->ifp;
975 
976 	EM_CORE_LOCK(adapter);
977 	em_init_locked(adapter);
978 	em_init_manageability(adapter);
979 	EM_CORE_UNLOCK(adapter);
980 	em_start(ifp);
981 
982 	return bus_generic_resume(dev);
983 }
984 
985 
986 /*********************************************************************
987  *  Transmit entry point
988  *
989  *  em_start is called by the stack to initiate a transmit.
990  *  The driver will remain in this routine as long as there are
991  *  packets to transmit and transmit resources are available.
992  *  In case resources are not available stack is notified and
993  *  the packet is requeued.
994  **********************************************************************/
995 
996 #ifdef IFNET_BUF_RING
static int
em_transmit_locked(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	int error;

	EM_TX_LOCK_ASSERT(adapter);
	/*
	 * If the interface is not up and running, or the link is down,
	 * just queue the frame on the buf_ring for later.
	 */
	if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
	    || (!adapter->link_active)) {
		error = drbr_enqueue(ifp, adapter->br, m);
		return (error);
	} else if (ADAPTER_RING_EMPTY(adapter) &&
	    (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
		/*
		 * Fast path: ring is empty and descriptors are plentiful,
		 * so try to hand the frame straight to the hardware.
		 * em_xmit() may modify or NULL the mbuf pointer on failure.
		 */
		if (em_xmit(adapter, &m)) {
			/* Encap failed: requeue if the mbuf survived. */
			if (m && (error = drbr_enqueue(ifp, adapter->br, m)) != 0)
				return (error);
		} else {
			/*
			 * We've bypassed the buf ring so we need to update
			 * ifp directly
			 */
			drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
			/*
			** Send a copy of the frame to the BPF
			** listener and set the watchdog on.
			*/
			ETHER_BPF_MTAP(ifp, m);
			adapter->watchdog_timer = EM_TX_TIMEOUT;
		}
	} else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
		return (error);

	/* Drain anything still pending on the buf_ring. */
	if (!ADAPTER_RING_EMPTY(adapter))
		em_start_locked(ifp);

	return (0);
}
1035 
1036 static int
1037 em_transmit(struct ifnet *ifp, struct mbuf *m)
1038 {
1039 
1040 	struct adapter *adapter = ifp->if_softc;
1041 	int error = 0;
1042 
1043 	if(EM_TX_TRYLOCK(adapter)) {
1044 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1045 			error = em_transmit_locked(ifp, m);
1046 		EM_TX_UNLOCK(adapter);
1047 	} else
1048 		error = drbr_enqueue(ifp, adapter->br, m);
1049 
1050 	return (error);
1051 }
1052 
1053 static void
1054 em_qflush(struct ifnet *ifp)
1055 {
1056 	struct mbuf *m;
1057 	struct adapter *adapter = (struct adapter *)ifp->if_softc;
1058 
1059 	EM_TX_LOCK(adapter);
1060 	while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
1061 		m_freem(m);
1062 	if_qflush(ifp);
1063 	EM_TX_UNLOCK(adapter);
1064 }
1065 #endif
1066 
static void
em_start_locked(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	EM_TX_LOCK_ASSERT(adapter);

	/* Nothing to do unless the interface is up and not flow-blocked. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	/* Keep transmitting while frames and descriptors remain. */
	while ((adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)
	    && (!ADAPTER_RING_EMPTY(adapter))) {

		m_head = em_dequeue(ifp, adapter->br);
		if (m_head == NULL)
			break;
		/*
		 *  Encapsulation can modify our pointer, and or make it
		 *  NULL on failure.  In that event, we can't requeue.
		 */
		if (em_xmit(adapter, &m_head)) {
			if (m_head == NULL)
				break;
#ifndef IFNET_BUF_RING
			/* Out of resources: mark busy and requeue the frame. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
#endif
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		adapter->watchdog_timer = EM_TX_TIMEOUT;
	}
	/* Descriptors are nearly gone: tell the stack to back off. */
	if ((adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD))
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;

}
1111 
1112 static void
1113 em_start(struct ifnet *ifp)
1114 {
1115 	struct adapter *adapter = ifp->if_softc;
1116 
1117 	EM_TX_LOCK(adapter);
1118 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1119 		em_start_locked(ifp);
1120 	EM_TX_UNLOCK(adapter);
1121 }
1122 
1123 /*********************************************************************
1124  *  Ioctl entry point
1125  *
1126  *  em_ioctl is called when the user wants to configure the
1127  *  interface.
1128  *
1129  *  return 0 on success, positive on failure
1130  **********************************************************************/
1131 
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int error = 0;

	/* Ignore ioctls once em_detach() has started tearing us down. */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			/*
			 * XXX
			 * Since resetting hardware takes a very long time
			 * and results in link renegotiation we
			 * initialize the hardware only when it is absolutely
			 * required.
			 */
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				EM_CORE_LOCK(adapter);
				em_init_locked(adapter);
				EM_CORE_UNLOCK(adapter);
			}
			arp_ifinit(ifp, ifa);
		} else
#endif
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;
		u16 eeprom_data = 0;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		/* Per-MAC maximum frame size; not every part does jumbo. */
		EM_CORE_LOCK(adapter);
		switch (adapter->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_3GIO_3, 1, &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames - fall thru */
		case e1000_82571:
		case e1000_82572:
		case e1000_ich9lan:
		case e1000_ich10lan:
		case e1000_82574:
		case e1000_80003es2lan:	/* Limit Jumbo Frame size */
			max_frame_size = 9234;
			break;
			/* Adapters that do not support jumbo frames */
		case e1000_82542:
		case e1000_ich8lan:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		/* Accept the new MTU and re-init to apply it. */
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		em_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Already running: only reprogram the
				 * promiscuous/allmulti filters if those
				 * flags actually changed.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					em_disable_promisc(adapter);
					em_set_promisc(adapter);
				}
			} else
				em_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				em_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			/* 82542 rev 2 needs its RX unit reprogrammed too. */
			if (adapter->hw.mac.type == e1000_82542 &&
	    		    adapter->hw.revision_id == E1000_REVISION_2) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH -- set and get share ifmedia_ioctl() */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask holds the capability bits that changed. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			reinit = 1;
		}
#endif

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		/* Offload changes require a full re-init when running. */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
#if __FreeBSD_version >= 700000
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	    }

#ifdef EM_TIMESYNC
	/*
	** IOCTL support for Precision Time (IEEE 1588) Support
	*/
	case EM_TIMESYNC_READTS:
	    {
		u32 rx_ctl, tx_ctl;
		struct em_tsync_read *tdata;

		tdata = (struct em_tsync_read *) ifr->ifr_data;

		IOCTL_DEBUGOUT("Reading Timestamp\n");

		/* Optionally snapshot system + network time together. */
		if (tdata->read_current_time) {
			getnanotime(&tdata->system_time);
			tdata->network_time = E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
			tdata->network_time |=
			    (u64)E1000_READ_REG(&adapter->hw, E1000_SYSTIMH ) << 32;
		}

		rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
		tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);

		IOCTL_DEBUGOUT1("RX_CTL value = %u\n", rx_ctl);
		IOCTL_DEBUGOUT1("TX_CTL value = %u\n", tx_ctl);

		/* Bit 0 of the ctl register flags a valid latched stamp. */
		if (rx_ctl & 0x1) {
			IOCTL_DEBUGOUT("RX timestamp is valid\n");
			u32 tmp;
			unsigned char *tmp_cp;

			tdata->rx_valid = 1;
			tdata->rx_stamp = E1000_READ_REG(&adapter->hw, E1000_RXSTMPL);
			tdata->rx_stamp |= (u64)E1000_READ_REG(&adapter->hw,
			    E1000_RXSTMPH) << 32;

			/* Source ID is packed byte-wise into two registers. */
			tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRL);
			tmp_cp = (unsigned char *) &tmp;
			tdata->srcid[0] = tmp_cp[0];
			tdata->srcid[1] = tmp_cp[1];
			tdata->srcid[2] = tmp_cp[2];
			tdata->srcid[3] = tmp_cp[3];
			tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRH);
			tmp_cp = (unsigned char *) &tmp;
			tdata->srcid[4] = tmp_cp[0];
			tdata->srcid[5] = tmp_cp[1];
			tdata->seqid = tmp >> 16;
			tdata->seqid = htons(tdata->seqid);
		} else
			tdata->rx_valid = 0;

		if (tx_ctl & 0x1) {
			IOCTL_DEBUGOUT("TX timestamp is valid\n");
			tdata->tx_valid = 1;
			tdata->tx_stamp = E1000_READ_REG(&adapter->hw, E1000_TXSTMPL);
			tdata->tx_stamp |= (u64) E1000_READ_REG(&adapter->hw,
			    E1000_TXSTMPH) << 32;
		} else
			tdata->tx_valid = 0;

		return (0);
	    }
#endif	/* EM_TIMESYNC */

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1394 
1395 /*********************************************************************
1396  *  Watchdog timer:
1397  *
1398  *  This routine is called from the local timer every second.
1399  *  As long as transmit descriptors are being cleaned the value
1400  *  is non-zero and we do nothing. Reaching 0 indicates a tx hang
1401  *  and we then reset the device.
1402  *
1403  **********************************************************************/
1404 
static void
em_watchdog(struct adapter *adapter)
{

	EM_CORE_LOCK_ASSERT(adapter);

	/*
	** The timer is set to 5 every time start queues a packet.
	** Then txeof keeps resetting it as long as it cleans at
	** least one descriptor.
	** Finally, anytime all descriptors are clean the timer is
	** set to 0.
	*/
	EM_TX_LOCK(adapter);
	/* Idle (0) or still counting down: no hang, nothing to do. */
	if ((adapter->watchdog_timer == 0) || (--adapter->watchdog_timer)) {
		EM_TX_UNLOCK(adapter);
		return;
	}

	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
	    E1000_STATUS_TXOFF) {
		adapter->watchdog_timer = EM_TX_TIMEOUT;
		EM_TX_UNLOCK(adapter);
		return;
	}

	/* Genuine TX hang: log it (when the link is up) and reinit. */
	if (e1000_check_for_link(&adapter->hw) == 0)
		device_printf(adapter->dev, "watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	EM_TX_UNLOCK(adapter);

	em_init_locked(adapter);
}
1442 
1443 /*********************************************************************
1444  *  Init entry point
1445  *
1446  *  This routine is used in two ways. It is used by the stack as
1447  *  init entry point in network interface structure. It is also used
1448  *  by the driver as a hw/sw initialization routine to get to a
1449  *  consistent state.
1450  *
1451  *  return 0 on success, positive on failure
1452  **********************************************************************/
1453 
static void
em_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("em_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	/* Quiesce the hardware before reprogramming it. */
	EM_TX_LOCK(adapter);
	em_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* 82547 also needs the software TX FIFO workaround state. */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case e1000_82574:
			pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;
	case e1000_ich9lan:
	case e1000_ich10lan:
#define E1000_PBA_10K	0x000A
		pba = E1000_PBA_10K;
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset, we make a duplicate
	 * in RAR[14] for that eventuality, this assures
	 * the interface continues to function.
	 */
	if (adapter->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&adapter->hw, TRUE);
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware; on failure leave the interface down. */
	if (em_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	em_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Enable hardware VLAN tag stripping unless HW filtering is on. */
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
	    ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) {
		u32 ctrl;
		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	}


	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 700000
		if (ifp->if_capenable & IFCAP_TSO4)
			ifp->if_hwassist |= CSUM_TSO;
#endif
	}

	/* Configure for OS presence */
	em_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	em_setup_transmit_structures(adapter);
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		em_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	/* Mark the interface up and restart the stats/watchdog timer. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;
		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		** Set the IVAR - interrupt vector routing.
		** Each nibble represents a vector, high bit
		** is enable, other 3 bits are the MSIX table
		** entry, we map RXQ0 to 0, TXQ0 to 1, and
		** Link (other) to 2, hence the magic number.
		*/
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

#ifdef EM_TIMESYNC
	/* Initialize IEEE 1588 Precision Time hardware */
	if ((adapter->hw.mac.type == e1000_82574) ||
	    (adapter->hw.mac.type == e1000_ich10lan))
		em_tsync_init(adapter);
#endif

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy.reset_disable = TRUE;
}
1638 
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	/* Locked wrapper around em_init_locked() for the if_init hook. */
	EM_CORE_LOCK(sc);
	em_init_locked(sc);
	EM_CORE_UNLOCK(sc);
}
1648 
1649 
1650 #ifdef DEVICE_POLLING
1651 /*********************************************************************
1652  *
1653  *  Legacy polling routine
1654  *
1655  *********************************************************************/
static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u32		reg_icr;

	/* Bail if the interface went down since we were scheduled. */
	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	/* On a status poll, also check for link state changes. */
	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    em_local_timer, adapter);
		}
	}
	EM_CORE_UNLOCK(adapter);

	/* Reap up to 'count' received frames, then clean and restart TX. */
	em_rxeof(adapter, count);

	EM_TX_LOCK(adapter);
	em_txeof(adapter);

	if (!ADAPTER_RING_EMPTY(adapter))
		em_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
}
1689 #endif /* DEVICE_POLLING */
1690 
1691 #ifdef EM_LEGACY_IRQ
1692 /*********************************************************************
1693  *
1694  *  Legacy Interrupt Service routine
1695  *
1696  *********************************************************************/
1697 
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_icr;


	/* DEVICE_POLLING owns the hardware; ignore stray interrupts. */
	if (ifp->if_capenable & IFCAP_POLLING)
		return;

	EM_CORE_LOCK(adapter);
	/*
	 * Reading ICR acknowledges the interrupt.  0xffffffff means the
	 * device is gone (hot eject); 0 means not ours; on 82571+ bit 31
	 * (INT_ASSERTED) must be set for the interrupt to be ours.
	 */
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
	    (adapter->hw.mac.type >= e1000_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0))
			goto out;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			goto out;

	/* Clean TX both before and after the RX pass. */
	EM_TX_LOCK(adapter);
	em_txeof(adapter);
	em_rxeof(adapter, -1);
	em_txeof(adapter);
	EM_TX_UNLOCK(adapter);

	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		em_update_link_status(adapter);
		/* Deal with TX cruft when link lost */
		em_tx_purge(adapter);
		callout_reset(&adapter->timer, hz,
		    em_local_timer, adapter);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
out:
	EM_CORE_UNLOCK(adapter);
	/* Restart transmission if anything is still queued. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !ADAPTER_RING_EMPTY(adapter))
		em_start(ifp);
}
1743 
1744 #else /* EM_FAST_IRQ, then fast interrupt routines only */
1745 
1746 static void
1747 em_handle_link(void *context, int pending)
1748 {
1749 	struct adapter	*adapter = context;
1750 	struct ifnet *ifp = adapter->ifp;
1751 
1752 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1753 		return;
1754 
1755 	EM_CORE_LOCK(adapter);
1756 	callout_stop(&adapter->timer);
1757 	em_update_link_status(adapter);
1758 	/* Deal with TX cruft when link lost */
1759 	em_tx_purge(adapter);
1760 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1761 	EM_CORE_UNLOCK(adapter);
1762 }
1763 
1764 
1765 /* Combined RX/TX handler, used by Legacy and MSI */
static void
em_handle_rxtx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp = adapter->ifp;


	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * If em_rxeof() hit its processing limit there may be
		 * more work; reschedule ourselves rather than spinning.
		 */
		if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
		EM_TX_LOCK(adapter);
		em_txeof(adapter);

		/* Push out anything still queued for transmit. */
		if (!ADAPTER_RING_EMPTY(adapter))
			em_start_locked(ifp);
		EM_TX_UNLOCK(adapter);
	}

	/* Re-enable interrupts masked off by em_irq_fast(). */
	em_enable_intr(adapter);
}
1786 
1787 /*********************************************************************
1788  *
1789  *  Fast Legacy/MSI Combined Interrupt Service routine
1790  *
1791  *********************************************************************/
#if __FreeBSD_version < 700000
#define FILTER_STRAY
#define FILTER_HANDLED
static void
#else
static int
#endif
em_irq_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	u32		reg_icr;

	ifp = adapter->ifp;

	/* Reading ICR acknowledges and clears pending interrupt causes. */
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return FILTER_STRAY;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return FILTER_STRAY;

	/*
	 * Starting with the 82571 chip, bit 31 should be used to
	 * determine whether the interrupt belongs to us.
	 */
	if (adapter->hw.mac.type >= e1000_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return FILTER_STRAY;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	em_disable_intr(adapter);
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		adapter->hw.mac.get_link_status = 1;
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
	return FILTER_HANDLED;
}
1843 
1844 /*********************************************************************
1845  *
1846  *  MSIX Interrupt Service Routines
1847  *
1848  **********************************************************************/
/* IMS bits corresponding to each 82574 MSI-X vector (see IVAR setup). */
#define EM_MSIX_TX	0x00040000
#define EM_MSIX_RX	0x00010000
#define EM_MSIX_LINK	0x00100000

static void
em_msix_tx(void *arg)
{
	struct adapter *adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	++adapter->tx_irq;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Clean completed descriptors, then defer restart to task. */
		EM_TX_LOCK(adapter);
		em_txeof(adapter);
		EM_TX_UNLOCK(adapter);
		taskqueue_enqueue(adapter->tq, &adapter->tx_task);
	}
	/* Reenable this interrupt */
	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
	return;
}
1870 
1871 /*********************************************************************
1872  *
1873  *  MSIX RX Interrupt Service routine
1874  *
1875  **********************************************************************/
1876 
1877 static void
1878 em_msix_rx(void *arg)
1879 {
1880 	struct adapter *adapter = arg;
1881 	struct ifnet	*ifp = adapter->ifp;
1882 
1883 	++adapter->rx_irq;
1884 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1885 	    (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1886 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1887 	/* Reenable this interrupt */
1888 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
1889 	return;
1890 }
1891 
1892 /*********************************************************************
1893  *
1894  *  MSIX Link Fast Interrupt Service routine
1895  *
1896  **********************************************************************/
1897 
1898 static void
1899 em_msix_link(void *arg)
1900 {
1901 	struct adapter	*adapter = arg;
1902 	u32		reg_icr;
1903 
1904 	++adapter->link_irq;
1905 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1906 
1907 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1908 		adapter->hw.mac.get_link_status = 1;
1909 		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1910 	}
1911 	E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1912 	    EM_MSIX_LINK | E1000_IMS_LSC);
1913 	return;
1914 }
1915 
1916 static void
1917 em_handle_rx(void *context, int pending)
1918 {
1919 	struct adapter	*adapter = context;
1920 	struct ifnet	*ifp = adapter->ifp;
1921 
1922 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1923 	    (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1924 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1925 
1926 }
1927 
1928 static void
1929 em_handle_tx(void *context, int pending)
1930 {
1931 	struct adapter	*adapter = context;
1932 	struct ifnet	*ifp = adapter->ifp;
1933 
1934 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1935 		if (!EM_TX_TRYLOCK(adapter))
1936 			return;
1937 
1938 		em_txeof(adapter);
1939 		if (!ADAPTER_RING_EMPTY(adapter))
1940 			em_start_locked(ifp);
1941 		EM_TX_UNLOCK(adapter);
1942 	}
1943 }
1944 #endif /* EM_FAST_IRQ */
1945 
1946 /*********************************************************************
1947  *
1948  *  Media Ioctl callback
1949  *
1950  *  This routine is called whenever the user queries the status of
1951  *  the interface using ifconfig.
1952  *
1953  **********************************************************************/
1954 static void
1955 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1956 {
1957 	struct adapter *adapter = ifp->if_softc;
1958 	u_char fiber_type = IFM_1000_SX;
1959 
1960 	INIT_DEBUGOUT("em_media_status: begin");
1961 
1962 	EM_CORE_LOCK(adapter);
1963 	em_update_link_status(adapter);
1964 
1965 	ifmr->ifm_status = IFM_AVALID;
1966 	ifmr->ifm_active = IFM_ETHER;
1967 
1968 	if (!adapter->link_active) {
1969 		EM_CORE_UNLOCK(adapter);
1970 		return;
1971 	}
1972 
1973 	ifmr->ifm_status |= IFM_ACTIVE;
1974 
1975 	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1976 	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1977 		if (adapter->hw.mac.type == e1000_82545)
1978 			fiber_type = IFM_1000_LX;
1979 		ifmr->ifm_active |= fiber_type | IFM_FDX;
1980 	} else {
1981 		switch (adapter->link_speed) {
1982 		case 10:
1983 			ifmr->ifm_active |= IFM_10_T;
1984 			break;
1985 		case 100:
1986 			ifmr->ifm_active |= IFM_100_TX;
1987 			break;
1988 		case 1000:
1989 			ifmr->ifm_active |= IFM_1000_T;
1990 			break;
1991 		}
1992 		if (adapter->link_duplex == FULL_DUPLEX)
1993 			ifmr->ifm_active |= IFM_FDX;
1994 		else
1995 			ifmr->ifm_active |= IFM_HDX;
1996 	}
1997 	EM_CORE_UNLOCK(adapter);
1998 }
1999 
2000 /*********************************************************************
2001  *
2002  *  Media Ioctl callback
2003  *
2004  *  This routine is called when the user changes speed/duplex using
2005  *  media/mediopt option with ifconfig.
2006  *
2007  **********************************************************************/
2008 static int
2009 em_media_change(struct ifnet *ifp)
2010 {
2011 	struct adapter *adapter = ifp->if_softc;
2012 	struct ifmedia  *ifm = &adapter->media;
2013 
2014 	INIT_DEBUGOUT("em_media_change: begin");
2015 
2016 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2017 		return (EINVAL);
2018 
2019 	EM_CORE_LOCK(adapter);
2020 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2021 	case IFM_AUTO:
2022 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
2023 		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
2024 		break;
2025 	case IFM_1000_LX:
2026 	case IFM_1000_SX:
2027 	case IFM_1000_T:
2028 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
2029 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
2030 		break;
2031 	case IFM_100_TX:
2032 		adapter->hw.mac.autoneg = FALSE;
2033 		adapter->hw.phy.autoneg_advertised = 0;
2034 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2035 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
2036 		else
2037 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
2038 		break;
2039 	case IFM_10_T:
2040 		adapter->hw.mac.autoneg = FALSE;
2041 		adapter->hw.phy.autoneg_advertised = 0;
2042 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2043 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
2044 		else
2045 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
2046 		break;
2047 	default:
2048 		device_printf(adapter->dev, "Unsupported media type\n");
2049 	}
2050 
2051 	/* As the speed/duplex settings my have changed we need to
2052 	 * reset the PHY.
2053 	 */
2054 	adapter->hw.phy.reset_disable = FALSE;
2055 
2056 	em_init_locked(adapter);
2057 	EM_CORE_UNLOCK(adapter);
2058 
2059 	return (0);
2060 }
2061 
2062 /*********************************************************************
2063  *
2064  *  This routine maps the mbufs to tx descriptors.
2065  *
2066  *  return 0 on success, positive on failure
2067  **********************************************************************/
2068 
/*
 * Encapsulate one mbuf chain into TX descriptors and notify the
 * hardware.  Called with the TX lock held.  On success the driver
 * owns the mbuf (freed at completion time in em_txeof()); on error
 * the mbuf is either freed and *m_headp set to NULL, or left intact
 * for the caller to retry (ENOBUFS/ENOMEM paths).
 */
static int
em_xmit(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t		map;
	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc	*ctxd = NULL;
	struct mbuf		*m_head;
	u32			txd_upper, txd_lower, txd_used, txd_saved;
	int			nsegs, i, j, first, last = 0;
	int			error, do_tso, tso_desc = 0;
#if __FreeBSD_version < 700000
	struct m_tag		*mtag;
#endif
	m_head = *m_headp;
	txd_upper = txd_lower = txd_used = txd_saved = 0;

	/* CSUM_TSO only exists on FreeBSD 7 and later */
#if __FreeBSD_version >= 700000
	do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
#else
	do_tso = 0;
#endif

        /*
         * Force a cleanup if number of TX descriptors
         * available hits the threshold
         */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_txeof(adapter);
		/* Now do we at least have a minimal? */
		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}


	/*
	 * TSO workaround:
	 *  If an mbuf is only header we need
	 *     to pull 4 bytes of data into it.
	 */
	if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
		m_head = m_pullup(m_head, M_TSO_LEN + 4);
		*m_headp = m_head;
		/* m_pullup frees the chain on failure */
		if (m_head == NULL)
			return (ENOBUFS);
	}

	/*
	 * Map the packet for DMA
	 *
	 * Capture the first descriptor index,
	 * this descriptor will have the index
	 * of the EOP which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * There are two types of errors we can (try) to handle:
	 * - EFBIG means the mbuf chain was too long and bus_dma ran
	 *   out of segments.  Defragment the mbuf chain and try again.
	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
	 *   at this point in time.  Defer sending and try again later.
	 * All other errors, in particular EINVAL, are fatal and prevent the
	 * mbuf chain from ever going through.  Drop it and report error.
	 */
	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_alloc_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error != 0) {
		/* ENOMEM and friends: leave the mbuf for a later retry */
		adapter->no_tx_dma_setup++;
		return (error);
	}

	/*
	 * TSO Hardware workaround, if this packet is not
	 * TSO, and is only a single descriptor long, and
	 * it follows a TSO burst, then we need to add a
	 * sentinel descriptor to prevent premature writeback.
	 */
	if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
		if (nsegs == 1)
			tso_desc = TRUE;
		adapter->tx_tso = FALSE;
	}

	/* Keep 2 descriptors in reserve; bail out if the frame won't fit */
        if (nsegs > (adapter->num_tx_desc_avail - 2)) {
                adapter->no_tx_desc_avail2++;
		bus_dmamap_unload(adapter->txtag, map);
		return (ENOBUFS);
        }
	m_head = *m_headp;

	/* Do hardware assists */
#if __FreeBSD_version >= 700000
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
		if (error != TRUE)
			return (ENXIO); /* something foobar */
		/* we need to make a final sentinel transmit desc */
		tso_desc = TRUE;
	} else
#endif
#ifndef EM_TIMESYNC
	/*
	** Timesync needs to check the packet header
	** so call checksum code to do so, but don't
	** penalize the code if not defined.
	*/
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
#endif
		em_transmit_checksum_setup(adapter,  m_head,
		    &txd_upper, &txd_lower);

	i = adapter->next_avail_tx_desc;
	/* Remember the start index so a mid-frame failure can rewind */
	if (adapter->pcix_82544)
		txd_saved = i;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			DESC_ARRAY	desc_array;
			u32		array_elements, counter;
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Out of room: rewind and fail */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_unload(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];
				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    (adapter->txd_cmd | txd_lower | (u16)
				    desc_array.descriptor[counter].length));
				ctxd->upper.data =
				    htole32((txd_upper));
				last = i;
				if (++i == adapter->num_tx_desc)
                                         i = 0;
				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
                        }
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];
			seg_addr = segs[j].ds_addr;
			seg_len  = segs[j].ds_len;
			/*
			** TSO Workaround:
			** If this is the last descriptor, we want to
			** split it so we have a small final sentinel
			*/
			if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
				/* Shorten the real data descriptor by 4 */
				seg_len -= 4;
				ctxd->buffer_addr = htole64(seg_addr);
				ctxd->lower.data = htole32(
				adapter->txd_cmd | txd_lower | seg_len);
				ctxd->upper.data =
				    htole32(txd_upper);
				if (++i == adapter->num_tx_desc)
					i = 0;
				/* Now make the sentinel */
				++txd_used; /* using an extra txd */
				ctxd = &adapter->tx_desc_base[i];
				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd->buffer_addr =
				    htole64(seg_addr + seg_len);
				ctxd->lower.data = htole32(
				adapter->txd_cmd | txd_lower | 4);
				ctxd->upper.data =
				    htole32(txd_upper);
				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;
			} else {
				ctxd->buffer_addr = htole64(seg_addr);
				ctxd->lower.data = htole32(
				adapter->txd_cmd | txd_lower | seg_len);
				ctxd->upper.data =
				    htole32(txd_upper);
				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;
			}
			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else {
		adapter->num_tx_desc_avail -= nsegs;
		if (tso_desc) /* TSO used an extra for sentinel */
			adapter->num_tx_desc_avail -= txd_used;
	}

        /*
	** Handle VLAN tag, this is the
	** biggest difference between
	** 6.x and 7
	*/
#if __FreeBSD_version < 700000
        /* Find out if we are in vlan mode. */
        mtag = VLAN_OUTPUT_TAG(ifp, m_head);
        if (mtag != NULL) {
                ctxd->upper.fields.special =
                    htole16(VLAN_TAG_VALUE(mtag));
#else /* FreeBSD 7 */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vtag);
#endif
                /* Tell hardware to add tag */
                ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
        }

	/*
	 * The mbuf is tracked on the first buffer, but swap the maps so
	 * the loaded map stays attached to the buffer holding the mbuf.
	 */
        tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

        /*
         * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
         */
        ctxd->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer->next_eop = last;

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* 82547 half-duplex needs the FIFO-wrap workaround path */
	if (adapter->hw.mac.type == e1000_82547 &&
	    adapter->link_duplex == HALF_DUPLEX)
		em_82547_move_tail(adapter);
	else {
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
		if (adapter->hw.mac.type == e1000_82547)
			em_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
	}

#ifdef EM_TIMESYNC
	if (ctxd->upper.data & E1000_TXD_EXTCMD_TSTAMP) {
		HW_DEBUGOUT( "@@@ Timestamp bit is set in transmit descriptor\n" );
	}
#endif
	return (0);
}
2370 
2371 /*********************************************************************
2372  *
2373  * 82547 workaround to avoid controller hang in half-duplex environment.
2374  * The workaround is to avoid queuing a large packet that would span
2375  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2376  * in this case. We do that only when FIFO is quiescent.
2377  *
2378  **********************************************************************/
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct e1000_tx_desc *tx_desc;
	u16	hw_tdt, sw_tdt, length = 0;
	bool	eop = 0;

	EM_TX_LOCK_ASSERT(adapter);

	/* Walk descriptors from the hardware tail up to the software tail */
	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		/* Accumulate the frame length across its descriptors */
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			/*
			 * If releasing this frame now would risk the
			 * FIFO-wrap hang, defer: retry from a 1-tick
			 * callout until the FIFO has drained.
			 */
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			/* Safe: advance the hardware tail past this frame */
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}
2412 
2413 static int
2414 em_82547_fifo_workaround(struct adapter *adapter, int len)
2415 {
2416 	int fifo_space, fifo_pkt_len;
2417 
2418 	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2419 
2420 	if (adapter->link_duplex == HALF_DUPLEX) {
2421 		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2422 
2423 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2424 			if (em_82547_tx_fifo_reset(adapter))
2425 				return (0);
2426 			else
2427 				return (1);
2428 		}
2429 	}
2430 
2431 	return (0);
2432 }
2433 
2434 static void
2435 em_82547_update_fifo_head(struct adapter *adapter, int len)
2436 {
2437 	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2438 
2439 	/* tx_fifo_head is always 16 byte aligned */
2440 	adapter->tx_fifo_head += fifo_pkt_len;
2441 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2442 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
2443 	}
2444 }
2445 
2446 
2447 static int
2448 em_82547_tx_fifo_reset(struct adapter *adapter)
2449 {
2450 	u32 tctl;
2451 
2452 	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
2453 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
2454 	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
2455 	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
2456 	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
2457 	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
2458 	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
2459 		/* Disable TX unit */
2460 		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2461 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
2462 		    tctl & ~E1000_TCTL_EN);
2463 
2464 		/* Reset FIFO pointers */
2465 		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
2466 		    adapter->tx_head_addr);
2467 		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2468 		    adapter->tx_head_addr);
2469 		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2470 		    adapter->tx_head_addr);
2471 		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2472 		    adapter->tx_head_addr);
2473 
2474 		/* Re-enable TX unit */
2475 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2476 		E1000_WRITE_FLUSH(&adapter->hw);
2477 
2478 		adapter->tx_fifo_head = 0;
2479 		adapter->tx_fifo_reset_cnt++;
2480 
2481 		return (TRUE);
2482 	}
2483 	else {
2484 		return (FALSE);
2485 	}
2486 }
2487 
2488 static void
2489 em_set_promisc(struct adapter *adapter)
2490 {
2491 	struct ifnet	*ifp = adapter->ifp;
2492 	u32		reg_rctl;
2493 
2494 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2495 
2496 	if (ifp->if_flags & IFF_PROMISC) {
2497 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2498 		/* Turn this on if you want to see bad packets */
2499 		if (em_debug_sbp)
2500 			reg_rctl |= E1000_RCTL_SBP;
2501 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2502 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2503 		reg_rctl |= E1000_RCTL_MPE;
2504 		reg_rctl &= ~E1000_RCTL_UPE;
2505 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2506 	}
2507 }
2508 
2509 static void
2510 em_disable_promisc(struct adapter *adapter)
2511 {
2512 	u32	reg_rctl;
2513 
2514 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2515 
2516 	reg_rctl &=  (~E1000_RCTL_UPE);
2517 	reg_rctl &=  (~E1000_RCTL_MPE);
2518 	reg_rctl &=  (~E1000_RCTL_SBP);
2519 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2520 }
2521 
2522 
2523 /*********************************************************************
2524  *  Multicast Update
2525  *
2526  *  This routine is called whenever multicast address list is updated.
2527  *
2528  **********************************************************************/
2529 
2530 static void
2531 em_set_multi(struct adapter *adapter)
2532 {
2533 	struct ifnet	*ifp = adapter->ifp;
2534 	struct ifmultiaddr *ifma;
2535 	u32 reg_rctl = 0;
2536 	u8  *mta; /* Multicast array memory */
2537 	int mcnt = 0;
2538 
2539 	IOCTL_DEBUGOUT("em_set_multi: begin");
2540 
2541 	if (adapter->hw.mac.type == e1000_82542 &&
2542 	    adapter->hw.revision_id == E1000_REVISION_2) {
2543 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2544 		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2545 			e1000_pci_clear_mwi(&adapter->hw);
2546 		reg_rctl |= E1000_RCTL_RST;
2547 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2548 		msec_delay(5);
2549 	}
2550 
2551 	/* Allocate temporary memory to setup array */
2552 	mta = malloc(sizeof(u8) *
2553 	    (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
2554 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2555 	if (mta == NULL)
2556 		panic("em_set_multi memory failure\n");
2557 
2558 	IF_ADDR_LOCK(ifp);
2559 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2560 		if (ifma->ifma_addr->sa_family != AF_LINK)
2561 			continue;
2562 
2563 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2564 			break;
2565 
2566 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2567 		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2568 		mcnt++;
2569 	}
2570 	IF_ADDR_UNLOCK(ifp);
2571 
2572 	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2573 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2574 		reg_rctl |= E1000_RCTL_MPE;
2575 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2576 	} else
2577 		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
2578 
2579 	if (adapter->hw.mac.type == e1000_82542 &&
2580 	    adapter->hw.revision_id == E1000_REVISION_2) {
2581 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2582 		reg_rctl &= ~E1000_RCTL_RST;
2583 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2584 		msec_delay(5);
2585 		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2586 			e1000_pci_set_mwi(&adapter->hw);
2587 	}
2588 	free(mta, M_DEVBUF);
2589 }
2590 
2591 
2592 /*********************************************************************
2593  *  Timer routine
2594  *
2595  *  This routine checks for link status and updates statistics.
2596  *
2597  **********************************************************************/
2598 
static void
em_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_CORE_LOCK_ASSERT(adapter);

	/* Kick the deferred RX/TX cleanup task once per tick of this timer */
	taskqueue_enqueue(adapter->tq,
	    &adapter->rxtx_task);
	em_update_link_status(adapter);
	em_update_stats_counters(adapter);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/* Optional debug dump of hardware statistics */
	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
		em_print_hw_stats(adapter);

	em_smartspeed(adapter);

	/*
	 * Each second we check the watchdog to
	 * protect against hardware hangs.
	 */
	em_watchdog(adapter);

	/* Reschedule ourselves for one second from now */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);

}
2630 
/*
 * Refresh the driver's notion of link state from the hardware and,
 * on a transition, update speed/duplex, fix up chip-specific bits,
 * and notify the network stack.
 */
static void
em_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		/* Fiber link state comes from the STATUS register LU bit */
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		/* Link came up: latch speed/duplex from the hardware */
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		/* Check if we must disable SPEED_MODE bit on PCI-E */
		if ((adapter->link_speed != SPEED_1000) &&
		    ((hw->mac.type == e1000_82571) ||
		    (hw->mac.type == e1000_82572))) {
			int tarc0;
			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~SPEED_MODE_BIT;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Link went down: clear cached state and notify the stack */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_timer = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
2698 
2699 /*********************************************************************
2700  *
2701  *  This routine disables all traffic on the adapter by issuing a
2702  *  global reset on the MAC and deallocates TX/RX buffers.
2703  *
2704  *  This routine should always be called with BOTH the CORE
2705  *  and TX locks.
2706  **********************************************************************/
2707 
static void
em_stop(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_CORE_LOCK_ASSERT(adapter);
	EM_TX_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("em_stop: begin");

	/* Quiesce interrupts and stop the periodic timers */
	em_disable_intr(adapter);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

#ifdef EM_TIMESYNC
	/* Disable IEEE 1588 Time hardware */
	if ((adapter->hw.mac.type == e1000_82574) ||
	    (adapter->hw.mac.type == e1000_ich10lan))
		em_tsync_disable(adapter);
#endif

	/* Global MAC reset stops all traffic through the adapter */
	e1000_reset_hw(&adapter->hw);
	/* Clear Wakeup Control on parts that have it (82544 and newer) */
	if (adapter->hw.mac.type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
}
2737 
2738 
2739 /*********************************************************************
2740  *
2741  *  Determine hardware revision.
2742  *
2743  **********************************************************************/
2744 static void
2745 em_identify_hardware(struct adapter *adapter)
2746 {
2747 	device_t dev = adapter->dev;
2748 
2749 	/* Make sure our PCI config space has the necessary stuff set */
2750 	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2751 	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2752 	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2753 		device_printf(dev, "Memory Access and/or Bus Master bits "
2754 		    "were not set!\n");
2755 		adapter->hw.bus.pci_cmd_word |=
2756 		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2757 		pci_write_config(dev, PCIR_COMMAND,
2758 		    adapter->hw.bus.pci_cmd_word, 2);
2759 	}
2760 
2761 	/* Save off the information about this board */
2762 	adapter->hw.vendor_id = pci_get_vendor(dev);
2763 	adapter->hw.device_id = pci_get_device(dev);
2764 	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2765 	adapter->hw.subsystem_vendor_id =
2766 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2767 	adapter->hw.subsystem_device_id =
2768 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2769 
2770 	/* Do Shared Code Init and Setup */
2771 	if (e1000_set_mac_type(&adapter->hw)) {
2772 		device_printf(dev, "Setup init failure\n");
2773 		return;
2774 	}
2775 }
2776 
/*
 * Map the register BAR, locate and map the I/O BAR on parts that need
 * it, initialize the MSIX resource arrays, and optionally set up
 * MSI/MSIX.  Returns E1000_SUCCESS or ENXIO on resource failure.
 */
static int
em_allocate_pci_resources(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	int		val, rid, error = E1000_SUCCESS;

	/* BAR 0 is the memory-mapped register window on all parts */
	rid = PCIR_BAR(0);
	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->memory);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Only older adapters use IO mapping */
	if ((adapter->hw.mac.type > e1000_82543) &&
	    (adapter->hw.mac.type < e1000_82571)) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (adapter->ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->ioport);
	}

	/*
	** Init the resource arrays
	**  used by MSIX setup
	*/
	for (int i = 0; i < 3; i++) {
		adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
		adapter->tag[i] = NULL;
		adapter->res[i] = NULL;
	}

	/*
	 * Setup MSI/X or MSI if PCI Express
	 */
	if (em_enable_msi)
		adapter->msi = em_setup_msix(adapter);

	adapter->hw.back = &adapter->osdep;

	return (error);
}
2849 
2850 /*********************************************************************
2851  *
2852  *  Setup the Legacy or MSI Interrupt handler
2853  *
2854  **********************************************************************/
int
em_allocate_legacy(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int error;

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* Legacy RID is 0; for MSI keep the RID set up by em_setup_msix() */
	if (adapter->msi == 0)
		adapter->rid[0] = 0;

	/* We allocate a single interrupt resource */
	adapter->res[0] = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res[0] == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

#ifdef EM_LEGACY_IRQ
	/*
	 * Legacy (ithread) setup: all work is done in em_intr().
	 * The 7.x+ bus_setup_intr() signature takes an extra filter
	 * argument, hence the version split.
	 */
	if ((error = bus_setup_intr(dev, adapter->res[0],
#if __FreeBSD_version > 700000
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
#else /* 6.X */
	    INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register interrupt handler");
		return (error);
	}

#else /* FAST_IRQ */
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.  em_irq_fast runs as a filter and defers
	 * the real work to these taskqueue tasks.
	 */
	TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
#if __FreeBSD_version < 700000
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET | INTR_FAST, em_irq_fast, adapter,
#else
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET, em_irq_fast, NULL, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
			    "handler: %d\n", error);
		/* Tear down the taskqueue we just created */
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
		return (error);
	}
#endif  /* EM_LEGACY_IRQ */

	return (0);
}
2919 
2920 /*********************************************************************
2921  *
2922  *  Setup the MSIX Interrupt handlers
2923  *   This is not really Multiqueue, rather
2924  *   its just multiple interrupt vectors.
2925  *
2926  **********************************************************************/
2927 int
2928 em_allocate_msix(struct adapter *adapter)
2929 {
2930 	device_t dev = adapter->dev;
2931 	int error;
2932 
2933 	/* Make sure all interrupts are disabled */
2934 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2935 
2936 	/* First get the resources */
2937 	for (int i = 0; i < adapter->msi; i++) {
2938 		adapter->res[i] = bus_alloc_resource_any(dev,
2939 		    SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
2940 		if (adapter->res[i] == NULL) {
2941 			device_printf(dev,
2942 			    "Unable to allocate bus resource: "
2943 			    "MSIX Interrupt\n");
2944 			return (ENXIO);
2945 		}
2946 	}
2947 
2948 	/*
2949 	 * Now allocate deferred processing contexts.
2950 	 */
2951 	TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
2952 	TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
2953 	/*
2954 	 * Handle compatibility for msi case for deferral due to
2955 	 * trylock failure
2956 	 */
2957 	TASK_INIT(&adapter->rxtx_task, 0, em_handle_tx, adapter);
2958 	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2959 	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2960 	    taskqueue_thread_enqueue, &adapter->tq);
2961 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2962 	    device_get_nameunit(adapter->dev));
2963 
2964 	/*
2965 	 * And setup the interrupt handlers
2966 	 */
2967 
2968 	/* First slot to RX */
2969 	if ((error = bus_setup_intr(dev, adapter->res[0],
2970 #if __FreeBSD_version > 700000
2971 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, adapter,
2972 #else /* 6.X */
2973 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_rx, adapter,
2974 #endif
2975 	    &adapter->tag[0])) != 0) {
2976 		device_printf(dev, "Failed to register RX handler");
2977 		return (error);
2978 	}
2979 
2980 	/* Next TX */
2981 	if ((error = bus_setup_intr(dev, adapter->res[1],
2982 #if __FreeBSD_version > 700000
2983 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, adapter,
2984 #else /* 6.X */
2985 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_tx, adapter,
2986 #endif
2987 	    &adapter->tag[1])) != 0) {
2988 		device_printf(dev, "Failed to register TX handler");
2989 		return (error);
2990 	}
2991 
2992 	/* And Link */
2993 	if ((error = bus_setup_intr(dev, adapter->res[2],
2994 #if __FreeBSD_version > 700000
2995 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter,
2996 #else /* 6.X */
2997 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_link, adapter,
2998 #endif
2999 	    &adapter->tag[2])) != 0) {
3000 		device_printf(dev, "Failed to register TX handler");
3001 		return (error);
3002 	}
3003 
3004 	return (0);
3005 }
3006 
3007 
/*
 * Release every bus resource taken during attach: interrupt
 * handlers and IRQs, the MSI/MSIX allocation, the MSIX table BAR,
 * the register BAR, the flash window, and the IO port range.
 * Safe to call with any subset of these unallocated (NULL checks
 * throughout).
 */
static void
em_free_pci_resources(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/*
	 * Make sure the for loop below runs once: legacy interrupts
	 * leave adapter->msi == 0 but still use res[0]/tag[0].
	 * The count is only used for teardown from here on.
	 */
	if (adapter->msi == 0)
		adapter->msi = 1;

	/*
	 * First release all the interrupt resources:
	 *      notice that since these are just kept
	 *      in an array we can do the same logic
	 *      whether its MSIX or just legacy.
	 */
	for (int i = 0; i < adapter->msi; i++) {
		if (adapter->tag[i] != NULL) {
			bus_teardown_intr(dev, adapter->res[i],
			    adapter->tag[i]);
			adapter->tag[i] = NULL;
		}
		if (adapter->res[i] != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    adapter->rid[i], adapter->res[i]);
		}
	}

	/* Give the message-signaled vectors back to the system */
	if (adapter->msi)
		pci_release_msi(dev);

	/* MSIX table BAR (mapped only on 82574, see em_setup_msix) */
	if (adapter->msix != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(EM_MSIX_BAR), adapter->msix);

	if (adapter->memory != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->memory);

	if (adapter->flash != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    EM_FLASH, adapter->flash);

	if (adapter->ioport != NULL)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    adapter->io_rid, adapter->ioport);
}
3054 
3055 /*
3056  * Setup MSI or MSI/X
3057  */
3058 static int
3059 em_setup_msix(struct adapter *adapter)
3060 {
3061 	device_t dev = adapter->dev;
3062 	int val = 0;
3063 
3064 	if (adapter->hw.mac.type < e1000_82571)
3065 		return (0);
3066 
3067 	/* Setup MSI/X for Hartwell */
3068 	if (adapter->hw.mac.type == e1000_82574) {
3069 		/* Map the MSIX BAR */
3070 		int rid = PCIR_BAR(EM_MSIX_BAR);
3071 		adapter->msix = bus_alloc_resource_any(dev,
3072 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
3073        		if (!adapter->msix) {
3074 			/* May not be enabled */
3075                		device_printf(adapter->dev,
3076 			    "Unable to map MSIX table \n");
3077 			goto msi;
3078        		}
3079 		val = pci_msix_count(dev);
3080 		/*
3081 		** 82574 can be configured for 5 but
3082 		** we limit use to 3.
3083 		*/
3084 		if (val > 3) val = 3;
3085 		if ((val) && pci_alloc_msix(dev, &val) == 0) {
3086                		device_printf(adapter->dev,"Using MSIX interrupts\n");
3087 			return (val);
3088 		}
3089 	}
3090 msi:
3091        	val = pci_msi_count(dev);
3092        	if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
3093                	adapter->msi = 1;
3094                	device_printf(adapter->dev,"Using MSI interrupt\n");
3095 		return (val);
3096 	}
3097 	return (0);
3098 }
3099 
3100 /*********************************************************************
3101  *
3102  *  Initialize the hardware to a configuration
3103  *  as specified by the adapter structure.
3104  *
3105  **********************************************************************/
static int
em_hardware_init(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u16 	rx_buffer_size;

	INIT_DEBUGOUT("em_hardware_init: begin");

	/* Issue a global reset */
	e1000_reset_hw(&adapter->hw);

	/*
	 * Get control from any management/hw control.
	 * Only done here for parts where firmware/manageability may
	 * own the interface (82573 and the ICH family).
	 */
	if (((adapter->hw.mac.type == e1000_82573) ||
	    (adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_ich10lan) ||
	    (adapter->hw.mac.type == e1000_ich9lan)) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_get_hw_control(adapter);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Set up smart power down as default off on newer adapters. */
	if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
	    adapter->hw.mac.type == e1000_82572)) {
		u16 phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	/* PBA register holds the RX buffer size in KB; convert to bytes */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
	    0xffff) << 10 );

	adapter->hw.fc.high_water = rx_buffer_size -
	    roundup2(adapter->max_frame_size, 1024);
	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

	if (adapter->hw.mac.type == e1000_80003es2lan)
		adapter->hw.fc.pause_time = 0xFFFF;
	else
		adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
	adapter->hw.fc.send_xon = TRUE;
	adapter->hw.fc.requested_mode = e1000_fc_full;

	/* Program the MAC with the settings above */
	if (e1000_init_hw(&adapter->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	e1000_check_for_link(&adapter->hw);

	return (0);
}
3178 
3179 /*********************************************************************
3180  *
3181  *  Setup networking device structure and register an interface.
3182  *
3183  **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("em_setup_interface: begin");

	/* Allocate and initialize the ifnet, then attach to ether layer */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	/* Capabilities are built up below based on hardware features */
	ifp->if_capabilities = ifp->if_capenable = 0;

#ifdef IFNET_BUF_RING
	/* Multiqueue-style transmit entry points with a buf_ring */
	ifp->if_transmit = em_transmit;
	ifp->if_qflush = em_qflush;
	adapter->br = buf_ring_alloc(2048, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
#endif
	/* Checksum offload on 82543 and newer */
	if (adapter->hw.mac.type >= e1000_82543) {
		int version_cap;
#if __FreeBSD_version < 700000
		version_cap = IFCAP_HWCSUM;
#else
		version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
#endif
		ifp->if_capabilities |= version_cap;
		ifp->if_capenable |= version_cap;
	}

#if __FreeBSD_version >= 700000
	/* Identify TSO capable adapters */
	if ((adapter->hw.mac.type > e1000_82544) &&
	    (adapter->hw.mac.type != e1000_82547))
		ifp->if_capabilities |= IFCAP_TSO4;
	/*
	 * By default only enable on PCI-E, this
	 * can be overriden by ifconfig.
	 */
	if (adapter->hw.mac.type >= e1000_82571)
		ifp->if_capenable |= IFCAP_TSO4;
#endif

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    em_media_change, em_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		u_char fiber_type = IFM_1000_SX;	/* default type */

		/* 82545 fiber parts use LX optics */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		/* Copper: advertise 10/100 always, 1000 where the PHY can */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
}
3282 
3283 
3284 /*********************************************************************
3285  *
3286  *  Workaround for SmartSpeed on 82541 and 82547 controllers
3287  *
3288  **********************************************************************/
static void
em_smartspeed(struct adapter *adapter)
{
	u16 phy_tmp;

	/*
	 * Only relevant while there is no link, on IGP PHYs, with
	 * autonegotiation advertising 1000FDX.
	 */
	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
	    adapter->hw.mac.autoneg == 0 ||
	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&adapter->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			/* Drop manual master/slave and restart autoneg */
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&adapter->hw,
				    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.mac.autoneg &&
				   !e1000_phy_setup_autoneg(&adapter->hw) &&
				   !e1000_read_phy_reg(&adapter->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					e1000_write_phy_reg(&adapter->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		/* Re-enable master/slave mode and try autoneg again */
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.mac.autoneg &&
		   !e1000_phy_setup_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
3343 
3344 
3345 /*
3346  * Manage DMA'able memory.
3347  */
3348 static void
3349 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3350 {
3351 	if (error)
3352 		return;
3353 	*(bus_addr_t *) arg = segs[0].ds_addr;
3354 }
3355 
3356 static int
3357 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3358         struct em_dma_alloc *dma, int mapflags)
3359 {
3360 	int error;
3361 
3362 #if __FreeBSD_version >= 700000
3363 	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3364 #else
3365 	error = bus_dma_tag_create(NULL,		 /* parent */
3366 #endif
3367 				EM_DBA_ALIGN, 0,	/* alignment, bounds */
3368 				BUS_SPACE_MAXADDR,	/* lowaddr */
3369 				BUS_SPACE_MAXADDR,	/* highaddr */
3370 				NULL, NULL,		/* filter, filterarg */
3371 				size,			/* maxsize */
3372 				1,			/* nsegments */
3373 				size,			/* maxsegsize */
3374 				0,			/* flags */
3375 				NULL,			/* lockfunc */
3376 				NULL,			/* lockarg */
3377 				&dma->dma_tag);
3378 	if (error) {
3379 		device_printf(adapter->dev,
3380 		    "%s: bus_dma_tag_create failed: %d\n",
3381 		    __func__, error);
3382 		goto fail_0;
3383 	}
3384 
3385 	error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3386 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3387 	if (error) {
3388 		device_printf(adapter->dev,
3389 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3390 		    __func__, (uintmax_t)size, error);
3391 		goto fail_2;
3392 	}
3393 
3394 	dma->dma_paddr = 0;
3395 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3396 	    size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3397 	if (error || dma->dma_paddr == 0) {
3398 		device_printf(adapter->dev,
3399 		    "%s: bus_dmamap_load failed: %d\n",
3400 		    __func__, error);
3401 		goto fail_3;
3402 	}
3403 
3404 	return (0);
3405 
3406 fail_3:
3407 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3408 fail_2:
3409 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3410 	bus_dma_tag_destroy(dma->dma_tag);
3411 fail_0:
3412 	dma->dma_map = NULL;
3413 	dma->dma_tag = NULL;
3414 
3415 	return (error);
3416 }
3417 
3418 static void
3419 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3420 {
3421 	if (dma->dma_tag == NULL)
3422 		return;
3423 	if (dma->dma_map != NULL) {
3424 		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3425 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3426 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3427 		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3428 		dma->dma_map = NULL;
3429 	}
3430 	bus_dma_tag_destroy(dma->dma_tag);
3431 	dma->dma_tag = NULL;
3432 }
3433 
3434 
3435 /*********************************************************************
3436  *
3437  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
3438  *  the information needed to transmit a packet on the wire.
3439  *
3440  **********************************************************************/
static int
em_allocate_transmit_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *tx_buffer;
	int error;

	/*
	 * Create DMA tags for tx descriptors
	 * (sized for TSO: up to EM_TSO_SIZE across EM_MAX_SCATTER segs)
	 */
#if __FreeBSD_version >= 700000
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
#else
	if ((error = bus_dma_tag_create(NULL,		 /* parent */
#endif
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				EM_TSO_SIZE,		/* maxsize */
				EM_MAX_SCATTER,		/* nsegments */
				EM_TSO_SEG_SIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,		/* lockfunc */
				NULL,		/* lockarg */
				&adapter->txtag)) != 0) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	/* Software state: one em_buffer per TX descriptor */
	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->tx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	for (int i = 0; i < adapter->num_tx_desc; i++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
		tx_buffer->next_eop = -1;	/* no end-of-packet yet */
	}

	return (0);
fail:
	/* Releases whatever subset was successfully allocated */
	em_free_transmit_structures(adapter);
	return (error);
}
3495 
3496 /*********************************************************************
3497  *
3498  *  (Re)Initialize transmit structures.
3499  *
3500  **********************************************************************/
3501 static void
3502 em_setup_transmit_structures(struct adapter *adapter)
3503 {
3504 	struct em_buffer *tx_buffer;
3505 
3506 	/* Clear the old ring contents */
3507 	bzero(adapter->tx_desc_base,
3508 	    (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3509 
3510 	/* Free any existing TX buffers */
3511 	for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3512 		tx_buffer = &adapter->tx_buffer_area[i];
3513 		bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3514 		    BUS_DMASYNC_POSTWRITE);
3515 		bus_dmamap_unload(adapter->txtag, tx_buffer->map);
3516 		m_freem(tx_buffer->m_head);
3517 		tx_buffer->m_head = NULL;
3518 		tx_buffer->next_eop = -1;
3519 	}
3520 
3521 	/* Reset state */
3522 	adapter->next_avail_tx_desc = 0;
3523 	adapter->next_tx_to_clean = 0;
3524 	adapter->num_tx_desc_avail = adapter->num_tx_desc;
3525 
3526 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3527 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3528 
3529 	return;
3530 }
3531 
3532 /*********************************************************************
3533  *
3534  *  Enable transmit unit.
3535  *
3536  **********************************************************************/
static void
em_initialize_transmit_unit(struct adapter *adapter)
{
	u32	tctl, tarc, tipg = 0;
	u64	bus_addr;

	 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (u32)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	case e1000_80003es2lan:
		tipg = DEFAULT_82543_TIPG_IPGR1;
		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
		    E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* IPGT differs between fiber/serdes and copper media */
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	/* TX interrupt delay (and absolute delay on 82540 and later) */
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	/* Per-MAC TARC tuning bits */
	if ((adapter->hw.mac.type == e1000_82571) ||
	    (adapter->hw.mac.type == e1000_82572)) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= SPEED_MODE_BIT;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
	}

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* Multiple request support on 82571 and later */
	if (adapter->hw.mac.type >= e1000_82571)
		tctl |= E1000_TCTL_MULR;

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Request descriptor-done interrupt delay when configured */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
3621 
3622 /*********************************************************************
3623  *
3624  *  Free all transmit related data structures.
3625  *
3626  **********************************************************************/
3627 static void
3628 em_free_transmit_structures(struct adapter *adapter)
3629 {
3630 	struct em_buffer *tx_buffer;
3631 
3632 	INIT_DEBUGOUT("free_transmit_structures: begin");
3633 
3634 	if (adapter->tx_buffer_area != NULL) {
3635 		for (int i = 0; i < adapter->num_tx_desc; i++) {
3636 			tx_buffer = &adapter->tx_buffer_area[i];
3637 			if (tx_buffer->m_head != NULL) {
3638 				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3639 				    BUS_DMASYNC_POSTWRITE);
3640 				bus_dmamap_unload(adapter->txtag,
3641 				    tx_buffer->map);
3642 				m_freem(tx_buffer->m_head);
3643 				tx_buffer->m_head = NULL;
3644 			} else if (tx_buffer->map != NULL)
3645 				bus_dmamap_unload(adapter->txtag,
3646 				    tx_buffer->map);
3647 			if (tx_buffer->map != NULL) {
3648 				bus_dmamap_destroy(adapter->txtag,
3649 				    tx_buffer->map);
3650 				tx_buffer->map = NULL;
3651 			}
3652 		}
3653 	}
3654 	if (adapter->tx_buffer_area != NULL) {
3655 		free(adapter->tx_buffer_area, M_DEVBUF);
3656 		adapter->tx_buffer_area = NULL;
3657 	}
3658 	if (adapter->txtag != NULL) {
3659 		bus_dma_tag_destroy(adapter->txtag);
3660 		adapter->txtag = NULL;
3661 	}
3662 }
3663 
3664 /*********************************************************************
3665  *
3666  *  The offload context needs to be set when we transfer the first
3667  *  packet of a particular protocol (TCP/UDP). This routine has been
3668  *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3669  *
3670  **********************************************************************/
3671 static void
3672 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3673     u32 *txd_upper, u32 *txd_lower)
3674 {
3675 	struct e1000_context_desc *TXD;
3676 	struct em_buffer *tx_buffer;
3677 	struct ether_vlan_header *eh;
3678 	struct ip *ip = NULL;
3679 	struct ip6_hdr *ip6;
3680 	struct tcp_hdr *th;
3681 	int curr_txd, ehdrlen;
3682 	u32 cmd, hdr_len, ip_hlen;
3683 	u16 etype;
3684 	u8 ipproto;
3685 
3686 	cmd = hdr_len = ipproto = 0;
3687 	/* Setup checksum offload context. */
3688 	curr_txd = adapter->next_avail_tx_desc;
3689 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
3690 	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3691 
3692 	/*
3693 	 * Determine where frame payload starts.
3694 	 * Jump over vlan headers if already present,
3695 	 * helpful for QinQ too.
3696 	 */
3697 	eh = mtod(mp, struct ether_vlan_header *);
3698 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3699 		etype = ntohs(eh->evl_proto);
3700 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3701 	} else {
3702 		etype = ntohs(eh->evl_encap_proto);
3703 		ehdrlen = ETHER_HDR_LEN;
3704 	}
3705 
3706 	/*
3707 	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3708 	 * TODO: Support SCTP too when it hits the tree.
3709 	 */
3710 	switch (etype) {
3711 	case ETHERTYPE_IP:
3712 		ip = (struct ip *)(mp->m_data + ehdrlen);
3713 		ip_hlen = ip->ip_hl << 2;
3714 
3715 		/* Setup of IP header checksum. */
3716 		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3717 			/*
3718 			 * Start offset for header checksum calculation.
3719 			 * End offset for header checksum calculation.
3720 			 * Offset of place to put the checksum.
3721 			 */
3722 			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3723 			TXD->lower_setup.ip_fields.ipcse =
3724 			    htole16(ehdrlen + ip_hlen);
3725 			TXD->lower_setup.ip_fields.ipcso =
3726 			    ehdrlen + offsetof(struct ip, ip_sum);
3727 			cmd |= E1000_TXD_CMD_IP;
3728 			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3729 		}
3730 
3731 		if (mp->m_len < ehdrlen + ip_hlen)
3732 			return;	/* failure */
3733 
3734 		hdr_len = ehdrlen + ip_hlen;
3735 		ipproto = ip->ip_p;
3736 
3737 		break;
3738 	case ETHERTYPE_IPV6:
3739 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3740 		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3741 
3742 		if (mp->m_len < ehdrlen + ip_hlen)
3743 			return;	/* failure */
3744 
3745 		/* IPv6 doesn't have a header checksum. */
3746 
3747 		hdr_len = ehdrlen + ip_hlen;
3748 		ipproto = ip6->ip6_nxt;
3749 
3750 		break;
3751 #ifdef EM_TIMESYNC
3752 	case ETHERTYPE_IEEE1588:
3753 		*txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
3754 		break;
3755 #endif
3756 	default:
3757 		*txd_upper = 0;
3758 		*txd_lower = 0;
3759 		return;
3760 	}
3761 
3762 	switch (ipproto) {
3763 	case IPPROTO_TCP:
3764 		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3765 			/*
3766 			 * Start offset for payload checksum calculation.
3767 			 * End offset for payload checksum calculation.
3768 			 * Offset of place to put the checksum.
3769 			 */
3770 			th = (struct tcp_hdr *)(mp->m_data + hdr_len);
3771 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3772 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3773 			TXD->upper_setup.tcp_fields.tucso =
3774 			    hdr_len + offsetof(struct tcphdr, th_sum);
3775 			cmd |= E1000_TXD_CMD_TCP;
3776 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3777 		}
3778 		break;
3779 	case IPPROTO_UDP:
3780 	{
3781 #ifdef EM_TIMESYNC
3782 		void *hdr = (caddr_t) ip + ip_hlen;
3783 		struct udphdr *uh = (struct udphdr *)hdr;
3784 
3785 		if (uh->uh_dport == htons(TSYNC_PORT)) {
3786 			*txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
3787 			IOCTL_DEBUGOUT("@@@ Sending Event Packet\n");
3788 		}
3789 #endif
3790 		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3791 			/*
3792 			 * Start offset for header checksum calculation.
3793 			 * End offset for header checksum calculation.
3794 			 * Offset of place to put the checksum.
3795 			 */
3796 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3797 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3798 			TXD->upper_setup.tcp_fields.tucso =
3799 			    hdr_len + offsetof(struct udphdr, uh_sum);
3800 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3801 		}
3802 		/* Fall Thru */
3803 	}
3804 	default:
3805 		break;
3806 	}
3807 
3808 #ifdef EM_TIMESYNC
3809 	/*
3810 	** We might be here just for TIMESYNC
3811 	** which means we don't need the context
3812 	** descriptor.
3813 	*/
3814 	if (!mp->m_pkthdr.csum_flags & CSUM_OFFLOAD)
3815 		return;
3816 #endif
3817 	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
3818 		     E1000_TXD_DTYP_D;		/* Data descr */
3819 	TXD->tcp_seg_setup.data = htole32(0);
3820 	TXD->cmd_and_length =
3821 	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3822 	tx_buffer->m_head = NULL;
3823 	tx_buffer->next_eop = -1;
3824 
3825 	if (++curr_txd == adapter->num_tx_desc)
3826 		curr_txd = 0;
3827 
3828 	adapter->num_tx_desc_avail--;
3829 	adapter->next_avail_tx_desc = curr_txd;
3830 }
3831 
3832 
3833 #if __FreeBSD_version >= 700000
3834 /**********************************************************************
3835  *
3836  *  Setup work for hardware segmentation offload (TSO)
3837  *
3838  **********************************************************************/
static bool
em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
   u32 *txd_lower)
{
	struct e1000_context_desc *TXD;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th;
	int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
	u16 etype;

	/*
	 * This function could/should be extended to support IP/IPv6
	 * fragmentation as well.  But as they say, one step at a time.
	 */

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;	/* -1 */

	/*
	 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		isip6 = 0;
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return FALSE;	/* 0 */
		/*
		 * Clear ip_len and ip_sum; the hardware recomputes
		 * both for every segment it generates.
		 */
		ip->ip_len = 0;
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		/* Re-check length now that the real IP header size is known. */
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
#if 1
		/*
		 * Seed th_sum with the pseudo-header checksum; the
		 * hardware folds each segment's payload sum into it.
		 */
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	case ETHERTYPE_IPV6:
		isip6 = 1;
		return FALSE;			/* Not supported yet. */
		/* NOTREACHED -- code below kept as a template for IPv6 TSO. */
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return FALSE;	/* 0 */
		ip6->ip6_plen = 0;
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
#if 0
		th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
		    htons(IPPROTO_TCP));	/* XXX: function notyet. */
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	default:
		return FALSE;
	}
	/* Total prototype header length: L2 + L3 + TCP incl. options. */
	hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);

	*txd_lower = (E1000_TXD_CMD_DEXT |	/* Extended descr type */
		      E1000_TXD_DTYP_D |	/* Data descr type */
		      E1000_TXD_CMD_TSE);	/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
		      E1000_TXD_POPTS_TXSM) << 8;

	/* Grab the next free descriptor slot for the context descriptor. */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];

	/* IPv6 doesn't have a header checksum. */
	if (!isip6) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
		TXD->lower_setup.ip_fields.ipcse =
		    htole16(ehdrlen + ip_hlen - 1);
		TXD->lower_setup.ip_fields.ipcso =
		    ehdrlen + offsetof(struct ip, ip_sum);
	}
	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss =
	    ehdrlen + ip_hlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
	TXD->tcp_seg_setup.fields.hdr_len = hdr_len;

	TXD->cmd_and_length = htole32(adapter->txd_cmd |
				E1000_TXD_CMD_DEXT |	/* Extended descr */
				E1000_TXD_CMD_TSE |	/* TSE context */
				(isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
				E1000_TXD_CMD_TCP |	/* Do TCP checksum */
				(mp->m_pkthdr.len - (hdr_len))); /* Total len */

	/* This slot carries a context descriptor, not packet data. */
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	adapter->tx_tso = TRUE;

	return TRUE;
}
3982 
3983 #endif /* __FreeBSD_version >= 700000 */
3984 
3985 /**********************************************************************
3986  *
3987  *  Examine each tx_buffer in the used queue. If the hardware is done
3988  *  processing the packet then free associated resources. The
3989  *  tx_buffer is put back on the free queue.
3990  *
3991  **********************************************************************/
static void
em_txeof(struct adapter *adapter)
{
        int first, last, done, num_avail;
	u32 cleaned = 0;
        struct em_buffer *tx_buffer;
        struct e1000_tx_desc   *tx_desc, *eop_desc;
	struct ifnet   *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

	/* Ring is already completely clean -- nothing to do. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        first = adapter->next_tx_to_clean;
        tx_desc = &adapter->tx_desc_base[first];
        tx_buffer = &adapter->tx_buffer_area[first];
	last = tx_buffer->next_eop;
        eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
 		last = 0;
	done = last;

        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);

	/* Outer loop: one iteration per packet whose EOP descriptor is done. */
        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
                	tx_desc->upper.data = 0;
                	tx_desc->lower.data = 0;
                	tx_desc->buffer_addr = 0;
                	++num_avail; ++cleaned;

			/* Only descriptors that carried an mbuf free one. */
			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

                        	m_freem(tx_buffer->m_head);
                        	tx_buffer->m_head = NULL;
                	}
			tx_buffer->next_eop = -1;

	                if (++first == adapter->num_tx_desc)
				first = 0;

	                tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
        		eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
        }
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->next_tx_to_clean = first;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to
         * tell the stack that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc) {
			adapter->watchdog_timer = 0;
        		adapter->num_tx_desc_avail = num_avail;
			return;
		}
        }

	/* If any descriptors cleaned, reset the watchdog */
	if (cleaned)
		adapter->watchdog_timer = EM_TX_TIMEOUT;
        adapter->num_tx_desc_avail = num_avail;
	return;
}
4088 
4089 /*********************************************************************
4090  *
4091  *  When Link is lost sometimes there is work still in the TX ring
 *  which will result in a watchdog; rather than allow that, we do an
 *  attempted cleanup and then reinit here. Note that this has been
 *  seen mostly with fiber adapters.
4095  *
4096  **********************************************************************/
4097 static void
4098 em_tx_purge(struct adapter *adapter)
4099 {
4100 	if ((!adapter->link_active) && (adapter->watchdog_timer)) {
4101 		EM_TX_LOCK(adapter);
4102 		em_txeof(adapter);
4103 		EM_TX_UNLOCK(adapter);
4104 		if (adapter->watchdog_timer) { /* Still not clean? */
4105 			adapter->watchdog_timer = 0;
4106 			em_init_locked(adapter);
4107 		}
4108 	}
4109 }
4110 
4111 /*********************************************************************
4112  *
4113  *  Get a buffer from system mbuf buffer pool.
4114  *
4115  **********************************************************************/
4116 static int
4117 em_get_buf(struct adapter *adapter, int i)
4118 {
4119 	struct mbuf		*m;
4120 	bus_dma_segment_t	segs[1];
4121 	bus_dmamap_t		map;
4122 	struct em_buffer	*rx_buffer;
4123 	int			error, nsegs;
4124 
4125 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
4126 	if (m == NULL) {
4127 		adapter->mbuf_cluster_failed++;
4128 		return (ENOBUFS);
4129 	}
4130 	m->m_len = m->m_pkthdr.len = MCLBYTES;
4131 
4132 	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
4133 		m_adj(m, ETHER_ALIGN);
4134 
4135 	/*
4136 	 * Using memory from the mbuf cluster pool, invoke the
4137 	 * bus_dma machinery to arrange the memory mapping.
4138 	 */
4139 	error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
4140 	    adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
4141 	if (error != 0) {
4142 		m_free(m);
4143 		return (error);
4144 	}
4145 
4146 	/* If nsegs is wrong then the stack is corrupt. */
4147 	KASSERT(nsegs == 1, ("Too many segments returned!"));
4148 
4149 	rx_buffer = &adapter->rx_buffer_area[i];
4150 	if (rx_buffer->m_head != NULL)
4151 		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4152 
4153 	map = rx_buffer->map;
4154 	rx_buffer->map = adapter->rx_sparemap;
4155 	adapter->rx_sparemap = map;
4156 	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
4157 	rx_buffer->m_head = m;
4158 
4159 	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
4160 	return (0);
4161 }
4162 
4163 /*********************************************************************
4164  *
4165  *  Allocate memory for rx_buffer structures. Since we use one
4166  *  rx_buffer per received packet, the maximum number of rx_buffer's
4167  *  that we'll need is equal to the number of receive descriptors
4168  *  that we've allocated.
4169  *
4170  **********************************************************************/
static int
em_allocate_receive_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *rx_buffer;
	int i, error;

	/* One em_buffer softc entry per receive descriptor. */
	adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->rx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		return (ENOMEM);
	}

	/* DMA tag for single-segment, MCLBYTES-sized receive buffers. */
#if __FreeBSD_version >= 700000
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
#else
	error = bus_dma_tag_create(NULL,		 /* parent */
#endif
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MCLBYTES,		/* maxsize */
				1,			/* nsegments */
				MCLBYTES,		/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&adapter->rxtag);
	if (error) {
		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
		    __func__, error);
		goto fail;
	}

	/* Create the spare map (used by getbuf) */
	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
	     &adapter->rx_sparemap);
	if (error) {
		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
		    __func__, error);
		goto fail;
	}

	/* One DMA map per descriptor slot. */
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
		    &rx_buffer->map);
		if (error) {
			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
			    __func__, error);
			goto fail;
		}
	}

	return (0);

fail:
	/* Releases everything set up above, including partial state. */
	em_free_receive_structures(adapter);
	return (error);
}
4233 
4234 /*********************************************************************
4235  *
4236  *  (Re)initialize receive structures.
4237  *
4238  **********************************************************************/
4239 static int
4240 em_setup_receive_structures(struct adapter *adapter)
4241 {
4242 	struct em_buffer *rx_buffer;
4243 	int i, error;
4244 
4245 	/* Reset descriptor ring */
4246 	bzero(adapter->rx_desc_base,
4247 	    (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
4248 
4249 	/* Free current RX buffers. */
4250 	rx_buffer = adapter->rx_buffer_area;
4251 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4252 		if (rx_buffer->m_head != NULL) {
4253 			bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4254 			    BUS_DMASYNC_POSTREAD);
4255 			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4256 			m_freem(rx_buffer->m_head);
4257 			rx_buffer->m_head = NULL;
4258 		}
4259         }
4260 
4261 	/* Allocate new ones. */
4262 	for (i = 0; i < adapter->num_rx_desc; i++) {
4263 		error = em_get_buf(adapter, i);
4264 		if (error)
4265                         return (error);
4266 	}
4267 
4268 	/* Setup our descriptor pointers */
4269 	adapter->next_rx_desc_to_check = 0;
4270 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4271 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4272 
4273 	return (0);
4274 }
4275 
4276 /*********************************************************************
4277  *
4278  *  Enable receive unit.
4279  *
4280  **********************************************************************/
/* ITR register counts in 256ns units; see comment at the write below. */
#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)

static void
em_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/*
	** When using MSIX interrupts we need to throttle
	** using the EITR register (82574 only)
	*/
	if (adapter->msix)
		for (int i = 0; i < 4; i++)
			E1000_WRITE_REG(&adapter->hw,
			    E1000_EITR_82574(i), DEFAULT_ITR);

	/* Disable accelerated acknowledge */
	if (adapter->hw.mac.type == e1000_82574)
		E1000_WRITE_REG(&adapter->hw,
		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* Store bad packets only when the TBI workaround needs them. */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Program the hardware buffer size to match our cluster size. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable follows the configured MTU. */
	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/*
	** XXX TEMPORARY WORKAROUND: on some systems with 82573
	** long latencies are observed, like Lenovo X60. This
	** change eliminates the problem, but since having positive
	** values in RDTR is a known source of problems on other
	** platforms another solution is being sought.
	*/
	if (adapter->hw.mac.type == e1000_82573)
		E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
4401 
4402 /*********************************************************************
4403  *
4404  *  Free receive related data structures.
4405  *
4406  **********************************************************************/
static void
em_free_receive_structures(struct adapter *adapter)
{
	struct em_buffer *rx_buffer;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* The spare map is never left loaded; just destroy it. */
	if (adapter->rx_sparemap) {
		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
		adapter->rx_sparemap = NULL;
	}

	/* Cleanup any existing buffers */
	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->m_head != NULL) {
				/* Loaded slot: sync, unload, free the mbuf. */
				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			} else if (rx_buffer->map != NULL)
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
			/* Maps must be unloaded before being destroyed. */
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}
	}

	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}

	/* Destroy the tag last, after all its maps are gone. */
	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
}
4452 
4453 /*********************************************************************
4454  *
4455  *  This routine executes in interrupt context. It replenishes
4456  *  the mbufs in the descriptor and sends data which has been
4457  *  dma'ed into host memory to upper layer.
4458  *
4459  *  We loop at most count times if count is > 0, or until done if
4460  *  count < 0.
4461  *
4462  *********************************************************************/
4463 static int
4464 em_rxeof(struct adapter *adapter, int count)
4465 {
4466 	struct ifnet	*ifp = adapter->ifp;;
4467 	struct mbuf	*mp;
4468 	u8		status, accept_frame = 0, eop = 0;
4469 	u16 		len, desc_len, prev_len_adj;
4470 	int		i;
4471 	struct e1000_rx_desc   *current_desc;
4472 
4473 	EM_RX_LOCK(adapter);
4474 	i = adapter->next_rx_desc_to_check;
4475 	current_desc = &adapter->rx_desc_base[i];
4476 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4477 	    BUS_DMASYNC_POSTREAD);
4478 
4479 	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
4480 		EM_RX_UNLOCK(adapter);
4481 		return (0);
4482 	}
4483 
4484 	while ((current_desc->status & E1000_RXD_STAT_DD) &&
4485 	    (count != 0) &&
4486 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4487 		struct mbuf *m = NULL;
4488 
4489 		mp = adapter->rx_buffer_area[i].m_head;
4490 		/*
4491 		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
4492 		 * needs to access the last received byte in the mbuf.
4493 		 */
4494 		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
4495 		    BUS_DMASYNC_POSTREAD);
4496 
4497 		accept_frame = 1;
4498 		prev_len_adj = 0;
4499 		desc_len = le16toh(current_desc->length);
4500 		status = current_desc->status;
4501 		if (status & E1000_RXD_STAT_EOP) {
4502 			count--;
4503 			eop = 1;
4504 			if (desc_len < ETHER_CRC_LEN) {
4505 				len = 0;
4506 				prev_len_adj = ETHER_CRC_LEN - desc_len;
4507 			} else
4508 				len = desc_len - ETHER_CRC_LEN;
4509 		} else {
4510 			eop = 0;
4511 			len = desc_len;
4512 		}
4513 
4514 		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
4515 			u8	last_byte;
4516 			u32	pkt_len = desc_len;
4517 
4518 			if (adapter->fmp != NULL)
4519 				pkt_len += adapter->fmp->m_pkthdr.len;
4520 
4521 			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
4522 			if (TBI_ACCEPT(&adapter->hw, status,
4523 			    current_desc->errors, pkt_len, last_byte,
4524 			    adapter->min_frame_size, adapter->max_frame_size)) {
4525 				e1000_tbi_adjust_stats_82543(&adapter->hw,
4526 				    &adapter->stats, pkt_len,
4527 				    adapter->hw.mac.addr,
4528 				    adapter->max_frame_size);
4529 				if (len > 0)
4530 					len--;
4531 			} else
4532 				accept_frame = 0;
4533 		}
4534 
4535 		if (accept_frame) {
4536 			if (em_get_buf(adapter, i) != 0) {
4537 				ifp->if_iqdrops++;
4538 				goto discard;
4539 			}
4540 
4541 			/* Assign correct length to the current fragment */
4542 			mp->m_len = len;
4543 
4544 			if (adapter->fmp == NULL) {
4545 				mp->m_pkthdr.len = len;
4546 				adapter->fmp = mp; /* Store the first mbuf */
4547 				adapter->lmp = mp;
4548 			} else {
4549 				/* Chain mbuf's together */
4550 				mp->m_flags &= ~M_PKTHDR;
4551 				/*
4552 				 * Adjust length of previous mbuf in chain if
4553 				 * we received less than 4 bytes in the last
4554 				 * descriptor.
4555 				 */
4556 				if (prev_len_adj > 0) {
4557 					adapter->lmp->m_len -= prev_len_adj;
4558 					adapter->fmp->m_pkthdr.len -=
4559 					    prev_len_adj;
4560 				}
4561 				adapter->lmp->m_next = mp;
4562 				adapter->lmp = adapter->lmp->m_next;
4563 				adapter->fmp->m_pkthdr.len += len;
4564 			}
4565 
4566 			if (eop) {
4567 				adapter->fmp->m_pkthdr.rcvif = ifp;
4568 				ifp->if_ipackets++;
4569 				em_receive_checksum(adapter, current_desc,
4570 				    adapter->fmp);
4571 #ifndef __NO_STRICT_ALIGNMENT
4572 				if (adapter->max_frame_size >
4573 				    (MCLBYTES - ETHER_ALIGN) &&
4574 				    em_fixup_rx(adapter) != 0)
4575 					goto skip;
4576 #endif
4577 				if (status & E1000_RXD_STAT_VP) {
4578 #if __FreeBSD_version < 700000
4579 					VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
4580 					    (le16toh(current_desc->special) &
4581 					    E1000_RXD_SPC_VLAN_MASK));
4582 #else
4583 					adapter->fmp->m_pkthdr.ether_vtag =
4584 					    (le16toh(current_desc->special) &
4585 					    E1000_RXD_SPC_VLAN_MASK);
4586 					adapter->fmp->m_flags |= M_VLANTAG;
4587 #endif
4588 				}
4589 #ifndef __NO_STRICT_ALIGNMENT
4590 skip:
4591 #endif
4592 				m = adapter->fmp;
4593 				adapter->fmp = NULL;
4594 				adapter->lmp = NULL;
4595 			}
4596 		} else {
4597 			ifp->if_ierrors++;
4598 discard:
4599 			/* Reuse loaded DMA map and just update mbuf chain */
4600 			mp = adapter->rx_buffer_area[i].m_head;
4601 			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
4602 			mp->m_data = mp->m_ext.ext_buf;
4603 			mp->m_next = NULL;
4604 			if (adapter->max_frame_size <=
4605 			    (MCLBYTES - ETHER_ALIGN))
4606 				m_adj(mp, ETHER_ALIGN);
4607 			if (adapter->fmp != NULL) {
4608 				m_freem(adapter->fmp);
4609 				adapter->fmp = NULL;
4610 				adapter->lmp = NULL;
4611 			}
4612 			m = NULL;
4613 		}
4614 
4615 		/* Zero out the receive descriptors status. */
4616 		current_desc->status = 0;
4617 		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4618 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4619 
4620 		/* Advance our pointers to the next descriptor. */
4621 		if (++i == adapter->num_rx_desc)
4622 			i = 0;
4623 		if (m != NULL) {
4624 			adapter->next_rx_desc_to_check = i;
4625 			/* Unlock for call into stack */
4626 			EM_RX_UNLOCK(adapter);
4627 			(*ifp->if_input)(ifp, m);
4628 			EM_RX_LOCK(adapter);
4629 			i = adapter->next_rx_desc_to_check;
4630 		}
4631 		current_desc = &adapter->rx_desc_base[i];
4632 	}
4633 	adapter->next_rx_desc_to_check = i;
4634 
4635 	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
4636 	if (--i < 0)
4637 		i = adapter->num_rx_desc - 1;
4638 	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
4639 	EM_RX_UNLOCK(adapter);
4640 	if (!((current_desc->status) & E1000_RXD_STAT_DD))
4641 		return (0);
4642 
4643 	return (1);
4644 }
4645 
4646 #ifndef __NO_STRICT_ALIGNMENT
4647 /*
4648  * When jumbo frames are enabled we should realign entire payload on
4649  * architecures with strict alignment. This is serious design mistake of 8254x
4650  * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4651  * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4652  * payload. On architecures without strict alignment restrictions 8254x still
4653  * performs unaligned memory access which would reduce the performance too.
4654  * To avoid copying over an entire frame to align, we allocate a new mbuf and
4655  * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4656  * existing mbuf chain.
4657  *
4658  * Be aware, best performance of the 8254x is achived only when jumbo frame is
4659  * not used at all on architectures with strict alignment.
4660  */
static int
em_fixup_rx(struct adapter *adapter)
{
	struct mbuf *m, *n;
	int error;

	error = 0;
	m = adapter->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/*
		 * Room left in the cluster: slide the whole frame up by
		 * ETHER_HDR_LEN (bcopy handles the overlap) so the data
		 * following the 14-byte ethernet header ends up aligned.
		 */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		/*
		 * Cluster is full: move the ethernet header into a newly
		 * allocated mbuf prepended to the existing chain.
		 */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			/* n takes over the packet header from m. */
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			adapter->fmp = n;
		} else {
			/* No mbuf available: drop the whole frame. */
			adapter->dropped_pkts++;
			m_freem(adapter->fmp);
			adapter->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
4692 #endif
4693 
4694 /*********************************************************************
4695  *
4696  *  Verify that the hardware indicated that the checksum is valid.
4697  *  Inform the stack about the status of checksum so that stack
4698  *  doesn't spend time verifying the checksum.
4699  *
4700  *********************************************************************/
4701 static void
4702 em_receive_checksum(struct adapter *adapter,
4703 	    struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4704 {
4705 	/* 82543 or newer only */
4706 	if ((adapter->hw.mac.type < e1000_82543) ||
4707 	    /* Ignore Checksum bit is set */
4708 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4709 		mp->m_pkthdr.csum_flags = 0;
4710 		return;
4711 	}
4712 
4713 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4714 		/* Did it pass? */
4715 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4716 			/* IP Checksum Good */
4717 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4718 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4719 
4720 		} else {
4721 			mp->m_pkthdr.csum_flags = 0;
4722 		}
4723 	}
4724 
4725 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4726 		/* Did it pass? */
4727 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4728 			mp->m_pkthdr.csum_flags |=
4729 			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4730 			mp->m_pkthdr.csum_data = htons(0xffff);
4731 		}
4732 	}
4733 }
4734 
4735 /*
4736  * This routine is run via an vlan
4737  * config EVENT
4738  */
static void
em_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		ctrl, rctl, index, vfta;

	/* Enable VLAN tag stripping in the MAC */
	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
	ctrl |= E1000_CTRL_VME;
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);

	/* Setup for Hardware Filter */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	rctl &= ~E1000_RCTL_CFIEN;
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Make entry in the hardware filter table: the VFTA is an
	 * array of 32-bit words, one bit per VLAN id, so the word
	 * index is vtag/32 and the bit is vtag%32.
	 */
	index = ((vtag >> 5) & 0x7F);
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
	vfta |= (1 << (vtag & 0x1F));
	E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);

	/* Update the frame size to allow for the 802.1Q tag */
	E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
	    adapter->max_frame_size + VLAN_TAG_SIZE);

}
4766 
4767 /*
4768  * This routine is run via an vlan
4769  * unconfig EVENT
4770  */
static void
em_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		index, vfta;

	/* Remove entry in the hardware filter table (bit vtag%32 of word vtag/32) */
	index = ((vtag >> 5) & 0x7F);
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
	vfta &= ~(1 << (vtag & 0x1F));
	E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);
	/* Have all vlans unregistered? */
	if (adapter->ifp->if_vlantrunk == NULL) {
		u32 rctl;
		/* Turn off the filter table */
		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		rctl &= ~E1000_RCTL_VFE;
		rctl |= E1000_RCTL_CFIEN;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
		/* Reset the frame size (drop the VLAN tag allowance) */
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
		    adapter->max_frame_size);
	}
}
4795 
4796 static void
4797 em_enable_intr(struct adapter *adapter)
4798 {
4799 	struct e1000_hw *hw = &adapter->hw;
4800 	u32 ims_mask = IMS_ENABLE_MASK;
4801 
4802 	if (adapter->msix) {
4803 		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4804 		ims_mask |= EM_MSIX_MASK;
4805 	}
4806 	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
4807 }
4808 
4809 static void
4810 em_disable_intr(struct adapter *adapter)
4811 {
4812 	struct e1000_hw *hw = &adapter->hw;
4813 
4814 	if (adapter->msix)
4815 		E1000_WRITE_REG(hw, EM_EIAC, 0);
4816 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4817 }
4818 
4819 /*
4820  * Bit of a misnomer, what this really means is
4821  * to enable OS management of the system... aka
4822  * to disable special hardware management features
4823  */
static void
em_init_manageability(struct adapter *adapter)
{
	/* A shared code workaround */
#define E1000_82542_MANC2H E1000_MANC2H
	if (adapter->has_manage) {
		int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		if (adapter->hw.mac.type >= e1000_82571) {
			manc |= E1000_MANC_EN_MNG2HOST;
			/* Forward management traffic on ports 623 and 664 to the host */
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
			manc2h |= E1000_MNG2HOST_PORT_623;
			manc2h |= E1000_MNG2HOST_PORT_664;
			E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
		}

		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}
4849 
4850 /*
4851  * Give control back to hardware management
4852  * controller if there is one.
4853  */
static void
em_release_manageability(struct adapter *adapter)
{
	if (adapter->has_manage) {
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		/* stop forwarding management packets to the host */
		if (adapter->hw.mac.type >= e1000_82571)
			manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}
4869 
4870 /*
4871  * em_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4872  * For ASF and Pass Through versions of f/w this means that
4873  * the driver is loaded. For AMT version (only with 82573)
4874  * of the f/w this means that the network i/f is open.
4875  *
4876  */
static void
em_get_hw_control(struct adapter *adapter)
{
	u32 ctrl_ext, swsm;

	/* Let firmware know the driver has taken over */
	switch (adapter->hw.mac.type) {
	case e1000_82573:
		/* 82573 signals DRV_LOAD via the SWSM register */
		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
		    swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* These MACs signal DRV_LOAD via CTRL_EXT */
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* No DRV_LOAD handshake on other MAC types */
		break;
	}
}
4903 
4904 /*
4905  * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4906  * For ASF and Pass Through versions of f/w this means that the
4907  * driver is no longer loaded. For AMT version (only with 82573) i
4908  * of the f/w this means that the network i/f is closed.
4909  *
4910  */
static void
em_release_hw_control(struct adapter *adapter)
{
	u32 ctrl_ext, swsm;

	/* Let firmware take over control of h/w */
	switch (adapter->hw.mac.type) {
	case e1000_82573:
		/* 82573: clear DRV_LOAD in the SWSM register */
		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
		    swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* These MACs: clear DRV_LOAD in CTRL_EXT */
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* No handshake needed on other MAC types */
		break;

	}
}
4938 
4939 static int
4940 em_is_valid_ether_addr(u8 *addr)
4941 {
4942 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4943 
4944 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4945 		return (FALSE);
4946 	}
4947 
4948 	return (TRUE);
4949 }
4950 
4951 /*
4952  * Enable PCI Wake On Lan capability
4953  */
void
em_enable_wakeup(device_t dev)
{
	u16     cap, status;
	u8      id;

	/* First find the capabilities pointer*/
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	/*
	 * NOTE(review): this assumes the power-management capability is
	 * the first entry in the capability list and does not walk the
	 * chain -- confirm, or consider pci_find_cap(9).
	 */
	if (id != PCIY_PMG)     /* Something wrong */
		return;
	/* OK, we have the power capabilities, so
	   now get the status register */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	/* Set PME status and enable PME assertion */
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
	return;
}
4974 
4975 
4976 /*********************************************************************
4977 * 82544 Coexistence issue workaround.
4978 *    There are 2 issues.
4979 *       1. Transmit Hang issue.
4980 *    To detect this issue, following equation can be used...
4981 *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4982 *	  If SUM[3:0] is in between 1 to 4, we will have this issue.
4983 *
4984 *       2. DAC issue.
4985 *    To detect this issue, following equation can be used...
4986 *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4987 *	  If SUM[3:0] is in between 9 to c, we will have this issue.
4988 *
4989 *
4990 *    WORKAROUND:
4991 *	  Make sure we do not have ending address
4992 *	  as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4993 *
4994 *************************************************************************/
4995 static u32
4996 em_fill_descriptors (bus_addr_t address, u32 length,
4997 		PDESC_ARRAY desc_array)
4998 {
4999 	u32 safe_terminator;
5000 
5001 	/* Since issue is sensitive to length and address.*/
5002 	/* Let us first check the address...*/
5003 	if (length <= 4) {
5004 		desc_array->descriptor[0].address = address;
5005 		desc_array->descriptor[0].length = length;
5006 		desc_array->elements = 1;
5007 		return (desc_array->elements);
5008 	}
5009 	safe_terminator = (u32)((((u32)address & 0x7) +
5010 	    (length & 0xF)) & 0xF);
5011 	/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
5012 	if (safe_terminator == 0   ||
5013 	(safe_terminator > 4   &&
5014 	safe_terminator < 9)   ||
5015 	(safe_terminator > 0xC &&
5016 	safe_terminator <= 0xF)) {
5017 		desc_array->descriptor[0].address = address;
5018 		desc_array->descriptor[0].length = length;
5019 		desc_array->elements = 1;
5020 		return (desc_array->elements);
5021 	}
5022 
5023 	desc_array->descriptor[0].address = address;
5024 	desc_array->descriptor[0].length = length - 4;
5025 	desc_array->descriptor[1].address = address + (length - 4);
5026 	desc_array->descriptor[1].length = 4;
5027 	desc_array->elements = 2;
5028 	return (desc_array->elements);
5029 }
5030 
5031 /**********************************************************************
5032  *
5033  *  Update the board statistics counters.
5034  *
5035  **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/*
	 * Symbol/sequence error counters are only valid on copper PHYs
	 * or when a fiber/serdes link is actually up.
	 */
	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */
	/*
	 * NOTE(review): only the high dwords are read below, which does
	 * not match the comment above -- confirm whether the low dwords
	 * (GORCL/GOTCL, TORL/TOTL) should be read and accumulated first.
	 */

	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

	/* 82543 and newer expose additional error counters */
	if (adapter->hw.mac.type >= e1000_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
	}
	ifp = adapter->ifp;

	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol +
	    adapter->stats.latecol + adapter->watchdog_events;
}
5126 
5127 
5128 /**********************************************************************
5129  *
5130  *  This routine is called only when em_display_debug_stats is enabled.
5131  *  This routine provides a way to take a look at important statistics
5132  *  maintained by the driver and hardware.
5133  *
5134  **********************************************************************/
5135 static void
5136 em_print_debug_info(struct adapter *adapter)
5137 {
5138 	device_t dev = adapter->dev;
5139 	u8 *hw_addr = adapter->hw.hw_addr;
5140 
5141 	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
5142 	device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
5143 	    E1000_READ_REG(&adapter->hw, E1000_CTRL),
5144 	    E1000_READ_REG(&adapter->hw, E1000_RCTL));
5145 	device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
5146 	    ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
5147 	    (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
5148 	device_printf(dev, "Flow control watermarks high = %d low = %d\n",
5149 	    adapter->hw.fc.high_water,
5150 	    adapter->hw.fc.low_water);
5151 	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
5152 	    E1000_READ_REG(&adapter->hw, E1000_TIDV),
5153 	    E1000_READ_REG(&adapter->hw, E1000_TADV));
5154 	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
5155 	    E1000_READ_REG(&adapter->hw, E1000_RDTR),
5156 	    E1000_READ_REG(&adapter->hw, E1000_RADV));
5157 	device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
5158 	    (long long)adapter->tx_fifo_wrk_cnt,
5159 	    (long long)adapter->tx_fifo_reset_cnt);
5160 	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5161 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5162 	    E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5163 	device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5164 	    E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5165 	    E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5166 	device_printf(dev, "Num Tx descriptors avail = %d\n",
5167 	    adapter->num_tx_desc_avail);
5168 	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5169 	    adapter->no_tx_desc_avail1);
5170 	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5171 	    adapter->no_tx_desc_avail2);
5172 	device_printf(dev, "Std mbuf failed = %ld\n",
5173 	    adapter->mbuf_alloc_failed);
5174 	device_printf(dev, "Std mbuf cluster failed = %ld\n",
5175 	    adapter->mbuf_cluster_failed);
5176 	device_printf(dev, "Driver dropped packets = %ld\n",
5177 	    adapter->dropped_pkts);
5178 	device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5179 		adapter->no_tx_dma_setup);
5180 }
5181 
/* Dump the accumulated MAC statistics counters to the console. */
static void
em_print_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev, "Excessive collisions = %lld\n",
	    (long long)adapter->stats.ecol);
#if	(DEBUG_HW > 0)  /* Dont output these errors normally */
	device_printf(dev, "Symbol errors = %lld\n",
	    (long long)adapter->stats.symerrs);
#endif
	device_printf(dev, "Sequence errors = %lld\n",
	    (long long)adapter->stats.sec);
	device_printf(dev, "Defer count = %lld\n",
	    (long long)adapter->stats.dc);
	device_printf(dev, "Missed Packets = %lld\n",
	    (long long)adapter->stats.mpc);
	device_printf(dev, "Receive No Buffers = %lld\n",
	    (long long)adapter->stats.rnbc);
	/* RLEC is inaccurate on some hardware, calculate our own. */
	device_printf(dev, "Receive Length Errors = %lld\n",
	    ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
	device_printf(dev, "Receive errors = %lld\n",
	    (long long)adapter->stats.rxerrc);
	device_printf(dev, "Crc errors = %lld\n",
	    (long long)adapter->stats.crcerrs);
	device_printf(dev, "Alignment errors = %lld\n",
	    (long long)adapter->stats.algnerrc);
	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
	    (long long)adapter->stats.cexterr);
	device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);
	device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
	    " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
	    adapter->tx_irq , adapter->link_irq);
	/* Flow-control pause frames received/transmitted */
	device_printf(dev, "XON Rcvd = %lld\n",
	    (long long)adapter->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n",
	    (long long)adapter->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n",
	    (long long)adapter->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n",
	    (long long)adapter->stats.xofftxc);
	device_printf(dev, "Good Packets Rcvd = %lld\n",
	    (long long)adapter->stats.gprc);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
	    (long long)adapter->stats.gptc);
	device_printf(dev, "TSO Contexts Xmtd = %lld\n",
	    (long long)adapter->stats.tsctc);
	device_printf(dev, "TSO Contexts Failed = %lld\n",
	    (long long)adapter->stats.tsctfc);
}
5235 
5236 /**********************************************************************
5237  *
5238  *  This routine provides a way to dump out the adapter eeprom,
5239  *  often a useful debug/service tool. This only dumps the first
5240  *  32 words, stuff that matters is in that extent.
5241  *
5242  **********************************************************************/
5243 static void
5244 em_print_nvm_info(struct adapter *adapter)
5245 {
5246 	u16	eeprom_data;
5247 	int	i, j, row = 0;
5248 
5249 	/* Its a bit crude, but it gets the job done */
5250 	printf("\nInterface EEPROM Dump:\n");
5251 	printf("Offset\n0x0000  ");
5252 	for (i = 0, j = 0; i < 32; i++, j++) {
5253 		if (j == 8) { /* Make the offset block */
5254 			j = 0; ++row;
5255 			printf("\n0x00%x0  ",row);
5256 		}
5257 		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5258 		printf("%04x ", eeprom_data);
5259 	}
5260 	printf("\n");
5261 }
5262 
5263 static int
5264 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5265 {
5266 	struct adapter *adapter;
5267 	int error;
5268 	int result;
5269 
5270 	result = -1;
5271 	error = sysctl_handle_int(oidp, &result, 0, req);
5272 
5273 	if (error || !req->newptr)
5274 		return (error);
5275 
5276 	if (result == 1) {
5277 		adapter = (struct adapter *)arg1;
5278 		em_print_debug_info(adapter);
5279 	}
5280 	/*
5281 	 * This value will cause a hex dump of the
5282 	 * first 32 16-bit words of the EEPROM to
5283 	 * the screen.
5284 	 */
5285 	if (result == 2) {
5286 		adapter = (struct adapter *)arg1;
5287 		em_print_nvm_info(adapter);
5288         }
5289 
5290 	return (error);
5291 }
5292 
5293 
5294 static int
5295 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5296 {
5297 	struct adapter *adapter;
5298 	int error;
5299 	int result;
5300 
5301 	result = -1;
5302 	error = sysctl_handle_int(oidp, &result, 0, req);
5303 
5304 	if (error || !req->newptr)
5305 		return (error);
5306 
5307 	if (result == 1) {
5308 		adapter = (struct adapter *)arg1;
5309 		em_print_hw_stats(adapter);
5310 	}
5311 
5312 	return (error);
5313 }
5314 
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u32 regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	/* Report the current value; bail unless this is a write */
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* The delay registers hold a 16-bit tick count */
	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = EM_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_CORE_LOCK(adapter);
	/* Replace only the low 16 bits of the register */
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
		break;
	case E1000_TIDV:
		if (ticks == 0) {
			/* Stop requesting per-descriptor delay interrupts */
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_CORE_UNLOCK(adapter);
	return (0);
}
5357 
5358 static void
5359 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5360 	const char *description, struct em_int_delay_info *info,
5361 	int offset, int value)
5362 {
5363 	info->adapter = adapter;
5364 	info->offset = offset;
5365 	info->value = value;
5366 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5367 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5368 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5369 	    info, 0, em_sysctl_int_delay, "I", description);
5370 }
5371 
5372 #ifndef EM_LEGACY_IRQ
5373 static void
5374 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5375 	const char *description, int *limit, int value)
5376 {
5377 	*limit = value;
5378 	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5379 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5380 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
5381 }
5382 #endif
5383 
5384 #ifdef EM_TIMESYNC
5385 /*
5386  * Initialize the Time Sync Feature
5387  */
static int
em_tsync_init(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	u32		tx_ctl, rx_ctl;


	/* Program the SYSTIM increment (bit 24 plus the per-tick step) */
	E1000_WRITE_REG(&adapter->hw, E1000_TIMINCA, (1<<24) |
	    20833/PICOSECS_PER_TICK);

	/* Capture the current 64-bit hardware timestamp (low then high) */
	adapter->last_stamp =  E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
	adapter->last_stamp |= (u64)E1000_READ_REG(&adapter->hw,
	    E1000_SYSTIMH) << 32ULL;

	/* Enable the TX side */
	tx_ctl =  E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	tx_ctl |= 0x10;
	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
	E1000_WRITE_FLUSH(&adapter->hw);

	/* Read back to verify the enable bit stuck */
	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	if ((tx_ctl & 0x10) == 0) {
		device_printf(dev, "Failed to enable TX timestamping\n");
		return (ENXIO);
	}

	/* Enable RX */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	rx_ctl |= 0x10; /* Enable the feature */
	rx_ctl |= 0x0a; /* This value turns on Ver 1 and 2 */
	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);

	/*
	 * Ethertype Stamping (Ethertype = 0x88F7)
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RXMTRL, htonl(0x440088f7));

	/*
	 * Source Port Queue Filter Setup:
	 *  this is for UDP port filtering
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RXUDP, htons(TSYNC_PORT));
	/* Protocol = UDP, enable Timestamp, and filter on source/protocol */

	E1000_WRITE_FLUSH(&adapter->hw);

	/* Read back to verify the RX enable bit stuck */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	if ((rx_ctl & 0x10) == 0) {
		device_printf(dev, "Failed to enable RX timestamping\n");
		return (ENXIO);
	}

	device_printf(dev, "IEEE 1588 Precision Time Protocol enabled\n");

	return (0);
}
5444 
5445 /*
5446  * Disable the Time Sync Feature
5447  */
static void
em_tsync_disable(struct adapter *adapter)
{
	u32		tx_ctl, rx_ctl;

	/* Clear the TX timestamping enable bit */
	tx_ctl =  E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	tx_ctl &= ~0x10;
	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
	E1000_WRITE_FLUSH(&adapter->hw);

	/* Invalidate TX Timestamp (reading the high dword releases it) */
	E1000_READ_REG(&adapter->hw, E1000_TXSTMPH);

	/* Verify the bit actually cleared */
	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	if (tx_ctl & 0x10)
		HW_DEBUGOUT("Failed to disable TX timestamping\n");

	/* Clear the RX timestamping enable bit */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	rx_ctl &= ~0x10;

	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);
	E1000_WRITE_FLUSH(&adapter->hw);

	/* Invalidate RX Timestamp (reading the high dword releases it) */
	E1000_READ_REG(&adapter->hw, E1000_RXSATRH);

	/* Verify the bit actually cleared */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	if (rx_ctl & 0x10)
		HW_DEBUGOUT("Failed to disable RX timestamping\n");

	return;
}
5480 #endif /* EM_TIMESYNC */
5481