xref: /freebsd/sys/dev/e1000/if_em.c (revision ea906c4152774dff300bb26fbfc1e4188351c89a)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2008, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
37 #endif
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/endian.h>
43 #include <sys/kernel.h>
44 #include <sys/kthread.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/module.h>
48 #include <sys/rman.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
52 #include <sys/taskqueue.h>
53 #ifdef EM_TIMESYNC
54 #include <sys/ioccom.h>
55 #include <sys/time.h>
56 #endif
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 
60 #include <net/bpf.h>
61 #include <net/ethernet.h>
62 #include <net/if.h>
63 #include <net/if_arp.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66 
67 #include <net/if_types.h>
68 #include <net/if_vlan_var.h>
69 
70 #include <netinet/in_systm.h>
71 #include <netinet/in.h>
72 #include <netinet/if_ether.h>
73 #include <netinet/ip.h>
74 #include <netinet/ip6.h>
75 #include <netinet/tcp.h>
76 #include <netinet/udp.h>
77 
78 #include <machine/in_cksum.h>
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcireg.h>
81 
82 #include "e1000_api.h"
83 #include "e1000_82571.h"
84 #include "if_em.h"
85 
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
/* Runtime debug toggle; flipped via the "stats"/"debug" sysctl handlers. */
int	em_display_debug_stats = 0;

/*********************************************************************
 *  Driver version:
 *********************************************************************/
/* Appended to the branding string reported by em_probe(). */
char em_driver_version[] = "6.9.5";
95 
96 
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into e1000_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

/*
 * Scanned linearly by em_probe(); PCI_ANY_ID in the subvendor/subdevice
 * fields acts as a wildcard.  The final string-index field selects the
 * branding string from em_strings[] (all entries currently use index 0).
 */
static em_vendor_info_t em_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82573E,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573E_IAMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573L,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_GT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_IFE_G,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH9_BM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82574L,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_R_BM_V,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{ 0, 0, 0, 0, 0}
};
212 
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

/*
 * Indexed by the last field of em_vendor_info_array entries; every
 * entry currently selects index 0.
 */
static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
220 
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* Newbus device entry points */
static int	em_probe(device_t);
static int	em_attach(device_t);
static int	em_detach(device_t);
static int	em_shutdown(device_t);
static int	em_suspend(device_t);
static int	em_resume(device_t);
/* ifnet entry points and core init/teardown */
static void	em_start(struct ifnet *);
static void	em_start_locked(struct ifnet *ifp);
static int	em_ioctl(struct ifnet *, u_long, caddr_t);
static void	em_watchdog(struct adapter *);
static void	em_init(void *);
static void	em_init_locked(struct adapter *);
static void	em_stop(void *);
static void	em_media_status(struct ifnet *, struct ifmediareq *);
static int	em_media_change(struct ifnet *);
/* Hardware/resource setup */
static void	em_identify_hardware(struct adapter *);
static int	em_allocate_pci_resources(struct adapter *);
static int	em_allocate_legacy(struct adapter *adapter);
static int	em_allocate_msix(struct adapter *adapter);
static int	em_setup_msix(struct adapter *);
static void	em_free_pci_resources(struct adapter *);
static void	em_local_timer(void *);
static int	em_hardware_init(struct adapter *);
static void	em_setup_interface(device_t, struct adapter *);
/* TX/RX ring management */
static void	em_setup_transmit_structures(struct adapter *);
static void	em_initialize_transmit_unit(struct adapter *);
static int	em_setup_receive_structures(struct adapter *);
static void	em_initialize_receive_unit(struct adapter *);
static void	em_enable_intr(struct adapter *);
static void	em_disable_intr(struct adapter *);
static void	em_free_transmit_structures(struct adapter *);
static void	em_free_receive_structures(struct adapter *);
static void	em_update_stats_counters(struct adapter *);
static void	em_txeof(struct adapter *);
static void	em_tx_purge(struct adapter *);
static int	em_allocate_receive_structures(struct adapter *);
static int	em_allocate_transmit_structures(struct adapter *);
static int	em_rxeof(struct adapter *, int);
#ifndef __NO_STRICT_ALIGNMENT
static int	em_fixup_rx(struct adapter *);
#endif
/* Checksum offload / TSO */
static void	em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static void	em_transmit_checksum_setup(struct adapter *, struct mbuf *,
		    u32 *, u32 *);
#if __FreeBSD_version >= 700000
static bool	em_tso_setup(struct adapter *, struct mbuf *,
		    u32 *, u32 *);
#endif /* FreeBSD_version >= 700000 */
static void	em_set_promisc(struct adapter *);
static void	em_disable_promisc(struct adapter *);
static void	em_set_multi(struct adapter *);
static void	em_print_hw_stats(struct adapter *);
static void	em_update_link_status(struct adapter *);
static int	em_get_buf(struct adapter *, int);
static void	em_enable_hw_vlans(struct adapter *);
static int	em_xmit(struct adapter *, struct mbuf **);
static void	em_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround (half-duplex hang erratum) */
static int	em_82547_fifo_workaround(struct adapter *, int);
static void	em_82547_update_fifo_head(struct adapter *, int);
static int	em_82547_tx_fifo_reset(struct adapter *);
static void	em_82547_move_tail(void *);
/* DMA helpers, debug and sysctl plumbing */
static int	em_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *, int);
static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
static void	em_print_debug_info(struct adapter *);
static void	em_print_nvm_info(struct adapter *);
static int 	em_is_valid_ether_addr(u8 *);
static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u32	em_fill_descriptors (bus_addr_t address, u32 length,
		    PDESC_ARRAY desc_array);
static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void	em_add_int_delay_sysctl(struct adapter *, const char *,
		    const char *, struct em_int_delay_info *, int, int);
/* Management and WOL Support */
static void	em_init_manageability(struct adapter *);
static void	em_release_manageability(struct adapter *);
static void     em_get_hw_control(struct adapter *);
static void     em_release_hw_control(struct adapter *);
static void     em_enable_wakeup(device_t);

#ifdef EM_TIMESYNC
/* Precision Time sync support */
static int	em_tsync_init(struct adapter *);
static void	em_tsync_disable(struct adapter *);
#endif

#ifdef EM_LEGACY_IRQ
static void	em_intr(void *);
#else /* FAST IRQ */
/* Fast-interrupt filter; return type changed in FreeBSD 7 */
#if __FreeBSD_version < 700000
static void	em_irq_fast(void *);
#else
static int	em_irq_fast(void *);
#endif
/* MSIX handlers */
static void	em_msix_tx(void *);
static void	em_msix_rx(void *);
static void	em_msix_link(void *);
static void	em_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
/* Deferred (taskqueue) handlers run from the fast filter */
static void	em_handle_rxtx(void *context, int pending);
static void	em_handle_rx(void *context, int pending);
static void	em_handle_tx(void *context, int pending);
static void	em_handle_link(void *context, int pending);
#endif /* EM_LEGACY_IRQ */

#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif /* POLLING */
335 
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

/* Newbus method table mapping generic device ops to em_* handlers. */
static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	DEVMETHOD(device_suspend, em_suspend),
	DEVMETHOD(device_resume, em_resume),
	{0, 0}	/* terminator */
};

/* Driver descriptor: newbus allocates sizeof(struct adapter) as softc. */
static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter),
};

static devclass_t em_devclass;
/* Register on the pci bus; depends on pci and ether modules. */
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
359 
/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/*
 * Interrupt-delay registers tick in units of 1.024 usec; these macros
 * convert between hardware ticks and microseconds with rounding.
 */
#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
#define M_TSO_LEN			66

/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO	0
#endif

/* Defaults (usecs) for the interrupt-delay sysctls created in em_attach. */
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
/* Descriptor ring sizes; validated against hardware limits in em_attach. */
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;
static int em_smart_pwr_down = FALSE;
/* Controls whether promiscuous also shows bad packets */
static int em_debug_sbp = FALSE;
/* Local switch for MSI/MSIX */
static int em_enable_msi = TRUE;

/* Expose the above as hw.em.* loader tunables. */
TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);

#ifndef EM_LEGACY_IRQ
/* How many packets rxeof tries to clean at a time */
static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
#endif

/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
403 
404 /*********************************************************************
405  *  Device identification routine
406  *
407  *  em_probe determines if the driver should be loaded on
408  *  adapter based on PCI vendor/device id of the adapter.
409  *
410  *  return BUS_PROBE_DEFAULT on success, positive on failure
411  *********************************************************************/
412 
413 static int
414 em_probe(device_t dev)
415 {
416 	char		adapter_name[60];
417 	u16		pci_vendor_id = 0;
418 	u16		pci_device_id = 0;
419 	u16		pci_subvendor_id = 0;
420 	u16		pci_subdevice_id = 0;
421 	em_vendor_info_t *ent;
422 
423 	INIT_DEBUGOUT("em_probe: begin");
424 
425 	pci_vendor_id = pci_get_vendor(dev);
426 	if (pci_vendor_id != EM_VENDOR_ID)
427 		return (ENXIO);
428 
429 	pci_device_id = pci_get_device(dev);
430 	pci_subvendor_id = pci_get_subvendor(dev);
431 	pci_subdevice_id = pci_get_subdevice(dev);
432 
433 	ent = em_vendor_info_array;
434 	while (ent->vendor_id != 0) {
435 		if ((pci_vendor_id == ent->vendor_id) &&
436 		    (pci_device_id == ent->device_id) &&
437 
438 		    ((pci_subvendor_id == ent->subvendor_id) ||
439 		    (ent->subvendor_id == PCI_ANY_ID)) &&
440 
441 		    ((pci_subdevice_id == ent->subdevice_id) ||
442 		    (ent->subdevice_id == PCI_ANY_ID))) {
443 			sprintf(adapter_name, "%s %s",
444 				em_strings[ent->index],
445 				em_driver_version);
446 			device_set_desc_copy(dev, adapter_name);
447 			return (BUS_PROBE_DEFAULT);
448 		}
449 		ent++;
450 	}
451 
452 	return (ENXIO);
453 }
454 
455 /*********************************************************************
456  *  Device initialization routine
457  *
458  *  The attach entry point is called when the driver is being loaded.
459  *  This routine identifies the type of hardware, allocates all resources
460  *  and initializes the hardware.
461  *
462  *  return 0 on success, positive on failure
463  *********************************************************************/
464 
465 static int
466 em_attach(device_t dev)
467 {
468 	struct adapter	*adapter;
469 	int		tsize, rsize;
470 	int		error = 0;
471 	u16		eeprom_data, device_id;
472 
473 	INIT_DEBUGOUT("em_attach: begin");
474 
475 	adapter = device_get_softc(dev);
476 	adapter->dev = adapter->osdep.dev = dev;
477 	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
478 	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
479 	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
480 
481 	/* SYSCTL stuff */
482 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
483 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
484 	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
485 	    em_sysctl_debug_info, "I", "Debug Information");
486 
487 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
488 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
489 	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
490 	    em_sysctl_stats, "I", "Statistics");
491 
492 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
493 	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
494 
495 	/* Determine hardware and mac info */
496 	em_identify_hardware(adapter);
497 
498 	/* Setup PCI resources */
499 	if (em_allocate_pci_resources(adapter)) {
500 		device_printf(dev, "Allocation of PCI resources failed\n");
501 		error = ENXIO;
502 		goto err_pci;
503 	}
504 
505 	/*
506 	** For ICH8 and family we need to
507 	** map the flash memory, and this
508 	** must happen after the MAC is
509 	** identified
510 	*/
511 	if ((adapter->hw.mac.type == e1000_ich8lan) ||
512 	    (adapter->hw.mac.type == e1000_ich10lan) ||
513 	    (adapter->hw.mac.type == e1000_ich9lan)) {
514 		int rid = EM_BAR_TYPE_FLASH;
515 		adapter->flash = bus_alloc_resource_any(dev,
516 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
517 		if (adapter->flash == NULL) {
518 			device_printf(dev, "Mapping of Flash failed\n");
519 			error = ENXIO;
520 			goto err_pci;
521 		}
522 		/* This is used in the shared code */
523 		adapter->hw.flash_address = (u8 *)adapter->flash;
524 		adapter->osdep.flash_bus_space_tag =
525 		    rman_get_bustag(adapter->flash);
526 		adapter->osdep.flash_bus_space_handle =
527 		    rman_get_bushandle(adapter->flash);
528 	}
529 
530 	/* Do Shared Code initialization */
531 	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
532 		device_printf(dev, "Setup of Shared code failed\n");
533 		error = ENXIO;
534 		goto err_pci;
535 	}
536 
537 	e1000_get_bus_info(&adapter->hw);
538 
539 	/* Set up some sysctls for the tunable interrupt delays */
540 	em_add_int_delay_sysctl(adapter, "rx_int_delay",
541 	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
542 	    E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
543 	em_add_int_delay_sysctl(adapter, "tx_int_delay",
544 	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
545 	    E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
546 	if (adapter->hw.mac.type >= e1000_82540) {
547 		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
548 		    "receive interrupt delay limit in usecs",
549 		    &adapter->rx_abs_int_delay,
550 		    E1000_REGISTER(&adapter->hw, E1000_RADV),
551 		    em_rx_abs_int_delay_dflt);
552 		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
553 		    "transmit interrupt delay limit in usecs",
554 		    &adapter->tx_abs_int_delay,
555 		    E1000_REGISTER(&adapter->hw, E1000_TADV),
556 		    em_tx_abs_int_delay_dflt);
557 	}
558 
559 #ifndef EM_LEGACY_IRQ
560 	/* Sysctls for limiting the amount of work done in the taskqueue */
561 	em_add_rx_process_limit(adapter, "rx_processing_limit",
562 	    "max number of rx packets to process", &adapter->rx_process_limit,
563 	    em_rx_process_limit);
564 #endif
565 
566 	/*
567 	 * Validate number of transmit and receive descriptors. It
568 	 * must not exceed hardware maximum, and must be multiple
569 	 * of E1000_DBA_ALIGN.
570 	 */
571 	if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
572 	    (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
573 	    (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
574 	    (em_txd < EM_MIN_TXD)) {
575 		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
576 		    EM_DEFAULT_TXD, em_txd);
577 		adapter->num_tx_desc = EM_DEFAULT_TXD;
578 	} else
579 		adapter->num_tx_desc = em_txd;
580 	if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
581 	    (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
582 	    (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
583 	    (em_rxd < EM_MIN_RXD)) {
584 		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
585 		    EM_DEFAULT_RXD, em_rxd);
586 		adapter->num_rx_desc = EM_DEFAULT_RXD;
587 	} else
588 		adapter->num_rx_desc = em_rxd;
589 
590 	adapter->hw.mac.autoneg = DO_AUTO_NEG;
591 	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
592 	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
593 	adapter->rx_buffer_len = 2048;
594 
595 	e1000_init_script_state_82541(&adapter->hw, TRUE);
596 	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
597 
598 	/* Copper options */
599 	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
600 		adapter->hw.phy.mdix = AUTO_ALL_MODES;
601 		adapter->hw.phy.disable_polarity_correction = FALSE;
602 		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
603 	}
604 
605 	/*
606 	 * Set the frame limits assuming
607 	 * standard ethernet sized frames.
608 	 */
609 	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
610 	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
611 
612 	/*
613 	 * This controls when hardware reports transmit completion
614 	 * status.
615 	 */
616 	adapter->hw.mac.report_tx_early = 1;
617 
618 	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
619 	    EM_DBA_ALIGN);
620 
621 	/* Allocate Transmit Descriptor ring */
622 	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
623 		device_printf(dev, "Unable to allocate tx_desc memory\n");
624 		error = ENOMEM;
625 		goto err_tx_desc;
626 	}
627 	adapter->tx_desc_base =
628 	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
629 
630 	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
631 	    EM_DBA_ALIGN);
632 
633 	/* Allocate Receive Descriptor ring */
634 	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
635 		device_printf(dev, "Unable to allocate rx_desc memory\n");
636 		error = ENOMEM;
637 		goto err_rx_desc;
638 	}
639 	adapter->rx_desc_base =
640 	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
641 
642 	/* Make sure we have a good EEPROM before we read from it */
643 	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
644 		/*
645 		** Some PCI-E parts fail the first check due to
646 		** the link being in sleep state, call it again,
647 		** if it fails a second time its a real issue.
648 		*/
649 		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
650 			device_printf(dev,
651 			    "The EEPROM Checksum Is Not Valid\n");
652 			error = EIO;
653 			goto err_hw_init;
654 		}
655 	}
656 
657 	/* Initialize the hardware */
658 	if (em_hardware_init(adapter)) {
659 		device_printf(dev, "Unable to initialize the hardware\n");
660 		error = EIO;
661 		goto err_hw_init;
662 	}
663 
664 	/* Copy the permanent MAC address out of the EEPROM */
665 	if (e1000_read_mac_addr(&adapter->hw) < 0) {
666 		device_printf(dev, "EEPROM read error while reading MAC"
667 		    " address\n");
668 		error = EIO;
669 		goto err_hw_init;
670 	}
671 
672 	if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
673 		device_printf(dev, "Invalid MAC address\n");
674 		error = EIO;
675 		goto err_hw_init;
676 	}
677 
678 	/* Allocate transmit descriptors and buffers */
679 	if (em_allocate_transmit_structures(adapter)) {
680 		device_printf(dev, "Could not setup transmit structures\n");
681 		error = ENOMEM;
682 		goto err_tx_struct;
683 	}
684 
685 	/* Allocate receive descriptors and buffers */
686 	if (em_allocate_receive_structures(adapter)) {
687 		device_printf(dev, "Could not setup receive structures\n");
688 		error = ENOMEM;
689 		goto err_rx_struct;
690 	}
691 
692 	/*
693 	**  Do interrupt configuration
694 	*/
695 	if (adapter->msi > 1) /* Do MSI/X */
696 		error = em_allocate_msix(adapter);
697 	else  /* MSI or Legacy */
698 		error = em_allocate_legacy(adapter);
699 	if (error)
700 		goto err_rx_struct;
701 
702 	/* Setup OS specific network interface */
703 	em_setup_interface(dev, adapter);
704 
705 	/* Initialize statistics */
706 	em_update_stats_counters(adapter);
707 
708 	adapter->hw.mac.get_link_status = 1;
709 	em_update_link_status(adapter);
710 
711 	/* Indicate SOL/IDER usage */
712 	if (e1000_check_reset_block(&adapter->hw))
713 		device_printf(dev,
714 		    "PHY reset is blocked due to SOL/IDER session.\n");
715 
716 	/* Determine if we have to control management hardware */
717 	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
718 
719 	/*
720 	 * Setup Wake-on-Lan
721 	 */
722 	switch (adapter->hw.mac.type) {
723 
724 	case e1000_82542:
725 	case e1000_82543:
726 		break;
727 	case e1000_82546:
728 	case e1000_82546_rev_3:
729 	case e1000_82571:
730 	case e1000_80003es2lan:
731 		if (adapter->hw.bus.func == 1)
732 			e1000_read_nvm(&adapter->hw,
733 			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
734 		else
735 			e1000_read_nvm(&adapter->hw,
736 			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
737 		eeprom_data &= EM_EEPROM_APME;
738 		break;
739 	default:
740 		/* APME bit in EEPROM is mapped to WUC.APME */
741 		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) &
742 		    E1000_WUC_APME;
743 		break;
744 	}
745 	if (eeprom_data)
746 		adapter->wol = E1000_WUFC_MAG;
747 	/*
748          * We have the eeprom settings, now apply the special cases
749          * where the eeprom may be wrong or the board won't support
750          * wake on lan on a particular port
751 	 */
752 	device_id = pci_get_device(dev);
753         switch (device_id) {
754 	case E1000_DEV_ID_82546GB_PCIE:
755 		adapter->wol = 0;
756 		break;
757 	case E1000_DEV_ID_82546EB_FIBER:
758 	case E1000_DEV_ID_82546GB_FIBER:
759 	case E1000_DEV_ID_82571EB_FIBER:
760 		/* Wake events only supported on port A for dual fiber
761 		 * regardless of eeprom setting */
762 		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
763 		    E1000_STATUS_FUNC_1)
764 			adapter->wol = 0;
765 		break;
766 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
767 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
768 	case E1000_DEV_ID_82571EB_QUAD_FIBER:
769 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
770                 /* if quad port adapter, disable WoL on all but port A */
771 		if (global_quad_port_a != 0)
772 			adapter->wol = 0;
773 		/* Reset for multiple quad port adapters */
774 		if (++global_quad_port_a == 4)
775 			global_quad_port_a = 0;
776                 break;
777 	}
778 
779 	/* Do we need workaround for 82544 PCI-X adapter? */
780 	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
781 	    adapter->hw.mac.type == e1000_82544)
782 		adapter->pcix_82544 = TRUE;
783 	else
784 		adapter->pcix_82544 = FALSE;
785 
786 	/* Tell the stack that the interface is not active */
787 	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
788 
789 	INIT_DEBUGOUT("em_attach: end");
790 
791 	return (0);
792 
793 err_rx_struct:
794 	em_free_transmit_structures(adapter);
795 err_tx_struct:
796 err_hw_init:
797 	em_release_hw_control(adapter);
798 	e1000_remove_device(&adapter->hw);
799 	em_dma_free(adapter, &adapter->rxdma);
800 err_rx_desc:
801 	em_dma_free(adapter, &adapter->txdma);
802 err_tx_desc:
803 err_pci:
804 	em_free_pci_resources(adapter);
805 	EM_TX_LOCK_DESTROY(adapter);
806 	EM_RX_LOCK_DESTROY(adapter);
807 	EM_CORE_LOCK_DESTROY(adapter);
808 
809 	return (error);
810 }
811 
812 /*********************************************************************
813  *  Device removal routine
814  *
815  *  The detach entry point is called when the driver is being removed.
816  *  This routine stops the adapter and deallocates all the resources
817  *  that were allocated for driver operation.
818  *
819  *  return 0 on success, positive on failure
820  *********************************************************************/
821 
static int
em_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
#if __FreeBSD_version >= 700000
	if (adapter->ifp->if_vlantrunk != NULL) {
#else
	if (adapter->ifp->if_nvlans != 0) {
#endif
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	/* Deregister from the polling framework before stopping. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/*
	 * Mark the detach in progress so em_ioctl() refuses new work,
	 * then quiesce the hardware under both locks.
	 */
	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	adapter->in_detach = 1;
	em_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	em_release_manageability(adapter);

	/*
	 * On MACs that share the interface with firmware manageability,
	 * hand control back when a management session is active.
	 */
	if (((adapter->hw.mac.type == e1000_82573) ||
	    (adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_ich10lan) ||
	    (adapter->hw.mac.type == e1000_ich9lan)) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_release_hw_control(adapter);

	/* Arm Wake On Lan so the port can still wake the system. */
	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		em_enable_wakeup(dev);
	}

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

	/*
	 * Unhook from the network stack, then drain the timers;
	 * callout_drain() must not be called with the locks held.
	 */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	e1000_remove_device(&adapter->hw);
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		em_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		em_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	/* Locks go last, after all users are gone. */
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
899 
900 /*********************************************************************
901  *
902  *  Shutdown entry point
903  *
904  **********************************************************************/
905 
906 static int
907 em_shutdown(device_t dev)
908 {
909 	return em_suspend(dev);
910 }
911 
912 /*
913  * Suspend/resume device methods.
914  */
915 static int
916 em_suspend(device_t dev)
917 {
918 	struct adapter *adapter = device_get_softc(dev);
919 
920 	EM_CORE_LOCK(adapter);
921 
922 	EM_TX_LOCK(adapter);
923 	em_stop(adapter);
924 	EM_TX_UNLOCK(adapter);
925 
926         em_release_manageability(adapter);
927 
928         if (((adapter->hw.mac.type == e1000_82573) ||
929             (adapter->hw.mac.type == e1000_ich8lan) ||
930             (adapter->hw.mac.type == e1000_ich10lan) ||
931             (adapter->hw.mac.type == e1000_ich9lan)) &&
932             e1000_check_mng_mode(&adapter->hw))
933                 em_release_hw_control(adapter);
934 
935         if (adapter->wol) {
936                 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
937                 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
938                 em_enable_wakeup(dev);
939         }
940 
941 	EM_CORE_UNLOCK(adapter);
942 
943 	return bus_generic_suspend(dev);
944 }
945 
946 static int
947 em_resume(device_t dev)
948 {
949 	struct adapter *adapter = device_get_softc(dev);
950 	struct ifnet *ifp = adapter->ifp;
951 
952 	EM_CORE_LOCK(adapter);
953 	em_init_locked(adapter);
954 	em_init_manageability(adapter);
955 	EM_CORE_UNLOCK(adapter);
956 	em_start(ifp);
957 
958 	return bus_generic_resume(dev);
959 }
960 
961 
962 /*********************************************************************
963  *  Transmit entry point
964  *
965  *  em_start is called by the stack to initiate a transmit.
966  *  The driver will remain in this routine as long as there are
967  *  packets to transmit and transmit resources are available.
968  *  In case resources are not available stack is notified and
969  *  the packet is requeued.
970  **********************************************************************/
971 
972 static void
973 em_start_locked(struct ifnet *ifp)
974 {
975 	struct adapter	*adapter = ifp->if_softc;
976 	struct mbuf	*m_head;
977 
978 	EM_TX_LOCK_ASSERT(adapter);
979 
980 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
981 	    IFF_DRV_RUNNING)
982 		return;
983 	if (!adapter->link_active)
984 		return;
985 
986 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
987 
988 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
989 		if (m_head == NULL)
990 			break;
991 		/*
992 		 *  Encapsulation can modify our pointer, and or make it
993 		 *  NULL on failure.  In that event, we can't requeue.
994 		 */
995 		if (em_xmit(adapter, &m_head)) {
996 			if (m_head == NULL)
997 				break;
998 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
999 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1000 			break;
1001 		}
1002 
1003 		/* Send a copy of the frame to the BPF listener */
1004 		ETHER_BPF_MTAP(ifp, m_head);
1005 
1006 		/* Set timeout in case hardware has problems transmitting. */
1007 		adapter->watchdog_timer = EM_TX_TIMEOUT;
1008 	}
1009 }
1010 
1011 static void
1012 em_start(struct ifnet *ifp)
1013 {
1014 	struct adapter *adapter = ifp->if_softc;
1015 
1016 	EM_TX_LOCK(adapter);
1017 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1018 		em_start_locked(ifp);
1019 	EM_TX_UNLOCK(adapter);
1020 }
1021 
1022 /*********************************************************************
1023  *  Ioctl entry point
1024  *
1025  *  em_ioctl is called when the user wants to configure the
1026  *  interface.
1027  *
1028  *  return 0 on success, positive on failure
1029  **********************************************************************/
1030 
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int error = 0;

	/* Refuse new requests once a detach is in progress. */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
		if (ifa->ifa_addr->sa_family == AF_INET) {
			/*
			 * XXX
			 * Since resetting hardware takes a very long time
			 * and results in link renegotiation we only
			 * initialize the hardware only when it is absolutely
			 * required.
			 */
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				EM_CORE_LOCK(adapter);
				em_init_locked(adapter);
				EM_CORE_UNLOCK(adapter);
			}
			arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;
		u16 eeprom_data = 0;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		/* The jumbo-frame ceiling depends on the MAC type. */
		EM_CORE_LOCK(adapter);
		switch (adapter->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_3GIO_3, 1, &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames - fall thru */
		case e1000_82571:
		case e1000_82572:
		case e1000_ich9lan:
		case e1000_ich10lan:
		case e1000_82574:
		case e1000_80003es2lan:	/* Limit Jumbo Frame size */
			max_frame_size = 9234;
			break;
			/* Adapters that do not support jumbo frames */
		case e1000_82542:
		case e1000_ich8lan:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		/* A valid MTU change requires a full re-init. */
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		em_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Only a PROMISC/ALLMULTI change requires
				 * reprogramming; avoid a full re-init.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					em_disable_promisc(adapter);
					em_set_promisc(adapter);
				}
			} else
				em_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				em_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		/* Remember the flags so the next delta can be computed. */
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			/* 82542 rev 2 needs its receive unit reset here. */
			if (adapter->hw.mac.type == e1000_82542 &&
	    		    adapter->hw.revision_id == E1000_REVISION_2) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			/* Leave interrupts masked while polling is active. */
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH -- set is handled by ifmedia_ioctl too */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask holds only the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			reinit = 1;
		}
#endif

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		/* Offload changes only take effect after a re-init. */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
#if __FreeBSD_version >= 700000
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	    }

#ifdef EM_TIMESYNC
	/*
	** IOCTL support for Precision Time (IEEE 1588) Support
	*/
	case EM_TIMESYNC_READTS:
	    {
		u32 rx_ctl, tx_ctl;
		struct em_tsync_read *tdata;

		tdata = (struct em_tsync_read *) ifr->ifr_data;

		IOCTL_DEBUGOUT("Reading Timestamp\n");

		if (tdata->read_current_time) {
			getnanotime(&tdata->system_time);
			tdata->network_time = E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
			tdata->network_time |=
			    (u64)E1000_READ_REG(&adapter->hw, E1000_SYSTIMH ) << 32;
		}

		rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
		tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);

		IOCTL_DEBUGOUT1("RX_CTL value = %u\n", rx_ctl);
		IOCTL_DEBUGOUT1("TX_CTL value = %u\n", tx_ctl);

		/* Bit 0 of the TSYNC control regs flags a valid stamp. */
		if (rx_ctl & 0x1) {
			IOCTL_DEBUGOUT("RX timestamp is valid\n");
			u32 tmp;
			unsigned char *tmp_cp;

			tdata->rx_valid = 1;
			tdata->rx_stamp = E1000_READ_REG(&adapter->hw, E1000_RXSTMPL);
			tdata->rx_stamp |= (u64)E1000_READ_REG(&adapter->hw,
			    E1000_RXSTMPH) << 32;

			/* Source id is spread across two registers. */
			tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRL);
			tmp_cp = (unsigned char *) &tmp;
			tdata->srcid[0] = tmp_cp[0];
			tdata->srcid[1] = tmp_cp[1];
			tdata->srcid[2] = tmp_cp[2];
			tdata->srcid[3] = tmp_cp[3];
			tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRH);
			tmp_cp = (unsigned char *) &tmp;
			tdata->srcid[4] = tmp_cp[0];
			tdata->srcid[5] = tmp_cp[1];
			/* Sequence id lives in the upper 16 bits. */
			tdata->seqid = tmp >> 16;
			tdata->seqid = htons(tdata->seqid);
		} else
			tdata->rx_valid = 0;

		if (tx_ctl & 0x1) {
			IOCTL_DEBUGOUT("TX timestamp is valid\n");
			tdata->tx_valid = 1;
			tdata->tx_stamp = E1000_READ_REG(&adapter->hw, E1000_TXSTMPL);
			tdata->tx_stamp |= (u64) E1000_READ_REG(&adapter->hw,
			    E1000_TXSTMPH) << 32;
		} else
			tdata->tx_valid = 0;

		return (0);
	    }
#endif	/* EM_TIMESYNC */

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1289 
1290 /*********************************************************************
1291  *  Watchdog timer:
1292  *
1293  *  This routine is called from the local timer every second.
1294  *  As long as transmit descriptors are being cleaned the value
1295  *  is non-zero and we do nothing. Reaching 0 indicates a tx hang
1296  *  and we then reset the device.
1297  *
1298  **********************************************************************/
1299 
static void
em_watchdog(struct adapter *adapter)
{

	EM_CORE_LOCK_ASSERT(adapter);

	/*
	** The timer is set to 5 every time start queues a packet.
	** Then txeof keeps resetting it as long as it cleans at
	** least one descriptor.
	** Finally, anytime all descriptors are clean the timer is
	** set to 0.
	*/
	EM_TX_LOCK(adapter);
	/* Idle (0) or still counting down: no hang yet. */
	if ((adapter->watchdog_timer == 0) || (--adapter->watchdog_timer)) {
		EM_TX_UNLOCK(adapter);
		return;
	}

	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
	    E1000_STATUS_TXOFF) {
		adapter->watchdog_timer = EM_TX_TIMEOUT;
		EM_TX_UNLOCK(adapter);
		return;
	}

	/* Only announce the timeout when the link check succeeds. */
	if (e1000_check_for_link(&adapter->hw) == 0)
		device_printf(adapter->dev, "watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	/* Drop the TX lock before re-init; em_init_locked retakes it. */
	EM_TX_UNLOCK(adapter);

	em_init_locked(adapter);
}
1337 
1338 /*********************************************************************
1339  *  Init entry point
1340  *
1341  *  This routine is used in two ways. It is used by the stack as
1342  *  init entry point in network interface structure. It is also used
1343  *  by the driver as a hw/sw initialization routine to get to a
1344  *  consistent state.
1345  *
1346  *  return 0 on success, positive on failure
1347  **********************************************************************/
1348 
static void
em_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("em_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	/* Quiesce any current activity before reprogramming. */
	EM_TX_LOCK(adapter);
	em_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* 82547 also drives the TX FIFO workaround state. */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case e1000_82574:
			pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;
	case e1000_ich9lan:
	case e1000_ich10lan:
#define E1000_PBA_10K	0x000A
		pba = E1000_PBA_10K;
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset, we make a duplicate
	 * in RAR[14] for that eventuality, this assures
	 * the interface continues to function.
	 */
	if (adapter->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&adapter->hw, TRUE);
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	em_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_hw_vlans(adapter);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 700000
		if (ifp->if_capenable & IFCAP_TSO4)
			ifp->if_hwassist |= CSUM_TSO;
#endif
	}

	/* Configure for OS presence */
	em_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	em_setup_transmit_structures(adapter);
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		em_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	/* Mark the interface up and ready for transmit. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the once-a-second housekeeping timer. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;
		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		** Set the IVAR - interrupt vector routing.
		** Each nibble represents a vector, high bit
		** is enable, other 3 bits are the MSIX table
		** entry, we map RXQ0 to 0, TXQ0 to 1, and
		** Link (other) to 2, hence the magic number.
		*/
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

#ifdef EM_TIMESYNC
	/* Initializae IEEE 1588 Precision Time hardware */
	if ((adapter->hw.mac.type == e1000_82574) ||
	    (adapter->hw.mac.type == e1000_ich10lan))
		em_tsync_init(adapter);
#endif

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy.reset_disable = TRUE;
}
1526 
static void
em_init(void *arg)
{
	struct adapter *sc = arg;

	/* Public init entry point: grab the core lock and do the work. */
	EM_CORE_LOCK(sc);
	em_init_locked(sc);
	EM_CORE_UNLOCK(sc);
}
1536 
1537 
1538 #ifdef DEVICE_POLLING
1539 /*********************************************************************
1540  *
1541  *  Legacy polling routine
1542  *
1543  *********************************************************************/
static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u32		reg_icr;

	/* Bail out if the interface went down since we were scheduled. */
	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	/* On a status poll, also pick up pending link-state changes. */
	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    em_local_timer, adapter);
		}
	}
	/* Receive processing runs without the core lock. */
	EM_CORE_UNLOCK(adapter);

	em_rxeof(adapter, count);

	/* Reap transmit completions and restart the tx path. */
	EM_TX_LOCK(adapter);
	em_txeof(adapter);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
}
1577 #endif /* DEVICE_POLLING */
1578 
1579 #ifdef EM_LEGACY_IRQ
1580 /*********************************************************************
1581  *
1582  *  Legacy Interrupt Service routine
1583  *
1584  *********************************************************************/
1585 
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_icr;


	/* Polling mode owns the hardware; ignore stray interrupts. */
	if (ifp->if_capenable & IFCAP_POLLING)
		return;

	EM_CORE_LOCK(adapter);
	/* Loop until the ICR read shows no work left for us. */
	for (;;) {
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

		/* 82571+ set INT_ASSERTED when the interrupt is ours. */
		if (adapter->hw.mac.type >= e1000_82571 &&
	    	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
			break;
		else if (reg_icr == 0)
			break;

		/*
		 * XXX: some laptops trigger several spurious interrupts
		 * on em(4) when in the resume cycle. The ICR register
		 * reports all-ones value in this case. Processing such
		 * interrupts would lead to a freeze. I don't know why.
		 */
		if (reg_icr == 0xffffffff)
			break;

		/*
		 * Drop the core lock around rx/tx processing; the tx
		 * cleanup takes the TX lock instead.
		 */
		EM_CORE_UNLOCK(adapter);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			em_rxeof(adapter, -1);
			EM_TX_LOCK(adapter);
			em_txeof(adapter);
			EM_TX_UNLOCK(adapter);
		}
		EM_CORE_LOCK(adapter);

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			em_update_link_status(adapter);
			/* Deal with TX cruft when link lost */
			em_tx_purge(adapter);
			callout_reset(&adapter->timer, hz,
			    em_local_timer, adapter);
		}

		/* Count receive overruns for statistics. */
		if (reg_icr & E1000_ICR_RXO)
			adapter->rx_overruns++;
	}
	EM_CORE_UNLOCK(adapter);

	/* Restart transmit if there is still queued work. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start(ifp);
}
1645 
1646 #else /* EM_FAST_IRQ, then fast interrupt routines only */
1647 
1648 static void
1649 em_handle_link(void *context, int pending)
1650 {
1651 	struct adapter	*adapter = context;
1652 	struct ifnet *ifp = adapter->ifp;
1653 
1654 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1655 		return;
1656 
1657 	EM_CORE_LOCK(adapter);
1658 	callout_stop(&adapter->timer);
1659 	em_update_link_status(adapter);
1660 	/* Deal with TX cruft when link lost */
1661 	em_tx_purge(adapter);
1662 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1663 	EM_CORE_UNLOCK(adapter);
1664 }
1665 
1666 
1667 /* Combined RX/TX handler, used by Legacy and MSI */
1668 static void
1669 em_handle_rxtx(void *context, int pending)
1670 {
1671 	struct adapter	*adapter = context;
1672 	struct ifnet	*ifp = adapter->ifp;
1673 
1674 
1675 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1676 		if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
1677 			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1678 		EM_TX_LOCK(adapter);
1679 		em_txeof(adapter);
1680 
1681 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1682 			em_start_locked(ifp);
1683 		EM_TX_UNLOCK(adapter);
1684 	}
1685 
1686 	em_enable_intr(adapter);
1687 }
1688 
1689 static void
1690 em_handle_rx(void *context, int pending)
1691 {
1692 	struct adapter	*adapter = context;
1693 	struct ifnet	*ifp = adapter->ifp;
1694 
1695 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1696 	    (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1697 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1698 
1699 }
1700 
1701 static void
1702 em_handle_tx(void *context, int pending)
1703 {
1704 	struct adapter	*adapter = context;
1705 	struct ifnet	*ifp = adapter->ifp;
1706 
1707 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1708 		EM_TX_LOCK(adapter);
1709 		em_txeof(adapter);
1710 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1711 			em_start_locked(ifp);
1712 		EM_TX_UNLOCK(adapter);
1713 	}
1714 }
1715 
1716 /*********************************************************************
1717  *
1718  *  Fast Legacy/MSI Combined Interrupt Service routine
1719  *
1720  *********************************************************************/
/* Before 7.0 filter handlers returned void; FILTER_* expand to nothing. */
#if __FreeBSD_version < 700000
#define FILTER_STRAY
#define FILTER_HANDLED
static void
#else
static int
#endif
em_irq_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	u32		reg_icr;

	ifp = adapter->ifp;

	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return FILTER_STRAY;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return FILTER_STRAY;

	/*
	 * Starting with the 82571 chip, bit 31 should be used to
	 * determine whether the interrupt belongs to us.
	 */
	if (adapter->hw.mac.type >= e1000_82571 &&
	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return FILTER_STRAY;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	em_disable_intr(adapter);
	/* Defer the actual rx/tx processing to task context. */
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		adapter->hw.mac.get_link_status = 1;
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
	}

	/* Count receive overruns for statistics. */
	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
	return FILTER_HANDLED;
}
1772 
1773 /*********************************************************************
1774  *
1775  *  MSIX Interrupt Service Routines
1776  *
1777  **********************************************************************/
1778 #define EM_MSIX_TX	0x00040000
1779 #define EM_MSIX_RX	0x00010000
1780 #define EM_MSIX_LINK	0x00100000
1781 
1782 static void
1783 em_msix_tx(void *arg)
1784 {
1785 	struct adapter *adapter = arg;
1786 	struct ifnet	*ifp = adapter->ifp;
1787 
1788 	++adapter->tx_irq;
1789 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1790 		EM_TX_LOCK(adapter);
1791 		em_txeof(adapter);
1792 		EM_TX_UNLOCK(adapter);
1793 		taskqueue_enqueue(adapter->tq, &adapter->tx_task);
1794 	}
1795 	/* Reenable this interrupt */
1796 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
1797 	return;
1798 }
1799 
1800 /*********************************************************************
1801  *
1802  *  MSIX RX Interrupt Service routine
1803  *
1804  **********************************************************************/
1805 
1806 static void
1807 em_msix_rx(void *arg)
1808 {
1809 	struct adapter *adapter = arg;
1810 	struct ifnet	*ifp = adapter->ifp;
1811 
1812 	++adapter->rx_irq;
1813 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1814 	    (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1815 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1816 	/* Reenable this interrupt */
1817 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
1818 	return;
1819 }
1820 
1821 /*********************************************************************
1822  *
1823  *  MSIX Link Fast Interrupt Service routine
1824  *
1825  **********************************************************************/
1826 
1827 static void
1828 em_msix_link(void *arg)
1829 {
1830 	struct adapter	*adapter = arg;
1831 	u32		reg_icr;
1832 
1833 	++adapter->link_irq;
1834 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1835 
1836 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1837 		adapter->hw.mac.get_link_status = 1;
1838 		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1839 	}
1840 	E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1841 	    EM_MSIX_LINK | E1000_IMS_LSC);
1842 	return;
1843 }
1844 #endif /* EM_FAST_IRQ */
1845 
1846 /*********************************************************************
1847  *
1848  *  Media Ioctl callback
1849  *
1850  *  This routine is called whenever the user queries the status of
1851  *  the interface using ifconfig.
1852  *
1853  **********************************************************************/
1854 static void
1855 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1856 {
1857 	struct adapter *adapter = ifp->if_softc;
1858 	u_char fiber_type = IFM_1000_SX;
1859 
1860 	INIT_DEBUGOUT("em_media_status: begin");
1861 
1862 	EM_CORE_LOCK(adapter);
1863 	em_update_link_status(adapter);
1864 
1865 	ifmr->ifm_status = IFM_AVALID;
1866 	ifmr->ifm_active = IFM_ETHER;
1867 
1868 	if (!adapter->link_active) {
1869 		EM_CORE_UNLOCK(adapter);
1870 		return;
1871 	}
1872 
1873 	ifmr->ifm_status |= IFM_ACTIVE;
1874 
1875 	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1876 	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1877 		if (adapter->hw.mac.type == e1000_82545)
1878 			fiber_type = IFM_1000_LX;
1879 		ifmr->ifm_active |= fiber_type | IFM_FDX;
1880 	} else {
1881 		switch (adapter->link_speed) {
1882 		case 10:
1883 			ifmr->ifm_active |= IFM_10_T;
1884 			break;
1885 		case 100:
1886 			ifmr->ifm_active |= IFM_100_TX;
1887 			break;
1888 		case 1000:
1889 			ifmr->ifm_active |= IFM_1000_T;
1890 			break;
1891 		}
1892 		if (adapter->link_duplex == FULL_DUPLEX)
1893 			ifmr->ifm_active |= IFM_FDX;
1894 		else
1895 			ifmr->ifm_active |= IFM_HDX;
1896 	}
1897 	EM_CORE_UNLOCK(adapter);
1898 }
1899 
1900 /*********************************************************************
1901  *
1902  *  Media Ioctl callback
1903  *
1904  *  This routine is called when the user changes speed/duplex using
1905  *  media/mediopt option with ifconfig.
1906  *
1907  **********************************************************************/
1908 static int
1909 em_media_change(struct ifnet *ifp)
1910 {
1911 	struct adapter *adapter = ifp->if_softc;
1912 	struct ifmedia  *ifm = &adapter->media;
1913 
1914 	INIT_DEBUGOUT("em_media_change: begin");
1915 
1916 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1917 		return (EINVAL);
1918 
1919 	EM_CORE_LOCK(adapter);
1920 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1921 	case IFM_AUTO:
1922 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1923 		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1924 		break;
1925 	case IFM_1000_LX:
1926 	case IFM_1000_SX:
1927 	case IFM_1000_T:
1928 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1929 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1930 		break;
1931 	case IFM_100_TX:
1932 		adapter->hw.mac.autoneg = FALSE;
1933 		adapter->hw.phy.autoneg_advertised = 0;
1934 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1935 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1936 		else
1937 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1938 		break;
1939 	case IFM_10_T:
1940 		adapter->hw.mac.autoneg = FALSE;
1941 		adapter->hw.phy.autoneg_advertised = 0;
1942 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1943 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1944 		else
1945 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1946 		break;
1947 	default:
1948 		device_printf(adapter->dev, "Unsupported media type\n");
1949 	}
1950 
1951 	/* As the speed/duplex settings my have changed we need to
1952 	 * reset the PHY.
1953 	 */
1954 	adapter->hw.phy.reset_disable = FALSE;
1955 
1956 	em_init_locked(adapter);
1957 	EM_CORE_UNLOCK(adapter);
1958 
1959 	return (0);
1960 }
1961 
1962 /*********************************************************************
1963  *
1964  *  This routine maps the mbufs to tx descriptors.
1965  *
1966  *  return 0 on success, positive on failure
1967  **********************************************************************/
1968 
1969 static int
1970 em_xmit(struct adapter *adapter, struct mbuf **m_headp)
1971 {
1972 	bus_dma_segment_t	segs[EM_MAX_SCATTER];
1973 	bus_dmamap_t		map;
1974 	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
1975 	struct e1000_tx_desc	*ctxd = NULL;
1976 	struct mbuf		*m_head;
1977 	u32			txd_upper, txd_lower, txd_used, txd_saved;
1978 	int			nsegs, i, j, first, last = 0;
1979 	int			error, do_tso, tso_desc = 0;
1980 #if __FreeBSD_version < 700000
1981 	struct m_tag		*mtag;
1982 #endif
1983 	m_head = *m_headp;
1984 	txd_upper = txd_lower = txd_used = txd_saved = 0;
1985 
1986 #if __FreeBSD_version >= 700000
1987 	do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
1988 #else
1989 	do_tso = 0;
1990 #endif
1991 
1992         /*
1993          * Force a cleanup if number of TX descriptors
1994          * available hits the threshold
1995          */
1996 	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1997 		em_txeof(adapter);
1998 		/* Now do we at least have a minimal? */
1999 		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
2000 			adapter->no_tx_desc_avail1++;
2001 			return (ENOBUFS);
2002 		}
2003 	}
2004 
2005 
2006 	/*
2007 	 * TSO workaround:
2008 	 *  If an mbuf is only header we need
2009 	 *     to pull 4 bytes of data into it.
2010 	 */
2011 	if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
2012 		m_head = m_pullup(m_head, M_TSO_LEN + 4);
2013 		*m_headp = m_head;
2014 		if (m_head == NULL)
2015 			return (ENOBUFS);
2016 	}
2017 
2018 	/*
2019 	 * Map the packet for DMA
2020 	 *
2021 	 * Capture the first descriptor index,
2022 	 * this descriptor will have the index
2023 	 * of the EOP which is the only one that
2024 	 * now gets a DONE bit writeback.
2025 	 */
2026 	first = adapter->next_avail_tx_desc;
2027 	tx_buffer = &adapter->tx_buffer_area[first];
2028 	tx_buffer_mapped = tx_buffer;
2029 	map = tx_buffer->map;
2030 
2031 	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2032 	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2033 
2034 	/*
2035 	 * There are two types of errors we can (try) to handle:
2036 	 * - EFBIG means the mbuf chain was too long and bus_dma ran
2037 	 *   out of segments.  Defragment the mbuf chain and try again.
2038 	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
2039 	 *   at this point in time.  Defer sending and try again later.
2040 	 * All other errors, in particular EINVAL, are fatal and prevent the
2041 	 * mbuf chain from ever going through.  Drop it and report error.
2042 	 */
2043 	if (error == EFBIG) {
2044 		struct mbuf *m;
2045 
2046 		m = m_defrag(*m_headp, M_DONTWAIT);
2047 		if (m == NULL) {
2048 			adapter->mbuf_alloc_failed++;
2049 			m_freem(*m_headp);
2050 			*m_headp = NULL;
2051 			return (ENOBUFS);
2052 		}
2053 		*m_headp = m;
2054 
2055 		/* Try it again */
2056 		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2057 		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2058 
2059 		if (error == ENOMEM) {
2060 			adapter->no_tx_dma_setup++;
2061 			return (error);
2062 		} else if (error != 0) {
2063 			adapter->no_tx_dma_setup++;
2064 			m_freem(*m_headp);
2065 			*m_headp = NULL;
2066 			return (error);
2067 		}
2068 	} else if (error == ENOMEM) {
2069 		adapter->no_tx_dma_setup++;
2070 		return (error);
2071 	} else if (error != 0) {
2072 		adapter->no_tx_dma_setup++;
2073 		m_freem(*m_headp);
2074 		*m_headp = NULL;
2075 		return (error);
2076 	}
2077 
2078 	/*
2079 	 * TSO Hardware workaround, if this packet is not
2080 	 * TSO, and is only a single descriptor long, and
2081 	 * it follows a TSO burst, then we need to add a
2082 	 * sentinel descriptor to prevent premature writeback.
2083 	 */
2084 	if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
2085 		if (nsegs == 1)
2086 			tso_desc = TRUE;
2087 		adapter->tx_tso = FALSE;
2088 	}
2089 
2090         if (nsegs > (adapter->num_tx_desc_avail - 2)) {
2091                 adapter->no_tx_desc_avail2++;
2092 		bus_dmamap_unload(adapter->txtag, map);
2093 		return (ENOBUFS);
2094         }
2095 	m_head = *m_headp;
2096 
2097 	/* Do hardware assists */
2098 #if __FreeBSD_version >= 700000
2099 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2100 		error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
2101 		if (error != TRUE)
2102 			return (ENXIO); /* something foobar */
2103 		/* we need to make a final sentinel transmit desc */
2104 		tso_desc = TRUE;
2105 	} else
2106 #endif
2107 #ifndef EM_TIMESYNC
2108 	/*
2109 	** Timesync needs to check the packet header
2110 	** so call checksum code to do so, but don't
2111 	** penalize the code if not defined.
2112 	*/
2113 	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2114 #endif
2115 		em_transmit_checksum_setup(adapter,  m_head,
2116 		    &txd_upper, &txd_lower);
2117 
2118 	i = adapter->next_avail_tx_desc;
2119 	if (adapter->pcix_82544)
2120 		txd_saved = i;
2121 
2122 	/* Set up our transmit descriptors */
2123 	for (j = 0; j < nsegs; j++) {
2124 		bus_size_t seg_len;
2125 		bus_addr_t seg_addr;
2126 		/* If adapter is 82544 and on PCIX bus */
2127 		if(adapter->pcix_82544) {
2128 			DESC_ARRAY	desc_array;
2129 			u32		array_elements, counter;
2130 			/*
2131 			 * Check the Address and Length combination and
2132 			 * split the data accordingly
2133 			 */
2134 			array_elements = em_fill_descriptors(segs[j].ds_addr,
2135 			    segs[j].ds_len, &desc_array);
2136 			for (counter = 0; counter < array_elements; counter++) {
2137 				if (txd_used == adapter->num_tx_desc_avail) {
2138 					adapter->next_avail_tx_desc = txd_saved;
2139 					adapter->no_tx_desc_avail2++;
2140 					bus_dmamap_unload(adapter->txtag, map);
2141 					return (ENOBUFS);
2142 				}
2143 				tx_buffer = &adapter->tx_buffer_area[i];
2144 				ctxd = &adapter->tx_desc_base[i];
2145 				ctxd->buffer_addr = htole64(
2146 				    desc_array.descriptor[counter].address);
2147 				ctxd->lower.data = htole32(
2148 				    (adapter->txd_cmd | txd_lower | (u16)
2149 				    desc_array.descriptor[counter].length));
2150 				ctxd->upper.data =
2151 				    htole32((txd_upper));
2152 				last = i;
2153 				if (++i == adapter->num_tx_desc)
2154                                          i = 0;
2155 				tx_buffer->m_head = NULL;
2156 				tx_buffer->next_eop = -1;
2157 				txd_used++;
2158                         }
2159 		} else {
2160 			tx_buffer = &adapter->tx_buffer_area[i];
2161 			ctxd = &adapter->tx_desc_base[i];
2162 			seg_addr = segs[j].ds_addr;
2163 			seg_len  = segs[j].ds_len;
2164 			/*
2165 			** TSO Workaround:
2166 			** If this is the last descriptor, we want to
2167 			** split it so we have a small final sentinel
2168 			*/
2169 			if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
2170 				seg_len -= 4;
2171 				ctxd->buffer_addr = htole64(seg_addr);
2172 				ctxd->lower.data = htole32(
2173 				adapter->txd_cmd | txd_lower | seg_len);
2174 				ctxd->upper.data =
2175 				    htole32(txd_upper);
2176 				if (++i == adapter->num_tx_desc)
2177 					i = 0;
2178 				/* Now make the sentinel */
2179 				++txd_used; /* using an extra txd */
2180 				ctxd = &adapter->tx_desc_base[i];
2181 				tx_buffer = &adapter->tx_buffer_area[i];
2182 				ctxd->buffer_addr =
2183 				    htole64(seg_addr + seg_len);
2184 				ctxd->lower.data = htole32(
2185 				adapter->txd_cmd | txd_lower | 4);
2186 				ctxd->upper.data =
2187 				    htole32(txd_upper);
2188 				last = i;
2189 				if (++i == adapter->num_tx_desc)
2190 					i = 0;
2191 			} else {
2192 				ctxd->buffer_addr = htole64(seg_addr);
2193 				ctxd->lower.data = htole32(
2194 				adapter->txd_cmd | txd_lower | seg_len);
2195 				ctxd->upper.data =
2196 				    htole32(txd_upper);
2197 				last = i;
2198 				if (++i == adapter->num_tx_desc)
2199 					i = 0;
2200 			}
2201 			tx_buffer->m_head = NULL;
2202 			tx_buffer->next_eop = -1;
2203 		}
2204 	}
2205 
2206 	adapter->next_avail_tx_desc = i;
2207 	if (adapter->pcix_82544)
2208 		adapter->num_tx_desc_avail -= txd_used;
2209 	else {
2210 		adapter->num_tx_desc_avail -= nsegs;
2211 		if (tso_desc) /* TSO used an extra for sentinel */
2212 			adapter->num_tx_desc_avail -= txd_used;
2213 	}
2214 
2215         /*
2216 	** Handle VLAN tag, this is the
2217 	** biggest difference between
2218 	** 6.x and 7
2219 	*/
2220 #if __FreeBSD_version < 700000
2221         /* Find out if we are in vlan mode. */
2222         mtag = VLAN_OUTPUT_TAG(ifp, m_head);
2223         if (mtag != NULL) {
2224                 ctxd->upper.fields.special =
2225                     htole16(VLAN_TAG_VALUE(mtag));
2226 #else /* FreeBSD 7 */
2227 	if (m_head->m_flags & M_VLANTAG) {
2228 		/* Set the vlan id. */
2229 		ctxd->upper.fields.special =
2230 		    htole16(m_head->m_pkthdr.ether_vtag);
2231 #endif
2232                 /* Tell hardware to add tag */
2233                 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
2234         }
2235 
2236         tx_buffer->m_head = m_head;
2237 	tx_buffer_mapped->map = tx_buffer->map;
2238 	tx_buffer->map = map;
2239         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2240 
2241         /*
2242          * Last Descriptor of Packet
2243 	 * needs End Of Packet (EOP)
2244 	 * and Report Status (RS)
2245          */
2246         ctxd->lower.data |=
2247 	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2248 	/*
2249 	 * Keep track in the first buffer which
2250 	 * descriptor will be written back
2251 	 */
2252 	tx_buffer = &adapter->tx_buffer_area[first];
2253 	tx_buffer->next_eop = last;
2254 
2255 	/*
2256 	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2257 	 * that this frame is available to transmit.
2258 	 */
2259 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2260 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2261 	if (adapter->hw.mac.type == e1000_82547 &&
2262 	    adapter->link_duplex == HALF_DUPLEX)
2263 		em_82547_move_tail(adapter);
2264 	else {
2265 		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2266 		if (adapter->hw.mac.type == e1000_82547)
2267 			em_82547_update_fifo_head(adapter,
2268 			    m_head->m_pkthdr.len);
2269 	}
2270 
2271 #ifdef EM_TIMESYNC
2272 	if (ctxd->upper.data & E1000_TXD_EXTCMD_TSTAMP) {
2273 		HW_DEBUGOUT( "@@@ Timestamp bit is set in transmit descriptor\n" );
2274 	}
2275 #endif
2276 	return (0);
2277 }
2278 
2279 /*********************************************************************
2280  *
2281  * 82547 workaround to avoid controller hang in half-duplex environment.
2282  * The workaround is to avoid queuing a large packet that would span
2283  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2284  * in this case. We do that only when FIFO is quiescent.
2285  *
2286  **********************************************************************/
2287 static void
2288 em_82547_move_tail(void *arg)
2289 {
2290 	struct adapter *adapter = arg;
2291 	struct e1000_tx_desc *tx_desc;
2292 	u16	hw_tdt, sw_tdt, length = 0;
2293 	bool	eop = 0;
2294 
2295 	EM_TX_LOCK_ASSERT(adapter);
2296 
2297 	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
2298 	sw_tdt = adapter->next_avail_tx_desc;
2299 
2300 	while (hw_tdt != sw_tdt) {
2301 		tx_desc = &adapter->tx_desc_base[hw_tdt];
2302 		length += tx_desc->lower.flags.length;
2303 		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
2304 		if (++hw_tdt == adapter->num_tx_desc)
2305 			hw_tdt = 0;
2306 
2307 		if (eop) {
2308 			if (em_82547_fifo_workaround(adapter, length)) {
2309 				adapter->tx_fifo_wrk_cnt++;
2310 				callout_reset(&adapter->tx_fifo_timer, 1,
2311 					em_82547_move_tail, adapter);
2312 				break;
2313 			}
2314 			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
2315 			em_82547_update_fifo_head(adapter, length);
2316 			length = 0;
2317 		}
2318 	}
2319 }
2320 
2321 static int
2322 em_82547_fifo_workaround(struct adapter *adapter, int len)
2323 {
2324 	int fifo_space, fifo_pkt_len;
2325 
2326 	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2327 
2328 	if (adapter->link_duplex == HALF_DUPLEX) {
2329 		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2330 
2331 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2332 			if (em_82547_tx_fifo_reset(adapter))
2333 				return (0);
2334 			else
2335 				return (1);
2336 		}
2337 	}
2338 
2339 	return (0);
2340 }
2341 
2342 static void
2343 em_82547_update_fifo_head(struct adapter *adapter, int len)
2344 {
2345 	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2346 
2347 	/* tx_fifo_head is always 16 byte aligned */
2348 	adapter->tx_fifo_head += fifo_pkt_len;
2349 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2350 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
2351 	}
2352 }
2353 
2354 
2355 static int
2356 em_82547_tx_fifo_reset(struct adapter *adapter)
2357 {
2358 	u32 tctl;
2359 
2360 	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
2361 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
2362 	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
2363 	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
2364 	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
2365 	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
2366 	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
2367 		/* Disable TX unit */
2368 		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2369 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
2370 		    tctl & ~E1000_TCTL_EN);
2371 
2372 		/* Reset FIFO pointers */
2373 		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
2374 		    adapter->tx_head_addr);
2375 		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2376 		    adapter->tx_head_addr);
2377 		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2378 		    adapter->tx_head_addr);
2379 		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2380 		    adapter->tx_head_addr);
2381 
2382 		/* Re-enable TX unit */
2383 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2384 		E1000_WRITE_FLUSH(&adapter->hw);
2385 
2386 		adapter->tx_fifo_head = 0;
2387 		adapter->tx_fifo_reset_cnt++;
2388 
2389 		return (TRUE);
2390 	}
2391 	else {
2392 		return (FALSE);
2393 	}
2394 }
2395 
2396 static void
2397 em_set_promisc(struct adapter *adapter)
2398 {
2399 	struct ifnet	*ifp = adapter->ifp;
2400 	u32		reg_rctl;
2401 
2402 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2403 
2404 	if (ifp->if_flags & IFF_PROMISC) {
2405 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2406 		/* Turn this on if you want to see bad packets */
2407 		if (em_debug_sbp)
2408 			reg_rctl |= E1000_RCTL_SBP;
2409 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2410 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2411 		reg_rctl |= E1000_RCTL_MPE;
2412 		reg_rctl &= ~E1000_RCTL_UPE;
2413 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2414 	}
2415 }
2416 
2417 static void
2418 em_disable_promisc(struct adapter *adapter)
2419 {
2420 	u32	reg_rctl;
2421 
2422 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2423 
2424 	reg_rctl &=  (~E1000_RCTL_UPE);
2425 	reg_rctl &=  (~E1000_RCTL_MPE);
2426 	reg_rctl &=  (~E1000_RCTL_SBP);
2427 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2428 }
2429 
2430 
2431 /*********************************************************************
2432  *  Multicast Update
2433  *
2434  *  This routine is called whenever multicast address list is updated.
2435  *
2436  **********************************************************************/
2437 
2438 static void
2439 em_set_multi(struct adapter *adapter)
2440 {
2441 	struct ifnet	*ifp = adapter->ifp;
2442 	struct ifmultiaddr *ifma;
2443 	u32 reg_rctl = 0;
2444 	u8  mta[512]; /* Largest MTS is 4096 bits */
2445 	int mcnt = 0;
2446 
2447 	IOCTL_DEBUGOUT("em_set_multi: begin");
2448 
2449 	if (adapter->hw.mac.type == e1000_82542 &&
2450 	    adapter->hw.revision_id == E1000_REVISION_2) {
2451 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2452 		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2453 			e1000_pci_clear_mwi(&adapter->hw);
2454 		reg_rctl |= E1000_RCTL_RST;
2455 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2456 		msec_delay(5);
2457 	}
2458 
2459 	IF_ADDR_LOCK(ifp);
2460 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2461 		if (ifma->ifma_addr->sa_family != AF_LINK)
2462 			continue;
2463 
2464 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2465 			break;
2466 
2467 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2468 		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2469 		mcnt++;
2470 	}
2471 	IF_ADDR_UNLOCK(ifp);
2472 
2473 	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2474 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2475 		reg_rctl |= E1000_RCTL_MPE;
2476 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2477 	} else
2478 		e1000_update_mc_addr_list(&adapter->hw, mta,
2479 		    mcnt, 1, adapter->hw.mac.rar_entry_count);
2480 
2481 	if (adapter->hw.mac.type == e1000_82542 &&
2482 	    adapter->hw.revision_id == E1000_REVISION_2) {
2483 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2484 		reg_rctl &= ~E1000_RCTL_RST;
2485 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2486 		msec_delay(5);
2487 		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2488 			e1000_pci_set_mwi(&adapter->hw);
2489 	}
2490 }
2491 
2492 
2493 /*********************************************************************
2494  *  Timer routine
2495  *
2496  *  This routine checks for link status and updates statistics.
2497  *
2498  **********************************************************************/
2499 
2500 static void
2501 em_local_timer(void *arg)
2502 {
2503 	struct adapter	*adapter = arg;
2504 	struct ifnet	*ifp = adapter->ifp;
2505 
2506 	EM_CORE_LOCK_ASSERT(adapter);
2507 
2508 	em_update_link_status(adapter);
2509 	em_update_stats_counters(adapter);
2510 
2511 	/* Reset LAA into RAR[0] on 82571 */
2512 	if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
2513 		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2514 
2515 	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
2516 		em_print_hw_stats(adapter);
2517 
2518 	em_smartspeed(adapter);
2519 
2520 	/*
2521 	 * Each second we check the watchdog to
2522 	 * protect against hardware hangs.
2523 	 */
2524 	em_watchdog(adapter);
2525 
2526 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
2527 
2528 }
2529 
/*
 * Refresh the driver's notion of link state and propagate any
 * transition to the network stack (if_link_state_change), updating
 * speed, duplex, baudrate and the watchdog on the way.
 * Must be called with the core lock held (callers here do so).
 */
static void
em_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			/* Shared code clears get_link_status once link is read */
			link_check = !hw->mac.get_link_status;
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		/* Fiber: link-up bit comes from the STATUS register */
		e1000_check_for_link(hw);
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		/* Link came up: latch speed/duplex from hardware */
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		/* Check if we must disable SPEED_MODE bit on PCI-E */
		if ((adapter->link_speed != SPEED_1000) &&
		    ((hw->mac.type == e1000_82571) ||
		    (hw->mac.type == e1000_82572))) {
			int tarc0;
			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~SPEED_MODE_BIT;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		/* link_speed is in Mbps; baudrate is in bits/sec */
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Link went down: zero the cached link parameters */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_timer = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
2595 
2596 /*********************************************************************
2597  *
2598  *  This routine disables all traffic on the adapter by issuing a
2599  *  global reset on the MAC and deallocates TX/RX buffers.
2600  *
2601  *  This routine should always be called with BOTH the CORE
2602  *  and TX locks.
2603  **********************************************************************/
2604 
2605 static void
2606 em_stop(void *arg)
2607 {
2608 	struct adapter	*adapter = arg;
2609 	struct ifnet	*ifp = adapter->ifp;
2610 
2611 	EM_CORE_LOCK_ASSERT(adapter);
2612 	EM_TX_LOCK_ASSERT(adapter);
2613 
2614 	INIT_DEBUGOUT("em_stop: begin");
2615 
2616 	em_disable_intr(adapter);
2617 	callout_stop(&adapter->timer);
2618 	callout_stop(&adapter->tx_fifo_timer);
2619 
2620 	/* Tell the stack that the interface is no longer active */
2621 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2622 
2623 #ifdef EM_TIMESYNC
2624 	/* Disable IEEE 1588 Time hardware */
2625 	if ((adapter->hw.mac.type == e1000_82574) ||
2626 	    (adapter->hw.mac.type == e1000_ich10lan))
2627 		em_tsync_disable(adapter);
2628 #endif
2629 
2630 	e1000_reset_hw(&adapter->hw);
2631 	if (adapter->hw.mac.type >= e1000_82544)
2632 		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2633 }
2634 
2635 
2636 /*********************************************************************
2637  *
2638  *  Determine hardware revision.
2639  *
2640  **********************************************************************/
2641 static void
2642 em_identify_hardware(struct adapter *adapter)
2643 {
2644 	device_t dev = adapter->dev;
2645 
2646 	/* Make sure our PCI config space has the necessary stuff set */
2647 	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2648 	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2649 	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2650 		device_printf(dev, "Memory Access and/or Bus Master bits "
2651 		    "were not set!\n");
2652 		adapter->hw.bus.pci_cmd_word |=
2653 		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2654 		pci_write_config(dev, PCIR_COMMAND,
2655 		    adapter->hw.bus.pci_cmd_word, 2);
2656 	}
2657 
2658 	/* Save off the information about this board */
2659 	adapter->hw.vendor_id = pci_get_vendor(dev);
2660 	adapter->hw.device_id = pci_get_device(dev);
2661 	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2662 	adapter->hw.subsystem_vendor_id =
2663 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2664 	adapter->hw.subsystem_device_id =
2665 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2666 
2667 	/* Do Shared Code Init and Setup */
2668 	if (e1000_set_mac_type(&adapter->hw)) {
2669 		device_printf(dev, "Setup init failure\n");
2670 		return;
2671 	}
2672 }
2673 
/*
 * Map the device's memory register BAR (and, on 82544..82570 parts,
 * the IO BAR), initialize the MSI/X resource arrays, and optionally
 * probe for MSI/MSIX vectors.  Returns E1000_SUCCESS or ENXIO.
 * NOTE(review): on the ENXIO paths the already-mapped memory BAR is
 * not released here — presumably handled on detach; confirm.
 */
static int
em_allocate_pci_resources(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	int		val, rid, error = E1000_SUCCESS;

	/* BAR(0) is the memory-mapped register window */
	rid = PCIR_BAR(0);
	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->memory);
	/* Shared code reaches registers through this handle pointer */
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Only older adapters use IO mapping */
	if ((adapter->hw.mac.type > e1000_82543) &&
	    (adapter->hw.mac.type < e1000_82571)) {
		/* Figure our where our IO BAR is ? */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (adapter->ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->ioport);
	}

	/*
	** Init the resource arrays
	**  used by MSIX setup
	*/
	for (int i = 0; i < 3; i++) {
		adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
		adapter->tag[i] = NULL;
		adapter->res[i] = NULL;
	}

	/*
	 * Setup MSI/X or MSI if PCI Express
	 */
	if (em_enable_msi)
		adapter->msi = em_setup_msix(adapter);

	adapter->hw.back = &adapter->osdep;

	return (error);
}
2746 
2747 /*********************************************************************
2748  *
2749  *  Setup the Legacy or MSI Interrupt handler
2750  *
2751  **********************************************************************/
/*
 * Allocate and wire up the single legacy/MSI interrupt vector.
 * With EM_LEGACY_IRQ defined a conventional ithread handler is
 * registered; otherwise a fast (filter) handler plus taskqueue
 * deferred-processing contexts are set up.  Returns 0 on success,
 * ENXIO or a bus_setup_intr error on failure.
 */
int
em_allocate_legacy(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int error;

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* Legacy RID is 0 */
	if (adapter->msi == 0)
		adapter->rid[0] = 0;

	/* We allocate a single interrupt resource */
	adapter->res[0] = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res[0] == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

#ifdef EM_LEGACY_IRQ
	/* We do Legacy setup */
	if ((error = bus_setup_intr(dev, adapter->res[0],
#if __FreeBSD_version > 700000
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
#else /* 6.X */
	    INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register interrupt handler");
		return (error);
	}

#else /* FAST_IRQ */
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
#if __FreeBSD_version < 700000
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET | INTR_FAST, em_irq_fast, adapter,
#else
	/* 7.x API: em_irq_fast is the filter; no ithread handler */
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET, em_irq_fast, NULL, adapter,
#endif
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
			    "handler: %d\n", error);
		/* Tear down the taskqueue we just created */
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
		return (error);
	}
#endif  /* EM_LEGACY_IRQ */

	return (0);
}
2816 
2817 /*********************************************************************
2818  *
2819  *  Setup the MSIX Interrupt handlers
2820  *   This is not really Multiqueue, rather
2821  *   its just multiple interrupt vectors.
2822  *
2823  **********************************************************************/
2824 int
2825 em_allocate_msix(struct adapter *adapter)
2826 {
2827 	device_t dev = adapter->dev;
2828 	int error;
2829 
2830 	/* Make sure all interrupts are disabled */
2831 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2832 
2833 	/* First get the resources */
2834 	for (int i = 0; i < adapter->msi; i++) {
2835 		adapter->res[i] = bus_alloc_resource_any(dev,
2836 		    SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
2837 		if (adapter->res[i] == NULL) {
2838 			device_printf(dev,
2839 			    "Unable to allocate bus resource: "
2840 			    "MSIX Interrupt\n");
2841 			return (ENXIO);
2842 		}
2843 	}
2844 
2845 	/*
2846 	 * Now allocate deferred processing contexts.
2847 	 */
2848 	TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
2849 	TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
2850 	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2851 	adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2852 	    taskqueue_thread_enqueue, &adapter->tq);
2853 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2854 	    device_get_nameunit(adapter->dev));
2855 
2856 	/*
2857 	 * And setup the interrupt handlers
2858 	 */
2859 
2860 	/* First slot to RX */
2861 	if ((error = bus_setup_intr(dev, adapter->res[0],
2862 #if __FreeBSD_version > 700000
2863 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, adapter,
2864 #else /* 6.X */
2865 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_rx, adapter,
2866 #endif
2867 	    &adapter->tag[0])) != 0) {
2868 		device_printf(dev, "Failed to register RX handler");
2869 		return (error);
2870 	}
2871 
2872 	/* Next TX */
2873 	if ((error = bus_setup_intr(dev, adapter->res[1],
2874 #if __FreeBSD_version > 700000
2875 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, adapter,
2876 #else /* 6.X */
2877 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_tx, adapter,
2878 #endif
2879 	    &adapter->tag[1])) != 0) {
2880 		device_printf(dev, "Failed to register TX handler");
2881 		return (error);
2882 	}
2883 
2884 	/* And Link */
2885 	if ((error = bus_setup_intr(dev, adapter->res[2],
2886 #if __FreeBSD_version > 700000
2887 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter,
2888 #else /* 6.X */
2889 	    INTR_TYPE_NET | INTR_MPSAFE, em_msix_link, adapter,
2890 #endif
2891 	    &adapter->tag[2])) != 0) {
2892 		device_printf(dev, "Failed to register TX handler");
2893 		return (error);
2894 	}
2895 
2896 	return (0);
2897 }
2898 
2899 static void
2900 em_free_pci_resources(struct adapter *adapter)
2901 {
2902 	device_t dev = adapter->dev;
2903 
2904 	/* Make sure the for loop below runs once */
2905 	if (adapter->msi == 0)
2906 		adapter->msi = 1;
2907 
2908 	/*
2909 	 * First release all the interrupt resources:
2910 	 *      notice that since these are just kept
2911 	 *      in an array we can do the same logic
2912 	 *      whether its MSIX or just legacy.
2913 	 */
2914 	for (int i = 0; i < adapter->msi; i++) {
2915 		if (adapter->tag[i] != NULL) {
2916 			bus_teardown_intr(dev, adapter->res[i],
2917 			    adapter->tag[i]);
2918 			adapter->tag[i] = NULL;
2919 		}
2920 		if (adapter->res[i] != NULL) {
2921 			bus_release_resource(dev, SYS_RES_IRQ,
2922 			    adapter->rid[i], adapter->res[i]);
2923 		}
2924 	}
2925 
2926 	if (adapter->msi)
2927 		pci_release_msi(dev);
2928 
2929 	if (adapter->msix != NULL)
2930 		bus_release_resource(dev, SYS_RES_MEMORY,
2931 		    PCIR_BAR(EM_MSIX_BAR), adapter->msix);
2932 
2933 	if (adapter->memory != NULL)
2934 		bus_release_resource(dev, SYS_RES_MEMORY,
2935 		    PCIR_BAR(0), adapter->memory);
2936 
2937 	if (adapter->flash != NULL)
2938 		bus_release_resource(dev, SYS_RES_MEMORY,
2939 		    EM_FLASH, adapter->flash);
2940 
2941 	if (adapter->ioport != NULL)
2942 		bus_release_resource(dev, SYS_RES_IOPORT,
2943 		    adapter->io_rid, adapter->ioport);
2944 }
2945 
2946 /*
2947  * Setup MSI/X
2948  */
2949 static int
2950 em_setup_msix(struct adapter *adapter)
2951 {
2952 	device_t dev = adapter->dev;
2953 	int val = 0;
2954 
2955 	if (adapter->hw.mac.type < e1000_82571)
2956 		return (0);
2957 
2958 	/* Setup MSI/X for Hartwell */
2959 	if (adapter->hw.mac.type == e1000_82574) {
2960 		/* Map the MSIX BAR */
2961 		int rid = PCIR_BAR(EM_MSIX_BAR);
2962 		adapter->msix = bus_alloc_resource_any(dev,
2963 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2964        		if (!adapter->msix) {
2965 			/* May not be enabled */
2966                		device_printf(adapter->dev,
2967 			    "Unable to map MSIX table \n");
2968 			goto msi;
2969        		}
2970 		val = pci_msix_count(dev);
2971 		/*
2972 		** 82574 can be configured for 5 but
2973 		** we limit use to 3.
2974 		*/
2975 		if (val > 3) val = 3;
2976 		if ((val) && pci_alloc_msix(dev, &val) == 0) {
2977                		device_printf(adapter->dev,"Using MSIX interrupts\n");
2978 			return (val);
2979 		}
2980 	}
2981 msi:
2982        	val = pci_msi_count(dev);
2983        	if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
2984                	adapter->msi = 1;
2985                	device_printf(adapter->dev,"Using MSI interrupt\n");
2986 		return (val);
2987 	}
2988 	return (0);
2989 }
2990 
2991 /*********************************************************************
2992  *
2993  *  Initialize the hardware to a configuration
2994  *  as specified by the adapter structure.
2995  *
2996  **********************************************************************/
static int
em_hardware_init(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u16 	rx_buffer_size;

	INIT_DEBUGOUT("em_hardware_init: begin");

	/* Issue a global reset */
	e1000_reset_hw(&adapter->hw);

	/* Get control from any management/hw control */
	if (((adapter->hw.mac.type == e1000_82573) ||
	    (adapter->hw.mac.type == e1000_ich8lan) ||
	    (adapter->hw.mac.type == e1000_ich10lan) ||
	    (adapter->hw.mac.type == e1000_ich9lan)) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_get_hw_control(adapter);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Set up smart power down as default off on newer adapters. */
	if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
	    adapter->hw.mac.type == e1000_82572)) {
		u16 phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	/* Low 16 bits of PBA, shifted by 10 — presumably KB-to-bytes;
	 * confirm against the e1000 PBA register definition. */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
	    0xffff) << 10 );

	adapter->hw.fc.high_water = rx_buffer_size -
	    roundup2(adapter->max_frame_size, 1024);
	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

	/* 80003es2lan gets the maximum pause time; others use the default */
	if (adapter->hw.mac.type == e1000_80003es2lan)
		adapter->hw.fc.pause_time = 0xFFFF;
	else
		adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
	adapter->hw.fc.send_xon = TRUE;
	adapter->hw.fc.type = e1000_fc_full;

	/* Program the configuration above into the MAC */
	if (e1000_init_hw(&adapter->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	e1000_check_for_link(&adapter->hw);

	return (0);
}
3069 
3070 /*********************************************************************
3071  *
3072  *  Setup networking device structure and register an interface.
3073  *
3074  **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("em_setup_interface: begin");

	/* Allocate and initialize the ifnet; attach cannot proceed without it */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	/* Send-queue depth is tied to the TX ring size */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	/* Capabilities are filled in after attach, starting from none */
	ifp->if_capabilities = ifp->if_capenable = 0;

	if (adapter->hw.mac.type >= e1000_82543) {
		int version_cap;
		/* VLAN HW csum capability flag only exists on 7.x+ */
#if __FreeBSD_version < 700000
		version_cap = IFCAP_HWCSUM;
#else
		version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
#endif
		ifp->if_capabilities |= version_cap;
		ifp->if_capenable |= version_cap;
	}

#if __FreeBSD_version >= 700000
	/* Identify TSO capable adapters */
	if ((adapter->hw.mac.type > e1000_82544) &&
	    (adapter->hw.mac.type != e1000_82547))
		ifp->if_capabilities |= IFCAP_TSO4;
	/*
	 * By default only enable on PCI-E, this
	 * can be overriden by ifconfig.
	 */
	if (adapter->hw.mac.type >= e1000_82571)
		ifp->if_capenable |= IFCAP_TSO4;
#endif

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    em_media_change, em_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		u_char fiber_type = IFM_1000_SX;	/* default type */

		/* 82545 fiber parts advertise LX instead of SX */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		/* Copper: advertise 10/100 half+full duplex ... */
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		/* ... and gigabit, except on IFE (10/100-only) PHYs */
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
}
3168 
3169 
3170 /*********************************************************************
3171  *
3172  *  Workaround for SmartSpeed on 82541 and 82547 controllers
3173  *
3174  **********************************************************************/
static void
em_smartspeed(struct adapter *adapter)
{
	u16 phy_tmp;

	/*
	 * Only act while the link is down on an IGP PHY that is
	 * autonegotiating and advertising 1000BASE-T full duplex.
	 */
	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
	    adapter->hw.mac.autoneg == 0 ||
	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&adapter->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			/* Turn off manual master/slave configuration
			 * and restart autonegotiation */
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&adapter->hw,
				    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.mac.autoneg &&
				   !e1000_phy_setup_autoneg(&adapter->hw) &&
				   !e1000_read_phy_reg(&adapter->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					e1000_write_phy_reg(&adapter->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		/* Re-enable manual master/slave config and kick
		 * autonegotiation once more */
		if(adapter->hw.mac.autoneg &&
		   !e1000_phy_setup_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
3229 
3230 
3231 /*
3232  * Manage DMA'able memory.
3233  */
3234 static void
3235 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3236 {
3237 	if (error)
3238 		return;
3239 	*(bus_addr_t *) arg = segs[0].ds_addr;
3240 }
3241 
3242 static int
3243 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3244         struct em_dma_alloc *dma, int mapflags)
3245 {
3246 	int error;
3247 
3248 #if __FreeBSD_version >= 700000
3249 	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3250 #else
3251 	error = bus_dma_tag_create(NULL,		 /* parent */
3252 #endif
3253 				EM_DBA_ALIGN, 0,	/* alignment, bounds */
3254 				BUS_SPACE_MAXADDR,	/* lowaddr */
3255 				BUS_SPACE_MAXADDR,	/* highaddr */
3256 				NULL, NULL,		/* filter, filterarg */
3257 				size,			/* maxsize */
3258 				1,			/* nsegments */
3259 				size,			/* maxsegsize */
3260 				0,			/* flags */
3261 				NULL,			/* lockfunc */
3262 				NULL,			/* lockarg */
3263 				&dma->dma_tag);
3264 	if (error) {
3265 		device_printf(adapter->dev,
3266 		    "%s: bus_dma_tag_create failed: %d\n",
3267 		    __func__, error);
3268 		goto fail_0;
3269 	}
3270 
3271 	error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3272 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3273 	if (error) {
3274 		device_printf(adapter->dev,
3275 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3276 		    __func__, (uintmax_t)size, error);
3277 		goto fail_2;
3278 	}
3279 
3280 	dma->dma_paddr = 0;
3281 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3282 	    size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3283 	if (error || dma->dma_paddr == 0) {
3284 		device_printf(adapter->dev,
3285 		    "%s: bus_dmamap_load failed: %d\n",
3286 		    __func__, error);
3287 		goto fail_3;
3288 	}
3289 
3290 	return (0);
3291 
3292 fail_3:
3293 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3294 fail_2:
3295 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3296 	bus_dma_tag_destroy(dma->dma_tag);
3297 fail_0:
3298 	dma->dma_map = NULL;
3299 	dma->dma_tag = NULL;
3300 
3301 	return (error);
3302 }
3303 
3304 static void
3305 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3306 {
3307 	if (dma->dma_tag == NULL)
3308 		return;
3309 	if (dma->dma_map != NULL) {
3310 		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3311 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3312 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3313 		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3314 		dma->dma_map = NULL;
3315 	}
3316 	bus_dma_tag_destroy(dma->dma_tag);
3317 	dma->dma_tag = NULL;
3318 }
3319 
3320 
3321 /*********************************************************************
3322  *
3323  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
3324  *  the information needed to transmit a packet on the wire.
3325  *
3326  **********************************************************************/
static int
em_allocate_transmit_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *tx_buffer;
	int error;

	/*
	 * Create DMA tags for tx descriptors
	 */
	/* Tag sized for TSO-sized packets scattered across
	 * up to EM_MAX_SCATTER segments */
#if __FreeBSD_version >= 700000
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
#else
	if ((error = bus_dma_tag_create(NULL,		 /* parent */
#endif
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				EM_TSO_SIZE,		/* maxsize */
				EM_MAX_SCATTER,		/* nsegments */
				EM_TSO_SEG_SIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,		/* lockfunc */
				NULL,		/* lockarg */
				&adapter->txtag)) != 0) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	/* One em_buffer bookkeeping entry per TX descriptor */
	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->tx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	for (int i = 0; i < adapter->num_tx_desc; i++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
		/* -1 marks "no end-of-packet descriptor pending" */
		tx_buffer->next_eop = -1;
	}

	return (0);
fail:
	/* Releases the tag, buffer area and any maps created so far */
	em_free_transmit_structures(adapter);
	return (error);
}
3381 
3382 /*********************************************************************
3383  *
3384  *  (Re)Initialize transmit structures.
3385  *
3386  **********************************************************************/
3387 static void
3388 em_setup_transmit_structures(struct adapter *adapter)
3389 {
3390 	struct em_buffer *tx_buffer;
3391 
3392 	/* Clear the old ring contents */
3393 	bzero(adapter->tx_desc_base,
3394 	    (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3395 
3396 	/* Free any existing TX buffers */
3397 	for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3398 		tx_buffer = &adapter->tx_buffer_area[i];
3399 		bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3400 		    BUS_DMASYNC_POSTWRITE);
3401 		bus_dmamap_unload(adapter->txtag, tx_buffer->map);
3402 		m_freem(tx_buffer->m_head);
3403 		tx_buffer->m_head = NULL;
3404 		tx_buffer->next_eop = -1;
3405 	}
3406 
3407 	/* Reset state */
3408 	adapter->next_avail_tx_desc = 0;
3409 	adapter->next_tx_to_clean = 0;
3410 	adapter->num_tx_desc_avail = adapter->num_tx_desc;
3411 
3412 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3413 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3414 
3415 	return;
3416 }
3417 
3418 /*********************************************************************
3419  *
3420  *  Enable transmit unit.
3421  *
3422  **********************************************************************/
static void
em_initialize_transmit_unit(struct adapter *adapter)
{
	u32	tctl, tarc, tipg = 0;
	u64	bus_addr;

	 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	/* 64-bit ring address is split across the high/low registers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (u32)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	case e1000_80003es2lan:
		tipg = DEFAULT_82543_TIPG_IPGR1;
		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
		    E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* Fiber/serdes and copper use different IPGT defaults */
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	/* TX interrupt delay; the absolute-delay register (TADV)
	 * only exists from the 82540 onward */
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	/* Per-MAC TARC tweaks (SPEED_MODE_BIT on 82571/82572;
	 * bit 0 on 80003es2lan for both TX queues) */
	if ((adapter->hw.mac.type == e1000_82571) ||
	    (adapter->hw.mac.type == e1000_82572)) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= SPEED_MODE_BIT;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
	}

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* Multiple request support on 82571 and later */
	if (adapter->hw.mac.type >= e1000_82571)
		tctl |= E1000_TCTL_MULR;

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Request delayed interrupts only if a TX delay is configured */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
3507 
3508 /*********************************************************************
3509  *
3510  *  Free all transmit related data structures.
3511  *
3512  **********************************************************************/
3513 static void
3514 em_free_transmit_structures(struct adapter *adapter)
3515 {
3516 	struct em_buffer *tx_buffer;
3517 
3518 	INIT_DEBUGOUT("free_transmit_structures: begin");
3519 
3520 	if (adapter->tx_buffer_area != NULL) {
3521 		for (int i = 0; i < adapter->num_tx_desc; i++) {
3522 			tx_buffer = &adapter->tx_buffer_area[i];
3523 			if (tx_buffer->m_head != NULL) {
3524 				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3525 				    BUS_DMASYNC_POSTWRITE);
3526 				bus_dmamap_unload(adapter->txtag,
3527 				    tx_buffer->map);
3528 				m_freem(tx_buffer->m_head);
3529 				tx_buffer->m_head = NULL;
3530 			} else if (tx_buffer->map != NULL)
3531 				bus_dmamap_unload(adapter->txtag,
3532 				    tx_buffer->map);
3533 			if (tx_buffer->map != NULL) {
3534 				bus_dmamap_destroy(adapter->txtag,
3535 				    tx_buffer->map);
3536 				tx_buffer->map = NULL;
3537 			}
3538 		}
3539 	}
3540 	if (adapter->tx_buffer_area != NULL) {
3541 		free(adapter->tx_buffer_area, M_DEVBUF);
3542 		adapter->tx_buffer_area = NULL;
3543 	}
3544 	if (adapter->txtag != NULL) {
3545 		bus_dma_tag_destroy(adapter->txtag);
3546 		adapter->txtag = NULL;
3547 	}
3548 }
3549 
3550 /*********************************************************************
3551  *
3552  *  The offload context needs to be set when we transfer the first
3553  *  packet of a particular protocol (TCP/UDP). This routine has been
3554  *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3555  *
3556  **********************************************************************/
3557 static void
3558 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3559     u32 *txd_upper, u32 *txd_lower)
3560 {
3561 	struct e1000_context_desc *TXD;
3562 	struct em_buffer *tx_buffer;
3563 	struct ether_vlan_header *eh;
3564 	struct ip *ip = NULL;
3565 	struct ip6_hdr *ip6;
3566 	struct tcp_hdr *th;
3567 	int curr_txd, ehdrlen;
3568 	u32 cmd, hdr_len, ip_hlen;
3569 	u16 etype;
3570 	u8 ipproto;
3571 
3572 	cmd = hdr_len = ipproto = 0;
3573 	/* Setup checksum offload context. */
3574 	curr_txd = adapter->next_avail_tx_desc;
3575 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
3576 	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3577 
3578 	/*
3579 	 * Determine where frame payload starts.
3580 	 * Jump over vlan headers if already present,
3581 	 * helpful for QinQ too.
3582 	 */
3583 	eh = mtod(mp, struct ether_vlan_header *);
3584 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3585 		etype = ntohs(eh->evl_proto);
3586 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3587 	} else {
3588 		etype = ntohs(eh->evl_encap_proto);
3589 		ehdrlen = ETHER_HDR_LEN;
3590 	}
3591 
3592 	/*
3593 	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3594 	 * TODO: Support SCTP too when it hits the tree.
3595 	 */
3596 	switch (etype) {
3597 	case ETHERTYPE_IP:
3598 		ip = (struct ip *)(mp->m_data + ehdrlen);
3599 		ip_hlen = ip->ip_hl << 2;
3600 
3601 		/* Setup of IP header checksum. */
3602 		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3603 			/*
3604 			 * Start offset for header checksum calculation.
3605 			 * End offset for header checksum calculation.
3606 			 * Offset of place to put the checksum.
3607 			 */
3608 			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3609 			TXD->lower_setup.ip_fields.ipcse =
3610 			    htole16(ehdrlen + ip_hlen);
3611 			TXD->lower_setup.ip_fields.ipcso =
3612 			    ehdrlen + offsetof(struct ip, ip_sum);
3613 			cmd |= E1000_TXD_CMD_IP;
3614 			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3615 		}
3616 
3617 		if (mp->m_len < ehdrlen + ip_hlen)
3618 			return;	/* failure */
3619 
3620 		hdr_len = ehdrlen + ip_hlen;
3621 		ipproto = ip->ip_p;
3622 
3623 		break;
3624 	case ETHERTYPE_IPV6:
3625 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3626 		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3627 
3628 		if (mp->m_len < ehdrlen + ip_hlen)
3629 			return;	/* failure */
3630 
3631 		/* IPv6 doesn't have a header checksum. */
3632 
3633 		hdr_len = ehdrlen + ip_hlen;
3634 		ipproto = ip6->ip6_nxt;
3635 
3636 		break;
3637 #ifdef EM_TIMESYNC
3638 	case ETHERTYPE_IEEE1588:
3639 		*txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
3640 		break;
3641 #endif
3642 	default:
3643 		*txd_upper = 0;
3644 		*txd_lower = 0;
3645 		return;
3646 	}
3647 
3648 	switch (ipproto) {
3649 	case IPPROTO_TCP:
3650 		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3651 			/*
3652 			 * Start offset for payload checksum calculation.
3653 			 * End offset for payload checksum calculation.
3654 			 * Offset of place to put the checksum.
3655 			 */
3656 			th = (struct tcp_hdr *)(mp->m_data + hdr_len);
3657 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3658 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3659 			TXD->upper_setup.tcp_fields.tucso =
3660 			    hdr_len + offsetof(struct tcphdr, th_sum);
3661 			cmd |= E1000_TXD_CMD_TCP;
3662 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3663 		}
3664 		break;
3665 	case IPPROTO_UDP:
3666 	{
3667 #ifdef EM_TIMESYNC
3668 		void *hdr = (caddr_t) ip + ip_hlen;
3669 		struct udphdr *uh = (struct udphdr *)hdr;
3670 
3671 		if (uh->uh_dport == htons(TSYNC_PORT)) {
3672 			*txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
3673 			IOCTL_DEBUGOUT("@@@ Sending Event Packet\n");
3674 		}
3675 #endif
3676 		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3677 			/*
3678 			 * Start offset for header checksum calculation.
3679 			 * End offset for header checksum calculation.
3680 			 * Offset of place to put the checksum.
3681 			 */
3682 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3683 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3684 			TXD->upper_setup.tcp_fields.tucso =
3685 			    hdr_len + offsetof(struct udphdr, uh_sum);
3686 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3687 		}
3688 		/* Fall Thru */
3689 	}
3690 	default:
3691 		break;
3692 	}
3693 
3694 #ifdef EM_TIMESYNC
3695 	/*
3696 	** We might be here just for TIMESYNC
3697 	** which means we don't need the context
3698 	** descriptor.
3699 	*/
3700 	if (!mp->m_pkthdr.csum_flags & CSUM_OFFLOAD)
3701 		return;
3702 #endif
3703 	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
3704 		     E1000_TXD_DTYP_D;		/* Data descr */
3705 	TXD->tcp_seg_setup.data = htole32(0);
3706 	TXD->cmd_and_length =
3707 	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3708 	tx_buffer->m_head = NULL;
3709 	tx_buffer->next_eop = -1;
3710 
3711 	if (++curr_txd == adapter->num_tx_desc)
3712 		curr_txd = 0;
3713 
3714 	adapter->num_tx_desc_avail--;
3715 	adapter->next_avail_tx_desc = curr_txd;
3716 }
3717 
3718 
3719 #if __FreeBSD_version >= 700000
3720 /**********************************************************************
3721  *
3722  *  Setup work for hardware segmentation offload (TSO)
3723  *
3724  **********************************************************************/
/*
 * Program a TSO context descriptor for mbuf chain *mp.  On success one
 * TX descriptor is consumed, *txd_upper/*txd_lower are set up for the
 * data descriptors that follow, and TRUE is returned.  FALSE means the
 * frame cannot be offloaded and no descriptor was consumed.
 */
static bool
em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
   u32 *txd_lower)
{
	struct e1000_context_desc *TXD;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th;
	int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
	u16 etype;

	/*
	 * This function could/should be extended to support IP/IPv6
	 * fragmentation as well.  But as they say, one step at a time.
	 */

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;	/* -1 */

	/*
	 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		isip6 = 0;
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return FALSE;	/* 0 */
		/*
		 * ip_len/ip_sum are zeroed here; per-segment values are
		 * filled in by the hardware during segmentation (see the
		 * 8254x developer's manual).
		 */
		ip->ip_len = 0;
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
#if 1
		/*
		 * Seed th_sum with a pseudo-header checksum that omits the
		 * TCP length — the hardware adds the per-segment length.
		 */
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	case ETHERTYPE_IPV6:
		isip6 = 1;
		return FALSE;			/* Not supported yet. */
		/* NOTE: everything below in this case is dead code until
		 * the early return above is removed. */
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return FALSE;	/* 0 */
		ip6->ip6_plen = 0;
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
#if 0
		th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
		    htons(IPPROTO_TCP));	/* XXX: function notyet. */
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	default:
		return FALSE;
	}
	/* Total header length: L2 + L3 + L4 (th_off is in 32-bit words). */
	hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);

	*txd_lower = (E1000_TXD_CMD_DEXT |	/* Extended descr type */
		      E1000_TXD_DTYP_D |	/* Data descr type */
		      E1000_TXD_CMD_TSE);	/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
		      E1000_TXD_POPTS_TXSM) << 8;

	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];

	/* IPv6 doesn't have a header checksum. */
	if (!isip6) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
		TXD->lower_setup.ip_fields.ipcse =
		    htole16(ehdrlen + ip_hlen - 1);
		TXD->lower_setup.ip_fields.ipcso =
		    ehdrlen + offsetof(struct ip, ip_sum);
	}
	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss =
	    ehdrlen + ip_hlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
	TXD->tcp_seg_setup.fields.hdr_len = hdr_len;

	TXD->cmd_and_length = htole32(adapter->txd_cmd |
				E1000_TXD_CMD_DEXT |	/* Extended descr */
				E1000_TXD_CMD_TSE |	/* TSE context */
				(isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
				E1000_TXD_CMD_TCP |	/* Do TCP checksum */
				(mp->m_pkthdr.len - (hdr_len))); /* Total len */

	/* Context descriptor carries no mbuf of its own. */
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	/* One descriptor consumed by the context setup. */
	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	adapter->tx_tso = TRUE;

	return TRUE;
}
3868 
3869 #endif /* __FreeBSD_version >= 700000 */
3870 
3871 /**********************************************************************
3872  *
3873  *  Examine each tx_buffer in the used queue. If the hardware is done
3874  *  processing the packet then free associated resources. The
3875  *  tx_buffer is put back on the free queue.
3876  *
3877  **********************************************************************/
/*
 * Reclaim transmit descriptors that the hardware has completed (DD bit
 * set on the packet's EOP descriptor), free the associated mbufs and
 * unload their DMA maps, then update OACTIVE and the watchdog timer.
 * Must be called with the TX lock held.
 */
static void
em_txeof(struct adapter *adapter)
{
        int first, last, done, num_avail;
        struct em_buffer *tx_buffer;
        struct e1000_tx_desc   *tx_desc, *eop_desc;
	struct ifnet   *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

	/* Ring fully free: nothing outstanding to reclaim. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        first = adapter->next_tx_to_clean;
        tx_desc = &adapter->tx_desc_base[first];
        tx_buffer = &adapter->tx_buffer_area[first];
	/* next_eop was recorded at transmit time; -1 means "no packet". */
	last = tx_buffer->next_eop;
        eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
 		last = 0;
	done = last;

        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);

	/* Outer loop: one completed packet per iteration. */
        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
                	tx_desc->upper.data = 0;
                	tx_desc->lower.data = 0;
                	tx_desc->buffer_addr = 0;
                	num_avail++;

			/* Only the descriptor holding the mbuf frees it. */
			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

                        	m_freem(tx_buffer->m_head);
                        	tx_buffer->m_head = NULL;
                	}
			tx_buffer->next_eop = -1;

	                if (++first == adapter->num_tx_desc)
				first = 0;

	                tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
        		eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
        }
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->next_tx_to_clean = first;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
         * that it is OK to send packets.
         * If there are no pending descriptors, clear the timeout. Otherwise,
         * if some descriptors have been freed, restart the timeout.
         */
        if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* All clean, turn off the timer */
                if (num_avail == adapter->num_tx_desc) {
			adapter->watchdog_timer = 0;
		} else
		/* Some cleaned, reset the timer */
                if (num_avail != adapter->num_tx_desc_avail)
			adapter->watchdog_timer = EM_TX_TIMEOUT;
        }
        adapter->num_tx_desc_avail = num_avail;
	return;
}
3972 
3973 /*********************************************************************
3974  *
3975  *  When Link is lost sometimes there is work still in the TX ring
3976  *  which will result in a watchdog, rather than allow that do an
3977  *  attempted cleanup and then reinit here. Note that this has been
3978  *  seens mostly with fiber adapters.
3979  *
3980  **********************************************************************/
3981 static void
3982 em_tx_purge(struct adapter *adapter)
3983 {
3984 	if ((!adapter->link_active) && (adapter->watchdog_timer)) {
3985 		EM_TX_LOCK(adapter);
3986 		em_txeof(adapter);
3987 		EM_TX_UNLOCK(adapter);
3988 		if (adapter->watchdog_timer) { /* Still not clean? */
3989 			adapter->watchdog_timer = 0;
3990 			em_init_locked(adapter);
3991 		}
3992 	}
3993 }
3994 
3995 /*********************************************************************
3996  *
3997  *  Get a buffer from system mbuf buffer pool.
3998  *
3999  **********************************************************************/
/*
 * Allocate a fresh mbuf cluster for RX slot i, load it through the
 * pre-created spare DMA map (the maps are then swapped so the slot's
 * old map becomes the new spare) and write the bus address into the
 * matching RX descriptor.  Returns 0 on success or an errno; on
 * failure the slot's existing buffer is left untouched.
 */
static int
em_get_buf(struct adapter *adapter, int i)
{
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	struct em_buffer	*rx_buffer;
	int			error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		adapter->mbuf_cluster_failed++;
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Align the payload when the whole frame still fits the cluster. */
	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
	    adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_free(m);
		return (error);
	}

	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));

	rx_buffer = &adapter->rx_buffer_area[i];
	if (rx_buffer->m_head != NULL)
		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);

	/* Swap maps: the freshly-loaded spare becomes this slot's map. */
	map = rx_buffer->map;
	rx_buffer->map = adapter->rx_sparemap;
	adapter->rx_sparemap = map;
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
	rx_buffer->m_head = m;

	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
	return (0);
}
4046 
4047 /*********************************************************************
4048  *
4049  *  Allocate memory for rx_buffer structures. Since we use one
4050  *  rx_buffer per received packet, the maximum number of rx_buffer's
4051  *  that we'll need is equal to the number of receive descriptors
4052  *  that we've allocated.
4053  *
4054  **********************************************************************/
/*
 * Allocate the rx_buffer array, the RX DMA tag, the spare map and one
 * DMA map per descriptor.  On any failure everything allocated so far
 * is released through em_free_receive_structures().
 */
static int
em_allocate_receive_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *rx_buffer;
	int i, error;

	adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->rx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		return (ENOMEM);
	}

	/* One tag shared by all RX buffers: one segment, one cluster max. */
#if __FreeBSD_version >= 700000
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
#else
	error = bus_dma_tag_create(NULL,		 /* parent */
#endif
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MCLBYTES,		/* maxsize */
				1,			/* nsegments */
				MCLBYTES,		/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&adapter->rxtag);
	if (error) {
		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
		    __func__, error);
		goto fail;
	}

	/* Create the spare map (used by getbuf) */
	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
	     &adapter->rx_sparemap);
	if (error) {
		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
		    __func__, error);
		goto fail;
	}

	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
		    &rx_buffer->map);
		if (error) {
			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
			    __func__, error);
			goto fail;
		}
	}

	return (0);

fail:
	em_free_receive_structures(adapter);
	return (error);
}
4117 
4118 /*********************************************************************
4119  *
4120  *  (Re)initialize receive structures.
4121  *
4122  **********************************************************************/
4123 static int
4124 em_setup_receive_structures(struct adapter *adapter)
4125 {
4126 	struct em_buffer *rx_buffer;
4127 	int i, error;
4128 
4129 	/* Reset descriptor ring */
4130 	bzero(adapter->rx_desc_base,
4131 	    (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
4132 
4133 	/* Free current RX buffers. */
4134 	rx_buffer = adapter->rx_buffer_area;
4135 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4136 		if (rx_buffer->m_head != NULL) {
4137 			bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4138 			    BUS_DMASYNC_POSTREAD);
4139 			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4140 			m_freem(rx_buffer->m_head);
4141 			rx_buffer->m_head = NULL;
4142 		}
4143         }
4144 
4145 	/* Allocate new ones. */
4146 	for (i = 0; i < adapter->num_rx_desc; i++) {
4147 		error = em_get_buf(adapter, i);
4148 		if (error)
4149                         return (error);
4150 	}
4151 
4152 	/* Setup our descriptor pointers */
4153 	adapter->next_rx_desc_to_check = 0;
4154 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4155 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4156 
4157 	return (0);
4158 }
4159 
4160 /*********************************************************************
4161  *
4162  *  Enable receive unit.
4163  *
4164  **********************************************************************/
4165 #define MAX_INTS_PER_SEC	8000
4166 #define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
4167 
/*
 * Program the receive unit: interrupt moderation, ring base/length,
 * RCTL (buffer size, broadcast accept, long-packet enable), RX
 * checksum offload and the head/tail pointers, then enable receives.
 */
static void
em_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/*
	** When using MSIX interrupts we need to throttle
	** using the EITR register (82574 only)
	*/
	if (adapter->msix)
		for (int i = 0; i < 4; i++)
			E1000_WRITE_REG(&adapter->hw,
			    E1000_EITR_82574(i), DEFAULT_ITR);

	/* Disable accelerated acknowledge on 82574 */
	if (adapter->hw.mac.type == e1000_82574)
		E1000_WRITE_REG(&adapter->hw,
		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* Store bad packets only when the TBI workaround requires it. */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Buffer sizes > 2048 need the buffer-size extension bit. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable tracks whether jumbo frames are in use. */
	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/*
	** XXX TEMPORARY WORKAROUND: on some systems with 82573
	** long latencies are observed, like Lenovo X60. This
	** change eliminates the problem, but since having positive
	** values in RDTR is a known source of problems on other
	** platforms another solution is being sought.
	*/
	if (adapter->hw.mac.type == e1000_82573)
		E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
4285 
4286 /*********************************************************************
4287  *
4288  *  Free receive related data structures.
4289  *
4290  **********************************************************************/
4291 static void
4292 em_free_receive_structures(struct adapter *adapter)
4293 {
4294 	struct em_buffer *rx_buffer;
4295 	int i;
4296 
4297 	INIT_DEBUGOUT("free_receive_structures: begin");
4298 
4299 	if (adapter->rx_sparemap) {
4300 		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
4301 		adapter->rx_sparemap = NULL;
4302 	}
4303 
4304 	/* Cleanup any existing buffers */
4305 	if (adapter->rx_buffer_area != NULL) {
4306 		rx_buffer = adapter->rx_buffer_area;
4307 		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4308 			if (rx_buffer->m_head != NULL) {
4309 				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4310 				    BUS_DMASYNC_POSTREAD);
4311 				bus_dmamap_unload(adapter->rxtag,
4312 				    rx_buffer->map);
4313 				m_freem(rx_buffer->m_head);
4314 				rx_buffer->m_head = NULL;
4315 			} else if (rx_buffer->map != NULL)
4316 				bus_dmamap_unload(adapter->rxtag,
4317 				    rx_buffer->map);
4318 			if (rx_buffer->map != NULL) {
4319 				bus_dmamap_destroy(adapter->rxtag,
4320 				    rx_buffer->map);
4321 				rx_buffer->map = NULL;
4322 			}
4323 		}
4324 	}
4325 
4326 	if (adapter->rx_buffer_area != NULL) {
4327 		free(adapter->rx_buffer_area, M_DEVBUF);
4328 		adapter->rx_buffer_area = NULL;
4329 	}
4330 
4331 	if (adapter->rxtag != NULL) {
4332 		bus_dma_tag_destroy(adapter->rxtag);
4333 		adapter->rxtag = NULL;
4334 	}
4335 }
4336 
4337 /*********************************************************************
4338  *
4339  *  This routine executes in interrupt context. It replenishes
4340  *  the mbufs in the descriptor and sends data which has been
4341  *  dma'ed into host memory to upper layer.
4342  *
4343  *  We loop at most count times if count is > 0, or until done if
4344  *  count < 0.
4345  *
4346  *********************************************************************/
4347 static int
4348 em_rxeof(struct adapter *adapter, int count)
4349 {
4350 	struct ifnet	*ifp = adapter->ifp;;
4351 	struct mbuf	*mp;
4352 	u8		status, accept_frame = 0, eop = 0;
4353 	u16 		len, desc_len, prev_len_adj;
4354 	int		i;
4355 	struct e1000_rx_desc   *current_desc;
4356 
4357 	EM_RX_LOCK(adapter);
4358 	i = adapter->next_rx_desc_to_check;
4359 	current_desc = &adapter->rx_desc_base[i];
4360 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4361 	    BUS_DMASYNC_POSTREAD);
4362 
4363 	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
4364 		EM_RX_UNLOCK(adapter);
4365 		return (0);
4366 	}
4367 
4368 	while ((current_desc->status & E1000_RXD_STAT_DD) &&
4369 	    (count != 0) &&
4370 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4371 		struct mbuf *m = NULL;
4372 
4373 		mp = adapter->rx_buffer_area[i].m_head;
4374 		/*
4375 		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
4376 		 * needs to access the last received byte in the mbuf.
4377 		 */
4378 		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
4379 		    BUS_DMASYNC_POSTREAD);
4380 
4381 		accept_frame = 1;
4382 		prev_len_adj = 0;
4383 		desc_len = le16toh(current_desc->length);
4384 		status = current_desc->status;
4385 		if (status & E1000_RXD_STAT_EOP) {
4386 			count--;
4387 			eop = 1;
4388 			if (desc_len < ETHER_CRC_LEN) {
4389 				len = 0;
4390 				prev_len_adj = ETHER_CRC_LEN - desc_len;
4391 			} else
4392 				len = desc_len - ETHER_CRC_LEN;
4393 		} else {
4394 			eop = 0;
4395 			len = desc_len;
4396 		}
4397 
4398 		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
4399 			u8	last_byte;
4400 			u32	pkt_len = desc_len;
4401 
4402 			if (adapter->fmp != NULL)
4403 				pkt_len += adapter->fmp->m_pkthdr.len;
4404 
4405 			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
4406 			if (TBI_ACCEPT(&adapter->hw, status,
4407 			    current_desc->errors, pkt_len, last_byte,
4408 			    adapter->min_frame_size, adapter->max_frame_size)) {
4409 				e1000_tbi_adjust_stats_82543(&adapter->hw,
4410 				    &adapter->stats, pkt_len,
4411 				    adapter->hw.mac.addr,
4412 				    adapter->max_frame_size);
4413 				if (len > 0)
4414 					len--;
4415 			} else
4416 				accept_frame = 0;
4417 		}
4418 
4419 		if (accept_frame) {
4420 			if (em_get_buf(adapter, i) != 0) {
4421 				ifp->if_iqdrops++;
4422 				goto discard;
4423 			}
4424 
4425 			/* Assign correct length to the current fragment */
4426 			mp->m_len = len;
4427 
4428 			if (adapter->fmp == NULL) {
4429 				mp->m_pkthdr.len = len;
4430 				adapter->fmp = mp; /* Store the first mbuf */
4431 				adapter->lmp = mp;
4432 			} else {
4433 				/* Chain mbuf's together */
4434 				mp->m_flags &= ~M_PKTHDR;
4435 				/*
4436 				 * Adjust length of previous mbuf in chain if
4437 				 * we received less than 4 bytes in the last
4438 				 * descriptor.
4439 				 */
4440 				if (prev_len_adj > 0) {
4441 					adapter->lmp->m_len -= prev_len_adj;
4442 					adapter->fmp->m_pkthdr.len -=
4443 					    prev_len_adj;
4444 				}
4445 				adapter->lmp->m_next = mp;
4446 				adapter->lmp = adapter->lmp->m_next;
4447 				adapter->fmp->m_pkthdr.len += len;
4448 			}
4449 
4450 			if (eop) {
4451 				adapter->fmp->m_pkthdr.rcvif = ifp;
4452 				ifp->if_ipackets++;
4453 				em_receive_checksum(adapter, current_desc,
4454 				    adapter->fmp);
4455 #ifndef __NO_STRICT_ALIGNMENT
4456 				if (adapter->max_frame_size >
4457 				    (MCLBYTES - ETHER_ALIGN) &&
4458 				    em_fixup_rx(adapter) != 0)
4459 					goto skip;
4460 #endif
4461 				if (status & E1000_RXD_STAT_VP) {
4462 #if __FreeBSD_version < 700000
4463 					VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
4464 					    (le16toh(current_desc->special) &
4465 					    E1000_RXD_SPC_VLAN_MASK));
4466 #else
4467 					adapter->fmp->m_pkthdr.ether_vtag =
4468 					    (le16toh(current_desc->special) &
4469 					    E1000_RXD_SPC_VLAN_MASK);
4470 					adapter->fmp->m_flags |= M_VLANTAG;
4471 #endif
4472 				}
4473 #ifndef __NO_STRICT_ALIGNMENT
4474 skip:
4475 #endif
4476 				m = adapter->fmp;
4477 				adapter->fmp = NULL;
4478 				adapter->lmp = NULL;
4479 			}
4480 		} else {
4481 			ifp->if_ierrors++;
4482 discard:
4483 			/* Reuse loaded DMA map and just update mbuf chain */
4484 			mp = adapter->rx_buffer_area[i].m_head;
4485 			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
4486 			mp->m_data = mp->m_ext.ext_buf;
4487 			mp->m_next = NULL;
4488 			if (adapter->max_frame_size <=
4489 			    (MCLBYTES - ETHER_ALIGN))
4490 				m_adj(mp, ETHER_ALIGN);
4491 			if (adapter->fmp != NULL) {
4492 				m_freem(adapter->fmp);
4493 				adapter->fmp = NULL;
4494 				adapter->lmp = NULL;
4495 			}
4496 			m = NULL;
4497 		}
4498 
4499 		/* Zero out the receive descriptors status. */
4500 		current_desc->status = 0;
4501 		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4502 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4503 
4504 		/* Advance our pointers to the next descriptor. */
4505 		if (++i == adapter->num_rx_desc)
4506 			i = 0;
4507 		if (m != NULL) {
4508 			adapter->next_rx_desc_to_check = i;
4509 			/* Unlock for call into stack */
4510 			EM_RX_UNLOCK(adapter);
4511 			(*ifp->if_input)(ifp, m);
4512 			EM_RX_LOCK(adapter);
4513 			i = adapter->next_rx_desc_to_check;
4514 		}
4515 		current_desc = &adapter->rx_desc_base[i];
4516 	}
4517 	adapter->next_rx_desc_to_check = i;
4518 
4519 	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
4520 	if (--i < 0)
4521 		i = adapter->num_rx_desc - 1;
4522 	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
4523 	EM_RX_UNLOCK(adapter);
4524 	if (!((current_desc->status) & E1000_RXD_STAT_DD))
4525 		return (0);
4526 
4527 	return (1);
4528 }
4529 
4530 #ifndef __NO_STRICT_ALIGNMENT
4531 /*
4532  * When jumbo frames are enabled we should realign entire payload on
4533  * architecures with strict alignment. This is serious design mistake of 8254x
4534  * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4535  * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4536  * payload. On architecures without strict alignment restrictions 8254x still
4537  * performs unaligned memory access which would reduce the performance too.
4538  * To avoid copying over an entire frame to align, we allocate a new mbuf and
4539  * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4540  * existing mbuf chain.
4541  *
4542  * Be aware, best performance of the 8254x is achived only when jumbo frame is
4543  * not used at all on architectures with strict alignment.
4544  */
4545 static int
4546 em_fixup_rx(struct adapter *adapter)
4547 {
4548 	struct mbuf *m, *n;
4549 	int error;
4550 
4551 	error = 0;
4552 	m = adapter->fmp;
4553 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4554 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4555 		m->m_data += ETHER_HDR_LEN;
4556 	} else {
4557 		MGETHDR(n, M_DONTWAIT, MT_DATA);
4558 		if (n != NULL) {
4559 			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4560 			m->m_data += ETHER_HDR_LEN;
4561 			m->m_len -= ETHER_HDR_LEN;
4562 			n->m_len = ETHER_HDR_LEN;
4563 			M_MOVE_PKTHDR(n, m);
4564 			n->m_next = m;
4565 			adapter->fmp = n;
4566 		} else {
4567 			adapter->dropped_pkts++;
4568 			m_freem(adapter->fmp);
4569 			adapter->fmp = NULL;
4570 			error = ENOMEM;
4571 		}
4572 	}
4573 
4574 	return (error);
4575 }
4576 #endif
4577 
4578 /*********************************************************************
4579  *
4580  *  Verify that the hardware indicated that the checksum is valid.
4581  *  Inform the stack about the status of checksum so that stack
4582  *  doesn't spend time verifying the checksum.
4583  *
4584  *********************************************************************/
4585 static void
4586 em_receive_checksum(struct adapter *adapter,
4587 	    struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4588 {
4589 	/* 82543 or newer only */
4590 	if ((adapter->hw.mac.type < e1000_82543) ||
4591 	    /* Ignore Checksum bit is set */
4592 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4593 		mp->m_pkthdr.csum_flags = 0;
4594 		return;
4595 	}
4596 
4597 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4598 		/* Did it pass? */
4599 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4600 			/* IP Checksum Good */
4601 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4602 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4603 
4604 		} else {
4605 			mp->m_pkthdr.csum_flags = 0;
4606 		}
4607 	}
4608 
4609 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4610 		/* Did it pass? */
4611 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4612 			mp->m_pkthdr.csum_flags |=
4613 			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4614 			mp->m_pkthdr.csum_data = htons(0xffff);
4615 		}
4616 	}
4617 }
4618 
4619 /*
4620  * This turns on the hardware offload of the VLAN
4621  * tag insertion and strip
4622  */
4623 static void
4624 em_enable_hw_vlans(struct adapter *adapter)
4625 {
4626 	u32 ctrl;
4627 
4628 	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4629 	ctrl |= E1000_CTRL_VME;
4630 	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4631 }
4632 
4633 static void
4634 em_enable_intr(struct adapter *adapter)
4635 {
4636 	struct e1000_hw *hw = &adapter->hw;
4637 	u32 ims_mask = IMS_ENABLE_MASK;
4638 
4639 	if (adapter->msix) {
4640 		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4641 		ims_mask |= EM_MSIX_MASK;
4642 	}
4643 	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
4644 }
4645 
4646 static void
4647 em_disable_intr(struct adapter *adapter)
4648 {
4649 	struct e1000_hw *hw = &adapter->hw;
4650 
4651 	if (adapter->msix)
4652 		E1000_WRITE_REG(hw, EM_EIAC, 0);
4653 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4654 }
4655 
4656 /*
4657  * Bit of a misnomer, what this really means is
4658  * to enable OS management of the system... aka
4659  * to disable special hardware management features
4660  */
/*
 * Configure the management (MANC/MANC2H) registers so the OS, rather
 * than firmware, handles ARP, while management packets on ports 623
 * and 664 are still forwarded to the host on 82571+ parts.
 */
static void
em_init_manageability(struct adapter *adapter)
{
	/* A shared code workaround */
#define E1000_82542_MANC2H E1000_MANC2H
	if (adapter->has_manage) {
		int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

                /* enable receiving management packets to the host */
                if (adapter->hw.mac.type >= e1000_82571) {
			manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
			/* Forward management ports 623 and 664 to the host. */
			manc2h |= E1000_MNG2HOST_PORT_623;
			manc2h |= E1000_MNG2HOST_PORT_664;
			E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
		}

		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}
4686 
4687 /*
4688  * Give control back to hardware management
4689  * controller if there is one.
4690  */
4691 static void
4692 em_release_manageability(struct adapter *adapter)
4693 {
4694 	if (adapter->has_manage) {
4695 		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4696 
4697 		/* re-enable hardware interception of ARP */
4698 		manc |= E1000_MANC_ARP_EN;
4699 
4700 		if (adapter->hw.mac.type >= e1000_82571)
4701 			manc &= ~E1000_MANC_EN_MNG2HOST;
4702 
4703 		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4704 	}
4705 }
4706 
4707 /*
4708  * em_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4709  * For ASF and Pass Through versions of f/w this means that
4710  * the driver is loaded. For AMT version (only with 82573)
4711  * of the f/w this means that the network i/f is open.
4712  *
4713  */
4714 static void
4715 em_get_hw_control(struct adapter *adapter)
4716 {
4717 	u32 ctrl_ext, swsm;
4718 
4719 	/* Let firmware know the driver has taken over */
4720 	switch (adapter->hw.mac.type) {
4721 	case e1000_82573:
4722 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4723 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4724 		    swsm | E1000_SWSM_DRV_LOAD);
4725 		break;
4726 	case e1000_82571:
4727 	case e1000_82572:
4728 	case e1000_80003es2lan:
4729 	case e1000_ich8lan:
4730 	case e1000_ich9lan:
4731 	case e1000_ich10lan:
4732 		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4733 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4734 		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4735 		break;
4736 	default:
4737 		break;
4738 	}
4739 }
4740 
4741 /*
4742  * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4743  * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
4745  * of the f/w this means that the network i/f is closed.
4746  *
4747  */
4748 static void
4749 em_release_hw_control(struct adapter *adapter)
4750 {
4751 	u32 ctrl_ext, swsm;
4752 
4753 	/* Let firmware taken over control of h/w */
4754 	switch (adapter->hw.mac.type) {
4755 	case e1000_82573:
4756 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4757 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4758 		    swsm & ~E1000_SWSM_DRV_LOAD);
4759 		break;
4760 	case e1000_82571:
4761 	case e1000_82572:
4762 	case e1000_80003es2lan:
4763 	case e1000_ich8lan:
4764 	case e1000_ich9lan:
4765 	case e1000_ich10lan:
4766 		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4767 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4768 		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4769 		break;
4770 	default:
4771 		break;
4772 
4773 	}
4774 }
4775 
4776 static int
4777 em_is_valid_ether_addr(u8 *addr)
4778 {
4779 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4780 
4781 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4782 		return (FALSE);
4783 	}
4784 
4785 	return (TRUE);
4786 }
4787 
4788 /*
4789  * Enable PCI Wake On Lan capability
4790  */
4791 void
4792 em_enable_wakeup(device_t dev)
4793 {
4794 	u16     cap, status;
4795 	u8      id;
4796 
4797 	/* First find the capabilities pointer*/
4798 	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
4799 	/* Read the PM Capabilities */
4800 	id = pci_read_config(dev, cap, 1);
4801 	if (id != PCIY_PMG)     /* Something wrong */
4802 		return;
4803 	/* OK, we have the power capabilities, so
4804 	   now get the status register */
4805 	cap += PCIR_POWER_STATUS;
4806 	status = pci_read_config(dev, cap, 2);
4807 	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4808 	pci_write_config(dev, cap, status, 2);
4809 	return;
4810 }
4811 
4812 
4813 /*********************************************************************
4814 * 82544 Coexistence issue workaround.
4815 *    There are 2 issues.
4816 *       1. Transmit Hang issue.
4817 *    To detect this issue, following equation can be used...
4818 *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4819 *	  If SUM[3:0] is in between 1 to 4, we will have this issue.
4820 *
4821 *       2. DAC issue.
4822 *    To detect this issue, following equation can be used...
4823 *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4824 *	  If SUM[3:0] is in between 9 to c, we will have this issue.
4825 *
4826 *
4827 *    WORKAROUND:
4828 *	  Make sure we do not have ending address
4829 *	  as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4830 *
4831 *************************************************************************/
4832 static u32
4833 em_fill_descriptors (bus_addr_t address, u32 length,
4834 		PDESC_ARRAY desc_array)
4835 {
4836 	u32 safe_terminator;
4837 
4838 	/* Since issue is sensitive to length and address.*/
4839 	/* Let us first check the address...*/
4840 	if (length <= 4) {
4841 		desc_array->descriptor[0].address = address;
4842 		desc_array->descriptor[0].length = length;
4843 		desc_array->elements = 1;
4844 		return (desc_array->elements);
4845 	}
4846 	safe_terminator = (u32)((((u32)address & 0x7) +
4847 	    (length & 0xF)) & 0xF);
4848 	/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4849 	if (safe_terminator == 0   ||
4850 	(safe_terminator > 4   &&
4851 	safe_terminator < 9)   ||
4852 	(safe_terminator > 0xC &&
4853 	safe_terminator <= 0xF)) {
4854 		desc_array->descriptor[0].address = address;
4855 		desc_array->descriptor[0].length = length;
4856 		desc_array->elements = 1;
4857 		return (desc_array->elements);
4858 	}
4859 
4860 	desc_array->descriptor[0].address = address;
4861 	desc_array->descriptor[0].length = length - 4;
4862 	desc_array->descriptor[1].address = address + (length - 4);
4863 	desc_array->descriptor[1].length = 4;
4864 	desc_array->elements = 2;
4865 	return (desc_array->elements);
4866 }
4867 
4868 /**********************************************************************
4869  *
4870  *  Update the board statistics counters.
4871  *
4872  **********************************************************************/
static void
em_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/*
	 * Symbol and sequence error counters are only gathered on
	 * copper media, or when the STATUS register reports link up.
	 */
	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	/* Accumulate the hardware statistics counters into the softc. */
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	/*
	 * NOTE(review): only the high dwords (GORCH/GOTCH and, below,
	 * TORH/TOTH) are read and accumulated here, despite the comment
	 * above about reading the low dword first; the low 32 bits of
	 * these 64-bit counters appear to be discarded -- confirm whether
	 * that is intentional.
	 */
	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

	/* These additional counters are only read on 82543 and newer. */
	if (adapter->hw.mac.type >= e1000_82543) {
		adapter->stats.algnerrc +=
		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
		adapter->stats.rxerrc +=
		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
		adapter->stats.tncrs +=
		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
		adapter->stats.cexterr +=
		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
		adapter->stats.tsctc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
		adapter->stats.tsctfc +=
		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
	}
	ifp = adapter->ifp;

	/* Publish the aggregate counters to the ifnet statistics. */
	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol +
	    adapter->stats.latecol + adapter->watchdog_events;
}
4963 
4964 
4965 /**********************************************************************
4966  *
4967  *  This routine is called only when em_display_debug_stats is enabled.
4968  *  This routine provides a way to take a look at important statistics
4969  *  maintained by the driver and hardware.
4970  *
4971  **********************************************************************/
4972 static void
4973 em_print_debug_info(struct adapter *adapter)
4974 {
4975 	device_t dev = adapter->dev;
4976 	u8 *hw_addr = adapter->hw.hw_addr;
4977 
4978 	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
4979 	device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
4980 	    E1000_READ_REG(&adapter->hw, E1000_CTRL),
4981 	    E1000_READ_REG(&adapter->hw, E1000_RCTL));
4982 	device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
4983 	    ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
4984 	    (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
4985 	device_printf(dev, "Flow control watermarks high = %d low = %d\n",
4986 	    adapter->hw.fc.high_water,
4987 	    adapter->hw.fc.low_water);
4988 	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
4989 	    E1000_READ_REG(&adapter->hw, E1000_TIDV),
4990 	    E1000_READ_REG(&adapter->hw, E1000_TADV));
4991 	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
4992 	    E1000_READ_REG(&adapter->hw, E1000_RDTR),
4993 	    E1000_READ_REG(&adapter->hw, E1000_RADV));
4994 	device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
4995 	    (long long)adapter->tx_fifo_wrk_cnt,
4996 	    (long long)adapter->tx_fifo_reset_cnt);
4997 	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
4998 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
4999 	    E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5000 	device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5001 	    E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5002 	    E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5003 	device_printf(dev, "Num Tx descriptors avail = %d\n",
5004 	    adapter->num_tx_desc_avail);
5005 	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5006 	    adapter->no_tx_desc_avail1);
5007 	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5008 	    adapter->no_tx_desc_avail2);
5009 	device_printf(dev, "Std mbuf failed = %ld\n",
5010 	    adapter->mbuf_alloc_failed);
5011 	device_printf(dev, "Std mbuf cluster failed = %ld\n",
5012 	    adapter->mbuf_cluster_failed);
5013 	device_printf(dev, "Driver dropped packets = %ld\n",
5014 	    adapter->dropped_pkts);
5015 	device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5016 		adapter->no_tx_dma_setup);
5017 }
5018 
5019 static void
5020 em_print_hw_stats(struct adapter *adapter)
5021 {
5022 	device_t dev = adapter->dev;
5023 
5024 	device_printf(dev, "Excessive collisions = %lld\n",
5025 	    (long long)adapter->stats.ecol);
5026 #if	(DEBUG_HW > 0)  /* Dont output these errors normally */
5027 	device_printf(dev, "Symbol errors = %lld\n",
5028 	    (long long)adapter->stats.symerrs);
5029 #endif
5030 	device_printf(dev, "Sequence errors = %lld\n",
5031 	    (long long)adapter->stats.sec);
5032 	device_printf(dev, "Defer count = %lld\n",
5033 	    (long long)adapter->stats.dc);
5034 	device_printf(dev, "Missed Packets = %lld\n",
5035 	    (long long)adapter->stats.mpc);
5036 	device_printf(dev, "Receive No Buffers = %lld\n",
5037 	    (long long)adapter->stats.rnbc);
5038 	/* RLEC is inaccurate on some hardware, calculate our own. */
5039 	device_printf(dev, "Receive Length Errors = %lld\n",
5040 	    ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
5041 	device_printf(dev, "Receive errors = %lld\n",
5042 	    (long long)adapter->stats.rxerrc);
5043 	device_printf(dev, "Crc errors = %lld\n",
5044 	    (long long)adapter->stats.crcerrs);
5045 	device_printf(dev, "Alignment errors = %lld\n",
5046 	    (long long)adapter->stats.algnerrc);
5047 	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
5048 	    (long long)adapter->stats.cexterr);
5049 	device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
5050 	device_printf(dev, "watchdog timeouts = %ld\n",
5051 	    adapter->watchdog_events);
5052 	device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
5053 	    " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
5054 	    adapter->tx_irq , adapter->link_irq);
5055 	device_printf(dev, "XON Rcvd = %lld\n",
5056 	    (long long)adapter->stats.xonrxc);
5057 	device_printf(dev, "XON Xmtd = %lld\n",
5058 	    (long long)adapter->stats.xontxc);
5059 	device_printf(dev, "XOFF Rcvd = %lld\n",
5060 	    (long long)adapter->stats.xoffrxc);
5061 	device_printf(dev, "XOFF Xmtd = %lld\n",
5062 	    (long long)adapter->stats.xofftxc);
5063 	device_printf(dev, "Good Packets Rcvd = %lld\n",
5064 	    (long long)adapter->stats.gprc);
5065 	device_printf(dev, "Good Packets Xmtd = %lld\n",
5066 	    (long long)adapter->stats.gptc);
5067 	device_printf(dev, "TSO Contexts Xmtd = %lld\n",
5068 	    (long long)adapter->stats.tsctc);
5069 	device_printf(dev, "TSO Contexts Failed = %lld\n",
5070 	    (long long)adapter->stats.tsctfc);
5071 }
5072 
5073 /**********************************************************************
5074  *
5075  *  This routine provides a way to dump out the adapter eeprom,
5076  *  often a useful debug/service tool. This only dumps the first
5077  *  32 words, stuff that matters is in that extent.
5078  *
5079  **********************************************************************/
5080 static void
5081 em_print_nvm_info(struct adapter *adapter)
5082 {
5083 	u16	eeprom_data;
5084 	int	i, j, row = 0;
5085 
5086 	/* Its a bit crude, but it gets the job done */
5087 	printf("\nInterface EEPROM Dump:\n");
5088 	printf("Offset\n0x0000  ");
5089 	for (i = 0, j = 0; i < 32; i++, j++) {
5090 		if (j == 8) { /* Make the offset block */
5091 			j = 0; ++row;
5092 			printf("\n0x00%x0  ",row);
5093 		}
5094 		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5095 		printf("%04x ", eeprom_data);
5096 	}
5097 	printf("\n");
5098 }
5099 
5100 static int
5101 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5102 {
5103 	struct adapter *adapter;
5104 	int error;
5105 	int result;
5106 
5107 	result = -1;
5108 	error = sysctl_handle_int(oidp, &result, 0, req);
5109 
5110 	if (error || !req->newptr)
5111 		return (error);
5112 
5113 	if (result == 1) {
5114 		adapter = (struct adapter *)arg1;
5115 		em_print_debug_info(adapter);
5116 	}
5117 	/*
5118 	 * This value will cause a hex dump of the
5119 	 * first 32 16-bit words of the EEPROM to
5120 	 * the screen.
5121 	 */
5122 	if (result == 2) {
5123 		adapter = (struct adapter *)arg1;
5124 		em_print_nvm_info(adapter);
5125         }
5126 
5127 	return (error);
5128 }
5129 
5130 
5131 static int
5132 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5133 {
5134 	struct adapter *adapter;
5135 	int error;
5136 	int result;
5137 
5138 	result = -1;
5139 	error = sysctl_handle_int(oidp, &result, 0, req);
5140 
5141 	if (error || !req->newptr)
5142 		return (error);
5143 
5144 	if (result == 1) {
5145 		adapter = (struct adapter *)arg1;
5146 		em_print_hw_stats(adapter);
5147 	}
5148 
5149 	return (error);
5150 }
5151 
5152 static int
5153 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
5154 {
5155 	struct em_int_delay_info *info;
5156 	struct adapter *adapter;
5157 	u32 regval;
5158 	int error;
5159 	int usecs;
5160 	int ticks;
5161 
5162 	info = (struct em_int_delay_info *)arg1;
5163 	usecs = info->value;
5164 	error = sysctl_handle_int(oidp, &usecs, 0, req);
5165 	if (error != 0 || req->newptr == NULL)
5166 		return (error);
5167 	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
5168 		return (EINVAL);
5169 	info->value = usecs;
5170 	ticks = EM_USECS_TO_TICKS(usecs);
5171 
5172 	adapter = info->adapter;
5173 
5174 	EM_CORE_LOCK(adapter);
5175 	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
5176 	regval = (regval & ~0xffff) | (ticks & 0xffff);
5177 	/* Handle a few special cases. */
5178 	switch (info->offset) {
5179 	case E1000_RDTR:
5180 		break;
5181 	case E1000_TIDV:
5182 		if (ticks == 0) {
5183 			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
5184 			/* Don't write 0 into the TIDV register. */
5185 			regval++;
5186 		} else
5187 			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
5188 		break;
5189 	}
5190 	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
5191 	EM_CORE_UNLOCK(adapter);
5192 	return (0);
5193 }
5194 
5195 static void
5196 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5197 	const char *description, struct em_int_delay_info *info,
5198 	int offset, int value)
5199 {
5200 	info->adapter = adapter;
5201 	info->offset = offset;
5202 	info->value = value;
5203 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5204 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5205 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5206 	    info, 0, em_sysctl_int_delay, "I", description);
5207 }
5208 
5209 #ifndef EM_LEGACY_IRQ
5210 static void
5211 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5212 	const char *description, int *limit, int value)
5213 {
5214 	*limit = value;
5215 	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5216 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5217 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
5218 }
5219 #endif
5220 
5221 #ifdef EM_TIMESYNC
5222 /*
5223  * Initialize the Time Sync Feature
5224  */
static int
em_tsync_init(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	u32		tx_ctl, rx_ctl;


	/*
	 * Program the time increment register.
	 * NOTE(review): (1<<24) presumably selects the increment period
	 * and 20833/PICOSECS_PER_TICK the per-tick increment value --
	 * confirm against the hardware datasheet.
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_TIMINCA, (1<<24) |
	    20833/PICOSECS_PER_TICK);

	/* Capture the current 64-bit system time (low dword, then high) */
	adapter->last_stamp =  E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
	adapter->last_stamp |= (u64)E1000_READ_REG(&adapter->hw,
	    E1000_SYSTIMH) << 32ULL;

	/* Enable the TX side */
	tx_ctl =  E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	tx_ctl |= 0x10;
	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
	E1000_WRITE_FLUSH(&adapter->hw);

	/* Read back and verify the enable bit (0x10) stuck */
	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	if ((tx_ctl & 0x10) == 0) {
     		device_printf(dev, "Failed to enable TX timestamping\n");
		return (ENXIO);
	}

	/* Enable RX */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	rx_ctl |= 0x10; /* Enable the feature */
	rx_ctl |= 0x0a; /* This value turns on Ver 1 and 2 */
	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);

	/*
	 * Ethertype Stamping (Ethertype = 0x88F7)
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RXMTRL, htonl(0x440088f7));

	/*
	 * Source Port Queue Filter Setup:
	 *  this is for UDP port filtering
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RXUDP, htons(TSYNC_PORT));
	/* Protocol = UDP, enable Timestamp, and filter on source/protocol */

	E1000_WRITE_FLUSH(&adapter->hw);

	/* Read back and verify the RX enable bit (0x10) stuck */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	if ((rx_ctl & 0x10) == 0) {
     		device_printf(dev, "Failed to enable RX timestamping\n");
		return (ENXIO);
	}

	device_printf(dev, "IEEE 1588 Precision Time Protocol enabled\n");

	return (0);
}
5281 
5282 /*
5283  * Disable the Time Sync Feature
5284  */
static void
em_tsync_disable(struct adapter *adapter)
{
	u32		tx_ctl, rx_ctl;

	/* Clear the TX timestamping enable bit (0x10) */
	tx_ctl =  E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	tx_ctl &= ~0x10;
	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
	E1000_WRITE_FLUSH(&adapter->hw);

	/* Invalidate TX Timestamp */
	E1000_READ_REG(&adapter->hw, E1000_TXSTMPH);

	/* Verify the enable bit really cleared; warn if not */
	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
	if (tx_ctl & 0x10)
     		HW_DEBUGOUT("Failed to disable TX timestamping\n");

	/* Clear the RX timestamping enable bit (0x10) */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	rx_ctl &= ~0x10;

	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);
	E1000_WRITE_FLUSH(&adapter->hw);

	/* Invalidate RX Timestamp */
	E1000_READ_REG(&adapter->hw, E1000_RXSATRH);

	/* Verify the enable bit really cleared; warn if not */
	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
	if (rx_ctl & 0x10)
		HW_DEBUGOUT("Failed to disable RX timestamping\n");

	return;
}
5317 #endif /* EM_TIMESYNC */
5318