xref: /freebsd/usr.sbin/bhyve/pci_e82545.c (revision 3c4ba5f55438f7afd4f4b0b56f88f2bb505fd6a6)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
5  * Copyright (c) 2015 Peter Grehan <grehan@freebsd.org>
6  * Copyright (c) 2013 Jeremiah Lott, Avere Systems
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer
14  *    in this position and unchanged.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/types.h>
36 #ifndef WITHOUT_CAPSICUM
37 #include <sys/capsicum.h>
38 #endif
39 #include <sys/limits.h>
40 #include <sys/ioctl.h>
41 #include <sys/uio.h>
42 #include <net/ethernet.h>
43 #include <netinet/in.h>
44 #include <netinet/tcp.h>
45 
46 #ifndef WITHOUT_CAPSICUM
47 #include <capsicum_helpers.h>
48 #endif
49 
50 #include <err.h>
51 #include <errno.h>
52 #include <fcntl.h>
53 #include <md5.h>
54 #include <stdio.h>
55 #include <stdlib.h>
56 #include <string.h>
57 #include <sysexits.h>
58 #include <unistd.h>
59 #include <pthread.h>
60 #include <pthread_np.h>
61 
62 #include "e1000_regs.h"
63 #include "e1000_defines.h"
64 #include "mii.h"
65 
66 #include "bhyverun.h"
67 #include "config.h"
68 #include "debug.h"
69 #include "pci_emul.h"
70 #ifdef BHYVE_SNAPSHOT
71 #include "snapshot.h"
72 #endif
73 #include "mevent.h"
74 #include "net_utils.h"
75 #include "net_backends.h"
76 
77 /* Hardware/register definitions XXX: move some to common code. */
78 #define E82545_VENDOR_ID_INTEL			0x8086
79 #define E82545_DEV_ID_82545EM_COPPER		0x100F
80 #define E82545_SUBDEV_ID			0x1008
81 
82 #define E82545_REVISION_4			4
83 
84 #define E82545_MDIC_DATA_MASK			0x0000FFFF
85 #define E82545_MDIC_OP_MASK			0x0c000000
86 #define E82545_MDIC_IE				0x20000000
87 
88 #define E82545_EECD_FWE_DIS	0x00000010 /* Flash writes disabled */
89 #define E82545_EECD_FWE_EN	0x00000020 /* Flash writes enabled */
90 #define E82545_EECD_FWE_MASK	0x00000030 /* Flash writes mask */
91 
92 #define E82545_BAR_REGISTER			0
93 #define E82545_BAR_REGISTER_LEN			(128*1024)
94 #define E82545_BAR_FLASH			1
95 #define E82545_BAR_FLASH_LEN			(64*1024)
96 #define E82545_BAR_IO				2
97 #define E82545_BAR_IO_LEN			8
98 
99 #define E82545_IOADDR				0x00000000
100 #define E82545_IODATA				0x00000004
101 #define E82545_IO_REGISTER_MAX			0x0001FFFF
102 #define E82545_IO_FLASH_BASE			0x00080000
103 #define E82545_IO_FLASH_MAX			0x000FFFFF
104 
105 #define E82545_ARRAY_ENTRY(reg, offset)		(reg + (offset<<2))
106 #define E82545_RAR_MAX				15
107 #define E82545_MTA_MAX				127
108 #define E82545_VFTA_MAX				127
109 
110 /* Slightly modified from the driver versions: hardcoded to 3 opcode bits
111  * followed by 6 address bits.
112  * TODO: make opcode bits and addr bits configurable?
113  * NVM Commands - Microwire */
114 #define E82545_NVM_OPCODE_BITS	3
115 #define E82545_NVM_ADDR_BITS	6
116 #define E82545_NVM_DATA_BITS	16
117 #define E82545_NVM_OPADDR_BITS	(E82545_NVM_OPCODE_BITS + E82545_NVM_ADDR_BITS)
118 #define E82545_NVM_ADDR_MASK	((1 << E82545_NVM_ADDR_BITS)-1)
119 #define E82545_NVM_OPCODE_MASK	\
120     (((1 << E82545_NVM_OPCODE_BITS) - 1) << E82545_NVM_ADDR_BITS)
121 #define E82545_NVM_OPCODE_READ	(0x6 << E82545_NVM_ADDR_BITS)	/* read */
122 #define E82545_NVM_OPCODE_WRITE	(0x5 << E82545_NVM_ADDR_BITS)	/* write */
123 #define E82545_NVM_OPCODE_ERASE	(0x7 << E82545_NVM_ADDR_BITS)	/* erase */
124 #define	E82545_NVM_OPCODE_EWEN	(0x4 << E82545_NVM_ADDR_BITS)	/* wr-enable */
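/*
 * Illustration (derived from the state machine below): a READ of word 0x12
 * is clocked in MSB-first as the 9-bit value 0b110010010 -- opcode 0x6 in
 * the top 3 bits, the address in the low 6 bits -- after which 16 data bits
 * are clocked back out, also MSB-first.
 */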
125 
126 #define	E82545_NVM_EEPROM_SIZE	64 /* 64 * 16-bit values == 128 bytes */
127 
128 #define E1000_ICR_SRPD		0x00010000
129 
130 /* This is an arbitrary number.  There is no hard limit on the chip. */
131 #define I82545_MAX_TXSEGS	64
132 
133 /* Legacy receive descriptor */
134 struct e1000_rx_desc {
135 	uint64_t buffer_addr;	/* Address of the descriptor's data buffer */
136 	uint16_t length;	/* Length of data DMAed into data buffer */
137 	uint16_t csum;		/* Packet checksum */
138 	uint8_t	 status;       	/* Descriptor status */
139 	uint8_t  errors;	/* Descriptor Errors */
140 	uint16_t special;
141 };
142 
143 /* Transmit descriptor types */
144 #define	E1000_TXD_MASK		(E1000_TXD_CMD_DEXT | 0x00F00000)
145 #define E1000_TXD_TYP_L		(0)
146 #define E1000_TXD_TYP_C		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C)
147 #define E1000_TXD_TYP_D		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)
148 
149 /* Legacy transmit descriptor */
150 struct e1000_tx_desc {
151 	uint64_t buffer_addr;   /* Address of the descriptor's data buffer */
152 	union {
153 		uint32_t data;
154 		struct {
155 			uint16_t length;  /* Data buffer length */
156 			uint8_t  cso;  /* Checksum offset */
157 			uint8_t  cmd;  /* Descriptor control */
158 		} flags;
159 	} lower;
160 	union {
161 		uint32_t data;
162 		struct {
163 			uint8_t status; /* Descriptor status */
164 			uint8_t css;  /* Checksum start */
165 			uint16_t special;
166 		} fields;
167 	} upper;
168 };
169 
170 /* Context descriptor */
171 struct e1000_context_desc {
172 	union {
173 		uint32_t ip_config;
174 		struct {
175 			uint8_t ipcss;  /* IP checksum start */
176 			uint8_t ipcso;  /* IP checksum offset */
177 			uint16_t ipcse;  /* IP checksum end */
178 		} ip_fields;
179 	} lower_setup;
180 	union {
181 		uint32_t tcp_config;
182 		struct {
183 			uint8_t tucss;  /* TCP checksum start */
184 			uint8_t tucso;  /* TCP checksum offset */
185 			uint16_t tucse;  /* TCP checksum end */
186 		} tcp_fields;
187 	} upper_setup;
188 	uint32_t cmd_and_length;
189 	union {
190 		uint32_t data;
191 		struct {
192 			uint8_t status;  /* Descriptor status */
193 			uint8_t hdr_len;  /* Header length */
194 			uint16_t mss;  /* Maximum segment size */
195 		} fields;
196 	} tcp_seg_setup;
197 };
198 
199 /* Data descriptor */
200 struct e1000_data_desc {
201 	uint64_t buffer_addr;  /* Address of the descriptor's data buffer */
202 	union {
203 		uint32_t data;
204 		struct {
205 			uint16_t length;  /* Data buffer length */
206 			uint8_t typ_len_ext;
207 			uint8_t cmd;
208 		} flags;
209 	} lower;
210 	union {
211 		uint32_t data;
212 		struct {
213 			uint8_t status;  /* Descriptor status */
214 			uint8_t popts;  /* Packet Options */
215 			uint16_t special;
216 		} fields;
217 	} upper;
218 };
219 
220 union e1000_tx_udesc {
221 	struct e1000_tx_desc td;
222 	struct e1000_context_desc cd;
223 	struct e1000_data_desc dd;
224 };
225 
226 /* Tx checksum info for a packet. */
227 struct ck_info {
228 	int	ck_valid;	/* ck_info is valid */
229 	uint8_t	ck_start;	/* start byte of cksum calculation */
230 	uint8_t	ck_off;		/* offset of cksum insertion */
231 	uint16_t ck_len;	/* length of cksum calc: 0 is to packet-end */
232 };
233 
234 /*
235  * Debug printf
236  */
237 static int e82545_debug = 0;
238 #define WPRINTF(msg,params...) PRINTLN("e82545: " msg, ##params)
239 #define DPRINTF(msg,params...) if (e82545_debug) WPRINTF(msg, params)
240 
241 #define	MIN(a,b) (((a)<(b))?(a):(b))
242 #define	MAX(a,b) (((a)>(b))?(a):(b))
243 
244 /* s/w representation of the RAL/RAH regs */
245 struct  eth_uni {
246 	int		eu_valid;
247 	int		eu_addrsel;
248 	struct ether_addr eu_eth;
249 };
250 
251 
252 struct e82545_softc {
253 	struct pci_devinst *esc_pi;
254 	struct vmctx	*esc_ctx;
255 	struct mevent   *esc_mevpitr;
256 	pthread_mutex_t	esc_mtx;
257 	struct ether_addr esc_mac;
258 	net_backend_t	*esc_be;
259 
260 	/* General */
261 	uint32_t	esc_CTRL;	/* x0000 device ctl */
262 	uint32_t	esc_FCAL;	/* x0028 flow ctl addr lo */
263 	uint32_t	esc_FCAH;	/* x002C flow ctl addr hi */
264 	uint32_t	esc_FCT;	/* x0030 flow ctl type */
265 	uint32_t	esc_VET;	/* x0038 VLAN eth type */
266 	uint32_t	esc_FCTTV;	/* x0170 flow ctl tx timer */
267 	uint32_t	esc_LEDCTL;	/* x0E00 LED control */
268 	uint32_t	esc_PBA;	/* x1000 pkt buffer allocation */
269 
270 	/* Interrupt control */
271 	int		esc_irq_asserted;
272 	uint32_t	esc_ICR;	/* x00C0 cause read/clear */
273 	uint32_t	esc_ITR;	/* x00C4 intr throttling */
274 	uint32_t	esc_ICS;	/* x00C8 cause set */
275 	uint32_t	esc_IMS;	/* x00D0 mask set/read */
276 	uint32_t	esc_IMC;	/* x00D8 mask clear */
277 
278 	/* Transmit */
279 	union e1000_tx_udesc *esc_txdesc;
280 	struct e1000_context_desc esc_txctx;
281 	pthread_t	esc_tx_tid;
282 	pthread_cond_t	esc_tx_cond;
283 	int		esc_tx_enabled;
284 	int		esc_tx_active;
285 	uint32_t	esc_TXCW;	/* x0178 transmit config */
286 	uint32_t	esc_TCTL;	/* x0400 transmit ctl */
287 	uint32_t	esc_TIPG;	/* x0410 inter-packet gap */
288 	uint16_t	esc_AIT;	/* x0458 Adaptive Interframe Throttle */
289 	uint64_t	esc_tdba;      	/* verified 64-bit desc table addr */
290 	uint32_t	esc_TDBAL;	/* x3800 desc table addr, low bits */
291 	uint32_t	esc_TDBAH;	/* x3804 desc table addr, hi 32-bits */
292 	uint32_t	esc_TDLEN;	/* x3808 # descriptors in bytes */
293 	uint16_t	esc_TDH;	/* x3810 desc table head idx */
294 	uint16_t	esc_TDHr;	/* internal read version of TDH */
295 	uint16_t	esc_TDT;	/* x3818 desc table tail idx */
296 	uint32_t	esc_TIDV;	/* x3820 intr delay */
297 	uint32_t	esc_TXDCTL;	/* x3828 desc control */
298 	uint32_t	esc_TADV;	/* x382C intr absolute delay */
299 
300 	/* L2 frame acceptance */
301 	struct eth_uni	esc_uni[16];	/* 16 x unicast MAC addresses */
302 	uint32_t	esc_fmcast[128]; /* Multicast filter bit-match */
303 	uint32_t	esc_fvlan[128]; /* VLAN 4096-bit filter */
304 
305 	/* Receive */
306 	struct e1000_rx_desc *esc_rxdesc;
307 	pthread_cond_t	esc_rx_cond;
308 	int		esc_rx_enabled;
309 	int		esc_rx_active;
310 	int		esc_rx_loopback;
311 	uint32_t	esc_RCTL;	/* x0100 receive ctl */
312 	uint32_t	esc_FCRTL;	/* x2160 flow cntl thresh, low */
313 	uint32_t	esc_FCRTH;	/* x2168 flow cntl thresh, hi */
314 	uint64_t	esc_rdba;	/* verified 64-bit desc table addr */
315 	uint32_t	esc_RDBAL;	/* x2800 desc table addr, low bits */
316 	uint32_t	esc_RDBAH;	/* x2804 desc table addr, hi 32-bits*/
317 	uint32_t	esc_RDLEN;	/* x2808 #descriptors */
318 	uint16_t	esc_RDH;	/* x2810 desc table head idx */
319 	uint16_t	esc_RDT;	/* x2818 desc table tail idx */
320 	uint32_t	esc_RDTR;	/* x2820 intr delay */
321 	uint32_t	esc_RXDCTL;	/* x2828 desc control */
322 	uint32_t	esc_RADV;	/* x282C intr absolute delay */
323 	uint32_t	esc_RSRPD;	/* x2C00 recv small packet detect */
324 	uint32_t	esc_RXCSUM;     /* x5000 receive cksum ctl */
325 
326 	/* IO Port register access */
327 	uint32_t io_addr;
328 
329 	/* Shadow copy of MDIC */
330 	uint32_t mdi_control;
331 	/* Shadow copy of EECD */
332 	uint32_t eeprom_control;
333 	/* Latest NVM in/out */
334 	uint16_t nvm_data;
335 	uint16_t nvm_opaddr;
336 	/* stats */
337 	uint32_t missed_pkt_count; /* dropped for no room in rx queue */
338 	uint32_t pkt_rx_by_size[6];
339 	uint32_t pkt_tx_by_size[6];
340 	uint32_t good_pkt_rx_count;
341 	uint32_t bcast_pkt_rx_count;
342 	uint32_t mcast_pkt_rx_count;
343 	uint32_t good_pkt_tx_count;
344 	uint32_t bcast_pkt_tx_count;
345 	uint32_t mcast_pkt_tx_count;
346 	uint32_t oversize_rx_count;
347 	uint32_t tso_tx_count;
348 	uint64_t good_octets_rx;
349 	uint64_t good_octets_tx;
350 	uint64_t missed_octets; /* counts missed and oversized */
351 
352 	uint8_t nvm_bits:6; /* number of bits remaining in/out */
353 	uint8_t nvm_mode:2;
354 #define E82545_NVM_MODE_OPADDR  0x0
355 #define E82545_NVM_MODE_DATAIN  0x1
356 #define E82545_NVM_MODE_DATAOUT 0x2
357 	/* EEPROM data */
358 	uint16_t eeprom_data[E82545_NVM_EEPROM_SIZE];
359 };
360 
361 static void e82545_reset(struct e82545_softc *sc, int dev);
362 static void e82545_rx_enable(struct e82545_softc *sc);
363 static void e82545_rx_disable(struct e82545_softc *sc);
364 static void e82545_rx_callback(int fd, enum ev_type type, void *param);
365 static void e82545_tx_start(struct e82545_softc *sc);
366 static void e82545_tx_enable(struct e82545_softc *sc);
367 static void e82545_tx_disable(struct e82545_softc *sc);
368 
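/*
 * Map a packet size to one of the six pkt_rx_by_size/pkt_tx_by_size
 * histogram buckets: 0 for sizes up to 64 bytes, 5 for 1024 bytes and
 * larger, and 1-4 for sizes in between.
 */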
369 static inline int __unused
370 e82545_size_stat_index(uint32_t size)
371 {
372 	if (size <= 64) {
373 		return 0;
374 	} else if (size >= 1024) {
375 		return 5;
376 	} else {
377 		/* should be 1-4 */
378 		return (ffs(size) - 6);
379 	}
380 }
381 
382 static void
383 e82545_init_eeprom(struct e82545_softc *sc)
384 {
385 	uint16_t checksum, i;
386 
387 	/* mac addr */
388 	sc->eeprom_data[NVM_MAC_ADDR] = ((uint16_t)sc->esc_mac.octet[0]) |
389 		(((uint16_t)sc->esc_mac.octet[1]) << 8);
390 	sc->eeprom_data[NVM_MAC_ADDR+1] = ((uint16_t)sc->esc_mac.octet[2]) |
391 		(((uint16_t)sc->esc_mac.octet[3]) << 8);
392 	sc->eeprom_data[NVM_MAC_ADDR+2] = ((uint16_t)sc->esc_mac.octet[4]) |
393 		(((uint16_t)sc->esc_mac.octet[5]) << 8);
394 
395 	/* pci ids */
396 	sc->eeprom_data[NVM_SUB_DEV_ID] = E82545_SUBDEV_ID;
397 	sc->eeprom_data[NVM_SUB_VEN_ID] = E82545_VENDOR_ID_INTEL;
398 	sc->eeprom_data[NVM_DEV_ID] = E82545_DEV_ID_82545EM_COPPER;
399 	sc->eeprom_data[NVM_VEN_ID] = E82545_VENDOR_ID_INTEL;
400 
401 	/* fill in the checksum */
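	/* Words 0x00 through NVM_CHECKSUM_REG must sum to NVM_SUM. */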
402 	checksum = 0;
403 	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
404 		checksum += sc->eeprom_data[i];
405 	}
406 	checksum = NVM_SUM - checksum;
407 	sc->eeprom_data[NVM_CHECKSUM_REG] = checksum;
408 	DPRINTF("eeprom checksum: 0x%x", checksum);
409 }
410 
411 static void
412 e82545_write_mdi(struct e82545_softc *sc __unused, uint8_t reg_addr,
413     uint8_t phy_addr, uint32_t data)
414 {
415 	DPRINTF("Write mdi reg:0x%x phy:0x%x data: 0x%x", reg_addr, phy_addr, data);
416 }
417 
418 static uint32_t
419 e82545_read_mdi(struct e82545_softc *sc __unused, uint8_t reg_addr,
420     uint8_t phy_addr)
421 {
422 	//DPRINTF("Read mdi reg:0x%x phy:0x%x", reg_addr, phy_addr);
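	/*
	 * Report a permanently linked-up gigabit copper PHY; link and
	 * autonegotiation state are hard-coded rather than modeled.
	 */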
423 	switch (reg_addr) {
424 	case PHY_STATUS:
425 		return (MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
426 			MII_SR_AUTONEG_COMPLETE);
427 	case PHY_AUTONEG_ADV:
428 		return NWAY_AR_SELECTOR_FIELD;
429 	case PHY_LP_ABILITY:
430 		return 0;
431 	case PHY_1000T_STATUS:
432 		return (SR_1000T_LP_FD_CAPS | SR_1000T_REMOTE_RX_STATUS |
433 			SR_1000T_LOCAL_RX_STATUS);
434 	case PHY_ID1:
435 		return (M88E1011_I_PHY_ID >> 16) & 0xFFFF;
436 	case PHY_ID2:
437 		return (M88E1011_I_PHY_ID | E82545_REVISION_4) & 0xFFFF;
438 	default:
439 		DPRINTF("Unknown mdi read reg:0x%x phy:0x%x", reg_addr, phy_addr);
440 		return 0;
441 	}
442 	/* not reached */
443 }
444 
445 static void
446 e82545_eecd_strobe(struct e82545_softc *sc)
447 {
448 	/* Microwire state machine */
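	/*
	 * Each strobe corresponds to one serial clock edge and shifts a
	 * single bit.  The machine first collects E82545_NVM_OPADDR_BITS
	 * opcode/address bits, then shifts 16 data bits out (read) or
	 * in (write) before returning to opcode/address mode.
	 */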
449 	/*
450 	DPRINTF("eeprom state machine strobe "
451 		"0x%x 0x%x 0x%x 0x%x",
452 		sc->nvm_mode, sc->nvm_bits,
453 		sc->nvm_opaddr, sc->nvm_data);*/
454 
455 	if (sc->nvm_bits == 0) {
456 		DPRINTF("eeprom state machine not expecting data! "
457 			"0x%x 0x%x 0x%x 0x%x",
458 			sc->nvm_mode, sc->nvm_bits,
459 			sc->nvm_opaddr, sc->nvm_data);
460 		return;
461 	}
462 	sc->nvm_bits--;
463 	if (sc->nvm_mode == E82545_NVM_MODE_DATAOUT) {
464 		/* shifting out */
465 		if (sc->nvm_data & 0x8000) {
466 			sc->eeprom_control |= E1000_EECD_DO;
467 		} else {
468 			sc->eeprom_control &= ~E1000_EECD_DO;
469 		}
470 		sc->nvm_data <<= 1;
471 		if (sc->nvm_bits == 0) {
472 			/* read done, back to opcode mode. */
473 			sc->nvm_opaddr = 0;
474 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
475 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
476 		}
477 	} else if (sc->nvm_mode == E82545_NVM_MODE_DATAIN) {
478 		/* shifting in */
479 		sc->nvm_data <<= 1;
480 		if (sc->eeprom_control & E1000_EECD_DI) {
481 			sc->nvm_data |= 1;
482 		}
483 		if (sc->nvm_bits == 0) {
484 			/* eeprom write */
485 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
486 			uint16_t addr = sc->nvm_opaddr & E82545_NVM_ADDR_MASK;
487 			if (op != E82545_NVM_OPCODE_WRITE) {
488 				DPRINTF("Illegal eeprom write op 0x%x",
489 					sc->nvm_opaddr);
490 			} else if (addr >= E82545_NVM_EEPROM_SIZE) {
491 				DPRINTF("Illegal eeprom write addr 0x%x",
492 					sc->nvm_opaddr);
493 			} else {
494 				DPRINTF("eeprom write eeprom[0x%x] = 0x%x",
495 				addr, sc->nvm_data);
496 				sc->eeprom_data[addr] = sc->nvm_data;
497 			}
498 			/* back to opcode mode */
499 			sc->nvm_opaddr = 0;
500 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
501 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
502 		}
503 	} else if (sc->nvm_mode == E82545_NVM_MODE_OPADDR) {
504 		sc->nvm_opaddr <<= 1;
505 		if (sc->eeprom_control & E1000_EECD_DI) {
506 			sc->nvm_opaddr |= 1;
507 		}
508 		if (sc->nvm_bits == 0) {
509 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
510 			switch (op) {
511 			case E82545_NVM_OPCODE_EWEN:
512 				DPRINTF("eeprom write enable: 0x%x",
513 					sc->nvm_opaddr);
514 				/* back to opcode mode */
515 				sc->nvm_opaddr = 0;
516 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
517 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
518 				break;
519 			case E82545_NVM_OPCODE_READ:
520 			{
521 				uint16_t addr = sc->nvm_opaddr &
522 					E82545_NVM_ADDR_MASK;
523 				sc->nvm_mode = E82545_NVM_MODE_DATAOUT;
524 				sc->nvm_bits = E82545_NVM_DATA_BITS;
525 				if (addr < E82545_NVM_EEPROM_SIZE) {
526 					sc->nvm_data = sc->eeprom_data[addr];
527 					DPRINTF("eeprom read: eeprom[0x%x] = 0x%x",
528 						addr, sc->nvm_data);
529 				} else {
530 					DPRINTF("eeprom illegal read: 0x%x",
531 						sc->nvm_opaddr);
532 					sc->nvm_data = 0;
533 				}
534 				break;
535 			}
536 			case E82545_NVM_OPCODE_WRITE:
537 				sc->nvm_mode = E82545_NVM_MODE_DATAIN;
538 				sc->nvm_bits = E82545_NVM_DATA_BITS;
539 				sc->nvm_data = 0;
540 				break;
541 			default:
542 				DPRINTF("eeprom unknown op: 0x%x",
543 					sc->nvm_opaddr);
544 				/* back to opcode mode */
545 				sc->nvm_opaddr = 0;
546 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
547 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
548 			}
549 		}
550 	} else {
551 		DPRINTF("eeprom state machine wrong state! "
552 			"0x%x 0x%x 0x%x 0x%x",
553 			sc->nvm_mode, sc->nvm_bits,
554 			sc->nvm_opaddr, sc->nvm_data);
555 	}
556 }
557 
558 static void
559 e82545_itr_callback(int fd __unused, enum ev_type type __unused, void *param)
560 {
561 	uint32_t new;
562 	struct e82545_softc *sc = param;
563 
564 	pthread_mutex_lock(&sc->esc_mtx);
565 	new = sc->esc_ICR & sc->esc_IMS;
566 	if (new && !sc->esc_irq_asserted) {
567 		DPRINTF("itr callback: lintr assert %x", new);
568 		sc->esc_irq_asserted = 1;
569 		pci_lintr_assert(sc->esc_pi);
570 	} else {
571 		mevent_delete(sc->esc_mevpitr);
572 		sc->esc_mevpitr = NULL;
573 	}
574 	pthread_mutex_unlock(&sc->esc_mtx);
575 }
576 
577 static void
578 e82545_icr_assert(struct e82545_softc *sc, uint32_t bits)
579 {
580 	uint32_t new;
581 
582 	DPRINTF("icr assert: 0x%x", bits);
583 
584 	/*
585 	 * An interrupt is only generated if bits are set that
586 	 * aren't already in the ICR, these bits are unmasked,
587 	 * and there isn't an interrupt already pending.
588 	 */
589 	new = bits & ~sc->esc_ICR & sc->esc_IMS;
590 	sc->esc_ICR |= bits;
591 
592 	if (new == 0) {
593 		DPRINTF("icr assert: masked %x, ims %x", new, sc->esc_IMS);
594 	} else if (sc->esc_mevpitr != NULL) {
595 		DPRINTF("icr assert: throttled %x, ims %x", new, sc->esc_IMS);
596 	} else if (!sc->esc_irq_asserted) {
597 		DPRINTF("icr assert: lintr assert %x", new);
598 		sc->esc_irq_asserted = 1;
599 		pci_lintr_assert(sc->esc_pi);
600 		if (sc->esc_ITR != 0) {
601 			sc->esc_mevpitr = mevent_add(
602 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns units -> ms, round up */
603 			    EVF_TIMER, e82545_itr_callback, sc);
604 		}
605 	}
606 }
607 
608 static void
609 e82545_ims_change(struct e82545_softc *sc, uint32_t bits)
610 {
611 	uint32_t new;
612 
613 	/*
614 	 * Changing the mask may allow previously asserted
615 	 * but masked interrupt requests to generate an interrupt.
616 	 */
617 	new = bits & sc->esc_ICR & ~sc->esc_IMS;
618 	sc->esc_IMS |= bits;
619 
620 	if (new == 0) {
621 		DPRINTF("ims change: masked %x, ims %x", new, sc->esc_IMS);
622 	} else if (sc->esc_mevpitr != NULL) {
623 		DPRINTF("ims change: throttled %x, ims %x", new, sc->esc_IMS);
624 	} else if (!sc->esc_irq_asserted) {
625 		DPRINTF("ims change: lintr assert %x", new);
626 		sc->esc_irq_asserted = 1;
627 		pci_lintr_assert(sc->esc_pi);
628 		if (sc->esc_ITR != 0) {
629 			sc->esc_mevpitr = mevent_add(
630 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns units -> ms, round up */
631 			    EVF_TIMER, e82545_itr_callback, sc);
632 		}
633 	}
634 }
635 
636 static void
637 e82545_icr_deassert(struct e82545_softc *sc, uint32_t bits)
638 {
639 
640 	DPRINTF("icr deassert: 0x%x", bits);
641 	sc->esc_ICR &= ~bits;
642 
643 	/*
644 	 * If there are no longer any interrupt sources and there
645 	 * was an asserted interrupt, clear it
646 	 */
647 	if (sc->esc_irq_asserted && !(sc->esc_ICR & sc->esc_IMS)) {
648 		DPRINTF("icr deassert: lintr deassert %x", bits);
649 		pci_lintr_deassert(sc->esc_pi);
650 		sc->esc_irq_asserted = 0;
651 	}
652 }
653 
654 static void
655 e82545_intr_write(struct e82545_softc *sc, uint32_t offset, uint32_t value)
656 {
657 
658 	DPRINTF("intr_write: off %x, val %x", offset, value);
659 
660 	switch (offset) {
661 	case E1000_ICR:
662 		e82545_icr_deassert(sc, value);
663 		break;
664 	case E1000_ITR:
665 		sc->esc_ITR = value;
666 		break;
667 	case E1000_ICS:
668 		sc->esc_ICS = value;	/* not used: store for debug */
669 		e82545_icr_assert(sc, value);
670 		break;
671 	case E1000_IMS:
672 		e82545_ims_change(sc, value);
673 		break;
674 	case E1000_IMC:
675 		sc->esc_IMC = value;	/* for debug */
676 		sc->esc_IMS &= ~value;
677 		// XXX clear interrupts if all ICR bits now masked
678 		// and interrupt was pending ?
679 		break;
680 	default:
681 		break;
682 	}
683 }
684 
685 static uint32_t
686 e82545_intr_read(struct e82545_softc *sc, uint32_t offset)
687 {
688 	uint32_t retval;
689 
690 	retval = 0;
691 
692 	DPRINTF("intr_read: off %x", offset);
693 
694 	switch (offset) {
695 	case E1000_ICR:
696 		retval = sc->esc_ICR;
697 		sc->esc_ICR = 0;
698 		e82545_icr_deassert(sc, ~0);
699 		break;
700 	case E1000_ITR:
701 		retval = sc->esc_ITR;
702 		break;
703 	case E1000_ICS:
704 		/* write-only register */
705 		break;
706 	case E1000_IMS:
707 		retval = sc->esc_IMS;
708 		break;
709 	case E1000_IMC:
710 		/* write-only register */
711 		break;
712 	default:
713 		break;
714 	}
715 
716 	return (retval);
717 }
718 
719 static void
720 e82545_devctl(struct e82545_softc *sc, uint32_t val)
721 {
722 
723 	sc->esc_CTRL = val & ~E1000_CTRL_RST;
724 
725 	if (val & E1000_CTRL_RST) {
726 		DPRINTF("e1k: s/w reset, ctl %x", val);
727 		e82545_reset(sc, 1);
728 	}
729 	/* XXX check for phy reset ? */
730 }
731 
732 static void
733 e82545_rx_update_rdba(struct e82545_softc *sc)
734 {
735 
736 	/* XXX verify desc base/len within phys mem range */
737 	sc->esc_rdba = (uint64_t)sc->esc_RDBAH << 32 |
738 	    sc->esc_RDBAL;
739 
740 	/* Cache host mapping of guest descriptor array */
741 	sc->esc_rxdesc = paddr_guest2host(sc->esc_ctx,
742 	    sc->esc_rdba, sc->esc_RDLEN);
743 }
744 
745 static void
746 e82545_rx_ctl(struct e82545_softc *sc, uint32_t val)
747 {
748 	int on;
749 
750 	on = ((val & E1000_RCTL_EN) == E1000_RCTL_EN);
751 
752 	/* Save RCTL after stripping reserved bits 31:27,24,21,14,11:10,0 */
753 	sc->esc_RCTL = val & ~0xF9204c01;
754 
755 	DPRINTF("rx_ctl - %s RCTL %x, val %x",
756 		on ? "on" : "off", sc->esc_RCTL, val);
757 
758 	/* state change requested */
759 	if (on != sc->esc_rx_enabled) {
760 		if (on) {
761 			/* Catch disallowed/unimplemented settings */
762 			//assert(!(val & E1000_RCTL_LBM_TCVR));
763 
764 			if (sc->esc_RCTL & E1000_RCTL_LBM_TCVR) {
765 				sc->esc_rx_loopback = 1;
766 			} else {
767 				sc->esc_rx_loopback = 0;
768 			}
769 
770 			e82545_rx_update_rdba(sc);
771 			e82545_rx_enable(sc);
772 		} else {
773 			e82545_rx_disable(sc);
774 			sc->esc_rx_loopback = 0;
775 			sc->esc_rdba = 0;
776 			sc->esc_rxdesc = NULL;
777 		}
778 	}
779 }
780 
781 static void
782 e82545_tx_update_tdba(struct e82545_softc *sc)
783 {
784 
785 	/* XXX verify desc base/len within phys mem range */
786 	sc->esc_tdba = (uint64_t)sc->esc_TDBAH << 32 | sc->esc_TDBAL;
787 
788 	/* Cache host mapping of guest descriptor array */
789 	sc->esc_txdesc = paddr_guest2host(sc->esc_ctx, sc->esc_tdba,
790             sc->esc_TDLEN);
791 }
792 
793 static void
794 e82545_tx_ctl(struct e82545_softc *sc, uint32_t val)
795 {
796 	int on;
797 
798 	on = ((val & E1000_TCTL_EN) == E1000_TCTL_EN);
799 
800 	/* ignore TCTL_EN settings that don't change state */
801 	if (on == sc->esc_tx_enabled)
802 		return;
803 
804 	if (on) {
805 		e82545_tx_update_tdba(sc);
806 		e82545_tx_enable(sc);
807 	} else {
808 		e82545_tx_disable(sc);
809 		sc->esc_tdba = 0;
810 		sc->esc_txdesc = NULL;
811 	}
812 
813 	/* Save TCTL value after stripping reserved bits 31:25,23,2,0 */
814 	sc->esc_TCTL = val & ~0xFE800005;
815 }
816 
817 static int
818 e82545_bufsz(uint32_t rctl)
819 {
820 
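	/*
	 * Decode the RCTL buffer-size bits: with E1000_RCTL_BSEX clear the
	 * SZ field selects 2048/1024/512/256 bytes; with BSEX set it selects
	 * 16384/8192/4096.  The remaining combination is reserved and falls
	 * through to the forbidden value below.
	 */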
821 	switch (rctl & (E1000_RCTL_BSEX | E1000_RCTL_SZ_256)) {
822 	case (E1000_RCTL_SZ_2048): return (2048);
823 	case (E1000_RCTL_SZ_1024): return (1024);
824 	case (E1000_RCTL_SZ_512): return (512);
825 	case (E1000_RCTL_SZ_256): return (256);
826 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_16384): return (16384);
827 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_8192): return (8192);
828 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_4096): return (4096);
829 	}
830 	return (256);	/* Forbidden value. */
831 }
832 
833 /* XXX one packet at a time until this is debugged */
834 static void
835 e82545_rx_callback(int fd __unused, enum ev_type type __unused, void *param)
836 {
837 	struct e82545_softc *sc = param;
838 	struct e1000_rx_desc *rxd;
839 	struct iovec vec[64];
840 	ssize_t len;
841 	int left, lim, maxpktsz, maxpktdesc, bufsz, i, n, size;
842 	uint32_t cause = 0;
843 	uint16_t *tp, tag, head;
844 
845 	pthread_mutex_lock(&sc->esc_mtx);
846 	DPRINTF("rx_run: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
847 
848 	if (!sc->esc_rx_enabled || sc->esc_rx_loopback) {
849 		DPRINTF("rx disabled (!%d || %d) -- packet(s) dropped",
850 		    sc->esc_rx_enabled, sc->esc_rx_loopback);
851 		while (netbe_rx_discard(sc->esc_be) > 0) {
852 		}
853 		goto done1;
854 	}
855 	bufsz = e82545_bufsz(sc->esc_RCTL);
856 	maxpktsz = (sc->esc_RCTL & E1000_RCTL_LPE) ? 16384 : 1522;
857 	maxpktdesc = (maxpktsz + bufsz - 1) / bufsz;
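	/* RDLEN is in bytes; each legacy receive descriptor is 16 bytes. */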
858 	size = sc->esc_RDLEN / 16;
859 	head = sc->esc_RDH;
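	/* Count the descriptors available between the head and the guest-owned tail. */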
860 	left = (size + sc->esc_RDT - head) % size;
861 	if (left < maxpktdesc) {
862 		DPRINTF("rx overflow (%d < %d) -- packet(s) dropped",
863 		    left, maxpktdesc);
864 		while (netbe_rx_discard(sc->esc_be) > 0) {
865 		}
866 		goto done1;
867 	}
868 
869 	sc->esc_rx_active = 1;
870 	pthread_mutex_unlock(&sc->esc_mtx);
871 
872 	for (lim = size / 4; lim > 0 && left >= maxpktdesc; lim -= n) {
873 
874 		/* Grab rx descriptor pointed to by the head pointer */
875 		for (i = 0; i < maxpktdesc; i++) {
876 			rxd = &sc->esc_rxdesc[(head + i) % size];
877 			vec[i].iov_base = paddr_guest2host(sc->esc_ctx,
878 			    rxd->buffer_addr, bufsz);
879 			vec[i].iov_len = bufsz;
880 		}
881 		len = netbe_recv(sc->esc_be, vec, maxpktdesc);
882 		if (len <= 0) {
883 			DPRINTF("netbe_recv() returned %zd", len);
884 			goto done;
885 		}
886 
887 		/*
888 		 * Adjust the packet length based on whether the CRC needs
889 		 * to be stripped or if the packet is less than the minimum
890 		 * eth packet size.
891 		 */
892 		if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
893 			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
894 		if (!(sc->esc_RCTL & E1000_RCTL_SECRC))
895 			len += ETHER_CRC_LEN;
896 		n = (len + bufsz - 1) / bufsz;
897 
898 		DPRINTF("packet read %zd bytes, %d segs, head %d",
899 		    len, n, head);
900 
901 		/* Apply VLAN filter. */
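		/* tp points at the Ethertype word, 12 bytes into the frame. */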
902 		tp = (uint16_t *)vec[0].iov_base + 6;
903 		if ((sc->esc_RCTL & E1000_RCTL_VFE) &&
904 		    (ntohs(tp[0]) == sc->esc_VET)) {
905 			tag = ntohs(tp[1]) & 0x0fff;
906 			if ((sc->esc_fvlan[tag >> 5] &
907 			    (1 << (tag & 0x1f))) != 0) {
908 				DPRINTF("known VLAN %d", tag);
909 			} else {
910 				DPRINTF("unknown VLAN %d", tag);
911 				n = 0;
912 				continue;
913 			}
914 		}
915 
916 		/* Update all consumed descriptors. */
917 		for (i = 0; i < n - 1; i++) {
918 			rxd = &sc->esc_rxdesc[(head + i) % size];
919 			rxd->length = bufsz;
920 			rxd->csum = 0;
921 			rxd->errors = 0;
922 			rxd->special = 0;
923 			rxd->status = E1000_RXD_STAT_DD;
924 		}
925 		rxd = &sc->esc_rxdesc[(head + i) % size];
926 		rxd->length = len % bufsz;
927 		rxd->csum = 0;
928 		rxd->errors = 0;
929 		rxd->special = 0;
930 		/* XXX signal no checksum for now */
931 		rxd->status = E1000_RXD_STAT_PIF | E1000_RXD_STAT_IXSM |
932 		    E1000_RXD_STAT_EOP | E1000_RXD_STAT_DD;
933 
934 		/* Schedule receive interrupts. */
935 		if ((uint32_t)len <= sc->esc_RSRPD) {
936 			cause |= E1000_ICR_SRPD | E1000_ICR_RXT0;
937 		} else {
938 			/* XXX: RDRT and RADV timers should be here. */
939 			cause |= E1000_ICR_RXT0;
940 		}
941 
942 		head = (head + n) % size;
943 		left -= n;
944 	}
945 
946 done:
947 	pthread_mutex_lock(&sc->esc_mtx);
948 	sc->esc_rx_active = 0;
949 	if (sc->esc_rx_enabled == 0)
950 		pthread_cond_signal(&sc->esc_rx_cond);
951 
952 	sc->esc_RDH = head;
953 	/* Respect E1000_RCTL_RDMTS */
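	/* RDMTS (RCTL bits 9:8) sets the free-descriptor threshold to 1/2, 1/4 or 1/8 of the ring. */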
954 	left = (size + sc->esc_RDT - head) % size;
955 	if (left < (size >> (((sc->esc_RCTL >> 8) & 3) + 1)))
956 		cause |= E1000_ICR_RXDMT0;
957 	/* Assert all accumulated interrupts. */
958 	if (cause != 0)
959 		e82545_icr_assert(sc, cause);
960 done1:
961 	DPRINTF("rx_run done: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
962 	pthread_mutex_unlock(&sc->esc_mtx);
963 }
964 
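/* Fold a 32-bit partial sum into a 16-bit ones'-complement checksum. */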
965 static uint16_t
966 e82545_carry(uint32_t sum)
967 {
968 
969 	sum = (sum & 0xFFFF) + (sum >> 16);
970 	if (sum > 0xFFFF)
971 		sum -= 0xFFFF;
972 	return (sum);
973 }
974 
975 static uint16_t
976 e82545_buf_checksum(uint8_t *buf, int len)
977 {
978 	int i;
979 	uint32_t sum = 0;
980 
981 	/* Checksum all the pairs of bytes first... */
982 	for (i = 0; i < (len & ~1); i += 2)
983 		sum += *((u_int16_t *)(buf + i));
984 
985 	/*
986 	 * If there's a single byte left over, checksum it, too.
987 	 * Network byte order is big-endian, so the remaining byte is
988 	 * the high byte.
989 	 */
990 	if (i < len)
991 		sum += htons(buf[i] << 8);
992 
993 	return (e82545_carry(sum));
994 }
995 
996 static uint16_t
997 e82545_iov_checksum(struct iovec *iov, int iovcnt, unsigned int off,
998     unsigned int len)
999 {
1000 	unsigned int now, odd;
1001 	uint32_t sum = 0, s;
1002 
1003 	/* Skip completely unneeded vectors. */
1004 	while (iovcnt > 0 && iov->iov_len <= off && off > 0) {
1005 		off -= iov->iov_len;
1006 		iov++;
1007 		iovcnt--;
1008 	}
1009 
1010 	/* Calculate checksum of requested range. */
1011 	odd = 0;
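	/*
	 * 'odd' tracks whether the previous vector ended on an odd byte; if
	 * so, the next partial sum is shifted left by 8 (effectively
	 * byte-swapped once folded) to preserve 16-bit alignment.
	 */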
1012 	while (len > 0 && iovcnt > 0) {
1013 		now = MIN(len, iov->iov_len - off);
1014 		s = e82545_buf_checksum((uint8_t *)iov->iov_base + off, now);
1015 		sum += odd ? (s << 8) : s;
1016 		odd ^= (now & 1);
1017 		len -= now;
1018 		off = 0;
1019 		iov++;
1020 		iovcnt--;
1021 	}
1022 
1023 	return (e82545_carry(sum));
1024 }
1025 
1026 /*
1027  * Return the transmit descriptor type.
1028  */
1029 static int
1030 e82545_txdesc_type(uint32_t lower)
1031 {
1032 	int type;
1033 
1034 	type = 0;
1035 
1036 	if (lower & E1000_TXD_CMD_DEXT)
1037 		type = lower & E1000_TXD_MASK;
1038 
1039 	return (type);
1040 }
1041 
1042 static void
1043 e82545_transmit_checksum(struct iovec *iov, int iovcnt, struct ck_info *ck)
1044 {
1045 	uint16_t cksum;
1046 	unsigned int cklen;
1047 
1048 	DPRINTF("tx cksum: iovcnt/s/off/len %d/%d/%d/%d",
1049 	    iovcnt, ck->ck_start, ck->ck_off, ck->ck_len);
1050 	cklen = ck->ck_len ? ck->ck_len - ck->ck_start + 1U : UINT_MAX;
1051 	cksum = e82545_iov_checksum(iov, iovcnt, ck->ck_start, cklen);
1052 	*(uint16_t *)((uint8_t *)iov[0].iov_base + ck->ck_off) = ~cksum;
1053 }
1054 
1055 static void
1056 e82545_transmit_backend(struct e82545_softc *sc, struct iovec *iov, int iovcnt)
1057 {
1058 
1059 	if (sc->esc_be == NULL)
1060 		return;
1061 
1062 	(void) netbe_send(sc->esc_be, iov, iovcnt);
1063 }
1064 
1065 static void
1066 e82545_transmit_done(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1067     uint16_t dsize, int *tdwb)
1068 {
1069 	union e1000_tx_udesc *dsc;
1070 
1071 	for ( ; head != tail; head = (head + 1) % dsize) {
1072 		dsc = &sc->esc_txdesc[head];
1073 		if (dsc->td.lower.data & E1000_TXD_CMD_RS) {
1074 			dsc->td.upper.data |= E1000_TXD_STAT_DD;
1075 			*tdwb = 1;
1076 		}
1077 	}
1078 }
1079 
1080 static int
1081 e82545_transmit(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1082     uint16_t dsize, uint16_t *rhead, int *tdwb)
1083 {
1084 	uint8_t *hdr, *hdrp;
1085 	struct iovec iovb[I82545_MAX_TXSEGS + 2];
1086 	struct iovec tiov[I82545_MAX_TXSEGS + 2];
1087 	struct e1000_context_desc *cd;
1088 	struct ck_info ckinfo[2];
1089 	struct iovec *iov;
1090 	union  e1000_tx_udesc *dsc;
1091 	int desc, dtype, ntype, iovcnt, tcp, tso, paylen, seg, tiovcnt, pv;
1092 	unsigned hdrlen, vlen, pktlen, len, left, mss, now, nnow, nleft, pvoff;
1093 	uint32_t tcpsum, tcpseq;
1094 	uint16_t ipcs, tcpcs, ipid, ohead;
1095 	bool invalid;
1096 
1097 	ckinfo[0].ck_valid = ckinfo[1].ck_valid = 0;
1098 	iovcnt = 0;
1099 	ntype = 0;
1100 	tso = 0;
1101 	pktlen = 0;
1102 	ohead = head;
1103 	invalid = false;
1104 
1105 	/* iovb[0/1] may be used for writable copy of headers. */
1106 	iov = &iovb[2];
1107 
1108 	for (desc = 0; ; desc++, head = (head + 1) % dsize) {
1109 		if (head == tail) {
1110 			*rhead = head;
1111 			return (0);
1112 		}
1113 		dsc = &sc->esc_txdesc[head];
1114 		dtype = e82545_txdesc_type(dsc->td.lower.data);
1115 
1116 		if (desc == 0) {
1117 			switch (dtype) {
1118 			case E1000_TXD_TYP_C:
1119 				DPRINTF("tx ctxt desc idx %d: %016jx "
1120 				    "%08x%08x",
1121 				    head, dsc->td.buffer_addr,
1122 				    dsc->td.upper.data, dsc->td.lower.data);
1123 				/* Save context and return */
1124 				sc->esc_txctx = dsc->cd;
1125 				goto done;
1126 			case E1000_TXD_TYP_L:
1127 				DPRINTF("tx legacy desc idx %d: %08x%08x",
1128 				    head, dsc->td.upper.data, dsc->td.lower.data);
1129 				/*
1130 				 * legacy cksum start valid in first descriptor
1131 				 */
1132 				ntype = dtype;
1133 				ckinfo[0].ck_start = dsc->td.upper.fields.css;
1134 				break;
1135 			case E1000_TXD_TYP_D:
1136 				DPRINTF("tx data desc idx %d: %08x%08x",
1137 				    head, dsc->td.upper.data, dsc->td.lower.data);
1138 				ntype = dtype;
1139 				break;
1140 			default:
1141 				break;
1142 			}
1143 		} else {
1144 			/* Descriptor type must be consistent */
1145 			assert(dtype == ntype);
1146 			DPRINTF("tx next desc idx %d: %08x%08x",
1147 			    head, dsc->td.upper.data, dsc->td.lower.data);
1148 		}
1149 
1150 		len = (dtype == E1000_TXD_TYP_L) ? dsc->td.lower.flags.length :
1151 		    dsc->dd.lower.data & 0xFFFFF;
1152 
1153 		/* Strip checksum supplied by guest. */
1154 		if ((dsc->td.lower.data & E1000_TXD_CMD_EOP) != 0 &&
1155 		    (dsc->td.lower.data & E1000_TXD_CMD_IFCS) == 0) {
1156 			if (len <= 2) {
1157 				WPRINTF("final descriptor too short (%d) -- dropped",
1158 				    len);
1159 				invalid = true;
1160 			} else
1161 				len -= 2;
1162 		}
1163 
1164 		if (len > 0 && iovcnt < I82545_MAX_TXSEGS) {
1165 			iov[iovcnt].iov_base = paddr_guest2host(sc->esc_ctx,
1166 			    dsc->td.buffer_addr, len);
1167 			iov[iovcnt].iov_len = len;
1168 			iovcnt++;
1169 			pktlen += len;
1170 		}
1171 
1172 		/*
1173 		 * Pull out info that is valid in the final descriptor
1174 		 * and exit descriptor loop.
1175 		 */
1176 		if (dsc->td.lower.data & E1000_TXD_CMD_EOP) {
1177 			if (dtype == E1000_TXD_TYP_L) {
1178 				if (dsc->td.lower.data & E1000_TXD_CMD_IC) {
1179 					ckinfo[0].ck_valid = 1;
1180 					ckinfo[0].ck_off =
1181 					    dsc->td.lower.flags.cso;
1182 					ckinfo[0].ck_len = 0;
1183 				}
1184 			} else {
1185 				cd = &sc->esc_txctx;
1186 				if (dsc->dd.lower.data & E1000_TXD_CMD_TSE)
1187 					tso = 1;
1188 				if (dsc->dd.upper.fields.popts &
1189 				    E1000_TXD_POPTS_IXSM)
1190 					ckinfo[0].ck_valid = 1;
1191 				if (dsc->dd.upper.fields.popts &
1192 				    E1000_TXD_POPTS_IXSM || tso) {
1193 					ckinfo[0].ck_start =
1194 					    cd->lower_setup.ip_fields.ipcss;
1195 					ckinfo[0].ck_off =
1196 					    cd->lower_setup.ip_fields.ipcso;
1197 					ckinfo[0].ck_len =
1198 					    cd->lower_setup.ip_fields.ipcse;
1199 				}
1200 				if (dsc->dd.upper.fields.popts &
1201 				    E1000_TXD_POPTS_TXSM)
1202 					ckinfo[1].ck_valid = 1;
1203 				if (dsc->dd.upper.fields.popts &
1204 				    E1000_TXD_POPTS_TXSM || tso) {
1205 					ckinfo[1].ck_start =
1206 					    cd->upper_setup.tcp_fields.tucss;
1207 					ckinfo[1].ck_off =
1208 					    cd->upper_setup.tcp_fields.tucso;
1209 					ckinfo[1].ck_len =
1210 					    cd->upper_setup.tcp_fields.tucse;
1211 				}
1212 			}
1213 			break;
1214 		}
1215 	}
1216 
1217 	if (invalid)
1218 		goto done;
1219 
1220 	if (iovcnt > I82545_MAX_TXSEGS) {
1221 		WPRINTF("tx too many descriptors (%d > %d) -- dropped",
1222 		    iovcnt, I82545_MAX_TXSEGS);
1223 		goto done;
1224 	}
1225 
1226 	hdrlen = vlen = 0;
1227 	/* Estimate writable space for VLAN header insertion. */
1228 	if ((sc->esc_CTRL & E1000_CTRL_VME) &&
1229 	    (dsc->td.lower.data & E1000_TXD_CMD_VLE)) {
1230 		hdrlen = ETHER_ADDR_LEN*2;
1231 		vlen = ETHER_VLAN_ENCAP_LEN;
1232 	}
1233 	if (!tso) {
1234 		/* Estimate required writable space for checksums. */
1235 		if (ckinfo[0].ck_valid)
1236 			hdrlen = MAX(hdrlen, ckinfo[0].ck_off + 2U);
1237 		if (ckinfo[1].ck_valid)
1238 			hdrlen = MAX(hdrlen, ckinfo[1].ck_off + 2U);
1239 		/* Round up writable space to the first vector. */
1240 		if (hdrlen != 0 && iov[0].iov_len > hdrlen &&
1241 		    iov[0].iov_len < hdrlen + 100)
1242 			hdrlen = iov[0].iov_len;
1243 	} else {
1244 		/* For TSO, the header length is provided by software. */
1245 		hdrlen = sc->esc_txctx.tcp_seg_setup.fields.hdr_len;
1246 
1247 		/*
1248 		 * Cap the header length at 240 based on 7.2.4.5 of
1249 		 * the Intel 82576EB (Rev 2.63) datasheet.
1250 		 */
1251 		if (hdrlen > 240) {
1252 			WPRINTF("TSO hdrlen too large: %d", hdrlen);
1253 			goto done;
1254 		}
1255 
1256 		/*
1257 		 * If VLAN insertion is requested, ensure the header
1258 		 * at least holds the amount of data copied during
1259 		 * VLAN insertion below.
1260 		 *
1261 		 * XXX: Realistic packets will include a full Ethernet
1262 		 * header before the IP header at ckinfo[0].ck_start,
1263 		 * but this check is sufficient to prevent
1264 		 * out-of-bounds access below.
1265 		 */
1266 		if (vlen != 0 && hdrlen < ETHER_ADDR_LEN*2) {
1267 			WPRINTF("TSO hdrlen too small for vlan insertion "
1268 			    "(%d vs %d) -- dropped", hdrlen,
1269 			    ETHER_ADDR_LEN*2);
1270 			goto done;
1271 		}
1272 
1273 		/*
1274 		 * Ensure that the header length covers the used fields
1275 		 * in the IP and TCP headers as well as the IP and TCP
1276 		 * checksums.  The following fields are accessed below:
1277 		 *
1278 		 * Header | Field | Offset | Length
1279 		 * -------+-------+--------+-------
1280 		 * IPv4   | len   | 2      | 2
1281 		 * IPv4   | ID    | 4      | 2
1282 		 * IPv6   | len   | 4      | 2
1283 		 * TCP    | seq # | 4      | 4
1284 		 * TCP    | flags | 13     | 1
1285 		 * UDP    | len   | 4      | 4
1286 		 */
1287 		if (hdrlen < ckinfo[0].ck_start + 6U ||
1288 		    hdrlen < ckinfo[0].ck_off + 2U) {
1289 			WPRINTF("TSO hdrlen too small for IP fields (%d) "
1290 			    "-- dropped", hdrlen);
1291 			goto done;
1292 		}
1293 		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) {
1294 			if (hdrlen < ckinfo[1].ck_start + 14U) {
1295 				WPRINTF("TSO hdrlen too small for TCP fields "
1296 				    "(%d) -- dropped", hdrlen);
1297 				goto done;
1298 			}
1299 		} else {
1300 			if (hdrlen < ckinfo[1].ck_start + 8U) {
1301 				WPRINTF("TSO hdrlen too small for UDP fields "
1302 				    "(%d) -- dropped", hdrlen);
1303 				goto done;
1304 			}
1305 		}
1306 		if (ckinfo[1].ck_valid && hdrlen < ckinfo[1].ck_off + 2U) {
1307 			WPRINTF("TSO hdrlen too small for TCP/UDP fields "
1308 			    "(%d) -- dropped", hdrlen);
1309 			goto done;
1310 		}
1311 	}
1312 
1313 	if (pktlen < hdrlen + vlen) {
1314 		WPRINTF("packet too small for writable header");
1315 		goto done;
1316 	}
1317 
1318 	/* Allocate, fill and prepend writable header vector. */
1319 	if (hdrlen + vlen != 0) {
1320 		hdr = __builtin_alloca(hdrlen + vlen);
1321 		hdr += vlen;
1322 		for (left = hdrlen, hdrp = hdr; left > 0;
1323 		    left -= now, hdrp += now) {
1324 			now = MIN(left, iov->iov_len);
1325 			memcpy(hdrp, iov->iov_base, now);
1326 			iov->iov_base = (uint8_t *)iov->iov_base + now;
1327 			iov->iov_len -= now;
1328 			if (iov->iov_len == 0) {
1329 				iov++;
1330 				iovcnt--;
1331 			}
1332 		}
1333 		iov--;
1334 		iovcnt++;
1335 		iov->iov_base = hdr;
1336 		iov->iov_len = hdrlen;
1337 	} else
1338 		hdr = NULL;
1339 
1340 	/* Insert VLAN tag. */
1341 	if (vlen != 0) {
1342 		hdr -= ETHER_VLAN_ENCAP_LEN;
1343 		memmove(hdr, hdr + ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN*2);
1344 		hdrlen += ETHER_VLAN_ENCAP_LEN;
1345 		hdr[ETHER_ADDR_LEN*2 + 0] = sc->esc_VET >> 8;
1346 		hdr[ETHER_ADDR_LEN*2 + 1] = sc->esc_VET & 0xff;
1347 		hdr[ETHER_ADDR_LEN*2 + 2] = dsc->td.upper.fields.special >> 8;
1348 		hdr[ETHER_ADDR_LEN*2 + 3] = dsc->td.upper.fields.special & 0xff;
1349 		iov->iov_base = hdr;
1350 		iov->iov_len += ETHER_VLAN_ENCAP_LEN;
1351 		/* Correct checksum offsets after VLAN tag insertion. */
1352 		ckinfo[0].ck_start += ETHER_VLAN_ENCAP_LEN;
1353 		ckinfo[0].ck_off += ETHER_VLAN_ENCAP_LEN;
1354 		if (ckinfo[0].ck_len != 0)
1355 			ckinfo[0].ck_len += ETHER_VLAN_ENCAP_LEN;
1356 		ckinfo[1].ck_start += ETHER_VLAN_ENCAP_LEN;
1357 		ckinfo[1].ck_off += ETHER_VLAN_ENCAP_LEN;
1358 		if (ckinfo[1].ck_len != 0)
1359 			ckinfo[1].ck_len += ETHER_VLAN_ENCAP_LEN;
1360 	}
1361 
1362 	/* Simple non-TSO case. */
1363 	if (!tso) {
1364 		/* Calculate checksums and transmit. */
1365 		if (ckinfo[0].ck_valid)
1366 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[0]);
1367 		if (ckinfo[1].ck_valid)
1368 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[1]);
1369 		e82545_transmit_backend(sc, iov, iovcnt);
1370 		goto done;
1371 	}
1372 
1373 	/* Doing TSO. */
1374 	tcp = (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) != 0;
1375 	mss = sc->esc_txctx.tcp_seg_setup.fields.mss;
1376 	paylen = (sc->esc_txctx.cmd_and_length & 0x000fffff);
1377 	DPRINTF("tx %s segmentation offload %d+%d/%u bytes %d iovs",
1378 	    tcp ? "TCP" : "UDP", hdrlen, paylen, mss, iovcnt);
1379 	ipid = ntohs(*(uint16_t *)&hdr[ckinfo[0].ck_start + 4]);
1380 	tcpseq = 0;
1381 	if (tcp)
1382 		tcpseq = ntohl(*(uint32_t *)&hdr[ckinfo[1].ck_start + 4]);
1383 	ipcs = *(uint16_t *)&hdr[ckinfo[0].ck_off];
1384 	tcpcs = 0;
1385 	if (ckinfo[1].ck_valid)	/* Save partial pseudo-header checksum. */
1386 		tcpcs = *(uint16_t *)&hdr[ckinfo[1].ck_off];
1387 	pv = 1;
1388 	pvoff = 0;
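	/*
	 * Emit one segment per iteration: each segment reuses the (patched)
	 * header in tiov[0] plus up to 'mss' bytes of payload, with pv/pvoff
	 * tracking the current position within the payload iovecs.
	 */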
1389 	for (seg = 0, left = paylen; left > 0; seg++, left -= now) {
1390 		now = MIN(left, mss);
1391 
1392 		/* Construct IOVs for the segment. */
1393 		/* Include whole original header. */
1394 		tiov[0].iov_base = hdr;
1395 		tiov[0].iov_len = hdrlen;
1396 		tiovcnt = 1;
1397 		/* Include respective part of payload IOV. */
1398 		for (nleft = now; pv < iovcnt && nleft > 0; nleft -= nnow) {
1399 			nnow = MIN(nleft, iov[pv].iov_len - pvoff);
1400 			tiov[tiovcnt].iov_base = (uint8_t *)iov[pv].iov_base +
1401 			    pvoff;
1402 			tiov[tiovcnt++].iov_len = nnow;
1403 			if (pvoff + nnow == iov[pv].iov_len) {
1404 				pv++;
1405 				pvoff = 0;
1406 			} else
1407 				pvoff += nnow;
1408 		}
1409 		DPRINTF("tx segment %d %d+%d bytes %d iovs",
1410 		    seg, hdrlen, now, tiovcnt);
1411 
1412 		/* Update IP header. */
1413 		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_IP) {
1414 			/* IPv4 -- set length and ID */
1415 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 2] =
1416 			    htons(hdrlen - ckinfo[0].ck_start + now);
1417 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1418 			    htons(ipid + seg);
1419 		} else {
1420 			/* IPv6 -- set length */
1421 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1422 			    htons(hdrlen - ckinfo[0].ck_start - 40 +
1423 				  now);
1424 		}
1425 
1426 		/* Update pseudo-header checksum. */
1427 		tcpsum = tcpcs;
1428 		tcpsum += htons(hdrlen - ckinfo[1].ck_start + now);
1429 
1430 		/* Update TCP/UDP headers. */
1431 		if (tcp) {
1432 			/* Update sequence number and FIN/PUSH flags. */
1433 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1434 			    htonl(tcpseq + paylen - left);
1435 			if (now < left) {
1436 				hdr[ckinfo[1].ck_start + 13] &=
1437 				    ~(TH_FIN | TH_PUSH);
1438 			}
1439 		} else {
1440 			/* Update payload length. */
1441 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1442 			    hdrlen - ckinfo[1].ck_start + now;
1443 		}
1444 
1445 		/* Calculate checksums and transmit. */
1446 		if (ckinfo[0].ck_valid) {
1447 			*(uint16_t *)&hdr[ckinfo[0].ck_off] = ipcs;
1448 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[0]);
1449 		}
1450 		if (ckinfo[1].ck_valid) {
1451 			*(uint16_t *)&hdr[ckinfo[1].ck_off] =
1452 			    e82545_carry(tcpsum);
1453 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[1]);
1454 		}
1455 		e82545_transmit_backend(sc, tiov, tiovcnt);
1456 	}
1457 
1458 done:
1459 	head = (head + 1) % dsize;
1460 	e82545_transmit_done(sc, ohead, head, dsize, tdwb);
1461 
1462 	*rhead = head;
1463 	return (desc + 1);
1464 }
1465 
1466 static void
1467 e82545_tx_run(struct e82545_softc *sc)
1468 {
1469 	uint32_t cause;
1470 	uint16_t head, rhead, tail, size;
1471 	int lim, tdwb, sent;
1472 
1473 	size = sc->esc_TDLEN / 16;
1474 	if (size == 0)
1475 		return;
1476 
1477 	head = sc->esc_TDH % size;
1478 	tail = sc->esc_TDT % size;
1479 	DPRINTF("tx_run: head %x, rhead %x, tail %x",
1480 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1481 
1482 	pthread_mutex_unlock(&sc->esc_mtx);
1483 	rhead = head;
1484 	tdwb = 0;
1485 	for (lim = size / 4; sc->esc_tx_enabled && lim > 0; lim -= sent) {
1486 		sent = e82545_transmit(sc, head, tail, size, &rhead, &tdwb);
1487 		if (sent == 0)
1488 			break;
1489 		head = rhead;
1490 	}
1491 	pthread_mutex_lock(&sc->esc_mtx);
1492 
1493 	sc->esc_TDH = head;
1494 	sc->esc_TDHr = rhead;
1495 	cause = 0;
1496 	if (tdwb)
1497 		cause |= E1000_ICR_TXDW;
1498 	if (lim != size / 4 && sc->esc_TDH == sc->esc_TDT)
1499 		cause |= E1000_ICR_TXQE;
1500 	if (cause)
1501 		e82545_icr_assert(sc, cause);
1502 
1503 	DPRINTF("tx_run done: head %x, rhead %x, tail %x",
1504 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1505 }
1506 
1507 static _Noreturn void *
1508 e82545_tx_thread(void *param)
1509 {
1510 	struct e82545_softc *sc = param;
1511 
1512 	pthread_mutex_lock(&sc->esc_mtx);
1513 	for (;;) {
1514 		while (!sc->esc_tx_enabled || sc->esc_TDHr == sc->esc_TDT) {
1515 			if (sc->esc_tx_enabled && sc->esc_TDHr != sc->esc_TDT)
1516 				break;
1517 			sc->esc_tx_active = 0;
1518 			if (sc->esc_tx_enabled == 0)
1519 				pthread_cond_signal(&sc->esc_tx_cond);
1520 			pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1521 		}
1522 		sc->esc_tx_active = 1;
1523 
1524 		/* Process some tx descriptors.  Lock dropped inside. */
1525 		e82545_tx_run(sc);
1526 	}
1527 }
1528 
1529 static void
1530 e82545_tx_start(struct e82545_softc *sc)
1531 {
1532 
1533 	if (sc->esc_tx_active == 0)
1534 		pthread_cond_signal(&sc->esc_tx_cond);
1535 }
1536 
1537 static void
1538 e82545_tx_enable(struct e82545_softc *sc)
1539 {
1540 
1541 	sc->esc_tx_enabled = 1;
1542 }
1543 
1544 static void
1545 e82545_tx_disable(struct e82545_softc *sc)
1546 {
1547 
1548 	sc->esc_tx_enabled = 0;
1549 	while (sc->esc_tx_active)
1550 		pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1551 }
1552 
1553 static void
1554 e82545_rx_enable(struct e82545_softc *sc)
1555 {
1556 
1557 	sc->esc_rx_enabled = 1;
1558 }
1559 
1560 static void
1561 e82545_rx_disable(struct e82545_softc *sc)
1562 {
1563 
1564 	sc->esc_rx_enabled = 0;
1565 	while (sc->esc_rx_active)
1566 		pthread_cond_wait(&sc->esc_rx_cond, &sc->esc_mtx);
1567 }
1568 
1569 static void
1570 e82545_write_ra(struct e82545_softc *sc, int reg, uint32_t wval)
1571 {
1572 	struct eth_uni *eu;
1573 	int idx;
1574 
1575 	idx = reg >> 1;
1576 	assert(idx < 15);
1577 
1578 	eu = &sc->esc_uni[idx];
1579 
1580 	if (reg & 0x1) {
1581 		/* RAH */
1582 		eu->eu_valid = ((wval & E1000_RAH_AV) == E1000_RAH_AV);
1583 		eu->eu_addrsel = (wval >> 16) & 0x3;
1584 		eu->eu_eth.octet[5] = wval >> 8;
1585 		eu->eu_eth.octet[4] = wval;
1586 	} else {
1587 		/* RAL */
1588 		eu->eu_eth.octet[3] = wval >> 24;
1589 		eu->eu_eth.octet[2] = wval >> 16;
1590 		eu->eu_eth.octet[1] = wval >> 8;
1591 		eu->eu_eth.octet[0] = wval;
1592 	}
1593 }
1594 
1595 static uint32_t
1596 e82545_read_ra(struct e82545_softc *sc, int reg)
1597 {
1598 	struct eth_uni *eu;
1599 	uint32_t retval;
1600 	int idx;
1601 
1602 	idx = reg >> 1;
1603 	assert(idx < 15);
1604 
1605 	eu = &sc->esc_uni[idx];
1606 
1607 	if (reg & 0x1) {
1608 		/* RAH */
1609 		retval = (eu->eu_valid << 31) |
1610 			 (eu->eu_addrsel << 16) |
1611 			 (eu->eu_eth.octet[5] << 8) |
1612 			 eu->eu_eth.octet[4];
1613 	} else {
1614 		/* RAL */
1615 		retval = (eu->eu_eth.octet[3] << 24) |
1616 			 (eu->eu_eth.octet[2] << 16) |
1617 			 (eu->eu_eth.octet[1] << 8) |
1618 			 eu->eu_eth.octet[0];
1619 	}
1620 
1621 	return (retval);
1622 }
1623 
1624 static void
1625 e82545_write_register(struct e82545_softc *sc, uint32_t offset, uint32_t value)
1626 {
1627 	int ridx;
1628 
1629 	if (offset & 0x3) {
1630 		DPRINTF("Unaligned register write offset:0x%x value:0x%x", offset, value);
1631 		return;
1632 	}
1633 	DPRINTF("Register write: 0x%x value: 0x%x", offset, value);
1634 
1635 	switch (offset) {
1636 	case E1000_CTRL:
1637 	case E1000_CTRL_DUP:
1638 		e82545_devctl(sc, value);
1639 		break;
1640 	case E1000_FCAL:
1641 		sc->esc_FCAL = value;
1642 		break;
1643 	case E1000_FCAH:
1644 		sc->esc_FCAH = value & ~0xFFFF0000;
1645 		break;
1646 	case E1000_FCT:
1647 		sc->esc_FCT = value & ~0xFFFF0000;
1648 		break;
1649 	case E1000_VET:
1650 		sc->esc_VET = value & ~0xFFFF0000;
1651 		break;
1652 	case E1000_FCTTV:
1653 		sc->esc_FCTTV = value & ~0xFFFF0000;
1654 		break;
1655 	case E1000_LEDCTL:
1656 		sc->esc_LEDCTL = value & ~0x30303000;
1657 		break;
1658 	case E1000_PBA:
1659 		sc->esc_PBA = value & 0x0000FF80;
1660 		break;
1661 	case E1000_ICR:
1662 	case E1000_ITR:
1663 	case E1000_ICS:
1664 	case E1000_IMS:
1665 	case E1000_IMC:
1666 		e82545_intr_write(sc, offset, value);
1667 		break;
1668 	case E1000_RCTL:
1669 		e82545_rx_ctl(sc, value);
1670 		break;
1671 	case E1000_FCRTL:
1672 		sc->esc_FCRTL = value & ~0xFFFF0007;
1673 		break;
1674 	case E1000_FCRTH:
1675 		sc->esc_FCRTH = value & ~0xFFFF0007;
1676 		break;
1677 	case E1000_RDBAL(0):
1678 		sc->esc_RDBAL = value & ~0xF;
1679 		if (sc->esc_rx_enabled) {
1680 			/* Apparently legal: update cached address */
1681 			e82545_rx_update_rdba(sc);
1682 		}
1683 		break;
1684 	case E1000_RDBAH(0):
1685 		assert(!sc->esc_rx_enabled);
1686 		sc->esc_RDBAH = value;
1687 		break;
1688 	case E1000_RDLEN(0):
1689 		assert(!sc->esc_rx_enabled);
1690 		sc->esc_RDLEN = value & ~0xFFF0007F;
1691 		break;
1692 	case E1000_RDH(0):
1693 		/* XXX should only ever be zero ? Range check ? */
1694 		sc->esc_RDH = value;
1695 		break;
1696 	case E1000_RDT(0):
1697 		/* XXX if this opens up the rx ring, do something ? */
1698 		sc->esc_RDT = value;
1699 		break;
1700 	case E1000_RDTR:
1701 		/* ignore FPD bit 31 */
1702 		sc->esc_RDTR = value & ~0xFFFF0000;
1703 		break;
1704 	case E1000_RXDCTL(0):
1705 		sc->esc_RXDCTL = value & ~0xFEC0C0C0;
1706 		break;
1707 	case E1000_RADV:
1708 		sc->esc_RADV = value & ~0xFFFF0000;
1709 		break;
1710 	case E1000_RSRPD:
1711 		sc->esc_RSRPD = value & ~0xFFFFF000;
1712 		break;
1713 	case E1000_RXCSUM:
1714 		sc->esc_RXCSUM = value & ~0xFFFFF800;
1715 		break;
1716 	case E1000_TXCW:
1717 		sc->esc_TXCW = value & ~0x3FFF0000;
1718 		break;
1719 	case E1000_TCTL:
1720 		e82545_tx_ctl(sc, value);
1721 		break;
1722 	case E1000_TIPG:
1723 		sc->esc_TIPG = value;
1724 		break;
1725 	case E1000_AIT:
1726 		sc->esc_AIT = value;
1727 		break;
1728 	case E1000_TDBAL(0):
1729 		sc->esc_TDBAL = value & ~0xF;
1730 		if (sc->esc_tx_enabled)
1731 			e82545_tx_update_tdba(sc);
1732 		break;
1733 	case E1000_TDBAH(0):
1734 		sc->esc_TDBAH = value;
1735 		if (sc->esc_tx_enabled)
1736 			e82545_tx_update_tdba(sc);
1737 		break;
1738 	case E1000_TDLEN(0):
1739 		sc->esc_TDLEN = value & ~0xFFF0007F;
1740 		if (sc->esc_tx_enabled)
1741 			e82545_tx_update_tdba(sc);
1742 		break;
1743 	case E1000_TDH(0):
1744 		if (sc->esc_tx_enabled) {
1745 			WPRINTF("ignoring write to TDH while transmit enabled");
1746 			break;
1747 		}
1748 		if (value != 0) {
1749 			WPRINTF("ignoring non-zero value written to TDH");
1750 			break;
1751 		}
1752 		sc->esc_TDHr = sc->esc_TDH = value;
1753 		break;
1754 	case E1000_TDT(0):
1755 		sc->esc_TDT = value;
1756 		if (sc->esc_tx_enabled)
1757 			e82545_tx_start(sc);
1758 		break;
1759 	case E1000_TIDV:
1760 		sc->esc_TIDV = value & ~0xFFFF0000;
1761 		break;
1762 	case E1000_TXDCTL(0):
1763 		//assert(!sc->esc_tx_enabled);
1764 		sc->esc_TXDCTL = value & ~0xC0C0C0;
1765 		break;
1766 	case E1000_TADV:
1767 		sc->esc_TADV = value & ~0xFFFF0000;
1768 		break;
1769 	case E1000_RAL(0) ... E1000_RAH(15):
1770 		/* convert to u32 offset */
1771 		ridx = (offset - E1000_RAL(0)) >> 2;
1772 		e82545_write_ra(sc, ridx, value);
1773 		break;
1774 	case E1000_MTA ... (E1000_MTA + (127*4)):
1775 		sc->esc_fmcast[(offset - E1000_MTA) >> 2] = value;
1776 		break;
1777 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1778 		sc->esc_fvlan[(offset - E1000_VFTA) >> 2] = value;
1779 		break;
1780 	case E1000_EECD:
1781 	{
1782 		//DPRINTF("EECD write 0x%x -> 0x%x", sc->eeprom_control, value);
1783 		/* edge triggered low->high */
1784 		uint32_t eecd_strobe = ((sc->eeprom_control & E1000_EECD_SK) ?
1785 			0 : (value & E1000_EECD_SK));
1786 		uint32_t eecd_mask = (E1000_EECD_SK|E1000_EECD_CS|
1787 					E1000_EECD_DI|E1000_EECD_REQ);
1788 		sc->eeprom_control &= ~eecd_mask;
1789 		sc->eeprom_control |= (value & eecd_mask);
1790 		/* grant/revoke immediately */
1791 		if (value & E1000_EECD_REQ) {
1792 			sc->eeprom_control |= E1000_EECD_GNT;
1793 		} else {
1794 			sc->eeprom_control &= ~E1000_EECD_GNT;
1795 		}
1796 		if (eecd_strobe && (sc->eeprom_control & E1000_EECD_CS)) {
1797 			e82545_eecd_strobe(sc);
1798 		}
1799 		return;
1800 	}
1801 	case E1000_MDIC:
1802 	{
1803 		uint8_t reg_addr = (uint8_t)((value & E1000_MDIC_REG_MASK) >>
1804 						E1000_MDIC_REG_SHIFT);
1805 		uint8_t phy_addr = (uint8_t)((value & E1000_MDIC_PHY_MASK) >>
1806 						E1000_MDIC_PHY_SHIFT);
1807 		sc->mdi_control =
1808 			(value & ~(E1000_MDIC_ERROR|E1000_MDIC_DEST));
1809 		if ((value & E1000_MDIC_READY) != 0) {
1810 			DPRINTF("Incorrect MDIC ready bit: 0x%x", value);
1811 			return;
1812 		}
1813 		switch (value & E82545_MDIC_OP_MASK) {
1814 		case E1000_MDIC_OP_READ:
1815 			sc->mdi_control &= ~E82545_MDIC_DATA_MASK;
1816 			sc->mdi_control |= e82545_read_mdi(sc, reg_addr, phy_addr);
1817 			break;
1818 		case E1000_MDIC_OP_WRITE:
1819 			e82545_write_mdi(sc, reg_addr, phy_addr,
1820 				value & E82545_MDIC_DATA_MASK);
1821 			break;
1822 		default:
1823 			DPRINTF("Unknown MDIC op: 0x%x", value);
1824 			return;
1825 		}
1826 		/* TODO: barrier? */
1827 		sc->mdi_control |= E1000_MDIC_READY;
1828 		if (value & E82545_MDIC_IE) {
1829 			// TODO: generate interrupt
1830 		}
1831 		return;
1832 	}
1833 	case E1000_MANC:
1834 	case E1000_STATUS:
1835 		return;
1836 	default:
1837 		DPRINTF("Unknown write register: 0x%x value:0x%x", offset, value);
1838 		return;
1839 	}
1840 }
1841 
1842 static uint32_t
1843 e82545_read_register(struct e82545_softc *sc, uint32_t offset)
1844 {
1845 	uint32_t retval;
1846 	int ridx;
1847 
1848 	if (offset & 0x3) {
1849 		DPRINTF("Unaligned register read offset:0x%x", offset);
1850 		return 0;
1851 	}
1852 
1853 	DPRINTF("Register read: 0x%x", offset);
1854 
1855 	switch (offset) {
1856 	case E1000_CTRL:
1857 		retval = sc->esc_CTRL;
1858 		break;
1859 	case E1000_STATUS:
1860 		retval = E1000_STATUS_FD | E1000_STATUS_LU |
1861 		    E1000_STATUS_SPEED_1000;
1862 		break;
1863 	case E1000_FCAL:
1864 		retval = sc->esc_FCAL;
1865 		break;
1866 	case E1000_FCAH:
1867 		retval = sc->esc_FCAH;
1868 		break;
1869 	case E1000_FCT:
1870 		retval = sc->esc_FCT;
1871 		break;
1872 	case E1000_VET:
1873 		retval = sc->esc_VET;
1874 		break;
1875 	case E1000_FCTTV:
1876 		retval = sc->esc_FCTTV;
1877 		break;
1878 	case E1000_LEDCTL:
1879 		retval = sc->esc_LEDCTL;
1880 		break;
1881 	case E1000_PBA:
1882 		retval = sc->esc_PBA;
1883 		break;
1884 	case E1000_ICR:
1885 	case E1000_ITR:
1886 	case E1000_ICS:
1887 	case E1000_IMS:
1888 	case E1000_IMC:
1889 		retval = e82545_intr_read(sc, offset);
1890 		break;
1891 	case E1000_RCTL:
1892 		retval = sc->esc_RCTL;
1893 		break;
1894 	case E1000_FCRTL:
1895 		retval = sc->esc_FCRTL;
1896 		break;
1897 	case E1000_FCRTH:
1898 		retval = sc->esc_FCRTH;
1899 		break;
1900 	case E1000_RDBAL(0):
1901 		retval = sc->esc_RDBAL;
1902 		break;
1903 	case E1000_RDBAH(0):
1904 		retval = sc->esc_RDBAH;
1905 		break;
1906 	case E1000_RDLEN(0):
1907 		retval = sc->esc_RDLEN;
1908 		break;
1909 	case E1000_RDH(0):
1910 		retval = sc->esc_RDH;
1911 		break;
1912 	case E1000_RDT(0):
1913 		retval = sc->esc_RDT;
1914 		break;
1915 	case E1000_RDTR:
1916 		retval = sc->esc_RDTR;
1917 		break;
1918 	case E1000_RXDCTL(0):
1919 		retval = sc->esc_RXDCTL;
1920 		break;
1921 	case E1000_RADV:
1922 		retval = sc->esc_RADV;
1923 		break;
1924 	case E1000_RSRPD:
1925 		retval = sc->esc_RSRPD;
1926 		break;
1927 	case E1000_RXCSUM:
1928 		retval = sc->esc_RXCSUM;
1929 		break;
1930 	case E1000_TXCW:
1931 		retval = sc->esc_TXCW;
1932 		break;
1933 	case E1000_TCTL:
1934 		retval = sc->esc_TCTL;
1935 		break;
1936 	case E1000_TIPG:
1937 		retval = sc->esc_TIPG;
1938 		break;
1939 	case E1000_AIT:
1940 		retval = sc->esc_AIT;
1941 		break;
1942 	case E1000_TDBAL(0):
1943 		retval = sc->esc_TDBAL;
1944 		break;
1945 	case E1000_TDBAH(0):
1946 		retval = sc->esc_TDBAH;
1947 		break;
1948 	case E1000_TDLEN(0):
1949 		retval = sc->esc_TDLEN;
1950 		break;
1951 	case E1000_TDH(0):
1952 		retval = sc->esc_TDH;
1953 		break;
1954 	case E1000_TDT(0):
1955 		retval = sc->esc_TDT;
1956 		break;
1957 	case E1000_TIDV:
1958 		retval = sc->esc_TIDV;
1959 		break;
1960 	case E1000_TXDCTL(0):
1961 		retval = sc->esc_TXDCTL;
1962 		break;
1963 	case E1000_TADV:
1964 		retval = sc->esc_TADV;
1965 		break;
1966 	case E1000_RAL(0) ... E1000_RAH(15):
1967 		/* convert to u32 offset */
1968 		ridx = (offset - E1000_RAL(0)) >> 2;
1969 		retval = e82545_read_ra(sc, ridx);
1970 		break;
1971 	case E1000_MTA ... (E1000_MTA + (127*4)):
1972 		retval = sc->esc_fmcast[(offset - E1000_MTA) >> 2];
1973 		break;
1974 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1975 		retval = sc->esc_fvlan[(offset - E1000_VFTA) >> 2];
1976 		break;
1977 	case E1000_EECD:
1978 		//DPRINTF("EECD read %x", sc->eeprom_control);
1979 		retval = sc->eeprom_control;
1980 		break;
1981 	case E1000_MDIC:
1982 		retval = sc->mdi_control;
1983 		break;
1984 	case E1000_MANC:
1985 		retval = 0;
1986 		break;
1987 	/* stats that we emulate. */
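	/*
	 * The pkt_rx_by_size[]/pkt_tx_by_size[] buckets correspond to the
	 * PRC/PTC size classes: up to 64, 65-127, 128-255, 256-511,
	 * 512-1023 and 1024-1522 bytes respectively.
	 */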
1988 	case E1000_MPC:
1989 		retval = sc->missed_pkt_count;
1990 		break;
1991 	case E1000_PRC64:
1992 		retval = sc->pkt_rx_by_size[0];
1993 		break;
1994 	case E1000_PRC127:
1995 		retval = sc->pkt_rx_by_size[1];
1996 		break;
1997 	case E1000_PRC255:
1998 		retval = sc->pkt_rx_by_size[2];
1999 		break;
2000 	case E1000_PRC511:
2001 		retval = sc->pkt_rx_by_size[3];
2002 		break;
2003 	case E1000_PRC1023:
2004 		retval = sc->pkt_rx_by_size[4];
2005 		break;
2006 	case E1000_PRC1522:
2007 		retval = sc->pkt_rx_by_size[5];
2008 		break;
2009 	case E1000_GPRC:
2010 		retval = sc->good_pkt_rx_count;
2011 		break;
2012 	case E1000_BPRC:
2013 		retval = sc->bcast_pkt_rx_count;
2014 		break;
2015 	case E1000_MPRC:
2016 		retval = sc->mcast_pkt_rx_count;
2017 		break;
2018 	case E1000_GPTC:
2019 	case E1000_TPT:
2020 		retval = sc->good_pkt_tx_count;
2021 		break;
2022 	case E1000_GORCL:
2023 		retval = (uint32_t)sc->good_octets_rx;
2024 		break;
2025 	case E1000_GORCH:
2026 		retval = (uint32_t)(sc->good_octets_rx >> 32);
2027 		break;
2028 	case E1000_TOTL:
2029 	case E1000_GOTCL:
2030 		retval = (uint32_t)sc->good_octets_tx;
2031 		break;
2032 	case E1000_TOTH:
2033 	case E1000_GOTCH:
2034 		retval = (uint32_t)(sc->good_octets_tx >> 32);
2035 		break;
2036 	case E1000_ROC:
2037 		retval = sc->oversize_rx_count;
2038 		break;
2039 	case E1000_TORL:
2040 		retval = (uint32_t)(sc->good_octets_rx + sc->missed_octets);
2041 		break;
2042 	case E1000_TORH:
2043 		retval = (uint32_t)((sc->good_octets_rx +
2044 		    sc->missed_octets) >> 32);
2045 		break;
2046 	case E1000_TPR:
2047 		retval = sc->good_pkt_rx_count + sc->missed_pkt_count +
2048 		    sc->oversize_rx_count;
2049 		break;
2050 	case E1000_PTC64:
2051 		retval = sc->pkt_tx_by_size[0];
2052 		break;
2053 	case E1000_PTC127:
2054 		retval = sc->pkt_tx_by_size[1];
2055 		break;
2056 	case E1000_PTC255:
2057 		retval = sc->pkt_tx_by_size[2];
2058 		break;
2059 	case E1000_PTC511:
2060 		retval = sc->pkt_tx_by_size[3];
2061 		break;
2062 	case E1000_PTC1023:
2063 		retval = sc->pkt_tx_by_size[4];
2064 		break;
2065 	case E1000_PTC1522:
2066 		retval = sc->pkt_tx_by_size[5];
2067 		break;
2068 	case E1000_MPTC:
2069 		retval = sc->mcast_pkt_tx_count;
2070 		break;
2071 	case E1000_BPTC:
2072 		retval = sc->bcast_pkt_tx_count;
2073 		break;
2074 	case E1000_TSCTC:
2075 		retval = sc->tso_tx_count;
2076 		break;
2077 	/* stats that are always 0. */
2078 	case E1000_CRCERRS:
2079 	case E1000_ALGNERRC:
2080 	case E1000_SYMERRS:
2081 	case E1000_RXERRC:
2082 	case E1000_SCC:
2083 	case E1000_ECOL:
2084 	case E1000_MCC:
2085 	case E1000_LATECOL:
2086 	case E1000_COLC:
2087 	case E1000_DC:
2088 	case E1000_TNCRS:
2089 	case E1000_SEC:
2090 	case E1000_CEXTERR:
2091 	case E1000_RLEC:
2092 	case E1000_XONRXC:
2093 	case E1000_XONTXC:
2094 	case E1000_XOFFRXC:
2095 	case E1000_XOFFTXC:
2096 	case E1000_FCRUC:
2097 	case E1000_RNBC:
2098 	case E1000_RUC:
2099 	case E1000_RFC:
2100 	case E1000_RJC:
2101 	case E1000_MGTPRC:
2102 	case E1000_MGTPDC:
2103 	case E1000_MGTPTC:
2104 	case E1000_TSCTFC:
2105 		retval = 0;
2106 		break;
2107 	default:
2108 		DPRINTF("Unknown read register: 0x%x", offset);
2109 		retval = 0;
2110 		break;
2111 	}
2112 
2113 	return (retval);
2114 }
2115 
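/*
 * BAR write dispatch.  The memory BAR maps the register file directly,
 * while the I/O BAR implements the usual IOADDR/IODATA pair: the guest
 * first stores a register offset in IOADDR and then accesses that
 * register through IODATA, roughly (illustrative, io_base being the
 * guest's base address for the I/O BAR):
 *
 *	outl(io_base + E82545_IOADDR, reg_offset);
 *	outl(io_base + E82545_IODATA, value);
 *
 * All register accesses are serialized by esc_mtx.
 */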
2116 static void
2117 e82545_write(struct pci_devinst *pi, int baridx, uint64_t offset, int size,
2118     uint64_t value)
2119 {
2120 	struct e82545_softc *sc;
2121 
2122 	//DPRINTF("Write bar:%d offset:0x%lx value:0x%lx size:%d", baridx, offset, value, size);
2123 
2124 	sc = pi->pi_arg;
2125 
2126 	pthread_mutex_lock(&sc->esc_mtx);
2127 
2128 	switch (baridx) {
2129 	case E82545_BAR_IO:
2130 		switch (offset) {
2131 		case E82545_IOADDR:
2132 			if (size != 4) {
2133 				DPRINTF("Wrong io addr write sz:%d value:0x%lx", size, value);
2134 			} else
2135 				sc->io_addr = (uint32_t)value;
2136 			break;
2137 		case E82545_IODATA:
2138 			if (size != 4) {
2139 				DPRINTF("Wrong io data write size:%d value:0x%lx", size, value);
2140 			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2141 				DPRINTF("Non-register io write addr:0x%x value:0x%lx", sc->io_addr, value);
2142 			} else
2143 				e82545_write_register(sc, sc->io_addr,
2144 						      (uint32_t)value);
2145 			break;
2146 		default:
2147 			DPRINTF("Unknown io bar write offset:0x%lx value:0x%lx size:%d", offset, value, size);
2148 			break;
2149 		}
2150 		break;
2151 	case E82545_BAR_REGISTER:
2152 		if (size != 4) {
2153 			DPRINTF("Wrong register write size:%d offset:0x%lx value:0x%lx", size, offset, value);
2154 		} else
2155 			e82545_write_register(sc, (uint32_t)offset,
2156 					      (uint32_t)value);
2157 		break;
2158 	default:
2159 		DPRINTF("Unknown write bar:%d off:0x%lx val:0x%lx size:%d",
2160 			baridx, offset, value, size);
2161 	}
2162 
2163 	pthread_mutex_unlock(&sc->esc_mtx);
2164 }
2165 
2166 static uint64_t
2167 e82545_read(struct pci_devinst *pi, int baridx, uint64_t offset, int size)
2168 {
2169 	struct e82545_softc *sc;
2170 	uint64_t retval;
2171 
2172 	//DPRINTF("Read  bar:%d offset:0x%lx size:%d", baridx, offset, size);
2173 	sc = pi->pi_arg;
2174 	retval = 0;
2175 
2176 	pthread_mutex_lock(&sc->esc_mtx);
2177 
2178 	switch (baridx) {
2179 	case E82545_BAR_IO:
2180 		switch (offset) {
2181 		case E82545_IOADDR:
2182 			if (size != 4) {
2183 				DPRINTF("Wrong io addr read sz:%d", size);
2184 			} else
2185 				retval = sc->io_addr;
2186 			break;
2187 		case E82545_IODATA:
2188 			if (size != 4) {
2189 				DPRINTF("Wrong io data read sz:%d", size);
2190 			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2192 				DPRINTF("Non-register io read addr:0x%x",
2193 					sc->io_addr);
2194 			} else
2195 				retval = e82545_read_register(sc, sc->io_addr);
2196 			break;
2197 		default:
2198 			DPRINTF("Unknown io bar read offset:0x%lx size:%d",
2199 				offset, size);
2200 			break;
2201 		}
2202 		break;
2203 	case E82545_BAR_REGISTER:
2204 		if (size != 4) {
2205 			DPRINTF("Wrong register read size:%d offset:0x%lx",
2206 				size, offset);
2207 		} else
2208 			retval = e82545_read_register(sc, (uint32_t)offset);
2209 		break;
2210 	default:
2211 		DPRINTF("Unknown read bar:%d offset:0x%lx size:%d",
2212 			baridx, offset, size);
2213 		break;
2214 	}
2215 
2216 	pthread_mutex_unlock(&sc->esc_mtx);
2217 
2218 	return (retval);
2219 }
2220 
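/*
 * Reset device state.  A non-zero 'drvr' indicates a reset requested by
 * the guest driver (presumably via CTRL.RST): flow-control settings,
 * descriptor ring base addresses and interrupt timers are then left for
 * the driver to reprogram, and only the RAH valid bits are cleared
 * instead of wiping the whole L2 filter tables.  A h/w reset (drvr == 0)
 * clears everything.
 */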
2221 static void
2222 e82545_reset(struct e82545_softc *sc, int drvr)
2223 {
2224 	int i;
2225 
2226 	e82545_rx_disable(sc);
2227 	e82545_tx_disable(sc);
2228 
2229 	/* clear outstanding interrupts */
2230 	if (sc->esc_irq_asserted)
2231 		pci_lintr_deassert(sc->esc_pi);
2232 
2233 	/* misc */
2234 	if (!drvr) {
2235 		sc->esc_FCAL = 0;
2236 		sc->esc_FCAH = 0;
2237 		sc->esc_FCT = 0;
2238 		sc->esc_VET = 0;
2239 		sc->esc_FCTTV = 0;
2240 	}
2241 	sc->esc_LEDCTL = 0x07061302;
2242 	sc->esc_PBA = 0x00100030;
2243 
2244 	/* start nvm in opcode mode. */
2245 	sc->nvm_opaddr = 0;
2246 	sc->nvm_mode = E82545_NVM_MODE_OPADDR;
2247 	sc->nvm_bits = E82545_NVM_OPADDR_BITS;
2248 	sc->eeprom_control = E1000_EECD_PRES | E82545_EECD_FWE_EN;
2249 	e82545_init_eeprom(sc);
2250 
2251 	/* interrupt */
2252 	sc->esc_ICR = 0;
2253 	sc->esc_ITR = 250;
2254 	sc->esc_ICS = 0;
2255 	sc->esc_IMS = 0;
2256 	sc->esc_IMC = 0;
2257 
2258 	/* L2 filters */
2259 	if (!drvr) {
2260 		memset(sc->esc_fvlan, 0, sizeof(sc->esc_fvlan));
2261 		memset(sc->esc_fmcast, 0, sizeof(sc->esc_fmcast));
2262 		memset(sc->esc_uni, 0, sizeof(sc->esc_uni));
2263 
2264 		/* XXX not necessary on 82545 ?? */
2265 		sc->esc_uni[0].eu_valid = 1;
2266 		memcpy(sc->esc_uni[0].eu_eth.octet, sc->esc_mac.octet,
2267 		    ETHER_ADDR_LEN);
2268 	} else {
2269 		/* Clear RAH valid bits */
2270 		for (i = 0; i < 16; i++)
2271 			sc->esc_uni[i].eu_valid = 0;
2272 	}
2273 
2274 	/* receive */
2275 	if (!drvr) {
2276 		sc->esc_RDBAL = 0;
2277 		sc->esc_RDBAH = 0;
2278 	}
2279 	sc->esc_RCTL = 0;
2280 	sc->esc_FCRTL = 0;
2281 	sc->esc_FCRTH = 0;
2282 	sc->esc_RDLEN = 0;
2283 	sc->esc_RDH = 0;
2284 	sc->esc_RDT = 0;
2285 	sc->esc_RDTR = 0;
2286 	sc->esc_RXDCTL = (1 << 24) | (1 << 16); /* default GRAN/WTHRESH */
2287 	sc->esc_RADV = 0;
2288 	sc->esc_RXCSUM = 0;
2289 
2290 	/* transmit */
2291 	if (!drvr) {
2292 		sc->esc_TDBAL = 0;
2293 		sc->esc_TDBAH = 0;
2294 		sc->esc_TIPG = 0;
2295 		sc->esc_AIT = 0;
2296 		sc->esc_TIDV = 0;
2297 		sc->esc_TADV = 0;
2298 	}
2299 	sc->esc_tdba = 0;
2300 	sc->esc_txdesc = NULL;
2301 	sc->esc_TXCW = 0;
2302 	sc->esc_TCTL = 0;
2303 	sc->esc_TDLEN = 0;
2304 	sc->esc_TDT = 0;
2305 	sc->esc_TDHr = sc->esc_TDH = 0;
2306 	sc->esc_TXDCTL = 0;
2307 }
2308 
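/*
 * Device model initialization.  The nvlist carries the per-device
 * configuration: an optional "mac" value overrides the generated MAC
 * address, and the remaining keys are handed to netbe_init() to attach
 * the network backend (e.g. a tap or netmap backend).
 */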
2309 static int
2310 e82545_init(struct pci_devinst *pi, nvlist_t *nvl)
2311 {
2312 	char nstr[80];
2313 	struct e82545_softc *sc;
2314 	const char *mac;
2315 	int err;
2316 
2317 	/* Setup our softc */
2318 	sc = calloc(1, sizeof(*sc));
2319 
2320 	pi->pi_arg = sc;
2321 	sc->esc_pi = pi;
2322 	sc->esc_ctx = pi->pi_vmctx;
2323 
2324 	pthread_mutex_init(&sc->esc_mtx, NULL);
2325 	pthread_cond_init(&sc->esc_rx_cond, NULL);
2326 	pthread_cond_init(&sc->esc_tx_cond, NULL);
2327 	pthread_create(&sc->esc_tx_tid, NULL, e82545_tx_thread, sc);
2328 	snprintf(nstr, sizeof(nstr), "e82545-%d:%d tx", pi->pi_slot,
2329 	    pi->pi_func);
2330 	pthread_set_name_np(sc->esc_tx_tid, nstr);
2331 
2332 	pci_set_cfgdata16(pi, PCIR_DEVICE, E82545_DEV_ID_82545EM_COPPER);
2333 	pci_set_cfgdata16(pi, PCIR_VENDOR, E82545_VENDOR_ID_INTEL);
2334 	pci_set_cfgdata8(pi,  PCIR_CLASS, PCIC_NETWORK);
2335 	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_NETWORK_ETHERNET);
2336 	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, E82545_SUBDEV_ID);
2337 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, E82545_VENDOR_ID_INTEL);
2338 
2339 	pci_set_cfgdata8(pi,  PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
2340 	pci_set_cfgdata8(pi,  PCIR_INTPIN, 0x1);
2341 
2342 	/* TODO: this card also supports MSI, but the FreeBSD driver for it
2343 	 * does not, so I have not implemented it. */
2344 	pci_lintr_request(pi);
2345 
2346 	pci_emul_alloc_bar(pi, E82545_BAR_REGISTER, PCIBAR_MEM32,
2347 		E82545_BAR_REGISTER_LEN);
2348 	pci_emul_alloc_bar(pi, E82545_BAR_FLASH, PCIBAR_MEM32,
2349 		E82545_BAR_FLASH_LEN);
2350 	pci_emul_alloc_bar(pi, E82545_BAR_IO, PCIBAR_IO,
2351 		E82545_BAR_IO_LEN);
2352 
2353 	mac = get_config_value_node(nvl, "mac");
2354 	if (mac != NULL) {
2355 		err = net_parsemac(mac, sc->esc_mac.octet);
2356 		if (err) {
2357 			free(sc);
2358 			return (err);
2359 		}
2360 	} else
2361 		net_genmac(pi, sc->esc_mac.octet);
2362 
2363 	err = netbe_init(&sc->esc_be, nvl, e82545_rx_callback, sc);
2364 	if (err) {
2365 		free(sc);
2366 		return (err);
2367 	}
2368 
2369 	netbe_rx_enable(sc->esc_be);
2370 
2371 	/* H/w initiated reset */
2372 	e82545_reset(sc, 0);
2373 
2374 	return (0);
2375 }
2376 
2377 #ifdef BHYVE_SNAPSHOT
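/*
 * Save/restore the register-visible device state.  Host pointers into
 * guest memory (the descriptor ring shadows) are not stored as such;
 * SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE presumably re-derives them from the
 * guest-physical ring base on restore.
 */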
2378 static int
2379 e82545_snapshot(struct vm_snapshot_meta *meta)
2380 {
2381 	int i;
2382 	int ret;
2383 	struct e82545_softc *sc;
2384 	struct pci_devinst *pi;
2385 	uint64_t bitmap_value;
2386 
2387 	pi = meta->dev_data;
2388 	sc = pi->pi_arg;
2389 
2390 	/* esc_mevp and esc_mevpitr should be reinitialized at init. */
2391 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_mac, meta, ret, done);
2392 
2393 	/* General */
2394 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_CTRL, meta, ret, done);
2395 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCAL, meta, ret, done);
2396 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCAH, meta, ret, done);
2397 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCT, meta, ret, done);
2398 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_VET, meta, ret, done);
2399 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCTTV, meta, ret, done);
2400 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_LEDCTL, meta, ret, done);
2401 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_PBA, meta, ret, done);
2402 
2403 	/* Interrupt control */
2404 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_irq_asserted, meta, ret, done);
2405 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_ICR, meta, ret, done);
2406 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_ITR, meta, ret, done);
2407 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_ICS, meta, ret, done);
2408 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_IMS, meta, ret, done);
2409 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_IMC, meta, ret, done);
2410 
2411 	/*
2412 	 * Transmit
2413 	 *
2414 	 * The union fields alias bytes of the larger unsigned integer
2415 	 * members, so snapshotting the integer captures every sub-field,
2416 	 * e.g., ip_config = [ipcss|ipcso|ipcse0|ipcse1]
2417 	 */
2418 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.lower_setup.ip_config, meta, ret, done);
2419 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.upper_setup.tcp_config, meta, ret, done);
2420 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.cmd_and_length, meta, ret, done);
2421 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_txctx.tcp_seg_setup.data, meta, ret, done);
2422 
2423 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_tx_enabled, meta, ret, done);
2424 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_tx_active, meta, ret, done);
2425 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TXCW, meta, ret, done);
2426 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TCTL, meta, ret, done);
2427 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TIPG, meta, ret, done);
2428 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_AIT, meta, ret, done);
2429 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_tdba, meta, ret, done);
2430 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDBAL, meta, ret, done);
2431 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDBAH, meta, ret, done);
2432 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDLEN, meta, ret, done);
2433 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDH, meta, ret, done);
2434 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDHr, meta, ret, done);
2435 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TDT, meta, ret, done);
2436 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TIDV, meta, ret, done);
2437 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TXDCTL, meta, ret, done);
2438 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_TADV, meta, ret, done);
2439 
2440 	/* Needs esc_TDLEN restored first; fields reordered relative to the struct. */
2441 	SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(pi->pi_vmctx, sc->esc_txdesc,
2442 	    sc->esc_TDLEN, true, meta, ret, done);
2443 
2444 	/* L2 frame acceptance */
2445 	for (i = 0; i < (int)nitems(sc->esc_uni); i++) {
2446 		SNAPSHOT_VAR_OR_LEAVE(sc->esc_uni[i].eu_valid, meta, ret, done);
2447 		SNAPSHOT_VAR_OR_LEAVE(sc->esc_uni[i].eu_addrsel, meta, ret, done);
2448 		SNAPSHOT_VAR_OR_LEAVE(sc->esc_uni[i].eu_eth, meta, ret, done);
2449 	}
2450 
2451 	SNAPSHOT_BUF_OR_LEAVE(sc->esc_fmcast, sizeof(sc->esc_fmcast),
2452 			      meta, ret, done);
2453 	SNAPSHOT_BUF_OR_LEAVE(sc->esc_fvlan, sizeof(sc->esc_fvlan),
2454 			      meta, ret, done);
2455 
2456 	/* Receive */
2457 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rx_enabled, meta, ret, done);
2458 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rx_active, meta, ret, done);
2459 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rx_loopback, meta, ret, done);
2460 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RCTL, meta, ret, done);
2461 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCRTL, meta, ret, done);
2462 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_FCRTH, meta, ret, done);
2463 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_rdba, meta, ret, done);
2464 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDBAL, meta, ret, done);
2465 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDBAH, meta, ret, done);
2466 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDLEN, meta, ret, done);
2467 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDH, meta, ret, done);
2468 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDT, meta, ret, done);
2469 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RDTR, meta, ret, done);
2470 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RXDCTL, meta, ret, done);
2471 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RADV, meta, ret, done);
2472 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RSRPD, meta, ret, done);
2473 	SNAPSHOT_VAR_OR_LEAVE(sc->esc_RXCSUM, meta, ret, done);
2474 
2475 	/* Needs esc_RDLEN restored first; fields reordered relative to the struct. */
2476 	SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(pi->pi_vmctx, sc->esc_rxdesc,
2477 	    sc->esc_RDLEN, true, meta, ret, done);
2478 
2479 	/* IO Port register access */
2480 	SNAPSHOT_VAR_OR_LEAVE(sc->io_addr, meta, ret, done);
2481 
2482 	/* Shadow copy of MDIC */
2483 	SNAPSHOT_VAR_OR_LEAVE(sc->mdi_control, meta, ret, done);
2484 
2485 	/* Shadow copy of EECD */
2486 	SNAPSHOT_VAR_OR_LEAVE(sc->eeprom_control, meta, ret, done);
2487 
2488 	/* Latest NVM in/out */
2489 	SNAPSHOT_VAR_OR_LEAVE(sc->nvm_data, meta, ret, done);
2490 	SNAPSHOT_VAR_OR_LEAVE(sc->nvm_opaddr, meta, ret, done);
2491 
2492 	/* Stats */
2493 	SNAPSHOT_VAR_OR_LEAVE(sc->missed_pkt_count, meta, ret, done);
2494 	SNAPSHOT_BUF_OR_LEAVE(sc->pkt_rx_by_size, sizeof(sc->pkt_rx_by_size),
2495 			      meta, ret, done);
2496 	SNAPSHOT_BUF_OR_LEAVE(sc->pkt_tx_by_size, sizeof(sc->pkt_tx_by_size),
2497 			      meta, ret, done);
2498 	SNAPSHOT_VAR_OR_LEAVE(sc->good_pkt_rx_count, meta, ret, done);
2499 	SNAPSHOT_VAR_OR_LEAVE(sc->bcast_pkt_rx_count, meta, ret, done);
2500 	SNAPSHOT_VAR_OR_LEAVE(sc->mcast_pkt_rx_count, meta, ret, done);
2501 	SNAPSHOT_VAR_OR_LEAVE(sc->good_pkt_tx_count, meta, ret, done);
2502 	SNAPSHOT_VAR_OR_LEAVE(sc->bcast_pkt_tx_count, meta, ret, done);
2503 	SNAPSHOT_VAR_OR_LEAVE(sc->mcast_pkt_tx_count, meta, ret, done);
2504 	SNAPSHOT_VAR_OR_LEAVE(sc->oversize_rx_count, meta, ret, done);
2505 	SNAPSHOT_VAR_OR_LEAVE(sc->tso_tx_count, meta, ret, done);
2506 	SNAPSHOT_VAR_OR_LEAVE(sc->good_octets_rx, meta, ret, done);
2507 	SNAPSHOT_VAR_OR_LEAVE(sc->good_octets_tx, meta, ret, done);
2508 	SNAPSHOT_VAR_OR_LEAVE(sc->missed_octets, meta, ret, done);
2509 
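	/*
	 * nvm_bits and nvm_mode appear to be bit-fields in the softc, so
	 * they cannot be handed to SNAPSHOT_VAR_OR_LEAVE by address; they
	 * are staged through the wider bitmap_value temporary on save and
	 * copied back on restore.
	 */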
2510 	if (meta->op == VM_SNAPSHOT_SAVE)
2511 		bitmap_value = sc->nvm_bits;
2512 	SNAPSHOT_VAR_OR_LEAVE(bitmap_value, meta, ret, done);
2513 	if (meta->op == VM_SNAPSHOT_RESTORE)
2514 		sc->nvm_bits = bitmap_value;
2515 
2516 	if (meta->op == VM_SNAPSHOT_SAVE)
2517 		bitmap_value = sc->nvm_mode;
2518 	SNAPSHOT_VAR_OR_LEAVE(bitmap_value, meta, ret, done);
2519 	if (meta->op == VM_SNAPSHOT_RESTORE)
2520 		sc->nvm_mode = bitmap_value;
2521 
2522 	/* EEPROM data */
2523 	SNAPSHOT_BUF_OR_LEAVE(sc->eeprom_data, sizeof(sc->eeprom_data),
2524 			      meta, ret, done);
2525 
2526 done:
2527 	return (ret);
2528 }
2529 #endif
2530 
2531 static const struct pci_devemu pci_de_e82545 = {
2532 	.pe_emu = 	"e1000",
2533 	.pe_init =	e82545_init,
2534 	.pe_legacy_config = netbe_legacy_config,
2535 	.pe_barwrite =	e82545_write,
2536 	.pe_barread =	e82545_read,
2537 #ifdef BHYVE_SNAPSHOT
2538 	.pe_snapshot =	e82545_snapshot,
2539 #endif
2540 };
2541 PCI_EMUL_SET(pci_de_e82545);
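/*
 * The "e1000" emulation registered above is selected from the bhyve
 * command line in the usual way, e.g. (illustrative):
 *
 *	bhyve -s 2:0,e1000,tap0,mac=58:9c:fc:00:00:01 ... vmname
 *
 * netbe_legacy_config turns the legacy option string into the nvlist
 * consumed by e82545_init().
 */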
2542