xref: /illumos-gate/usr/src/cmd/bhyve/pci_e82545.c (revision ddb365bfc9e868ad24ccdcb0dc91af18b10df082)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
5  * Copyright (c) 2015 Peter Grehan <grehan@freebsd.org>
6  * Copyright (c) 2013 Jeremiah Lott, Avere Systems
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer
14  *    in this position and unchanged.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/types.h>
36 #ifndef WITHOUT_CAPSICUM
37 #include <sys/capsicum.h>
38 #endif
39 #include <sys/limits.h>
40 #include <sys/ioctl.h>
41 #include <sys/uio.h>
42 #include <net/ethernet.h>
43 #include <netinet/in.h>
44 #include <netinet/tcp.h>
45 
46 #ifndef WITHOUT_CAPSICUM
47 #include <capsicum_helpers.h>
48 #endif
49 
50 #include <err.h>
51 #include <errno.h>
52 #include <fcntl.h>
53 #include <md5.h>
54 #include <stdio.h>
55 #include <stdlib.h>
56 #include <string.h>
57 #include <sysexits.h>
58 #include <unistd.h>
59 #include <pthread.h>
60 #include <pthread_np.h>
61 
62 #include "e1000_regs.h"
63 #include "e1000_defines.h"
64 #include "mii.h"
65 
66 #include "bhyverun.h"
67 #include "config.h"
68 #include "debug.h"
69 #include "pci_emul.h"
70 #include "mevent.h"
71 #include "net_utils.h"
72 #include "net_backends.h"
73 
74 /* Hardware/register definitions XXX: move some to common code. */
75 #define E82545_VENDOR_ID_INTEL			0x8086
76 #define E82545_DEV_ID_82545EM_COPPER		0x100F
77 #define E82545_SUBDEV_ID			0x1008
78 
79 #define E82545_REVISION_4			4
80 
81 #define E82545_MDIC_DATA_MASK			0x0000FFFF
82 #define E82545_MDIC_OP_MASK			0x0c000000
83 #define E82545_MDIC_IE				0x20000000
84 
85 #define E82545_EECD_FWE_DIS	0x00000010 /* Flash writes disabled */
86 #define E82545_EECD_FWE_EN	0x00000020 /* Flash writes enabled */
87 #define E82545_EECD_FWE_MASK	0x00000030 /* Flash writes mask */
88 
89 #define E82545_BAR_REGISTER			0
90 #define E82545_BAR_REGISTER_LEN			(128*1024)
91 #define E82545_BAR_FLASH			1
92 #define E82545_BAR_FLASH_LEN			(64*1024)
93 #define E82545_BAR_IO				2
94 #define E82545_BAR_IO_LEN			8
95 
96 #define E82545_IOADDR				0x00000000
97 #define E82545_IODATA				0x00000004
98 #define E82545_IO_REGISTER_MAX			0x0001FFFF
99 #define E82545_IO_FLASH_BASE			0x00080000
100 #define E82545_IO_FLASH_MAX			0x000FFFFF
101 
102 #define E82545_ARRAY_ENTRY(reg, offset)		(reg + (offset<<2))
103 #define E82545_RAR_MAX				15
104 #define E82545_MTA_MAX				127
105 #define E82545_VFTA_MAX				127
106 
107 /* Slightly modified from the driver versions, hardcoded for 3 opcode bits,
108  * followed by 6 address bits.
109  * TODO: make opcode bits and addr bits configurable?
110  * NVM Commands - Microwire */
111 #define E82545_NVM_OPCODE_BITS	3
112 #define E82545_NVM_ADDR_BITS	6
113 #define E82545_NVM_DATA_BITS	16
114 #define E82545_NVM_OPADDR_BITS	(E82545_NVM_OPCODE_BITS + E82545_NVM_ADDR_BITS)
115 #define E82545_NVM_ADDR_MASK	((1 << E82545_NVM_ADDR_BITS)-1)
116 #define E82545_NVM_OPCODE_MASK	\
117     (((1 << E82545_NVM_OPCODE_BITS) - 1) << E82545_NVM_ADDR_BITS)
118 #define E82545_NVM_OPCODE_READ	(0x6 << E82545_NVM_ADDR_BITS)	/* read */
119 #define E82545_NVM_OPCODE_WRITE	(0x5 << E82545_NVM_ADDR_BITS)	/* write */
120 #define E82545_NVM_OPCODE_ERASE	(0x7 << E82545_NVM_ADDR_BITS)	/* erase */
121 #define	E82545_NVM_OPCODE_EWEN	(0x4 << E82545_NVM_ADDR_BITS)	/* wr-enable */
122 
123 #define	E82545_NVM_EEPROM_SIZE	64 /* 64 * 16-bit values == 128 bytes */
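/*
 * Illustrative encoding (derived from the definitions above): the guest
 * clocks in E82545_NVM_OPADDR_BITS (9) bits of opcode+address, MSB first.
 * A READ of word 0x05, for example, shifts in (0x6 << 6) | 0x05 == 0x185,
 * after which the device shifts out the 16 data bits of eeprom_data[0x05].
 */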
124 
125 #define E1000_ICR_SRPD		0x00010000
126 
127 /* This is an arbitrary number.  There is no hard limit on the chip. */
128 #define I82545_MAX_TXSEGS	64
129 
130 /* Legacy receive descriptor */
131 struct e1000_rx_desc {
132 	uint64_t buffer_addr;	/* Address of the descriptor's data buffer */
133 	uint16_t length;	/* Length of data DMAed into data buffer */
134 	uint16_t csum;		/* Packet checksum */
135 	uint8_t	 status;       	/* Descriptor status */
136 	uint8_t  errors;	/* Descriptor Errors */
137 	uint16_t special;
138 };
139 
140 /* Transmit descriptor types */
141 #define	E1000_TXD_MASK		(E1000_TXD_CMD_DEXT | 0x00F00000)
142 #define E1000_TXD_TYP_L		(0)
143 #define E1000_TXD_TYP_C		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C)
144 #define E1000_TXD_TYP_D		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)
145 
146 /* Legacy transmit descriptor */
147 struct e1000_tx_desc {
148 	uint64_t buffer_addr;   /* Address of the descriptor's data buffer */
149 	union {
150 		uint32_t data;
151 		struct {
152 			uint16_t length;  /* Data buffer length */
153 			uint8_t  cso;  /* Checksum offset */
154 			uint8_t  cmd;  /* Descriptor control */
155 		} flags;
156 	} lower;
157 	union {
158 		uint32_t data;
159 		struct {
160 			uint8_t status; /* Descriptor status */
161 			uint8_t css;  /* Checksum start */
162 			uint16_t special;
163 		} fields;
164 	} upper;
165 };
166 
167 /* Context descriptor */
168 struct e1000_context_desc {
169 	union {
170 		uint32_t ip_config;
171 		struct {
172 			uint8_t ipcss;  /* IP checksum start */
173 			uint8_t ipcso;  /* IP checksum offset */
174 			uint16_t ipcse;  /* IP checksum end */
175 		} ip_fields;
176 	} lower_setup;
177 	union {
178 		uint32_t tcp_config;
179 		struct {
180 			uint8_t tucss;  /* TCP checksum start */
181 			uint8_t tucso;  /* TCP checksum offset */
182 			uint16_t tucse;  /* TCP checksum end */
183 		} tcp_fields;
184 	} upper_setup;
185 	uint32_t cmd_and_length;
186 	union {
187 		uint32_t data;
188 		struct {
189 			uint8_t status;  /* Descriptor status */
190 			uint8_t hdr_len;  /* Header length */
191 			uint16_t mss;  /* Maximum segment size */
192 		} fields;
193 	} tcp_seg_setup;
194 };
195 
196 /* Data descriptor */
197 struct e1000_data_desc {
198 	uint64_t buffer_addr;  /* Address of the descriptor's data buffer */
199 	union {
200 		uint32_t data;
201 		struct {
202 			uint16_t length;  /* Data buffer length */
203 			uint8_t typ_len_ext;
204 			uint8_t cmd;
205 		} flags;
206 	} lower;
207 	union {
208 		uint32_t data;
209 		struct {
210 			uint8_t status;  /* Descriptor status */
211 			uint8_t popts;  /* Packet Options */
212 			uint16_t special;
213 		} fields;
214 	} upper;
215 };
216 
217 union e1000_tx_udesc {
218 	struct e1000_tx_desc td;
219 	struct e1000_context_desc cd;
220 	struct e1000_data_desc dd;
221 };
222 
223 /* Tx checksum info for a packet. */
224 struct ck_info {
225 	int	ck_valid;	/* ck_info is valid */
226 	uint8_t	ck_start;	/* start byte of cksum calculation */
227 	uint8_t	ck_off;		/* offset of cksum insertion */
228 	uint16_t ck_len;	/* end byte of cksum calc (inclusive): 0 is to packet-end */
229 };
230 
231 /*
232  * Debug printf
233  */
234 static int e82545_debug = 0;
235 #define WPRINTF(msg,params...) PRINTLN("e82545: " msg, ##params)
236 #define DPRINTF(msg,params...) if (e82545_debug) WPRINTF(msg, params)
237 
238 #define	MIN(a,b) (((a)<(b))?(a):(b))
239 #define	MAX(a,b) (((a)>(b))?(a):(b))
240 
241 /* s/w representation of the RAL/RAH regs */
242 struct  eth_uni {
243 	int		eu_valid;
244 	int		eu_addrsel;
245 	struct ether_addr eu_eth;
246 };
247 
248 
249 struct e82545_softc {
250 	struct pci_devinst *esc_pi;
251 	struct vmctx	*esc_ctx;
252 	struct mevent   *esc_mevpitr;
253 	pthread_mutex_t	esc_mtx;
254 	struct ether_addr esc_mac;
255 	net_backend_t	*esc_be;
256 
257 	/* General */
258 	uint32_t	esc_CTRL;	/* x0000 device ctl */
259 	uint32_t	esc_FCAL;	/* x0028 flow ctl addr lo */
260 	uint32_t	esc_FCAH;	/* x002C flow ctl addr hi */
261 	uint32_t	esc_FCT;	/* x0030 flow ctl type */
262 	uint32_t	esc_VET;	/* x0038 VLAN eth type */
263 	uint32_t	esc_FCTTV;	/* x0170 flow ctl tx timer */
264 	uint32_t	esc_LEDCTL;	/* x0E00 LED control */
265 	uint32_t	esc_PBA;	/* x1000 pkt buffer allocation */
266 
267 	/* Interrupt control */
268 	int		esc_irq_asserted;
269 	uint32_t	esc_ICR;	/* x00C0 cause read/clear */
270 	uint32_t	esc_ITR;	/* x00C4 intr throttling */
271 	uint32_t	esc_ICS;	/* x00C8 cause set */
272 	uint32_t	esc_IMS;	/* x00D0 mask set/read */
273 	uint32_t	esc_IMC;	/* x00D8 mask clear */
274 
275 	/* Transmit */
276 	union e1000_tx_udesc *esc_txdesc;
277 	struct e1000_context_desc esc_txctx;
278 	pthread_t	esc_tx_tid;
279 	pthread_cond_t	esc_tx_cond;
280 	int		esc_tx_enabled;
281 	int		esc_tx_active;
282 	uint32_t	esc_TXCW;	/* x0178 transmit config */
283 	uint32_t	esc_TCTL;	/* x0400 transmit ctl */
284 	uint32_t	esc_TIPG;	/* x0410 inter-packet gap */
285 	uint16_t	esc_AIT;	/* x0458 Adaptive Interframe Throttle */
286 	uint64_t	esc_tdba;      	/* verified 64-bit desc table addr */
287 	uint32_t	esc_TDBAL;	/* x3800 desc table addr, low bits */
288 	uint32_t	esc_TDBAH;	/* x3804 desc table addr, hi 32-bits */
289 	uint32_t	esc_TDLEN;	/* x3808 # descriptors in bytes */
290 	uint16_t	esc_TDH;	/* x3810 desc table head idx */
291 	uint16_t	esc_TDHr;	/* internal read version of TDH */
292 	uint16_t	esc_TDT;	/* x3818 desc table tail idx */
293 	uint32_t	esc_TIDV;	/* x3820 intr delay */
294 	uint32_t	esc_TXDCTL;	/* x3828 desc control */
295 	uint32_t	esc_TADV;	/* x382C intr absolute delay */
296 
297 	/* L2 frame acceptance */
298 	struct eth_uni	esc_uni[16];	/* 16 x unicast MAC addresses */
299 	uint32_t	esc_fmcast[128]; /* Multicast filter bit-match */
300 	uint32_t	esc_fvlan[128]; /* VLAN 4096-bit filter */
301 
302 	/* Receive */
303 	struct e1000_rx_desc *esc_rxdesc;
304 	pthread_cond_t	esc_rx_cond;
305 	int		esc_rx_enabled;
306 	int		esc_rx_active;
307 	int		esc_rx_loopback;
308 	uint32_t	esc_RCTL;	/* x0100 receive ctl */
309 	uint32_t	esc_FCRTL;	/* x2160 flow cntl thresh, low */
310 	uint32_t	esc_FCRTH;	/* x2168 flow cntl thresh, hi */
311 	uint64_t	esc_rdba;	/* verified 64-bit desc table addr */
312 	uint32_t	esc_RDBAL;	/* x2800 desc table addr, low bits */
313 	uint32_t	esc_RDBAH;	/* x2804 desc table addr, hi 32-bits*/
314 	uint32_t	esc_RDLEN;	/* x2808 #descriptors */
315 	uint16_t	esc_RDH;	/* x2810 desc table head idx */
316 	uint16_t	esc_RDT;	/* x2818 desc table tail idx */
317 	uint32_t	esc_RDTR;	/* x2820 intr delay */
318 	uint32_t	esc_RXDCTL;	/* x2828 desc control */
319 	uint32_t	esc_RADV;	/* x282C intr absolute delay */
320 	uint32_t	esc_RSRPD;	/* x2C00 recv small packet detect */
321 	uint32_t	esc_RXCSUM;     /* x5000 receive cksum ctl */
322 
323 	/* IO Port register access */
324 	uint32_t io_addr;
325 
326 	/* Shadow copy of MDIC */
327 	uint32_t mdi_control;
328 	/* Shadow copy of EECD */
329 	uint32_t eeprom_control;
330 	/* Latest NVM in/out */
331 	uint16_t nvm_data;
332 	uint16_t nvm_opaddr;
333 	/* stats */
334 	uint32_t missed_pkt_count; /* dropped for no room in rx queue */
335 	uint32_t pkt_rx_by_size[6];
336 	uint32_t pkt_tx_by_size[6];
337 	uint32_t good_pkt_rx_count;
338 	uint32_t bcast_pkt_rx_count;
339 	uint32_t mcast_pkt_rx_count;
340 	uint32_t good_pkt_tx_count;
341 	uint32_t bcast_pkt_tx_count;
342 	uint32_t mcast_pkt_tx_count;
343 	uint32_t oversize_rx_count;
344 	uint32_t tso_tx_count;
345 	uint64_t good_octets_rx;
346 	uint64_t good_octets_tx;
347 	uint64_t missed_octets; /* counts missed and oversized */
348 
349 	uint8_t nvm_bits:6; /* number of bits remaining in/out */
350 	uint8_t nvm_mode:2;
351 #define E82545_NVM_MODE_OPADDR  0x0
352 #define E82545_NVM_MODE_DATAIN  0x1
353 #define E82545_NVM_MODE_DATAOUT 0x2
354 	/* EEPROM data */
355 	uint16_t eeprom_data[E82545_NVM_EEPROM_SIZE];
356 };
357 
358 static void e82545_reset(struct e82545_softc *sc, int dev);
359 static void e82545_rx_enable(struct e82545_softc *sc);
360 static void e82545_rx_disable(struct e82545_softc *sc);
361 static void e82545_rx_callback(int fd, enum ev_type type, void *param);
362 static void e82545_tx_start(struct e82545_softc *sc);
363 static void e82545_tx_enable(struct e82545_softc *sc);
364 static void e82545_tx_disable(struct e82545_softc *sc);
365 
366 static inline int __unused
367 e82545_size_stat_index(uint32_t size)
368 {
369 	if (size <= 64) {
370 		return 0;
371 	} else if (size >= 1024) {
372 		return 5;
373 	} else {
374 		/* should be 1-4 */
375 		return (ffs(size) - 6);
376 	}
377 }
378 
379 static void
380 e82545_init_eeprom(struct e82545_softc *sc)
381 {
382 	uint16_t checksum, i;
383 
384         /* mac addr */
385 	sc->eeprom_data[NVM_MAC_ADDR] = ((uint16_t)sc->esc_mac.octet[0]) |
386 		(((uint16_t)sc->esc_mac.octet[1]) << 8);
387 	sc->eeprom_data[NVM_MAC_ADDR+1] = ((uint16_t)sc->esc_mac.octet[2]) |
388 		(((uint16_t)sc->esc_mac.octet[3]) << 8);
389 	sc->eeprom_data[NVM_MAC_ADDR+2] = ((uint16_t)sc->esc_mac.octet[4]) |
390 		(((uint16_t)sc->esc_mac.octet[5]) << 8);
391 
392 	/* pci ids */
393 	sc->eeprom_data[NVM_SUB_DEV_ID] = E82545_SUBDEV_ID;
394 	sc->eeprom_data[NVM_SUB_VEN_ID] = E82545_VENDOR_ID_INTEL;
395 	sc->eeprom_data[NVM_DEV_ID] = E82545_DEV_ID_82545EM_COPPER;
396 	sc->eeprom_data[NVM_VEN_ID] = E82545_VENDOR_ID_INTEL;
397 
398 	/* fill in the checksum */
399         checksum = 0;
400 	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
401 		checksum += sc->eeprom_data[i];
402 	}
403 	checksum = NVM_SUM - checksum;
404 	sc->eeprom_data[NVM_CHECKSUM_REG] = checksum;
405 	DPRINTF("eeprom checksum: 0x%x", checksum);
406 }
407 
408 static void
409 e82545_write_mdi(struct e82545_softc *sc __unused, uint8_t reg_addr,
410     uint8_t phy_addr, uint32_t data)
411 {
412 	DPRINTF("Write mdi reg:0x%x phy:0x%x data: 0x%x", reg_addr, phy_addr, data);
413 }
414 
415 static uint32_t
416 e82545_read_mdi(struct e82545_softc *sc __unused, uint8_t reg_addr,
417     uint8_t phy_addr)
418 {
419 	//DPRINTF("Read mdi reg:0x%x phy:0x%x", reg_addr, phy_addr);
420 	switch (reg_addr) {
421 	case PHY_STATUS:
422 		return (MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
423 			MII_SR_AUTONEG_COMPLETE);
424 	case PHY_AUTONEG_ADV:
425 		return NWAY_AR_SELECTOR_FIELD;
426 	case PHY_LP_ABILITY:
427 		return 0;
428 	case PHY_1000T_STATUS:
429 		return (SR_1000T_LP_FD_CAPS | SR_1000T_REMOTE_RX_STATUS |
430 			SR_1000T_LOCAL_RX_STATUS);
431 	case PHY_ID1:
432 		return (M88E1011_I_PHY_ID >> 16) & 0xFFFF;
433 	case PHY_ID2:
434 		return (M88E1011_I_PHY_ID | E82545_REVISION_4) & 0xFFFF;
435 	default:
436 		DPRINTF("Unknown mdi read reg:0x%x phy:0x%x", reg_addr, phy_addr);
437 		return 0;
438 	}
439 	/* not reached */
440 }
441 
442 static void
443 e82545_eecd_strobe(struct e82545_softc *sc)
444 {
445 	/* Microwire state machine */
446 	/*
447 	DPRINTF("eeprom state machine strobe "
448 		"0x%x 0x%x 0x%x 0x%x",
449 		sc->nvm_mode, sc->nvm_bits,
450 		sc->nvm_opaddr, sc->nvm_data);*/
451 
452 	if (sc->nvm_bits == 0) {
453 		DPRINTF("eeprom state machine not expecting data! "
454 			"0x%x 0x%x 0x%x 0x%x",
455 			sc->nvm_mode, sc->nvm_bits,
456 			sc->nvm_opaddr, sc->nvm_data);
457 		return;
458 	}
459 	sc->nvm_bits--;
460 	if (sc->nvm_mode == E82545_NVM_MODE_DATAOUT) {
461 		/* shifting out */
462 		if (sc->nvm_data & 0x8000) {
463 			sc->eeprom_control |= E1000_EECD_DO;
464 		} else {
465 			sc->eeprom_control &= ~E1000_EECD_DO;
466 		}
467 		sc->nvm_data <<= 1;
468 		if (sc->nvm_bits == 0) {
469 			/* read done, back to opcode mode. */
470 			sc->nvm_opaddr = 0;
471 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
472 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
473 		}
474 	} else if (sc->nvm_mode == E82545_NVM_MODE_DATAIN) {
475 		/* shifting in */
476 		sc->nvm_data <<= 1;
477 		if (sc->eeprom_control & E1000_EECD_DI) {
478 			sc->nvm_data |= 1;
479 		}
480 		if (sc->nvm_bits == 0) {
481 			/* eeprom write */
482 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
483 			uint16_t addr = sc->nvm_opaddr & E82545_NVM_ADDR_MASK;
484 			if (op != E82545_NVM_OPCODE_WRITE) {
485 				DPRINTF("Illegal eeprom write op 0x%x",
486 					sc->nvm_opaddr);
487 			} else if (addr >= E82545_NVM_EEPROM_SIZE) {
488 				DPRINTF("Illegal eeprom write addr 0x%x",
489 					sc->nvm_opaddr);
490 			} else {
491 				DPRINTF("eeprom write eeprom[0x%x] = 0x%x",
492 				addr, sc->nvm_data);
493 				sc->eeprom_data[addr] = sc->nvm_data;
494 			}
495 			/* back to opcode mode */
496 			sc->nvm_opaddr = 0;
497 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
498 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
499 		}
500 	} else if (sc->nvm_mode == E82545_NVM_MODE_OPADDR) {
501 		sc->nvm_opaddr <<= 1;
502 		if (sc->eeprom_control & E1000_EECD_DI) {
503 			sc->nvm_opaddr |= 1;
504 		}
505 		if (sc->nvm_bits == 0) {
506 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
507 			switch (op) {
508 			case E82545_NVM_OPCODE_EWEN:
509 				DPRINTF("eeprom write enable: 0x%x",
510 					sc->nvm_opaddr);
511 				/* back to opcode mode */
512 				sc->nvm_opaddr = 0;
513 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
514 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
515 				break;
516 			case E82545_NVM_OPCODE_READ:
517 			{
518 				uint16_t addr = sc->nvm_opaddr &
519 					E82545_NVM_ADDR_MASK;
520 				sc->nvm_mode = E82545_NVM_MODE_DATAOUT;
521 				sc->nvm_bits = E82545_NVM_DATA_BITS;
522 				if (addr < E82545_NVM_EEPROM_SIZE) {
523 					sc->nvm_data = sc->eeprom_data[addr];
524 					DPRINTF("eeprom read: eeprom[0x%x] = 0x%x",
525 						addr, sc->nvm_data);
526 				} else {
527 					DPRINTF("eeprom illegal read: 0x%x",
528 						sc->nvm_opaddr);
529 					sc->nvm_data = 0;
530 				}
531 				break;
532 			}
533 			case E82545_NVM_OPCODE_WRITE:
534 				sc->nvm_mode = E82545_NVM_MODE_DATAIN;
535 				sc->nvm_bits = E82545_NVM_DATA_BITS;
536 				sc->nvm_data = 0;
537 				break;
538 			default:
539 				DPRINTF("eeprom unknown op: 0x%x",
540 					sc->nvm_opaddr);
541 				/* back to opcode mode */
542 				sc->nvm_opaddr = 0;
543 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
544 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
545 			}
546 		}
547 	} else {
548 		DPRINTF("eeprom state machine wrong state! "
549 			"0x%x 0x%x 0x%x 0x%x",
550 			sc->nvm_mode, sc->nvm_bits,
551 			sc->nvm_opaddr, sc->nvm_data);
552 	}
553 }
554 
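/*
 * Interrupt throttling: while the ITR timer armed in e82545_icr_assert()
 * or e82545_ims_change() exists, newly raised causes are only recorded.
 * On each tick this callback asserts the legacy interrupt if an unmasked
 * cause is pending and the line is not already asserted; otherwise the
 * timer is torn down so the next cause can assert immediately.
 */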
555 static void
556 e82545_itr_callback(int fd __unused, enum ev_type type __unused, void *param)
557 {
558 	uint32_t new;
559 	struct e82545_softc *sc = param;
560 
561 	pthread_mutex_lock(&sc->esc_mtx);
562 	new = sc->esc_ICR & sc->esc_IMS;
563 	if (new && !sc->esc_irq_asserted) {
564 		DPRINTF("itr callback: lintr assert %x", new);
565 		sc->esc_irq_asserted = 1;
566 		pci_lintr_assert(sc->esc_pi);
567 	} else {
568 		mevent_delete(sc->esc_mevpitr);
569 		sc->esc_mevpitr = NULL;
570 	}
571 	pthread_mutex_unlock(&sc->esc_mtx);
572 }
573 
574 static void
575 e82545_icr_assert(struct e82545_softc *sc, uint32_t bits)
576 {
577 	uint32_t new;
578 
579 	DPRINTF("icr assert: 0x%x", bits);
580 
581 	/*
582 	 * An interrupt is only generated if bits are set that
583 	 * aren't already in the ICR, these bits are unmasked,
584 	 * and there isn't an interrupt already pending.
585 	 */
586 	new = bits & ~sc->esc_ICR & sc->esc_IMS;
587 	sc->esc_ICR |= bits;
588 
589 	if (new == 0) {
590 		DPRINTF("icr assert: masked %x, ims %x", new, sc->esc_IMS);
591 	} else if (sc->esc_mevpitr != NULL) {
592 		DPRINTF("icr assert: throttled %x, ims %x", new, sc->esc_IMS);
593 	} else if (!sc->esc_irq_asserted) {
594 		DPRINTF("icr assert: lintr assert %x", new);
595 		sc->esc_irq_asserted = 1;
596 		pci_lintr_assert(sc->esc_pi);
597 		if (sc->esc_ITR != 0) {
598 			sc->esc_mevpitr = mevent_add(
599 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
600 			    EVF_TIMER, e82545_itr_callback, sc);
601 		}
602 	}
603 }
604 
605 static void
606 e82545_ims_change(struct e82545_softc *sc, uint32_t bits)
607 {
608 	uint32_t new;
609 
610 	/*
611 	 * Changing the mask may allow previously asserted
612 	 * but masked interrupt requests to generate an interrupt.
613 	 */
614 	new = bits & sc->esc_ICR & ~sc->esc_IMS;
615 	sc->esc_IMS |= bits;
616 
617 	if (new == 0) {
618 		DPRINTF("ims change: masked %x, ims %x", new, sc->esc_IMS);
619 	} else if (sc->esc_mevpitr != NULL) {
620 		DPRINTF("ims change: throttled %x, ims %x", new, sc->esc_IMS);
621 	} else if (!sc->esc_irq_asserted) {
622 		DPRINTF("ims change: lintr assert %x", new);
623 		sc->esc_irq_asserted = 1;
624 		pci_lintr_assert(sc->esc_pi);
625 		if (sc->esc_ITR != 0) {
626 			sc->esc_mevpitr = mevent_add(
627 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
628 			    EVF_TIMER, e82545_itr_callback, sc);
629 		}
630 	}
631 }
632 
633 static void
634 e82545_icr_deassert(struct e82545_softc *sc, uint32_t bits)
635 {
636 
637 	DPRINTF("icr deassert: 0x%x", bits);
638 	sc->esc_ICR &= ~bits;
639 
640 	/*
641 	 * If there are no longer any interrupt sources and there
642 	 * was an asserted interrupt, clear it
643 	 */
644 	if (sc->esc_irq_asserted && !(sc->esc_ICR & sc->esc_IMS)) {
645 		DPRINTF("icr deassert: lintr deassert %x", bits);
646 		pci_lintr_deassert(sc->esc_pi);
647 		sc->esc_irq_asserted = 0;
648 	}
649 }
650 
651 static void
652 e82545_intr_write(struct e82545_softc *sc, uint32_t offset, uint32_t value)
653 {
654 
655 	DPRINTF("intr_write: off %x, val %x", offset, value);
656 
657 	switch (offset) {
658 	case E1000_ICR:
659 		e82545_icr_deassert(sc, value);
660 		break;
661 	case E1000_ITR:
662 		sc->esc_ITR = value;
663 		break;
664 	case E1000_ICS:
665 		sc->esc_ICS = value;	/* not used: store for debug */
666 		e82545_icr_assert(sc, value);
667 		break;
668 	case E1000_IMS:
669 		e82545_ims_change(sc, value);
670 		break;
671 	case E1000_IMC:
672 		sc->esc_IMC = value;	/* for debug */
673 		sc->esc_IMS &= ~value;
674 		// XXX clear interrupts if all ICR bits now masked
675 		// and interrupt was pending ?
676 		break;
677 	default:
678 		break;
679 	}
680 }
681 
682 static uint32_t
683 e82545_intr_read(struct e82545_softc *sc, uint32_t offset)
684 {
685 	uint32_t retval;
686 
687 	retval = 0;
688 
689 	DPRINTF("intr_read: off %x", offset);
690 
691 	switch (offset) {
692 	case E1000_ICR:
693 		retval = sc->esc_ICR;
694 		sc->esc_ICR = 0;
695 		e82545_icr_deassert(sc, ~0);
696 		break;
697 	case E1000_ITR:
698 		retval = sc->esc_ITR;
699 		break;
700 	case E1000_ICS:
701 		/* write-only register */
702 		break;
703 	case E1000_IMS:
704 		retval = sc->esc_IMS;
705 		break;
706 	case E1000_IMC:
707 		/* write-only register */
708 		break;
709 	default:
710 		break;
711 	}
712 
713 	return (retval);
714 }
715 
716 static void
717 e82545_devctl(struct e82545_softc *sc, uint32_t val)
718 {
719 
720 	sc->esc_CTRL = val & ~E1000_CTRL_RST;
721 
722 	if (val & E1000_CTRL_RST) {
723 		DPRINTF("e1k: s/w reset, ctl %x", val);
724 		e82545_reset(sc, 1);
725 	}
726 	/* XXX check for phy reset ? */
727 }
728 
729 static void
730 e82545_rx_update_rdba(struct e82545_softc *sc)
731 {
732 
733 	/* XXX verify desc base/len within phys mem range */
734 	sc->esc_rdba = (uint64_t)sc->esc_RDBAH << 32 |
735 	    sc->esc_RDBAL;
736 
737 	/* Cache host mapping of guest descriptor array */
738 	sc->esc_rxdesc = paddr_guest2host(sc->esc_ctx,
739 	    sc->esc_rdba, sc->esc_RDLEN);
740 }
741 
742 static void
743 e82545_rx_ctl(struct e82545_softc *sc, uint32_t val)
744 {
745 	int on;
746 
747 	on = ((val & E1000_RCTL_EN) == E1000_RCTL_EN);
748 
749 	/* Save RCTL after stripping reserved bits 31:27,24,21,14,11:10,0 */
750 	sc->esc_RCTL = val & ~0xF9204c01;
751 
752 	DPRINTF("rx_ctl - %s RCTL %x, val %x",
753 		on ? "on" : "off", sc->esc_RCTL, val);
754 
755 	/* state change requested */
756 	if (on != sc->esc_rx_enabled) {
757 		if (on) {
758 			/* Catch disallowed/unimplemented settings */
759 			//assert(!(val & E1000_RCTL_LBM_TCVR));
760 
761 			if (sc->esc_RCTL & E1000_RCTL_LBM_TCVR) {
762 				sc->esc_rx_loopback = 1;
763 			} else {
764 				sc->esc_rx_loopback = 0;
765 			}
766 
767 			e82545_rx_update_rdba(sc);
768 			e82545_rx_enable(sc);
769 		} else {
770 			e82545_rx_disable(sc);
771 			sc->esc_rx_loopback = 0;
772 			sc->esc_rdba = 0;
773 			sc->esc_rxdesc = NULL;
774 		}
775 	}
776 }
777 
778 static void
779 e82545_tx_update_tdba(struct e82545_softc *sc)
780 {
781 
782 	/* XXX verify desc base/len within phys mem range */
783 	sc->esc_tdba = (uint64_t)sc->esc_TDBAH << 32 | sc->esc_TDBAL;
784 
785 	/* Cache host mapping of guest descriptor array */
786 	sc->esc_txdesc = paddr_guest2host(sc->esc_ctx, sc->esc_tdba,
787             sc->esc_TDLEN);
788 }
789 
790 static void
791 e82545_tx_ctl(struct e82545_softc *sc, uint32_t val)
792 {
793 	int on;
794 
795 	on = ((val & E1000_TCTL_EN) == E1000_TCTL_EN);
796 
797 	/* ignore TCTL_EN settings that don't change state */
798 	if (on == sc->esc_tx_enabled)
799 		return;
800 
801 	if (on) {
802 		e82545_tx_update_tdba(sc);
803 		e82545_tx_enable(sc);
804 	} else {
805 		e82545_tx_disable(sc);
806 		sc->esc_tdba = 0;
807 		sc->esc_txdesc = NULL;
808 	}
809 
810 	/* Save TCTL value after stripping reserved bits 31:25,23,2,0 */
811 	sc->esc_TCTL = val & ~0xFE800005;
812 }
813 
814 static int
815 e82545_bufsz(uint32_t rctl)
816 {
817 
818 	switch (rctl & (E1000_RCTL_BSEX | E1000_RCTL_SZ_256)) {
819 	case (E1000_RCTL_SZ_2048): return (2048);
820 	case (E1000_RCTL_SZ_1024): return (1024);
821 	case (E1000_RCTL_SZ_512): return (512);
822 	case (E1000_RCTL_SZ_256): return (256);
823 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_16384): return (16384);
824 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_8192): return (8192);
825 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_4096): return (4096);
826 	}
827 	return (256);	/* Forbidden value. */
828 }
829 
830 /* XXX one packet at a time until this is debugged */
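/*
 * Receive path: runs when the net backend has packets available.  Each
 * packet is scattered into up to maxpktdesc guest buffers taken from the
 * descriptor ring starting at RDH, status and length are written back
 * into every consumed descriptor, and the accumulated interrupt causes
 * are asserted once at the end of the pass.
 */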
831 static void
832 e82545_rx_callback(int fd __unused, enum ev_type type __unused, void *param)
833 {
834 	struct e82545_softc *sc = param;
835 	struct e1000_rx_desc *rxd;
836 	struct iovec vec[64];
837 	ssize_t len;
838 	int left, lim, maxpktsz, maxpktdesc, bufsz, i, n, size;
839 	uint32_t cause = 0;
840 	uint16_t *tp, tag, head;
841 
842 	pthread_mutex_lock(&sc->esc_mtx);
843 	DPRINTF("rx_run: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
844 
845 	if (!sc->esc_rx_enabled || sc->esc_rx_loopback) {
846 		DPRINTF("rx disabled (!%d || %d) -- packet(s) dropped",
847 		    sc->esc_rx_enabled, sc->esc_rx_loopback);
848 		while (netbe_rx_discard(sc->esc_be) > 0) {
849 		}
850 		goto done1;
851 	}
852 	bufsz = e82545_bufsz(sc->esc_RCTL);
853 	maxpktsz = (sc->esc_RCTL & E1000_RCTL_LPE) ? 16384 : 1522;
854 	maxpktdesc = (maxpktsz + bufsz - 1) / bufsz;
855 	size = sc->esc_RDLEN / 16;
856 	head = sc->esc_RDH;
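	/*
	 * Descriptors still available to the device: the ring distance from
	 * the current head up to (but not including) the guest-owned tail.
	 */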
857 	left = (size + sc->esc_RDT - head) % size;
858 	if (left < maxpktdesc) {
859 		DPRINTF("rx overflow (%d < %d) -- packet(s) dropped",
860 		    left, maxpktdesc);
861 		while (netbe_rx_discard(sc->esc_be) > 0) {
862 		}
863 		goto done1;
864 	}
865 
866 	sc->esc_rx_active = 1;
867 	pthread_mutex_unlock(&sc->esc_mtx);
868 
869 	for (lim = size / 4; lim > 0 && left >= maxpktdesc; lim -= n) {
870 
871 		/* Grab rx descriptor pointed to by the head pointer */
872 		for (i = 0; i < maxpktdesc; i++) {
873 			rxd = &sc->esc_rxdesc[(head + i) % size];
874 			vec[i].iov_base = paddr_guest2host(sc->esc_ctx,
875 			    rxd->buffer_addr, bufsz);
876 			vec[i].iov_len = bufsz;
877 		}
878 		len = netbe_recv(sc->esc_be, vec, maxpktdesc);
879 		if (len <= 0) {
880 			DPRINTF("netbe_recv() returned %zd", len);
881 			goto done;
882 		}
883 
884 		/*
885 		 * Adjust the packet length based on whether the CRC needs
886 		 * to be stripped or if the packet is less than the minimum
887 		 * eth packet size.
888 		 */
889 		if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
890 			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
891 		if (!(sc->esc_RCTL & E1000_RCTL_SECRC))
892 			len += ETHER_CRC_LEN;
893 		n = (len + bufsz - 1) / bufsz;
894 
895 		DPRINTF("packet read %zd bytes, %d segs, head %d",
896 		    len, n, head);
897 
898 		/* Apply VLAN filter. */
899 		tp = (uint16_t *)vec[0].iov_base + 6;
900 		if ((sc->esc_RCTL & E1000_RCTL_VFE) &&
901 		    (ntohs(tp[0]) == sc->esc_VET)) {
902 			tag = ntohs(tp[1]) & 0x0fff;
903 			if ((sc->esc_fvlan[tag >> 5] &
904 			    (1 << (tag & 0x1f))) != 0) {
905 				DPRINTF("known VLAN %d", tag);
906 			} else {
907 				DPRINTF("unknown VLAN %d", tag);
908 				n = 0;
909 				continue;
910 			}
911 		}
912 
913 		/* Update all consumed descriptors. */
914 		for (i = 0; i < n - 1; i++) {
915 			rxd = &sc->esc_rxdesc[(head + i) % size];
916 			rxd->length = bufsz;
917 			rxd->csum = 0;
918 			rxd->errors = 0;
919 			rxd->special = 0;
920 			rxd->status = E1000_RXD_STAT_DD;
921 		}
922 		rxd = &sc->esc_rxdesc[(head + i) % size];
923 		rxd->length = len % bufsz;
924 		rxd->csum = 0;
925 		rxd->errors = 0;
926 		rxd->special = 0;
927 		/* XXX signal no checksum for now */
928 		rxd->status = E1000_RXD_STAT_PIF | E1000_RXD_STAT_IXSM |
929 		    E1000_RXD_STAT_EOP | E1000_RXD_STAT_DD;
930 
931 		/* Schedule receive interrupts. */
932 		if ((uint32_t)len <= sc->esc_RSRPD) {
933 			cause |= E1000_ICR_SRPD | E1000_ICR_RXT0;
934 		} else {
935 			/* XXX: RDRT and RADV timers should be here. */
936 			cause |= E1000_ICR_RXT0;
937 		}
938 
939 		head = (head + n) % size;
940 		left -= n;
941 	}
942 
943 done:
944 	pthread_mutex_lock(&sc->esc_mtx);
945 	sc->esc_rx_active = 0;
946 	if (sc->esc_rx_enabled == 0)
947 		pthread_cond_signal(&sc->esc_rx_cond);
948 
949 	sc->esc_RDH = head;
950 	/* Respect E1000_RCTL_RDMTS */
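	/*
	 * RDMTS (RCTL bits 9:8) selects a minimum free-descriptor threshold
	 * of 1/2, 1/4 or 1/8 of the ring; raise RXDMT0 once the descriptors
	 * still available to the device drop below it.
	 */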
951 	left = (size + sc->esc_RDT - head) % size;
952 	if (left < (size >> (((sc->esc_RCTL >> 8) & 3) + 1)))
953 		cause |= E1000_ICR_RXDMT0;
954 	/* Assert all accumulated interrupts. */
955 	if (cause != 0)
956 		e82545_icr_assert(sc, cause);
957 done1:
958 	DPRINTF("rx_run done: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
959 	pthread_mutex_unlock(&sc->esc_mtx);
960 }
961 
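/*
 * Fold a 32-bit partial sum into 16 bits with end-around carry, as used
 * by the ones'-complement Internet checksum.
 */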
962 static uint16_t
963 e82545_carry(uint32_t sum)
964 {
965 
966 	sum = (sum & 0xFFFF) + (sum >> 16);
967 	if (sum > 0xFFFF)
968 		sum -= 0xFFFF;
969 	return (sum);
970 }
971 
972 static uint16_t
973 e82545_buf_checksum(uint8_t *buf, int len)
974 {
975 	int i;
976 	uint32_t sum = 0;
977 
978 	/* Checksum all the pairs of bytes first... */
979 	for (i = 0; i < (len & ~1); i += 2)
980 		sum += *((u_int16_t *)(buf + i));
981 
982 	/*
983 	 * If there's a single byte left over, checksum it, too.
984 	 * Network byte order is big-endian, so the remaining byte is
985 	 * the high byte.
986 	 */
987 	if (i < len)
988 		sum += htons(buf[i] << 8);
989 
990 	return (e82545_carry(sum));
991 }
992 
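/*
 * Compute the Internet checksum of a byte range that may span several
 * iovec entries, starting 'off' bytes into the first one.  'odd' tracks
 * whether an odd number of bytes has been summed so far, in which case
 * the next chunk's partial sum is byte-swapped to preserve 16-bit
 * alignment of the overall sum.
 */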
993 static uint16_t
994 e82545_iov_checksum(struct iovec *iov, int iovcnt, unsigned int off,
995     unsigned int len)
996 {
997 	unsigned int now, odd;
998 	uint32_t sum = 0, s;
999 
1000 	/* Skip completely unneeded vectors. */
1001 	while (iovcnt > 0 && iov->iov_len <= off && off > 0) {
1002 		off -= iov->iov_len;
1003 		iov++;
1004 		iovcnt--;
1005 	}
1006 
1007 	/* Calculate checksum of requested range. */
1008 	odd = 0;
1009 	while (len > 0 && iovcnt > 0) {
1010 		now = MIN(len, iov->iov_len - off);
1011 		s = e82545_buf_checksum((uint8_t *)iov->iov_base + off, now);
1012 		sum += odd ? (s << 8) : s;
1013 		odd ^= (now & 1);
1014 		len -= now;
1015 		off = 0;
1016 		iov++;
1017 		iovcnt--;
1018 	}
1019 
1020 	return (e82545_carry(sum));
1021 }
1022 
1023 /*
1024  * Return the transmit descriptor type.
1025  */
1026 static int
1027 e82545_txdesc_type(uint32_t lower)
1028 {
1029 	int type;
1030 
1031 	type = 0;
1032 
1033 	if (lower & E1000_TXD_CMD_DEXT)
1034 		type = lower & E1000_TXD_MASK;
1035 
1036 	return (type);
1037 }
1038 
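/*
 * Insert a checksum requested by the guest: sum the bytes from ck_start
 * through ck_len (inclusive), or to the end of the packet when ck_len is
 * 0, and store the complement at offset ck_off, which the callers ensure
 * lies within the writable header vector iov[0].
 */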
1039 static void
1040 e82545_transmit_checksum(struct iovec *iov, int iovcnt, struct ck_info *ck)
1041 {
1042 	uint16_t cksum;
1043 	unsigned int cklen;
1044 
1045 	DPRINTF("tx cksum: iovcnt/s/off/len %d/%d/%d/%d",
1046 	    iovcnt, ck->ck_start, ck->ck_off, ck->ck_len);
1047 	cklen = ck->ck_len ? ck->ck_len - ck->ck_start + 1U : UINT_MAX;
1048 	cksum = e82545_iov_checksum(iov, iovcnt, ck->ck_start, cklen);
1049 	*(uint16_t *)((uint8_t *)iov[0].iov_base + ck->ck_off) = ~cksum;
1050 }
1051 
1052 static void
1053 e82545_transmit_backend(struct e82545_softc *sc, struct iovec *iov, int iovcnt)
1054 {
1055 
1056 	if (sc->esc_be == NULL)
1057 		return;
1058 
1059 	(void) netbe_send(sc->esc_be, iov, iovcnt);
1060 }
1061 
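/*
 * Mark processed descriptors done: set the DD status bit on every
 * descriptor in [head, tail) that requested report-status (RS), and note
 * whether any write-back happened so the caller can raise TXDW.
 */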
1062 static void
1063 e82545_transmit_done(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1064     uint16_t dsize, int *tdwb)
1065 {
1066 	union e1000_tx_udesc *dsc;
1067 
1068 	for ( ; head != tail; head = (head + 1) % dsize) {
1069 		dsc = &sc->esc_txdesc[head];
1070 		if (dsc->td.lower.data & E1000_TXD_CMD_RS) {
1071 			dsc->td.upper.data |= E1000_TXD_STAT_DD;
1072 			*tdwb = 1;
1073 		}
1074 	}
1075 }
1076 
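/*
 * Gather one packet from the transmit ring: walk descriptors from 'head'
 * until EOP (or the ring runs dry), collect the guest buffers into an
 * iovec, then apply any requested checksum offload, VLAN tag insertion
 * and TCP/UDP segmentation before handing the result to the backend.
 * Returns the number of descriptors consumed, or 0 if none were ready.
 */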
1077 static int
1078 e82545_transmit(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1079     uint16_t dsize, uint16_t *rhead, int *tdwb)
1080 {
1081 	uint8_t *hdr, *hdrp;
1082 	struct iovec iovb[I82545_MAX_TXSEGS + 2];
1083 	struct iovec tiov[I82545_MAX_TXSEGS + 2];
1084 	struct e1000_context_desc *cd;
1085 	struct ck_info ckinfo[2];
1086 	struct iovec *iov;
1087 	union  e1000_tx_udesc *dsc;
1088 	int desc, dtype, ntype, iovcnt, tcp, tso, paylen, seg, tiovcnt, pv;
1089 	unsigned hdrlen, vlen, pktlen, len, left, mss, now, nnow, nleft, pvoff;
1090 	uint32_t tcpsum, tcpseq;
1091 	uint16_t ipcs, tcpcs, ipid, ohead;
1092 	bool invalid;
1093 
1094 	ckinfo[0].ck_valid = ckinfo[1].ck_valid = 0;
1095 	iovcnt = 0;
1096 	ntype = 0;
1097 	tso = 0;
1098 	pktlen = 0;
1099 	ohead = head;
1100 	invalid = false;
1101 
1102 	/* iovb[0/1] may be used for writable copy of headers. */
1103 	iov = &iovb[2];
1104 
1105 	for (desc = 0; ; desc++, head = (head + 1) % dsize) {
1106 		if (head == tail) {
1107 			*rhead = head;
1108 			return (0);
1109 		}
1110 		dsc = &sc->esc_txdesc[head];
1111 		dtype = e82545_txdesc_type(dsc->td.lower.data);
1112 
1113 		if (desc == 0) {
1114 			switch (dtype) {
1115 			case E1000_TXD_TYP_C:
1116 				DPRINTF("tx ctxt desc idx %d: %016jx "
1117 				    "%08x%08x",
1118 				    head, dsc->td.buffer_addr,
1119 				    dsc->td.upper.data, dsc->td.lower.data);
1120 				/* Save context and return */
1121 				sc->esc_txctx = dsc->cd;
1122 				goto done;
1123 			case E1000_TXD_TYP_L:
1124 				DPRINTF("tx legacy desc idx %d: %08x%08x",
1125 				    head, dsc->td.upper.data, dsc->td.lower.data);
1126 				/*
1127 				 * legacy cksum start valid in first descriptor
1128 				 */
1129 				ntype = dtype;
1130 				ckinfo[0].ck_start = dsc->td.upper.fields.css;
1131 				break;
1132 			case E1000_TXD_TYP_D:
1133 				DPRINTF("tx data desc idx %d: %08x%08x",
1134 				    head, dsc->td.upper.data, dsc->td.lower.data);
1135 				ntype = dtype;
1136 				break;
1137 			default:
1138 				break;
1139 			}
1140 		} else {
1141 			/* Descriptor type must be consistent */
1142 			assert(dtype == ntype);
1143 			DPRINTF("tx next desc idx %d: %08x%08x",
1144 			    head, dsc->td.upper.data, dsc->td.lower.data);
1145 		}
1146 
1147 		len = (dtype == E1000_TXD_TYP_L) ? dsc->td.lower.flags.length :
1148 		    dsc->dd.lower.data & 0xFFFFF;
1149 
1150 		/* Strip checksum supplied by guest. */
1151 		if ((dsc->td.lower.data & E1000_TXD_CMD_EOP) != 0 &&
1152 		    (dsc->td.lower.data & E1000_TXD_CMD_IFCS) == 0) {
1153 			if (len <= 2) {
1154 				WPRINTF("final descriptor too short (%d) -- dropped",
1155 				    len);
1156 				invalid = true;
1157 			} else
1158 				len -= 2;
1159 		}
1160 
1161 		if (len > 0 && iovcnt < I82545_MAX_TXSEGS) {
1162 			iov[iovcnt].iov_base = paddr_guest2host(sc->esc_ctx,
1163 			    dsc->td.buffer_addr, len);
1164 			iov[iovcnt].iov_len = len;
1165 			iovcnt++;
1166 			pktlen += len;
1167 		}
1168 
1169 		/*
1170 		 * Pull out info that is valid in the final descriptor
1171 		 * and exit descriptor loop.
1172 		 */
1173 		if (dsc->td.lower.data & E1000_TXD_CMD_EOP) {
1174 			if (dtype == E1000_TXD_TYP_L) {
1175 				if (dsc->td.lower.data & E1000_TXD_CMD_IC) {
1176 					ckinfo[0].ck_valid = 1;
1177 					ckinfo[0].ck_off =
1178 					    dsc->td.lower.flags.cso;
1179 					ckinfo[0].ck_len = 0;
1180 				}
1181 			} else {
1182 				cd = &sc->esc_txctx;
1183 				if (dsc->dd.lower.data & E1000_TXD_CMD_TSE)
1184 					tso = 1;
1185 				if (dsc->dd.upper.fields.popts &
1186 				    E1000_TXD_POPTS_IXSM)
1187 					ckinfo[0].ck_valid = 1;
1188 				if (dsc->dd.upper.fields.popts &
1189 				    E1000_TXD_POPTS_IXSM || tso) {
1190 					ckinfo[0].ck_start =
1191 					    cd->lower_setup.ip_fields.ipcss;
1192 					ckinfo[0].ck_off =
1193 					    cd->lower_setup.ip_fields.ipcso;
1194 					ckinfo[0].ck_len =
1195 					    cd->lower_setup.ip_fields.ipcse;
1196 				}
1197 				if (dsc->dd.upper.fields.popts &
1198 				    E1000_TXD_POPTS_TXSM)
1199 					ckinfo[1].ck_valid = 1;
1200 				if (dsc->dd.upper.fields.popts &
1201 				    E1000_TXD_POPTS_TXSM || tso) {
1202 					ckinfo[1].ck_start =
1203 					    cd->upper_setup.tcp_fields.tucss;
1204 					ckinfo[1].ck_off =
1205 					    cd->upper_setup.tcp_fields.tucso;
1206 					ckinfo[1].ck_len =
1207 					    cd->upper_setup.tcp_fields.tucse;
1208 				}
1209 			}
1210 			break;
1211 		}
1212 	}
1213 
1214 	if (invalid)
1215 		goto done;
1216 
1217 	if (iovcnt > I82545_MAX_TXSEGS) {
1218 		WPRINTF("tx too many descriptors (%d > %d) -- dropped",
1219 		    iovcnt, I82545_MAX_TXSEGS);
1220 		goto done;
1221 	}
1222 
1223 	hdrlen = vlen = 0;
1224 	/* Estimate writable space for VLAN header insertion. */
1225 	if ((sc->esc_CTRL & E1000_CTRL_VME) &&
1226 	    (dsc->td.lower.data & E1000_TXD_CMD_VLE)) {
1227 		hdrlen = ETHER_ADDR_LEN*2;
1228 		vlen = ETHER_VLAN_ENCAP_LEN;
1229 	}
1230 	if (!tso) {
1231 		/* Estimate required writable space for checksums. */
1232 		if (ckinfo[0].ck_valid)
1233 			hdrlen = MAX(hdrlen, ckinfo[0].ck_off + 2U);
1234 		if (ckinfo[1].ck_valid)
1235 			hdrlen = MAX(hdrlen, ckinfo[1].ck_off + 2U);
1236 		/* Round up writable space to the first vector. */
1237 		if (hdrlen != 0 && iov[0].iov_len > hdrlen &&
1238 		    iov[0].iov_len < hdrlen + 100)
1239 			hdrlen = iov[0].iov_len;
1240 	} else {
1241 		/* In the case of TSO, the header length is provided by software. */
1242 		hdrlen = sc->esc_txctx.tcp_seg_setup.fields.hdr_len;
1243 
1244 		/*
1245 		 * Cap the header length at 240 based on 7.2.4.5 of
1246 		 * the Intel 82576EB (Rev 2.63) datasheet.
1247 		 */
1248 		if (hdrlen > 240) {
1249 			WPRINTF("TSO hdrlen too large: %d", hdrlen);
1250 			goto done;
1251 		}
1252 
1253 		/*
1254 		 * If VLAN insertion is requested, ensure the header
1255 		 * at least holds the amount of data copied during
1256 		 * VLAN insertion below.
1257 		 *
1258 		 * XXX: Realistic packets will include a full Ethernet
1259 		 * header before the IP header at ckinfo[0].ck_start,
1260 		 * but this check is sufficient to prevent
1261 		 * out-of-bounds access below.
1262 		 */
1263 		if (vlen != 0 && hdrlen < ETHER_ADDR_LEN*2) {
1264 			WPRINTF("TSO hdrlen too small for vlan insertion "
1265 			    "(%d vs %d) -- dropped", hdrlen,
1266 			    ETHER_ADDR_LEN*2);
1267 			goto done;
1268 		}
1269 
1270 		/*
1271 		 * Ensure that the header length covers the used fields
1272 		 * in the IP and TCP headers as well as the IP and TCP
1273 		 * checksums.  The following fields are accessed below:
1274 		 *
1275 		 * Header | Field | Offset | Length
1276 		 * -------+-------+--------+-------
1277 		 * IPv4   | len   | 2      | 2
1278 		 * IPv4   | ID    | 4      | 2
1279 		 * IPv6   | len   | 4      | 2
1280 		 * TCP    | seq # | 4      | 4
1281 		 * TCP    | flags | 13     | 1
1282 		 * UDP    | len   | 4      | 4
1283 		 */
1284 		if (hdrlen < ckinfo[0].ck_start + 6U ||
1285 		    hdrlen < ckinfo[0].ck_off + 2U) {
1286 			WPRINTF("TSO hdrlen too small for IP fields (%d) "
1287 			    "-- dropped", hdrlen);
1288 			goto done;
1289 		}
1290 		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) {
1291 			if (hdrlen < ckinfo[1].ck_start + 14U) {
1292 				WPRINTF("TSO hdrlen too small for TCP fields "
1293 				    "(%d) -- dropped", hdrlen);
1294 				goto done;
1295 			}
1296 		} else {
1297 			if (hdrlen < ckinfo[1].ck_start + 8U) {
1298 				WPRINTF("TSO hdrlen too small for UDP fields "
1299 				    "(%d) -- dropped", hdrlen);
1300 				goto done;
1301 			}
1302 		}
1303 		if (ckinfo[1].ck_valid && hdrlen < ckinfo[1].ck_off + 2U) {
1304 			WPRINTF("TSO hdrlen too small for TCP/UDP fields "
1305 			    "(%d) -- dropped", hdrlen);
1306 			goto done;
1307 		}
1313 	}
1314 
1315 	if (pktlen < hdrlen + vlen) {
1316 		WPRINTF("packet too small for writable header");
1317 		goto done;
1318 	}
1319 
1320 	/* Allocate, fill and prepend writable header vector. */
1321 	if (hdrlen + vlen != 0) {
1322 		hdr = __builtin_alloca(hdrlen + vlen);
1323 		hdr += vlen;
1324 		for (left = hdrlen, hdrp = hdr; left > 0;
1325 		    left -= now, hdrp += now) {
1326 			now = MIN(left, iov->iov_len);
1327 			memcpy(hdrp, iov->iov_base, now);
1328 #ifdef	__FreeBSD__
1329 			iov->iov_base = (uint8_t *)iov->iov_base + now;
1330 #else
1331 			/*
1332 			 * The type of iov_base changed in SUS (XPG4v2) from
1333 			 * caddr_t (char * - note signed) to 'void *'. On
1334 			 * illumos, bhyve is not currently compiled with XPG4v2
1335 			 * or higher, and so we can't cast the RHS to unsigned.
1336 			 * error: pointer targets in assignment differ in
1337 			 *	  signedness
1338 			 * This also means that we need to apply some casts to
1339 			 * (caddr_t) below.
1340 			 */
1341 			iov->iov_base += now;
1342 #endif
1343 			iov->iov_len -= now;
1344 			if (iov->iov_len == 0) {
1345 				iov++;
1346 				iovcnt--;
1347 			}
1348 		}
1349 		iov--;
1350 		iovcnt++;
1351 #ifdef __FreeBSD__
1352 		iov->iov_base = hdr;
1353 #else
1354 		iov->iov_base = (caddr_t)hdr;
1355 #endif
1356 		iov->iov_len = hdrlen;
1357 	} else
1358 		hdr = NULL;
1359 
1360 	/* Insert VLAN tag. */
1361 	if (vlen != 0) {
1362 		hdr -= ETHER_VLAN_ENCAP_LEN;
1363 		memmove(hdr, hdr + ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN*2);
1364 		hdrlen += ETHER_VLAN_ENCAP_LEN;
1365 		hdr[ETHER_ADDR_LEN*2 + 0] = sc->esc_VET >> 8;
1366 		hdr[ETHER_ADDR_LEN*2 + 1] = sc->esc_VET & 0xff;
1367 		hdr[ETHER_ADDR_LEN*2 + 2] = dsc->td.upper.fields.special >> 8;
1368 		hdr[ETHER_ADDR_LEN*2 + 3] = dsc->td.upper.fields.special & 0xff;
1369 #ifdef __FreeBSD__
1370 		iov->iov_base = hdr;
1371 #else
1372 		iov->iov_base = (caddr_t)hdr;
1373 #endif
1374 		iov->iov_len += ETHER_VLAN_ENCAP_LEN;
1375 		/* Correct checksum offsets after VLAN tag insertion. */
1376 		ckinfo[0].ck_start += ETHER_VLAN_ENCAP_LEN;
1377 		ckinfo[0].ck_off += ETHER_VLAN_ENCAP_LEN;
1378 		if (ckinfo[0].ck_len != 0)
1379 			ckinfo[0].ck_len += ETHER_VLAN_ENCAP_LEN;
1380 		ckinfo[1].ck_start += ETHER_VLAN_ENCAP_LEN;
1381 		ckinfo[1].ck_off += ETHER_VLAN_ENCAP_LEN;
1382 		if (ckinfo[1].ck_len != 0)
1383 			ckinfo[1].ck_len += ETHER_VLAN_ENCAP_LEN;
1384 	}
1385 
1386 	/* Simple non-TSO case. */
1387 	if (!tso) {
1388 		/* Calculate checksums and transmit. */
1389 		if (ckinfo[0].ck_valid)
1390 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[0]);
1391 		if (ckinfo[1].ck_valid)
1392 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[1]);
1393 		e82545_transmit_backend(sc, iov, iovcnt);
1394 		goto done;
1395 	}
1396 
1397 	/* Doing TSO. */
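	/*
	 * The copied header is reused for every segment: the IP length/ID,
	 * the TCP sequence number and FIN/PUSH flags (or the UDP length)
	 * are patched per segment, and the IP and TCP/UDP checksums are
	 * recomputed from the partial values saved just below.
	 */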
1398 	tcp = (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) != 0;
1399 	mss = sc->esc_txctx.tcp_seg_setup.fields.mss;
1400 	paylen = (sc->esc_txctx.cmd_and_length & 0x000fffff);
1401 	DPRINTF("tx %s segmentation offload %d+%d/%u bytes %d iovs",
1402 	    tcp ? "TCP" : "UDP", hdrlen, paylen, mss, iovcnt);
1403 	ipid = ntohs(*(uint16_t *)&hdr[ckinfo[0].ck_start + 4]);
1404 	tcpseq = 0;
1405 	if (tcp)
1406 		tcpseq = ntohl(*(uint32_t *)&hdr[ckinfo[1].ck_start + 4]);
1407 	ipcs = *(uint16_t *)&hdr[ckinfo[0].ck_off];
1408 	tcpcs = 0;
1409 	if (ckinfo[1].ck_valid)	/* Save partial pseudo-header checksum. */
1410 		tcpcs = *(uint16_t *)&hdr[ckinfo[1].ck_off];
1411 	pv = 1;
1412 	pvoff = 0;
1413 	for (seg = 0, left = paylen; left > 0; seg++, left -= now) {
1414 		now = MIN(left, mss);
1415 
1416 		/* Construct IOVs for the segment. */
1417 		/* Include whole original header. */
1418 #ifdef __FreeBSD__
1419 		tiov[0].iov_base = hdr;
1420 #else
1421 		tiov[0].iov_base = (caddr_t)hdr;
1422 #endif
1423 		tiov[0].iov_len = hdrlen;
1424 		tiovcnt = 1;
1425 		/* Include respective part of payload IOV. */
1426 		for (nleft = now; pv < iovcnt && nleft > 0; nleft -= nnow) {
1427 			nnow = MIN(nleft, iov[pv].iov_len - pvoff);
1428 #ifdef	__FreeBSD__
1429 			tiov[tiovcnt].iov_base = (uint8_t *)iov[pv].iov_base +
1430 			    pvoff;
1431 #else
1432 			tiov[tiovcnt].iov_base = (caddr_t)iov[pv].iov_base + pvoff;
1433 #endif
1434 			tiov[tiovcnt++].iov_len = nnow;
1435 			if (pvoff + nnow == iov[pv].iov_len) {
1436 				pv++;
1437 				pvoff = 0;
1438 			} else
1439 				pvoff += nnow;
1440 		}
1441 		DPRINTF("tx segment %d %d+%d bytes %d iovs",
1442 		    seg, hdrlen, now, tiovcnt);
1443 
1444 		/* Update IP header. */
1445 		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_IP) {
1446 			/* IPv4 -- set length and ID */
1447 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 2] =
1448 			    htons(hdrlen - ckinfo[0].ck_start + now);
1449 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1450 			    htons(ipid + seg);
1451 		} else {
1452 			/* IPv6 -- set length */
1453 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1454 			    htons(hdrlen - ckinfo[0].ck_start - 40 +
1455 				  now);
1456 		}
1457 
1458 		/* Update pseudo-header checksum. */
1459 		tcpsum = tcpcs;
1460 		tcpsum += htons(hdrlen - ckinfo[1].ck_start + now);
1461 
1462 		/* Update TCP/UDP headers. */
1463 		if (tcp) {
1464 			/* Update sequence number and FIN/PUSH flags. */
1465 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1466 			    htonl(tcpseq + paylen - left);
1467 			if (now < left) {
1468 				hdr[ckinfo[1].ck_start + 13] &=
1469 				    ~(TH_FIN | TH_PUSH);
1470 			}
1471 		} else {
1472 			/* Update payload length. */
1473 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1474 			    hdrlen - ckinfo[1].ck_start + now;
1475 		}
1476 
1477 		/* Calculate checksums and transmit. */
1478 		if (ckinfo[0].ck_valid) {
1479 			*(uint16_t *)&hdr[ckinfo[0].ck_off] = ipcs;
1480 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[0]);
1481 		}
1482 		if (ckinfo[1].ck_valid) {
1483 			*(uint16_t *)&hdr[ckinfo[1].ck_off] =
1484 			    e82545_carry(tcpsum);
1485 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[1]);
1486 		}
1487 		e82545_transmit_backend(sc, tiov, tiovcnt);
1488 	}
1489 
1490 done:
1491 	head = (head + 1) % dsize;
1492 	e82545_transmit_done(sc, ohead, head, dsize, tdwb);
1493 
1494 	*rhead = head;
1495 	return (desc + 1);
1496 }
1497 
1498 static void
1499 e82545_tx_run(struct e82545_softc *sc)
1500 {
1501 	uint32_t cause;
1502 	uint16_t head, rhead, tail, size;
1503 	int lim, tdwb, sent;
1504 
1505 	size = sc->esc_TDLEN / 16;
1506 	if (size == 0)
1507 		return;
1508 
1509 	head = sc->esc_TDH % size;
1510 	tail = sc->esc_TDT % size;
1511 	DPRINTF("tx_run: head %x, rhead %x, tail %x",
1512 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1513 
1514 	pthread_mutex_unlock(&sc->esc_mtx);
1515 	rhead = head;
1516 	tdwb = 0;
1517 	for (lim = size / 4; sc->esc_tx_enabled && lim > 0; lim -= sent) {
1518 		sent = e82545_transmit(sc, head, tail, size, &rhead, &tdwb);
1519 		if (sent == 0)
1520 			break;
1521 		head = rhead;
1522 	}
1523 	pthread_mutex_lock(&sc->esc_mtx);
1524 
1525 	sc->esc_TDH = head;
1526 	sc->esc_TDHr = rhead;
1527 	cause = 0;
1528 	if (tdwb)
1529 		cause |= E1000_ICR_TXDW;
1530 	if (lim != size / 4 && sc->esc_TDH == sc->esc_TDT)
1531 		cause |= E1000_ICR_TXQE;
1532 	if (cause)
1533 		e82545_icr_assert(sc, cause);
1534 
1535 	DPRINTF("tx_run done: head %x, rhead %x, tail %x",
1536 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1537 }
1538 
1539 static _Noreturn void *
1540 e82545_tx_thread(void *param)
1541 {
1542 	struct e82545_softc *sc = param;
1543 
1544 	pthread_mutex_lock(&sc->esc_mtx);
1545 	for (;;) {
1546 		while (!sc->esc_tx_enabled || sc->esc_TDHr == sc->esc_TDT) {
1547 			if (sc->esc_tx_enabled && sc->esc_TDHr != sc->esc_TDT)
1548 				break;
1549 			sc->esc_tx_active = 0;
1550 			if (sc->esc_tx_enabled == 0)
1551 				pthread_cond_signal(&sc->esc_tx_cond);
1552 			pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1553 		}
1554 		sc->esc_tx_active = 1;
1555 
1556 		/* Process some tx descriptors.  Lock dropped inside. */
1557 		e82545_tx_run(sc);
1558 	}
1559 }
1560 
1561 static void
1562 e82545_tx_start(struct e82545_softc *sc)
1563 {
1564 
1565 	if (sc->esc_tx_active == 0)
1566 		pthread_cond_signal(&sc->esc_tx_cond);
1567 }
1568 
1569 static void
1570 e82545_tx_enable(struct e82545_softc *sc)
1571 {
1572 
1573 	sc->esc_tx_enabled = 1;
1574 }
1575 
1576 static void
1577 e82545_tx_disable(struct e82545_softc *sc)
1578 {
1579 
1580 	sc->esc_tx_enabled = 0;
1581 	while (sc->esc_tx_active)
1582 		pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1583 }
1584 
1585 static void
1586 e82545_rx_enable(struct e82545_softc *sc)
1587 {
1588 
1589 	sc->esc_rx_enabled = 1;
1590 }
1591 
1592 static void
1593 e82545_rx_disable(struct e82545_softc *sc)
1594 {
1595 
1596 	sc->esc_rx_enabled = 0;
1597 	while (sc->esc_rx_active)
1598 		pthread_cond_wait(&sc->esc_rx_cond, &sc->esc_mtx);
1599 }
1600 
1601 static void
1602 e82545_write_ra(struct e82545_softc *sc, int reg, uint32_t wval)
1603 {
1604 	struct eth_uni *eu;
1605 	int idx;
1606 
1607 	idx = reg >> 1;
1608 	assert(idx <= E82545_RAR_MAX);
1609 
1610 	eu = &sc->esc_uni[idx];
1611 
1612 	if (reg & 0x1) {
1613 		/* RAH */
1614 		eu->eu_valid = ((wval & E1000_RAH_AV) == E1000_RAH_AV);
1615 		eu->eu_addrsel = (wval >> 16) & 0x3;
1616 		eu->eu_eth.octet[5] = wval >> 8;
1617 		eu->eu_eth.octet[4] = wval;
1618 	} else {
1619 		/* RAL */
1620 		eu->eu_eth.octet[3] = wval >> 24;
1621 		eu->eu_eth.octet[2] = wval >> 16;
1622 		eu->eu_eth.octet[1] = wval >> 8;
1623 		eu->eu_eth.octet[0] = wval;
1624 	}
1625 }
1626 
1627 static uint32_t
1628 e82545_read_ra(struct e82545_softc *sc, int reg)
1629 {
1630 	struct eth_uni *eu;
1631 	uint32_t retval;
1632 	int idx;
1633 
1634 	idx = reg >> 1;
1635 	assert(idx <= E82545_RAR_MAX);
1636 
1637 	eu = &sc->esc_uni[idx];
1638 
1639 	if (reg & 0x1) {
1640 		/* RAH */
1641 		retval = (eu->eu_valid << 31) |
1642 			 (eu->eu_addrsel << 16) |
1643 			 (eu->eu_eth.octet[5] << 8) |
1644 			 eu->eu_eth.octet[4];
1645 	} else {
1646 		/* RAL */
1647 		retval = (eu->eu_eth.octet[3] << 24) |
1648 			 (eu->eu_eth.octet[2] << 16) |
1649 			 (eu->eu_eth.octet[1] << 8) |
1650 			 eu->eu_eth.octet[0];
1651 	}
1652 
1653 	return (retval);
1654 }
1655 
1656 static void
1657 e82545_write_register(struct e82545_softc *sc, uint32_t offset, uint32_t value)
1658 {
1659 	int ridx;
1660 
1661 	if (offset & 0x3) {
1662 		DPRINTF("Unaligned register write offset:0x%x value:0x%x", offset, value);
1663 		return;
1664 	}
1665 	DPRINTF("Register write: 0x%x value: 0x%x", offset, value);
1666 
1667 	switch (offset) {
1668 	case E1000_CTRL:
1669 	case E1000_CTRL_DUP:
1670 		e82545_devctl(sc, value);
1671 		break;
1672 	case E1000_FCAL:
1673 		sc->esc_FCAL = value;
1674 		break;
1675 	case E1000_FCAH:
1676 		sc->esc_FCAH = value & ~0xFFFF0000;
1677 		break;
1678 	case E1000_FCT:
1679 		sc->esc_FCT = value & ~0xFFFF0000;
1680 		break;
1681 	case E1000_VET:
1682 		sc->esc_VET = value & ~0xFFFF0000;
1683 		break;
1684 	case E1000_FCTTV:
1685 		sc->esc_FCTTV = value & ~0xFFFF0000;
1686 		break;
1687 	case E1000_LEDCTL:
1688 		sc->esc_LEDCTL = value & ~0x30303000;
1689 		break;
1690 	case E1000_PBA:
1691 		sc->esc_PBA = value & 0x0000FF80;
1692 		break;
1693 	case E1000_ICR:
1694 	case E1000_ITR:
1695 	case E1000_ICS:
1696 	case E1000_IMS:
1697 	case E1000_IMC:
1698 		e82545_intr_write(sc, offset, value);
1699 		break;
1700 	case E1000_RCTL:
1701 		e82545_rx_ctl(sc, value);
1702 		break;
1703 	case E1000_FCRTL:
1704 		sc->esc_FCRTL = value & ~0xFFFF0007;
1705 		break;
1706 	case E1000_FCRTH:
1707 		sc->esc_FCRTH = value & ~0xFFFF0007;
1708 		break;
1709 	case E1000_RDBAL(0):
1710 		sc->esc_RDBAL = value & ~0xF;
1711 		if (sc->esc_rx_enabled) {
1712 			/* Apparently legal: update cached address */
1713 			e82545_rx_update_rdba(sc);
1714 		}
1715 		break;
1716 	case E1000_RDBAH(0):
1717 		assert(!sc->esc_rx_enabled);
1718 		sc->esc_RDBAH = value;
1719 		break;
1720 	case E1000_RDLEN(0):
1721 		assert(!sc->esc_rx_enabled);
1722 		sc->esc_RDLEN = value & ~0xFFF0007F;
1723 		break;
1724 	case E1000_RDH(0):
1725 		/* XXX should only ever be zero ? Range check ? */
1726 		sc->esc_RDH = value;
1727 		break;
1728 	case E1000_RDT(0):
1729 		/* XXX if this opens up the rx ring, do something ? */
1730 		sc->esc_RDT = value;
1731 		break;
1732 	case E1000_RDTR:
1733 		/* ignore FPD bit 31 */
1734 		sc->esc_RDTR = value & ~0xFFFF0000;
1735 		break;
1736 	case E1000_RXDCTL(0):
1737 		sc->esc_RXDCTL = value & ~0xFEC0C0C0;
1738 		break;
1739 	case E1000_RADV:
1740 		sc->esc_RADV = value & ~0xFFFF0000;
1741 		break;
1742 	case E1000_RSRPD:
1743 		sc->esc_RSRPD = value & ~0xFFFFF000;
1744 		break;
1745 	case E1000_RXCSUM:
1746 		sc->esc_RXCSUM = value & ~0xFFFFF800;
1747 		break;
1748 	case E1000_TXCW:
1749 		sc->esc_TXCW = value & ~0x3FFF0000;
1750 		break;
1751 	case E1000_TCTL:
1752 		e82545_tx_ctl(sc, value);
1753 		break;
1754 	case E1000_TIPG:
1755 		sc->esc_TIPG = value;
1756 		break;
1757 	case E1000_AIT:
1758 		sc->esc_AIT = value;
1759 		break;
1760 	case E1000_TDBAL(0):
1761 		sc->esc_TDBAL = value & ~0xF;
1762 		if (sc->esc_tx_enabled)
1763 			e82545_tx_update_tdba(sc);
1764 		break;
1765 	case E1000_TDBAH(0):
1766 		sc->esc_TDBAH = value;
1767 		if (sc->esc_tx_enabled)
1768 			e82545_tx_update_tdba(sc);
1769 		break;
1770 	case E1000_TDLEN(0):
1771 		sc->esc_TDLEN = value & ~0xFFF0007F;
1772 		if (sc->esc_tx_enabled)
1773 			e82545_tx_update_tdba(sc);
1774 		break;
1775 	case E1000_TDH(0):
1776 		if (sc->esc_tx_enabled) {
1777 			WPRINTF("ignoring write to TDH while transmit enabled");
1778 			break;
1779 		}
1780 		if (value != 0) {
1781 			WPRINTF("ignoring non-zero value written to TDH");
1782 			break;
1783 		}
1784 		sc->esc_TDHr = sc->esc_TDH = value;
1785 		break;
1786 	case E1000_TDT(0):
1787 		sc->esc_TDT = value;
1788 		if (sc->esc_tx_enabled)
1789 			e82545_tx_start(sc);
1790 		break;
1791 	case E1000_TIDV:
1792 		sc->esc_TIDV = value & ~0xFFFF0000;
1793 		break;
1794 	case E1000_TXDCTL(0):
1795 		//assert(!sc->esc_tx_enabled);
1796 		sc->esc_TXDCTL = value & ~0xC0C0C0;
1797 		break;
1798 	case E1000_TADV:
1799 		sc->esc_TADV = value & ~0xFFFF0000;
1800 		break;
1801 	case E1000_RAL(0) ... E1000_RAH(15):
1802 		/* convert to u32 offset */
1803 		ridx = (offset - E1000_RAL(0)) >> 2;
1804 		e82545_write_ra(sc, ridx, value);
1805 		break;
1806 	case E1000_MTA ... (E1000_MTA + (127*4)):
1807 		sc->esc_fmcast[(offset - E1000_MTA) >> 2] = value;
1808 		break;
1809 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1810 		sc->esc_fvlan[(offset - E1000_VFTA) >> 2] = value;
1811 		break;
1812 	case E1000_EECD:
1813 	{
1814 		//DPRINTF("EECD write 0x%x -> 0x%x", sc->eeprom_control, value);
1815 		/* edge triggered low->high */
1816 		uint32_t eecd_strobe = ((sc->eeprom_control & E1000_EECD_SK) ?
1817 			0 : (value & E1000_EECD_SK));
1818 		uint32_t eecd_mask = (E1000_EECD_SK|E1000_EECD_CS|
1819 					E1000_EECD_DI|E1000_EECD_REQ);
1820 		sc->eeprom_control &= ~eecd_mask;
1821 		sc->eeprom_control |= (value & eecd_mask);
1822 		/* grant/revoke immediately */
1823 		if (value & E1000_EECD_REQ) {
1824 			sc->eeprom_control |= E1000_EECD_GNT;
1825 		} else {
1826 			sc->eeprom_control &= ~E1000_EECD_GNT;
1827 		}
1828 		if (eecd_strobe && (sc->eeprom_control & E1000_EECD_CS)) {
1829 			e82545_eecd_strobe(sc);
1830 		}
1831 		return;
1832 	}
1833 	case E1000_MDIC:
1834 	{
1835 		uint8_t reg_addr = (uint8_t)((value & E1000_MDIC_REG_MASK) >>
1836 						E1000_MDIC_REG_SHIFT);
1837 		uint8_t phy_addr = (uint8_t)((value & E1000_MDIC_PHY_MASK) >>
1838 						E1000_MDIC_PHY_SHIFT);
1839 		sc->mdi_control =
1840 			(value & ~(E1000_MDIC_ERROR|E1000_MDIC_DEST));
1841 		if ((value & E1000_MDIC_READY) != 0) {
1842 			DPRINTF("Incorrect MDIC ready bit: 0x%x", value);
1843 			return;
1844 		}
1845 		switch (value & E82545_MDIC_OP_MASK) {
1846 		case E1000_MDIC_OP_READ:
1847 			sc->mdi_control &= ~E82545_MDIC_DATA_MASK;
1848 			sc->mdi_control |= e82545_read_mdi(sc, reg_addr, phy_addr);
1849 			break;
1850 		case E1000_MDIC_OP_WRITE:
1851 			e82545_write_mdi(sc, reg_addr, phy_addr,
1852 				value & E82545_MDIC_DATA_MASK);
1853 			break;
1854 		default:
1855 			DPRINTF("Unknown MDIC op: 0x%x", value);
1856 			return;
1857 		}
1858 		/* TODO: barrier? */
1859 		sc->mdi_control |= E1000_MDIC_READY;
1860 		if (value & E82545_MDIC_IE) {
1861 			// TODO: generate interrupt
1862 		}
1863 		return;
1864 	}
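	/* Ignore writes to the read-only STATUS and unimplemented MANC. */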
1865 	case E1000_MANC:
1866 	case E1000_STATUS:
1867 		return;
1868 	default:
1869 		DPRINTF("Unknown write register: 0x%x value:0x%x", offset, value);
1870 		return;
1871 	}
1872 }
1873 
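/*
 * Handle a 32-bit register read.  Offsets must be 4-byte aligned; unknown
 * or unimplemented registers read as zero.
 */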
1874 static uint32_t
1875 e82545_read_register(struct e82545_softc *sc, uint32_t offset)
1876 {
1877 	uint32_t retval;
1878 	int ridx;
1879 
1880 	if (offset & 0x3) {
1881 		DPRINTF("Unaligned register read offset:0x%x", offset);
1882 		return 0;
1883 	}
1884 
1885 	DPRINTF("Register read: 0x%x", offset);
1886 
1887 	switch (offset) {
1888 	case E1000_CTRL:
1889 		retval = sc->esc_CTRL;
1890 		break;
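	/* The link is always reported up, full duplex, at 1000 Mb/s. */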
1891 	case E1000_STATUS:
1892 		retval = E1000_STATUS_FD | E1000_STATUS_LU |
1893 		    E1000_STATUS_SPEED_1000;
1894 		break;
1895 	case E1000_FCAL:
1896 		retval = sc->esc_FCAL;
1897 		break;
1898 	case E1000_FCAH:
1899 		retval = sc->esc_FCAH;
1900 		break;
1901 	case E1000_FCT:
1902 		retval = sc->esc_FCT;
1903 		break;
1904 	case E1000_VET:
1905 		retval = sc->esc_VET;
1906 		break;
1907 	case E1000_FCTTV:
1908 		retval = sc->esc_FCTTV;
1909 		break;
1910 	case E1000_LEDCTL:
1911 		retval = sc->esc_LEDCTL;
1912 		break;
1913 	case E1000_PBA:
1914 		retval = sc->esc_PBA;
1915 		break;
1916 	case E1000_ICR:
1917 	case E1000_ITR:
1918 	case E1000_ICS:
1919 	case E1000_IMS:
1920 	case E1000_IMC:
1921 		retval = e82545_intr_read(sc, offset);
1922 		break;
1923 	case E1000_RCTL:
1924 		retval = sc->esc_RCTL;
1925 		break;
1926 	case E1000_FCRTL:
1927 		retval = sc->esc_FCRTL;
1928 		break;
1929 	case E1000_FCRTH:
1930 		retval = sc->esc_FCRTH;
1931 		break;
1932 	case E1000_RDBAL(0):
1933 		retval = sc->esc_RDBAL;
1934 		break;
1935 	case E1000_RDBAH(0):
1936 		retval = sc->esc_RDBAH;
1937 		break;
1938 	case E1000_RDLEN(0):
1939 		retval = sc->esc_RDLEN;
1940 		break;
1941 	case E1000_RDH(0):
1942 		retval = sc->esc_RDH;
1943 		break;
1944 	case E1000_RDT(0):
1945 		retval = sc->esc_RDT;
1946 		break;
1947 	case E1000_RDTR:
1948 		retval = sc->esc_RDTR;
1949 		break;
1950 	case E1000_RXDCTL(0):
1951 		retval = sc->esc_RXDCTL;
1952 		break;
1953 	case E1000_RADV:
1954 		retval = sc->esc_RADV;
1955 		break;
1956 	case E1000_RSRPD:
1957 		retval = sc->esc_RSRPD;
1958 		break;
1959 	case E1000_RXCSUM:
1960 		retval = sc->esc_RXCSUM;
1961 		break;
1962 	case E1000_TXCW:
1963 		retval = sc->esc_TXCW;
1964 		break;
1965 	case E1000_TCTL:
1966 		retval = sc->esc_TCTL;
1967 		break;
1968 	case E1000_TIPG:
1969 		retval = sc->esc_TIPG;
1970 		break;
1971 	case E1000_AIT:
1972 		retval = sc->esc_AIT;
1973 		break;
1974 	case E1000_TDBAL(0):
1975 		retval = sc->esc_TDBAL;
1976 		break;
1977 	case E1000_TDBAH(0):
1978 		retval = sc->esc_TDBAH;
1979 		break;
1980 	case E1000_TDLEN(0):
1981 		retval = sc->esc_TDLEN;
1982 		break;
1983 	case E1000_TDH(0):
1984 		retval = sc->esc_TDH;
1985 		break;
1986 	case E1000_TDT(0):
1987 		retval = sc->esc_TDT;
1988 		break;
1989 	case E1000_TIDV:
1990 		retval = sc->esc_TIDV;
1991 		break;
1992 	case E1000_TXDCTL(0):
1993 		retval = sc->esc_TXDCTL;
1994 		break;
1995 	case E1000_TADV:
1996 		retval = sc->esc_TADV;
1997 		break;
1998 	case E1000_RAL(0) ... E1000_RAH(15):
1999 		/* convert to u32 offset */
2000 		ridx = (offset - E1000_RAL(0)) >> 2;
2001 		retval = e82545_read_ra(sc, ridx);
2002 		break;
2003 	case E1000_MTA ... (E1000_MTA + (127*4)):
2004 		retval = sc->esc_fmcast[(offset - E1000_MTA) >> 2];
2005 		break;
2006 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
2007 		retval = sc->esc_fvlan[(offset - E1000_VFTA) >> 2];
2008 		break;
2009 	case E1000_EECD:
2010 		//DPRINTF("EECD read %x", sc->eeprom_control);
2011 		retval = sc->eeprom_control;
2012 		break;
2013 	case E1000_MDIC:
2014 		retval = sc->mdi_control;
2015 		break;
2016 	case E1000_MANC:
2017 		retval = 0;
2018 		break;
2019 	/* stats that we emulate. */
2020 	case E1000_MPC:
2021 		retval = sc->missed_pkt_count;
2022 		break;
2023 	case E1000_PRC64:
2024 		retval = sc->pkt_rx_by_size[0];
2025 		break;
2026 	case E1000_PRC127:
2027 		retval = sc->pkt_rx_by_size[1];
2028 		break;
2029 	case E1000_PRC255:
2030 		retval = sc->pkt_rx_by_size[2];
2031 		break;
2032 	case E1000_PRC511:
2033 		retval = sc->pkt_rx_by_size[3];
2034 		break;
2035 	case E1000_PRC1023:
2036 		retval = sc->pkt_rx_by_size[4];
2037 		break;
2038 	case E1000_PRC1522:
2039 		retval = sc->pkt_rx_by_size[5];
2040 		break;
2041 	case E1000_GPRC:
2042 		retval = sc->good_pkt_rx_count;
2043 		break;
2044 	case E1000_BPRC:
2045 		retval = sc->bcast_pkt_rx_count;
2046 		break;
2047 	case E1000_MPRC:
2048 		retval = sc->mcast_pkt_rx_count;
2049 		break;
2050 	case E1000_GPTC:
2051 	case E1000_TPT:
2052 		retval = sc->good_pkt_tx_count;
2053 		break;
2054 	case E1000_GORCL:
2055 		retval = (uint32_t)sc->good_octets_rx;
2056 		break;
2057 	case E1000_GORCH:
2058 		retval = (uint32_t)(sc->good_octets_rx >> 32);
2059 		break;
2060 	case E1000_TOTL:
2061 	case E1000_GOTCL:
2062 		retval = (uint32_t)sc->good_octets_tx;
2063 		break;
2064 	case E1000_TOTH:
2065 	case E1000_GOTCH:
2066 		retval = (uint32_t)(sc->good_octets_tx >> 32);
2067 		break;
2068 	case E1000_ROC:
2069 		retval = sc->oversize_rx_count;
2070 		break;
2071 	case E1000_TORL:
2072 		retval = (uint32_t)(sc->good_octets_rx + sc->missed_octets);
2073 		break;
2074 	case E1000_TORH:
2075 		retval = (uint32_t)((sc->good_octets_rx +
2076 		    sc->missed_octets) >> 32);
2077 		break;
2078 	case E1000_TPR:
2079 		retval = sc->good_pkt_rx_count + sc->missed_pkt_count +
2080 		    sc->oversize_rx_count;
2081 		break;
2082 	case E1000_PTC64:
2083 		retval = sc->pkt_tx_by_size[0];
2084 		break;
2085 	case E1000_PTC127:
2086 		retval = sc->pkt_tx_by_size[1];
2087 		break;
2088 	case E1000_PTC255:
2089 		retval = sc->pkt_tx_by_size[2];
2090 		break;
2091 	case E1000_PTC511:
2092 		retval = sc->pkt_tx_by_size[3];
2093 		break;
2094 	case E1000_PTC1023:
2095 		retval = sc->pkt_tx_by_size[4];
2096 		break;
2097 	case E1000_PTC1522:
2098 		retval = sc->pkt_tx_by_size[5];
2099 		break;
2100 	case E1000_MPTC:
2101 		retval = sc->mcast_pkt_tx_count;
2102 		break;
2103 	case E1000_BPTC:
2104 		retval = sc->bcast_pkt_tx_count;
2105 		break;
2106 	case E1000_TSCTC:
2107 		retval = sc->tso_tx_count;
2108 		break;
2109 	/* stats that are always 0. */
2110 	case E1000_CRCERRS:
2111 	case E1000_ALGNERRC:
2112 	case E1000_SYMERRS:
2113 	case E1000_RXERRC:
2114 	case E1000_SCC:
2115 	case E1000_ECOL:
2116 	case E1000_MCC:
2117 	case E1000_LATECOL:
2118 	case E1000_COLC:
2119 	case E1000_DC:
2120 	case E1000_TNCRS:
2121 	case E1000_SEC:
2122 	case E1000_CEXTERR:
2123 	case E1000_RLEC:
2124 	case E1000_XONRXC:
2125 	case E1000_XONTXC:
2126 	case E1000_XOFFRXC:
2127 	case E1000_XOFFTXC:
2128 	case E1000_FCRUC:
2129 	case E1000_RNBC:
2130 	case E1000_RUC:
2131 	case E1000_RFC:
2132 	case E1000_RJC:
2133 	case E1000_MGTPRC:
2134 	case E1000_MGTPDC:
2135 	case E1000_MGTPTC:
2136 	case E1000_TSCTFC:
2137 		retval = 0;
2138 		break;
2139 	default:
2140 		DPRINTF("Unknown read register: 0x%x", offset);
2141 		retval = 0;
2142 		break;
2143 	}
2144 
2145 	return (retval);
2146 }
2147 
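/*
 * BAR write handler.  The I/O BAR is accessed through the IOADDR/IODATA
 * window, while the register BAR maps registers directly; both paths are
 * serialized by esc_mtx.
 */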
2148 static void
2149 e82545_write(struct vmctx *ctx __unused,
2150     struct pci_devinst *pi, int baridx, uint64_t offset, int size,
2151     uint64_t value)
2152 {
2153 	struct e82545_softc *sc;
2154 
2155 	//DPRINTF("Write bar:%d offset:0x%lx value:0x%lx size:%d", baridx, offset, value, size);
2156 
2157 	sc = pi->pi_arg;
2158 
2159 	pthread_mutex_lock(&sc->esc_mtx);
2160 
2161 	switch (baridx) {
2162 	case E82545_BAR_IO:
2163 		switch (offset) {
2164 		case E82545_IOADDR:
2165 			if (size != 4) {
2166 				DPRINTF("Wrong io addr write sz:%d value:0x%lx", size, value);
2167 			} else
2168 				sc->io_addr = (uint32_t)value;
2169 			break;
2170 		case E82545_IODATA:
2171 			if (size != 4) {
2172 				DPRINTF("Wrong io data write size:%d value:0x%lx", size, value);
2173 			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2174 				DPRINTF("Non-register io write addr:0x%x value:0x%lx", sc->io_addr, value);
2175 			} else
2176 				e82545_write_register(sc, sc->io_addr,
2177 						      (uint32_t)value);
2178 			break;
2179 		default:
2180 			DPRINTF("Unknown io bar write offset:0x%lx value:0x%lx size:%d", offset, value, size);
2181 			break;
2182 		}
2183 		break;
2184 	case E82545_BAR_REGISTER:
2185 		if (size != 4) {
2186 			DPRINTF("Wrong register write size:%d offset:0x%lx value:0x%lx", size, offset, value);
2187 		} else
2188 			e82545_write_register(sc, (uint32_t)offset,
2189 					      (uint32_t)value);
2190 		break;
2191 	default:
2192 		DPRINTF("Unknown write bar:%d off:0x%lx val:0x%lx size:%d",
2193 			baridx, offset, value, size);
2194 	}
2195 
2196 	pthread_mutex_unlock(&sc->esc_mtx);
2197 }
2198 
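/*
 * BAR read handler, the counterpart of e82545_write().  Malformed or
 * unknown accesses return zero.
 */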
2199 static uint64_t
2200 e82545_read(struct vmctx *ctx __unused,
2201     struct pci_devinst *pi, int baridx, uint64_t offset, int size)
2202 {
2203 	struct e82545_softc *sc;
2204 	uint64_t retval;
2205 
2206 	//DPRINTF("Read  bar:%d offset:0x%lx size:%d", baridx, offset, size);
2207 	sc = pi->pi_arg;
2208 	retval = 0;
2209 
2210 	pthread_mutex_lock(&sc->esc_mtx);
2211 
2212 	switch (baridx) {
2213 	case E82545_BAR_IO:
2214 		switch (offset) {
2215 		case E82545_IOADDR:
2216 			if (size != 4) {
2217 				DPRINTF("Wrong io addr read sz:%d", size);
2218 			} else
2219 				retval = sc->io_addr;
2220 			break;
2221 		case E82545_IODATA:
2222 			if (size != 4) {
2223 				DPRINTF("Wrong io data read sz:%d", size);
2224 			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2225 				/* Out-of-range window addresses read as zero. */
2226 				DPRINTF("Non-register io read addr:0x%x",
2227 					sc->io_addr);
2228 			} else
2229 				retval = e82545_read_register(sc, sc->io_addr);
2230 			break;
2231 		default:
2232 			DPRINTF("Unknown io bar read offset:0x%lx size:%d",
2233 				offset, size);
2234 			break;
2235 		}
2236 		break;
2237 	case E82545_BAR_REGISTER:
2238 		if (size != 4) {
2239 			DPRINTF("Wrong register read size:%d offset:0x%lx",
2240 				size, offset);
2241 		} else
2242 			retval = e82545_read_register(sc, (uint32_t)offset);
2243 		break;
2244 	default:
2245 		DPRINTF("Unknown read bar:%d offset:0x%lx size:%d",
2246 			baridx, offset, size);
2247 		break;
2248 	}
2249 
2250 	pthread_mutex_unlock(&sc->esc_mtx);
2251 
2252 	return (retval);
2253 }
2254 
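/*
 * Reset device state.  A driver-initiated reset (drvr != 0) preserves
 * "sticky" state such as the flow-control registers, the filter tables and
 * the descriptor base addresses, while a hardware reset (drvr == 0) clears
 * everything and reloads the station address into the first receive-address
 * slot.
 */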
2255 static void
2256 e82545_reset(struct e82545_softc *sc, int drvr)
2257 {
2258 	int i;
2259 
2260 	e82545_rx_disable(sc);
2261 	e82545_tx_disable(sc);
2262 
2263 	/* clear outstanding interrupts */
2264 	if (sc->esc_irq_asserted)
2265 		pci_lintr_deassert(sc->esc_pi);
2266 
2267 	/* misc */
2268 	if (!drvr) {
2269 		sc->esc_FCAL = 0;
2270 		sc->esc_FCAH = 0;
2271 		sc->esc_FCT = 0;
2272 		sc->esc_VET = 0;
2273 		sc->esc_FCTTV = 0;
2274 	}
2275 	sc->esc_LEDCTL = 0x07061302;
2276 	sc->esc_PBA = 0x00100030;
2277 
2278 	/* start nvm in opcode mode. */
2279 	sc->nvm_opaddr = 0;
2280 	sc->nvm_mode = E82545_NVM_MODE_OPADDR;
2281 	sc->nvm_bits = E82545_NVM_OPADDR_BITS;
2282 	sc->eeprom_control = E1000_EECD_PRES | E82545_EECD_FWE_EN;
2283 	e82545_init_eeprom(sc);
2284 
2285 	/* interrupt */
2286 	sc->esc_ICR = 0;
2287 	sc->esc_ITR = 250;
2288 	sc->esc_ICS = 0;
2289 	sc->esc_IMS = 0;
2290 	sc->esc_IMC = 0;
2291 
2292 	/* L2 filters */
2293 	if (!drvr) {
2294 		memset(sc->esc_fvlan, 0, sizeof(sc->esc_fvlan));
2295 		memset(sc->esc_fmcast, 0, sizeof(sc->esc_fmcast));
2296 		memset(sc->esc_uni, 0, sizeof(sc->esc_uni));
2297 
2298 		/* XXX not necessary on the 82545? */
2299 		sc->esc_uni[0].eu_valid = 1;
2300 		memcpy(sc->esc_uni[0].eu_eth.octet, sc->esc_mac.octet,
2301 		    ETHER_ADDR_LEN);
2302 	} else {
2303 		/* Clear RAH valid bits */
2304 		for (i = 0; i < 16; i++)
2305 			sc->esc_uni[i].eu_valid = 0;
2306 	}
2307 
2308 	/* receive */
2309 	if (!drvr) {
2310 		sc->esc_RDBAL = 0;
2311 		sc->esc_RDBAH = 0;
2312 	}
2313 	sc->esc_RCTL = 0;
2314 	sc->esc_FCRTL = 0;
2315 	sc->esc_FCRTH = 0;
2316 	sc->esc_RDLEN = 0;
2317 	sc->esc_RDH = 0;
2318 	sc->esc_RDT = 0;
2319 	sc->esc_RDTR = 0;
2320 	sc->esc_RXDCTL = (1 << 24) | (1 << 16); /* default GRAN/WTHRESH */
2321 	sc->esc_RADV = 0;
2322 	sc->esc_RXCSUM = 0;
2323 
2324 	/* transmit */
2325 	if (!drvr) {
2326 		sc->esc_TDBAL = 0;
2327 		sc->esc_TDBAH = 0;
2328 		sc->esc_TIPG = 0;
2329 		sc->esc_AIT = 0;
2330 		sc->esc_TIDV = 0;
2331 		sc->esc_TADV = 0;
2332 	}
2333 	sc->esc_tdba = 0;
2334 	sc->esc_txdesc = NULL;
2335 	sc->esc_TXCW = 0;
2336 	sc->esc_TCTL = 0;
2337 	sc->esc_TDLEN = 0;
2338 	sc->esc_TDT = 0;
2339 	sc->esc_TDHr = sc->esc_TDH = 0;
2340 	sc->esc_TXDCTL = 0;
2341 }
2342 
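/*
 * Device initialization: set up the softc and transmit thread, program PCI
 * config space, allocate the register/flash/IO BARs, configure the MAC
 * address and network backend, then perform a hardware reset.
 */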
2343 static int
2344 e82545_init(struct vmctx *ctx, struct pci_devinst *pi, nvlist_t *nvl)
2345 {
2346 	char nstr[80];
2347 	struct e82545_softc *sc;
2348 	const char *mac;
2349 	int err;
2350 
2351 	/* Setup our softc */
2352 	sc = calloc(1, sizeof(*sc));
2353 
2354 	pi->pi_arg = sc;
2355 	sc->esc_pi = pi;
2356 	sc->esc_ctx = ctx;
2357 
2358 	pthread_mutex_init(&sc->esc_mtx, NULL);
2359 	pthread_cond_init(&sc->esc_rx_cond, NULL);
2360 	pthread_cond_init(&sc->esc_tx_cond, NULL);
2361 	pthread_create(&sc->esc_tx_tid, NULL, e82545_tx_thread, sc);
2362 	snprintf(nstr, sizeof(nstr), "e82545-%d:%d tx", pi->pi_slot,
2363 	    pi->pi_func);
2364 	pthread_set_name_np(sc->esc_tx_tid, nstr);
2365 
2366 	pci_set_cfgdata16(pi, PCIR_DEVICE, E82545_DEV_ID_82545EM_COPPER);
2367 	pci_set_cfgdata16(pi, PCIR_VENDOR, E82545_VENDOR_ID_INTEL);
2368 	pci_set_cfgdata8(pi,  PCIR_CLASS, PCIC_NETWORK);
2369 	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_NETWORK_ETHERNET);
2370 	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, E82545_SUBDEV_ID);
2371 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, E82545_VENDOR_ID_INTEL);
2372 
2373 	pci_set_cfgdata8(pi,  PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
2374 	pci_set_cfgdata8(pi,  PCIR_INTPIN, 0x1);
2375 
2376 	/* TODO: this card also supports MSI, but the FreeBSD driver for it
2377 	 * does not, so I have not implemented it. */
2378 	pci_lintr_request(pi);
2379 
2380 	pci_emul_alloc_bar(pi, E82545_BAR_REGISTER, PCIBAR_MEM32,
2381 		E82545_BAR_REGISTER_LEN);
2382 	pci_emul_alloc_bar(pi, E82545_BAR_FLASH, PCIBAR_MEM32,
2383 		E82545_BAR_FLASH_LEN);
2384 	pci_emul_alloc_bar(pi, E82545_BAR_IO, PCIBAR_IO,
2385 		E82545_BAR_IO_LEN);
2386 
2387 	mac = get_config_value_node(nvl, "mac");
2388 	if (mac != NULL) {
2389 		err = net_parsemac(mac, sc->esc_mac.octet);
2390 		if (err) {
2391 			free(sc);
2392 			return (err);
2393 		}
2394 	} else
2395 		net_genmac(pi, sc->esc_mac.octet);
2396 
2397 	err = netbe_init(&sc->esc_be, nvl, e82545_rx_callback, sc);
2398 	if (err) {
2399 		free(sc);
2400 		return (err);
2401 	}
2402 
2403 #ifndef __FreeBSD__
2404 	size_t buflen = sizeof (sc->esc_mac.octet);
2405 
2406 	err = netbe_get_mac(sc->esc_be, sc->esc_mac.octet, &buflen);
2407 	if (err != 0) {
2408 		free(sc);
2409 		return (err);
2410 	}
2411 #endif
2412 
2413 	netbe_rx_enable(sc->esc_be);
2414 
2415 	/* H/w initiated reset */
2416 	e82545_reset(sc, 0);
2417 
2418 	return (0);
2419 }
2420 
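/* Exposed to the bhyve command line as the "e1000" device emulation. */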
2421 static const struct pci_devemu pci_de_e82545 = {
2422 	.pe_emu = 	"e1000",
2423 	.pe_init =	e82545_init,
2424 	.pe_legacy_config = netbe_legacy_config,
2425 	.pe_barwrite =	e82545_write,
2426 	.pe_barread =	e82545_read,
2427 };
2428 PCI_EMUL_SET(pci_de_e82545);
2429