xref: /illumos-gate/usr/src/cmd/bhyve/common/pci_e82545.c (revision 5c4a5fe16715fb423db76577a6883b5bbecdbe45)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
5  * Copyright (c) 2015 Peter Grehan <grehan@freebsd.org>
6  * Copyright (c) 2013 Jeremiah Lott, Avere Systems
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer
14  *    in this position and unchanged.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 
33 #include <sys/types.h>
34 #ifndef WITHOUT_CAPSICUM
35 #include <sys/capsicum.h>
36 #endif
37 #include <sys/limits.h>
38 #include <sys/ioctl.h>
39 #include <sys/uio.h>
40 #include <net/ethernet.h>
41 #include <netinet/in.h>
42 #include <netinet/tcp.h>
43 
44 #ifndef WITHOUT_CAPSICUM
45 #include <capsicum_helpers.h>
46 #endif
47 
48 #include <err.h>
49 #include <errno.h>
50 #include <fcntl.h>
51 #include <md5.h>
52 #include <stdio.h>
53 #include <stdlib.h>
54 #include <string.h>
55 #include <sysexits.h>
56 #include <unistd.h>
57 #include <pthread.h>
58 #include <pthread_np.h>
59 
60 #include "e1000_regs.h"
61 #include "e1000_defines.h"
62 #include "mii.h"
63 
64 #include "bhyverun.h"
65 #include "config.h"
66 #include "debug.h"
67 #include "pci_emul.h"
68 #include "mevent.h"
69 #include "net_utils.h"
70 #include "net_backends.h"
71 
72 /* Hardware/register definitions XXX: move some to common code. */
73 #define E82545_VENDOR_ID_INTEL			0x8086
74 #define E82545_DEV_ID_82545EM_COPPER		0x100F
75 #define E82545_SUBDEV_ID			0x1008
76 
77 #define E82545_REVISION_4			4
78 
79 #define E82545_MDIC_DATA_MASK			0x0000FFFF
80 #define E82545_MDIC_OP_MASK			0x0c000000
81 #define E82545_MDIC_IE				0x20000000
82 
83 #define E82545_EECD_FWE_DIS	0x00000010 /* Flash writes disabled */
84 #define E82545_EECD_FWE_EN	0x00000020 /* Flash writes enabled */
85 #define E82545_EECD_FWE_MASK	0x00000030 /* Flash writes mask */
86 
87 #define E82545_BAR_REGISTER			0
88 #define E82545_BAR_REGISTER_LEN			(128*1024)
89 #define E82545_BAR_FLASH			1
90 #define E82545_BAR_FLASH_LEN			(64*1024)
91 #define E82545_BAR_IO				2
92 #define E82545_BAR_IO_LEN			8
93 
94 #define E82545_IOADDR				0x00000000
95 #define E82545_IODATA				0x00000004
96 #define E82545_IO_REGISTER_MAX			0x0001FFFF
97 #define E82545_IO_FLASH_BASE			0x00080000
98 #define E82545_IO_FLASH_MAX			0x000FFFFF
99 
100 #define E82545_ARRAY_ENTRY(reg, offset)		(reg + (offset<<2))
101 #define E82545_RAR_MAX				15
102 #define E82545_MTA_MAX				127
103 #define E82545_VFTA_MAX				127
104 
105 /* Slightly modified from the driver versions, hardcoded for 3 opcode bits,
106  * followed by 6 address bits.
107  * TODO: make opcode bits and addr bits configurable?
108  * NVM Commands - Microwire */
109 #define E82545_NVM_OPCODE_BITS	3
110 #define E82545_NVM_ADDR_BITS	6
111 #define E82545_NVM_DATA_BITS	16
112 #define E82545_NVM_OPADDR_BITS	(E82545_NVM_OPCODE_BITS + E82545_NVM_ADDR_BITS)
113 #define E82545_NVM_ADDR_MASK	((1 << E82545_NVM_ADDR_BITS)-1)
114 #define E82545_NVM_OPCODE_MASK	\
115     (((1 << E82545_NVM_OPCODE_BITS) - 1) << E82545_NVM_ADDR_BITS)
116 #define E82545_NVM_OPCODE_READ	(0x6 << E82545_NVM_ADDR_BITS)	/* read */
117 #define E82545_NVM_OPCODE_WRITE	(0x5 << E82545_NVM_ADDR_BITS)	/* write */
118 #define E82545_NVM_OPCODE_ERASE	(0x7 << E82545_NVM_ADDR_BITS)	/* erase */
119 #define	E82545_NVM_OPCODE_EWEN	(0x4 << E82545_NVM_ADDR_BITS)	/* wr-enable */
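/*
 * For illustration, a Microwire command word as used below is 9 bits: a
 * 3-bit opcode in bits 8:6 followed by a 6-bit word address in bits 5:0.
 * Reading EEPROM word 0x12 therefore shifts in
 * E82545_NVM_OPCODE_READ | 0x12 == 0x192 (binary 110 010010), after which
 * 16 data bits are clocked back out.
 */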
120 
121 #define	E82545_NVM_EEPROM_SIZE	64 /* 64 * 16-bit values == 1 Kbit (128 bytes) */
122 
123 #define E1000_ICR_SRPD		0x00010000
124 
125 /* This is an arbitrary number.  There is no hard limit on the chip. */
126 #define I82545_MAX_TXSEGS	64
127 
128 /* Legacy receive descriptor */
129 struct e1000_rx_desc {
130 	uint64_t buffer_addr;	/* Address of the descriptor's data buffer */
131 	uint16_t length;	/* Length of data DMAed into data buffer */
132 	uint16_t csum;		/* Packet checksum */
133 	uint8_t	 status;       	/* Descriptor status */
134 	uint8_t  errors;	/* Descriptor Errors */
135 	uint16_t special;
136 };
137 
138 /* Transmit descriptor types */
139 #define	E1000_TXD_MASK		(E1000_TXD_CMD_DEXT | 0x00F00000)
140 #define E1000_TXD_TYP_L		(0)
141 #define E1000_TXD_TYP_C		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C)
142 #define E1000_TXD_TYP_D		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)
143 
144 /* Legacy transmit descriptor */
145 struct e1000_tx_desc {
146 	uint64_t buffer_addr;   /* Address of the descriptor's data buffer */
147 	union {
148 		uint32_t data;
149 		struct {
150 			uint16_t length;  /* Data buffer length */
151 			uint8_t  cso;  /* Checksum offset */
152 			uint8_t  cmd;  /* Descriptor control */
153 		} flags;
154 	} lower;
155 	union {
156 		uint32_t data;
157 		struct {
158 			uint8_t status; /* Descriptor status */
159 			uint8_t css;  /* Checksum start */
160 			uint16_t special;
161 		} fields;
162 	} upper;
163 };
164 
165 /* Context descriptor */
166 struct e1000_context_desc {
167 	union {
168 		uint32_t ip_config;
169 		struct {
170 			uint8_t ipcss;  /* IP checksum start */
171 			uint8_t ipcso;  /* IP checksum offset */
172 			uint16_t ipcse;  /* IP checksum end */
173 		} ip_fields;
174 	} lower_setup;
175 	union {
176 		uint32_t tcp_config;
177 		struct {
178 			uint8_t tucss;  /* TCP checksum start */
179 			uint8_t tucso;  /* TCP checksum offset */
180 			uint16_t tucse;  /* TCP checksum end */
181 		} tcp_fields;
182 	} upper_setup;
183 	uint32_t cmd_and_length;
184 	union {
185 		uint32_t data;
186 		struct {
187 			uint8_t status;  /* Descriptor status */
188 			uint8_t hdr_len;  /* Header length */
189 			uint16_t mss;  /* Maximum segment size */
190 		} fields;
191 	} tcp_seg_setup;
192 };
193 
194 /* Data descriptor */
195 struct e1000_data_desc {
196 	uint64_t buffer_addr;  /* Address of the descriptor's buffer address */
197 	union {
198 		uint32_t data;
199 		struct {
200 			uint16_t length;  /* Data buffer length */
201 			uint8_t typ_len_ext;
202 			uint8_t cmd;
203 		} flags;
204 	} lower;
205 	union {
206 		uint32_t data;
207 		struct {
208 			uint8_t status;  /* Descriptor status */
209 			uint8_t popts;  /* Packet Options */
210 			uint16_t special;
211 		} fields;
212 	} upper;
213 };
214 
215 union e1000_tx_udesc {
216 	struct e1000_tx_desc td;
217 	struct e1000_context_desc cd;
218 	struct e1000_data_desc dd;
219 };
220 
221 /* Tx checksum info for a packet. */
222 struct ck_info {
223 	int	ck_valid;	/* ck_info is valid */
224 	uint8_t	ck_start;	/* start byte of cksum calculation */
225 	uint8_t	ck_off;		/* offset of cksum insertion */
226 	uint16_t ck_len;	/* length of cksum calc: 0 is to packet-end */
227 };
228 
229 /*
230  * Debug printf
231  */
232 static int e82545_debug = 0;
233 #define WPRINTF(msg,params...) PRINTLN("e82545: " msg, ##params)
234 #define DPRINTF(msg,params...) if (e82545_debug) WPRINTF(msg, params)
235 
236 #define	MIN(a,b) (((a)<(b))?(a):(b))
237 #define	MAX(a,b) (((a)>(b))?(a):(b))
238 
239 /* s/w representation of the RAL/RAH regs */
240 struct  eth_uni {
241 	int		eu_valid;
242 	int		eu_addrsel;
243 	struct ether_addr eu_eth;
244 };
245 
246 
247 struct e82545_softc {
248 	struct pci_devinst *esc_pi;
249 	struct vmctx	*esc_ctx;
250 	struct mevent   *esc_mevpitr;
251 	pthread_mutex_t	esc_mtx;
252 	struct ether_addr esc_mac;
253 	net_backend_t	*esc_be;
254 
255 	/* General */
256 	uint32_t	esc_CTRL;	/* x0000 device ctl */
257 	uint32_t	esc_FCAL;	/* x0028 flow ctl addr lo */
258 	uint32_t	esc_FCAH;	/* x002C flow ctl addr hi */
259 	uint32_t	esc_FCT;	/* x0030 flow ctl type */
260 	uint32_t	esc_VET;	/* x0038 VLAN eth type */
261 	uint32_t	esc_FCTTV;	/* x0170 flow ctl tx timer */
262 	uint32_t	esc_LEDCTL;	/* x0E00 LED control */
263 	uint32_t	esc_PBA;	/* x1000 pkt buffer allocation */
264 
265 	/* Interrupt control */
266 	int		esc_irq_asserted;
267 	uint32_t	esc_ICR;	/* x00C0 cause read/clear */
268 	uint32_t	esc_ITR;	/* x00C4 intr throttling */
269 	uint32_t	esc_ICS;	/* x00C8 cause set */
270 	uint32_t	esc_IMS;	/* x00D0 mask set/read */
271 	uint32_t	esc_IMC;	/* x00D8 mask clear */
272 
273 	/* Transmit */
274 	union e1000_tx_udesc *esc_txdesc;
275 	struct e1000_context_desc esc_txctx;
276 	pthread_t	esc_tx_tid;
277 	pthread_cond_t	esc_tx_cond;
278 	int		esc_tx_enabled;
279 	int		esc_tx_active;
280 	uint32_t	esc_TXCW;	/* x0178 transmit config */
281 	uint32_t	esc_TCTL;	/* x0400 transmit ctl */
282 	uint32_t	esc_TIPG;	/* x0410 inter-packet gap */
283 	uint16_t	esc_AIT;	/* x0458 Adaptive Interframe Throttle */
284 	uint64_t	esc_tdba;      	/* verified 64-bit desc table addr */
285 	uint32_t	esc_TDBAL;	/* x3800 desc table addr, low bits */
286 	uint32_t	esc_TDBAH;	/* x3804 desc table addr, hi 32-bits */
287 	uint32_t	esc_TDLEN;	/* x3808 # descriptors in bytes */
288 	uint16_t	esc_TDH;	/* x3810 desc table head idx */
289 	uint16_t	esc_TDHr;	/* internal read version of TDH */
290 	uint16_t	esc_TDT;	/* x3818 desc table tail idx */
291 	uint32_t	esc_TIDV;	/* x3820 intr delay */
292 	uint32_t	esc_TXDCTL;	/* x3828 desc control */
293 	uint32_t	esc_TADV;	/* x382C intr absolute delay */
294 
295 	/* L2 frame acceptance */
296 	struct eth_uni	esc_uni[16];	/* 16 x unicast MAC addresses */
297 	uint32_t	esc_fmcast[128]; /* Multicast filter bit-match */
298 	uint32_t	esc_fvlan[128]; /* VLAN 4096-bit filter */
299 
300 	/* Receive */
301 	struct e1000_rx_desc *esc_rxdesc;
302 	pthread_cond_t	esc_rx_cond;
303 	int		esc_rx_enabled;
304 	int		esc_rx_active;
305 	int		esc_rx_loopback;
306 	uint32_t	esc_RCTL;	/* x0100 receive ctl */
307 	uint32_t	esc_FCRTL;	/* x2160 flow cntl thresh, low */
308 	uint32_t	esc_FCRTH;	/* x2168 flow cntl thresh, hi */
309 	uint64_t	esc_rdba;	/* verified 64-bit desc table addr */
310 	uint32_t	esc_RDBAL;	/* x2800 desc table addr, low bits */
311 	uint32_t	esc_RDBAH;	/* x2804 desc table addr, hi 32-bits*/
312 	uint32_t	esc_RDLEN;	/* x2808 #descriptors */
313 	uint16_t	esc_RDH;	/* x2810 desc table head idx */
314 	uint16_t	esc_RDT;	/* x2818 desc table tail idx */
315 	uint32_t	esc_RDTR;	/* x2820 intr delay */
316 	uint32_t	esc_RXDCTL;	/* x2828 desc control */
317 	uint32_t	esc_RADV;	/* x282C intr absolute delay */
318 	uint32_t	esc_RSRPD;	/* x2C00 recv small packet detect */
319 	uint32_t	esc_RXCSUM;     /* x5000 receive cksum ctl */
320 
321 	/* IO Port register access */
322 	uint32_t io_addr;
323 
324 	/* Shadow copy of MDIC */
325 	uint32_t mdi_control;
326 	/* Shadow copy of EECD */
327 	uint32_t eeprom_control;
328 	/* Latest NVM in/out */
329 	uint16_t nvm_data;
330 	uint16_t nvm_opaddr;
331 	/* stats */
332 	uint32_t missed_pkt_count; /* dropped for no room in rx queue */
333 	uint32_t pkt_rx_by_size[6];
334 	uint32_t pkt_tx_by_size[6];
335 	uint32_t good_pkt_rx_count;
336 	uint32_t bcast_pkt_rx_count;
337 	uint32_t mcast_pkt_rx_count;
338 	uint32_t good_pkt_tx_count;
339 	uint32_t bcast_pkt_tx_count;
340 	uint32_t mcast_pkt_tx_count;
341 	uint32_t oversize_rx_count;
342 	uint32_t tso_tx_count;
343 	uint64_t good_octets_rx;
344 	uint64_t good_octets_tx;
345 	uint64_t missed_octets; /* counts missed and oversized */
346 
347 	uint8_t nvm_bits:6; /* number of bits remaining in/out */
348 	uint8_t nvm_mode:2;
349 #define E82545_NVM_MODE_OPADDR  0x0
350 #define E82545_NVM_MODE_DATAIN  0x1
351 #define E82545_NVM_MODE_DATAOUT 0x2
352 	/* EEPROM data */
353 	uint16_t eeprom_data[E82545_NVM_EEPROM_SIZE];
354 };
355 
356 static void e82545_reset(struct e82545_softc *sc, int dev);
357 static void e82545_rx_enable(struct e82545_softc *sc);
358 static void e82545_rx_disable(struct e82545_softc *sc);
359 static void e82545_rx_callback(int fd, enum ev_type type, void *param);
360 static void e82545_tx_start(struct e82545_softc *sc);
361 static void e82545_tx_enable(struct e82545_softc *sc);
362 static void e82545_tx_disable(struct e82545_softc *sc);
363 
364 static inline int __unused
365 e82545_size_stat_index(uint32_t size)
366 {
367 	if (size <= 64) {
368 		return 0;
369 	} else if (size >= 1024) {
370 		return 5;
371 	} else {
372 		/* should be 1-4 */
373 		return (ffs(size) - 6);
374 	}
375 }
376 
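/*
 * Build the minimal EEPROM image a guest driver expects: the MAC address
 * and PCI IDs, plus a checksum word chosen so that the first 0x40 words
 * sum to NVM_SUM (0xBABA), which is what e1000 drivers verify at attach.
 */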
377 static void
378 e82545_init_eeprom(struct e82545_softc *sc)
379 {
380 	uint16_t checksum, i;
381 
382         /* mac addr */
383 	sc->eeprom_data[NVM_MAC_ADDR] = ((uint16_t)sc->esc_mac.octet[0]) |
384 		(((uint16_t)sc->esc_mac.octet[1]) << 8);
385 	sc->eeprom_data[NVM_MAC_ADDR+1] = ((uint16_t)sc->esc_mac.octet[2]) |
386 		(((uint16_t)sc->esc_mac.octet[3]) << 8);
387 	sc->eeprom_data[NVM_MAC_ADDR+2] = ((uint16_t)sc->esc_mac.octet[4]) |
388 		(((uint16_t)sc->esc_mac.octet[5]) << 8);
389 
390 	/* pci ids */
391 	sc->eeprom_data[NVM_SUB_DEV_ID] = E82545_SUBDEV_ID;
392 	sc->eeprom_data[NVM_SUB_VEN_ID] = E82545_VENDOR_ID_INTEL;
393 	sc->eeprom_data[NVM_DEV_ID] = E82545_DEV_ID_82545EM_COPPER;
394 	sc->eeprom_data[NVM_VEN_ID] = E82545_VENDOR_ID_INTEL;
395 
396 	/* fill in the checksum */
397         checksum = 0;
398 	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
399 		checksum += sc->eeprom_data[i];
400 	}
401 	checksum = NVM_SUM - checksum;
402 	sc->eeprom_data[NVM_CHECKSUM_REG] = checksum;
403 	DPRINTF("eeprom checksum: 0x%x", checksum);
404 }
405 
406 static void
407 e82545_write_mdi(struct e82545_softc *sc __unused, uint8_t reg_addr,
408     uint8_t phy_addr, uint32_t data)
409 {
410 	DPRINTF("Write mdi reg:0x%x phy:0x%x data: 0x%x", reg_addr, phy_addr, data);
411 }
412 
413 static uint32_t
414 e82545_read_mdi(struct e82545_softc *sc __unused, uint8_t reg_addr,
415     uint8_t phy_addr)
416 {
417 	//DPRINTF("Read mdi reg:0x%x phy:0x%x", reg_addr, phy_addr);
418 	switch (reg_addr) {
419 	case PHY_STATUS:
420 		return (MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
421 			MII_SR_AUTONEG_COMPLETE);
422 	case PHY_AUTONEG_ADV:
423 		return NWAY_AR_SELECTOR_FIELD;
424 	case PHY_LP_ABILITY:
425 		return 0;
426 	case PHY_1000T_STATUS:
427 		return (SR_1000T_LP_FD_CAPS | SR_1000T_REMOTE_RX_STATUS |
428 			SR_1000T_LOCAL_RX_STATUS);
429 	case PHY_ID1:
430 		return (M88E1011_I_PHY_ID >> 16) & 0xFFFF;
431 	case PHY_ID2:
432 		return (M88E1011_I_PHY_ID | E82545_REVISION_4) & 0xFFFF;
433 	default:
434 		DPRINTF("Unknown mdi read reg:0x%x phy:0x%x", reg_addr, phy_addr);
435 		return 0;
436 	}
437 	/* not reached */
438 }
439 
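/*
 * A strobe is one rising edge of EECD.SK with EECD.CS held high.  Each
 * strobe moves the Microwire state machine one bit: in OPADDR mode the DI
 * bit is shifted into nvm_opaddr (9 bits total); a READ opcode then
 * switches to DATAOUT mode, where the 16 bits of the selected word are
 * shifted out on DO, MSB first; a WRITE opcode switches to DATAIN mode and
 * collects 16 bits from DI before updating eeprom_data[].
 */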
440 static void
441 e82545_eecd_strobe(struct e82545_softc *sc)
442 {
443 	/* Microwire state machine */
444 	/*
445 	DPRINTF("eeprom state machine srtobe "
446 		"0x%x 0x%x 0x%x 0x%x",
447 		sc->nvm_mode, sc->nvm_bits,
448 		sc->nvm_opaddr, sc->nvm_data);*/
449 
450 	if (sc->nvm_bits == 0) {
451 		DPRINTF("eeprom state machine not expecting data! "
452 			"0x%x 0x%x 0x%x 0x%x",
453 			sc->nvm_mode, sc->nvm_bits,
454 			sc->nvm_opaddr, sc->nvm_data);
455 		return;
456 	}
457 	sc->nvm_bits--;
458 	if (sc->nvm_mode == E82545_NVM_MODE_DATAOUT) {
459 		/* shifting out */
460 		if (sc->nvm_data & 0x8000) {
461 			sc->eeprom_control |= E1000_EECD_DO;
462 		} else {
463 			sc->eeprom_control &= ~E1000_EECD_DO;
464 		}
465 		sc->nvm_data <<= 1;
466 		if (sc->nvm_bits == 0) {
467 			/* read done, back to opcode mode. */
468 			sc->nvm_opaddr = 0;
469 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
470 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
471 		}
472 	} else if (sc->nvm_mode == E82545_NVM_MODE_DATAIN) {
473 		/* shifting in */
474 		sc->nvm_data <<= 1;
475 		if (sc->eeprom_control & E1000_EECD_DI) {
476 			sc->nvm_data |= 1;
477 		}
478 		if (sc->nvm_bits == 0) {
479 			/* eeprom write */
480 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
481 			uint16_t addr = sc->nvm_opaddr & E82545_NVM_ADDR_MASK;
482 			if (op != E82545_NVM_OPCODE_WRITE) {
483 				DPRINTF("Illegal eeprom write op 0x%x",
484 					sc->nvm_opaddr);
485 			} else if (addr >= E82545_NVM_EEPROM_SIZE) {
486 				DPRINTF("Illegal eeprom write addr 0x%x",
487 					sc->nvm_opaddr);
488 			} else {
489 				DPRINTF("eeprom write eeprom[0x%x] = 0x%x",
490 				addr, sc->nvm_data);
491 				sc->eeprom_data[addr] = sc->nvm_data;
492 			}
493 			/* back to opcode mode */
494 			sc->nvm_opaddr = 0;
495 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
496 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
497 		}
498 	} else if (sc->nvm_mode == E82545_NVM_MODE_OPADDR) {
499 		sc->nvm_opaddr <<= 1;
500 		if (sc->eeprom_control & E1000_EECD_DI) {
501 			sc->nvm_opaddr |= 1;
502 		}
503 		if (sc->nvm_bits == 0) {
504 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
505 			switch (op) {
506 			case E82545_NVM_OPCODE_EWEN:
507 				DPRINTF("eeprom write enable: 0x%x",
508 					sc->nvm_opaddr);
509 				/* back to opcode mode */
510 				sc->nvm_opaddr = 0;
511 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
512 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
513 				break;
514 			case E82545_NVM_OPCODE_READ:
515 			{
516 				uint16_t addr = sc->nvm_opaddr &
517 					E82545_NVM_ADDR_MASK;
518 				sc->nvm_mode = E82545_NVM_MODE_DATAOUT;
519 				sc->nvm_bits = E82545_NVM_DATA_BITS;
520 				if (addr < E82545_NVM_EEPROM_SIZE) {
521 					sc->nvm_data = sc->eeprom_data[addr];
522 					DPRINTF("eeprom read: eeprom[0x%x] = 0x%x",
523 						addr, sc->nvm_data);
524 				} else {
525 					DPRINTF("eeprom illegal read: 0x%x",
526 						sc->nvm_opaddr);
527 					sc->nvm_data = 0;
528 				}
529 				break;
530 			}
531 			case E82545_NVM_OPCODE_WRITE:
532 				sc->nvm_mode = E82545_NVM_MODE_DATAIN;
533 				sc->nvm_bits = E82545_NVM_DATA_BITS;
534 				sc->nvm_data = 0;
535 				break;
536 			default:
537 				DPRINTF("eeprom unknown op: 0x%x",
538 					sc->nvm_opaddr);
539 				/* back to opcode mode */
540 				sc->nvm_opaddr = 0;
541 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
542 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
543 			}
544 		}
545 	} else {
546 		DPRINTF("eeprom state machine wrong state! "
547 			"0x%x 0x%x 0x%x 0x%x",
548 			sc->nvm_mode, sc->nvm_bits,
549 			sc->nvm_opaddr, sc->nvm_data);
550 	}
551 }
552 
553 static void
554 e82545_itr_callback(int fd __unused, enum ev_type type __unused, void *param)
555 {
556 	uint32_t new;
557 	struct e82545_softc *sc = param;
558 
559 	pthread_mutex_lock(&sc->esc_mtx);
560 	new = sc->esc_ICR & sc->esc_IMS;
561 	if (new && !sc->esc_irq_asserted) {
562 		DPRINTF("itr callback: lintr assert %x", new);
563 		sc->esc_irq_asserted = 1;
564 		pci_lintr_assert(sc->esc_pi);
565 	} else {
566 		mevent_delete(sc->esc_mevpitr);
567 		sc->esc_mevpitr = NULL;
568 	}
569 	pthread_mutex_unlock(&sc->esc_mtx);
570 }
571 
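/*
 * ITR is programmed in 256ns units while mevent timers take milliseconds.
 * The (ITR + 3905) / 3906 conversion used below rounds up, since
 * 3906 * 256ns is roughly 1ms: e.g. ITR = 1 still yields a 1ms timer, and
 * ITR = 4000 (~1.02ms) yields 2ms.
 */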
572 static void
573 e82545_icr_assert(struct e82545_softc *sc, uint32_t bits)
574 {
575 	uint32_t new;
576 
577 	DPRINTF("icr assert: 0x%x", bits);
578 
579 	/*
580 	 * An interrupt is only generated if bits are set that
581 	 * aren't already in the ICR, these bits are unmasked,
582 	 * and there isn't an interrupt already pending.
583 	 */
584 	new = bits & ~sc->esc_ICR & sc->esc_IMS;
585 	sc->esc_ICR |= bits;
586 
587 	if (new == 0) {
588 		DPRINTF("icr assert: masked %x, ims %x", new, sc->esc_IMS);
589 	} else if (sc->esc_mevpitr != NULL) {
590 		DPRINTF("icr assert: throttled %x, ims %x", new, sc->esc_IMS);
591 	} else if (!sc->esc_irq_asserted) {
592 		DPRINTF("icr assert: lintr assert %x", new);
593 		sc->esc_irq_asserted = 1;
594 		pci_lintr_assert(sc->esc_pi);
595 		if (sc->esc_ITR != 0) {
596 			sc->esc_mevpitr = mevent_add(
597 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
598 			    EVF_TIMER, e82545_itr_callback, sc);
599 		}
600 	}
601 }
602 
603 static void
604 e82545_ims_change(struct e82545_softc *sc, uint32_t bits)
605 {
606 	uint32_t new;
607 
608 	/*
609 	 * Changing the mask may allow previously asserted
610 	 * but masked interrupt requests to generate an interrupt.
611 	 */
612 	new = bits & sc->esc_ICR & ~sc->esc_IMS;
613 	sc->esc_IMS |= bits;
614 
615 	if (new == 0) {
616 		DPRINTF("ims change: masked %x, ims %x", new, sc->esc_IMS);
617 	} else if (sc->esc_mevpitr != NULL) {
618 		DPRINTF("ims change: throttled %x, ims %x", new, sc->esc_IMS);
619 	} else if (!sc->esc_irq_asserted) {
620 		DPRINTF("ims change: lintr assert %x", new);
621 		sc->esc_irq_asserted = 1;
622 		pci_lintr_assert(sc->esc_pi);
623 		if (sc->esc_ITR != 0) {
624 			sc->esc_mevpitr = mevent_add(
625 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
626 			    EVF_TIMER, e82545_itr_callback, sc);
627 		}
628 	}
629 }
630 
631 static void
632 e82545_icr_deassert(struct e82545_softc *sc, uint32_t bits)
633 {
634 
635 	DPRINTF("icr deassert: 0x%x", bits);
636 	sc->esc_ICR &= ~bits;
637 
638 	/*
639 	 * If there are no longer any interrupt sources and there
640 	 * was an asserted interrupt, clear it
641 	 */
642 	if (sc->esc_irq_asserted && !(sc->esc_ICR & sc->esc_IMS)) {
643 		DPRINTF("icr deassert: lintr deassert %x", bits);
644 		pci_lintr_deassert(sc->esc_pi);
645 		sc->esc_irq_asserted = 0;
646 	}
647 }
648 
649 static void
650 e82545_intr_write(struct e82545_softc *sc, uint32_t offset, uint32_t value)
651 {
652 
653 	DPRINTF("intr_write: off %x, val %x", offset, value);
654 
655 	switch (offset) {
656 	case E1000_ICR:
657 		e82545_icr_deassert(sc, value);
658 		break;
659 	case E1000_ITR:
660 		sc->esc_ITR = value;
661 		break;
662 	case E1000_ICS:
663 		sc->esc_ICS = value;	/* not used: store for debug */
664 		e82545_icr_assert(sc, value);
665 		break;
666 	case E1000_IMS:
667 		e82545_ims_change(sc, value);
668 		break;
669 	case E1000_IMC:
670 		sc->esc_IMC = value;	/* for debug */
671 		sc->esc_IMS &= ~value;
672 		// XXX clear interrupts if all ICR bits now masked
673 		// and interrupt was pending ?
674 		break;
675 	default:
676 		break;
677 	}
678 }
679 
680 static uint32_t
681 e82545_intr_read(struct e82545_softc *sc, uint32_t offset)
682 {
683 	uint32_t retval;
684 
685 	retval = 0;
686 
687 	DPRINTF("intr_read: off %x", offset);
688 
689 	switch (offset) {
690 	case E1000_ICR:
691 		retval = sc->esc_ICR;
692 		sc->esc_ICR = 0;
693 		e82545_icr_deassert(sc, ~0);
694 		break;
695 	case E1000_ITR:
696 		retval = sc->esc_ITR;
697 		break;
698 	case E1000_ICS:
699 		/* write-only register */
700 		break;
701 	case E1000_IMS:
702 		retval = sc->esc_IMS;
703 		break;
704 	case E1000_IMC:
705 		/* write-only register */
706 		break;
707 	default:
708 		break;
709 	}
710 
711 	return (retval);
712 }
713 
714 static void
715 e82545_devctl(struct e82545_softc *sc, uint32_t val)
716 {
717 
718 	sc->esc_CTRL = val & ~E1000_CTRL_RST;
719 
720 	if (val & E1000_CTRL_RST) {
721 		DPRINTF("e1k: s/w reset, ctl %x", val);
722 		e82545_reset(sc, 1);
723 	}
724 	/* XXX check for phy reset ? */
725 }
726 
727 static void
728 e82545_rx_update_rdba(struct e82545_softc *sc)
729 {
730 
731 	/* XXX verify desc base/len within phys mem range */
732 	sc->esc_rdba = (uint64_t)sc->esc_RDBAH << 32 |
733 	    sc->esc_RDBAL;
734 
735 	/* Cache host mapping of guest descriptor array */
736 	sc->esc_rxdesc = paddr_guest2host(sc->esc_ctx,
737 	    sc->esc_rdba, sc->esc_RDLEN);
738 }
739 
740 static void
741 e82545_rx_ctl(struct e82545_softc *sc, uint32_t val)
742 {
743 	int on;
744 
745 	on = ((val & E1000_RCTL_EN) == E1000_RCTL_EN);
746 
747 	/* Save RCTL after stripping reserved bits 31:27,24,21,14,11:10,0 */
748 	sc->esc_RCTL = val & ~0xF9204c01;
749 
750 	DPRINTF("rx_ctl - %s RCTL %x, val %x",
751 		on ? "on" : "off", sc->esc_RCTL, val);
752 
753 	/* state change requested */
754 	if (on != sc->esc_rx_enabled) {
755 		if (on) {
756 			/* Catch disallowed/unimplemented settings */
757 			//assert(!(val & E1000_RCTL_LBM_TCVR));
758 
759 			if (sc->esc_RCTL & E1000_RCTL_LBM_TCVR) {
760 				sc->esc_rx_loopback = 1;
761 			} else {
762 				sc->esc_rx_loopback = 0;
763 			}
764 
765 			e82545_rx_update_rdba(sc);
766 			e82545_rx_enable(sc);
767 		} else {
768 			e82545_rx_disable(sc);
769 			sc->esc_rx_loopback = 0;
770 			sc->esc_rdba = 0;
771 			sc->esc_rxdesc = NULL;
772 		}
773 	}
774 }
775 
776 static void
777 e82545_tx_update_tdba(struct e82545_softc *sc)
778 {
779 
780 	/* XXX verify desc base/len within phys mem range */
781 	sc->esc_tdba = (uint64_t)sc->esc_TDBAH << 32 | sc->esc_TDBAL;
782 
783 	/* Cache host mapping of guest descriptor array */
784 	sc->esc_txdesc = paddr_guest2host(sc->esc_ctx, sc->esc_tdba,
785             sc->esc_TDLEN);
786 }
787 
788 static void
789 e82545_tx_ctl(struct e82545_softc *sc, uint32_t val)
790 {
791 	int on;
792 
793 	on = ((val & E1000_TCTL_EN) == E1000_TCTL_EN);
794 
795 	/* ignore TCTL_EN settings that don't change state */
796 	if (on == sc->esc_tx_enabled)
797 		return;
798 
799 	if (on) {
800 		e82545_tx_update_tdba(sc);
801 		e82545_tx_enable(sc);
802 	} else {
803 		e82545_tx_disable(sc);
804 		sc->esc_tdba = 0;
805 		sc->esc_txdesc = NULL;
806 	}
807 
808 	/* Save TCTL value after stripping reserved bits 31:25,23,2,0 */
809 	sc->esc_TCTL = val & ~0xFE800005;
810 }
811 
812 static int
813 e82545_bufsz(uint32_t rctl)
814 {
815 
816 	switch (rctl & (E1000_RCTL_BSEX | E1000_RCTL_SZ_256)) {
817 	case (E1000_RCTL_SZ_2048): return (2048);
818 	case (E1000_RCTL_SZ_1024): return (1024);
819 	case (E1000_RCTL_SZ_512): return (512);
820 	case (E1000_RCTL_SZ_256): return (256);
821 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_16384): return (16384);
822 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_8192): return (8192);
823 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_4096): return (4096);
824 	}
825 	return (256);	/* Forbidden value. */
826 }
827 
828 /* XXX one packet at a time until this is debugged */
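/*
 * The ring math below treats RDH/RDT as indices into a ring of RDLEN/16
 * descriptors; (size + RDT - head) % size is the number of descriptors the
 * guest has posted for the device to fill.  For example, with RDLEN = 4096
 * (size = 256), head = 250 and RDT = 10, 16 descriptors are available, so
 * a worst-case 16KB LPE packet split over 1024-byte buffers still fits.
 */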
829 static void
830 e82545_rx_callback(int fd __unused, enum ev_type type __unused, void *param)
831 {
832 	struct e82545_softc *sc = param;
833 	struct e1000_rx_desc *rxd;
834 	struct iovec vec[64];
835 	ssize_t len;
836 	int left, lim, maxpktsz, maxpktdesc, bufsz, i, n, size;
837 	uint32_t cause = 0;
838 	uint16_t *tp, tag, head;
839 
840 	pthread_mutex_lock(&sc->esc_mtx);
841 	DPRINTF("rx_run: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
842 
843 	if (!sc->esc_rx_enabled || sc->esc_rx_loopback) {
844 		DPRINTF("rx disabled (!%d || %d) -- packet(s) dropped",
845 		    sc->esc_rx_enabled, sc->esc_rx_loopback);
846 		while (netbe_rx_discard(sc->esc_be) > 0) {
847 		}
848 		goto done1;
849 	}
850 	bufsz = e82545_bufsz(sc->esc_RCTL);
851 	maxpktsz = (sc->esc_RCTL & E1000_RCTL_LPE) ? 16384 : 1522;
852 	maxpktdesc = (maxpktsz + bufsz - 1) / bufsz;
853 	size = sc->esc_RDLEN / 16;
854 	head = sc->esc_RDH;
855 	left = (size + sc->esc_RDT - head) % size;
856 	if (left < maxpktdesc) {
857 		DPRINTF("rx overflow (%d < %d) -- packet(s) dropped",
858 		    left, maxpktdesc);
859 		while (netbe_rx_discard(sc->esc_be) > 0) {
860 		}
861 		goto done1;
862 	}
863 
864 	sc->esc_rx_active = 1;
865 	pthread_mutex_unlock(&sc->esc_mtx);
866 
867 	for (lim = size / 4; lim > 0 && left >= maxpktdesc; lim -= n) {
868 
869 		/* Grab rx descriptor pointed to by the head pointer */
870 		for (i = 0; i < maxpktdesc; i++) {
871 			rxd = &sc->esc_rxdesc[(head + i) % size];
872 			vec[i].iov_base = paddr_guest2host(sc->esc_ctx,
873 			    rxd->buffer_addr, bufsz);
874 			vec[i].iov_len = bufsz;
875 		}
876 		len = netbe_recv(sc->esc_be, vec, maxpktdesc);
877 		if (len <= 0) {
878 			DPRINTF("netbe_recv() returned %zd", len);
879 			goto done;
880 		}
881 
882 		/*
883 		 * Adjust the packet length based on whether the CRC needs
884 		 * to be stripped or if the packet is less than the minimum
885 		 * eth packet size.
886 		 */
887 		if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
888 			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
889 		if (!(sc->esc_RCTL & E1000_RCTL_SECRC))
890 			len += ETHER_CRC_LEN;
891 		n = (len + bufsz - 1) / bufsz;
892 
893 		DPRINTF("packet read %zd bytes, %d segs, head %d",
894 		    len, n, head);
895 
896 		/* Apply VLAN filter. */
897 		tp = (uint16_t *)vec[0].iov_base + 6;
898 		if ((sc->esc_RCTL & E1000_RCTL_VFE) &&
899 		    (ntohs(tp[0]) == sc->esc_VET)) {
900 			tag = ntohs(tp[1]) & 0x0fff;
901 			if ((sc->esc_fvlan[tag >> 5] &
902 			    (1 << (tag & 0x1f))) != 0) {
903 				DPRINTF("known VLAN %d", tag);
904 			} else {
905 				DPRINTF("unknown VLAN %d", tag);
906 				n = 0;
907 				continue;
908 			}
909 		}
910 
911 		/* Update all consumed descriptors. */
912 		for (i = 0; i < n - 1; i++) {
913 			rxd = &sc->esc_rxdesc[(head + i) % size];
914 			rxd->length = bufsz;
915 			rxd->csum = 0;
916 			rxd->errors = 0;
917 			rxd->special = 0;
918 			rxd->status = E1000_RXD_STAT_DD;
919 		}
920 		rxd = &sc->esc_rxdesc[(head + i) % size];
921 		rxd->length = len % bufsz;
922 		rxd->csum = 0;
923 		rxd->errors = 0;
924 		rxd->special = 0;
925 		/* XXX signal no checksum for now */
926 		rxd->status = E1000_RXD_STAT_PIF | E1000_RXD_STAT_IXSM |
927 		    E1000_RXD_STAT_EOP | E1000_RXD_STAT_DD;
928 
929 		/* Schedule receive interrupts. */
930 		if ((uint32_t)len <= sc->esc_RSRPD) {
931 			cause |= E1000_ICR_SRPD | E1000_ICR_RXT0;
932 		} else {
933 			/* XXX: RDRT and RADV timers should be here. */
934 			cause |= E1000_ICR_RXT0;
935 		}
936 
937 		head = (head + n) % size;
938 		left -= n;
939 	}
940 
941 done:
942 	pthread_mutex_lock(&sc->esc_mtx);
943 	sc->esc_rx_active = 0;
944 	if (sc->esc_rx_enabled == 0)
945 		pthread_cond_signal(&sc->esc_rx_cond);
946 
947 	sc->esc_RDH = head;
948 	/* Respect E1000_RCTL_RDMTS */
949 	left = (size + sc->esc_RDT - head) % size;
950 	if (left < (size >> (((sc->esc_RCTL >> 8) & 3) + 1)))
951 		cause |= E1000_ICR_RXDMT0;
952 	/* Assert all accumulated interrupts. */
953 	if (cause != 0)
954 		e82545_icr_assert(sc, cause);
955 done1:
956 	DPRINTF("rx_run done: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
957 	pthread_mutex_unlock(&sc->esc_mtx);
958 }
959 
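/*
 * Ones-complement carry fold: the high and low 16-bit halves of the
 * running sum are added and any wrap is folded back in.  For example, a
 * sum of 0x3FFFF folds to 0xFFFF + 0x3 = 0x10002, which exceeds 0xFFFF and
 * so reduces to 0x0003.
 */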
960 static uint16_t
961 e82545_carry(uint32_t sum)
962 {
963 
964 	sum = (sum & 0xFFFF) + (sum >> 16);
965 	if (sum > 0xFFFF)
966 		sum -= 0xFFFF;
967 	return (sum);
968 }
969 
970 static uint16_t
971 e82545_buf_checksum(uint8_t *buf, int len)
972 {
973 	int i;
974 	uint32_t sum = 0;
975 
976 	/* Checksum all the pairs of bytes first... */
977 	for (i = 0; i < (len & ~1); i += 2)
978 		sum += *((u_int16_t *)(buf + i));
979 
980 	/*
981 	 * If there's a single byte left over, checksum it, too.
982 	 * Network byte order is big-endian, so the remaining byte is
983 	 * the high byte.
984 	 */
985 	if (i < len)
986 		sum += htons(buf[i] << 8);
987 
988 	return (e82545_carry(sum));
989 }
990 
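/*
 * Sum a byte range that may span several iovecs.  When a chunk ends on an
 * odd byte, the next chunk's partial sum is added shifted left by 8; after
 * folding this is equivalent to byte-swapping it, which keeps the
 * ones-complement sum correct across the odd boundary.
 */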
991 static uint16_t
992 e82545_iov_checksum(struct iovec *iov, int iovcnt, unsigned int off,
993     unsigned int len)
994 {
995 	unsigned int now, odd;
996 	uint32_t sum = 0, s;
997 
998 	/* Skip completely unneeded vectors. */
999 	while (iovcnt > 0 && iov->iov_len <= off && off > 0) {
1000 		off -= iov->iov_len;
1001 		iov++;
1002 		iovcnt--;
1003 	}
1004 
1005 	/* Calculate checksum of requested range. */
1006 	odd = 0;
1007 	while (len > 0 && iovcnt > 0) {
1008 		now = MIN(len, iov->iov_len - off);
1009 		s = e82545_buf_checksum((uint8_t *)iov->iov_base + off, now);
1010 		sum += odd ? (s << 8) : s;
1011 		odd ^= (now & 1);
1012 		len -= now;
1013 		off = 0;
1014 		iov++;
1015 		iovcnt--;
1016 	}
1017 
1018 	return (e82545_carry(sum));
1019 }
1020 
1021 /*
1022  * Return the transmit descriptor type.
1023  */
1024 static int
1025 e82545_txdesc_type(uint32_t lower)
1026 {
1027 	int type;
1028 
1029 	type = 0;
1030 
1031 	if (lower & E1000_TXD_CMD_DEXT)
1032 		type = lower & E1000_TXD_MASK;
1033 
1034 	return (type);
1035 }
1036 
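/*
 * ck_start/ck_off/ck_len come from the legacy or context descriptor: the
 * checksum covers bytes [ck_start, ck_len] inclusive, or runs to the end
 * of the packet when ck_len is 0, and the 16-bit result is inserted at
 * ck_off.  For a plain IPv4 header with ipcss = 14 and ipcse = 33, the
 * covered length below works out to 33 - 14 + 1 = 20 bytes.
 */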
1037 static void
1038 e82545_transmit_checksum(struct iovec *iov, int iovcnt, struct ck_info *ck)
1039 {
1040 	uint16_t cksum;
1041 	unsigned int cklen;
1042 
1043 	DPRINTF("tx cksum: iovcnt/s/off/len %d/%d/%d/%d",
1044 	    iovcnt, ck->ck_start, ck->ck_off, ck->ck_len);
1045 	cklen = ck->ck_len ? ck->ck_len - ck->ck_start + 1U : UINT_MAX;
1046 	cksum = e82545_iov_checksum(iov, iovcnt, ck->ck_start, cklen);
1047 	*(uint16_t *)((uint8_t *)iov[0].iov_base + ck->ck_off) = ~cksum;
1048 }
1049 
1050 static void
1051 e82545_transmit_backend(struct e82545_softc *sc, struct iovec *iov, int iovcnt)
1052 {
1053 
1054 	if (sc->esc_be == NULL)
1055 		return;
1056 
1057 	(void) netbe_send(sc->esc_be, iov, iovcnt);
1058 }
1059 
1060 static void
1061 e82545_transmit_done(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1062     uint16_t dsize, int *tdwb)
1063 {
1064 	union e1000_tx_udesc *dsc;
1065 
1066 	for ( ; head != tail; head = (head + 1) % dsize) {
1067 		dsc = &sc->esc_txdesc[head];
1068 		if (dsc->td.lower.data & E1000_TXD_CMD_RS) {
1069 			dsc->td.upper.data |= E1000_TXD_STAT_DD;
1070 			*tdwb = 1;
1071 		}
1072 	}
1073 }
1074 
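/*
 * Gather a single frame from the guest's transmit descriptors.  In the
 * non-TSO case the gathered iovecs are checksummed and sent as-is.  For
 * TSO, the writable header copy is replayed in front of each MSS-sized
 * slice of the payload, with the IP length/ID, TCP sequence/FIN/PSH or UDP
 * length, and both checksums patched per segment before handing the
 * segment to the backend.  Returns the number of descriptors consumed, or
 * 0 if the ring was empty.
 */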
1075 static int
1076 e82545_transmit(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1077     uint16_t dsize, uint16_t *rhead, int *tdwb)
1078 {
1079 	uint8_t *hdr, *hdrp;
1080 	struct iovec iovb[I82545_MAX_TXSEGS + 2];
1081 	struct iovec tiov[I82545_MAX_TXSEGS + 2];
1082 	struct e1000_context_desc *cd;
1083 	struct ck_info ckinfo[2];
1084 	struct iovec *iov;
1085 	union  e1000_tx_udesc *dsc;
1086 	int desc, dtype, ntype, iovcnt, tcp, tso, paylen, seg, tiovcnt, pv;
1087 	unsigned hdrlen, vlen, pktlen, len, left, mss, now, nnow, nleft, pvoff;
1088 	uint32_t tcpsum, tcpseq;
1089 	uint16_t ipcs, tcpcs, ipid, ohead;
1090 	bool invalid;
1091 
1092 	ckinfo[0].ck_valid = ckinfo[1].ck_valid = 0;
1093 	iovcnt = 0;
1094 	ntype = 0;
1095 	tso = 0;
1096 	pktlen = 0;
1097 	ohead = head;
1098 	invalid = false;
1099 
1100 	/* iovb[0/1] may be used for writable copy of headers. */
1101 	iov = &iovb[2];
1102 
1103 	for (desc = 0; ; desc++, head = (head + 1) % dsize) {
1104 		if (head == tail) {
1105 			*rhead = head;
1106 			return (0);
1107 		}
1108 		dsc = &sc->esc_txdesc[head];
1109 		dtype = e82545_txdesc_type(dsc->td.lower.data);
1110 
1111 		if (desc == 0) {
1112 			switch (dtype) {
1113 			case E1000_TXD_TYP_C:
1114 				DPRINTF("tx ctxt desc idx %d: %016jx "
1115 				    "%08x%08x",
1116 				    head, dsc->td.buffer_addr,
1117 				    dsc->td.upper.data, dsc->td.lower.data);
1118 				/* Save context and return */
1119 				sc->esc_txctx = dsc->cd;
1120 				goto done;
1121 			case E1000_TXD_TYP_L:
1122 				DPRINTF("tx legacy desc idx %d: %08x%08x",
1123 				    head, dsc->td.upper.data, dsc->td.lower.data);
1124 				/*
1125 				 * legacy cksum start valid in first descriptor
1126 				 */
1127 				ntype = dtype;
1128 				ckinfo[0].ck_start = dsc->td.upper.fields.css;
1129 				break;
1130 			case E1000_TXD_TYP_D:
1131 				DPRINTF("tx data desc idx %d: %08x%08x",
1132 				    head, dsc->td.upper.data, dsc->td.lower.data);
1133 				ntype = dtype;
1134 				break;
1135 			default:
1136 				break;
1137 			}
1138 		} else {
1139 			/* Descriptor type must be consistent */
1140 			assert(dtype == ntype);
1141 			DPRINTF("tx next desc idx %d: %08x%08x",
1142 			    head, dsc->td.upper.data, dsc->td.lower.data);
1143 		}
1144 
1145 		len = (dtype == E1000_TXD_TYP_L) ? dsc->td.lower.flags.length :
1146 		    dsc->dd.lower.data & 0xFFFFF;
1147 
1148 		/* Strip checksum supplied by guest. */
1149 		if ((dsc->td.lower.data & E1000_TXD_CMD_EOP) != 0 &&
1150 		    (dsc->td.lower.data & E1000_TXD_CMD_IFCS) == 0) {
1151 			if (len <= 2) {
1152 				WPRINTF("final descriptor too short (%d) -- dropped",
1153 				    len);
1154 				invalid = true;
1155 			} else
1156 				len -= 2;
1157 		}
1158 
1159 		if (len > 0 && iovcnt < I82545_MAX_TXSEGS) {
1160 			iov[iovcnt].iov_base = paddr_guest2host(sc->esc_ctx,
1161 			    dsc->td.buffer_addr, len);
1162 			iov[iovcnt].iov_len = len;
1163 			iovcnt++;
1164 			pktlen += len;
1165 		}
1166 
1167 		/*
1168 		 * Pull out info that is valid in the final descriptor
1169 		 * and exit descriptor loop.
1170 		 */
1171 		if (dsc->td.lower.data & E1000_TXD_CMD_EOP) {
1172 			if (dtype == E1000_TXD_TYP_L) {
1173 				if (dsc->td.lower.data & E1000_TXD_CMD_IC) {
1174 					ckinfo[0].ck_valid = 1;
1175 					ckinfo[0].ck_off =
1176 					    dsc->td.lower.flags.cso;
1177 					ckinfo[0].ck_len = 0;
1178 				}
1179 			} else {
1180 				cd = &sc->esc_txctx;
1181 				if (dsc->dd.lower.data & E1000_TXD_CMD_TSE)
1182 					tso = 1;
1183 				if (dsc->dd.upper.fields.popts &
1184 				    E1000_TXD_POPTS_IXSM)
1185 					ckinfo[0].ck_valid = 1;
1186 				if (dsc->dd.upper.fields.popts &
1187 				    E1000_TXD_POPTS_IXSM || tso) {
1188 					ckinfo[0].ck_start =
1189 					    cd->lower_setup.ip_fields.ipcss;
1190 					ckinfo[0].ck_off =
1191 					    cd->lower_setup.ip_fields.ipcso;
1192 					ckinfo[0].ck_len =
1193 					    cd->lower_setup.ip_fields.ipcse;
1194 				}
1195 				if (dsc->dd.upper.fields.popts &
1196 				    E1000_TXD_POPTS_TXSM)
1197 					ckinfo[1].ck_valid = 1;
1198 				if (dsc->dd.upper.fields.popts &
1199 				    E1000_TXD_POPTS_TXSM || tso) {
1200 					ckinfo[1].ck_start =
1201 					    cd->upper_setup.tcp_fields.tucss;
1202 					ckinfo[1].ck_off =
1203 					    cd->upper_setup.tcp_fields.tucso;
1204 					ckinfo[1].ck_len =
1205 					    cd->upper_setup.tcp_fields.tucse;
1206 				}
1207 			}
1208 			break;
1209 		}
1210 	}
1211 
1212 	if (invalid)
1213 		goto done;
1214 
1215 	if (iovcnt > I82545_MAX_TXSEGS) {
1216 		WPRINTF("tx too many descriptors (%d > %d) -- dropped",
1217 		    iovcnt, I82545_MAX_TXSEGS);
1218 		goto done;
1219 	}
1220 
1221 	hdrlen = vlen = 0;
1222 	/* Estimate writable space for VLAN header insertion. */
1223 	if ((sc->esc_CTRL & E1000_CTRL_VME) &&
1224 	    (dsc->td.lower.data & E1000_TXD_CMD_VLE)) {
1225 		hdrlen = ETHER_ADDR_LEN*2;
1226 		vlen = ETHER_VLAN_ENCAP_LEN;
1227 	}
1228 	if (!tso) {
1229 		/* Estimate required writable space for checksums. */
1230 		if (ckinfo[0].ck_valid)
1231 			hdrlen = MAX(hdrlen, ckinfo[0].ck_off + 2U);
1232 		if (ckinfo[1].ck_valid)
1233 			hdrlen = MAX(hdrlen, ckinfo[1].ck_off + 2U);
1234 		/* Round up writable space to the first vector. */
1235 		if (hdrlen != 0 && iov[0].iov_len > hdrlen &&
1236 		    iov[0].iov_len < hdrlen + 100)
1237 			hdrlen = iov[0].iov_len;
1238 	} else {
1239 		/* In case of TSO header length provided by software. */
1240 		hdrlen = sc->esc_txctx.tcp_seg_setup.fields.hdr_len;
1241 
1242 		/*
1243 		 * Cap the header length at 240 based on 7.2.4.5 of
1244 		 * the Intel 82576EB (Rev 2.63) datasheet.
1245 		 */
1246 		if (hdrlen > 240) {
1247 			WPRINTF("TSO hdrlen too large: %d", hdrlen);
1248 			goto done;
1249 		}
1250 
1251 		/*
1252 		 * If VLAN insertion is requested, ensure the header
1253 		 * at least holds the amount of data copied during
1254 		 * VLAN insertion below.
1255 		 *
1256 		 * XXX: Realistic packets will include a full Ethernet
1257 		 * header before the IP header at ckinfo[0].ck_start,
1258 		 * but this check is sufficient to prevent
1259 		 * out-of-bounds access below.
1260 		 */
1261 		if (vlen != 0 && hdrlen < ETHER_ADDR_LEN*2) {
1262 			WPRINTF("TSO hdrlen too small for vlan insertion "
1263 			    "(%d vs %d) -- dropped", hdrlen,
1264 			    ETHER_ADDR_LEN*2);
1265 			goto done;
1266 		}
1267 
1268 		/*
1269 		 * Ensure that the header length covers the used fields
1270 		 * in the IP and TCP headers as well as the IP and TCP
1271 		 * checksums.  The following fields are accessed below:
1272 		 *
1273 		 * Header | Field | Offset | Length
1274 		 * -------+-------+--------+-------
1275 		 * IPv4   | len   | 2      | 2
1276 		 * IPv4   | ID    | 4      | 2
1277 		 * IPv6   | len   | 4      | 2
1278 		 * TCP    | seq # | 4      | 4
1279 		 * TCP    | flags | 13     | 1
1280 		 * UDP    | len   | 4      | 4
1281 		 */
1282 		if (hdrlen < ckinfo[0].ck_start + 6U ||
1283 		    hdrlen < ckinfo[0].ck_off + 2U) {
1284 			WPRINTF("TSO hdrlen too small for IP fields (%d) "
1285 			    "-- dropped", hdrlen);
1286 			goto done;
1287 		}
1288 		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) {
1289 			if (hdrlen < ckinfo[1].ck_start + 14U) {
1290 				WPRINTF("TSO hdrlen too small for TCP fields "
1291 				    "(%d) -- dropped", hdrlen);
1292 				goto done;
1293 			}
1294 		} else {
1295 			if (hdrlen < ckinfo[1].ck_start + 8U) {
1296 				WPRINTF("TSO hdrlen too small for UDP fields "
1297 				    "(%d) -- dropped", hdrlen);
1298 				goto done;
1299 			}
1300 		}
1301 		if (ckinfo[1].ck_valid && hdrlen < ckinfo[1].ck_off + 2U) {
1302 			WPRINTF("TSO hdrlen too small for TCP/UDP fields "
1303 			    "(%d) -- dropped", hdrlen);
1304 			goto done;
1305 		}
1311 	}
1312 
1313 	if (pktlen < hdrlen + vlen) {
1314 		WPRINTF("packet too small for writable header");
1315 		goto done;
1316 	}
1317 
1318 	/* Allocate, fill and prepend writable header vector. */
1319 	if (hdrlen + vlen != 0) {
1320 		hdr = __builtin_alloca(hdrlen + vlen);
1321 		hdr += vlen;
1322 		for (left = hdrlen, hdrp = hdr; left > 0;
1323 		    left -= now, hdrp += now) {
1324 			now = MIN(left, iov->iov_len);
1325 			memcpy(hdrp, iov->iov_base, now);
1326 #ifdef	__FreeBSD__
1327 			iov->iov_base = (uint8_t *)iov->iov_base + now;
1328 #else
1329 			/*
1330 			 * The type of iov_base changed in SUS (XPG4v2) from
1331 			 * caddr_t (char * - note signed) to 'void *'. On
1332 			 * illumos, bhyve is not currently compiled with XPG4v2
1333 			 * or higher, and so we can't cast the RHS to unsigned.
1334 			 * error: pointer targets in assignment differ in
1335 			 *	  signedness
1336 			 * This also means that we need to apply some casts to
1337 			 * (caddr_t) below.
1338 			 */
1339 			iov->iov_base += now;
1340 #endif
1341 			iov->iov_len -= now;
1342 			if (iov->iov_len == 0) {
1343 				iov++;
1344 				iovcnt--;
1345 			}
1346 		}
1347 		iov--;
1348 		iovcnt++;
1349 #ifdef __FreeBSD__
1350 		iov->iov_base = hdr;
1351 #else
1352 		iov->iov_base = (caddr_t)hdr;
1353 #endif
1354 		iov->iov_len = hdrlen;
1355 	} else
1356 		hdr = NULL;
1357 
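	/*
	 * VLAN insertion rewrites the start of the writable header: the 12
	 * bytes of destination + source MAC are slid down by
	 * ETHER_VLAN_ENCAP_LEN and a 4-byte 802.1Q tag (TPID taken from
	 * VET, TCI from the descriptor's "special" field) is placed after
	 * them, so every checksum offset also shifts by 4.
	 */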
1358 	/* Insert VLAN tag. */
1359 	if (vlen != 0) {
1360 		hdr -= ETHER_VLAN_ENCAP_LEN;
1361 		memmove(hdr, hdr + ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN*2);
1362 		hdrlen += ETHER_VLAN_ENCAP_LEN;
1363 		hdr[ETHER_ADDR_LEN*2 + 0] = sc->esc_VET >> 8;
1364 		hdr[ETHER_ADDR_LEN*2 + 1] = sc->esc_VET & 0xff;
1365 		hdr[ETHER_ADDR_LEN*2 + 2] = dsc->td.upper.fields.special >> 8;
1366 		hdr[ETHER_ADDR_LEN*2 + 3] = dsc->td.upper.fields.special & 0xff;
1367 #ifdef __FreeBSD__
1368 		iov->iov_base = hdr;
1369 #else
1370 		iov->iov_base = (caddr_t)hdr;
1371 #endif
1372 		iov->iov_len += ETHER_VLAN_ENCAP_LEN;
1373 		/* Correct checksum offsets after VLAN tag insertion. */
1374 		ckinfo[0].ck_start += ETHER_VLAN_ENCAP_LEN;
1375 		ckinfo[0].ck_off += ETHER_VLAN_ENCAP_LEN;
1376 		if (ckinfo[0].ck_len != 0)
1377 			ckinfo[0].ck_len += ETHER_VLAN_ENCAP_LEN;
1378 		ckinfo[1].ck_start += ETHER_VLAN_ENCAP_LEN;
1379 		ckinfo[1].ck_off += ETHER_VLAN_ENCAP_LEN;
1380 		if (ckinfo[1].ck_len != 0)
1381 			ckinfo[1].ck_len += ETHER_VLAN_ENCAP_LEN;
1382 	}
1383 
1384 	/* Simple non-TSO case. */
1385 	if (!tso) {
1386 		/* Calculate checksums and transmit. */
1387 		if (ckinfo[0].ck_valid)
1388 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[0]);
1389 		if (ckinfo[1].ck_valid)
1390 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[1]);
1391 		e82545_transmit_backend(sc, iov, iovcnt);
1392 		goto done;
1393 	}
1394 
1395 	/* Doing TSO. */
1396 	tcp = (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) != 0;
1397 	mss = sc->esc_txctx.tcp_seg_setup.fields.mss;
1398 	paylen = (sc->esc_txctx.cmd_and_length & 0x000fffff);
1399 	DPRINTF("tx %s segmentation offload %d+%d/%u bytes %d iovs",
1400 	    tcp ? "TCP" : "UDP", hdrlen, paylen, mss, iovcnt);
1401 	ipid = ntohs(*(uint16_t *)&hdr[ckinfo[0].ck_start + 4]);
1402 	tcpseq = 0;
1403 	if (tcp)
1404 		tcpseq = ntohl(*(uint32_t *)&hdr[ckinfo[1].ck_start + 4]);
1405 	ipcs = *(uint16_t *)&hdr[ckinfo[0].ck_off];
1406 	tcpcs = 0;
1407 	if (ckinfo[1].ck_valid)	/* Save partial pseudo-header checksum. */
1408 		tcpcs = *(uint16_t *)&hdr[ckinfo[1].ck_off];
1409 	pv = 1;
1410 	pvoff = 0;
1411 	for (seg = 0, left = paylen; left > 0; seg++, left -= now) {
1412 		now = MIN(left, mss);
1413 
1414 		/* Construct IOVs for the segment. */
1415 		/* Include whole original header. */
1416 #ifdef __FreeBSD__
1417 		tiov[0].iov_base = hdr;
1418 #else
1419 		tiov[0].iov_base = (caddr_t)hdr;
1420 #endif
1421 		tiov[0].iov_len = hdrlen;
1422 		tiovcnt = 1;
1423 		/* Include respective part of payload IOV. */
1424 		for (nleft = now; pv < iovcnt && nleft > 0; nleft -= nnow) {
1425 			nnow = MIN(nleft, iov[pv].iov_len - pvoff);
1426 #ifdef	__FreeBSD__
1427 			tiov[tiovcnt].iov_base = (uint8_t *)iov[pv].iov_base +
1428 			    pvoff;
1429 #else
1430 			tiov[tiovcnt].iov_base = (caddr_t)iov[pv].iov_base + pvoff;
1431 #endif
1432 			tiov[tiovcnt++].iov_len = nnow;
1433 			if (pvoff + nnow == iov[pv].iov_len) {
1434 				pv++;
1435 				pvoff = 0;
1436 			} else
1437 				pvoff += nnow;
1438 		}
1439 		DPRINTF("tx segment %d %d+%d bytes %d iovs",
1440 		    seg, hdrlen, now, tiovcnt);
1441 
1442 		/* Update IP header. */
1443 		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_IP) {
1444 			/* IPv4 -- set length and ID */
1445 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 2] =
1446 			    htons(hdrlen - ckinfo[0].ck_start + now);
1447 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1448 			    htons(ipid + seg);
1449 		} else {
1450 			/* IPv6 -- set length */
1451 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
1452 			    htons(hdrlen - ckinfo[0].ck_start - 40 +
1453 				  now);
1454 		}
1455 
1456 		/* Update pseudo-header checksum. */
1457 		tcpsum = tcpcs;
1458 		tcpsum += htons(hdrlen - ckinfo[1].ck_start + now);
1459 
1460 		/* Update TCP/UDP headers. */
1461 		if (tcp) {
1462 			/* Update sequence number and FIN/PUSH flags. */
1463 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1464 			    htonl(tcpseq + paylen - left);
1465 			if (now < left) {
1466 				hdr[ckinfo[1].ck_start + 13] &=
1467 				    ~(TH_FIN | TH_PUSH);
1468 			}
1469 		} else {
1470 			/* Update payload length. */
1471 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
1472 			    hdrlen - ckinfo[1].ck_start + now;
1473 		}
1474 
1475 		/* Calculate checksums and transmit. */
1476 		if (ckinfo[0].ck_valid) {
1477 			*(uint16_t *)&hdr[ckinfo[0].ck_off] = ipcs;
1478 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[0]);
1479 		}
1480 		if (ckinfo[1].ck_valid) {
1481 			*(uint16_t *)&hdr[ckinfo[1].ck_off] =
1482 			    e82545_carry(tcpsum);
1483 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[1]);
1484 		}
1485 		e82545_transmit_backend(sc, tiov, tiovcnt);
1486 	}
1487 
1488 done:
1489 	head = (head + 1) % dsize;
1490 	e82545_transmit_done(sc, ohead, head, dsize, tdwb);
1491 
1492 	*rhead = head;
1493 	return (desc + 1);
1494 }
1495 
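/*
 * Drain roughly a quarter of the transmit ring per pass.  The softc lock
 * is dropped while descriptors are read and the backend is called, then
 * retaken to publish the new head and raise TXDW (descriptor written back)
 * and/or TXQE (queue empty) as accumulated.
 */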
1496 static void
1497 e82545_tx_run(struct e82545_softc *sc)
1498 {
1499 	uint32_t cause;
1500 	uint16_t head, rhead, tail, size;
1501 	int lim, tdwb, sent;
1502 
1503 	size = sc->esc_TDLEN / 16;
1504 	if (size == 0)
1505 		return;
1506 
1507 	head = sc->esc_TDH % size;
1508 	tail = sc->esc_TDT % size;
1509 	DPRINTF("tx_run: head %x, rhead %x, tail %x",
1510 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1511 
1512 	pthread_mutex_unlock(&sc->esc_mtx);
1513 	rhead = head;
1514 	tdwb = 0;
1515 	for (lim = size / 4; sc->esc_tx_enabled && lim > 0; lim -= sent) {
1516 		sent = e82545_transmit(sc, head, tail, size, &rhead, &tdwb);
1517 		if (sent == 0)
1518 			break;
1519 		head = rhead;
1520 	}
1521 	pthread_mutex_lock(&sc->esc_mtx);
1522 
1523 	sc->esc_TDH = head;
1524 	sc->esc_TDHr = rhead;
1525 	cause = 0;
1526 	if (tdwb)
1527 		cause |= E1000_ICR_TXDW;
1528 	if (lim != size / 4 && sc->esc_TDH == sc->esc_TDT)
1529 		cause |= E1000_ICR_TXQE;
1530 	if (cause)
1531 		e82545_icr_assert(sc, cause);
1532 
1533 	DPRINTF("tx_run done: head %x, rhead %x, tail %x",
1534 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
1535 }
1536 
1537 static _Noreturn void *
1538 e82545_tx_thread(void *param)
1539 {
1540 	struct e82545_softc *sc = param;
1541 
1542 	pthread_mutex_lock(&sc->esc_mtx);
1543 	for (;;) {
1544 		while (!sc->esc_tx_enabled || sc->esc_TDHr == sc->esc_TDT) {
1545 			if (sc->esc_tx_enabled && sc->esc_TDHr != sc->esc_TDT)
1546 				break;
1547 			sc->esc_tx_active = 0;
1548 			if (sc->esc_tx_enabled == 0)
1549 				pthread_cond_signal(&sc->esc_tx_cond);
1550 			pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1551 		}
1552 		sc->esc_tx_active = 1;
1553 
1554 		/* Process some tx descriptors.  Lock dropped inside. */
1555 		e82545_tx_run(sc);
1556 	}
1557 }
1558 
1559 static void
1560 e82545_tx_start(struct e82545_softc *sc)
1561 {
1562 
1563 	if (sc->esc_tx_active == 0)
1564 		pthread_cond_signal(&sc->esc_tx_cond);
1565 }
1566 
1567 static void
1568 e82545_tx_enable(struct e82545_softc *sc)
1569 {
1570 
1571 	sc->esc_tx_enabled = 1;
1572 }
1573 
1574 static void
1575 e82545_tx_disable(struct e82545_softc *sc)
1576 {
1577 
1578 	sc->esc_tx_enabled = 0;
1579 	while (sc->esc_tx_active)
1580 		pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
1581 }
1582 
1583 static void
1584 e82545_rx_enable(struct e82545_softc *sc)
1585 {
1586 
1587 	sc->esc_rx_enabled = 1;
1588 }
1589 
1590 static void
1591 e82545_rx_disable(struct e82545_softc *sc)
1592 {
1593 
1594 	sc->esc_rx_enabled = 0;
1595 	while (sc->esc_rx_active)
1596 		pthread_cond_wait(&sc->esc_rx_cond, &sc->esc_mtx);
1597 }
1598 
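/*
 * RAL holds the low four MAC octets (octet[0] in bits 7:0 through octet[3]
 * in bits 31:24); RAH holds octets 4-5 plus the address-select field in
 * bits 17:16 and the Address Valid bit in bit 31.  An arbitrary example
 * MAC of 00:a0:98:12:34:56 would read back as RAL 0x1298a000 and RAH
 * 0x80005634 once marked valid.
 */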
1599 static void
1600 e82545_write_ra(struct e82545_softc *sc, int reg, uint32_t wval)
1601 {
1602 	struct eth_uni *eu;
1603 	int idx;
1604 
1605 	idx = reg >> 1;
1606 	assert(idx < 15);
1607 
1608 	eu = &sc->esc_uni[idx];
1609 
1610 	if (reg & 0x1) {
1611 		/* RAH */
1612 		eu->eu_valid = ((wval & E1000_RAH_AV) == E1000_RAH_AV);
1613 		eu->eu_addrsel = (wval >> 16) & 0x3;
1614 		eu->eu_eth.octet[5] = wval >> 8;
1615 		eu->eu_eth.octet[4] = wval;
1616 	} else {
1617 		/* RAL */
1618 		eu->eu_eth.octet[3] = wval >> 24;
1619 		eu->eu_eth.octet[2] = wval >> 16;
1620 		eu->eu_eth.octet[1] = wval >> 8;
1621 		eu->eu_eth.octet[0] = wval;
1622 	}
1623 }
1624 
1625 static uint32_t
1626 e82545_read_ra(struct e82545_softc *sc, int reg)
1627 {
1628 	struct eth_uni *eu;
1629 	uint32_t retval;
1630 	int idx;
1631 
1632 	idx = reg >> 1;
1633 	assert(idx < 15);
1634 
1635 	eu = &sc->esc_uni[idx];
1636 
1637 	if (reg & 0x1) {
1638 		/* RAH */
1639 		retval = (eu->eu_valid << 31) |
1640 			 (eu->eu_addrsel << 16) |
1641 			 (eu->eu_eth.octet[5] << 8) |
1642 			 eu->eu_eth.octet[4];
1643 	} else {
1644 		/* RAL */
1645 		retval = (eu->eu_eth.octet[3] << 24) |
1646 			 (eu->eu_eth.octet[2] << 16) |
1647 			 (eu->eu_eth.octet[1] << 8) |
1648 			 eu->eu_eth.octet[0];
1649 	}
1650 
1651 	return (retval);
1652 }
1653 
1654 static void
1655 e82545_write_register(struct e82545_softc *sc, uint32_t offset, uint32_t value)
1656 {
1657 	int ridx;
1658 
1659 	if (offset & 0x3) {
1660 		DPRINTF("Unaligned register write offset:0x%x value:0x%x", offset, value);
1661 		return;
1662 	}
1663 	DPRINTF("Register write: 0x%x value: 0x%x", offset, value);
1664 
1665 	switch (offset) {
1666 	case E1000_CTRL:
1667 	case E1000_CTRL_DUP:
1668 		e82545_devctl(sc, value);
1669 		break;
1670 	case E1000_FCAL:
1671 		sc->esc_FCAL = value;
1672 		break;
1673 	case E1000_FCAH:
1674 		sc->esc_FCAH = value & ~0xFFFF0000;
1675 		break;
1676 	case E1000_FCT:
1677 		sc->esc_FCT = value & ~0xFFFF0000;
1678 		break;
1679 	case E1000_VET:
1680 		sc->esc_VET = value & ~0xFFFF0000;
1681 		break;
1682 	case E1000_FCTTV:
1683 		sc->esc_FCTTV = value & ~0xFFFF0000;
1684 		break;
1685 	case E1000_LEDCTL:
1686 		sc->esc_LEDCTL = value & ~0x30303000;
1687 		break;
1688 	case E1000_PBA:
1689 		sc->esc_PBA = value & 0x0000FF80;
1690 		break;
1691 	case E1000_ICR:
1692 	case E1000_ITR:
1693 	case E1000_ICS:
1694 	case E1000_IMS:
1695 	case E1000_IMC:
1696 		e82545_intr_write(sc, offset, value);
1697 		break;
1698 	case E1000_RCTL:
1699 		e82545_rx_ctl(sc, value);
1700 		break;
1701 	case E1000_FCRTL:
1702 		sc->esc_FCRTL = value & ~0xFFFF0007;
1703 		break;
1704 	case E1000_FCRTH:
1705 		sc->esc_FCRTH = value & ~0xFFFF0007;
1706 		break;
1707 	case E1000_RDBAL(0):
1708 		sc->esc_RDBAL = value & ~0xF;
1709 		if (sc->esc_rx_enabled) {
1710 			/* Apparently legal: update cached address */
1711 			e82545_rx_update_rdba(sc);
1712 		}
1713 		break;
1714 	case E1000_RDBAH(0):
1715 		assert(!sc->esc_rx_enabled);
1716 		sc->esc_RDBAH = value;
1717 		break;
1718 	case E1000_RDLEN(0):
1719 		assert(!sc->esc_rx_enabled);
1720 		sc->esc_RDLEN = value & ~0xFFF0007F;
1721 		break;
1722 	case E1000_RDH(0):
1723 		/* XXX should only ever be zero ? Range check ? */
1724 		sc->esc_RDH = value;
1725 		break;
1726 	case E1000_RDT(0):
1727 		/* XXX if this opens up the rx ring, do something ? */
1728 		sc->esc_RDT = value;
1729 		break;
1730 	case E1000_RDTR:
1731 		/* ignore FPD bit 31 */
1732 		sc->esc_RDTR = value & ~0xFFFF0000;
1733 		break;
1734 	case E1000_RXDCTL(0):
1735 		sc->esc_RXDCTL = value & ~0xFEC0C0C0;
1736 		break;
1737 	case E1000_RADV:
1738 		sc->esc_RADV = value & ~0xFFFF0000;
1739 		break;
1740 	case E1000_RSRPD:
1741 		sc->esc_RSRPD = value & ~0xFFFFF000;
1742 		break;
1743 	case E1000_RXCSUM:
1744 		sc->esc_RXCSUM = value & ~0xFFFFF800;
1745 		break;
1746 	case E1000_TXCW:
1747 		sc->esc_TXCW = value & ~0x3FFF0000;
1748 		break;
1749 	case E1000_TCTL:
1750 		e82545_tx_ctl(sc, value);
1751 		break;
1752 	case E1000_TIPG:
1753 		sc->esc_TIPG = value;
1754 		break;
1755 	case E1000_AIT:
1756 		sc->esc_AIT = value;
1757 		break;
1758 	case E1000_TDBAL(0):
1759 		sc->esc_TDBAL = value & ~0xF;
1760 		if (sc->esc_tx_enabled)
1761 			e82545_tx_update_tdba(sc);
1762 		break;
1763 	case E1000_TDBAH(0):
1764 		sc->esc_TDBAH = value;
1765 		if (sc->esc_tx_enabled)
1766 			e82545_tx_update_tdba(sc);
1767 		break;
1768 	case E1000_TDLEN(0):
1769 		sc->esc_TDLEN = value & ~0xFFF0007F;
1770 		if (sc->esc_tx_enabled)
1771 			e82545_tx_update_tdba(sc);
1772 		break;
1773 	case E1000_TDH(0):
1774 		if (sc->esc_tx_enabled) {
1775 			WPRINTF("ignoring write to TDH while transmit enabled");
1776 			break;
1777 		}
1778 		if (value != 0) {
1779 			WPRINTF("ignoring non-zero value written to TDH");
1780 			break;
1781 		}
1782 		sc->esc_TDHr = sc->esc_TDH = value;
1783 		break;
1784 	case E1000_TDT(0):
1785 		sc->esc_TDT = value;
1786 		if (sc->esc_tx_enabled)
1787 			e82545_tx_start(sc);
1788 		break;
1789 	case E1000_TIDV:
1790 		sc->esc_TIDV = value & ~0xFFFF0000;
1791 		break;
1792 	case E1000_TXDCTL(0):
1793 		//assert(!sc->esc_tx_enabled);
1794 		sc->esc_TXDCTL = value & ~0xC0C0C0;
1795 		break;
1796 	case E1000_TADV:
1797 		sc->esc_TADV = value & ~0xFFFF0000;
1798 		break;
1799 	case E1000_RAL(0) ... E1000_RAH(15):
1800 		/* convert to u32 offset */
1801 		ridx = (offset - E1000_RAL(0)) >> 2;
1802 		e82545_write_ra(sc, ridx, value);
1803 		break;
1804 	case E1000_MTA ... (E1000_MTA + (127*4)):
1805 		sc->esc_fmcast[(offset - E1000_MTA) >> 2] = value;
1806 		break;
1807 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
1808 		sc->esc_fvlan[(offset - E1000_VFTA) >> 2] = value;
1809 		break;
1810 	case E1000_EECD:
1811 	{
1812 		//DPRINTF("EECD write 0x%x -> 0x%x", sc->eeprom_control, value);
1813 		/* edge triggered low->high */
1814 		uint32_t eecd_strobe = ((sc->eeprom_control & E1000_EECD_SK) ?
1815 			0 : (value & E1000_EECD_SK));
1816 		uint32_t eecd_mask = (E1000_EECD_SK|E1000_EECD_CS|
1817 					E1000_EECD_DI|E1000_EECD_REQ);
1818 		sc->eeprom_control &= ~eecd_mask;
1819 		sc->eeprom_control |= (value & eecd_mask);
1820 		/* grant/revoke immediately */
1821 		if (value & E1000_EECD_REQ) {
1822 			sc->eeprom_control |= E1000_EECD_GNT;
1823 		} else {
1824 			sc->eeprom_control &= ~E1000_EECD_GNT;
1825 		}
1826 		if (eecd_strobe && (sc->eeprom_control & E1000_EECD_CS)) {
1827 			e82545_eecd_strobe(sc);
1828 		}
1829 		return;
1830 	}
1831 	case E1000_MDIC:
1832 	{
1833 		uint8_t reg_addr = (uint8_t)((value & E1000_MDIC_REG_MASK) >>
1834 						E1000_MDIC_REG_SHIFT);
1835 		uint8_t phy_addr = (uint8_t)((value & E1000_MDIC_PHY_MASK) >>
1836 						E1000_MDIC_PHY_SHIFT);
1837 		sc->mdi_control =
1838 			(value & ~(E1000_MDIC_ERROR|E1000_MDIC_DEST));
1839 		if ((value & E1000_MDIC_READY) != 0) {
1840 			DPRINTF("Incorrect MDIC ready bit: 0x%x", value);
1841 			return;
1842 		}
1843 		switch (value & E82545_MDIC_OP_MASK) {
1844 		case E1000_MDIC_OP_READ:
1845 			sc->mdi_control &= ~E82545_MDIC_DATA_MASK;
1846 			sc->mdi_control |= e82545_read_mdi(sc, reg_addr, phy_addr);
1847 			break;
1848 		case E1000_MDIC_OP_WRITE:
1849 			e82545_write_mdi(sc, reg_addr, phy_addr,
1850 				value & E82545_MDIC_DATA_MASK);
1851 			break;
1852 		default:
1853 			DPRINTF("Unknown MDIC op: 0x%x", value);
1854 			return;
1855 		}
1856 		/* TODO: barrier? */
1857 		sc->mdi_control |= E1000_MDIC_READY;
1858 		if (value & E82545_MDIC_IE) {
1859 			// TODO: generate interrupt
1860 		}
1861 		return;
1862 	}
1863 	case E1000_MANC:
1864 	case E1000_STATUS:
1865 		return;
1866 	default:
1867 		DPRINTF("Unknown write register: 0x%x value:0x%x", offset, value);
1868 		return;
1869 	}
1870 }
1871 
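/*
 * Register read dispatch, the counterpart of e82545_write_register().
 * Aligned 32-bit reads return the latched softc copy of each register.
 * STATUS always reports full duplex, link up and 1000 Mb/s; the statistics
 * block returns the counters maintained by the rx/tx paths, and error and
 * flow-control counters that this model never increments read as zero.
 */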
1872 static uint32_t
1873 e82545_read_register(struct e82545_softc *sc, uint32_t offset)
1874 {
1875 	uint32_t retval;
1876 	int ridx;
1877 
1878 	if (offset & 0x3) {
1879 		DPRINTF("Unaligned register read offset:0x%x", offset);
1880 		return 0;
1881 	}
1882 
1883 	DPRINTF("Register read: 0x%x", offset);
1884 
1885 	switch (offset) {
1886 	case E1000_CTRL:
1887 		retval = sc->esc_CTRL;
1888 		break;
1889 	case E1000_STATUS:
1890 		retval = E1000_STATUS_FD | E1000_STATUS_LU |
1891 		    E1000_STATUS_SPEED_1000;
1892 		break;
1893 	case E1000_FCAL:
1894 		retval = sc->esc_FCAL;
1895 		break;
1896 	case E1000_FCAH:
1897 		retval = sc->esc_FCAH;
1898 		break;
1899 	case E1000_FCT:
1900 		retval = sc->esc_FCT;
1901 		break;
1902 	case E1000_VET:
1903 		retval = sc->esc_VET;
1904 		break;
1905 	case E1000_FCTTV:
1906 		retval = sc->esc_FCTTV;
1907 		break;
1908 	case E1000_LEDCTL:
1909 		retval = sc->esc_LEDCTL;
1910 		break;
1911 	case E1000_PBA:
1912 		retval = sc->esc_PBA;
1913 		break;
1914 	case E1000_ICR:
1915 	case E1000_ITR:
1916 	case E1000_ICS:
1917 	case E1000_IMS:
1918 	case E1000_IMC:
1919 		retval = e82545_intr_read(sc, offset);
1920 		break;
1921 	case E1000_RCTL:
1922 		retval = sc->esc_RCTL;
1923 		break;
1924 	case E1000_FCRTL:
1925 		retval = sc->esc_FCRTL;
1926 		break;
1927 	case E1000_FCRTH:
1928 		retval = sc->esc_FCRTH;
1929 		break;
1930 	case E1000_RDBAL(0):
1931 		retval = sc->esc_RDBAL;
1932 		break;
1933 	case E1000_RDBAH(0):
1934 		retval = sc->esc_RDBAH;
1935 		break;
1936 	case E1000_RDLEN(0):
1937 		retval = sc->esc_RDLEN;
1938 		break;
1939 	case E1000_RDH(0):
1940 		retval = sc->esc_RDH;
1941 		break;
1942 	case E1000_RDT(0):
1943 		retval = sc->esc_RDT;
1944 		break;
1945 	case E1000_RDTR:
1946 		retval = sc->esc_RDTR;
1947 		break;
1948 	case E1000_RXDCTL(0):
1949 		retval = sc->esc_RXDCTL;
1950 		break;
1951 	case E1000_RADV:
1952 		retval = sc->esc_RADV;
1953 		break;
1954 	case E1000_RSRPD:
1955 		retval = sc->esc_RSRPD;
1956 		break;
1957 	case E1000_RXCSUM:
1958 		retval = sc->esc_RXCSUM;
1959 		break;
1960 	case E1000_TXCW:
1961 		retval = sc->esc_TXCW;
1962 		break;
1963 	case E1000_TCTL:
1964 		retval = sc->esc_TCTL;
1965 		break;
1966 	case E1000_TIPG:
1967 		retval = sc->esc_TIPG;
1968 		break;
1969 	case E1000_AIT:
1970 		retval = sc->esc_AIT;
1971 		break;
1972 	case E1000_TDBAL(0):
1973 		retval = sc->esc_TDBAL;
1974 		break;
1975 	case E1000_TDBAH(0):
1976 		retval = sc->esc_TDBAH;
1977 		break;
1978 	case E1000_TDLEN(0):
1979 		retval = sc->esc_TDLEN;
1980 		break;
1981 	case E1000_TDH(0):
1982 		retval = sc->esc_TDH;
1983 		break;
1984 	case E1000_TDT(0):
1985 		retval = sc->esc_TDT;
1986 		break;
1987 	case E1000_TIDV:
1988 		retval = sc->esc_TIDV;
1989 		break;
1990 	case E1000_TXDCTL(0):
1991 		retval = sc->esc_TXDCTL;
1992 		break;
1993 	case E1000_TADV:
1994 		retval = sc->esc_TADV;
1995 		break;
1996 	case E1000_RAL(0) ... E1000_RAH(15):
1997 		/* convert to u32 offset */
1998 		ridx = (offset - E1000_RAL(0)) >> 2;
1999 		retval = e82545_read_ra(sc, ridx);
2000 		break;
2001 	case E1000_MTA ... (E1000_MTA + (127*4)):
2002 		retval = sc->esc_fmcast[(offset - E1000_MTA) >> 2];
2003 		break;
2004 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
2005 		retval = sc->esc_fvlan[(offset - E1000_VFTA) >> 2];
2006 		break;
2007 	case E1000_EECD:
2008 		//DPRINTF("EECD read %x", sc->eeprom_control);
2009 		retval = sc->eeprom_control;
2010 		break;
2011 	case E1000_MDIC:
2012 		retval = sc->mdi_control;
2013 		break;
2014 	case E1000_MANC:
2015 		retval = 0;
2016 		break;
2017 	/* stats that we emulate. */
2018 	case E1000_MPC:
2019 		retval = sc->missed_pkt_count;
2020 		break;
2021 	case E1000_PRC64:
2022 		retval = sc->pkt_rx_by_size[0];
2023 		break;
2024 	case E1000_PRC127:
2025 		retval = sc->pkt_rx_by_size[1];
2026 		break;
2027 	case E1000_PRC255:
2028 		retval = sc->pkt_rx_by_size[2];
2029 		break;
2030 	case E1000_PRC511:
2031 		retval = sc->pkt_rx_by_size[3];
2032 		break;
2033 	case E1000_PRC1023:
2034 		retval = sc->pkt_rx_by_size[4];
2035 		break;
2036 	case E1000_PRC1522:
2037 		retval = sc->pkt_rx_by_size[5];
2038 		break;
2039 	case E1000_GPRC:
2040 		retval = sc->good_pkt_rx_count;
2041 		break;
2042 	case E1000_BPRC:
2043 		retval = sc->bcast_pkt_rx_count;
2044 		break;
2045 	case E1000_MPRC:
2046 		retval = sc->mcast_pkt_rx_count;
2047 		break;
2048 	case E1000_GPTC:
2049 	case E1000_TPT:
2050 		retval = sc->good_pkt_tx_count;
2051 		break;
2052 	case E1000_GORCL:
2053 		retval = (uint32_t)sc->good_octets_rx;
2054 		break;
2055 	case E1000_GORCH:
2056 		retval = (uint32_t)(sc->good_octets_rx >> 32);
2057 		break;
2058 	case E1000_TOTL:
2059 	case E1000_GOTCL:
2060 		retval = (uint32_t)sc->good_octets_tx;
2061 		break;
2062 	case E1000_TOTH:
2063 	case E1000_GOTCH:
2064 		retval = (uint32_t)(sc->good_octets_tx >> 32);
2065 		break;
2066 	case E1000_ROC:
2067 		retval = sc->oversize_rx_count;
2068 		break;
2069 	case E1000_TORL:
2070 		retval = (uint32_t)(sc->good_octets_rx + sc->missed_octets);
2071 		break;
2072 	case E1000_TORH:
2073 		retval = (uint32_t)((sc->good_octets_rx +
2074 		    sc->missed_octets) >> 32);
2075 		break;
2076 	case E1000_TPR:
2077 		retval = sc->good_pkt_rx_count + sc->missed_pkt_count +
2078 		    sc->oversize_rx_count;
2079 		break;
2080 	case E1000_PTC64:
2081 		retval = sc->pkt_tx_by_size[0];
2082 		break;
2083 	case E1000_PTC127:
2084 		retval = sc->pkt_tx_by_size[1];
2085 		break;
2086 	case E1000_PTC255:
2087 		retval = sc->pkt_tx_by_size[2];
2088 		break;
2089 	case E1000_PTC511:
2090 		retval = sc->pkt_tx_by_size[3];
2091 		break;
2092 	case E1000_PTC1023:
2093 		retval = sc->pkt_tx_by_size[4];
2094 		break;
2095 	case E1000_PTC1522:
2096 		retval = sc->pkt_tx_by_size[5];
2097 		break;
2098 	case E1000_MPTC:
2099 		retval = sc->mcast_pkt_tx_count;
2100 		break;
2101 	case E1000_BPTC:
2102 		retval = sc->bcast_pkt_tx_count;
2103 		break;
2104 	case E1000_TSCTC:
2105 		retval = sc->tso_tx_count;
2106 		break;
2107 	/* stats that are always 0. */
2108 	case E1000_CRCERRS:
2109 	case E1000_ALGNERRC:
2110 	case E1000_SYMERRS:
2111 	case E1000_RXERRC:
2112 	case E1000_SCC:
2113 	case E1000_ECOL:
2114 	case E1000_MCC:
2115 	case E1000_LATECOL:
2116 	case E1000_COLC:
2117 	case E1000_DC:
2118 	case E1000_TNCRS:
2119 	case E1000_SEC:
2120 	case E1000_CEXTERR:
2121 	case E1000_RLEC:
2122 	case E1000_XONRXC:
2123 	case E1000_XONTXC:
2124 	case E1000_XOFFRXC:
2125 	case E1000_XOFFTXC:
2126 	case E1000_FCRUC:
2127 	case E1000_RNBC:
2128 	case E1000_RUC:
2129 	case E1000_RFC:
2130 	case E1000_RJC:
2131 	case E1000_MGTPRC:
2132 	case E1000_MGTPDC:
2133 	case E1000_MGTPTC:
2134 	case E1000_TSCTFC:
2135 		retval = 0;
2136 		break;
2137 	default:
2138 		DPRINTF("Unknown read register: 0x%x", offset);
2139 		retval = 0;
2140 		break;
2141 	}
2142 
2143 	return (retval);
2144 }
2145 
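/*
 * BAR write entry point.  The IO BAR implements the usual e1000 indirect
 * window: a 32-bit register offset is first latched through IOADDR and a
 * subsequent IODATA write is forwarded to e82545_write_register(); the
 * register BAR maps the register file directly.  All paths are serialized
 * on esc_mtx.  A guest access through the IO window looks roughly like
 * this (illustrative sketch, not code from this file):
 *
 *	outl(iobase + E82545_IOADDR, E1000_CTRL);
 *	outl(iobase + E82545_IODATA, ctrl_value);
 */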
2146 static void
2147 e82545_write(struct pci_devinst *pi, int baridx, uint64_t offset, int size,
2148     uint64_t value)
2149 {
2150 	struct e82545_softc *sc;
2151 
2152 	//DPRINTF("Write bar:%d offset:0x%lx value:0x%lx size:%d", baridx, offset, value, size);
2153 
2154 	sc = pi->pi_arg;
2155 
2156 	pthread_mutex_lock(&sc->esc_mtx);
2157 
2158 	switch (baridx) {
2159 	case E82545_BAR_IO:
2160 		switch (offset) {
2161 		case E82545_IOADDR:
2162 			if (size != 4) {
2163 				DPRINTF("Wrong io addr write sz:%d value:0x%lx", size, value);
2164 			} else
2165 				sc->io_addr = (uint32_t)value;
2166 			break;
2167 		case E82545_IODATA:
2168 			if (size != 4) {
2169 				DPRINTF("Wrong io data write size:%d value:0x%lx", size, value);
2170 			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2171 				DPRINTF("Non-register io write addr:0x%x value:0x%lx", sc->io_addr, value);
2172 			} else
2173 				e82545_write_register(sc, sc->io_addr,
2174 						      (uint32_t)value);
2175 			break;
2176 		default:
2177 			DPRINTF("Unknown io bar write offset:0x%lx value:0x%lx size:%d", offset, value, size);
2178 			break;
2179 		}
2180 		break;
2181 	case E82545_BAR_REGISTER:
2182 		if (size != 4) {
2183 			DPRINTF("Wrong register write size:%d offset:0x%lx value:0x%lx", size, offset, value);
2184 		} else
2185 			e82545_write_register(sc, (uint32_t)offset,
2186 					      (uint32_t)value);
2187 		break;
2188 	default:
2189 		DPRINTF("Unknown write bar:%d off:0x%lx val:0x%lx size:%d",
2190 			baridx, offset, value, size);
2191 	}
2192 
2193 	pthread_mutex_unlock(&sc->esc_mtx);
2194 }
2195 
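/*
 * BAR read entry point, the inverse of e82545_write() above.  Undersized
 * accesses and IOADDR values outside the register window are logged and
 * yield zero.
 */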
2196 static uint64_t
2197 e82545_read(struct pci_devinst *pi, int baridx, uint64_t offset, int size)
2198 {
2199 	struct e82545_softc *sc;
2200 	uint64_t retval;
2201 
2202 	//DPRINTF("Read  bar:%d offset:0x%lx size:%d", baridx, offset, size);
2203 	sc = pi->pi_arg;
2204 	retval = 0;
2205 
2206 	pthread_mutex_lock(&sc->esc_mtx);
2207 
2208 	switch (baridx) {
2209 	case E82545_BAR_IO:
2210 		switch (offset) {
2211 		case E82545_IOADDR:
2212 			if (size != 4) {
2213 				DPRINTF("Wrong io addr read sz:%d", size);
2214 			} else
2215 				retval = sc->io_addr;
2216 			break;
2217 		case E82545_IODATA:
2218 			if (size != 4) {
2219 				DPRINTF("Wrong io data read sz:%d", size);
2220 			}
2221 			if (sc->io_addr > E82545_IO_REGISTER_MAX) {
2222 				DPRINTF("Non-register io read addr:0x%x",
2223 					sc->io_addr);
2224 			} else
2225 				retval = e82545_read_register(sc, sc->io_addr);
2226 			break;
2227 		default:
2228 			DPRINTF("Unknown io bar read offset:0x%lx size:%d",
2229 				offset, size);
2230 			break;
2231 		}
2232 		break;
2233 	case E82545_BAR_REGISTER:
2234 		if (size != 4) {
2235 			DPRINTF("Wrong register read size:%d offset:0x%lx",
2236 				size, offset);
2237 		} else
2238 			retval = e82545_read_register(sc, (uint32_t)offset);
2239 		break;
2240 	default:
2241 		DPRINTF("Unknown read bar:%d offset:0x%lx size:%d",
2242 			baridx, offset, size);
2243 		break;
2244 	}
2245 
2246 	pthread_mutex_unlock(&sc->esc_mtx);
2247 
2248 	return (retval);
2249 }
2250 
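/*
 * Reset the device model.  Both flavors disable rx/tx, deassert any
 * pending interrupt, reset the NVM state machine and clear the interrupt
 * and ring state.  A hardware/power-on reset (drvr == 0) additionally
 * clears the flow-control registers, descriptor base addresses, timers and
 * L2 filter tables, reseeding receive address 0 from the configured MAC;
 * a driver-initiated reset (drvr != 0) preserves those values and only
 * invalidates the 16 receive-address slots.
 */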
2251 static void
2252 e82545_reset(struct e82545_softc *sc, int drvr)
2253 {
2254 	int i;
2255 
2256 	e82545_rx_disable(sc);
2257 	e82545_tx_disable(sc);
2258 
2259 	/* clear outstanding interrupts */
2260 	if (sc->esc_irq_asserted)
2261 		pci_lintr_deassert(sc->esc_pi);
2262 
2263 	/* misc */
2264 	if (!drvr) {
2265 		sc->esc_FCAL = 0;
2266 		sc->esc_FCAH = 0;
2267 		sc->esc_FCT = 0;
2268 		sc->esc_VET = 0;
2269 		sc->esc_FCTTV = 0;
2270 	}
2271 	sc->esc_LEDCTL = 0x07061302;
2272 	sc->esc_PBA = 0x00100030;
2273 
2274 	/* start nvm in opcode mode. */
2275 	sc->nvm_opaddr = 0;
2276 	sc->nvm_mode = E82545_NVM_MODE_OPADDR;
2277 	sc->nvm_bits = E82545_NVM_OPADDR_BITS;
2278 	sc->eeprom_control = E1000_EECD_PRES | E82545_EECD_FWE_EN;
2279 	e82545_init_eeprom(sc);
2280 
2281 	/* interrupt */
2282 	sc->esc_ICR = 0;
2283 	sc->esc_ITR = 250;
2284 	sc->esc_ICS = 0;
2285 	sc->esc_IMS = 0;
2286 	sc->esc_IMC = 0;
2287 
2288 	/* L2 filters */
2289 	if (!drvr) {
2290 		memset(sc->esc_fvlan, 0, sizeof(sc->esc_fvlan));
2291 		memset(sc->esc_fmcast, 0, sizeof(sc->esc_fmcast));
2292 		memset(sc->esc_uni, 0, sizeof(sc->esc_uni));
2293 
2294 		/* XXX not necessary on 82545 ?? */
2295 		sc->esc_uni[0].eu_valid = 1;
2296 		memcpy(sc->esc_uni[0].eu_eth.octet, sc->esc_mac.octet,
2297 		    ETHER_ADDR_LEN);
2298 	} else {
2299 		/* Clear RAH valid bits */
2300 		for (i = 0; i < 16; i++)
2301 			sc->esc_uni[i].eu_valid = 0;
2302 	}
2303 
2304 	/* receive */
2305 	if (!drvr) {
2306 		sc->esc_RDBAL = 0;
2307 		sc->esc_RDBAH = 0;
2308 	}
2309 	sc->esc_RCTL = 0;
2310 	sc->esc_FCRTL = 0;
2311 	sc->esc_FCRTH = 0;
2312 	sc->esc_RDLEN = 0;
2313 	sc->esc_RDH = 0;
2314 	sc->esc_RDT = 0;
2315 	sc->esc_RDTR = 0;
2316 	sc->esc_RXDCTL = (1 << 24) | (1 << 16); /* default GRAN/WTHRESH */
2317 	sc->esc_RADV = 0;
2318 	sc->esc_RXCSUM = 0;
2319 
2320 	/* transmit */
2321 	if (!drvr) {
2322 		sc->esc_TDBAL = 0;
2323 		sc->esc_TDBAH = 0;
2324 		sc->esc_TIPG = 0;
2325 		sc->esc_AIT = 0;
2326 		sc->esc_TIDV = 0;
2327 		sc->esc_TADV = 0;
2328 	}
2329 	sc->esc_tdba = 0;
2330 	sc->esc_txdesc = NULL;
2331 	sc->esc_TXCW = 0;
2332 	sc->esc_TCTL = 0;
2333 	sc->esc_TDLEN = 0;
2334 	sc->esc_TDT = 0;
2335 	sc->esc_TDHr = sc->esc_TDH = 0;
2336 	sc->esc_TXDCTL = 0;
2337 }
2338 
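/*
 * PCI init entry point: allocate the softc, spawn the transmit thread,
 * program the 82545EM copper identity into config space (legacy INTx only,
 * no MSI), allocate the register, flash and IO BARs, resolve the MAC
 * address (the "mac" config key, otherwise a generated one), attach the
 * network backend and finish with a hardware reset.
 */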
2339 static int
2340 e82545_init(struct pci_devinst *pi, nvlist_t *nvl)
2341 {
2342 	char nstr[80];
2343 	struct e82545_softc *sc;
2344 	const char *mac;
2345 	int err;
2346 
2347 	/* Setup our softc */
2348 	sc = calloc(1, sizeof(*sc));
2349 
2350 	pi->pi_arg = sc;
2351 	sc->esc_pi = pi;
2352 	sc->esc_ctx = pi->pi_vmctx;
2353 
2354 	pthread_mutex_init(&sc->esc_mtx, NULL);
2355 	pthread_cond_init(&sc->esc_rx_cond, NULL);
2356 	pthread_cond_init(&sc->esc_tx_cond, NULL);
2357 	pthread_create(&sc->esc_tx_tid, NULL, e82545_tx_thread, sc);
2358 	snprintf(nstr, sizeof(nstr), "e82545-%d:%d tx", pi->pi_slot,
2359 	    pi->pi_func);
2360 	pthread_set_name_np(sc->esc_tx_tid, nstr);
2361 
2362 	pci_set_cfgdata16(pi, PCIR_DEVICE, E82545_DEV_ID_82545EM_COPPER);
2363 	pci_set_cfgdata16(pi, PCIR_VENDOR, E82545_VENDOR_ID_INTEL);
2364 	pci_set_cfgdata8(pi,  PCIR_CLASS, PCIC_NETWORK);
2365 	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_NETWORK_ETHERNET);
2366 	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, E82545_SUBDEV_ID);
2367 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, E82545_VENDOR_ID_INTEL);
2368 
2369 	pci_set_cfgdata8(pi,  PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
2370 	pci_set_cfgdata8(pi,  PCIR_INTPIN, 0x1);
2371 
2372 	/* TODO: this card also supports msi, but the freebsd driver for it
2373 	 * does not, so I have not implemented it. */
2374 	pci_lintr_request(pi);
2375 
2376 	pci_emul_alloc_bar(pi, E82545_BAR_REGISTER, PCIBAR_MEM32,
2377 		E82545_BAR_REGISTER_LEN);
2378 	pci_emul_alloc_bar(pi, E82545_BAR_FLASH, PCIBAR_MEM32,
2379 		E82545_BAR_FLASH_LEN);
2380 	pci_emul_alloc_bar(pi, E82545_BAR_IO, PCIBAR_IO,
2381 		E82545_BAR_IO_LEN);
2382 
2383 	mac = get_config_value_node(nvl, "mac");
2384 	if (mac != NULL) {
2385 		err = net_parsemac(mac, sc->esc_mac.octet);
2386 		if (err) {
2387 			free(sc);
2388 			return (err);
2389 		}
2390 	} else
2391 		net_genmac(pi, sc->esc_mac.octet);
2392 
2393 	err = netbe_init(&sc->esc_be, nvl, e82545_rx_callback, sc);
2394 	if (err) {
2395 		free(sc);
2396 		return (err);
2397 	}
2398 
2399 #ifndef __FreeBSD__
2400 	size_t buflen = sizeof (sc->esc_mac.octet);
2401 
2402 	err = netbe_get_mac(sc->esc_be, sc->esc_mac.octet, &buflen);
2403 	if (err != 0) {
2404 		free(sc);
2405 		return (err);
2406 	}
2407 #endif
2408 
2409 	netbe_rx_enable(sc->esc_be);
2410 
2411 	/* H/w initiated reset */
2412 	e82545_reset(sc, 0);
2413 
2414 	return (0);
2415 }
2416 
2417 static const struct pci_devemu pci_de_e82545 = {
2418 	.pe_emu = 	"e1000",
2419 	.pe_init =	e82545_init,
2420 	.pe_legacy_config = netbe_legacy_config,
2421 	.pe_barwrite =	e82545_write,
2422 	.pe_barread =	e82545_read,
2423 };
2424 PCI_EMUL_SET(pci_de_e82545);
2425
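/*
 * The model registers under the emulation name "e1000".  An illustrative
 * bhyve invocation (the backend name and its options depend on the host's
 * net_backends support and are only an example):
 *
 *	bhyve ... -s 4:0,e1000,tap0,mac=00:a0:98:12:34:56 ...
 */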