xref: /freebsd/sys/dev/ti/if_ti.c (revision 6472ac3d8a86336899b6cfb789a4cd9897e3fab5)
1 /*-
2  * Copyright (c) 1997, 1998, 1999
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD.
35  * Manuals, sample driver and firmware source kits are available
36  * from http://www.alteon.com/support/openkits.
37  *
38  * Written by Bill Paul <wpaul@ctr.columbia.edu>
39  * Electrical Engineering Department
40  * Columbia University, New York City
41  */
42 
43 /*
44  * The Alteon Networks Tigon chip contains an embedded R4000 CPU,
45  * gigabit MAC, dual DMA channels and a PCI interface unit. NICs
46  * using the Tigon may have anywhere from 512K to 2MB of SRAM. The
 * Tigon supports hardware IP, TCP and UDP checksumming, multicast
48  * filtering and jumbo (9014 byte) frames. The hardware is largely
49  * controlled by firmware, which must be loaded into the NIC during
50  * initialization.
51  *
52  * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
53  * revision, which supports new features such as extended commands,
 * extended jumbo receive ring descriptors and a mini receive ring.
55  *
56  * Alteon Networks is to be commended for releasing such a vast amount
57  * of development material for the Tigon NIC without requiring an NDA
58  * (although they really should have done it a long time ago). With
59  * any luck, the other vendors will finally wise up and follow Alteon's
60  * stellar example.
61  *
62  * The firmware for the Tigon 1 and 2 NICs is compiled directly into
63  * this driver by #including it as a C header file. This bloats the
64  * driver somewhat, but it's the easiest method considering that the
65  * driver code and firmware code need to be kept in sync. The source
66  * for the firmware is not provided with the FreeBSD distribution since
67  * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3.
68  *
69  * The following people deserve special thanks:
70  * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board
71  *   for testing
72  * - Raymond Lee of Netgear, for providing a pair of Netgear
73  *   GA620 Tigon 2 boards for testing
74  * - Ulf Zimmermann, for bringing the GA260 to my attention and
75  *   convincing me to write this driver.
76  * - Andrew Gallatin for providing FreeBSD/Alpha support.
77  */
78 
79 #include <sys/cdefs.h>
80 __FBSDID("$FreeBSD$");
81 
82 #include "opt_ti.h"
83 
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/sockio.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/kernel.h>
90 #include <sys/module.h>
91 #include <sys/socket.h>
92 #include <sys/queue.h>
93 #include <sys/conf.h>
94 #include <sys/sf_buf.h>
95 
96 #include <net/if.h>
97 #include <net/if_arp.h>
98 #include <net/ethernet.h>
99 #include <net/if_dl.h>
100 #include <net/if_media.h>
101 #include <net/if_types.h>
102 #include <net/if_vlan_var.h>
103 
104 #include <net/bpf.h>
105 
106 #include <netinet/in_systm.h>
107 #include <netinet/in.h>
108 #include <netinet/ip.h>
109 
110 #include <machine/bus.h>
111 #include <machine/resource.h>
112 #include <sys/bus.h>
113 #include <sys/rman.h>
114 
115 /* #define TI_PRIVATE_JUMBOS */
116 #ifndef TI_PRIVATE_JUMBOS
117 #include <vm/vm.h>
118 #include <vm/vm_page.h>
119 #endif
120 
121 #include <dev/pci/pcireg.h>
122 #include <dev/pci/pcivar.h>
123 
124 #include <sys/tiio.h>
125 #include <dev/ti/if_tireg.h>
126 #include <dev/ti/ti_fw.h>
127 #include <dev/ti/ti_fw2.h>
128 
129 #define TI_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)
130 /*
131  * We can only turn on header splitting if we're using extended receive
132  * BDs.
133  */
134 #if defined(TI_JUMBO_HDRSPLIT) && defined(TI_PRIVATE_JUMBOS)
135 #error "options TI_JUMBO_HDRSPLIT and TI_PRIVATE_JUMBOS are mutually exclusive"
#endif /* TI_JUMBO_HDRSPLIT && TI_PRIVATE_JUMBOS */
137 
/*
 * Direction argument for ti_bcopy_swap(): whether each 32-bit word
 * should be converted host-to-network or network-to-host order as
 * it is copied.
 */
typedef enum {
	TI_SWAP_HTON,
	TI_SWAP_NTOH
} ti_swap_type;
142 
143 /*
144  * Various supported device vendors/types and their names.
145  */
146 
147 static const struct ti_type const ti_devs[] = {
148 	{ ALT_VENDORID,	ALT_DEVICEID_ACENIC,
149 		"Alteon AceNIC 1000baseSX Gigabit Ethernet" },
150 	{ ALT_VENDORID,	ALT_DEVICEID_ACENIC_COPPER,
151 		"Alteon AceNIC 1000baseT Gigabit Ethernet" },
152 	{ TC_VENDORID,	TC_DEVICEID_3C985,
153 		"3Com 3c985-SX Gigabit Ethernet" },
154 	{ NG_VENDORID, NG_DEVICEID_GA620,
155 		"Netgear GA620 1000baseSX Gigabit Ethernet" },
156 	{ NG_VENDORID, NG_DEVICEID_GA620T,
157 		"Netgear GA620 1000baseT Gigabit Ethernet" },
158 	{ SGI_VENDORID, SGI_DEVICEID_TIGON,
159 		"Silicon Graphics Gigabit Ethernet" },
160 	{ DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX,
161 		"Farallon PN9000SX Gigabit Ethernet" },
162 	{ 0, 0, NULL }
163 };
164 
165 
166 static	d_open_t	ti_open;
167 static	d_close_t	ti_close;
168 static	d_ioctl_t	ti_ioctl2;
169 
/*
 * Character device switch for the ti(4) ioctl/debugging interface;
 * the entry points are implemented by ti_open(), ti_close() and
 * ti_ioctl2() in this file.
 */
static struct cdevsw ti_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ti_open,
	.d_close =	ti_close,
	.d_ioctl =	ti_ioctl2,
	.d_name =	"ti",
};
178 
179 static int ti_probe(device_t);
180 static int ti_attach(device_t);
181 static int ti_detach(device_t);
182 static void ti_txeof(struct ti_softc *);
183 static void ti_rxeof(struct ti_softc *);
184 
185 static void ti_stats_update(struct ti_softc *);
186 static int ti_encap(struct ti_softc *, struct mbuf **);
187 
188 static void ti_intr(void *);
189 static void ti_start(struct ifnet *);
190 static void ti_start_locked(struct ifnet *);
191 static int ti_ioctl(struct ifnet *, u_long, caddr_t);
192 static void ti_init(void *);
193 static void ti_init_locked(void *);
194 static void ti_init2(struct ti_softc *);
195 static void ti_stop(struct ti_softc *);
196 static void ti_watchdog(void *);
197 static int ti_shutdown(device_t);
198 static int ti_ifmedia_upd(struct ifnet *);
199 static int ti_ifmedia_upd_locked(struct ti_softc *);
200 static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *);
201 
202 static uint32_t ti_eeprom_putbyte(struct ti_softc *, int);
203 static uint8_t	ti_eeprom_getbyte(struct ti_softc *, int, uint8_t *);
204 static int ti_read_eeprom(struct ti_softc *, caddr_t, int, int);
205 
206 static void ti_add_mcast(struct ti_softc *, struct ether_addr *);
207 static void ti_del_mcast(struct ti_softc *, struct ether_addr *);
208 static void ti_setmulti(struct ti_softc *);
209 
210 static void ti_mem_read(struct ti_softc *, uint32_t, uint32_t, void *);
211 static void ti_mem_write(struct ti_softc *, uint32_t, uint32_t, void *);
212 static void ti_mem_zero(struct ti_softc *, uint32_t, uint32_t);
213 static int ti_copy_mem(struct ti_softc *, uint32_t, uint32_t, caddr_t, int,
214     int);
215 static int ti_copy_scratch(struct ti_softc *, uint32_t, uint32_t, caddr_t,
216     int, int, int);
217 static int ti_bcopy_swap(const void *, void *, size_t, ti_swap_type);
218 static void ti_loadfw(struct ti_softc *);
219 static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *);
220 static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, caddr_t, int);
221 static void ti_handle_events(struct ti_softc *);
222 static int ti_alloc_dmamaps(struct ti_softc *);
223 static void ti_free_dmamaps(struct ti_softc *);
224 static int ti_alloc_jumbo_mem(struct ti_softc *);
225 #ifdef TI_PRIVATE_JUMBOS
226 static void *ti_jalloc(struct ti_softc *);
227 static void ti_jfree(void *, void *);
228 #endif /* TI_PRIVATE_JUMBOS */
229 static int ti_newbuf_std(struct ti_softc *, int, struct mbuf *);
230 static int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *);
231 static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *);
232 static int ti_init_rx_ring_std(struct ti_softc *);
233 static void ti_free_rx_ring_std(struct ti_softc *);
234 static int ti_init_rx_ring_jumbo(struct ti_softc *);
235 static void ti_free_rx_ring_jumbo(struct ti_softc *);
236 static int ti_init_rx_ring_mini(struct ti_softc *);
237 static void ti_free_rx_ring_mini(struct ti_softc *);
238 static void ti_free_tx_ring(struct ti_softc *);
239 static int ti_init_tx_ring(struct ti_softc *);
240 
241 static int ti_64bitslot_war(struct ti_softc *);
242 static int ti_chipinit(struct ti_softc *);
243 static int ti_gibinit(struct ti_softc *);
244 
245 #ifdef TI_JUMBO_HDRSPLIT
246 static __inline void ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len,
247     int idx);
248 #endif /* TI_JUMBO_HDRSPLIT */
249 
/* Newbus device methods implemented by this driver. */
static device_method_t ti_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ti_probe),
	DEVMETHOD(device_attach,	ti_attach),
	DEVMETHOD(device_detach,	ti_detach),
	DEVMETHOD(device_shutdown,	ti_shutdown),
	{ 0, 0 }
};
258 
/* Driver description handed to newbus: name, method table, softc size. */
static driver_t ti_driver = {
	"ti",
	ti_methods,
	sizeof(struct ti_softc)
};
264 
static devclass_t ti_devclass;

/*
 * Register the driver on the PCI bus and declare module dependencies
 * so the kernel linker loads pci and ether support first.
 */
DRIVER_MODULE(ti, pci, ti_driver, ti_devclass, 0, 0);
MODULE_DEPEND(ti, pci, 1, 1, 1);
MODULE_DEPEND(ti, ether, 1, 1, 1);
270 
271 /*
272  * Send an instruction or address to the EEPROM, check for ACK.
273  */
274 static uint32_t
275 ti_eeprom_putbyte(struct ti_softc *sc, int byte)
276 {
277 	int i, ack = 0;
278 
279 	/*
280 	 * Make sure we're in TX mode.
281 	 */
282 	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
283 
284 	/*
285 	 * Feed in each bit and stobe the clock.
286 	 */
287 	for (i = 0x80; i; i >>= 1) {
288 		if (byte & i) {
289 			TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
290 		} else {
291 			TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
292 		}
293 		DELAY(1);
294 		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
295 		DELAY(1);
296 		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
297 	}
298 
299 	/*
300 	 * Turn off TX mode.
301 	 */
302 	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
303 
304 	/*
305 	 * Check for ack.
306 	 */
307 	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
308 	ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
309 	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
310 
311 	return (ack);
312 }
313 
/*
 * Read a byte of data stored in the EEPROM at address 'addr.'
 * We have to send two address bytes since the EEPROM can hold
 * more than 256 bytes of data.
 *
 * Returns 0 on success with the byte stored in '*dest'; returns 1
 * if the EEPROM fails to acknowledge any step of the sequence.
 */
static uint8_t
ti_eeprom_getbyte(struct ti_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint8_t byte = 0;

	EEPROM_START;

	/*
	 * Send write control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
		device_printf(sc->ti_dev,
		    "failed to send write command, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	/*
	 * Send first byte of address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
		device_printf(sc->ti_dev, "failed to send address, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}
	/*
	 * Send second byte address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
		device_printf(sc->ti_dev, "failed to send address, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	EEPROM_STOP;
	EEPROM_START;
	/*
	 * Send read control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
		device_printf(sc->ti_dev,
		    "failed to send read command, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	/*
	 * Start reading bits from EEPROM.  Disable our data output
	 * driver so the EEPROM can drive the line, then clock in the
	 * eight data bits, MSB first.
	 */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
	for (i = 0x80; i; i >>= 1) {
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
			byte |= i;
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
	}

	EEPROM_STOP;

	/*
	 * No ACK generated for read, so just return byte.
	 */

	*dest = byte;

	return (0);
}
389 
390 /*
391  * Read a sequence of bytes from the EEPROM.
392  */
393 static int
394 ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt)
395 {
396 	int err = 0, i;
397 	uint8_t byte = 0;
398 
399 	for (i = 0; i < cnt; i++) {
400 		err = ti_eeprom_getbyte(sc, off + i, &byte);
401 		if (err)
402 			break;
403 		*(dest + i) = byte;
404 	}
405 
406 	return (err ? 1 : 0);
407 }
408 
409 /*
410  * NIC memory read function.
411  * Can be used to copy data from NIC local memory.
412  */
413 static void
414 ti_mem_read(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
415 {
416 	int segptr, segsize, cnt;
417 	char *ptr;
418 
419 	segptr = addr;
420 	cnt = len;
421 	ptr = buf;
422 
423 	while (cnt) {
424 		if (cnt < TI_WINLEN)
425 			segsize = cnt;
426 		else
427 			segsize = TI_WINLEN - (segptr % TI_WINLEN);
428 		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
429 		bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
430 		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
431 		    segsize / 4);
432 		ptr += segsize;
433 		segptr += segsize;
434 		cnt -= segsize;
435 	}
436 }
437 
438 
439 /*
440  * NIC memory write function.
441  * Can be used to copy data into NIC local memory.
442  */
443 static void
444 ti_mem_write(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
445 {
446 	int segptr, segsize, cnt;
447 	char *ptr;
448 
449 	segptr = addr;
450 	cnt = len;
451 	ptr = buf;
452 
453 	while (cnt) {
454 		if (cnt < TI_WINLEN)
455 			segsize = cnt;
456 		else
457 			segsize = TI_WINLEN - (segptr % TI_WINLEN);
458 		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
459 		bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
460 		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
461 		    segsize / 4);
462 		ptr += segsize;
463 		segptr += segsize;
464 		cnt -= segsize;
465 	}
466 }
467 
468 /*
469  * NIC memory read function.
470  * Can be used to clear a section of NIC local memory.
471  */
472 static void
473 ti_mem_zero(struct ti_softc *sc, uint32_t addr, uint32_t len)
474 {
475 	int segptr, segsize, cnt;
476 
477 	segptr = addr;
478 	cnt = len;
479 
480 	while (cnt) {
481 		if (cnt < TI_WINLEN)
482 			segsize = cnt;
483 		else
484 			segsize = TI_WINLEN - (segptr % TI_WINLEN);
485 		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
486 		bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
487 		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, segsize / 4);
488 		segptr += segsize;
489 		cnt -= segsize;
490 	}
491 }
492 
/*
 * Copy data between NIC local memory (via the shared memory window)
 * and a host buffer, in either direction, byte-swapping 32-bit words
 * with ti_bcopy_swap().  Handles transfers whose start address and/or
 * length are not 32-bit aligned (reads only) by doing read/modify/write
 * on the partial words.
 *
 * tigon_addr - NIC-local byte address
 * buf        - host buffer (kernel or user space)
 * useraddr   - non-zero if 'buf' is a user-space address; copyin/copyout
 *              are then used with the softc lock dropped around them
 * readdata   - non-zero to read from the NIC, zero to write to it
 *
 * Returns 0 on success or EINVAL for an unsupported unaligned write.
 */
static int
ti_copy_mem(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
    caddr_t buf, int useraddr, int readdata)
{
	int segptr, segsize, cnt;
	caddr_t ptr;
	uint32_t origwin;
	uint8_t tmparray[TI_WINLEN], tmparray2[TI_WINLEN];
	int resid, segresid;
	int first_pass;

	TI_LOCK_ASSERT(sc);

	/*
	 * At the moment, we don't handle non-aligned cases, we just bail.
	 * If this proves to be a problem, it will be fixed.
	 */
	if ((readdata == 0)
	 && (tigon_addr & 0x3)) {
		device_printf(sc->ti_dev, "%s: tigon address %#x isn't "
		    "word-aligned\n", __func__, tigon_addr);
		device_printf(sc->ti_dev, "%s: unaligned writes aren't "
		    "yet supported\n", __func__);
		return (EINVAL);
	}

	/* Round the start down to a word boundary and remember the slop. */
	segptr = tigon_addr & ~0x3;
	segresid = tigon_addr - segptr;

	/*
	 * This is the non-aligned amount left over that we'll need to
	 * copy.
	 */
	resid = len & 0x3;

	/* Add in the left over amount at the front of the buffer */
	resid += segresid;

	cnt = len & ~0x3;
	/*
	 * If resid + segresid is >= 4, add multiples of 4 to the count and
	 * decrease the residual by that much.
	 */
	cnt += resid & ~0x3;
	resid -= resid & ~0x3;

	ptr = buf;

	/* First pass must skip 'segresid' alignment bytes at the front. */
	first_pass = 1;

	/*
	 * Save the old window base value.
	 */
	origwin = CSR_READ_4(sc, TI_WINBASE);

	while (cnt) {
		bus_size_t ti_offset;

		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));

		ti_offset = TI_WINDOW + (segptr & (TI_WINLEN -1));

		if (readdata) {

			bus_space_read_region_4(sc->ti_btag,
						sc->ti_bhandle, ti_offset,
						(uint32_t *)tmparray,
						segsize >> 2);
			if (useraddr) {
				/*
				 * Yeah, this is a little on the kludgy
				 * side, but at least this code is only
				 * used for debugging.
				 */
				ti_bcopy_swap(tmparray, tmparray2, segsize,
					      TI_SWAP_NTOH);

				/* Drop the lock across copyout(); it may fault. */
				TI_UNLOCK(sc);
				if (first_pass) {
					copyout(&tmparray2[segresid], ptr,
						segsize - segresid);
					first_pass = 0;
				} else
					copyout(tmparray2, ptr, segsize);
				TI_LOCK(sc);
			} else {
				if (first_pass) {

					ti_bcopy_swap(tmparray, tmparray2,
						      segsize, TI_SWAP_NTOH);
					TI_UNLOCK(sc);
					bcopy(&tmparray2[segresid], ptr,
					      segsize - segresid);
					TI_LOCK(sc);
					first_pass = 0;
				} else
					ti_bcopy_swap(tmparray, ptr, segsize,
						      TI_SWAP_NTOH);
			}

		} else {
			if (useraddr) {
				/* Drop the lock across copyin(); it may fault. */
				TI_UNLOCK(sc);
				copyin(ptr, tmparray2, segsize);
				TI_LOCK(sc);
				ti_bcopy_swap(tmparray2, tmparray, segsize,
					      TI_SWAP_HTON);
			} else
				ti_bcopy_swap(ptr, tmparray, segsize,
					      TI_SWAP_HTON);

			bus_space_write_region_4(sc->ti_btag,
						 sc->ti_bhandle, ti_offset,
						 (uint32_t *)tmparray,
						 segsize >> 2);
		}
		segptr += segsize;
		ptr += segsize;
		cnt -= segsize;
	}

	/*
	 * Handle leftover, non-word-aligned bytes.
	 */
	if (resid != 0) {
		uint32_t	tmpval, tmpval2;
		bus_size_t	ti_offset;

		/*
		 * Set the segment pointer.
		 */
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));

		ti_offset = TI_WINDOW + (segptr & (TI_WINLEN - 1));

		/*
		 * First, grab whatever is in our source/destination.
		 * We'll obviously need this for reads, but also for
		 * writes, since we'll be doing read/modify/write.
		 */
		bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
					ti_offset, &tmpval, 1);

		/*
		 * Next, translate this from little-endian to big-endian
		 * (at least on i386 boxes).
		 */
		tmpval2 = ntohl(tmpval);

		if (readdata) {
			/*
			 * If we're reading, just copy the leftover number
			 * of bytes from the host byte order buffer to
			 * the user's buffer.
			 */
			if (useraddr) {
				TI_UNLOCK(sc);
				copyout(&tmpval2, ptr, resid);
				TI_LOCK(sc);
			} else
				bcopy(&tmpval2, ptr, resid);
		} else {
			/*
			 * If we're writing, first copy the bytes to be
			 * written into the network byte order buffer,
			 * leaving the rest of the buffer with whatever was
			 * originally in there.  Then, swap the bytes
			 * around into host order and write them out.
			 *
			 * XXX KDM the read side of this has been verified
			 * to work, but the write side of it has not been
			 * verified.  So user beware.
			 */
			if (useraddr) {
				TI_UNLOCK(sc);
				copyin(ptr, &tmpval2, resid);
				TI_LOCK(sc);
			} else
				bcopy(ptr, &tmpval2, resid);

			tmpval = htonl(tmpval2);

			bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
						 ti_offset, &tmpval, 1);
		}
	}

	/* Restore the caller's window base. */
	CSR_WRITE_4(sc, TI_WINBASE, origwin);

	return (0);
}
688 
/*
 * Copy data between a host buffer and one Tigon CPU's scratchpad
 * area, one 32-bit word at a time, using that CPU's SRAM
 * address/data register pair (CPU_REG) instead of the shared memory
 * window.  Both 'tigon_addr' and 'len' must be 32-bit aligned;
 * 'cpu' selects which CPU's register bank to use.  Returns 0 on
 * success or EINVAL on bad alignment.
 *
 * NOTE(review): unlike ti_copy_mem(), the softc lock is held across
 * copyin()/copyout() here.
 */
static int
ti_copy_scratch(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
    caddr_t buf, int useraddr, int readdata, int cpu)
{
	uint32_t segptr;
	int cnt;
	uint32_t tmpval, tmpval2;
	caddr_t ptr;

	TI_LOCK_ASSERT(sc);

	/*
	 * At the moment, we don't handle non-aligned cases, we just bail.
	 * If this proves to be a problem, it will be fixed.
	 */
	if (tigon_addr & 0x3) {
		device_printf(sc->ti_dev, "%s: tigon address %#x "
		    "isn't word-aligned\n", __func__, tigon_addr);
		return (EINVAL);
	}

	if (len & 0x3) {
		device_printf(sc->ti_dev, "%s: transfer length %d "
		    "isn't word-aligned\n", __func__, len);
		return (EINVAL);
	}

	segptr = tigon_addr;
	cnt = len;
	ptr = buf;

	while (cnt) {
		/* Latch the SRAM address for this word. */
		CSR_WRITE_4(sc, CPU_REG(TI_SRAM_ADDR, cpu), segptr);

		if (readdata) {
			tmpval2 = CSR_READ_4(sc, CPU_REG(TI_SRAM_DATA, cpu));

			tmpval = ntohl(tmpval2);

			/*
			 * Note:  I've used this debugging interface
			 * extensively with Alteon's 12.3.15 firmware,
			 * compiled with GCC 2.7.2.1 and binutils 2.9.1.
			 *
			 * When you compile the firmware without
			 * optimization, which is necessary sometimes in
			 * order to properly step through it, you sometimes
			 * read out a bogus value of 0xc0017c instead of
			 * whatever was supposed to be in that scratchpad
			 * location.  That value is on the stack somewhere,
			 * but I've never been able to figure out what was
			 * causing the problem.
			 *
			 * The address seems to pop up in random places,
			 * often not in the same place on two subsequent
			 * reads.
			 *
			 * In any case, the underlying data doesn't seem
			 * to be affected, just the value read out.
			 *
			 * KDM, 3/7/2000
			 */

			if (tmpval2 == 0xc0017c)
				device_printf(sc->ti_dev, "found 0xc0017c at "
				    "%#x (tmpval2)\n", segptr);

			if (tmpval == 0xc0017c)
				device_printf(sc->ti_dev, "found 0xc0017c at "
				    "%#x (tmpval)\n", segptr);

			if (useraddr)
				copyout(&tmpval, ptr, 4);
			else
				bcopy(&tmpval, ptr, 4);
		} else {
			if (useraddr)
				copyin(ptr, &tmpval2, 4);
			else
				bcopy(ptr, &tmpval2, 4);

			tmpval = htonl(tmpval2);

			CSR_WRITE_4(sc, CPU_REG(TI_SRAM_DATA, cpu), tmpval);
		}

		cnt -= 4;
		segptr += 4;
		ptr += 4;
	}

	return (0);
}
782 
783 static int
784 ti_bcopy_swap(const void *src, void *dst, size_t len, ti_swap_type swap_type)
785 {
786 	const uint8_t *tmpsrc;
787 	uint8_t *tmpdst;
788 	size_t tmplen;
789 
790 	if (len & 0x3) {
791 		printf("ti_bcopy_swap: length %zd isn't 32-bit aligned\n",
792 		       len);
793 		return (-1);
794 	}
795 
796 	tmpsrc = src;
797 	tmpdst = dst;
798 	tmplen = len;
799 
800 	while (tmplen) {
801 		if (swap_type == TI_SWAP_NTOH)
802 			*(uint32_t *)tmpdst =
803 				ntohl(*(const uint32_t *)tmpsrc);
804 		else
805 			*(uint32_t *)tmpdst =
806 				htonl(*(const uint32_t *)tmpsrc);
807 
808 		tmpsrc += 4;
809 		tmpdst += 4;
810 		tmplen -= 4;
811 	}
812 
813 	return (0);
814 }
815 
816 /*
817  * Load firmware image into the NIC. Check that the firmware revision
818  * is acceptable and see if we want the firmware for the Tigon 1 or
819  * Tigon 2.
820  */
static void
ti_loadfw(struct ti_softc *sc)
{

	TI_LOCK_ASSERT(sc);

	switch (sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		/*
		 * Refuse to load firmware whose compiled-in version
		 * does not match the version this driver expects.
		 */
		if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR ||
		    tigonFwReleaseMinor != TI_FIRMWARE_MINOR ||
		    tigonFwReleaseFix != TI_FIRMWARE_FIX) {
			device_printf(sc->ti_dev, "firmware revision mismatch; "
			    "want %d.%d.%d, got %d.%d.%d\n",
			    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
			    TI_FIRMWARE_FIX, tigonFwReleaseMajor,
			    tigonFwReleaseMinor, tigonFwReleaseFix);
			return;
		}
		/* Copy text, data and rodata sections into NIC memory... */
		ti_mem_write(sc, tigonFwTextAddr, tigonFwTextLen, tigonFwText);
		ti_mem_write(sc, tigonFwDataAddr, tigonFwDataLen, tigonFwData);
		ti_mem_write(sc, tigonFwRodataAddr, tigonFwRodataLen,
		    tigonFwRodata);
		/* ...zero bss and sbss... */
		ti_mem_zero(sc, tigonFwBssAddr, tigonFwBssLen);
		ti_mem_zero(sc, tigonFwSbssAddr, tigonFwSbssLen);
		/* ...and point the CPU at the firmware entry point. */
		CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr);
		break;
	case TI_HWREV_TIGON_II:
		/* Same sequence, using the Tigon 2 firmware image. */
		if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR ||
		    tigon2FwReleaseMinor != TI_FIRMWARE_MINOR ||
		    tigon2FwReleaseFix != TI_FIRMWARE_FIX) {
			device_printf(sc->ti_dev, "firmware revision mismatch; "
			    "want %d.%d.%d, got %d.%d.%d\n",
			    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
			    TI_FIRMWARE_FIX, tigon2FwReleaseMajor,
			    tigon2FwReleaseMinor, tigon2FwReleaseFix);
			return;
		}
		ti_mem_write(sc, tigon2FwTextAddr, tigon2FwTextLen,
		    tigon2FwText);
		ti_mem_write(sc, tigon2FwDataAddr, tigon2FwDataLen,
		    tigon2FwData);
		ti_mem_write(sc, tigon2FwRodataAddr, tigon2FwRodataLen,
		    tigon2FwRodata);
		ti_mem_zero(sc, tigon2FwBssAddr, tigon2FwBssLen);
		ti_mem_zero(sc, tigon2FwSbssAddr, tigon2FwSbssLen);
		CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr);
		break;
	default:
		device_printf(sc->ti_dev,
		    "can't load firmware: unknown hardware rev\n");
		break;
	}
}
874 
875 /*
876  * Send the NIC a command via the command ring.
877  */
878 static void
879 ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd)
880 {
881 	int index;
882 
883 	index = sc->ti_cmd_saved_prodidx;
884 	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd));
885 	TI_INC(index, TI_CMD_RING_CNT);
886 	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
887 	sc->ti_cmd_saved_prodidx = index;
888 }
889 
890 /*
891  * Send the NIC an extended command. The 'len' parameter specifies the
892  * number of command slots to include after the initial command.
893  */
894 static void
895 ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg, int len)
896 {
897 	int index;
898 	int i;
899 
900 	index = sc->ti_cmd_saved_prodidx;
901 	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd));
902 	TI_INC(index, TI_CMD_RING_CNT);
903 	for (i = 0; i < len; i++) {
904 		CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
905 		    *(uint32_t *)(&arg[i * 4]));
906 		TI_INC(index, TI_CMD_RING_CNT);
907 	}
908 	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
909 	sc->ti_cmd_saved_prodidx = index;
910 }
911 
/*
 * Handle events that have triggered interrupts: drain the event
 * ring from our saved consumer index up to the firmware's producer
 * index, dispatching on each event type, then write back the new
 * consumer index.
 */
static void
ti_handle_events(struct ti_softc *sc)
{
	struct ti_event_desc *e;

	/* Nothing to do before the event ring has been allocated. */
	if (sc->ti_rdata->ti_event_ring == NULL)
		return;

	while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
		e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
		switch (TI_EVENT_EVENT(e)) {
		case TI_EV_LINKSTAT_CHANGED:
			/* Event code distinguishes 10/100, gigabit and down. */
			sc->ti_linkstat = TI_EVENT_CODE(e);
			if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
				if_link_state_change(sc->ti_ifp, LINK_STATE_UP);
				sc->ti_ifp->if_baudrate = IF_Mbps(100);
				if (bootverbose)
					device_printf(sc->ti_dev,
					    "10/100 link up\n");
			} else if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
				if_link_state_change(sc->ti_ifp, LINK_STATE_UP);
				sc->ti_ifp->if_baudrate = IF_Gbps(1UL);
				if (bootverbose)
					device_printf(sc->ti_dev,
					    "gigabit link up\n");
			} else if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
				if_link_state_change(sc->ti_ifp,
				    LINK_STATE_DOWN);
				sc->ti_ifp->if_baudrate = 0;
				if (bootverbose)
					device_printf(sc->ti_dev,
					    "link down\n");
			}
			break;
		case TI_EV_ERROR:
			if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
				device_printf(sc->ti_dev, "invalid command\n");
			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD)
				device_printf(sc->ti_dev, "unknown command\n");
			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG)
				device_printf(sc->ti_dev, "bad config data\n");
			break;
		case TI_EV_FIRMWARE_UP:
			/* Firmware has booted; finish initialization. */
			ti_init2(sc);
			break;
		case TI_EV_STATS_UPDATED:
			ti_stats_update(sc);
			break;
		case TI_EV_RESET_JUMBO_RING:
		case TI_EV_MCAST_UPDATED:
			/* Who cares. */
			break;
		default:
			device_printf(sc->ti_dev, "unknown event: %d\n",
			    TI_EVENT_EVENT(e));
			break;
		}
		/* Advance the consumer index. */
		TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
		CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
	}
}
977 
978 static int
979 ti_alloc_dmamaps(struct ti_softc *sc)
980 {
981 	int i;
982 
983 	for (i = 0; i < TI_TX_RING_CNT; i++) {
984 		sc->ti_cdata.ti_txdesc[i].tx_m = NULL;
985 		sc->ti_cdata.ti_txdesc[i].tx_dmamap = 0;
986 		if (bus_dmamap_create(sc->ti_mbuftx_dmat, 0,
987 				      &sc->ti_cdata.ti_txdesc[i].tx_dmamap))
988 			return (ENOBUFS);
989 	}
990 	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
991 		if (bus_dmamap_create(sc->ti_mbufrx_dmat, 0,
992 				      &sc->ti_cdata.ti_rx_std_maps[i]))
993 			return (ENOBUFS);
994 	}
995 
996 	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
997 		if (bus_dmamap_create(sc->ti_jumbo_dmat, 0,
998 				      &sc->ti_cdata.ti_rx_jumbo_maps[i]))
999 			return (ENOBUFS);
1000 	}
1001 	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
1002 		if (bus_dmamap_create(sc->ti_mbufrx_dmat, 0,
1003 				      &sc->ti_cdata.ti_rx_mini_maps[i]))
1004 			return (ENOBUFS);
1005 	}
1006 
1007 	return (0);
1008 }
1009 
1010 static void
1011 ti_free_dmamaps(struct ti_softc *sc)
1012 {
1013 	int i;
1014 
1015 	if (sc->ti_mbuftx_dmat)
1016 		for (i = 0; i < TI_TX_RING_CNT; i++)
1017 			if (sc->ti_cdata.ti_txdesc[i].tx_dmamap) {
1018 				bus_dmamap_destroy(sc->ti_mbuftx_dmat,
1019 				    sc->ti_cdata.ti_txdesc[i].tx_dmamap);
1020 				sc->ti_cdata.ti_txdesc[i].tx_dmamap = 0;
1021 			}
1022 
1023 	if (sc->ti_mbufrx_dmat)
1024 		for (i = 0; i < TI_STD_RX_RING_CNT; i++)
1025 			if (sc->ti_cdata.ti_rx_std_maps[i]) {
1026 				bus_dmamap_destroy(sc->ti_mbufrx_dmat,
1027 				    sc->ti_cdata.ti_rx_std_maps[i]);
1028 				sc->ti_cdata.ti_rx_std_maps[i] = 0;
1029 			}
1030 
1031 	if (sc->ti_jumbo_dmat)
1032 		for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++)
1033 			if (sc->ti_cdata.ti_rx_jumbo_maps[i]) {
1034 				bus_dmamap_destroy(sc->ti_jumbo_dmat,
1035 				    sc->ti_cdata.ti_rx_jumbo_maps[i]);
1036 				sc->ti_cdata.ti_rx_jumbo_maps[i] = 0;
1037 			}
1038 	if (sc->ti_mbufrx_dmat)
1039 		for (i = 0; i < TI_MINI_RX_RING_CNT; i++)
1040 			if (sc->ti_cdata.ti_rx_mini_maps[i]) {
1041 				bus_dmamap_destroy(sc->ti_mbufrx_dmat,
1042 				    sc->ti_cdata.ti_rx_mini_maps[i]);
1043 				sc->ti_cdata.ti_rx_mini_maps[i] = 0;
1044 			}
1045 }
1046 
1047 #ifdef TI_PRIVATE_JUMBOS
1048 
1049 /*
1050  * Memory management for the jumbo receive ring is a pain in the
1051  * butt. We need to allocate at least 9018 bytes of space per frame,
1052  * _and_ it has to be contiguous (unless you use the extended
1053  * jumbo descriptor format). Using malloc() all the time won't
1054  * work: malloc() allocates memory in powers of two, which means we
1055  * would end up wasting a considerable amount of space by allocating
1056  * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have
1057  * to do our own memory management.
1058  *
1059  * The driver needs to allocate a contiguous chunk of memory at boot
1060  * time. We then chop this up ourselves into 9K pieces and use them
1061  * as external mbuf storage.
1062  *
1063  * One issue here is how much memory to allocate. The jumbo ring has
 * 256 slots in it, but at 9K per slot that can consume over 2MB of
1065  * RAM. This is a bit much, especially considering we also need
1066  * RAM for the standard ring and mini ring (on the Tigon 2). To
1067  * save space, we only actually allocate enough memory for 64 slots
1068  * by default, which works out to between 500 and 600K. This can
1069  * be tuned by changing a #define in if_tireg.h.
1070  */
1071 
/*
 * Allocate the single contiguous TI_JMEM-byte jumbo pool, carve it
 * into TI_JSLOTS slots of TI_JLEN bytes each, and queue a bookkeeping
 * entry for every slot on the free list.  Returns 0 or ENOBUFS.
 *
 * NOTE(review): on a mid-loop malloc() failure the DMA tag, the DMA
 * memory and any entries already queued are left allocated --
 * presumably the caller's detach path reclaims them; verify.
 */
static int
ti_alloc_jumbo_mem(struct ti_softc *sc)
{
	struct ti_jpool_entry *entry;
	caddr_t ptr;
	int i;

	/*
	 * Grab a big chunk o' storage.  Since we are chopping this pool up
	 * into ~9k chunks, there doesn't appear to be a need to use page
	 * alignment.
	 */
	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				TI_JMEM,		/* maxsize */
				1,			/* nsegments */
				TI_JMEM,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->ti_jumbo_dmat) != 0) {
		device_printf(sc->ti_dev, "Failed to allocate jumbo dmat\n");
		return (ENOBUFS);
	}

	if (bus_dmamem_alloc(sc->ti_jumbo_dmat,
			     (void**)&sc->ti_cdata.ti_jumbo_buf,
			     BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			     &sc->ti_jumbo_dmamap) != 0) {
		device_printf(sc->ti_dev, "Failed to allocate jumbo memory\n");
		return (ENOBUFS);
	}

	SLIST_INIT(&sc->ti_jfree_listhead);
	SLIST_INIT(&sc->ti_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->ti_cdata.ti_jumbo_buf;
	for (i = 0; i < TI_JSLOTS; i++) {
		sc->ti_cdata.ti_jslots[i] = ptr;
		ptr += TI_JLEN;
		entry = malloc(sizeof(struct ti_jpool_entry),
			       M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			device_printf(sc->ti_dev, "no memory for jumbo "
			    "buffer queue!\n");
			return (ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
	}

	return (0);
}
1131 
1132 /*
1133  * Allocate a jumbo buffer.
1134  */
1135 static void *ti_jalloc(struct ti_softc *sc)
1136 {
1137 	struct ti_jpool_entry *entry;
1138 
1139 	entry = SLIST_FIRST(&sc->ti_jfree_listhead);
1140 
1141 	if (entry == NULL) {
1142 		device_printf(sc->ti_dev, "no free jumbo buffers\n");
1143 		return (NULL);
1144 	}
1145 
1146 	SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
1147 	SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
1148 	return (sc->ti_cdata.ti_jslots[entry->slot]);
1149 }
1150 
/*
 * Release a jumbo buffer back to the pool.  This is the external-
 * storage free routine handed to MEXTADD() in ti_newbuf_jumbo();
 * 'args' carries the softc pointer.
 */
static void
ti_jfree(void *buf, void *args)
{
	struct ti_softc *sc;
	int i;
	struct ti_jpool_entry *entry;

	/* Extract the softc struct pointer. */
	sc = (struct ti_softc *)args;

	if (sc == NULL)
		panic("ti_jfree: didn't get softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;

	if ((i < 0) || (i >= TI_JSLOTS))
		panic("ti_jfree: asked to free buffer that we don't manage!");

	/*
	 * Bookkeeping entries are interchangeable tokens: pop any entry
	 * off the in-use list, retarget it at the slot being freed, and
	 * push it on the free list.
	 */
	entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
	if (entry == NULL)
		panic("ti_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
}
1181 
1182 #else
1183 
/*
 * Non-TI_PRIVATE_JUMBOS variant: no private pool is needed.  Just
 * create the DMA tag used to load the multi-fragment jumbo chains
 * built by ti_newbuf_jumbo() -- up to 4 segments of at most one page
 * each.  Returns 0 or ENOBUFS.
 */
static int
ti_alloc_jumbo_mem(struct ti_softc *sc)
{

	/*
	 * The VM system will take care of providing aligned pages.  Alignment
	 * is set to 1 here so that busdma resources won't be wasted.
	 */
	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				PAGE_SIZE * 4 /*XXX*/,	/* maxsize */
				4,			/* nsegments */
				PAGE_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->ti_jumbo_dmat) != 0) {
		device_printf(sc->ti_dev, "Failed to allocate jumbo dmat\n");
		return (ENOBUFS);
	}

	return (0);
}
1209 
1210 #endif /* TI_PRIVATE_JUMBOS */
1211 
1212 /*
 * Initialize a standard receive ring descriptor.
1214  */
/*
 * Post an mbuf cluster in slot 'i' of the standard receive ring.  If
 * 'm' is NULL a fresh header mbuf + cluster is allocated; otherwise
 * the caller's mbuf is recycled.  Returns 0 or ENOBUFS.
 */
static int
ti_newbuf_std(struct ti_softc *sc, int i, struct mbuf *m)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs;
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;
	int nsegs;

	nsegs = 0;
	if (m == NULL) {
		/* Allocate a fresh header mbuf with a cluster attached. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf; reset to the full cluster. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* ETHER_ALIGN pad so the payload lands longword aligned. */
	m_adj(m_new, ETHER_ALIGN);
	sc->ti_cdata.ti_rx_std_chain[i] = m_new;
	r = &sc->ti_rdata->ti_rx_std_ring[i];
	map = sc->ti_cdata.ti_rx_std_maps[i];
	/*
	 * NOTE(review): on the failure returns below the mbuf remains in
	 * ti_rx_std_chain[i] (reclaimed by ti_free_rx_ring_std()), and
	 * in the nsegs != 1 case the map stays loaded -- confirm the
	 * callers expect this.
	 */
	if (bus_dmamap_load_mbuf_sg(sc->ti_mbufrx_dmat, map, m_new, &segs,
				    &nsegs, 0))
		return (ENOBUFS);
	if (nsegs != 1)
		return (ENOBUFS);
	/* Fill in the receive BD and tag it with its ring index. */
	ti_hostaddr64(&r->ti_addr, segs.ds_addr);
	r->ti_len = segs.ds_len;
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = 0;
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;

	bus_dmamap_sync(sc->ti_mbufrx_dmat, map, BUS_DMASYNC_PREREAD);
	return (0);
}
1262 
1263 /*
 * Initialize a mini receive ring descriptor. This only applies to
1265  * the Tigon 2.
1266  */
/*
 * Post a small (MHLEN, no cluster) mbuf in slot 'i' of the mini
 * receive ring.  If 'm' is NULL a fresh mbuf is allocated; otherwise
 * the caller's mbuf is recycled.  Returns 0 or ENOBUFS.
 */
static int
ti_newbuf_mini(struct ti_softc *sc, int i, struct mbuf *m)
{
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;
	int nsegs;

	nsegs = 0;
	if (m == NULL) {
		/* Fresh header mbuf; internal storage only (MHLEN bytes). */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
	} else {
		/* Recycle: point data back at the internal buffer. */
		m_new = m;
		m_new->m_data = m_new->m_pktdat;
		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
	}

	/* ETHER_ALIGN pad so the payload lands longword aligned. */
	m_adj(m_new, ETHER_ALIGN);
	r = &sc->ti_rdata->ti_rx_mini_ring[i];
	sc->ti_cdata.ti_rx_mini_chain[i] = m_new;
	map = sc->ti_cdata.ti_rx_mini_maps[i];
	if (bus_dmamap_load_mbuf_sg(sc->ti_mbufrx_dmat, map, m_new, &segs,
				    &nsegs, 0))
		return (ENOBUFS);
	if (nsegs != 1)
		return (ENOBUFS);
	/* Fill in the BD; flag it as belonging to the mini ring. */
	ti_hostaddr64(&r->ti_addr, segs.ds_addr);
	r->ti_len = segs.ds_len;
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = TI_BDFLAG_MINI_RING;
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;

	bus_dmamap_sync(sc->ti_mbufrx_dmat, map, BUS_DMASYNC_PREREAD);
	return (0);
}
1309 
1310 #ifdef TI_PRIVATE_JUMBOS
1311 
/*
 * Initialize a jumbo receive ring descriptor.  This allocates a
 * jumbo buffer from the pool managed internally by the driver
 * (see ti_alloc_jumbo_mem()/ti_jalloc()) and attaches it to a
 * header mbuf as external storage.  Returns 0 or ENOBUFS.
 */
static int
ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *m)
{
	bus_dmamap_t map;
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;
	int nsegs;
	bus_dma_segment_t segs;

	if (m == NULL) {
		caddr_t *buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return (ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = ti_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			device_printf(sc->ti_dev, "jumbo allocation failed "
			    "-- packet dropped!\n");
			return (ENOBUFS);
		}

		/*
		 * Attach the buffer to the mbuf; ti_jfree() returns it
		 * to the pool when the mbuf is released.
		 */
		m_new->m_data = (void *) buf;
		m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, ti_jfree, buf,
		    (struct ti_softc *)sc, 0, EXT_NET_DRV);
	} else {
		/* Recycle the caller's mbuf; reset to full jumbo size. */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN;
	}

	/* ETHER_ALIGN pad so the payload lands longword aligned. */
	m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->ti_rdata->ti_rx_jumbo_ring[i];
	sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new;
	map = sc->ti_cdata.ti_rx_jumbo_maps[i];
	if (bus_dmamap_load_mbuf_sg(sc->ti_jumbo_dmat, map, m_new, &segs,
				    &nsegs, 0))
		return (ENOBUFS);
	if (nsegs != 1)
		return (ENOBUFS);
	ti_hostaddr64(&r->ti_addr, segs.ds_addr);
	r->ti_len = segs.ds_len;
	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
	r->ti_flags = TI_BDFLAG_JUMBO_RING;
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;

	bus_dmamap_sync(sc->ti_jumbo_dmat, map, BUS_DMASYNC_PREREAD);
	return (0);
}
1375 
1376 #else
1377 
1378 #if (PAGE_SIZE == 4096)
1379 #define NPAYLOAD 2
1380 #else
1381 #define NPAYLOAD 1
1382 #endif
1383 
1384 #define TCP_HDR_LEN (52 + sizeof(struct ether_header))
1385 #define UDP_HDR_LEN (28 + sizeof(struct ether_header))
1386 #define NFS_HDR_LEN (UDP_HDR_LEN)
1387 static int HDR_LEN = TCP_HDR_LEN;
1388 
1389 /*
1390  * Initialize a jumbo receive ring descriptor. This allocates
1391  * a jumbo buffer from the pool managed internally by the driver.
1392  */
1393 static int
1394 ti_newbuf_jumbo(struct ti_softc *sc, int idx, struct mbuf *m_old)
1395 {
1396 	bus_dmamap_t map;
1397 	struct mbuf *cur, *m_new = NULL;
1398 	struct mbuf *m[3] = {NULL, NULL, NULL};
1399 	struct ti_rx_desc_ext *r;
1400 	vm_page_t frame;
1401 	static int color;
1402 	/* 1 extra buf to make nobufs easy*/
1403 	struct sf_buf *sf[3] = {NULL, NULL, NULL};
1404 	int i;
1405 	bus_dma_segment_t segs[4];
1406 	int nsegs;
1407 
1408 	if (m_old != NULL) {
1409 		m_new = m_old;
1410 		cur = m_old->m_next;
1411 		for (i = 0; i <= NPAYLOAD; i++){
1412 			m[i] = cur;
1413 			cur = cur->m_next;
1414 		}
1415 	} else {
1416 		/* Allocate the mbufs. */
1417 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1418 		if (m_new == NULL) {
1419 			device_printf(sc->ti_dev, "mbuf allocation failed "
1420 			    "-- packet dropped!\n");
1421 			goto nobufs;
1422 		}
1423 		MGET(m[NPAYLOAD], M_DONTWAIT, MT_DATA);
1424 		if (m[NPAYLOAD] == NULL) {
1425 			device_printf(sc->ti_dev, "cluster mbuf allocation "
1426 			    "failed -- packet dropped!\n");
1427 			goto nobufs;
1428 		}
1429 		MCLGET(m[NPAYLOAD], M_DONTWAIT);
1430 		if ((m[NPAYLOAD]->m_flags & M_EXT) == 0) {
1431 			device_printf(sc->ti_dev, "mbuf allocation failed "
1432 			    "-- packet dropped!\n");
1433 			goto nobufs;
1434 		}
1435 		m[NPAYLOAD]->m_len = MCLBYTES;
1436 
1437 		for (i = 0; i < NPAYLOAD; i++){
1438 			MGET(m[i], M_DONTWAIT, MT_DATA);
1439 			if (m[i] == NULL) {
1440 				device_printf(sc->ti_dev, "mbuf allocation "
1441 				    "failed -- packet dropped!\n");
1442 				goto nobufs;
1443 			}
1444 			frame = vm_page_alloc(NULL, color++,
1445 			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
1446 			    VM_ALLOC_WIRED);
1447 			if (frame == NULL) {
1448 				device_printf(sc->ti_dev, "buffer allocation "
1449 				    "failed -- packet dropped!\n");
1450 				printf("      index %d page %d\n", idx, i);
1451 				goto nobufs;
1452 			}
1453 			sf[i] = sf_buf_alloc(frame, SFB_NOWAIT);
1454 			if (sf[i] == NULL) {
1455 				vm_page_unwire(frame, 0);
1456 				vm_page_free(frame);
1457 				device_printf(sc->ti_dev, "buffer allocation "
1458 				    "failed -- packet dropped!\n");
1459 				printf("      index %d page %d\n", idx, i);
1460 				goto nobufs;
1461 			}
1462 		}
1463 		for (i = 0; i < NPAYLOAD; i++){
1464 		/* Attach the buffer to the mbuf. */
1465 			m[i]->m_data = (void *)sf_buf_kva(sf[i]);
1466 			m[i]->m_len = PAGE_SIZE;
1467 			MEXTADD(m[i], sf_buf_kva(sf[i]), PAGE_SIZE,
1468 			    sf_buf_mext, (void*)sf_buf_kva(sf[i]), sf[i],
1469 			    0, EXT_DISPOSABLE);
1470 			m[i]->m_next = m[i+1];
1471 		}
1472 		/* link the buffers to the header */
1473 		m_new->m_next = m[0];
1474 		m_new->m_data += ETHER_ALIGN;
1475 		if (sc->ti_hdrsplit)
1476 			m_new->m_len = MHLEN - ETHER_ALIGN;
1477 		else
1478 			m_new->m_len = HDR_LEN;
1479 		m_new->m_pkthdr.len = NPAYLOAD * PAGE_SIZE + m_new->m_len;
1480 	}
1481 
1482 	/* Set up the descriptor. */
1483 	r = &sc->ti_rdata->ti_rx_jumbo_ring[idx];
1484 	sc->ti_cdata.ti_rx_jumbo_chain[idx] = m_new;
1485 	map = sc->ti_cdata.ti_rx_jumbo_maps[i];
1486 	if (bus_dmamap_load_mbuf_sg(sc->ti_jumbo_dmat, map, m_new, segs,
1487 				    &nsegs, 0))
1488 		return (ENOBUFS);
1489 	if ((nsegs < 1) || (nsegs > 4))
1490 		return (ENOBUFS);
1491 	ti_hostaddr64(&r->ti_addr0, segs[0].ds_addr);
1492 	r->ti_len0 = m_new->m_len;
1493 
1494 	ti_hostaddr64(&r->ti_addr1, segs[1].ds_addr);
1495 	r->ti_len1 = PAGE_SIZE;
1496 
1497 	ti_hostaddr64(&r->ti_addr2, segs[2].ds_addr);
1498 	r->ti_len2 = m[1]->m_ext.ext_size; /* could be PAGE_SIZE or MCLBYTES */
1499 
1500 	if (PAGE_SIZE == 4096) {
1501 		ti_hostaddr64(&r->ti_addr3, segs[3].ds_addr);
1502 		r->ti_len3 = MCLBYTES;
1503 	} else {
1504 		r->ti_len3 = 0;
1505 	}
1506 	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
1507 
1508 	r->ti_flags = TI_BDFLAG_JUMBO_RING|TI_RCB_FLAG_USE_EXT_RX_BD;
1509 
1510 	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
1511 		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM;
1512 
1513 	r->ti_idx = idx;
1514 
1515 	bus_dmamap_sync(sc->ti_jumbo_dmat, map, BUS_DMASYNC_PREREAD);
1516 	return (0);
1517 
1518 nobufs:
1519 
1520 	/*
1521 	 * Warning! :
1522 	 * This can only be called before the mbufs are strung together.
1523 	 * If the mbufs are strung together, m_freem() will free the chain,
1524 	 * so that the later mbufs will be freed multiple times.
1525 	 */
1526 	if (m_new)
1527 		m_freem(m_new);
1528 
1529 	for (i = 0; i < 3; i++) {
1530 		if (m[i])
1531 			m_freem(m[i]);
1532 		if (sf[i])
1533 			sf_buf_mext((void *)sf_buf_kva(sf[i]), sf[i]);
1534 	}
1535 	return (ENOBUFS);
1536 }
1537 #endif
1538 
1539 /*
1540  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
1542  * 256 ring entries and hope that our CPU is fast enough to keep up with
1543  * the NIC.
1544  */
1545 static int
1546 ti_init_rx_ring_std(struct ti_softc *sc)
1547 {
1548 	int i;
1549 	struct ti_cmd_desc cmd;
1550 
1551 	for (i = 0; i < TI_SSLOTS; i++) {
1552 		if (ti_newbuf_std(sc, i, NULL) == ENOBUFS)
1553 			return (ENOBUFS);
1554 	};
1555 
1556 	TI_UPDATE_STDPROD(sc, i - 1);
1557 	sc->ti_std = i - 1;
1558 
1559 	return (0);
1560 }
1561 
1562 static void
1563 ti_free_rx_ring_std(struct ti_softc *sc)
1564 {
1565 	bus_dmamap_t map;
1566 	int i;
1567 
1568 	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
1569 		if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
1570 			map = sc->ti_cdata.ti_rx_std_maps[i];
1571 			bus_dmamap_sync(sc->ti_mbufrx_dmat, map,
1572 			    BUS_DMASYNC_POSTREAD);
1573 			bus_dmamap_unload(sc->ti_mbufrx_dmat, map);
1574 			m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
1575 			sc->ti_cdata.ti_rx_std_chain[i] = NULL;
1576 		}
1577 		bzero((char *)&sc->ti_rdata->ti_rx_std_ring[i],
1578 		    sizeof(struct ti_rx_desc));
1579 	}
1580 }
1581 
1582 static int
1583 ti_init_rx_ring_jumbo(struct ti_softc *sc)
1584 {
1585 	struct ti_cmd_desc cmd;
1586 	int i;
1587 
1588 	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
1589 		if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1590 			return (ENOBUFS);
1591 	};
1592 
1593 	TI_UPDATE_JUMBOPROD(sc, i - 1);
1594 	sc->ti_jumbo = i - 1;
1595 
1596 	return (0);
1597 }
1598 
1599 static void
1600 ti_free_rx_ring_jumbo(struct ti_softc *sc)
1601 {
1602 	bus_dmamap_t map;
1603 	int i;
1604 
1605 	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
1606 		if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
1607 			map = sc->ti_cdata.ti_rx_jumbo_maps[i];
1608 			bus_dmamap_sync(sc->ti_jumbo_dmat, map,
1609 			    BUS_DMASYNC_POSTREAD);
1610 			bus_dmamap_unload(sc->ti_jumbo_dmat, map);
1611 			m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
1612 			sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
1613 		}
1614 		bzero((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i],
1615 		    sizeof(struct ti_rx_desc));
1616 	}
1617 }
1618 
1619 static int
1620 ti_init_rx_ring_mini(struct ti_softc *sc)
1621 {
1622 	int i;
1623 
1624 	for (i = 0; i < TI_MSLOTS; i++) {
1625 		if (ti_newbuf_mini(sc, i, NULL) == ENOBUFS)
1626 			return (ENOBUFS);
1627 	};
1628 
1629 	TI_UPDATE_MINIPROD(sc, i - 1);
1630 	sc->ti_mini = i - 1;
1631 
1632 	return (0);
1633 }
1634 
1635 static void
1636 ti_free_rx_ring_mini(struct ti_softc *sc)
1637 {
1638 	bus_dmamap_t map;
1639 	int i;
1640 
1641 	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
1642 		if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
1643 			map = sc->ti_cdata.ti_rx_mini_maps[i];
1644 			bus_dmamap_sync(sc->ti_mbufrx_dmat, map,
1645 			    BUS_DMASYNC_POSTREAD);
1646 			bus_dmamap_unload(sc->ti_mbufrx_dmat, map);
1647 			m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
1648 			sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
1649 		}
1650 		bzero((char *)&sc->ti_rdata->ti_rx_mini_ring[i],
1651 		    sizeof(struct ti_rx_desc));
1652 	}
1653 }
1654 
1655 static void
1656 ti_free_tx_ring(struct ti_softc *sc)
1657 {
1658 	struct ti_txdesc *txd;
1659 	int i;
1660 
1661 	if (sc->ti_rdata->ti_tx_ring == NULL)
1662 		return;
1663 
1664 	for (i = 0; i < TI_TX_RING_CNT; i++) {
1665 		txd = &sc->ti_cdata.ti_txdesc[i];
1666 		if (txd->tx_m != NULL) {
1667 			bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap,
1668 			    BUS_DMASYNC_POSTWRITE);
1669 			bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap);
1670 			m_freem(txd->tx_m);
1671 			txd->tx_m = NULL;
1672 		}
1673 		bzero((char *)&sc->ti_rdata->ti_tx_ring[i],
1674 		    sizeof(struct ti_tx_desc));
1675 	}
1676 }
1677 
1678 static int
1679 ti_init_tx_ring(struct ti_softc *sc)
1680 {
1681 	struct ti_txdesc *txd;
1682 	int i;
1683 
1684 	STAILQ_INIT(&sc->ti_cdata.ti_txfreeq);
1685 	STAILQ_INIT(&sc->ti_cdata.ti_txbusyq);
1686 	for (i = 0; i < TI_TX_RING_CNT; i++) {
1687 		txd = &sc->ti_cdata.ti_txdesc[i];
1688 		STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q);
1689 	}
1690 	sc->ti_txcnt = 0;
1691 	sc->ti_tx_saved_considx = 0;
1692 	sc->ti_tx_saved_prodidx = 0;
1693 	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0);
1694 	return (0);
1695 }
1696 
1697 /*
1698  * The Tigon 2 firmware has a new way to add/delete multicast addresses,
1699  * but we have to support the old way too so that Tigon 1 cards will
1700  * work.
1701  */
1702 static void
1703 ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr)
1704 {
1705 	struct ti_cmd_desc cmd;
1706 	uint16_t *m;
1707 	uint32_t ext[2] = {0, 0};
1708 
1709 	m = (uint16_t *)&addr->octet[0];
1710 
1711 	switch (sc->ti_hwrev) {
1712 	case TI_HWREV_TIGON:
1713 		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
1714 		CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
1715 		TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0);
1716 		break;
1717 	case TI_HWREV_TIGON_II:
1718 		ext[0] = htons(m[0]);
1719 		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
1720 		TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2);
1721 		break;
1722 	default:
1723 		device_printf(sc->ti_dev, "unknown hwrev\n");
1724 		break;
1725 	}
1726 }
1727 
1728 static void
1729 ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr)
1730 {
1731 	struct ti_cmd_desc cmd;
1732 	uint16_t *m;
1733 	uint32_t ext[2] = {0, 0};
1734 
1735 	m = (uint16_t *)&addr->octet[0];
1736 
1737 	switch (sc->ti_hwrev) {
1738 	case TI_HWREV_TIGON:
1739 		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
1740 		CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
1741 		TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0);
1742 		break;
1743 	case TI_HWREV_TIGON_II:
1744 		ext[0] = htons(m[0]);
1745 		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
1746 		TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2);
1747 		break;
1748 	default:
1749 		device_printf(sc->ti_dev, "unknown hwrev\n");
1750 		break;
1751 	}
1752 }
1753 
1754 /*
1755  * Configure the Tigon's multicast address filter.
1756  *
1757  * The actual multicast table management is a bit of a pain, thanks to
1758  * slight brain damage on the part of both Alteon and us. With our
1759  * multicast code, we are only alerted when the multicast address table
1760  * changes and at that point we only have the current list of addresses:
1761  * we only know the current state, not the previous state, so we don't
1762  * actually know what addresses were removed or added. The firmware has
1763  * state, but we can't get our grubby mits on it, and there is no 'delete
1764  * all multicast addresses' command. Hence, we have to maintain our own
1765  * state so we know what addresses have been programmed into the NIC at
1766  * any given time.
1767  */
static void
ti_setmulti(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct ti_cmd_desc cmd;	/* presumably used by the TI_DO_CMD() macro */
	struct ti_mc_entry *mc;
	uint32_t intrs;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/*
	 * With ALLMULTI there is no need to maintain the filter list;
	 * enable it in firmware and return early.
	 */
	if (ifp->if_flags & IFF_ALLMULTI) {
		TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0);
		return;
	} else {
		TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0);
	}

	/* Disable interrupts while the filter list is rewritten. */
	intrs = CSR_READ_4(sc, TI_MB_HOSTINTR);
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/* First, zot all the existing filters. */
	while (SLIST_FIRST(&sc->ti_mc_listhead) != NULL) {
		mc = SLIST_FIRST(&sc->ti_mc_listhead);
		ti_del_mcast(sc, &mc->mc_addr);
		SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries);
		free(mc, M_DEVBUF);
	}

	/* Now program new ones. */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT);
		if (mc == NULL) {
			/* Skip this address but keep programming the rest. */
			device_printf(sc->ti_dev,
			    "no memory for mcast filter entry\n");
			continue;
		}
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    (char *)&mc->mc_addr, ETHER_ADDR_LEN);
		SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries);
		ti_add_mcast(sc, &mc->mc_addr);
	}
	if_maddr_runlock(ifp);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
}
1821 
1822 /*
1823  * Check to see if the BIOS has configured us for a 64 bit slot when
1824  * we aren't actually in one. If we detect this condition, we can work
1825  * around it on the Tigon 2 by setting a bit in the PCI state register,
1826  * but for the Tigon 1 we must give up and abort the interface attach.
1827  */
static int ti_64bitslot_war(struct ti_softc *sc)
{

	/*
	 * PCI state claims a 64-bit bus (32BIT_BUS bit clear): probe by
	 * zeroing two adjacent words at 0x600/0x604 and then writing a
	 * test pattern at 0x600.  NOTE(review): seeing the pattern echo
	 * at 0x604 is taken to mean the board is really in a 32-bit
	 * slot despite its configuration -- confirm against the Tigon
	 * documentation.
	 */
	if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) {
		CSR_WRITE_4(sc, 0x600, 0);
		CSR_WRITE_4(sc, 0x604, 0);
		CSR_WRITE_4(sc, 0x600, 0x5555AAAA);
		if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) {
			/* Tigon 1: no workaround exists; abort attach. */
			if (sc->ti_hwrev == TI_HWREV_TIGON)
				return (EINVAL);
			else {
				/* Tigon 2: force 32-bit mode and carry on. */
				TI_SETBIT(sc, TI_PCI_STATE,
				    TI_PCISTATE_32BIT_BUS);
				return (0);
			}
		}
	}

	return (0);
}
1848 
1849 /*
1850  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1851  * self-test results.
1852  */
static int
ti_chipinit(struct ti_softc *sc)
{
	uint32_t cacheline;
	uint32_t pci_writemax = 0;
	uint32_t hdrsplit;

	/* Initialize link to down state. */
	sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;

	/* Set endianness before we access any non-PCI registers. */
#if 0 && BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24));
#else
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));
#endif

	/* Check the ROM failed bit to see if self-tests passed. */
	if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) {
		device_printf(sc->ti_dev, "board self-diagnostics failed!\n");
		return (ENODEV);
	}

	/* Halt the CPU. */
	TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT);

	/* Figure out the hardware revision. */
	switch (CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) {
	case TI_REV_TIGON_I:
		sc->ti_hwrev = TI_HWREV_TIGON;
		break;
	case TI_REV_TIGON_II:
		sc->ti_hwrev = TI_HWREV_TIGON_II;
		break;
	default:
		device_printf(sc->ti_dev, "unsupported chip revision\n");
		return (ENODEV);
	}

	/* Do special setup for Tigon 2. */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K);
		TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS);
	}

	/*
	 * We don't have firmware source for the Tigon 1, so Tigon 1 boards
	 * can't do header splitting.
	 */
#ifdef TI_JUMBO_HDRSPLIT
	if (sc->ti_hwrev != TI_HWREV_TIGON)
		sc->ti_hdrsplit = 1;
	else
		device_printf(sc->ti_dev,
		    "can't do header splitting on a Tigon I board\n");
#endif /* TI_JUMBO_HDRSPLIT */

	/* Set up the PCI state register. */
	CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD);
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT);
	}

	/* Clear the read/write max DMA parameters. */
	TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA|
	    TI_PCISTATE_READ_MAXDMA));

	/* Get cache line size. */
	cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF;

	/*
	 * If the system has set enabled the PCI memory write
	 * and invalidate command in the command register, set
	 * the write max parameter accordingly. This is necessary
	 * to use MWI with the Tigon 2.
	 */
	if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) {
		switch (cacheline) {
		case 1:
		case 4:
		case 8:
		case 16:
		case 32:
		case 64:
			break;
		default:
		/* Disable PCI memory write and invalidate. */
			if (bootverbose)
				device_printf(sc->ti_dev, "cache line size %d"
				    " not supported; disabling PCI MWI\n",
				    cacheline);
			CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc,
			    TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN);
			break;
		}
	}

	/*
	 * NOTE(review): pci_writemax is never changed from 0 in this
	 * function, so this TI_SETBIT is currently a no-op -- possibly
	 * a leftover from platform-specific code; confirm intent.
	 */
	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax);

	/* This sets the min dma param all the way up (0xff). */
	TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA);

	if (sc->ti_hdrsplit)
		hdrsplit = TI_OPMODE_JUMBO_HDRSPLIT;
	else
		hdrsplit = 0;

	/* Configure DMA variables. */
#if BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD |
	    TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD |
	    TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
	    TI_OPMODE_DONT_FRAG_JUMBO | hdrsplit);
#else /* BYTE_ORDER */
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA|
	    TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO|
	    TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB | hdrsplit);
#endif /* BYTE_ORDER */

	/*
	 * Only allow 1 DMA channel to be active at a time.
	 * I don't think this is a good idea, but without it
	 * the firmware racks up lots of nicDmaReadRingFull
	 * errors.  This is not compatible with hardware checksums.
	 */
	if ((sc->ti_ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_RXCSUM)) == 0)
		TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE);

	/* Recommended settings from Tigon manual. */
	CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W);
	CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W);

	if (ti_64bitslot_war(sc)) {
		device_printf(sc->ti_dev, "bios thinks we're in a 64 bit slot, "
		    "but we aren't");
		return (EINVAL);
	}

	return (0);
}
1996 
1997 /*
1998  * Initialize the general information block and firmware, and
1999  * start the CPU(s) running.
2000  */
static int
ti_gibinit(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ti_rcb *rcb;
	uint32_t rdphys;
	int i;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;
	/* Bus address of the shared ring-data slab (allocated below 4GB). */
	rdphys = sc->ti_rdata_phys;

	/* Disable interrupts for now. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/*
	 * Tell the chip where to find the general information block.
	 * While this struct could go into >4GB memory, we allocate it in a
	 * single slab with the other descriptors, and those don't seem to
	 * support being located in a 64-bit region.
	 */
	CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0);
	CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, rdphys + TI_RD_OFF(ti_info));

	/* Load the firmware into SRAM. */
	ti_loadfw(sc);

	/* Set up the contents of the general info and ring control blocks. */

	/* Set up the event ring and producer pointer. */
	rcb = &sc->ti_rdata->ti_info.ti_ev_rcb;

	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_event_ring);
	rcb->ti_flags = 0;
	/* Host location the NIC DMAs its event producer index into. */
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) =
	    rdphys + TI_RD_OFF(ti_ev_prodidx_r);
	sc->ti_ev_prodidx.ti_idx = 0;
	CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
	sc->ti_ev_saved_considx = 0;

	/* Set up the command ring and producer mailbox. */
	rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb;

	/* The command ring lives in NIC-local memory, not host memory. */
	TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING);
	rcb->ti_flags = 0;
	rcb->ti_max_len = 0;
	/* Clear the on-NIC command ring before use. */
	for (i = 0; i < TI_CMD_RING_CNT; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
	}
	CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
	sc->ti_cmd_saved_prodidx = 0;

	/*
	 * Assign the address of the stats refresh buffer.
	 * We re-use the current stats buffer for this to
	 * conserve memory.
	 */
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) =
	    rdphys + TI_RD_OFF(ti_info.ti_stats);

	/* Set up the standard receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_std_ring);
	rcb->ti_max_len = TI_FRAMELEN;
	rcb->ti_flags = 0;
	/* Enable hardware RX checksum / VLAN stripping per capabilities. */
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/* Set up the jumbo receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_jumbo_ring);

#ifdef TI_PRIVATE_JUMBOS
	rcb->ti_max_len = TI_JUMBO_FRAMELEN;
	rcb->ti_flags = 0;
#else
	/* Jumbo frames land in page-sized extended buffer descriptors. */
	rcb->ti_max_len = PAGE_SIZE;
	rcb->ti_flags = TI_RCB_FLAG_USE_EXT_RX_BD;
#endif
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/*
	 * Set up the mini ring. Only activated on the
	 * Tigon 2 but the slot in the config block is
	 * still there on the Tigon 1.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_mini_ring);
	rcb->ti_max_len = MHLEN - ETHER_ALIGN;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
	else
		rcb->ti_flags = 0;
	if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/*
	 * Set up the receive return ring.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_return_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_return_ring);
	rcb->ti_flags = 0;
	rcb->ti_max_len = TI_RETURN_RING_CNT;
	/* Host location the NIC DMAs its return-ring producer index into. */
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) =
	    rdphys + TI_RD_OFF(ti_return_prodidx_r);

	/*
	 * Set up the tx ring. Note: for the Tigon 2, we have the option
	 * of putting the transmit ring in the host's address space and
	 * letting the chip DMA it instead of leaving the ring in the NIC's
	 * memory and accessing it through the shared memory region. We
	 * do this for the Tigon 2, but it doesn't work on the Tigon 1,
	 * so we have to revert to the shared memory scheme if we detect
	 * a Tigon 1 chip.
	 */
	CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
	bzero((char *)sc->ti_rdata->ti_tx_ring,
	    TI_TX_RING_CNT * sizeof(struct ti_tx_desc));
	rcb = &sc->ti_rdata->ti_info.ti_tx_rcb;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = 0;
	else
		rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
	if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
	if (sc->ti_ifp->if_capenable & IFCAP_TXCSUM)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	rcb->ti_max_len = TI_TX_RING_CNT;
	/* Tigon 1: ring in NIC SRAM; Tigon 2: ring in host memory. */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE;
	else
		TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_tx_ring);
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) =
	    rdphys + TI_RD_OFF(ti_tx_considx_r);

	/* Flush the info block/rings to memory before the NIC reads them. */
	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Set up tuneables */
#if 0
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
		    (sc->ti_rx_coal_ticks / 10));
	else
#endif
		CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
	CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);

	/* Turn interrupts on. */
	CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	/* Start CPU. */
	TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP));

	/* Always succeeds; return type kept for symmetry with callers. */
	return (0);
}
2175 
2176 static void
2177 ti_rdata_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2178 {
2179 	struct ti_softc *sc;
2180 
2181 	sc = arg;
2182 	if (error || nseg != 1)
2183 		return;
2184 
2185 	/*
2186 	 * All of the Tigon data structures need to live at <4GB.  This
2187 	 * cast is fine since busdma was told about this constraint.
2188 	 */
2189 	sc->ti_rdata_phys = segs[0].ds_addr;
2190 	return;
2191 }
2192 
2193 /*
2194  * Probe for a Tigon chip. Check the PCI vendor and device IDs
2195  * against our list and return its name if we find a match.
2196  */
2197 static int
2198 ti_probe(device_t dev)
2199 {
2200 	const struct ti_type *t;
2201 
2202 	t = ti_devs;
2203 
2204 	while (t->ti_name != NULL) {
2205 		if ((pci_get_vendor(dev) == t->ti_vid) &&
2206 		    (pci_get_device(dev) == t->ti_did)) {
2207 			device_set_desc(dev, t->ti_name);
2208 			return (BUS_PROBE_DEFAULT);
2209 		}
2210 		t++;
2211 	}
2212 
2213 	return (ENXIO);
2214 }
2215 
2216 static int
2217 ti_attach(device_t dev)
2218 {
2219 	struct ifnet *ifp;
2220 	struct ti_softc *sc;
2221 	int error = 0, rid;
2222 	u_char eaddr[6];
2223 
2224 	sc = device_get_softc(dev);
2225 	sc->ti_unit = device_get_unit(dev);
2226 	sc->ti_dev = dev;
2227 
2228 	mtx_init(&sc->ti_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
2229 	    MTX_DEF);
2230 	callout_init_mtx(&sc->ti_watchdog, &sc->ti_mtx, 0);
2231 	ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts);
2232 	ifp = sc->ti_ifp = if_alloc(IFT_ETHER);
2233 	if (ifp == NULL) {
2234 		device_printf(dev, "can not if_alloc()\n");
2235 		error = ENOSPC;
2236 		goto fail;
2237 	}
2238 	sc->ti_ifp->if_hwassist = TI_CSUM_FEATURES;
2239 	sc->ti_ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
2240 	sc->ti_ifp->if_capenable = sc->ti_ifp->if_capabilities;
2241 
2242 	/*
2243 	 * Map control/status registers.
2244 	 */
2245 	pci_enable_busmaster(dev);
2246 
2247 	rid = TI_PCI_LOMEM;
2248 	sc->ti_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2249 	    RF_ACTIVE);
2250 
2251 	if (sc->ti_res == NULL) {
2252 		device_printf(dev, "couldn't map memory\n");
2253 		error = ENXIO;
2254 		goto fail;
2255 	}
2256 
2257 	sc->ti_btag = rman_get_bustag(sc->ti_res);
2258 	sc->ti_bhandle = rman_get_bushandle(sc->ti_res);
2259 
2260 	/* Allocate interrupt */
2261 	rid = 0;
2262 
2263 	sc->ti_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2264 	    RF_SHAREABLE | RF_ACTIVE);
2265 
2266 	if (sc->ti_irq == NULL) {
2267 		device_printf(dev, "couldn't map interrupt\n");
2268 		error = ENXIO;
2269 		goto fail;
2270 	}
2271 
2272 	if (ti_chipinit(sc)) {
2273 		device_printf(dev, "chip initialization failed\n");
2274 		error = ENXIO;
2275 		goto fail;
2276 	}
2277 
2278 	/* Zero out the NIC's on-board SRAM. */
2279 	ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000);
2280 
2281 	/* Init again -- zeroing memory may have clobbered some registers. */
2282 	if (ti_chipinit(sc)) {
2283 		device_printf(dev, "chip initialization failed\n");
2284 		error = ENXIO;
2285 		goto fail;
2286 	}
2287 
2288 	/*
2289 	 * Get station address from the EEPROM. Note: the manual states
2290 	 * that the MAC address is at offset 0x8c, however the data is
2291 	 * stored as two longwords (since that's how it's loaded into
2292 	 * the NIC). This means the MAC address is actually preceded
2293 	 * by two zero bytes. We need to skip over those.
2294 	 */
2295 	if (ti_read_eeprom(sc, eaddr,
2296 				TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2297 		device_printf(dev, "failed to read station address\n");
2298 		error = ENXIO;
2299 		goto fail;
2300 	}
2301 
2302 	/* Allocate the general information block and ring buffers. */
2303 	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
2304 				1, 0,			/* algnmnt, boundary */
2305 				BUS_SPACE_MAXADDR,	/* lowaddr */
2306 				BUS_SPACE_MAXADDR,	/* highaddr */
2307 				NULL, NULL,		/* filter, filterarg */
2308 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
2309 				0,			/* nsegments */
2310 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
2311 				0,			/* flags */
2312 				NULL, NULL,		/* lockfunc, lockarg */
2313 				&sc->ti_parent_dmat) != 0) {
2314 		device_printf(dev, "Failed to allocate parent dmat\n");
2315 		error = ENOMEM;
2316 		goto fail;
2317 	}
2318 
2319 	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
2320 				PAGE_SIZE, 0,		/* algnmnt, boundary */
2321 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2322 				BUS_SPACE_MAXADDR,	/* highaddr */
2323 				NULL, NULL,		/* filter, filterarg */
2324 				sizeof(struct ti_ring_data),	/* maxsize */
2325 				1,			/* nsegments */
2326 				sizeof(struct ti_ring_data),	/* maxsegsize */
2327 				0,			/* flags */
2328 				NULL, NULL,		/* lockfunc, lockarg */
2329 				&sc->ti_rdata_dmat) != 0) {
2330 		device_printf(dev, "Failed to allocate rdata dmat\n");
2331 		error = ENOMEM;
2332 		goto fail;
2333 	}
2334 
2335 	if (bus_dmamem_alloc(sc->ti_rdata_dmat, (void**)&sc->ti_rdata,
2336 			     BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2337 			     &sc->ti_rdata_dmamap) != 0) {
2338 		device_printf(dev, "Failed to allocate rdata memory\n");
2339 		error = ENOMEM;
2340 		goto fail;
2341 	}
2342 
2343 	if (bus_dmamap_load(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
2344 			    sc->ti_rdata, sizeof(struct ti_ring_data),
2345 			    ti_rdata_cb, sc, BUS_DMA_NOWAIT) != 0) {
2346 		device_printf(dev, "Failed to load rdata segments\n");
2347 		error = ENOMEM;
2348 		goto fail;
2349 	}
2350 
2351 	bzero(sc->ti_rdata, sizeof(struct ti_ring_data));
2352 
2353 	/* Try to allocate memory for jumbo buffers. */
2354 	if (ti_alloc_jumbo_mem(sc)) {
2355 		device_printf(dev, "jumbo buffer allocation failed\n");
2356 		error = ENXIO;
2357 		goto fail;
2358 	}
2359 
2360 	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
2361 				1, 0,			/* algnmnt, boundary */
2362 				BUS_SPACE_MAXADDR,	/* lowaddr */
2363 				BUS_SPACE_MAXADDR,	/* highaddr */
2364 				NULL, NULL,		/* filter, filterarg */
2365 				MCLBYTES * TI_MAXTXSEGS,/* maxsize */
2366 				TI_MAXTXSEGS,		/* nsegments */
2367 				MCLBYTES,		/* maxsegsize */
2368 				0,			/* flags */
2369 				NULL, NULL,		/* lockfunc, lockarg */
2370 				&sc->ti_mbuftx_dmat) != 0) {
2371 		device_printf(dev, "Failed to allocate rdata dmat\n");
2372 		error = ENOMEM;
2373 		goto fail;
2374 	}
2375 
2376 	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
2377 				1, 0,			/* algnmnt, boundary */
2378 				BUS_SPACE_MAXADDR,	/* lowaddr */
2379 				BUS_SPACE_MAXADDR,	/* highaddr */
2380 				NULL, NULL,		/* filter, filterarg */
2381 				MCLBYTES,		/* maxsize */
2382 				1,			/* nsegments */
2383 				MCLBYTES,		/* maxsegsize */
2384 				0,			/* flags */
2385 				NULL, NULL,		/* lockfunc, lockarg */
2386 				&sc->ti_mbufrx_dmat) != 0) {
2387 		device_printf(dev, "Failed to allocate rdata dmat\n");
2388 		error = ENOMEM;
2389 		goto fail;
2390 	}
2391 
2392 	if (ti_alloc_dmamaps(sc)) {
2393 		device_printf(dev, "dma map creation failed\n");
2394 		error = ENXIO;
2395 		goto fail;
2396 	}
2397 
2398 	/*
2399 	 * We really need a better way to tell a 1000baseTX card
2400 	 * from a 1000baseSX one, since in theory there could be
2401 	 * OEMed 1000baseTX cards from lame vendors who aren't
2402 	 * clever enough to change the PCI ID. For the moment
2403 	 * though, the AceNIC is the only copper card available.
2404 	 */
2405 	if (pci_get_vendor(dev) == ALT_VENDORID &&
2406 	    pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER)
2407 		sc->ti_copper = 1;
2408 	/* Ok, it's not the only copper card available. */
2409 	if (pci_get_vendor(dev) == NG_VENDORID &&
2410 	    pci_get_device(dev) == NG_DEVICEID_GA620T)
2411 		sc->ti_copper = 1;
2412 
2413 	/* Set default tuneable values. */
2414 	sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC;
2415 #if 0
2416 	sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000;
2417 #endif
2418 	sc->ti_rx_coal_ticks = 170;
2419 	sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500;
2420 	sc->ti_rx_max_coal_bds = 64;
2421 #if 0
2422 	sc->ti_tx_max_coal_bds = 128;
2423 #endif
2424 	sc->ti_tx_max_coal_bds = 32;
2425 	sc->ti_tx_buf_ratio = 21;
2426 
2427 	/* Set up ifnet structure */
2428 	ifp->if_softc = sc;
2429 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2430 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2431 	ifp->if_ioctl = ti_ioctl;
2432 	ifp->if_start = ti_start;
2433 	ifp->if_init = ti_init;
2434 	ifp->if_baudrate = IF_Gbps(1UL);
2435 	ifp->if_snd.ifq_drv_maxlen = TI_TX_RING_CNT - 1;
2436 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2437 	IFQ_SET_READY(&ifp->if_snd);
2438 
2439 	/* Set up ifmedia support. */
2440 	if (sc->ti_copper) {
2441 		/*
2442 		 * Copper cards allow manual 10/100 mode selection,
2443 		 * but not manual 1000baseTX mode selection. Why?
2444 		 * Becuase currently there's no way to specify the
2445 		 * master/slave setting through the firmware interface,
2446 		 * so Alteon decided to just bag it and handle it
2447 		 * via autonegotiation.
2448 		 */
2449 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
2450 		ifmedia_add(&sc->ifmedia,
2451 		    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2452 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
2453 		ifmedia_add(&sc->ifmedia,
2454 		    IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
2455 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL);
2456 		ifmedia_add(&sc->ifmedia,
2457 		    IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
2458 	} else {
2459 		/* Fiber cards don't support 10/100 modes. */
2460 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2461 		ifmedia_add(&sc->ifmedia,
2462 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2463 	}
2464 	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2465 	ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);
2466 
2467 	/*
2468 	 * We're assuming here that card initialization is a sequential
2469 	 * thing.  If it isn't, multiple cards probing at the same time
2470 	 * could stomp on the list of softcs here.
2471 	 */
2472 
2473 	/* Register the device */
2474 	sc->dev = make_dev(&ti_cdevsw, sc->ti_unit, UID_ROOT, GID_OPERATOR,
2475 			   0600, "ti%d", sc->ti_unit);
2476 	sc->dev->si_drv1 = sc;
2477 
2478 	/*
2479 	 * Call MI attach routine.
2480 	 */
2481 	ether_ifattach(ifp, eaddr);
2482 
2483 	/* VLAN capability setup. */
2484 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM |
2485 	    IFCAP_VLAN_HWTAGGING;
2486 	ifp->if_capenable = ifp->if_capabilities;
2487 	/* Tell the upper layer we support VLAN over-sized frames. */
2488 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2489 
2490 	/* Driver supports link state tracking. */
2491 	ifp->if_capabilities |= IFCAP_LINKSTATE;
2492 	ifp->if_capenable |= IFCAP_LINKSTATE;
2493 
2494 	/* Hook interrupt last to avoid having to lock softc */
2495 	error = bus_setup_intr(dev, sc->ti_irq, INTR_TYPE_NET|INTR_MPSAFE,
2496 	   NULL, ti_intr, sc, &sc->ti_intrhand);
2497 
2498 	if (error) {
2499 		device_printf(dev, "couldn't set up irq\n");
2500 		goto fail;
2501 	}
2502 
2503 fail:
2504 	if (error)
2505 		ti_detach(dev);
2506 
2507 	return (error);
2508 }
2509 
2510 /*
2511  * Shutdown hardware and free up resources. This can be called any
2512  * time after the mutex has been initialized. It is called in both
2513  * the error case in attach and the normal detach case so it needs
2514  * to be careful about only freeing resources that have actually been
2515  * allocated.
2516  */
static int
ti_detach(device_t dev)
{
	struct ti_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	/* Remove the /dev/ti%d character device node, if it was created. */
	if (sc->dev)
		destroy_dev(sc->dev);
	KASSERT(mtx_initialized(&sc->ti_mtx), ("ti mutex not initialized"));
	ifp = sc->ti_ifp;
	/* Only undo ether_ifattach()/stop the chip on a full attach. */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		TI_LOCK(sc);
		ti_stop(sc);
		TI_UNLOCK(sc);
	}

	/* These should only be active if attach succeeded */
	callout_drain(&sc->ti_watchdog);
	bus_generic_detach(dev);
	ti_free_dmamaps(sc);
	ifmedia_removeall(&sc->ifmedia);

	/*
	 * Free DMA resources in the reverse order they were created:
	 * buffers, then maps/memory, then tags, parent tag last.
	 */
#ifdef TI_PRIVATE_JUMBOS
	if (sc->ti_cdata.ti_jumbo_buf)
		bus_dmamem_free(sc->ti_jumbo_dmat, sc->ti_cdata.ti_jumbo_buf,
		    sc->ti_jumbo_dmamap);
#endif
	if (sc->ti_jumbo_dmat)
		bus_dma_tag_destroy(sc->ti_jumbo_dmat);
	if (sc->ti_mbuftx_dmat)
		bus_dma_tag_destroy(sc->ti_mbuftx_dmat);
	if (sc->ti_mbufrx_dmat)
		bus_dma_tag_destroy(sc->ti_mbufrx_dmat);
	if (sc->ti_rdata && sc->ti_rdata_dmamap)
		bus_dmamap_unload(sc->ti_rdata_dmat, sc->ti_rdata_dmamap);
	if (sc->ti_rdata)
		bus_dmamem_free(sc->ti_rdata_dmat, sc->ti_rdata,
				sc->ti_rdata_dmamap);
	if (sc->ti_rdata_dmat)
		bus_dma_tag_destroy(sc->ti_rdata_dmat);
	if (sc->ti_parent_dmat)
		bus_dma_tag_destroy(sc->ti_parent_dmat);
	if (sc->ti_intrhand)
		bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
	if (sc->ti_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
	if (sc->ti_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM,
		    sc->ti_res);
	}
	if (ifp)
		if_free(ifp);

	/* Destroy the mutex last; everything above may have used it. */
	mtx_destroy(&sc->ti_mtx);

	return (0);
}
2576 
2577 #ifdef TI_JUMBO_HDRSPLIT
2578 /*
2579  * If hdr_len is 0, that means that header splitting wasn't done on
2580  * this packet for some reason.  The two most likely reasons are that
2581  * the protocol isn't a supported protocol for splitting, or this
2582  * packet had a fragment offset that wasn't 0.
2583  *
2584  * The header length, if it is non-zero, will always be the length of
2585  * the headers on the packet, but that length could be longer than the
2586  * first mbuf.  So we take the minimum of the two as the actual
2587  * length.
2588  */
2589 static __inline void
2590 ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len, int idx)
2591 {
2592 	int i = 0;
2593 	int lengths[4] = {0, 0, 0, 0};
2594 	struct mbuf *m, *mp;
2595 
2596 	if (hdr_len != 0)
2597 		top->m_len = min(hdr_len, top->m_len);
2598 	pkt_len -= top->m_len;
2599 	lengths[i++] = top->m_len;
2600 
2601 	mp = top;
2602 	for (m = top->m_next; m && pkt_len; m = m->m_next) {
2603 		m->m_len = m->m_ext.ext_size = min(m->m_len, pkt_len);
2604 		pkt_len -= m->m_len;
2605 		lengths[i++] = m->m_len;
2606 		mp = m;
2607 	}
2608 
2609 #if 0
2610 	if (hdr_len != 0)
2611 		printf("got split packet: ");
2612 	else
2613 		printf("got non-split packet: ");
2614 
2615 	printf("%d,%d,%d,%d = %d\n", lengths[0],
2616 	    lengths[1], lengths[2], lengths[3],
2617 	    lengths[0] + lengths[1] + lengths[2] +
2618 	    lengths[3]);
2619 #endif
2620 
2621 	if (pkt_len)
2622 		panic("header splitting didn't");
2623 
2624 	if (m) {
2625 		m_freem(m);
2626 		mp->m_next = NULL;
2627 
2628 	}
2629 	if (mp->m_next != NULL)
2630 		panic("ti_hdr_split: last mbuf in chain should be null");
2631 }
2632 #endif /* TI_JUMBO_HDRSPLIT */
2633 
2634 /*
2635  * Frame reception handling. This is called if there's a frame
2636  * on the receive return list.
2637  *
2638  * Note: we have to be able to handle three possibilities here:
2639  * 1) the frame is from the mini receive ring (can only happen)
2640  *    on Tigon 2 boards)
2641  * 2) the frame is from the jumbo recieve ring
2642  * 3) the frame is from the standard receive ring
2643  */
2644 
static void
ti_rxeof(struct ti_softc *sc)
{
	struct ifnet *ifp;
	bus_dmamap_t map;
	struct ti_cmd_desc cmd;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/* Drain the return ring up to the NIC's producer index. */
	while (sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) {
		struct ti_rx_desc *cur_rx;
		struct mbuf *m = NULL;
		uint32_t rxidx;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

		cur_rx =
		    &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx];
		rxidx = cur_rx->ti_idx;
		TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT);

		if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->ti_vlan_tag;
		}

		/*
		 * The descriptor flags identify which of the three
		 * receive rings (jumbo, mini, standard) the frame
		 * came from; reclaim and replenish accordingly.
		 */
		if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) {

			TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx];
			sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL;
			map = sc->ti_cdata.ti_rx_jumbo_maps[rxidx];
			bus_dmamap_sync(sc->ti_jumbo_dmat, map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ti_jumbo_dmat, map);
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				/* Recycle the old mbuf back into the ring. */
				ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
				continue;
			}
			if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
				continue;
			}
#ifdef TI_PRIVATE_JUMBOS
			m->m_len = cur_rx->ti_len;
#else /* TI_PRIVATE_JUMBOS */
#ifdef TI_JUMBO_HDRSPLIT
			if (sc->ti_hdrsplit)
				ti_hdr_split(m, TI_HOSTADDR(cur_rx->ti_addr),
					     cur_rx->ti_len, rxidx);
			else
#endif /* TI_JUMBO_HDRSPLIT */
			/* Trim the multi-page chain down to the frame size. */
			m_adj(m, cur_rx->ti_len - m->m_pkthdr.len);
#endif /* TI_PRIVATE_JUMBOS */
		} else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
			TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
			sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL;
			map = sc->ti_cdata.ti_rx_mini_maps[rxidx];
			bus_dmamap_sync(sc->ti_mbufrx_dmat, map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ti_mbufrx_dmat, map);
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_mini(sc, sc->ti_mini, m);
				continue;
			}
			if (ti_newbuf_mini(sc, sc->ti_mini, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_mini(sc, sc->ti_mini, m);
				continue;
			}
			m->m_len = cur_rx->ti_len;
		} else {
			TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_std_chain[rxidx];
			sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL;
			map = sc->ti_cdata.ti_rx_std_maps[rxidx];
			bus_dmamap_sync(sc->ti_mbufrx_dmat, map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ti_mbufrx_dmat, map);
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_std(sc, sc->ti_std, m);
				continue;
			}
			if (ti_newbuf_std(sc, sc->ti_std, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_std(sc, sc->ti_std, m);
				continue;
			}
			m->m_len = cur_rx->ti_len;
		}

		m->m_pkthdr.len = cur_rx->ti_len;
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Translate the NIC's checksum results into mbuf flags. */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			if (cur_rx->ti_flags & TI_BDFLAG_IP_CKSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if (cur_rx->ti_flags & TI_BDFLAG_TCP_UDP_CKSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->ti_tcp_udp_cksum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
			}
		}

		/*
		 * If we received a packet with a vlan tag,
		 * tag it before passing the packet upward.
		 */
		if (have_tag) {
			m->m_pkthdr.ether_vtag = vlan_tag;
			m->m_flags |= M_VLANTAG;
		}
		/* Drop the lock around if_input(); the stack may re-enter. */
		TI_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		TI_LOCK(sc);
	}

	/* Only necessary on the Tigon 1. */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX,
		    sc->ti_rx_saved_considx);

	/* Tell the NIC about the buffers we replenished. */
	TI_UPDATE_STDPROD(sc, sc->ti_std);
	TI_UPDATE_MINIPROD(sc, sc->ti_mini);
	TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo);
}
2782 
static void
ti_txeof(struct ti_softc *sc)
{
	struct ti_txdesc *txd;
	struct ti_tx_desc txdesc;
	struct ti_tx_desc *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;

	ifp = sc->ti_ifp;

	/* Nothing queued to the hardware -- nothing to reclaim. */
	txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq);
	if (txd == NULL)
		return;
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	for (idx = sc->ti_tx_saved_considx; idx != sc->ti_tx_considx.ti_idx;
	    TI_INC(idx, TI_TX_RING_CNT)) {
		if (sc->ti_hwrev == TI_HWREV_TIGON) {
			/* Tigon 1 keeps the ring in NIC SRAM; copy it out. */
			ti_mem_read(sc, TI_TX_RING_BASE + idx * sizeof(txdesc),
			    sizeof(txdesc), &txdesc);
			cur_tx = &txdesc;
		} else
			cur_tx = &sc->ti_rdata->ti_tx_ring[idx];
		sc->ti_txcnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* Only the last descriptor of a frame carries the mbuf. */
		if ((cur_tx->ti_flags & TI_BDFLAG_END) == 0)
			continue;
		bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap);

		ifp->if_opackets++;
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		/* Move the descriptor back to the free queue. */
		STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq);
	}
	sc->ti_tx_saved_considx = idx;

	/* (Re)arm the watchdog only while frames remain outstanding. */
	sc->ti_timer = sc->ti_txcnt > 0 ? 5 : 0;
}
2828 
static void
ti_intr(void *xsc)
{
	struct ti_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	TI_LOCK(sc);
	ifp = sc->ti_ifp;

/*#ifdef notdef*/
	/* Avoid this for now -- checking this register is expensive. */
	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) {
		TI_UNLOCK(sc);
		return;
	}
/*#endif*/

	/* Ack interrupt and stop others from occuring. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Check RX return ring producer/consumer */
		ti_rxeof(sc);

		/* Check TX ring producer/consumer */
		ti_txeof(sc);
	}

	/* Process firmware events (link changes, stats, errors). */
	ti_handle_events(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Re-enable interrupts. */
		CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
		/* TX reclaim may have freed ring space; restart output. */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ti_start_locked(ifp);
	}

	TI_UNLOCK(sc);
}
2870 
2871 static void
2872 ti_stats_update(struct ti_softc *sc)
2873 {
2874 	struct ifnet *ifp;
2875 
2876 	ifp = sc->ti_ifp;
2877 
2878 	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
2879 	    BUS_DMASYNC_POSTREAD);
2880 
2881 	ifp->if_collisions +=
2882 	   (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames +
2883 	   sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames +
2884 	   sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions +
2885 	   sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) -
2886 	   ifp->if_collisions;
2887 
2888 	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
2889 	    BUS_DMASYNC_PREREAD);
2890 }
2891 
2892 /*
2893  * Encapsulate an mbuf chain in the tx ring  by coupling the mbuf data
2894  * pointers to descriptors.
2895  */
static int
ti_encap(struct ti_softc *sc, struct mbuf **m_head)
{
	struct ti_txdesc *txd;
	struct ti_tx_desc *f;
	struct ti_tx_desc txdesc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[TI_MAXTXSEGS];
	uint16_t csum_flags;
	int error, frag, i, nseg;

	/* Need a free software descriptor to track this frame. */
	if ((txd = STAILQ_FIRST(&sc->ti_cdata.ti_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc->ti_mbuftx_dmat, txd->tx_dmamap,
	    *m_head, txsegs, &nseg, 0);
	if (error == EFBIG) {
		/* Too many segments: collapse the chain and retry once. */
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->ti_mbuftx_dmat,
		    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
		if (error) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Make sure the whole frame fits in the remaining ring space. */
	if (sc->ti_txcnt + nseg >= TI_TX_RING_CNT) {
		bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap);
		return (ENOBUFS);
	}

	/* Translate mbuf checksum/fragment flags to descriptor flags. */
	m = *m_head;
	csum_flags = 0;
	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TI_BDFLAG_IP_CKSUM;
		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM;
		if (m->m_flags & M_LASTFRAG)
			csum_flags |= TI_BDFLAG_IP_FRAG_END;
		else if (m->m_flags & M_FRAG)
			csum_flags |= TI_BDFLAG_IP_FRAG;
	}

	bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* Fill one TX descriptor per DMA segment. */
	frag = sc->ti_tx_saved_prodidx;
	for (i = 0; i < nseg; i++) {
		if (sc->ti_hwrev == TI_HWREV_TIGON) {
			/* Tigon 1: build locally, copy into NIC SRAM below. */
			bzero(&txdesc, sizeof(txdesc));
			f = &txdesc;
		} else
			f = &sc->ti_rdata->ti_tx_ring[frag];
		ti_hostaddr64(&f->ti_addr, txsegs[i].ds_addr);
		f->ti_len = txsegs[i].ds_len;
		f->ti_flags = csum_flags;
		if (m->m_flags & M_VLANTAG) {
			f->ti_flags |= TI_BDFLAG_VLAN_TAG;
			f->ti_vlan_tag = m->m_pkthdr.ether_vtag;
		} else {
			f->ti_vlan_tag = 0;
		}

		if (sc->ti_hwrev == TI_HWREV_TIGON)
			ti_mem_write(sc, TI_TX_RING_BASE + frag *
			    sizeof(txdesc), sizeof(txdesc), &txdesc);
		TI_INC(frag, TI_TX_RING_CNT);
	}

	sc->ti_tx_saved_prodidx = frag;
	/* set TI_BDFLAG_END on the last descriptor */
	frag = (frag + TI_TX_RING_CNT - 1) % TI_TX_RING_CNT;
	if (sc->ti_hwrev == TI_HWREV_TIGON) {
		txdesc.ti_flags |= TI_BDFLAG_END;
		ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc),
		    sizeof(txdesc), &txdesc);
	} else
		sc->ti_rdata->ti_tx_ring[frag].ti_flags |= TI_BDFLAG_END;

	/* Move the software descriptor to the busy queue. */
	STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txbusyq, txd, tx_q);
	txd->tx_m = m;
	sc->ti_txcnt += nseg;

	return (0);
}
2998 
2999 static void
3000 ti_start(struct ifnet *ifp)
3001 {
3002 	struct ti_softc *sc;
3003 
3004 	sc = ifp->if_softc;
3005 	TI_LOCK(sc);
3006 	ti_start_locked(ifp);
3007 	TI_UNLOCK(sc);
3008 }
3009 
3010 /*
3011  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3012  * to the mbuf data regions directly in the transmit descriptors.
3013  */
3014 static void
3015 ti_start_locked(struct ifnet *ifp)
3016 {
3017 	struct ti_softc *sc;
3018 	struct mbuf *m_head = NULL;
3019 	int enq = 0;
3020 
3021 	sc = ifp->if_softc;
3022 
3023 	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
3024 	    sc->ti_txcnt < (TI_TX_RING_CNT - 16);) {
3025 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3026 		if (m_head == NULL)
3027 			break;
3028 
3029 		/*
3030 		 * XXX
3031 		 * safety overkill.  If this is a fragmented packet chain
3032 		 * with delayed TCP/UDP checksums, then only encapsulate
3033 		 * it if we have enough descriptors to handle the entire
3034 		 * chain at once.
3035 		 * (paranoia -- may not actually be needed)
3036 		 */
3037 		if (m_head->m_flags & M_FIRSTFRAG &&
3038 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3039 			if ((TI_TX_RING_CNT - sc->ti_txcnt) <
3040 			    m_head->m_pkthdr.csum_data + 16) {
3041 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3042 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3043 				break;
3044 			}
3045 		}
3046 
3047 		/*
3048 		 * Pack the data into the transmit ring. If we
3049 		 * don't have room, set the OACTIVE flag and wait
3050 		 * for the NIC to drain the ring.
3051 		 */
3052 		if (ti_encap(sc, &m_head)) {
3053 			if (m_head == NULL)
3054 				break;
3055 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3056 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3057 			break;
3058 		}
3059 
3060 		enq++;
3061 		/*
3062 		 * If there's a BPF listener, bounce a copy of this frame
3063 		 * to him.
3064 		 */
3065 		ETHER_BPF_MTAP(ifp, m_head);
3066 	}
3067 
3068 	if (enq > 0) {
3069 		/* Transmit */
3070 		CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, sc->ti_tx_saved_prodidx);
3071 
3072 		/*
3073 		 * Set a timeout in case the chip goes out to lunch.
3074 		 */
3075 		sc->ti_timer = 5;
3076 	}
3077 }
3078 
/*
 * ifnet if_init entry point: take the driver mutex and perform the
 * actual (re)initialization.
 */
static void
ti_init(void *xsc)
{
	struct ti_softc *sc = xsc;

	TI_LOCK(sc);
	ti_init_locked(sc);
	TI_UNLOCK(sc);
}
3089 
3090 static void
3091 ti_init_locked(void *xsc)
3092 {
3093 	struct ti_softc *sc = xsc;
3094 
3095 	/* Cancel pending I/O and flush buffers. */
3096 	ti_stop(sc);
3097 
3098 	/* Init the gen info block, ring control blocks and firmware. */
3099 	if (ti_gibinit(sc)) {
3100 		device_printf(sc->ti_dev, "initialization failure\n");
3101 		return;
3102 	}
3103 }
3104 
/*
 * Second-stage initialization, performed once the firmware is running:
 * program MTU, MAC address and packet filtering, populate the receive
 * rings, tell the firmware the host stack is up, and renegotiate the
 * link.  Called with the driver lock held.
 */
static void ti_init2(struct ti_softc *sc)
{
	struct ti_cmd_desc cmd;
	struct ifnet *ifp;
	uint8_t *ea;
	struct ifmedia *ifm;
	int tmp;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/* Specify MTU and interface index. */
	CSR_WRITE_4(sc, TI_GCR_IFINDEX, sc->ti_unit);
	/*
	 * The firmware is given the full frame size: MTU plus Ethernet
	 * header, CRC and room for a VLAN tag.
	 */
	CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
	TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);

	/* Load our MAC address. */
	ea = IF_LLADDR(sc->ti_ifp);
	/* The 6-byte address is split across two 32-bit registers. */
	CSR_WRITE_4(sc, TI_GCR_PAR0, (ea[0] << 8) | ea[1]);
	CSR_WRITE_4(sc, TI_GCR_PAR1,
	    (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5]);
	TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0);
	} else {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
	}

	/* Program multicast filter. */
	ti_setmulti(sc);

	/*
	 * If this is a Tigon 1, we should tell the
	 * firmware to use software packet filtering.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON) {
		TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);
	}

	/* Init RX ring. */
	ti_init_rx_ring_std(sc);

	/* Init jumbo RX ring (only needed for jumbo-sized MTUs). */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		ti_init_rx_ring_jumbo(sc);

	/*
	 * If this is a Tigon 2, we can also configure the
	 * mini ring.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II)
		ti_init_rx_ring_mini(sc);

	/* Reset the RX return ring consumer index on both sides. */
	CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
	sc->ti_rx_saved_considx = 0;

	/* Init TX ring. */
	ti_init_tx_ring(sc);

	/* Tell firmware we're alive. */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);

	/* Enable host interrupts (writing 0 to the mailbox unmasks them). */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc);

	/*
	 * Make sure to set media properly. We have to do this
	 * here since we have to issue commands in order to set
	 * the link negotiation and we can't issue commands until
	 * the firmware is running.
	 */
	ifm = &sc->ifmedia;
	tmp = ifm->ifm_media;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;
	ti_ifmedia_upd_locked(sc);
	ifm->ifm_media = tmp;
}
3190 
3191 /*
3192  * Set media options.
3193  */
3194 static int
3195 ti_ifmedia_upd(struct ifnet *ifp)
3196 {
3197 	struct ti_softc *sc;
3198 	int error;
3199 
3200 	sc = ifp->if_softc;
3201 	TI_LOCK(sc);
3202 	error = ti_ifmedia_upd(ifp);
3203 	TI_UNLOCK(sc);
3204 
3205 	return (error);
3206 }
3207 
/*
 * Locked media-update worker: program the gigabit and 10/100 link
 * control registers according to the currently selected media and
 * kick off firmware link negotiation.  Called with the driver lock
 * held.  Returns 0 on success, EINVAL for non-Ethernet media.
 * Unrecognized media subtypes fall through the switch and return 0
 * without touching the hardware.
 */
static int
ti_ifmedia_upd_locked(struct ti_softc *sc)
{
	struct ifmedia *ifm;
	struct ti_cmd_desc cmd;
	uint32_t flowctl;

	ifm = &sc->ifmedia;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	flowctl = 0;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/*
		 * Transmit flow control doesn't work on the Tigon 1.
		 */
		flowctl = TI_GLNK_RX_FLOWCTL_Y;

		/*
		 * Transmit flow control can also cause problems on the
		 * Tigon 2, apparantly with both the copper and fiber
		 * boards.  The symptom is that the interface will just
		 * hang.  This was reproduced with Alteon 180 switches.
		 */
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_GLNK_TX_FLOWCTL_Y;
#endif

		/* Advertise everything on the gigabit link... */
		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    TI_GLNK_FULL_DUPLEX| flowctl |
		    TI_GLNK_AUTONEGENB|TI_GLNK_ENB);

		flowctl = TI_LNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_LNK_TX_FLOWCTL_Y;
#endif

		/* ...and everything on the 10/100 link. */
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
		    TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX| flowctl |
		    TI_LNK_AUTONEGENB|TI_LNK_ENB);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_BOTH, 0);
		break;
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Gigabit only: disable the 10/100 link entirely. */
		flowctl = TI_GLNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_GLNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    flowctl |TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, 0);
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
		break;
	case IFM_100_FX:
	case IFM_10_FL:
	case IFM_100_TX:
	case IFM_10_T:
		/* 10/100 only: disable the gigabit link entirely. */
		flowctl = TI_LNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_LNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF|flowctl);
		/* Pick the speed bit... */
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
		}
		/* ...and the duplex bit. */
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_10_100, 0);
		break;
	}

	return (0);
}
3303 
3304 /*
3305  * Report current media status.
3306  */
3307 static void
3308 ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3309 {
3310 	struct ti_softc *sc;
3311 	uint32_t media = 0;
3312 
3313 	sc = ifp->if_softc;
3314 
3315 	TI_LOCK(sc);
3316 
3317 	ifmr->ifm_status = IFM_AVALID;
3318 	ifmr->ifm_active = IFM_ETHER;
3319 
3320 	if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
3321 		TI_UNLOCK(sc);
3322 		return;
3323 	}
3324 
3325 	ifmr->ifm_status |= IFM_ACTIVE;
3326 
3327 	if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
3328 		media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
3329 		if (sc->ti_copper)
3330 			ifmr->ifm_active |= IFM_1000_T;
3331 		else
3332 			ifmr->ifm_active |= IFM_1000_SX;
3333 		if (media & TI_GLNK_FULL_DUPLEX)
3334 			ifmr->ifm_active |= IFM_FDX;
3335 		else
3336 			ifmr->ifm_active |= IFM_HDX;
3337 	} else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
3338 		media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
3339 		if (sc->ti_copper) {
3340 			if (media & TI_LNK_100MB)
3341 				ifmr->ifm_active |= IFM_100_TX;
3342 			if (media & TI_LNK_10MB)
3343 				ifmr->ifm_active |= IFM_10_T;
3344 		} else {
3345 			if (media & TI_LNK_100MB)
3346 				ifmr->ifm_active |= IFM_100_FX;
3347 			if (media & TI_LNK_10MB)
3348 				ifmr->ifm_active |= IFM_10_FL;
3349 		}
3350 		if (media & TI_LNK_FULL_DUPLEX)
3351 			ifmr->ifm_active |= IFM_FDX;
3352 		if (media & TI_LNK_HALF_DUPLEX)
3353 			ifmr->ifm_active |= IFM_HDX;
3354 	}
3355 	TI_UNLOCK(sc);
3356 }
3357 
/*
 * ifnet ioctl handler: MTU, interface flags, multicast filter, media
 * and capability changes.  Anything unrecognized is passed through to
 * ether_ioctl().  Takes/drops the driver lock per case as needed.
 */
static int
ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ti_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ti_cmd_desc cmd;
	int mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		TI_LOCK(sc);
		/* Reject MTUs beyond the jumbo maximum; otherwise re-init. */
		if (ifr->ifr_mtu > TI_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			ti_init_locked(sc);
		}
		TI_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		TI_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ti_if_flags & IFF_PROMISC)) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_ENB, 0);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ti_if_flags & IFF_PROMISC) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_DIS, 0);
			} else
				ti_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ti_stop(sc);
			}
		}
		/* Remember the flags so the next call can diff them. */
		sc->ti_if_flags = ifp->if_flags;
		TI_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		TI_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			ti_setmulti(sc);
		TI_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	case SIOCSIFCAP:
		TI_LOCK(sc);
		/* mask holds the capability bits the caller wants toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= TI_CSUM_FEATURES;
                        else
				ifp->if_hwassist &= ~TI_CSUM_FEATURES;
                }
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		/*
		 * Checksum and VLAN tagging offload changes require a
		 * firmware re-init to take effect.
		 */
		if ((mask & (IFCAP_TXCSUM | IFCAP_RXCSUM |
		    IFCAP_VLAN_HWTAGGING)) != 0) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ti_init_locked(sc);
			}
		}
		TI_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
3456 
3457 static int
3458 ti_open(struct cdev *dev, int flags, int fmt, struct thread *td)
3459 {
3460 	struct ti_softc *sc;
3461 
3462 	sc = dev->si_drv1;
3463 	if (sc == NULL)
3464 		return (ENODEV);
3465 
3466 	TI_LOCK(sc);
3467 	sc->ti_flags |= TI_FLAG_DEBUGING;
3468 	TI_UNLOCK(sc);
3469 
3470 	return (0);
3471 }
3472 
3473 static int
3474 ti_close(struct cdev *dev, int flag, int fmt, struct thread *td)
3475 {
3476 	struct ti_softc *sc;
3477 
3478 	sc = dev->si_drv1;
3479 	if (sc == NULL)
3480 		return (ENODEV);
3481 
3482 	TI_LOCK(sc);
3483 	sc->ti_flags &= ~TI_FLAG_DEBUGING;
3484 	TI_UNLOCK(sc);
3485 
3486 	return (0);
3487 }
3488 
3489 /*
3490  * This ioctl routine goes along with the Tigon character device.
3491  */
/*
 * Debug ioctl handler for the Tigon character device: exposes NIC
 * statistics, coalescing parameters, firmware tracing, and raw
 * register/memory access for Alteon's debugging tools.
 */
static int
ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	struct ti_softc *sc;
	int error;

	sc = dev->si_drv1;
	if (sc == NULL)
		return (ENODEV);

	error = 0;

	switch (cmd) {
	case TIIOCGETSTATS:
	{
		struct ti_stats *outstats;

		outstats = (struct ti_stats *)addr;

		/* Snapshot the firmware-maintained stats block. */
		TI_LOCK(sc);
		bcopy(&sc->ti_rdata->ti_info.ti_stats, outstats,
		      sizeof(struct ti_stats));
		TI_UNLOCK(sc);
		break;
	}
	case TIIOCGETPARAMS:
	{
		struct ti_params *params;

		params = (struct ti_params *)addr;

		/* Return the current interrupt-coalescing parameters. */
		TI_LOCK(sc);
		params->ti_stat_ticks = sc->ti_stat_ticks;
		params->ti_rx_coal_ticks = sc->ti_rx_coal_ticks;
		params->ti_tx_coal_ticks = sc->ti_tx_coal_ticks;
		params->ti_rx_max_coal_bds = sc->ti_rx_max_coal_bds;
		params->ti_tx_max_coal_bds = sc->ti_tx_max_coal_bds;
		params->ti_tx_buf_ratio = sc->ti_tx_buf_ratio;
		params->param_mask = TI_PARAM_ALL;
		TI_UNLOCK(sc);

		error = 0;

		break;
	}
	case TIIOCSETPARAMS:
	{
		struct ti_params *params;

		params = (struct ti_params *)addr;

		/*
		 * Update only the parameters selected in param_mask,
		 * writing each new value through to the firmware.
		 */
		TI_LOCK(sc);
		if (params->param_mask & TI_PARAM_STAT_TICKS) {
			sc->ti_stat_ticks = params->ti_stat_ticks;
			CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
		}

		if (params->param_mask & TI_PARAM_RX_COAL_TICKS) {
			sc->ti_rx_coal_ticks = params->ti_rx_coal_ticks;
			CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
				    sc->ti_rx_coal_ticks);
		}

		if (params->param_mask & TI_PARAM_TX_COAL_TICKS) {
			sc->ti_tx_coal_ticks = params->ti_tx_coal_ticks;
			CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS,
				    sc->ti_tx_coal_ticks);
		}

		if (params->param_mask & TI_PARAM_RX_COAL_BDS) {
			sc->ti_rx_max_coal_bds = params->ti_rx_max_coal_bds;
			CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD,
				    sc->ti_rx_max_coal_bds);
		}

		if (params->param_mask & TI_PARAM_TX_COAL_BDS) {
			sc->ti_tx_max_coal_bds = params->ti_tx_max_coal_bds;
			CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD,
				    sc->ti_tx_max_coal_bds);
		}

		if (params->param_mask & TI_PARAM_TX_BUF_RATIO) {
			sc->ti_tx_buf_ratio = params->ti_tx_buf_ratio;
			CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO,
				    sc->ti_tx_buf_ratio);
		}
		TI_UNLOCK(sc);

		error = 0;

		break;
	}
	case TIIOCSETTRACE: {
		ti_trace_type	trace_type;

		trace_type = *(ti_trace_type *)addr;

		/*
		 * Set tracing to whatever the user asked for.  Setting
		 * this register to 0 should have the effect of disabling
		 * tracing.
		 */
		CSR_WRITE_4(sc, TI_GCR_NIC_TRACING, trace_type);

		error = 0;

		break;
	}
	case TIIOCGETTRACE: {
		struct ti_trace_buf *trace_buf;
		uint32_t trace_start, cur_trace_ptr, trace_len;

		trace_buf = (struct ti_trace_buf *)addr;

		/* Read the trace window location/size from the NIC. */
		TI_LOCK(sc);
		trace_start = CSR_READ_4(sc, TI_GCR_NICTRACE_START);
		cur_trace_ptr = CSR_READ_4(sc, TI_GCR_NICTRACE_PTR);
		trace_len = CSR_READ_4(sc, TI_GCR_NICTRACE_LEN);

#if 0
		if_printf(sc->ti_ifp, "trace_start = %#x, cur_trace_ptr = %#x, "
		       "trace_len = %d\n", trace_start,
		       cur_trace_ptr, trace_len);
		if_printf(sc->ti_ifp, "trace_buf->buf_len = %d\n",
		       trace_buf->buf_len);
#endif

		/* Copy at most buf_len bytes of trace data to user space. */
		error = ti_copy_mem(sc, trace_start, min(trace_len,
				    trace_buf->buf_len),
				    (caddr_t)trace_buf->buf, 1, 1);

		if (error == 0) {
			trace_buf->fill_len = min(trace_len,
						  trace_buf->buf_len);
			/* Report the current pointer relative to the start. */
			if (cur_trace_ptr < trace_start)
				trace_buf->cur_trace_ptr =
					trace_start - cur_trace_ptr;
			else
				trace_buf->cur_trace_ptr =
					cur_trace_ptr - trace_start;
		} else
			trace_buf->fill_len = 0;
		TI_UNLOCK(sc);

		break;
	}

	/*
	 * For debugging, five ioctls are needed:
	 * ALT_ATTACH
	 * ALT_READ_TG_REG
	 * ALT_WRITE_TG_REG
	 * ALT_READ_TG_MEM
	 * ALT_WRITE_TG_MEM
	 */
	case ALT_ATTACH:
		/*
		 * From what I can tell, Alteon's Solaris Tigon driver
		 * only has one character device, so you have to attach
		 * to the Tigon board you're interested in.  This seems
		 * like a not-so-good way to do things, since unless you
		 * subsequently specify the unit number of the device
		 * you're interested in every ioctl, you'll only be
		 * able to debug one board at a time.
		 */
		error = 0;
		break;
	case ALT_READ_TG_MEM:
	case ALT_WRITE_TG_MEM:
	{
		struct tg_mem *mem_param;
		uint32_t sram_end, scratch_end;

		mem_param = (struct tg_mem *)addr;

		/* Memory map limits differ between Tigon I and II. */
		if (sc->ti_hwrev == TI_HWREV_TIGON) {
			sram_end = TI_END_SRAM_I;
			scratch_end = TI_END_SCRATCH_I;
		} else {
			sram_end = TI_END_SRAM_II;
			scratch_end = TI_END_SCRATCH_II;
		}

		/*
		 * For now, we'll only handle accessing regular SRAM,
		 * nothing else.
		 */
		TI_LOCK(sc);
		if ((mem_param->tgAddr >= TI_BEG_SRAM)
		 && ((mem_param->tgAddr + mem_param->len) <= sram_end)) {
			/*
			 * In this instance, we always copy to/from user
			 * space, so the user space argument is set to 1.
			 */
			error = ti_copy_mem(sc, mem_param->tgAddr,
					    mem_param->len,
					    mem_param->userAddr, 1,
					    (cmd == ALT_READ_TG_MEM) ? 1 : 0);
		} else if ((mem_param->tgAddr >= TI_BEG_SCRATCH)
			&& (mem_param->tgAddr <= scratch_end)) {
			/* Processor A scratch memory. */
			error = ti_copy_scratch(sc, mem_param->tgAddr,
						mem_param->len,
						mem_param->userAddr, 1,
						(cmd == ALT_READ_TG_MEM) ?
						1 : 0, TI_PROCESSOR_A);
		/*
		 * NOTE(review): both bounds of this range test use
		 * TI_BEG_SCRATCH_B_DEBUG, so only an address exactly
		 * equal to the start of the region can match; the upper
		 * bound was presumably meant to be the end of the
		 * processor-B scratch debug window -- confirm against
		 * the ti register definitions before changing.
		 */
		} else if ((mem_param->tgAddr >= TI_BEG_SCRATCH_B_DEBUG)
			&& (mem_param->tgAddr <= TI_BEG_SCRATCH_B_DEBUG)) {
			if (sc->ti_hwrev == TI_HWREV_TIGON) {
				if_printf(sc->ti_ifp,
				    "invalid memory range for Tigon I\n");
				error = EINVAL;
				break;
			}
			/* Processor B scratch memory (Tigon II only). */
			error = ti_copy_scratch(sc, mem_param->tgAddr -
						TI_SCRATCH_DEBUG_OFF,
						mem_param->len,
						mem_param->userAddr, 1,
						(cmd == ALT_READ_TG_MEM) ?
						1 : 0, TI_PROCESSOR_B);
		} else {
			if_printf(sc->ti_ifp, "memory address %#x len %d is "
			        "out of supported range\n",
			        mem_param->tgAddr, mem_param->len);
			error = EINVAL;
		}
		TI_UNLOCK(sc);

		break;
	}
	case ALT_READ_TG_REG:
	case ALT_WRITE_TG_REG:
	{
		struct tg_reg	*regs;
		uint32_t	tmpval;

		regs = (struct tg_reg *)addr;

		/*
		 * Make sure the address in question isn't out of range.
		 */
		if (regs->addr > TI_REG_MAX) {
			error = EINVAL;
			break;
		}
		TI_LOCK(sc);
		if (cmd == ALT_READ_TG_REG) {
			bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
						regs->addr, &tmpval, 1);
			regs->data = ntohl(tmpval);
#if 0
			if ((regs->addr == TI_CPU_STATE)
			 || (regs->addr == TI_CPU_CTL_B)) {
				if_printf(sc->ti_ifp, "register %#x = %#x\n",
				       regs->addr, tmpval);
			}
#endif
		} else {
			tmpval = htonl(regs->data);
			bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
						 regs->addr, &tmpval, 1);
		}
		TI_UNLOCK(sc);

		break;
	}
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
3764 
3765 static void
3766 ti_watchdog(void *arg)
3767 {
3768 	struct ti_softc *sc;
3769 	struct ifnet *ifp;
3770 
3771 	sc = arg;
3772 	TI_LOCK_ASSERT(sc);
3773 	callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc);
3774 	if (sc->ti_timer == 0 || --sc->ti_timer > 0)
3775 		return;
3776 
3777 	/*
3778 	 * When we're debugging, the chip is often stopped for long periods
3779 	 * of time, and that would normally cause the watchdog timer to fire.
3780 	 * Since that impedes debugging, we don't want to do that.
3781 	 */
3782 	if (sc->ti_flags & TI_FLAG_DEBUGING)
3783 		return;
3784 
3785 	ifp = sc->ti_ifp;
3786 	if_printf(ifp, "watchdog timeout -- resetting\n");
3787 	ti_stop(sc);
3788 	ti_init_locked(sc);
3789 
3790 	ifp->if_oerrors++;
3791 }
3792 
3793 /*
3794  * Stop the adapter and free any mbufs allocated to the
3795  * RX and TX lists.
3796  */
/*
 * Stop the adapter: mask interrupts, tell the firmware the host stack
 * is going down, halt and re-reset the chip, release all RX/TX mbufs
 * and reset the ring indices.  Called with the driver lock held.
 */
static void
ti_stop(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ti_cmd_desc cmd;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/* Disable host interrupts (writing 1 to the mailbox masks them). */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
	/*
	 * Tell firmware we're shutting down.
	 */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);

	/* Halt and reinitialize. */
	if (ti_chipinit(sc) != 0)
		return;
	/* Wipe NIC SRAM above the low 8K, then reset the chip again. */
	ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000);
	if (ti_chipinit(sc) != 0)
		return;

	/* Free the RX lists. */
	ti_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	ti_free_rx_ring_jumbo(sc);

	/* Free mini RX list. */
	ti_free_rx_ring_mini(sc);

	/* Free TX buffers. */
	ti_free_tx_ring(sc);

	/* Reset the producer/consumer bookkeeping for the next init. */
	sc->ti_ev_prodidx.ti_idx = 0;
	sc->ti_return_prodidx.ti_idx = 0;
	sc->ti_tx_considx.ti_idx = 0;
	sc->ti_tx_saved_considx = TI_TXCONS_UNSET;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->ti_watchdog);
}
3841 
3842 /*
3843  * Stop all chip I/O so that the kernel's probe routines don't
3844  * get confused by errant DMAs when rebooting.
3845  */
3846 static int
3847 ti_shutdown(device_t dev)
3848 {
3849 	struct ti_softc *sc;
3850 
3851 	sc = device_get_softc(dev);
3852 	TI_LOCK(sc);
3853 	ti_chipinit(sc);
3854 	TI_UNLOCK(sc);
3855 
3856 	return (0);
3857 }
3858