/*-
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD.
 * Manuals, sample driver and firmware source kits are available
 * from http://www.alteon.com/support/openkits.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Alteon Networks Tigon chip contains an embedded R4000 CPU,
 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs
 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The
 * Tigon supports hardware IP, TCP and UDP checksumming, multicast
 * filtering and jumbo (9014 byte) frames. The hardware is largely
 * controlled by firmware, which must be loaded into the NIC during
 * initialization.
 *
 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
 * revision, which supports new features such as extended commands,
 * extended jumbo receive ring descriptors and a mini receive ring.
 *
 * Alteon Networks is to be commended for releasing such a vast amount
 * of development material for the Tigon NIC without requiring an NDA
 * (although they really should have done it a long time ago). With
 * any luck, the other vendors will finally wise up and follow Alteon's
 * stellar example.
 *
 * The firmware for the Tigon 1 and 2 NICs is compiled directly into
 * this driver by #including it as a C header file. This bloats the
 * driver somewhat, but it's the easiest method considering that the
 * driver code and firmware code need to be kept in sync. The source
 * for the firmware is not provided with the FreeBSD distribution since
 * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3.
 *
 * The following people deserve special thanks:
 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board
 *   for testing
 * - Raymond Lee of Netgear, for providing a pair of Netgear
 *   GA620 Tigon 2 boards for testing
 * - Ulf Zimmermann, for bringing the GA620 to my attention and
 *   convincing me to write this driver.
 * - Andrew Gallatin for providing FreeBSD/Alpha support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ti.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/conf.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

/* #define TI_PRIVATE_JUMBOS */

#if !defined(TI_PRIVATE_JUMBOS)
#include <sys/sockio.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/sf_buf.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <sys/vmmeter.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <sys/proc.h>
#endif /* !TI_PRIVATE_JUMBOS */

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/tiio.h>
#include <dev/ti/if_tireg.h>
#include <dev/ti/ti_fw.h>
#include <dev/ti/ti_fw2.h>

#define TI_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)
/*
 * We can only turn on header splitting if we're using extended receive
 * BDs.
 */
#if defined(TI_JUMBO_HDRSPLIT) && defined(TI_PRIVATE_JUMBOS)
#error "options TI_JUMBO_HDRSPLIT and TI_PRIVATE_JUMBOS are mutually exclusive"
#endif /* TI_JUMBO_HDRSPLIT && TI_PRIVATE_JUMBOS */

struct ti_softc *tis[8];

typedef enum {
	TI_SWAP_HTON,
	TI_SWAP_NTOH
} ti_swap_type;


/*
 * Various supported device vendors/types and their names.
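 * The table is walked by ti_probe(); the all-zero entry at the end is the
 * terminator, so new board IDs should be added above it.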
163 */ 164 165 static struct ti_type ti_devs[] = { 166 { ALT_VENDORID, ALT_DEVICEID_ACENIC, 167 "Alteon AceNIC 1000baseSX Gigabit Ethernet" }, 168 { ALT_VENDORID, ALT_DEVICEID_ACENIC_COPPER, 169 "Alteon AceNIC 1000baseT Gigabit Ethernet" }, 170 { TC_VENDORID, TC_DEVICEID_3C985, 171 "3Com 3c985-SX Gigabit Ethernet" }, 172 { NG_VENDORID, NG_DEVICEID_GA620, 173 "Netgear GA620 1000baseSX Gigabit Ethernet" }, 174 { NG_VENDORID, NG_DEVICEID_GA620T, 175 "Netgear GA620 1000baseT Gigabit Ethernet" }, 176 { SGI_VENDORID, SGI_DEVICEID_TIGON, 177 "Silicon Graphics Gigabit Ethernet" }, 178 { DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX, 179 "Farallon PN9000SX Gigabit Ethernet" }, 180 { 0, 0, NULL } 181 }; 182 183 184 static d_open_t ti_open; 185 static d_close_t ti_close; 186 static d_ioctl_t ti_ioctl2; 187 188 static struct cdevsw ti_cdevsw = { 189 .d_version = D_VERSION, 190 .d_flags = 0, 191 .d_open = ti_open, 192 .d_close = ti_close, 193 .d_ioctl = ti_ioctl2, 194 .d_name = "ti", 195 }; 196 197 static int ti_probe(device_t); 198 static int ti_attach(device_t); 199 static int ti_detach(device_t); 200 static void ti_txeof(struct ti_softc *); 201 static void ti_rxeof(struct ti_softc *); 202 203 static void ti_stats_update(struct ti_softc *); 204 static int ti_encap(struct ti_softc *, struct mbuf *, u_int32_t *); 205 206 static void ti_intr(void *); 207 static void ti_start(struct ifnet *); 208 static int ti_ioctl(struct ifnet *, u_long, caddr_t); 209 static void ti_init(void *); 210 static void ti_init2(struct ti_softc *); 211 static void ti_stop(struct ti_softc *); 212 static void ti_watchdog(struct ifnet *); 213 static void ti_shutdown(device_t); 214 static int ti_ifmedia_upd(struct ifnet *); 215 static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *); 216 217 static u_int32_t ti_eeprom_putbyte(struct ti_softc *, int); 218 static u_int8_t ti_eeprom_getbyte(struct ti_softc *, int, u_int8_t *); 219 static int ti_read_eeprom(struct ti_softc *, caddr_t, int, int); 220 221 static void ti_add_mcast(struct ti_softc *, struct ether_addr *); 222 static void ti_del_mcast(struct ti_softc *, struct ether_addr *); 223 static void ti_setmulti(struct ti_softc *); 224 225 static void ti_mem(struct ti_softc *, u_int32_t, u_int32_t, caddr_t); 226 static int ti_copy_mem(struct ti_softc *, u_int32_t, u_int32_t, caddr_t, int, int); 227 static int ti_copy_scratch(struct ti_softc *, u_int32_t, u_int32_t, caddr_t, 228 int, int, int); 229 static int ti_bcopy_swap(const void *, void *, size_t, ti_swap_type); 230 static void ti_loadfw(struct ti_softc *); 231 static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *); 232 static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, caddr_t, int); 233 static void ti_handle_events(struct ti_softc *); 234 #ifdef TI_PRIVATE_JUMBOS 235 static int ti_alloc_jumbo_mem(struct ti_softc *); 236 static void *ti_jalloc(struct ti_softc *); 237 static void ti_jfree(void *, void *); 238 #endif /* TI_PRIVATE_JUMBOS */ 239 static int ti_newbuf_std(struct ti_softc *, int, struct mbuf *); 240 static int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *); 241 static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *); 242 static int ti_init_rx_ring_std(struct ti_softc *); 243 static void ti_free_rx_ring_std(struct ti_softc *); 244 static int ti_init_rx_ring_jumbo(struct ti_softc *); 245 static void ti_free_rx_ring_jumbo(struct ti_softc *); 246 static int ti_init_rx_ring_mini(struct ti_softc *); 247 static void ti_free_rx_ring_mini(struct ti_softc *); 248 static void 
ti_free_tx_ring(struct ti_softc *); 249 static int ti_init_tx_ring(struct ti_softc *); 250 251 static int ti_64bitslot_war(struct ti_softc *); 252 static int ti_chipinit(struct ti_softc *); 253 static int ti_gibinit(struct ti_softc *); 254 255 #ifdef TI_JUMBO_HDRSPLIT 256 static __inline void ti_hdr_split (struct mbuf *top, int hdr_len, 257 int pkt_len, int idx); 258 #endif /* TI_JUMBO_HDRSPLIT */ 259 260 static device_method_t ti_methods[] = { 261 /* Device interface */ 262 DEVMETHOD(device_probe, ti_probe), 263 DEVMETHOD(device_attach, ti_attach), 264 DEVMETHOD(device_detach, ti_detach), 265 DEVMETHOD(device_shutdown, ti_shutdown), 266 { 0, 0 } 267 }; 268 269 static driver_t ti_driver = { 270 "ti", 271 ti_methods, 272 sizeof(struct ti_softc) 273 }; 274 275 static devclass_t ti_devclass; 276 277 DRIVER_MODULE(ti, pci, ti_driver, ti_devclass, 0, 0); 278 MODULE_DEPEND(ti, pci, 1, 1, 1); 279 MODULE_DEPEND(ti, ether, 1, 1, 1); 280 281 /* 282 * Send an instruction or address to the EEPROM, check for ACK. 283 */ 284 static u_int32_t ti_eeprom_putbyte(sc, byte) 285 struct ti_softc *sc; 286 int byte; 287 { 288 register int i, ack = 0; 289 290 /* 291 * Make sure we're in TX mode. 292 */ 293 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 294 295 /* 296 * Feed in each bit and stobe the clock. 297 */ 298 for (i = 0x80; i; i >>= 1) { 299 if (byte & i) { 300 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 301 } else { 302 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 303 } 304 DELAY(1); 305 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 306 DELAY(1); 307 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 308 } 309 310 /* 311 * Turn off TX mode. 312 */ 313 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 314 315 /* 316 * Check for ack. 317 */ 318 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 319 ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN; 320 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 321 322 return (ack); 323 } 324 325 /* 326 * Read a byte of data stored in the EEPROM at address 'addr.' 327 * We have to send two address bytes since the EEPROM can hold 328 * more than 256 bytes of data. 329 */ 330 static u_int8_t ti_eeprom_getbyte(sc, addr, dest) 331 struct ti_softc *sc; 332 int addr; 333 u_int8_t *dest; 334 { 335 register int i; 336 u_int8_t byte = 0; 337 338 EEPROM_START; 339 340 /* 341 * Send write control code to EEPROM. 342 */ 343 if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { 344 if_printf(sc->ti_ifp, 345 "failed to send write command, status: %x\n", 346 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 347 return (1); 348 } 349 350 /* 351 * Send first byte of address of byte we want to read. 352 */ 353 if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) { 354 if_printf(sc->ti_ifp, "failed to send address, status: %x\n", 355 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 356 return (1); 357 } 358 /* 359 * Send second byte address of byte we want to read. 360 */ 361 if (ti_eeprom_putbyte(sc, addr & 0xFF)) { 362 if_printf(sc->ti_ifp, "failed to send address, status: %x\n", 363 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 364 return (1); 365 } 366 367 EEPROM_STOP; 368 EEPROM_START; 369 /* 370 * Send read control code to EEPROM. 371 */ 372 if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) { 373 if_printf(sc->ti_ifp, 374 "failed to send read command, status: %x\n", 375 CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 376 return (1); 377 } 378 379 /* 380 * Start reading bits from EEPROM. 
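 * Transmit mode is dropped first so the EEPROM can drive the data line;
 * the eight bits then arrive MSB first: raise the clock, sample EE_DIN,
 * and drop the clock again for each bit position.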
381 */ 382 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 383 for (i = 0x80; i; i >>= 1) { 384 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 385 DELAY(1); 386 if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN) 387 byte |= i; 388 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 389 DELAY(1); 390 } 391 392 EEPROM_STOP; 393 394 /* 395 * No ACK generated for read, so just return byte. 396 */ 397 398 *dest = byte; 399 400 return (0); 401 } 402 403 /* 404 * Read a sequence of bytes from the EEPROM. 405 */ 406 static int 407 ti_read_eeprom(sc, dest, off, cnt) 408 struct ti_softc *sc; 409 caddr_t dest; 410 int off; 411 int cnt; 412 { 413 int err = 0, i; 414 u_int8_t byte = 0; 415 416 for (i = 0; i < cnt; i++) { 417 err = ti_eeprom_getbyte(sc, off + i, &byte); 418 if (err) 419 break; 420 *(dest + i) = byte; 421 } 422 423 return (err ? 1 : 0); 424 } 425 426 /* 427 * NIC memory access function. Can be used to either clear a section 428 * of NIC local memory or (if buf is non-NULL) copy data into it. 429 */ 430 static void 431 ti_mem(sc, addr, len, buf) 432 struct ti_softc *sc; 433 u_int32_t addr, len; 434 caddr_t buf; 435 { 436 int segptr, segsize, cnt; 437 caddr_t ti_winbase, ptr; 438 439 segptr = addr; 440 cnt = len; 441 ti_winbase = (caddr_t)(sc->ti_vhandle + TI_WINDOW); 442 ptr = buf; 443 444 while (cnt) { 445 if (cnt < TI_WINLEN) 446 segsize = cnt; 447 else 448 segsize = TI_WINLEN - (segptr % TI_WINLEN); 449 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 450 if (buf == NULL) 451 bzero((char *)ti_winbase + (segptr & 452 (TI_WINLEN - 1)), segsize); 453 else { 454 bcopy((char *)ptr, (char *)ti_winbase + 455 (segptr & (TI_WINLEN - 1)), segsize); 456 ptr += segsize; 457 } 458 segptr += segsize; 459 cnt -= segsize; 460 } 461 } 462 463 static int 464 ti_copy_mem(sc, tigon_addr, len, buf, useraddr, readdata) 465 struct ti_softc *sc; 466 u_int32_t tigon_addr, len; 467 caddr_t buf; 468 int useraddr, readdata; 469 { 470 int segptr, segsize, cnt; 471 caddr_t ptr; 472 u_int32_t origwin; 473 u_int8_t tmparray[TI_WINLEN], tmparray2[TI_WINLEN]; 474 int resid, segresid; 475 int first_pass; 476 477 /* 478 * At the moment, we don't handle non-aligned cases, we just bail. 479 * If this proves to be a problem, it will be fixed. 480 */ 481 if ((readdata == 0) 482 && (tigon_addr & 0x3)) { 483 if_printf(sc->ti_ifp, "ti_copy_mem: tigon address %#x isn't " 484 "word-aligned\n", tigon_addr); 485 if_printf(sc->ti_ifp, "ti_copy_mem: unaligned writes aren't " 486 "yet supported\n"); 487 return (EINVAL); 488 } 489 490 segptr = tigon_addr & ~0x3; 491 segresid = tigon_addr - segptr; 492 493 /* 494 * This is the non-aligned amount left over that we'll need to 495 * copy. 496 */ 497 resid = len & 0x3; 498 499 /* Add in the left over amount at the front of the buffer */ 500 resid += segresid; 501 502 cnt = len & ~0x3; 503 /* 504 * If resid + segresid is >= 4, add multiples of 4 to the count and 505 * decrease the residual by that much. 506 */ 507 cnt += resid & ~0x3; 508 resid -= resid & ~0x3; 509 510 ptr = buf; 511 512 first_pass = 1; 513 514 /* 515 * Save the old window base value. 
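 * Unlike ti_mem() above, this routine puts the original window back when
 * it is done, so it can be used (e.g. from the debugging ioctls) without
 * disturbing whatever window the rest of the driver had selected.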
516 */ 517 origwin = CSR_READ_4(sc, TI_WINBASE); 518 519 while (cnt) { 520 bus_size_t ti_offset; 521 522 if (cnt < TI_WINLEN) 523 segsize = cnt; 524 else 525 segsize = TI_WINLEN - (segptr % TI_WINLEN); 526 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 527 528 ti_offset = TI_WINDOW + (segptr & (TI_WINLEN -1)); 529 530 if (readdata) { 531 532 bus_space_read_region_4(sc->ti_btag, 533 sc->ti_bhandle, ti_offset, 534 (u_int32_t *)tmparray, 535 segsize >> 2); 536 if (useraddr) { 537 /* 538 * Yeah, this is a little on the kludgy 539 * side, but at least this code is only 540 * used for debugging. 541 */ 542 ti_bcopy_swap(tmparray, tmparray2, segsize, 543 TI_SWAP_NTOH); 544 545 TI_UNLOCK(sc); 546 if (first_pass) { 547 copyout(&tmparray2[segresid], ptr, 548 segsize - segresid); 549 first_pass = 0; 550 } else 551 copyout(tmparray2, ptr, segsize); 552 TI_LOCK(sc); 553 } else { 554 if (first_pass) { 555 556 ti_bcopy_swap(tmparray, tmparray2, 557 segsize, TI_SWAP_NTOH); 558 TI_UNLOCK(sc); 559 bcopy(&tmparray2[segresid], ptr, 560 segsize - segresid); 561 TI_LOCK(sc); 562 first_pass = 0; 563 } else 564 ti_bcopy_swap(tmparray, ptr, segsize, 565 TI_SWAP_NTOH); 566 } 567 568 } else { 569 if (useraddr) { 570 TI_UNLOCK(sc); 571 copyin(ptr, tmparray2, segsize); 572 TI_LOCK(sc); 573 ti_bcopy_swap(tmparray2, tmparray, segsize, 574 TI_SWAP_HTON); 575 } else 576 ti_bcopy_swap(ptr, tmparray, segsize, 577 TI_SWAP_HTON); 578 579 bus_space_write_region_4(sc->ti_btag, 580 sc->ti_bhandle, ti_offset, 581 (u_int32_t *)tmparray, 582 segsize >> 2); 583 } 584 segptr += segsize; 585 ptr += segsize; 586 cnt -= segsize; 587 } 588 589 /* 590 * Handle leftover, non-word-aligned bytes. 591 */ 592 if (resid != 0) { 593 u_int32_t tmpval, tmpval2; 594 bus_size_t ti_offset; 595 596 /* 597 * Set the segment pointer. 598 */ 599 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 600 601 ti_offset = TI_WINDOW + (segptr & (TI_WINLEN - 1)); 602 603 /* 604 * First, grab whatever is in our source/destination. 605 * We'll obviously need this for reads, but also for 606 * writes, since we'll be doing read/modify/write. 607 */ 608 bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle, 609 ti_offset, &tmpval, 1); 610 611 /* 612 * Next, translate this from little-endian to big-endian 613 * (at least on i386 boxes). 614 */ 615 tmpval2 = ntohl(tmpval); 616 617 if (readdata) { 618 /* 619 * If we're reading, just copy the leftover number 620 * of bytes from the host byte order buffer to 621 * the user's buffer. 622 */ 623 if (useraddr) { 624 TI_UNLOCK(sc); 625 copyout(&tmpval2, ptr, resid); 626 TI_LOCK(sc); 627 } else 628 bcopy(&tmpval2, ptr, resid); 629 } else { 630 /* 631 * If we're writing, first copy the bytes to be 632 * written into the network byte order buffer, 633 * leaving the rest of the buffer with whatever was 634 * originally in there. Then, swap the bytes 635 * around into host order and write them out. 636 * 637 * XXX KDM the read side of this has been verified 638 * to work, but the write side of it has not been 639 * verified. So user beware. 
640 */ 641 if (useraddr) { 642 TI_UNLOCK(sc); 643 copyin(ptr, &tmpval2, resid); 644 TI_LOCK(sc); 645 } else 646 bcopy(ptr, &tmpval2, resid); 647 648 tmpval = htonl(tmpval2); 649 650 bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, 651 ti_offset, &tmpval, 1); 652 } 653 } 654 655 CSR_WRITE_4(sc, TI_WINBASE, origwin); 656 657 return (0); 658 } 659 660 static int 661 ti_copy_scratch(sc, tigon_addr, len, buf, useraddr, readdata, cpu) 662 struct ti_softc *sc; 663 u_int32_t tigon_addr, len; 664 caddr_t buf; 665 int useraddr, readdata; 666 int cpu; 667 { 668 u_int32_t segptr; 669 int cnt; 670 u_int32_t tmpval, tmpval2; 671 caddr_t ptr; 672 673 /* 674 * At the moment, we don't handle non-aligned cases, we just bail. 675 * If this proves to be a problem, it will be fixed. 676 */ 677 if (tigon_addr & 0x3) { 678 if_printf(sc->ti_ifp, "ti_copy_scratch: tigon address %#x " 679 "isn't word-aligned\n", tigon_addr); 680 return (EINVAL); 681 } 682 683 if (len & 0x3) { 684 if_printf(sc->ti_ifp, "ti_copy_scratch: transfer length %d " 685 "isn't word-aligned\n", len); 686 return (EINVAL); 687 } 688 689 segptr = tigon_addr; 690 cnt = len; 691 ptr = buf; 692 693 while (cnt) { 694 CSR_WRITE_4(sc, CPU_REG(TI_SRAM_ADDR, cpu), segptr); 695 696 if (readdata) { 697 tmpval2 = CSR_READ_4(sc, CPU_REG(TI_SRAM_DATA, cpu)); 698 699 tmpval = ntohl(tmpval2); 700 701 /* 702 * Note: I've used this debugging interface 703 * extensively with Alteon's 12.3.15 firmware, 704 * compiled with GCC 2.7.2.1 and binutils 2.9.1. 705 * 706 * When you compile the firmware without 707 * optimization, which is necessary sometimes in 708 * order to properly step through it, you sometimes 709 * read out a bogus value of 0xc0017c instead of 710 * whatever was supposed to be in that scratchpad 711 * location. That value is on the stack somewhere, 712 * but I've never been able to figure out what was 713 * causing the problem. 714 * 715 * The address seems to pop up in random places, 716 * often not in the same place on two subsequent 717 * reads. 718 * 719 * In any case, the underlying data doesn't seem 720 * to be affected, just the value read out. 721 * 722 * KDM, 3/7/2000 723 */ 724 725 if (tmpval2 == 0xc0017c) 726 if_printf(sc->ti_ifp, "found 0xc0017c at %#x " 727 "(tmpval2)\n", segptr); 728 729 if (tmpval == 0xc0017c) 730 if_printf(sc->ti_ifp, "found 0xc0017c at %#x " 731 "(tmpval)\n", segptr); 732 733 if (useraddr) 734 copyout(&tmpval, ptr, 4); 735 else 736 bcopy(&tmpval, ptr, 4); 737 } else { 738 if (useraddr) 739 copyin(ptr, &tmpval2, 4); 740 else 741 bcopy(ptr, &tmpval2, 4); 742 743 tmpval = htonl(tmpval2); 744 745 CSR_WRITE_4(sc, CPU_REG(TI_SRAM_DATA, cpu), tmpval); 746 } 747 748 cnt -= 4; 749 segptr += 4; 750 ptr += 4; 751 } 752 753 return (0); 754 } 755 756 static int 757 ti_bcopy_swap(src, dst, len, swap_type) 758 const void *src; 759 void *dst; 760 size_t len; 761 ti_swap_type swap_type; 762 { 763 const u_int8_t *tmpsrc; 764 u_int8_t *tmpdst; 765 size_t tmplen; 766 767 if (len & 0x3) { 768 printf("ti_bcopy_swap: length %zd isn't 32-bit aligned\n", 769 len); 770 return (-1); 771 } 772 773 tmpsrc = src; 774 tmpdst = dst; 775 tmplen = len; 776 777 while (tmplen) { 778 if (swap_type == TI_SWAP_NTOH) 779 *(u_int32_t *)tmpdst = 780 ntohl(*(const u_int32_t *)tmpsrc); 781 else 782 *(u_int32_t *)tmpdst = 783 htonl(*(const u_int32_t *)tmpsrc); 784 785 tmpsrc += 4; 786 tmpdst += 4; 787 tmplen -= 4; 788 } 789 790 return (0); 791 } 792 793 /* 794 * Load firmware image into the NIC. 
Check that the firmware revision 795 * is acceptable and see if we want the firmware for the Tigon 1 or 796 * Tigon 2. 797 */ 798 static void 799 ti_loadfw(sc) 800 struct ti_softc *sc; 801 { 802 switch (sc->ti_hwrev) { 803 case TI_HWREV_TIGON: 804 if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR || 805 tigonFwReleaseMinor != TI_FIRMWARE_MINOR || 806 tigonFwReleaseFix != TI_FIRMWARE_FIX) { 807 if_printf(sc->ti_ifp, "firmware revision mismatch; " 808 "want %d.%d.%d, got %d.%d.%d\n", 809 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 810 TI_FIRMWARE_FIX, tigonFwReleaseMajor, 811 tigonFwReleaseMinor, tigonFwReleaseFix); 812 return; 813 } 814 ti_mem(sc, tigonFwTextAddr, tigonFwTextLen, 815 (caddr_t)tigonFwText); 816 ti_mem(sc, tigonFwDataAddr, tigonFwDataLen, 817 (caddr_t)tigonFwData); 818 ti_mem(sc, tigonFwRodataAddr, tigonFwRodataLen, 819 (caddr_t)tigonFwRodata); 820 ti_mem(sc, tigonFwBssAddr, tigonFwBssLen, NULL); 821 ti_mem(sc, tigonFwSbssAddr, tigonFwSbssLen, NULL); 822 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr); 823 break; 824 case TI_HWREV_TIGON_II: 825 if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR || 826 tigon2FwReleaseMinor != TI_FIRMWARE_MINOR || 827 tigon2FwReleaseFix != TI_FIRMWARE_FIX) { 828 if_printf(sc->ti_ifp, "firmware revision mismatch; " 829 "want %d.%d.%d, got %d.%d.%d\n", 830 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 831 TI_FIRMWARE_FIX, tigon2FwReleaseMajor, 832 tigon2FwReleaseMinor, tigon2FwReleaseFix); 833 return; 834 } 835 ti_mem(sc, tigon2FwTextAddr, tigon2FwTextLen, 836 (caddr_t)tigon2FwText); 837 ti_mem(sc, tigon2FwDataAddr, tigon2FwDataLen, 838 (caddr_t)tigon2FwData); 839 ti_mem(sc, tigon2FwRodataAddr, tigon2FwRodataLen, 840 (caddr_t)tigon2FwRodata); 841 ti_mem(sc, tigon2FwBssAddr, tigon2FwBssLen, NULL); 842 ti_mem(sc, tigon2FwSbssAddr, tigon2FwSbssLen, NULL); 843 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr); 844 break; 845 default: 846 if_printf(sc->ti_ifp, 847 "can't load firmware: unknown hardware rev\n"); 848 break; 849 } 850 } 851 852 /* 853 * Send the NIC a command via the command ring. 854 */ 855 static void 856 ti_cmd(sc, cmd) 857 struct ti_softc *sc; 858 struct ti_cmd_desc *cmd; 859 { 860 u_int32_t index; 861 862 if (sc->ti_rdata->ti_cmd_ring == NULL) 863 return; 864 865 index = sc->ti_cmd_saved_prodidx; 866 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd)); 867 TI_INC(index, TI_CMD_RING_CNT); 868 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 869 sc->ti_cmd_saved_prodidx = index; 870 } 871 872 /* 873 * Send the NIC an extended command. The 'len' parameter specifies the 874 * number of command slots to include after the initial command. 875 */ 876 static void 877 ti_cmd_ext(sc, cmd, arg, len) 878 struct ti_softc *sc; 879 struct ti_cmd_desc *cmd; 880 caddr_t arg; 881 int len; 882 { 883 u_int32_t index; 884 register int i; 885 886 if (sc->ti_rdata->ti_cmd_ring == NULL) 887 return; 888 889 index = sc->ti_cmd_saved_prodidx; 890 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd)); 891 TI_INC(index, TI_CMD_RING_CNT); 892 for (i = 0; i < len; i++) { 893 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), 894 *(u_int32_t *)(&arg[i * 4])); 895 TI_INC(index, TI_CMD_RING_CNT); 896 } 897 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 898 sc->ti_cmd_saved_prodidx = index; 899 } 900 901 /* 902 * Handle events that have triggered interrupts. 
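 * The firmware appends events to a ring in host memory and advances a
 * producer index; we walk from our saved consumer index up to that point,
 * dispatch on the event type, and write the new consumer index back to
 * TI_GCR_EVENTCONS_IDX so the slots can be reused.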
903 */ 904 static void 905 ti_handle_events(sc) 906 struct ti_softc *sc; 907 { 908 struct ti_event_desc *e; 909 910 if (sc->ti_rdata->ti_event_ring == NULL) 911 return; 912 913 while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) { 914 e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx]; 915 switch (e->ti_event) { 916 case TI_EV_LINKSTAT_CHANGED: 917 sc->ti_linkstat = e->ti_code; 918 if (e->ti_code == TI_EV_CODE_LINK_UP) 919 if_printf(sc->ti_ifp, "10/100 link up\n"); 920 else if (e->ti_code == TI_EV_CODE_GIG_LINK_UP) 921 if_printf(sc->ti_ifp, "gigabit link up\n"); 922 else if (e->ti_code == TI_EV_CODE_LINK_DOWN) 923 if_printf(sc->ti_ifp, "link down\n"); 924 break; 925 case TI_EV_ERROR: 926 if (e->ti_code == TI_EV_CODE_ERR_INVAL_CMD) 927 if_printf(sc->ti_ifp, "invalid command\n"); 928 else if (e->ti_code == TI_EV_CODE_ERR_UNIMP_CMD) 929 if_printf(sc->ti_ifp, "unknown command\n"); 930 else if (e->ti_code == TI_EV_CODE_ERR_BADCFG) 931 if_printf(sc->ti_ifp, "bad config data\n"); 932 break; 933 case TI_EV_FIRMWARE_UP: 934 ti_init2(sc); 935 break; 936 case TI_EV_STATS_UPDATED: 937 ti_stats_update(sc); 938 break; 939 case TI_EV_RESET_JUMBO_RING: 940 case TI_EV_MCAST_UPDATED: 941 /* Who cares. */ 942 break; 943 default: 944 if_printf(sc->ti_ifp, "unknown event: %d\n", 945 e->ti_event); 946 break; 947 } 948 /* Advance the consumer index. */ 949 TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT); 950 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx); 951 } 952 } 953 954 #ifdef TI_PRIVATE_JUMBOS 955 956 /* 957 * Memory management for the jumbo receive ring is a pain in the 958 * butt. We need to allocate at least 9018 bytes of space per frame, 959 * _and_ it has to be contiguous (unless you use the extended 960 * jumbo descriptor format). Using malloc() all the time won't 961 * work: malloc() allocates memory in powers of two, which means we 962 * would end up wasting a considerable amount of space by allocating 963 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have 964 * to do our own memory management. 965 * 966 * The driver needs to allocate a contiguous chunk of memory at boot 967 * time. We then chop this up ourselves into 9K pieces and use them 968 * as external mbuf storage. 969 * 970 * One issue here is how much memory to allocate. The jumbo ring has 971 * 256 slots in it, but at 9K per slot than can consume over 2MB of 972 * RAM. This is a bit much, especially considering we also need 973 * RAM for the standard ring and mini ring (on the Tigon 2). To 974 * save space, we only actually allocate enough memory for 64 slots 975 * by default, which works out to between 500 and 600K. This can 976 * be tuned by changing a #define in if_tireg.h. 977 */ 978 979 static int 980 ti_alloc_jumbo_mem(sc) 981 struct ti_softc *sc; 982 { 983 caddr_t ptr; 984 register int i; 985 struct ti_jpool_entry *entry; 986 987 /* Grab a big chunk o' storage. 
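 * The region is TI_JMEM bytes, allocated below as a single busdma segment
 * and then carved into TI_JSLOTS buffers of TI_JLEN bytes apiece; the
 * ti_jfree_listhead/ti_jinuse_listhead lists track which slots are
 * currently lent out as external mbuf storage.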
*/ 988 if (bus_dma_tag_create(sc->ti_parent_dmat, /* parent */ 989 PAGE_SIZE, 0, /* algnmnt, boundary */ 990 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 991 BUS_SPACE_MAXADDR, /* highaddr */ 992 NULL, NULL, /* filter, filterarg */ 993 TI_JMEM, /* maxsize */ 994 1, /* nsegments */ 995 TI_JMEM, /* maxsegsize */ 996 0, /* flags */ 997 NULL, NULL, /* lockfunc, lockarg */ 998 &sc->ti_jumbo_dmat) != 0) { 999 device_printf(dev, "Failed to allocate jumbo dmat\n"); 1000 return (ENOBUFS); 1001 } 1002 1003 if (bus_dmamem_alloc(sc->ti_jumbo_dmat, 1004 (void**)&sc->ti_cdata.ti_jumbo_buf, 1005 BUS_DMA_NOWAIT, &sc->ti_jumbo_dmamap) != 0) { 1006 device_printf(dev, "Failed to allocate jumbo memory\n"); 1007 return (ENOBUFS); 1008 } 1009 1010 SLIST_INIT(&sc->ti_jfree_listhead); 1011 SLIST_INIT(&sc->ti_jinuse_listhead); 1012 1013 /* 1014 * Now divide it up into 9K pieces and save the addresses 1015 * in an array. 1016 */ 1017 ptr = sc->ti_cdata.ti_jumbo_buf; 1018 for (i = 0; i < TI_JSLOTS; i++) { 1019 sc->ti_cdata.ti_jslots[i] = ptr; 1020 ptr += TI_JLEN; 1021 entry = malloc(sizeof(struct ti_jpool_entry), 1022 M_DEVBUF, M_NOWAIT); 1023 if (entry == NULL) { 1024 device_printf(dev, "no memory for jumbo " 1025 "buffer queue!\n"); 1026 return (ENOBUFS); 1027 } 1028 entry->slot = i; 1029 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries); 1030 } 1031 1032 return (0); 1033 } 1034 1035 /* 1036 * Allocate a jumbo buffer. 1037 */ 1038 static void *ti_jalloc(sc) 1039 struct ti_softc *sc; 1040 { 1041 struct ti_jpool_entry *entry; 1042 1043 entry = SLIST_FIRST(&sc->ti_jfree_listhead); 1044 1045 if (entry == NULL) { 1046 if_printf(sc->ti_ifp, "no free jumbo buffers\n"); 1047 return (NULL); 1048 } 1049 1050 SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries); 1051 SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries); 1052 return (sc->ti_cdata.ti_jslots[entry->slot]); 1053 } 1054 1055 /* 1056 * Release a jumbo buffer. 1057 */ 1058 static void 1059 ti_jfree(buf, args) 1060 void *buf; 1061 void *args; 1062 { 1063 struct ti_softc *sc; 1064 int i; 1065 struct ti_jpool_entry *entry; 1066 1067 /* Extract the softc struct pointer. */ 1068 sc = (struct ti_softc *)args; 1069 1070 if (sc == NULL) 1071 panic("ti_jfree: didn't get softc pointer!"); 1072 1073 /* calculate the slot this buffer belongs to */ 1074 i = ((vm_offset_t)buf 1075 - (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN; 1076 1077 if ((i < 0) || (i >= TI_JSLOTS)) 1078 panic("ti_jfree: asked to free buffer that we don't manage!"); 1079 1080 entry = SLIST_FIRST(&sc->ti_jinuse_listhead); 1081 if (entry == NULL) 1082 panic("ti_jfree: buffer not in use!"); 1083 entry->slot = i; 1084 SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries); 1085 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries); 1086 } 1087 1088 #endif /* TI_PRIVATE_JUMBOS */ 1089 1090 /* 1091 * Intialize a standard receive ring descriptor. 
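 * A 2K mbuf cluster is allocated (or the caller's mbuf recycled), shifted
 * by ETHER_ALIGN so the IP header ends up longword-aligned, and its
 * physical address, length and slot index are written into the standard
 * ring entry for the firmware to DMA into.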
1092 */ 1093 static int 1094 ti_newbuf_std(sc, i, m) 1095 struct ti_softc *sc; 1096 int i; 1097 struct mbuf *m; 1098 { 1099 struct mbuf *m_new = NULL; 1100 struct ti_rx_desc *r; 1101 1102 if (m == NULL) { 1103 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1104 if (m_new == NULL) 1105 return (ENOBUFS); 1106 1107 MCLGET(m_new, M_DONTWAIT); 1108 if (!(m_new->m_flags & M_EXT)) { 1109 m_freem(m_new); 1110 return (ENOBUFS); 1111 } 1112 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1113 } else { 1114 m_new = m; 1115 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1116 m_new->m_data = m_new->m_ext.ext_buf; 1117 } 1118 1119 m_adj(m_new, ETHER_ALIGN); 1120 sc->ti_cdata.ti_rx_std_chain[i] = m_new; 1121 r = &sc->ti_rdata->ti_rx_std_ring[i]; 1122 TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t)); 1123 r->ti_type = TI_BDTYPE_RECV_BD; 1124 r->ti_flags = 0; 1125 if (sc->ti_ifp->if_hwassist) 1126 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; 1127 r->ti_len = m_new->m_len; 1128 r->ti_idx = i; 1129 1130 return (0); 1131 } 1132 1133 /* 1134 * Intialize a mini receive ring descriptor. This only applies to 1135 * the Tigon 2. 1136 */ 1137 static int 1138 ti_newbuf_mini(sc, i, m) 1139 struct ti_softc *sc; 1140 int i; 1141 struct mbuf *m; 1142 { 1143 struct mbuf *m_new = NULL; 1144 struct ti_rx_desc *r; 1145 1146 if (m == NULL) { 1147 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1148 if (m_new == NULL) { 1149 return (ENOBUFS); 1150 } 1151 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 1152 } else { 1153 m_new = m; 1154 m_new->m_data = m_new->m_pktdat; 1155 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 1156 } 1157 1158 m_adj(m_new, ETHER_ALIGN); 1159 r = &sc->ti_rdata->ti_rx_mini_ring[i]; 1160 sc->ti_cdata.ti_rx_mini_chain[i] = m_new; 1161 TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t)); 1162 r->ti_type = TI_BDTYPE_RECV_BD; 1163 r->ti_flags = TI_BDFLAG_MINI_RING; 1164 if (sc->ti_ifp->if_hwassist) 1165 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; 1166 r->ti_len = m_new->m_len; 1167 r->ti_idx = i; 1168 1169 return (0); 1170 } 1171 1172 #ifdef TI_PRIVATE_JUMBOS 1173 1174 /* 1175 * Initialize a jumbo receive ring descriptor. This allocates 1176 * a jumbo buffer from the pool managed internally by the driver. 1177 */ 1178 static int 1179 ti_newbuf_jumbo(sc, i, m) 1180 struct ti_softc *sc; 1181 int i; 1182 struct mbuf *m; 1183 { 1184 struct mbuf *m_new = NULL; 1185 struct ti_rx_desc *r; 1186 1187 if (m == NULL) { 1188 caddr_t *buf = NULL; 1189 1190 /* Allocate the mbuf. */ 1191 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1192 if (m_new == NULL) { 1193 return (ENOBUFS); 1194 } 1195 1196 /* Allocate the jumbo buffer */ 1197 buf = ti_jalloc(sc); 1198 if (buf == NULL) { 1199 m_freem(m_new); 1200 if_printf(sc->ti_ifp, "jumbo allocation failed " 1201 "-- packet dropped!\n"); 1202 return (ENOBUFS); 1203 } 1204 1205 /* Attach the buffer to the mbuf. */ 1206 m_new->m_data = (void *) buf; 1207 m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN; 1208 MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, ti_jfree, 1209 (struct ti_softc *)sc, 0, EXT_NET_DRV); 1210 } else { 1211 m_new = m; 1212 m_new->m_data = m_new->m_ext.ext_buf; 1213 m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN; 1214 } 1215 1216 m_adj(m_new, ETHER_ALIGN); 1217 /* Set up the descriptor. 
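 * This looks just like a standard BD except for the
 * TI_BDTYPE_RECV_JUMBO_BD type and the TI_BDFLAG_JUMBO_RING flag, which
 * place the buffer on the jumbo ring instead.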
*/ 1218 r = &sc->ti_rdata->ti_rx_jumbo_ring[i]; 1219 sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new; 1220 TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t)); 1221 r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; 1222 r->ti_flags = TI_BDFLAG_JUMBO_RING; 1223 if (sc->ti_ifp->if_hwassist) 1224 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; 1225 r->ti_len = m_new->m_len; 1226 r->ti_idx = i; 1227 1228 return (0); 1229 } 1230 1231 #else 1232 #include <vm/vm_page.h> 1233 1234 #if (PAGE_SIZE == 4096) 1235 #define NPAYLOAD 2 1236 #else 1237 #define NPAYLOAD 1 1238 #endif 1239 1240 #define TCP_HDR_LEN (52 + sizeof(struct ether_header)) 1241 #define UDP_HDR_LEN (28 + sizeof(struct ether_header)) 1242 #define NFS_HDR_LEN (UDP_HDR_LEN) 1243 static int HDR_LEN = TCP_HDR_LEN; 1244 1245 1246 /* 1247 * Initialize a jumbo receive ring descriptor. This allocates 1248 * a jumbo buffer from the pool managed internally by the driver. 1249 */ 1250 static int 1251 ti_newbuf_jumbo(sc, idx, m_old) 1252 struct ti_softc *sc; 1253 int idx; 1254 struct mbuf *m_old; 1255 { 1256 struct mbuf *cur, *m_new = NULL; 1257 struct mbuf *m[3] = {NULL, NULL, NULL}; 1258 struct ti_rx_desc_ext *r; 1259 vm_page_t frame; 1260 static int color; 1261 /* 1 extra buf to make nobufs easy*/ 1262 struct sf_buf *sf[3] = {NULL, NULL, NULL}; 1263 int i; 1264 1265 if (m_old != NULL) { 1266 m_new = m_old; 1267 cur = m_old->m_next; 1268 for (i = 0; i <= NPAYLOAD; i++){ 1269 m[i] = cur; 1270 cur = cur->m_next; 1271 } 1272 } else { 1273 /* Allocate the mbufs. */ 1274 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1275 if (m_new == NULL) { 1276 if_printf(sc->ti_ifp, "mbuf allocation failed " 1277 "-- packet dropped!\n"); 1278 goto nobufs; 1279 } 1280 MGET(m[NPAYLOAD], M_DONTWAIT, MT_DATA); 1281 if (m[NPAYLOAD] == NULL) { 1282 if_printf(sc->ti_ifp, "cluster mbuf allocation failed " 1283 "-- packet dropped!\n"); 1284 goto nobufs; 1285 } 1286 MCLGET(m[NPAYLOAD], M_DONTWAIT); 1287 if ((m[NPAYLOAD]->m_flags & M_EXT) == 0) { 1288 if_printf(sc->ti_ifp, "mbuf allocation failed " 1289 "-- packet dropped!\n"); 1290 goto nobufs; 1291 } 1292 m[NPAYLOAD]->m_len = MCLBYTES; 1293 1294 for (i = 0; i < NPAYLOAD; i++){ 1295 MGET(m[i], M_DONTWAIT, MT_DATA); 1296 if (m[i] == NULL) { 1297 if_printf(sc->ti_ifp, "mbuf allocation failed " 1298 "-- packet dropped!\n"); 1299 goto nobufs; 1300 } 1301 frame = vm_page_alloc(NULL, color++, 1302 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | 1303 VM_ALLOC_WIRED); 1304 if (frame == NULL) { 1305 if_printf(sc->ti_ifp, "buffer allocation " 1306 "failed -- packet dropped!\n"); 1307 printf(" index %d page %d\n", idx, i); 1308 goto nobufs; 1309 } 1310 sf[i] = sf_buf_alloc(frame, SFB_NOWAIT); 1311 if (sf[i] == NULL) { 1312 vm_page_lock_queues(); 1313 vm_page_unwire(frame, 0); 1314 vm_page_free(frame); 1315 vm_page_unlock_queues(); 1316 if_printf(sc->ti_ifp, "buffer allocation " 1317 "failed -- packet dropped!\n"); 1318 printf(" index %d page %d\n", idx, i); 1319 goto nobufs; 1320 } 1321 } 1322 for (i = 0; i < NPAYLOAD; i++){ 1323 /* Attach the buffer to the mbuf. 
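 * Each payload page is mapped through an sf_buf and attached as external
 * storage with sf_buf_mext() as the free routine, so the page can be
 * unwired and released when the mbuf chain is eventually freed.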
*/ 1324 m[i]->m_data = (void *)sf_buf_kva(sf[i]); 1325 m[i]->m_len = PAGE_SIZE; 1326 MEXTADD(m[i], sf_buf_kva(sf[i]), PAGE_SIZE, 1327 sf_buf_mext, sf[i], 0, EXT_DISPOSABLE); 1328 m[i]->m_next = m[i+1]; 1329 } 1330 /* link the buffers to the header */ 1331 m_new->m_next = m[0]; 1332 m_new->m_data += ETHER_ALIGN; 1333 if (sc->ti_hdrsplit) 1334 m_new->m_len = MHLEN - ETHER_ALIGN; 1335 else 1336 m_new->m_len = HDR_LEN; 1337 m_new->m_pkthdr.len = NPAYLOAD * PAGE_SIZE + m_new->m_len; 1338 } 1339 1340 /* Set up the descriptor. */ 1341 r = &sc->ti_rdata->ti_rx_jumbo_ring[idx]; 1342 sc->ti_cdata.ti_rx_jumbo_chain[idx] = m_new; 1343 TI_HOSTADDR(r->ti_addr0) = vtophys(mtod(m_new, caddr_t)); 1344 r->ti_len0 = m_new->m_len; 1345 1346 TI_HOSTADDR(r->ti_addr1) = vtophys(mtod(m[0], caddr_t)); 1347 r->ti_len1 = PAGE_SIZE; 1348 1349 TI_HOSTADDR(r->ti_addr2) = vtophys(mtod(m[1], caddr_t)); 1350 r->ti_len2 = m[1]->m_ext.ext_size; /* could be PAGE_SIZE or MCLBYTES */ 1351 1352 if (PAGE_SIZE == 4096) { 1353 TI_HOSTADDR(r->ti_addr3) = vtophys(mtod(m[2], caddr_t)); 1354 r->ti_len3 = MCLBYTES; 1355 } else { 1356 r->ti_len3 = 0; 1357 } 1358 r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; 1359 1360 r->ti_flags = TI_BDFLAG_JUMBO_RING|TI_RCB_FLAG_USE_EXT_RX_BD; 1361 1362 if (sc->ti_ifp->if_hwassist) 1363 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM; 1364 1365 r->ti_idx = idx; 1366 1367 return (0); 1368 1369 nobufs: 1370 1371 /* 1372 * Warning! : 1373 * This can only be called before the mbufs are strung together. 1374 * If the mbufs are strung together, m_freem() will free the chain, 1375 * so that the later mbufs will be freed multiple times. 1376 */ 1377 if (m_new) 1378 m_freem(m_new); 1379 1380 for (i = 0; i < 3; i++) { 1381 if (m[i]) 1382 m_freem(m[i]); 1383 if (sf[i]) 1384 sf_buf_mext((void *)sf_buf_kva(sf[i]), sf[i]); 1385 } 1386 return (ENOBUFS); 1387 } 1388 #endif 1389 1390 1391 1392 /* 1393 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 1394 * that's 1MB or memory, which is a lot. For now, we fill only the first 1395 * 256 ring entries and hope that our CPU is fast enough to keep up with 1396 * the NIC. 
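 * (TI_SSLOTS is the knob that controls how many of the
 * TI_STD_RX_RING_CNT slots actually get buffers at init time.)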
1397 */ 1398 static int 1399 ti_init_rx_ring_std(sc) 1400 struct ti_softc *sc; 1401 { 1402 register int i; 1403 struct ti_cmd_desc cmd; 1404 1405 for (i = 0; i < TI_SSLOTS; i++) { 1406 if (ti_newbuf_std(sc, i, NULL) == ENOBUFS) 1407 return (ENOBUFS); 1408 }; 1409 1410 TI_UPDATE_STDPROD(sc, i - 1); 1411 sc->ti_std = i - 1; 1412 1413 return (0); 1414 } 1415 1416 static void 1417 ti_free_rx_ring_std(sc) 1418 struct ti_softc *sc; 1419 { 1420 register int i; 1421 1422 for (i = 0; i < TI_STD_RX_RING_CNT; i++) { 1423 if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) { 1424 m_freem(sc->ti_cdata.ti_rx_std_chain[i]); 1425 sc->ti_cdata.ti_rx_std_chain[i] = NULL; 1426 } 1427 bzero((char *)&sc->ti_rdata->ti_rx_std_ring[i], 1428 sizeof(struct ti_rx_desc)); 1429 } 1430 } 1431 1432 static int 1433 ti_init_rx_ring_jumbo(sc) 1434 struct ti_softc *sc; 1435 { 1436 register int i; 1437 struct ti_cmd_desc cmd; 1438 1439 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 1440 if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1441 return (ENOBUFS); 1442 }; 1443 1444 TI_UPDATE_JUMBOPROD(sc, i - 1); 1445 sc->ti_jumbo = i - 1; 1446 1447 return (0); 1448 } 1449 1450 static void 1451 ti_free_rx_ring_jumbo(sc) 1452 struct ti_softc *sc; 1453 { 1454 register int i; 1455 1456 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 1457 if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) { 1458 m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]); 1459 sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL; 1460 } 1461 bzero((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i], 1462 sizeof(struct ti_rx_desc)); 1463 } 1464 } 1465 1466 static int 1467 ti_init_rx_ring_mini(sc) 1468 struct ti_softc *sc; 1469 { 1470 register int i; 1471 1472 for (i = 0; i < TI_MSLOTS; i++) { 1473 if (ti_newbuf_mini(sc, i, NULL) == ENOBUFS) 1474 return (ENOBUFS); 1475 }; 1476 1477 TI_UPDATE_MINIPROD(sc, i - 1); 1478 sc->ti_mini = i - 1; 1479 1480 return (0); 1481 } 1482 1483 static void 1484 ti_free_rx_ring_mini(sc) 1485 struct ti_softc *sc; 1486 { 1487 register int i; 1488 1489 for (i = 0; i < TI_MINI_RX_RING_CNT; i++) { 1490 if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) { 1491 m_freem(sc->ti_cdata.ti_rx_mini_chain[i]); 1492 sc->ti_cdata.ti_rx_mini_chain[i] = NULL; 1493 } 1494 bzero((char *)&sc->ti_rdata->ti_rx_mini_ring[i], 1495 sizeof(struct ti_rx_desc)); 1496 } 1497 } 1498 1499 static void 1500 ti_free_tx_ring(sc) 1501 struct ti_softc *sc; 1502 { 1503 register int i; 1504 1505 if (sc->ti_rdata->ti_tx_ring == NULL) 1506 return; 1507 1508 for (i = 0; i < TI_TX_RING_CNT; i++) { 1509 if (sc->ti_cdata.ti_tx_chain[i] != NULL) { 1510 m_freem(sc->ti_cdata.ti_tx_chain[i]); 1511 sc->ti_cdata.ti_tx_chain[i] = NULL; 1512 } 1513 bzero((char *)&sc->ti_rdata->ti_tx_ring[i], 1514 sizeof(struct ti_tx_desc)); 1515 } 1516 } 1517 1518 static int 1519 ti_init_tx_ring(sc) 1520 struct ti_softc *sc; 1521 { 1522 sc->ti_txcnt = 0; 1523 sc->ti_tx_saved_considx = 0; 1524 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0); 1525 return (0); 1526 } 1527 1528 /* 1529 * The Tigon 2 firmware has a new way to add/delete multicast addresses, 1530 * but we have to support the old way too so that Tigon 1 cards will 1531 * work. 
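 * On the Tigon 1 the address is written into the TI_GCR_MAR0/MAR1
 * registers and an add/delete command is issued; on the Tigon 2 the
 * address travels in the command ring itself as an extended command.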
1532 */ 1533 static void 1534 ti_add_mcast(sc, addr) 1535 struct ti_softc *sc; 1536 struct ether_addr *addr; 1537 { 1538 struct ti_cmd_desc cmd; 1539 u_int16_t *m; 1540 u_int32_t ext[2] = {0, 0}; 1541 1542 m = (u_int16_t *)&addr->octet[0]; 1543 1544 switch (sc->ti_hwrev) { 1545 case TI_HWREV_TIGON: 1546 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1547 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1548 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0); 1549 break; 1550 case TI_HWREV_TIGON_II: 1551 ext[0] = htons(m[0]); 1552 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1553 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2); 1554 break; 1555 default: 1556 if_printf(sc->ti_ifp, "unknown hwrev\n"); 1557 break; 1558 } 1559 } 1560 1561 static void 1562 ti_del_mcast(sc, addr) 1563 struct ti_softc *sc; 1564 struct ether_addr *addr; 1565 { 1566 struct ti_cmd_desc cmd; 1567 u_int16_t *m; 1568 u_int32_t ext[2] = {0, 0}; 1569 1570 m = (u_int16_t *)&addr->octet[0]; 1571 1572 switch (sc->ti_hwrev) { 1573 case TI_HWREV_TIGON: 1574 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1575 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1576 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0); 1577 break; 1578 case TI_HWREV_TIGON_II: 1579 ext[0] = htons(m[0]); 1580 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1581 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2); 1582 break; 1583 default: 1584 if_printf(sc->ti_ifp, "unknown hwrev\n"); 1585 break; 1586 } 1587 } 1588 1589 /* 1590 * Configure the Tigon's multicast address filter. 1591 * 1592 * The actual multicast table management is a bit of a pain, thanks to 1593 * slight brain damage on the part of both Alteon and us. With our 1594 * multicast code, we are only alerted when the multicast address table 1595 * changes and at that point we only have the current list of addresses: 1596 * we only know the current state, not the previous state, so we don't 1597 * actually know what addresses were removed or added. The firmware has 1598 * state, but we can't get our grubby mits on it, and there is no 'delete 1599 * all multicast addresses' command. Hence, we have to maintain our own 1600 * state so we know what addresses have been programmed into the NIC at 1601 * any given time. 1602 */ 1603 static void 1604 ti_setmulti(sc) 1605 struct ti_softc *sc; 1606 { 1607 struct ifnet *ifp; 1608 struct ifmultiaddr *ifma; 1609 struct ti_cmd_desc cmd; 1610 struct ti_mc_entry *mc; 1611 u_int32_t intrs; 1612 1613 ifp = sc->ti_ifp; 1614 1615 if (ifp->if_flags & IFF_ALLMULTI) { 1616 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0); 1617 return; 1618 } else { 1619 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0); 1620 } 1621 1622 /* Disable interrupts. */ 1623 intrs = CSR_READ_4(sc, TI_MB_HOSTINTR); 1624 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1625 1626 /* First, zot all the existing filters. */ 1627 while (SLIST_FIRST(&sc->ti_mc_listhead) != NULL) { 1628 mc = SLIST_FIRST(&sc->ti_mc_listhead); 1629 ti_del_mcast(sc, &mc->mc_addr); 1630 SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries); 1631 free(mc, M_DEVBUF); 1632 } 1633 1634 /* Now program new ones. 
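 * Each link-level address on the interface's multicast list is copied
 * into a ti_mc_entry (so the next update knows what to delete) and then
 * handed to the firmware via ti_add_mcast().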
*/ 1635 IF_ADDR_LOCK(ifp); 1636 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1637 if (ifma->ifma_addr->sa_family != AF_LINK) 1638 continue; 1639 mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT); 1640 if (mc == NULL) { 1641 if_printf(ifp, "no memory for mcast filter entry\n"); 1642 continue; 1643 } 1644 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1645 (char *)&mc->mc_addr, ETHER_ADDR_LEN); 1646 SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries); 1647 ti_add_mcast(sc, &mc->mc_addr); 1648 } 1649 IF_ADDR_UNLOCK(ifp); 1650 1651 /* Re-enable interrupts. */ 1652 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); 1653 } 1654 1655 /* 1656 * Check to see if the BIOS has configured us for a 64 bit slot when 1657 * we aren't actually in one. If we detect this condition, we can work 1658 * around it on the Tigon 2 by setting a bit in the PCI state register, 1659 * but for the Tigon 1 we must give up and abort the interface attach. 1660 */ 1661 static int ti_64bitslot_war(sc) 1662 struct ti_softc *sc; 1663 { 1664 if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) { 1665 CSR_WRITE_4(sc, 0x600, 0); 1666 CSR_WRITE_4(sc, 0x604, 0); 1667 CSR_WRITE_4(sc, 0x600, 0x5555AAAA); 1668 if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) { 1669 if (sc->ti_hwrev == TI_HWREV_TIGON) 1670 return (EINVAL); 1671 else { 1672 TI_SETBIT(sc, TI_PCI_STATE, 1673 TI_PCISTATE_32BIT_BUS); 1674 return (0); 1675 } 1676 } 1677 } 1678 1679 return (0); 1680 } 1681 1682 /* 1683 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1684 * self-test results. 1685 */ 1686 static int 1687 ti_chipinit(sc) 1688 struct ti_softc *sc; 1689 { 1690 u_int32_t cacheline; 1691 u_int32_t pci_writemax = 0; 1692 u_int32_t hdrsplit; 1693 1694 /* Initialize link to down state. */ 1695 sc->ti_linkstat = TI_EV_CODE_LINK_DOWN; 1696 1697 if (sc->ti_ifp->if_capenable & IFCAP_HWCSUM) 1698 sc->ti_ifp->if_hwassist = TI_CSUM_FEATURES; 1699 else 1700 sc->ti_ifp->if_hwassist = 0; 1701 1702 /* Set endianness before we access any non-PCI registers. */ 1703 #if BYTE_ORDER == BIG_ENDIAN 1704 CSR_WRITE_4(sc, TI_MISC_HOST_CTL, 1705 TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24)); 1706 #else 1707 CSR_WRITE_4(sc, TI_MISC_HOST_CTL, 1708 TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24)); 1709 #endif 1710 1711 /* Check the ROM failed bit to see if self-tests passed. */ 1712 if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) { 1713 if_printf(sc->ti_ifp, "board self-diagnostics failed!\n"); 1714 return (ENODEV); 1715 } 1716 1717 /* Halt the CPU. */ 1718 TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT); 1719 1720 /* Figure out the hardware revision. */ 1721 switch (CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) { 1722 case TI_REV_TIGON_I: 1723 sc->ti_hwrev = TI_HWREV_TIGON; 1724 break; 1725 case TI_REV_TIGON_II: 1726 sc->ti_hwrev = TI_HWREV_TIGON_II; 1727 break; 1728 default: 1729 if_printf(sc->ti_ifp, "unsupported chip revision\n"); 1730 return (ENODEV); 1731 } 1732 1733 /* Do special setup for Tigon 2. */ 1734 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1735 TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT); 1736 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K); 1737 TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS); 1738 } 1739 1740 /* 1741 * We don't have firmware source for the Tigon 1, so Tigon 1 boards 1742 * can't do header splitting. 
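 * (sc->ti_hdrsplit is therefore only set when TI_JUMBO_HDRSPLIT is
 * configured and the chip is not a Tigon 1.)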
1743 */ 1744 #ifdef TI_JUMBO_HDRSPLIT 1745 if (sc->ti_hwrev != TI_HWREV_TIGON) 1746 sc->ti_hdrsplit = 1; 1747 else 1748 if_printf(sc->ti_ifp, 1749 "can't do header splitting on a Tigon I board\n"); 1750 #endif /* TI_JUMBO_HDRSPLIT */ 1751 1752 /* Set up the PCI state register. */ 1753 CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD); 1754 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1755 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT); 1756 } 1757 1758 /* Clear the read/write max DMA parameters. */ 1759 TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA| 1760 TI_PCISTATE_READ_MAXDMA)); 1761 1762 /* Get cache line size. */ 1763 cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF; 1764 1765 /* 1766 * If the system has set enabled the PCI memory write 1767 * and invalidate command in the command register, set 1768 * the write max parameter accordingly. This is necessary 1769 * to use MWI with the Tigon 2. 1770 */ 1771 if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) { 1772 switch (cacheline) { 1773 case 1: 1774 case 4: 1775 case 8: 1776 case 16: 1777 case 32: 1778 case 64: 1779 break; 1780 default: 1781 /* Disable PCI memory write and invalidate. */ 1782 if (bootverbose) 1783 if_printf(sc->ti_ifp, "cache line size %d not " 1784 "supported; disabling PCI MWI\n", 1785 cacheline); 1786 CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc, 1787 TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN); 1788 break; 1789 } 1790 } 1791 1792 #ifdef __brokenalpha__ 1793 /* 1794 * From the Alteon sample driver: 1795 * Must insure that we do not cross an 8K (bytes) boundary 1796 * for DMA reads. Our highest limit is 1K bytes. This is a 1797 * restriction on some ALPHA platforms with early revision 1798 * 21174 PCI chipsets, such as the AlphaPC 164lx 1799 */ 1800 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax|TI_PCI_READMAX_1024); 1801 #else 1802 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax); 1803 #endif 1804 1805 /* This sets the min dma param all the way up (0xff). */ 1806 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA); 1807 1808 if (sc->ti_hdrsplit) 1809 hdrsplit = TI_OPMODE_JUMBO_HDRSPLIT; 1810 else 1811 hdrsplit = 0; 1812 1813 /* Configure DMA variables. */ 1814 #if BYTE_ORDER == BIG_ENDIAN 1815 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD | 1816 TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD | 1817 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB | 1818 TI_OPMODE_DONT_FRAG_JUMBO | hdrsplit); 1819 #else /* BYTE_ORDER */ 1820 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA| 1821 TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO| 1822 TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB | hdrsplit); 1823 #endif /* BYTE_ORDER */ 1824 1825 /* 1826 * Only allow 1 DMA channel to be active at a time. 1827 * I don't think this is a good idea, but without it 1828 * the firmware racks up lots of nicDmaReadRingFull 1829 * errors. This is not compatible with hardware checksums. 1830 */ 1831 if (sc->ti_ifp->if_hwassist == 0) 1832 TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE); 1833 1834 /* Recommended settings from Tigon manual. */ 1835 CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W); 1836 CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W); 1837 1838 if (ti_64bitslot_war(sc)) { 1839 if_printf(sc->ti_ifp, "bios thinks we're in a 64 bit slot, " 1840 "but we aren't"); 1841 return (EINVAL); 1842 } 1843 1844 return (0); 1845 } 1846 1847 #define TI_RD_OFF(x) offsetof(struct ti_ring_data, x) 1848 1849 /* 1850 * Initialize the general information block and firmware, and 1851 * start the CPU(s) running. 
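 * The firmware is copied into SRAM with ti_loadfw(), the ring control
 * blocks in the host-resident general information block (event, command,
 * standard/jumbo/mini receive, return and transmit) are filled in, the
 * coalescing tuneables are written, and finally interrupts are unmasked
 * and the CPU's HALT bit is cleared so the firmware begins executing.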
1852 */ 1853 static int 1854 ti_gibinit(sc) 1855 struct ti_softc *sc; 1856 { 1857 struct ti_rcb *rcb; 1858 int i; 1859 struct ifnet *ifp; 1860 uint32_t rdphys; 1861 1862 ifp = sc->ti_ifp; 1863 rdphys = sc->ti_rdata_phys; 1864 1865 /* Disable interrupts for now. */ 1866 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1867 1868 /* 1869 * Tell the chip where to find the general information block. 1870 * While this struct could go into >4GB memory, we allocate it in a 1871 * single slab with the other descriptors, and those don't seem to 1872 * support being located in a 64-bit region. 1873 */ 1874 CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0); 1875 CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, rdphys + TI_RD_OFF(ti_info)); 1876 1877 /* Load the firmware into SRAM. */ 1878 ti_loadfw(sc); 1879 1880 /* Set up the contents of the general info and ring control blocks. */ 1881 1882 /* Set up the event ring and producer pointer. */ 1883 rcb = &sc->ti_rdata->ti_info.ti_ev_rcb; 1884 1885 TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_event_ring); 1886 rcb->ti_flags = 0; 1887 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) = 1888 rdphys + TI_RD_OFF(ti_ev_prodidx_r); 1889 sc->ti_ev_prodidx.ti_idx = 0; 1890 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0); 1891 sc->ti_ev_saved_considx = 0; 1892 1893 /* Set up the command ring and producer mailbox. */ 1894 rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb; 1895 1896 sc->ti_rdata->ti_cmd_ring = 1897 (struct ti_cmd_desc *)(sc->ti_vhandle + TI_GCR_CMDRING); 1898 TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING); 1899 rcb->ti_flags = 0; 1900 rcb->ti_max_len = 0; 1901 for (i = 0; i < TI_CMD_RING_CNT; i++) { 1902 CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0); 1903 } 1904 CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0); 1905 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0); 1906 sc->ti_cmd_saved_prodidx = 0; 1907 1908 /* 1909 * Assign the address of the stats refresh buffer. 1910 * We re-use the current stats buffer for this to 1911 * conserve memory. 1912 */ 1913 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) = 1914 rdphys + TI_RD_OFF(ti_info.ti_stats); 1915 1916 /* Set up the standard receive ring. */ 1917 rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb; 1918 TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_std_ring); 1919 rcb->ti_max_len = TI_FRAMELEN; 1920 rcb->ti_flags = 0; 1921 if (sc->ti_ifp->if_hwassist) 1922 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1923 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1924 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1925 1926 /* Set up the jumbo receive ring. */ 1927 rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb; 1928 TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_jumbo_ring); 1929 1930 #ifdef TI_PRIVATE_JUMBOS 1931 rcb->ti_max_len = TI_JUMBO_FRAMELEN; 1932 rcb->ti_flags = 0; 1933 #else 1934 rcb->ti_max_len = PAGE_SIZE; 1935 rcb->ti_flags = TI_RCB_FLAG_USE_EXT_RX_BD; 1936 #endif 1937 if (sc->ti_ifp->if_hwassist) 1938 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1939 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1940 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1941 1942 /* 1943 * Set up the mini ring. Only activated on the 1944 * Tigon 2 but the slot in the config block is 1945 * still there on the Tigon 1. 
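 * (On a Tigon 1 the RCB below is simply flagged
 * TI_RCB_FLAG_RING_DISABLED.)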
1946 */ 1947 rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb; 1948 TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_mini_ring); 1949 rcb->ti_max_len = MHLEN - ETHER_ALIGN; 1950 if (sc->ti_hwrev == TI_HWREV_TIGON) 1951 rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED; 1952 else 1953 rcb->ti_flags = 0; 1954 if (sc->ti_ifp->if_hwassist) 1955 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1956 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1957 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1958 1959 /* 1960 * Set up the receive return ring. 1961 */ 1962 rcb = &sc->ti_rdata->ti_info.ti_return_rcb; 1963 TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_return_ring); 1964 rcb->ti_flags = 0; 1965 rcb->ti_max_len = TI_RETURN_RING_CNT; 1966 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) = 1967 rdphys + TI_RD_OFF(ti_return_prodidx_r); 1968 1969 /* 1970 * Set up the tx ring. Note: for the Tigon 2, we have the option 1971 * of putting the transmit ring in the host's address space and 1972 * letting the chip DMA it instead of leaving the ring in the NIC's 1973 * memory and accessing it through the shared memory region. We 1974 * do this for the Tigon 2, but it doesn't work on the Tigon 1, 1975 * so we have to revert to the shared memory scheme if we detect 1976 * a Tigon 1 chip. 1977 */ 1978 CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE); 1979 if (sc->ti_hwrev == TI_HWREV_TIGON) { 1980 sc->ti_rdata->ti_tx_ring_nic = 1981 (struct ti_tx_desc *)(sc->ti_vhandle + TI_WINDOW); 1982 } 1983 bzero((char *)sc->ti_rdata->ti_tx_ring, 1984 TI_TX_RING_CNT * sizeof(struct ti_tx_desc)); 1985 rcb = &sc->ti_rdata->ti_info.ti_tx_rcb; 1986 if (sc->ti_hwrev == TI_HWREV_TIGON) 1987 rcb->ti_flags = 0; 1988 else 1989 rcb->ti_flags = TI_RCB_FLAG_HOST_RING; 1990 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1991 if (sc->ti_ifp->if_hwassist) 1992 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1993 TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; 1994 rcb->ti_max_len = TI_TX_RING_CNT; 1995 if (sc->ti_hwrev == TI_HWREV_TIGON) 1996 TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE; 1997 else 1998 TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_tx_ring); 1999 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) = 2000 rdphys + TI_RD_OFF(ti_tx_considx_r); 2001 2002 /* Set up tuneables */ 2003 #if 0 2004 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2005 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, 2006 (sc->ti_rx_coal_ticks / 10)); 2007 else 2008 #endif 2009 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks); 2010 CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks); 2011 CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks); 2012 CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds); 2013 CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds); 2014 CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio); 2015 2016 /* Turn interrupts on. */ 2017 CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0); 2018 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 2019 2020 /* Start CPU. */ 2021 TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP)); 2022 2023 return (0); 2024 } 2025 2026 static void 2027 ti_rdata_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2028 { 2029 struct ti_softc *sc; 2030 2031 sc = arg; 2032 if (error || nseg != 1) 2033 return; 2034 2035 /* 2036 * All of the Tigon data structures need to live at <4GB. This 2037 * cast is fine since busdma was told about this constraint. 
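 * (The ti_rdata_dmat tag created in ti_attach() uses a
 * BUS_SPACE_MAXADDR_32BIT lowaddr, which is what enforces it.)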
2038 */ 2039 sc->ti_rdata_phys = (uint32_t)segs[0].ds_addr; 2040 return; 2041 } 2042 2043 /* 2044 * Probe for a Tigon chip. Check the PCI vendor and device IDs 2045 * against our list and return its name if we find a match. 2046 */ 2047 static int 2048 ti_probe(dev) 2049 device_t dev; 2050 { 2051 struct ti_type *t; 2052 2053 t = ti_devs; 2054 2055 while (t->ti_name != NULL) { 2056 if ((pci_get_vendor(dev) == t->ti_vid) && 2057 (pci_get_device(dev) == t->ti_did)) { 2058 device_set_desc(dev, t->ti_name); 2059 return (BUS_PROBE_DEFAULT); 2060 } 2061 t++; 2062 } 2063 2064 return (ENXIO); 2065 } 2066 2067 static int 2068 ti_attach(dev) 2069 device_t dev; 2070 { 2071 struct ifnet *ifp; 2072 struct ti_softc *sc; 2073 int error = 0, rid; 2074 u_char eaddr[6]; 2075 2076 sc = device_get_softc(dev); 2077 sc->ti_unit = device_get_unit(dev); 2078 2079 mtx_init(&sc->ti_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 2080 MTX_DEF | MTX_RECURSE); 2081 ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts); 2082 ifp = sc->ti_ifp = if_alloc(IFT_ETHER); 2083 if (ifp == NULL) { 2084 device_printf(dev, "can not if_alloc()\n"); 2085 error = ENOSPC; 2086 goto fail; 2087 } 2088 sc->ti_ifp->if_capabilities = IFCAP_HWCSUM | 2089 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2090 sc->ti_ifp->if_capenable = sc->ti_ifp->if_capabilities; 2091 2092 /* 2093 * Map control/status registers. 2094 */ 2095 pci_enable_busmaster(dev); 2096 2097 rid = TI_PCI_LOMEM; 2098 sc->ti_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2099 RF_ACTIVE|PCI_RF_DENSE); 2100 2101 if (sc->ti_res == NULL) { 2102 device_printf(dev, "couldn't map memory\n"); 2103 error = ENXIO; 2104 goto fail; 2105 } 2106 2107 sc->ti_btag = rman_get_bustag(sc->ti_res); 2108 sc->ti_bhandle = rman_get_bushandle(sc->ti_res); 2109 sc->ti_vhandle = (vm_offset_t)rman_get_virtual(sc->ti_res); 2110 2111 /* Allocate interrupt */ 2112 rid = 0; 2113 2114 sc->ti_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2115 RF_SHAREABLE | RF_ACTIVE); 2116 2117 if (sc->ti_irq == NULL) { 2118 device_printf(dev, "couldn't map interrupt\n"); 2119 error = ENXIO; 2120 goto fail; 2121 } 2122 2123 if (ti_chipinit(sc)) { 2124 device_printf(dev, "chip initialization failed\n"); 2125 error = ENXIO; 2126 goto fail; 2127 } 2128 2129 /* Zero out the NIC's on-board SRAM. */ 2130 ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL); 2131 2132 /* Init again -- zeroing memory may have clobbered some registers. */ 2133 if (ti_chipinit(sc)) { 2134 device_printf(dev, "chip initialization failed\n"); 2135 error = ENXIO; 2136 goto fail; 2137 } 2138 2139 /* 2140 * Get station address from the EEPROM. Note: the manual states 2141 * that the MAC address is at offset 0x8c, however the data is 2142 * stored as two longwords (since that's how it's loaded into 2143 * the NIC). This means the MAC address is actually preceded 2144 * by two zero bytes. We need to skip over those. 2145 */ 2146 if (ti_read_eeprom(sc, eaddr, 2147 TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2148 device_printf(dev, "failed to read station address\n"); 2149 error = ENXIO; 2150 goto fail; 2151 } 2152 2153 /* Allocate the general information block and ring buffers. 
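 * The sequence below follows the usual busdma pattern: create a parent
 * tag and a ring-data tag with bus_dma_tag_create(), carve out the
 * ti_ring_data slab with bus_dmamem_alloc(), and then bus_dmamap_load()
 * it with ti_rdata_cb() as the callback that records the physical
 * address of the single segment in sc->ti_rdata_phys.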
*/ 2154 if (bus_dma_tag_create(NULL, /* parent */ 2155 1, 0, /* algnmnt, boundary */ 2156 BUS_SPACE_MAXADDR, /* lowaddr */ 2157 BUS_SPACE_MAXADDR, /* highaddr */ 2158 NULL, NULL, /* filter, filterarg */ 2159 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 2160 0, /* nsegments */ 2161 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 2162 0, /* flags */ 2163 NULL, NULL, /* lockfunc, lockarg */ 2164 &sc->ti_parent_dmat) != 0) { 2165 device_printf(dev, "Failed to allocate parent dmat\n"); 2166 error = ENOMEM; 2167 goto fail; 2168 } 2169 2170 if (bus_dma_tag_create(sc->ti_parent_dmat, /* parent */ 2171 PAGE_SIZE, 0, /* algnmnt, boundary */ 2172 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 2173 BUS_SPACE_MAXADDR, /* highaddr */ 2174 NULL, NULL, /* filter, filterarg */ 2175 sizeof(struct ti_ring_data), /* maxsize */ 2176 1, /* nsegments */ 2177 sizeof(struct ti_ring_data), /* maxsegsize */ 2178 0, /* flags */ 2179 NULL, NULL, /* lockfunc, lockarg */ 2180 &sc->ti_rdata_dmat) != 0) { 2181 device_printf(dev, "Failed to allocate rdata dmat\n"); 2182 error = ENOMEM; 2183 goto fail; 2184 } 2185 2186 if (bus_dmamem_alloc(sc->ti_rdata_dmat, (void**)&sc->ti_rdata, 2187 BUS_DMA_NOWAIT, &sc->ti_rdata_dmamap) != 0) { 2188 device_printf(dev, "Failed to allocate rdata memory\n"); 2189 error = ENOMEM; 2190 goto fail; 2191 } 2192 2193 if (bus_dmamap_load(sc->ti_rdata_dmat, sc->ti_rdata_dmamap, 2194 sc->ti_rdata, sizeof(struct ti_ring_data), 2195 ti_rdata_cb, sc, BUS_DMA_NOWAIT) != 0) { 2196 device_printf(dev, "Failed to load rdata segments\n"); 2197 error = ENOMEM; 2198 goto fail; 2199 } 2200 2201 bzero(sc->ti_rdata, sizeof(struct ti_ring_data)); 2202 2203 /* Try to allocate memory for jumbo buffers. */ 2204 #ifdef TI_PRIVATE_JUMBOS 2205 if (ti_alloc_jumbo_mem(sc)) { 2206 device_printf(dev, "jumbo buffer allocation failed\n"); 2207 error = ENXIO; 2208 goto fail; 2209 } 2210 #endif 2211 2212 /* 2213 * We really need a better way to tell a 1000baseTX card 2214 * from a 1000baseSX one, since in theory there could be 2215 * OEMed 1000baseTX cards from lame vendors who aren't 2216 * clever enough to change the PCI ID. For the moment 2217 * though, the AceNIC is the only copper card available. 2218 */ 2219 if (pci_get_vendor(dev) == ALT_VENDORID && 2220 pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER) 2221 sc->ti_copper = 1; 2222 /* Ok, it's not the only copper card available. */ 2223 if (pci_get_vendor(dev) == NG_VENDORID && 2224 pci_get_device(dev) == NG_DEVICEID_GA620T) 2225 sc->ti_copper = 1; 2226 2227 /* Set default tuneable values. */ 2228 sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC; 2229 #if 0 2230 sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000; 2231 #endif 2232 sc->ti_rx_coal_ticks = 170; 2233 sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500; 2234 sc->ti_rx_max_coal_bds = 64; 2235 #if 0 2236 sc->ti_tx_max_coal_bds = 128; 2237 #endif 2238 sc->ti_tx_max_coal_bds = 32; 2239 sc->ti_tx_buf_ratio = 21; 2240 2241 /* Set up ifnet structure */ 2242 ifp->if_softc = sc; 2243 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2244 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2245 tis[sc->ti_unit] = sc; 2246 ifp->if_ioctl = ti_ioctl; 2247 ifp->if_start = ti_start; 2248 ifp->if_watchdog = ti_watchdog; 2249 ifp->if_init = ti_init; 2250 ifp->if_mtu = ETHERMTU; 2251 ifp->if_snd.ifq_maxlen = TI_TX_RING_CNT - 1; 2252 2253 /* Set up ifmedia support. */ 2254 if (sc->ti_copper) { 2255 /* 2256 * Copper cards allow manual 10/100 mode selection, 2257 * but not manual 1000baseTX mode selection. Why? 
* Because currently there's no way to specify the
2259 * master/slave setting through the firmware interface,
2260 * so Alteon decided to just bag it and handle it
2261 * via autonegotiation.
2262 */
2263 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
2264 ifmedia_add(&sc->ifmedia,
2265 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2266 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
2267 ifmedia_add(&sc->ifmedia,
2268 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
2269 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL);
2270 ifmedia_add(&sc->ifmedia,
2271 IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
2272 } else {
2273 /* Fiber cards don't support 10/100 modes. */
2274 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2275 ifmedia_add(&sc->ifmedia,
2276 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2277 }
2278 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2279 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);
2280
2281 /*
2282 * We're assuming here that card initialization is a sequential
2283 * thing. If it isn't, multiple cards probing at the same time
2284 * could stomp on the list of softcs here.
2285 */
2286
2287 /* Register the device */
2288 sc->dev = make_dev(&ti_cdevsw, sc->ti_unit, UID_ROOT, GID_OPERATOR,
2289 0600, "ti%d", sc->ti_unit);
2290 sc->dev->si_drv1 = sc;
2291
2292 /*
2293 * Call MI attach routine.
2294 */
2295 ether_ifattach(ifp, eaddr);
2296
2297 /* Hook interrupt last to avoid having to lock softc */
2298 error = bus_setup_intr(dev, sc->ti_irq, INTR_TYPE_NET|INTR_MPSAFE,
2299 ti_intr, sc, &sc->ti_intrhand);
2300
2301 if (error) {
2302 device_printf(dev, "couldn't set up irq\n");
2303 ether_ifdetach(ifp);
2304 goto fail;
2305 }
2306
2307 fail:
2308 if (sc && error)
2309 ti_detach(dev);
2310
2311 return (error);
2312 }
2313
2314 /*
2315 * Shutdown hardware and free up resources. This can be called any
2316 * time after the mutex has been initialized. It is called in both
2317 * the error case in attach and the normal detach case so it needs
2318 * to be careful about only freeing resources that have actually been
2319 * allocated.
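 *
 * Note that ti_attach() jumps to its fail: label with this function,
 * so every release below is guarded by a check that the corresponding
 * resource was really set up (non-NULL softc members, an initialized
 * mutex, and device_is_attached() for the ifnet teardown).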
2320 */ 2321 static int 2322 ti_detach(dev) 2323 device_t dev; 2324 { 2325 struct ti_softc *sc; 2326 struct ifnet *ifp; 2327 2328 sc = device_get_softc(dev); 2329 if (sc->dev) 2330 destroy_dev(sc->dev); 2331 KASSERT(mtx_initialized(&sc->ti_mtx), ("ti mutex not initialized")); 2332 TI_LOCK(sc); 2333 ifp = sc->ti_ifp; 2334 2335 /* These should only be active if attach succeeded */ 2336 if (device_is_attached(dev)) { 2337 ti_stop(sc); 2338 ether_ifdetach(ifp); 2339 bus_generic_detach(dev); 2340 } 2341 ifmedia_removeall(&sc->ifmedia); 2342 2343 #ifdef TI_PRIVATE_JUMBOS 2344 if (sc->ti_cdata.ti_jumbo_buf) 2345 bus_dmamem_free(sc->ti_jumbo_dmat, sc->ti_cdata.ti_jumbo_buf, 2346 sc->ti_jumbo_dmamap); 2347 if (sc->ti_jumbo_dmat) 2348 bus_dma_tag_destroy(sc->ti_jumbo_dmat); 2349 #endif 2350 if (sc->ti_rdata) 2351 bus_dmamem_free(sc->ti_rdata_dmat, sc->ti_rdata, 2352 sc->ti_rdata_dmamap); 2353 if (sc->ti_rdata_dmat) 2354 bus_dma_tag_destroy(sc->ti_rdata_dmat); 2355 if (sc->ti_parent_dmat) 2356 bus_dma_tag_destroy(sc->ti_parent_dmat); 2357 if (sc->ti_intrhand) 2358 bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand); 2359 if (sc->ti_irq) 2360 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq); 2361 if (sc->ti_res) { 2362 bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM, 2363 sc->ti_res); 2364 } 2365 if (ifp) 2366 if_free(ifp); 2367 2368 TI_UNLOCK(sc); 2369 mtx_destroy(&sc->ti_mtx); 2370 2371 return (0); 2372 } 2373 2374 #ifdef TI_JUMBO_HDRSPLIT 2375 /* 2376 * If hdr_len is 0, that means that header splitting wasn't done on 2377 * this packet for some reason. The two most likely reasons are that 2378 * the protocol isn't a supported protocol for splitting, or this 2379 * packet had a fragment offset that wasn't 0. 2380 * 2381 * The header length, if it is non-zero, will always be the length of 2382 * the headers on the packet, but that length could be longer than the 2383 * first mbuf. So we take the minimum of the two as the actual 2384 * length. 2385 */ 2386 static __inline void 2387 ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len, int idx) 2388 { 2389 int i = 0; 2390 int lengths[4] = {0, 0, 0, 0}; 2391 struct mbuf *m, *mp; 2392 2393 if (hdr_len != 0) 2394 top->m_len = min(hdr_len, top->m_len); 2395 pkt_len -= top->m_len; 2396 lengths[i++] = top->m_len; 2397 2398 mp = top; 2399 for (m = top->m_next; m && pkt_len; m = m->m_next) { 2400 m->m_len = m->m_ext.ext_size = min(m->m_len, pkt_len); 2401 pkt_len -= m->m_len; 2402 lengths[i++] = m->m_len; 2403 mp = m; 2404 } 2405 2406 #if 0 2407 if (hdr_len != 0) 2408 printf("got split packet: "); 2409 else 2410 printf("got non-split packet: "); 2411 2412 printf("%d,%d,%d,%d = %d\n", lengths[0], 2413 lengths[1], lengths[2], lengths[3], 2414 lengths[0] + lengths[1] + lengths[2] + 2415 lengths[3]); 2416 #endif 2417 2418 if (pkt_len) 2419 panic("header splitting didn't"); 2420 2421 if (m) { 2422 m_freem(m); 2423 mp->m_next = NULL; 2424 2425 } 2426 if (mp->m_next != NULL) 2427 panic("ti_hdr_split: last mbuf in chain should be null"); 2428 } 2429 #endif /* TI_JUMBO_HDRSPLIT */ 2430 2431 /* 2432 * Frame reception handling. This is called if there's a frame 2433 * on the receive return list. 
2434 *
2435 * Note: we have to be able to handle three possibilities here:
2436 * 1) the frame is from the mini receive ring (can only happen
2437 * on Tigon 2 boards)
2438 * 2) the frame is from the jumbo receive ring
2439 * 3) the frame is from the standard receive ring
2440 */
2441
2442 static void
2443 ti_rxeof(sc)
2444 struct ti_softc *sc;
2445 {
2446 struct ifnet *ifp;
2447 struct ti_cmd_desc cmd;
2448
2449 TI_LOCK_ASSERT(sc);
2450
2451 ifp = sc->ti_ifp;
2452
2453 while (sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) {
2454 struct ti_rx_desc *cur_rx;
2455 u_int32_t rxidx;
2456 struct mbuf *m = NULL;
2457 u_int16_t vlan_tag = 0;
2458 int have_tag = 0;
2459
2460 cur_rx =
2461 &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx];
2462 rxidx = cur_rx->ti_idx;
2463 TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT);
2464
2465 if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) {
2466 have_tag = 1;
2467 vlan_tag = cur_rx->ti_vlan_tag & 0xfff;
2468 }
2469
2470 if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) {
2471
2472 TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT);
2473 m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx];
2474 sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL;
2475 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
2476 ifp->if_ierrors++;
2477 ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
2478 continue;
2479 }
2480 if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) {
2481 ifp->if_ierrors++;
2482 ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
2483 continue;
2484 }
2485 #ifdef TI_PRIVATE_JUMBOS
2486 m->m_len = cur_rx->ti_len;
2487 #else /* TI_PRIVATE_JUMBOS */
2488 #ifdef TI_JUMBO_HDRSPLIT
2489 if (sc->ti_hdrsplit)
2490 ti_hdr_split(m, TI_HOSTADDR(cur_rx->ti_addr),
2491 cur_rx->ti_len, rxidx);
2492 else
2493 #endif /* TI_JUMBO_HDRSPLIT */
2494 m_adj(m, cur_rx->ti_len - m->m_pkthdr.len);
2495 #endif /* TI_PRIVATE_JUMBOS */
2496 } else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
2497 TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
2498 m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
2499 sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL;
2500 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
2501 ifp->if_ierrors++;
2502 ti_newbuf_mini(sc, sc->ti_mini, m);
2503 continue;
2504 }
2505 if (ti_newbuf_mini(sc, sc->ti_mini, NULL) == ENOBUFS) {
2506 ifp->if_ierrors++;
2507 ti_newbuf_mini(sc, sc->ti_mini, m);
2508 continue;
2509 }
2510 m->m_len = cur_rx->ti_len;
2511 } else {
2512 TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
2513 m = sc->ti_cdata.ti_rx_std_chain[rxidx];
2514 sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL;
2515 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
2516 ifp->if_ierrors++;
2517 ti_newbuf_std(sc, sc->ti_std, m);
2518 continue;
2519 }
2520 if (ti_newbuf_std(sc, sc->ti_std, NULL) == ENOBUFS) {
2521 ifp->if_ierrors++;
2522 ti_newbuf_std(sc, sc->ti_std, m);
2523 continue;
2524 }
2525 m->m_len = cur_rx->ti_len;
2526 }
2527
2528 m->m_pkthdr.len = cur_rx->ti_len;
2529 ifp->if_ipackets++;
2530 m->m_pkthdr.rcvif = ifp;
2531
2532 if (ifp->if_hwassist) {
2533 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
2534 CSUM_DATA_VALID;
2535 if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
2536 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2537 m->m_pkthdr.csum_data = cur_rx->ti_tcp_udp_cksum;
2538 }
2539
2540 /*
2541 * If we received a packet with a vlan tag,
2542 * tag it before passing the packet upward.
2543 */
2544 if (have_tag)
2545 VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
2546 TI_UNLOCK(sc);
2547 (*ifp->if_input)(ifp, m);
2548 TI_LOCK(sc);
2549 }
2550
2551 /* Only necessary on the Tigon 1.
*/ 2552 if (sc->ti_hwrev == TI_HWREV_TIGON) 2553 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 2554 sc->ti_rx_saved_considx); 2555 2556 TI_UPDATE_STDPROD(sc, sc->ti_std); 2557 TI_UPDATE_MINIPROD(sc, sc->ti_mini); 2558 TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo); 2559 } 2560 2561 static void 2562 ti_txeof(sc) 2563 struct ti_softc *sc; 2564 { 2565 struct ti_tx_desc *cur_tx = NULL; 2566 struct ifnet *ifp; 2567 2568 ifp = sc->ti_ifp; 2569 2570 /* 2571 * Go through our tx ring and free mbufs for those 2572 * frames that have been sent. 2573 */ 2574 while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) { 2575 u_int32_t idx = 0; 2576 2577 idx = sc->ti_tx_saved_considx; 2578 if (sc->ti_hwrev == TI_HWREV_TIGON) { 2579 if (idx > 383) 2580 CSR_WRITE_4(sc, TI_WINBASE, 2581 TI_TX_RING_BASE + 6144); 2582 else if (idx > 255) 2583 CSR_WRITE_4(sc, TI_WINBASE, 2584 TI_TX_RING_BASE + 4096); 2585 else if (idx > 127) 2586 CSR_WRITE_4(sc, TI_WINBASE, 2587 TI_TX_RING_BASE + 2048); 2588 else 2589 CSR_WRITE_4(sc, TI_WINBASE, 2590 TI_TX_RING_BASE); 2591 cur_tx = &sc->ti_rdata->ti_tx_ring_nic[idx % 128]; 2592 } else 2593 cur_tx = &sc->ti_rdata->ti_tx_ring[idx]; 2594 if (cur_tx->ti_flags & TI_BDFLAG_END) 2595 ifp->if_opackets++; 2596 if (sc->ti_cdata.ti_tx_chain[idx] != NULL) { 2597 m_freem(sc->ti_cdata.ti_tx_chain[idx]); 2598 sc->ti_cdata.ti_tx_chain[idx] = NULL; 2599 } 2600 sc->ti_txcnt--; 2601 TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT); 2602 ifp->if_timer = 0; 2603 } 2604 2605 if (cur_tx != NULL) 2606 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2607 } 2608 2609 static void 2610 ti_intr(xsc) 2611 void *xsc; 2612 { 2613 struct ti_softc *sc; 2614 struct ifnet *ifp; 2615 2616 sc = xsc; 2617 TI_LOCK(sc); 2618 ifp = sc->ti_ifp; 2619 2620 /*#ifdef notdef*/ 2621 /* Avoid this for now -- checking this register is expensive. */ 2622 /* Make sure this is really our interrupt. */ 2623 if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) { 2624 TI_UNLOCK(sc); 2625 return; 2626 } 2627 /*#endif*/ 2628 2629 /* Ack interrupt and stop others from occuring. */ 2630 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 2631 2632 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2633 /* Check RX return ring producer/consumer */ 2634 ti_rxeof(sc); 2635 2636 /* Check TX ring producer/consumer */ 2637 ti_txeof(sc); 2638 } 2639 2640 ti_handle_events(sc); 2641 2642 /* Re-enable interrupts. */ 2643 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 2644 2645 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2646 ifp->if_snd.ifq_head != NULL) 2647 ti_start(ifp); 2648 2649 TI_UNLOCK(sc); 2650 } 2651 2652 static void 2653 ti_stats_update(sc) 2654 struct ti_softc *sc; 2655 { 2656 struct ifnet *ifp; 2657 2658 ifp = sc->ti_ifp; 2659 2660 ifp->if_collisions += 2661 (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames + 2662 sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames + 2663 sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions + 2664 sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) - 2665 ifp->if_collisions; 2666 } 2667 2668 /* 2669 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2670 * pointers to descriptors. 
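 *
 * Each non-empty mbuf in the chain consumes one descriptor: the
 * descriptor's host address is the physical address of the mbuf data,
 * its length is m_len, and the last descriptor of the frame gets
 * TI_BDFLAG_END. A minimal sketch of the host-ring case (illustrative
 * only; the window switching needed on the Tigon 1 and all error
 * checks are omitted):
 *
 *	for (m = m_head; m != NULL; m = m->m_next) {
 *		if (m->m_len == 0)
 *			continue;
 *		f = &sc->ti_rdata->ti_tx_ring[frag];
 *		TI_HOSTADDR(f->ti_addr) = vtophys(mtod(m, vm_offset_t));
 *		f->ti_len = m->m_len;
 *		f->ti_flags = csum_flags;
 *		cur = frag;
 *		TI_INC(frag, TI_TX_RING_CNT);
 *	}
 *	sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END;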
2671 */ 2672 static int 2673 ti_encap(sc, m_head, txidx) 2674 struct ti_softc *sc; 2675 struct mbuf *m_head; 2676 u_int32_t *txidx; 2677 { 2678 struct ti_tx_desc *f = NULL; 2679 struct mbuf *m; 2680 u_int32_t frag, cur, cnt = 0; 2681 u_int16_t csum_flags = 0; 2682 struct m_tag *mtag; 2683 2684 m = m_head; 2685 cur = frag = *txidx; 2686 2687 if (m_head->m_pkthdr.csum_flags) { 2688 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 2689 csum_flags |= TI_BDFLAG_IP_CKSUM; 2690 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 2691 csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM; 2692 if (m_head->m_flags & M_LASTFRAG) 2693 csum_flags |= TI_BDFLAG_IP_FRAG_END; 2694 else if (m_head->m_flags & M_FRAG) 2695 csum_flags |= TI_BDFLAG_IP_FRAG; 2696 } 2697 2698 mtag = VLAN_OUTPUT_TAG(sc->ti_ifp, m); 2699 2700 /* 2701 * Start packing the mbufs in this chain into 2702 * the fragment pointers. Stop when we run out 2703 * of fragments or hit the end of the mbuf chain. 2704 */ 2705 for (m = m_head; m != NULL; m = m->m_next) { 2706 if (m->m_len != 0) { 2707 if (sc->ti_hwrev == TI_HWREV_TIGON) { 2708 if (frag > 383) 2709 CSR_WRITE_4(sc, TI_WINBASE, 2710 TI_TX_RING_BASE + 6144); 2711 else if (frag > 255) 2712 CSR_WRITE_4(sc, TI_WINBASE, 2713 TI_TX_RING_BASE + 4096); 2714 else if (frag > 127) 2715 CSR_WRITE_4(sc, TI_WINBASE, 2716 TI_TX_RING_BASE + 2048); 2717 else 2718 CSR_WRITE_4(sc, TI_WINBASE, 2719 TI_TX_RING_BASE); 2720 f = &sc->ti_rdata->ti_tx_ring_nic[frag % 128]; 2721 } else 2722 f = &sc->ti_rdata->ti_tx_ring[frag]; 2723 if (sc->ti_cdata.ti_tx_chain[frag] != NULL) 2724 break; 2725 TI_HOSTADDR(f->ti_addr) = vtophys(mtod(m, vm_offset_t)); 2726 f->ti_len = m->m_len; 2727 f->ti_flags = csum_flags; 2728 2729 if (mtag != NULL) { 2730 f->ti_flags |= TI_BDFLAG_VLAN_TAG; 2731 f->ti_vlan_tag = VLAN_TAG_VALUE(mtag) & 0xfff; 2732 } else { 2733 f->ti_vlan_tag = 0; 2734 } 2735 2736 /* 2737 * Sanity check: avoid coming within 16 descriptors 2738 * of the end of the ring. 2739 */ 2740 if ((TI_TX_RING_CNT - (sc->ti_txcnt + cnt)) < 16) 2741 return (ENOBUFS); 2742 cur = frag; 2743 TI_INC(frag, TI_TX_RING_CNT); 2744 cnt++; 2745 } 2746 } 2747 2748 if (m != NULL) 2749 return (ENOBUFS); 2750 2751 if (frag == sc->ti_tx_saved_considx) 2752 return (ENOBUFS); 2753 2754 if (sc->ti_hwrev == TI_HWREV_TIGON) 2755 sc->ti_rdata->ti_tx_ring_nic[cur % 128].ti_flags |= 2756 TI_BDFLAG_END; 2757 else 2758 sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END; 2759 sc->ti_cdata.ti_tx_chain[cur] = m_head; 2760 sc->ti_txcnt += cnt; 2761 2762 *txidx = frag; 2763 2764 return (0); 2765 } 2766 2767 /* 2768 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2769 * to the mbuf data regions directly in the transmit descriptors. 2770 */ 2771 static void 2772 ti_start(ifp) 2773 struct ifnet *ifp; 2774 { 2775 struct ti_softc *sc; 2776 struct mbuf *m_head = NULL; 2777 u_int32_t prodidx = 0; 2778 2779 sc = ifp->if_softc; 2780 TI_LOCK(sc); 2781 2782 prodidx = CSR_READ_4(sc, TI_MB_SENDPROD_IDX); 2783 2784 while (sc->ti_cdata.ti_tx_chain[prodidx] == NULL) { 2785 IF_DEQUEUE(&ifp->if_snd, m_head); 2786 if (m_head == NULL) 2787 break; 2788 2789 /* 2790 * XXX 2791 * safety overkill. If this is a fragmented packet chain 2792 * with delayed TCP/UDP checksums, then only encapsulate 2793 * it if we have enough descriptors to handle the entire 2794 * chain at once. 
2795 * (paranoia -- may not actually be needed) 2796 */ 2797 if (m_head->m_flags & M_FIRSTFRAG && 2798 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 2799 if ((TI_TX_RING_CNT - sc->ti_txcnt) < 2800 m_head->m_pkthdr.csum_data + 16) { 2801 IF_PREPEND(&ifp->if_snd, m_head); 2802 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2803 break; 2804 } 2805 } 2806 2807 /* 2808 * Pack the data into the transmit ring. If we 2809 * don't have room, set the OACTIVE flag and wait 2810 * for the NIC to drain the ring. 2811 */ 2812 if (ti_encap(sc, m_head, &prodidx)) { 2813 IF_PREPEND(&ifp->if_snd, m_head); 2814 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2815 break; 2816 } 2817 2818 /* 2819 * If there's a BPF listener, bounce a copy of this frame 2820 * to him. 2821 */ 2822 BPF_MTAP(ifp, m_head); 2823 } 2824 2825 /* Transmit */ 2826 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx); 2827 2828 /* 2829 * Set a timeout in case the chip goes out to lunch. 2830 */ 2831 ifp->if_timer = 5; 2832 TI_UNLOCK(sc); 2833 } 2834 2835 static void 2836 ti_init(xsc) 2837 void *xsc; 2838 { 2839 struct ti_softc *sc = xsc; 2840 2841 /* Cancel pending I/O and flush buffers. */ 2842 ti_stop(sc); 2843 2844 TI_LOCK(sc); 2845 /* Init the gen info block, ring control blocks and firmware. */ 2846 if (ti_gibinit(sc)) { 2847 if_printf(sc->ti_ifp, "initialization failure\n"); 2848 TI_UNLOCK(sc); 2849 return; 2850 } 2851 2852 TI_UNLOCK(sc); 2853 } 2854 2855 static void ti_init2(sc) 2856 struct ti_softc *sc; 2857 { 2858 struct ti_cmd_desc cmd; 2859 struct ifnet *ifp; 2860 u_int16_t *m; 2861 struct ifmedia *ifm; 2862 int tmp; 2863 2864 ifp = sc->ti_ifp; 2865 2866 /* Specify MTU and interface index. */ 2867 CSR_WRITE_4(sc, TI_GCR_IFINDEX, sc->ti_unit); 2868 CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu + 2869 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 2870 TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0); 2871 2872 /* Load our MAC address. */ 2873 m = (u_int16_t *)IF_LLADDR(sc->ti_ifp); 2874 CSR_WRITE_4(sc, TI_GCR_PAR0, htons(m[0])); 2875 CSR_WRITE_4(sc, TI_GCR_PAR1, (htons(m[1]) << 16) | htons(m[2])); 2876 TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0); 2877 2878 /* Enable or disable promiscuous mode as needed. */ 2879 if (ifp->if_flags & IFF_PROMISC) { 2880 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0); 2881 } else { 2882 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0); 2883 } 2884 2885 /* Program multicast filter. */ 2886 ti_setmulti(sc); 2887 2888 /* 2889 * If this is a Tigon 1, we should tell the 2890 * firmware to use software packet filtering. 2891 */ 2892 if (sc->ti_hwrev == TI_HWREV_TIGON) { 2893 TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0); 2894 } 2895 2896 /* Init RX ring. */ 2897 ti_init_rx_ring_std(sc); 2898 2899 /* Init jumbo RX ring. */ 2900 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2901 ti_init_rx_ring_jumbo(sc); 2902 2903 /* 2904 * If this is a Tigon 2, we can also configure the 2905 * mini ring. 2906 */ 2907 if (sc->ti_hwrev == TI_HWREV_TIGON_II) 2908 ti_init_rx_ring_mini(sc); 2909 2910 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0); 2911 sc->ti_rx_saved_considx = 0; 2912 2913 /* Init TX ring. */ 2914 ti_init_tx_ring(sc); 2915 2916 /* Tell firmware we're alive. */ 2917 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0); 2918 2919 /* Enable host interrupts. */ 2920 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 2921 2922 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2923 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2924 2925 /* 2926 * Make sure to set media properly. 
We have to do this 2927 * here since we have to issue commands in order to set 2928 * the link negotiation and we can't issue commands until 2929 * the firmware is running. 2930 */ 2931 ifm = &sc->ifmedia; 2932 tmp = ifm->ifm_media; 2933 ifm->ifm_media = ifm->ifm_cur->ifm_media; 2934 ti_ifmedia_upd(ifp); 2935 ifm->ifm_media = tmp; 2936 } 2937 2938 /* 2939 * Set media options. 2940 */ 2941 static int 2942 ti_ifmedia_upd(ifp) 2943 struct ifnet *ifp; 2944 { 2945 struct ti_softc *sc; 2946 struct ifmedia *ifm; 2947 struct ti_cmd_desc cmd; 2948 u_int32_t flowctl; 2949 2950 sc = ifp->if_softc; 2951 ifm = &sc->ifmedia; 2952 2953 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2954 return (EINVAL); 2955 2956 flowctl = 0; 2957 2958 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2959 case IFM_AUTO: 2960 /* 2961 * Transmit flow control doesn't work on the Tigon 1. 2962 */ 2963 flowctl = TI_GLNK_RX_FLOWCTL_Y; 2964 2965 /* 2966 * Transmit flow control can also cause problems on the 2967 * Tigon 2, apparantly with both the copper and fiber 2968 * boards. The symptom is that the interface will just 2969 * hang. This was reproduced with Alteon 180 switches. 2970 */ 2971 #if 0 2972 if (sc->ti_hwrev != TI_HWREV_TIGON) 2973 flowctl |= TI_GLNK_TX_FLOWCTL_Y; 2974 #endif 2975 2976 CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB| 2977 TI_GLNK_FULL_DUPLEX| flowctl | 2978 TI_GLNK_AUTONEGENB|TI_GLNK_ENB); 2979 2980 flowctl = TI_LNK_RX_FLOWCTL_Y; 2981 #if 0 2982 if (sc->ti_hwrev != TI_HWREV_TIGON) 2983 flowctl |= TI_LNK_TX_FLOWCTL_Y; 2984 #endif 2985 2986 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB| 2987 TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX| flowctl | 2988 TI_LNK_AUTONEGENB|TI_LNK_ENB); 2989 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 2990 TI_CMD_CODE_NEGOTIATE_BOTH, 0); 2991 break; 2992 case IFM_1000_SX: 2993 case IFM_1000_T: 2994 flowctl = TI_GLNK_RX_FLOWCTL_Y; 2995 #if 0 2996 if (sc->ti_hwrev != TI_HWREV_TIGON) 2997 flowctl |= TI_GLNK_TX_FLOWCTL_Y; 2998 #endif 2999 3000 CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB| 3001 flowctl |TI_GLNK_ENB); 3002 CSR_WRITE_4(sc, TI_GCR_LINK, 0); 3003 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3004 TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX); 3005 } 3006 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 3007 TI_CMD_CODE_NEGOTIATE_GIGABIT, 0); 3008 break; 3009 case IFM_100_FX: 3010 case IFM_10_FL: 3011 case IFM_100_TX: 3012 case IFM_10_T: 3013 flowctl = TI_LNK_RX_FLOWCTL_Y; 3014 #if 0 3015 if (sc->ti_hwrev != TI_HWREV_TIGON) 3016 flowctl |= TI_LNK_TX_FLOWCTL_Y; 3017 #endif 3018 3019 CSR_WRITE_4(sc, TI_GCR_GLINK, 0); 3020 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF|flowctl); 3021 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX || 3022 IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { 3023 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB); 3024 } else { 3025 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB); 3026 } 3027 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3028 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX); 3029 } else { 3030 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX); 3031 } 3032 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 3033 TI_CMD_CODE_NEGOTIATE_10_100, 0); 3034 break; 3035 } 3036 3037 return (0); 3038 } 3039 3040 /* 3041 * Report current media status. 
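 *
 * This is the handler behind SIOCGIFMEDIA (dispatched through
 * ifmedia_ioctl() in ti_ioctl() below), i.e. it supplies the media
 * line reported by "ifconfig ti0". Speed and duplex are read from
 * TI_GCR_GLINK_STAT or TI_GCR_LINK_STAT depending on whether the last
 * link event from the firmware reported gigabit or 10/100 link-up.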
3042 */ 3043 static void 3044 ti_ifmedia_sts(ifp, ifmr) 3045 struct ifnet *ifp; 3046 struct ifmediareq *ifmr; 3047 { 3048 struct ti_softc *sc; 3049 u_int32_t media = 0; 3050 3051 sc = ifp->if_softc; 3052 3053 ifmr->ifm_status = IFM_AVALID; 3054 ifmr->ifm_active = IFM_ETHER; 3055 3056 if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) 3057 return; 3058 3059 ifmr->ifm_status |= IFM_ACTIVE; 3060 3061 if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) { 3062 media = CSR_READ_4(sc, TI_GCR_GLINK_STAT); 3063 if (sc->ti_copper) 3064 ifmr->ifm_active |= IFM_1000_T; 3065 else 3066 ifmr->ifm_active |= IFM_1000_SX; 3067 if (media & TI_GLNK_FULL_DUPLEX) 3068 ifmr->ifm_active |= IFM_FDX; 3069 else 3070 ifmr->ifm_active |= IFM_HDX; 3071 } else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) { 3072 media = CSR_READ_4(sc, TI_GCR_LINK_STAT); 3073 if (sc->ti_copper) { 3074 if (media & TI_LNK_100MB) 3075 ifmr->ifm_active |= IFM_100_TX; 3076 if (media & TI_LNK_10MB) 3077 ifmr->ifm_active |= IFM_10_T; 3078 } else { 3079 if (media & TI_LNK_100MB) 3080 ifmr->ifm_active |= IFM_100_FX; 3081 if (media & TI_LNK_10MB) 3082 ifmr->ifm_active |= IFM_10_FL; 3083 } 3084 if (media & TI_LNK_FULL_DUPLEX) 3085 ifmr->ifm_active |= IFM_FDX; 3086 if (media & TI_LNK_HALF_DUPLEX) 3087 ifmr->ifm_active |= IFM_HDX; 3088 } 3089 } 3090 3091 static int 3092 ti_ioctl(ifp, command, data) 3093 struct ifnet *ifp; 3094 u_long command; 3095 caddr_t data; 3096 { 3097 struct ti_softc *sc = ifp->if_softc; 3098 struct ifreq *ifr = (struct ifreq *) data; 3099 int mask, error = 0; 3100 struct ti_cmd_desc cmd; 3101 3102 TI_LOCK(sc); 3103 3104 switch (command) { 3105 case SIOCSIFMTU: 3106 if (ifr->ifr_mtu > TI_JUMBO_MTU) 3107 error = EINVAL; 3108 else { 3109 ifp->if_mtu = ifr->ifr_mtu; 3110 ti_init(sc); 3111 } 3112 break; 3113 case SIOCSIFFLAGS: 3114 if (ifp->if_flags & IFF_UP) { 3115 /* 3116 * If only the state of the PROMISC flag changed, 3117 * then just use the 'set promisc mode' command 3118 * instead of reinitializing the entire NIC. Doing 3119 * a full re-init means reloading the firmware and 3120 * waiting for it to start up, which may take a 3121 * second or two. 
3122 */ 3123 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 3124 ifp->if_flags & IFF_PROMISC && 3125 !(sc->ti_if_flags & IFF_PROMISC)) { 3126 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, 3127 TI_CMD_CODE_PROMISC_ENB, 0); 3128 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 3129 !(ifp->if_flags & IFF_PROMISC) && 3130 sc->ti_if_flags & IFF_PROMISC) { 3131 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, 3132 TI_CMD_CODE_PROMISC_DIS, 0); 3133 } else 3134 ti_init(sc); 3135 } else { 3136 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3137 ti_stop(sc); 3138 } 3139 } 3140 sc->ti_if_flags = ifp->if_flags; 3141 error = 0; 3142 break; 3143 case SIOCADDMULTI: 3144 case SIOCDELMULTI: 3145 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3146 ti_setmulti(sc); 3147 error = 0; 3148 } 3149 break; 3150 case SIOCSIFMEDIA: 3151 case SIOCGIFMEDIA: 3152 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 3153 break; 3154 case SIOCSIFCAP: 3155 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3156 if (mask & IFCAP_HWCSUM) { 3157 if (IFCAP_HWCSUM & ifp->if_capenable) 3158 ifp->if_capenable &= ~IFCAP_HWCSUM; 3159 else 3160 ifp->if_capenable |= IFCAP_HWCSUM; 3161 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3162 ti_init(sc); 3163 } 3164 error = 0; 3165 break; 3166 default: 3167 error = ether_ioctl(ifp, command, data); 3168 break; 3169 } 3170 3171 TI_UNLOCK(sc); 3172 3173 return (error); 3174 } 3175 3176 static int 3177 ti_open(struct cdev *dev, int flags, int fmt, struct thread *td) 3178 { 3179 struct ti_softc *sc; 3180 3181 sc = dev->si_drv1; 3182 if (sc == NULL) 3183 return (ENODEV); 3184 3185 TI_LOCK(sc); 3186 sc->ti_flags |= TI_FLAG_DEBUGING; 3187 TI_UNLOCK(sc); 3188 3189 return (0); 3190 } 3191 3192 static int 3193 ti_close(struct cdev *dev, int flag, int fmt, struct thread *td) 3194 { 3195 struct ti_softc *sc; 3196 3197 sc = dev->si_drv1; 3198 if (sc == NULL) 3199 return (ENODEV); 3200 3201 TI_LOCK(sc); 3202 sc->ti_flags &= ~TI_FLAG_DEBUGING; 3203 TI_UNLOCK(sc); 3204 3205 return (0); 3206 } 3207 3208 /* 3209 * This ioctl routine goes along with the Tigon character device. 
3210 */ 3211 static int 3212 ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 3213 struct thread *td) 3214 { 3215 int error; 3216 struct ti_softc *sc; 3217 3218 sc = dev->si_drv1; 3219 if (sc == NULL) 3220 return (ENODEV); 3221 3222 error = 0; 3223 3224 switch (cmd) { 3225 case TIIOCGETSTATS: 3226 { 3227 struct ti_stats *outstats; 3228 3229 outstats = (struct ti_stats *)addr; 3230 3231 TI_LOCK(sc); 3232 bcopy(&sc->ti_rdata->ti_info.ti_stats, outstats, 3233 sizeof(struct ti_stats)); 3234 TI_UNLOCK(sc); 3235 break; 3236 } 3237 case TIIOCGETPARAMS: 3238 { 3239 struct ti_params *params; 3240 3241 params = (struct ti_params *)addr; 3242 3243 TI_LOCK(sc); 3244 params->ti_stat_ticks = sc->ti_stat_ticks; 3245 params->ti_rx_coal_ticks = sc->ti_rx_coal_ticks; 3246 params->ti_tx_coal_ticks = sc->ti_tx_coal_ticks; 3247 params->ti_rx_max_coal_bds = sc->ti_rx_max_coal_bds; 3248 params->ti_tx_max_coal_bds = sc->ti_tx_max_coal_bds; 3249 params->ti_tx_buf_ratio = sc->ti_tx_buf_ratio; 3250 params->param_mask = TI_PARAM_ALL; 3251 TI_UNLOCK(sc); 3252 3253 error = 0; 3254 3255 break; 3256 } 3257 case TIIOCSETPARAMS: 3258 { 3259 struct ti_params *params; 3260 3261 params = (struct ti_params *)addr; 3262 3263 TI_LOCK(sc); 3264 if (params->param_mask & TI_PARAM_STAT_TICKS) { 3265 sc->ti_stat_ticks = params->ti_stat_ticks; 3266 CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks); 3267 } 3268 3269 if (params->param_mask & TI_PARAM_RX_COAL_TICKS) { 3270 sc->ti_rx_coal_ticks = params->ti_rx_coal_ticks; 3271 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, 3272 sc->ti_rx_coal_ticks); 3273 } 3274 3275 if (params->param_mask & TI_PARAM_TX_COAL_TICKS) { 3276 sc->ti_tx_coal_ticks = params->ti_tx_coal_ticks; 3277 CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, 3278 sc->ti_tx_coal_ticks); 3279 } 3280 3281 if (params->param_mask & TI_PARAM_RX_COAL_BDS) { 3282 sc->ti_rx_max_coal_bds = params->ti_rx_max_coal_bds; 3283 CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, 3284 sc->ti_rx_max_coal_bds); 3285 } 3286 3287 if (params->param_mask & TI_PARAM_TX_COAL_BDS) { 3288 sc->ti_tx_max_coal_bds = params->ti_tx_max_coal_bds; 3289 CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, 3290 sc->ti_tx_max_coal_bds); 3291 } 3292 3293 if (params->param_mask & TI_PARAM_TX_BUF_RATIO) { 3294 sc->ti_tx_buf_ratio = params->ti_tx_buf_ratio; 3295 CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, 3296 sc->ti_tx_buf_ratio); 3297 } 3298 TI_UNLOCK(sc); 3299 3300 error = 0; 3301 3302 break; 3303 } 3304 case TIIOCSETTRACE: { 3305 ti_trace_type trace_type; 3306 3307 trace_type = *(ti_trace_type *)addr; 3308 3309 /* 3310 * Set tracing to whatever the user asked for. Setting 3311 * this register to 0 should have the effect of disabling 3312 * tracing. 
3313 */ 3314 CSR_WRITE_4(sc, TI_GCR_NIC_TRACING, trace_type); 3315 3316 error = 0; 3317 3318 break; 3319 } 3320 case TIIOCGETTRACE: { 3321 struct ti_trace_buf *trace_buf; 3322 u_int32_t trace_start, cur_trace_ptr, trace_len; 3323 3324 trace_buf = (struct ti_trace_buf *)addr; 3325 3326 TI_LOCK(sc); 3327 trace_start = CSR_READ_4(sc, TI_GCR_NICTRACE_START); 3328 cur_trace_ptr = CSR_READ_4(sc, TI_GCR_NICTRACE_PTR); 3329 trace_len = CSR_READ_4(sc, TI_GCR_NICTRACE_LEN); 3330 3331 #if 0 3332 if_printf(sc->ti_ifp, "trace_start = %#x, cur_trace_ptr = %#x, " 3333 "trace_len = %d\n", trace_start, 3334 cur_trace_ptr, trace_len); 3335 if_printf(sc->ti_ifp, "trace_buf->buf_len = %d\n", 3336 trace_buf->buf_len); 3337 #endif 3338 3339 error = ti_copy_mem(sc, trace_start, min(trace_len, 3340 trace_buf->buf_len), 3341 (caddr_t)trace_buf->buf, 1, 1); 3342 3343 if (error == 0) { 3344 trace_buf->fill_len = min(trace_len, 3345 trace_buf->buf_len); 3346 if (cur_trace_ptr < trace_start) 3347 trace_buf->cur_trace_ptr = 3348 trace_start - cur_trace_ptr; 3349 else 3350 trace_buf->cur_trace_ptr = 3351 cur_trace_ptr - trace_start; 3352 } else 3353 trace_buf->fill_len = 0; 3354 TI_UNLOCK(sc); 3355 3356 break; 3357 } 3358 3359 /* 3360 * For debugging, five ioctls are needed: 3361 * ALT_ATTACH 3362 * ALT_READ_TG_REG 3363 * ALT_WRITE_TG_REG 3364 * ALT_READ_TG_MEM 3365 * ALT_WRITE_TG_MEM 3366 */ 3367 case ALT_ATTACH: 3368 /* 3369 * From what I can tell, Alteon's Solaris Tigon driver 3370 * only has one character device, so you have to attach 3371 * to the Tigon board you're interested in. This seems 3372 * like a not-so-good way to do things, since unless you 3373 * subsequently specify the unit number of the device 3374 * you're interested in in every ioctl, you'll only be 3375 * able to debug one board at a time. 3376 */ 3377 error = 0; 3378 break; 3379 case ALT_READ_TG_MEM: 3380 case ALT_WRITE_TG_MEM: 3381 { 3382 struct tg_mem *mem_param; 3383 u_int32_t sram_end, scratch_end; 3384 3385 mem_param = (struct tg_mem *)addr; 3386 3387 if (sc->ti_hwrev == TI_HWREV_TIGON) { 3388 sram_end = TI_END_SRAM_I; 3389 scratch_end = TI_END_SCRATCH_I; 3390 } else { 3391 sram_end = TI_END_SRAM_II; 3392 scratch_end = TI_END_SCRATCH_II; 3393 } 3394 3395 /* 3396 * For now, we'll only handle accessing regular SRAM, 3397 * nothing else. 3398 */ 3399 TI_LOCK(sc); 3400 if ((mem_param->tgAddr >= TI_BEG_SRAM) 3401 && ((mem_param->tgAddr + mem_param->len) <= sram_end)) { 3402 /* 3403 * In this instance, we always copy to/from user 3404 * space, so the user space argument is set to 1. 3405 */ 3406 error = ti_copy_mem(sc, mem_param->tgAddr, 3407 mem_param->len, 3408 mem_param->userAddr, 1, 3409 (cmd == ALT_READ_TG_MEM) ? 1 : 0); 3410 } else if ((mem_param->tgAddr >= TI_BEG_SCRATCH) 3411 && (mem_param->tgAddr <= scratch_end)) { 3412 error = ti_copy_scratch(sc, mem_param->tgAddr, 3413 mem_param->len, 3414 mem_param->userAddr, 1, 3415 (cmd == ALT_READ_TG_MEM) ? 3416 1 : 0, TI_PROCESSOR_A); 3417 } else if ((mem_param->tgAddr >= TI_BEG_SCRATCH_B_DEBUG) 3418 && (mem_param->tgAddr <= TI_BEG_SCRATCH_B_DEBUG)) { 3419 if (sc->ti_hwrev == TI_HWREV_TIGON) { 3420 if_printf(sc->ti_ifp, 3421 "invalid memory range for Tigon I\n"); 3422 error = EINVAL; 3423 break; 3424 } 3425 error = ti_copy_scratch(sc, mem_param->tgAddr - 3426 TI_SCRATCH_DEBUG_OFF, 3427 mem_param->len, 3428 mem_param->userAddr, 1, 3429 (cmd == ALT_READ_TG_MEM) ? 
3430 1 : 0, TI_PROCESSOR_B); 3431 } else { 3432 if_printf(sc->ti_ifp, "memory address %#x len %d is " 3433 "out of supported range\n", 3434 mem_param->tgAddr, mem_param->len); 3435 error = EINVAL; 3436 } 3437 TI_UNLOCK(sc); 3438 3439 break; 3440 } 3441 case ALT_READ_TG_REG: 3442 case ALT_WRITE_TG_REG: 3443 { 3444 struct tg_reg *regs; 3445 u_int32_t tmpval; 3446 3447 regs = (struct tg_reg *)addr; 3448 3449 /* 3450 * Make sure the address in question isn't out of range. 3451 */ 3452 if (regs->addr > TI_REG_MAX) { 3453 error = EINVAL; 3454 break; 3455 } 3456 TI_LOCK(sc); 3457 if (cmd == ALT_READ_TG_REG) { 3458 bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle, 3459 regs->addr, &tmpval, 1); 3460 regs->data = ntohl(tmpval); 3461 #if 0 3462 if ((regs->addr == TI_CPU_STATE) 3463 || (regs->addr == TI_CPU_CTL_B)) { 3464 if_printf(sc->ti_ifp, "register %#x = %#x\n", 3465 regs->addr, tmpval); 3466 } 3467 #endif 3468 } else { 3469 tmpval = htonl(regs->data); 3470 bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, 3471 regs->addr, &tmpval, 1); 3472 } 3473 TI_UNLOCK(sc); 3474 3475 break; 3476 } 3477 default: 3478 error = ENOTTY; 3479 break; 3480 } 3481 return (error); 3482 } 3483 3484 static void 3485 ti_watchdog(ifp) 3486 struct ifnet *ifp; 3487 { 3488 struct ti_softc *sc; 3489 3490 sc = ifp->if_softc; 3491 TI_LOCK(sc); 3492 3493 /* 3494 * When we're debugging, the chip is often stopped for long periods 3495 * of time, and that would normally cause the watchdog timer to fire. 3496 * Since that impedes debugging, we don't want to do that. 3497 */ 3498 if (sc->ti_flags & TI_FLAG_DEBUGING) { 3499 TI_UNLOCK(sc); 3500 return; 3501 } 3502 3503 if_printf(ifp, "watchdog timeout -- resetting\n"); 3504 ti_stop(sc); 3505 ti_init(sc); 3506 3507 ifp->if_oerrors++; 3508 TI_UNLOCK(sc); 3509 } 3510 3511 /* 3512 * Stop the adapter and free any mbufs allocated to the 3513 * RX and TX lists. 3514 */ 3515 static void 3516 ti_stop(sc) 3517 struct ti_softc *sc; 3518 { 3519 struct ifnet *ifp; 3520 struct ti_cmd_desc cmd; 3521 3522 TI_LOCK(sc); 3523 3524 ifp = sc->ti_ifp; 3525 3526 /* Disable host interrupts. */ 3527 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 3528 /* 3529 * Tell firmware we're shutting down. 3530 */ 3531 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0); 3532 3533 /* Halt and reinitialize. */ 3534 ti_chipinit(sc); 3535 ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL); 3536 ti_chipinit(sc); 3537 3538 /* Free the RX lists. */ 3539 ti_free_rx_ring_std(sc); 3540 3541 /* Free jumbo RX list. */ 3542 ti_free_rx_ring_jumbo(sc); 3543 3544 /* Free mini RX list. */ 3545 ti_free_rx_ring_mini(sc); 3546 3547 /* Free TX buffers. */ 3548 ti_free_tx_ring(sc); 3549 3550 sc->ti_ev_prodidx.ti_idx = 0; 3551 sc->ti_return_prodidx.ti_idx = 0; 3552 sc->ti_tx_considx.ti_idx = 0; 3553 sc->ti_tx_saved_considx = TI_TXCONS_UNSET; 3554 3555 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 3556 TI_UNLOCK(sc); 3557 } 3558 3559 /* 3560 * Stop all chip I/O so that the kernel's probe routines don't 3561 * get confused by errant DMAs when rebooting. 3562 */ 3563 static void 3564 ti_shutdown(dev) 3565 device_t dev; 3566 { 3567 struct ti_softc *sc; 3568 3569 sc = device_get_softc(dev); 3570 TI_LOCK(sc); 3571 ti_chipinit(sc); 3572 TI_UNLOCK(sc); 3573 } 3574