1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 28 /* 29 * dnet -- DEC 21x4x 30 * 31 * Currently supports: 32 * 21040, 21041, 21140, 21142, 21143 33 * SROM versions 1, 3, 3.03, 4 34 * TP, AUI, BNC, 100BASETX, 100BASET4 35 * 36 * XXX NEEDSWORK 37 * All media SHOULD work, FX is untested 38 * 39 * Depends on the Generic LAN Driver utility functions in /kernel/misc/mac 40 */ 41 42 #define BUG_4010796 /* See 4007871, 4010796 */ 43 44 #include <sys/types.h> 45 #include <sys/errno.h> 46 #include <sys/param.h> 47 #include <sys/stropts.h> 48 #include <sys/stream.h> 49 #include <sys/kmem.h> 50 #include <sys/conf.h> 51 #include <sys/devops.h> 52 #include <sys/ksynch.h> 53 #include <sys/stat.h> 54 #include <sys/modctl.h> 55 #include <sys/debug.h> 56 #include <sys/dlpi.h> 57 #include <sys/ethernet.h> 58 #include <sys/vlan.h> 59 #include <sys/mac.h> 60 #include <sys/mac_ether.h> 61 #include <sys/mac_provider.h> 62 #include <sys/pci.h> 63 #include <sys/ddi.h> 64 #include <sys/sunddi.h> 65 #include <sys/strsun.h> 66 67 #include "dnet_mii.h" 68 #include "dnet.h" 69 70 /* 
71 * Declarations and Module Linkage 72 */ 73 74 #define IDENT "DNET 21x4x" 75 76 /* 77 * #define DNET_NOISY 78 * #define SROMDEBUG 79 * #define SROMDUMPSTRUCTURES 80 */ 81 82 #ifdef DNETDEBUG 83 #ifdef DNET_NOISY 84 int dnetdebug = -1; 85 #else 86 int dnetdebug = 0; 87 #endif 88 #endif 89 90 /* used for message allocated using desballoc() */ 91 struct free_ptr { 92 struct free_rtn free_rtn; 93 caddr_t buf; 94 }; 95 96 struct rbuf_list { 97 struct rbuf_list *rbuf_next; /* next in the list */ 98 caddr_t rbuf_vaddr; /* virual addr of the buf */ 99 uint32_t rbuf_paddr; /* physical addr of the buf */ 100 uint32_t rbuf_endpaddr; /* physical addr at the end */ 101 ddi_dma_handle_t rbuf_dmahdl; /* dma handle */ 102 ddi_acc_handle_t rbuf_acchdl; /* handle for DDI functions */ 103 }; 104 105 /* Required system entry points */ 106 static int dnet_probe(dev_info_t *); 107 static int dnet_attach(dev_info_t *, ddi_attach_cmd_t); 108 static int dnet_detach(dev_info_t *, ddi_detach_cmd_t); 109 static int dnet_quiesce(dev_info_t *); 110 111 /* Required driver entry points for GLDv3 */ 112 static int dnet_m_start(void *); 113 static void dnet_m_stop(void *); 114 static int dnet_m_getstat(void *, uint_t, uint64_t *); 115 static int dnet_m_setpromisc(void *, boolean_t); 116 static int dnet_m_multicst(void *, boolean_t, const uint8_t *); 117 static int dnet_m_unicst(void *, const uint8_t *); 118 static mblk_t *dnet_m_tx(void *, mblk_t *); 119 120 static uint_t dnet_intr(caddr_t); 121 122 /* Internal functions used by the above entry points */ 123 static void write_gpr(struct dnetinstance *dnetp, uint32_t val); 124 static void dnet_reset_board(struct dnetinstance *); 125 static void dnet_init_board(struct dnetinstance *); 126 static void dnet_chip_init(struct dnetinstance *); 127 static uint32_t hashindex(const uint8_t *); 128 static int dnet_start(struct dnetinstance *); 129 static int dnet_set_addr(struct dnetinstance *); 130 131 static boolean_t dnet_send(struct dnetinstance *, 
mblk_t *); 132 133 static void dnet_getp(struct dnetinstance *); 134 static void update_rx_stats(struct dnetinstance *, int); 135 static void update_tx_stats(struct dnetinstance *, int); 136 137 /* Media Selection Setup Routines */ 138 static void set_gpr(struct dnetinstance *); 139 static void set_opr(struct dnetinstance *); 140 static void set_sia(struct dnetinstance *); 141 142 /* Buffer Management Routines */ 143 static int dnet_alloc_bufs(struct dnetinstance *); 144 static void dnet_free_bufs(struct dnetinstance *); 145 static void dnet_init_txrx_bufs(struct dnetinstance *); 146 static int alloc_descriptor(struct dnetinstance *); 147 static void dnet_reclaim_Tx_desc(struct dnetinstance *); 148 static int dnet_rbuf_init(dev_info_t *, int); 149 static int dnet_rbuf_destroy(); 150 static struct rbuf_list *dnet_rbuf_alloc(dev_info_t *, int); 151 static void dnet_rbuf_free(caddr_t); 152 static void dnet_freemsg_buf(struct free_ptr *); 153 154 static void setup_block(struct dnetinstance *); 155 156 /* SROM read functions */ 157 static int dnet_read_srom(dev_info_t *, int, ddi_acc_handle_t, caddr_t, 158 uchar_t *, int); 159 static void dnet_read21040addr(dev_info_t *, ddi_acc_handle_t, caddr_t, 160 uchar_t *, int *); 161 static void dnet_read21140srom(ddi_acc_handle_t, caddr_t, uchar_t *, int); 162 static int get_alternative_srom_image(dev_info_t *, uchar_t *, int); 163 static void dnet_print_srom(SROM_FORMAT *sr); 164 static void dnet_dump_leaf(LEAF_FORMAT *leaf); 165 static void dnet_dump_block(media_block_t *block); 166 #ifdef BUG_4010796 167 static void set_alternative_srom_image(dev_info_t *, uchar_t *, int); 168 static int dnet_hack(dev_info_t *); 169 #endif 170 171 static int dnet_hack_interrupts(struct dnetinstance *, int); 172 static int dnet_detach_hacked_interrupt(dev_info_t *devinfo); 173 static void enable_interrupts(struct dnetinstance *); 174 175 /* SROM parsing functions */ 176 static void dnet_parse_srom(struct dnetinstance *dnetp, SROM_FORMAT *sr, 
    uchar_t *vi);
static void parse_controller_leaf(struct dnetinstance *dnetp, LEAF_FORMAT *leaf,
    uchar_t *vi);
static uchar_t *parse_media_block(struct dnetinstance *dnetp,
    media_block_t *block, uchar_t *vi);
static int check_srom_valid(uchar_t *);
static void dnet_dumpbin(char *msg, uchar_t *, int size, int len);
static void setup_legacy_blocks();
/* Active Media Determination Routines */
static void find_active_media(struct dnetinstance *);
static int send_test_packet(struct dnetinstance *);
static int dnet_link_sense(struct dnetinstance *);

/* PHY MII Routines */
static ushort_t dnet_mii_read(dev_info_t *dip, int phy_addr, int reg_num);
static void dnet_mii_write(dev_info_t *dip, int phy_addr, int reg_num,
    int reg_dat);
static void write_mii(struct dnetinstance *, uint32_t, int);
static void mii_tristate(struct dnetinstance *);
static void do_phy(struct dnetinstance *);
static void dnet_mii_link_cb(dev_info_t *, int, enum mii_phy_state);
static void set_leaf(SROM_FORMAT *sr, LEAF_FORMAT *leaf);

#ifdef DNETDEBUG
uint32_t dnet_usecelapsed(struct dnetinstance *dnetp);
void dnet_timestamp(struct dnetinstance *, char *);
void dnet_usectimeout(struct dnetinstance *, uint32_t, int, timercb_t);
#endif

/*
 * Human-readable media names, indexed by media_code (see the DNETSENSE
 * message in dnet_attach()).
 */
static char *media_str[] = {
	"10BaseT",
	"10Base2",
	"10Base5",
	"100BaseTX",
	"10BaseT FD",
	"100BaseTX FD",
	"100BaseT4",
	"100BaseFX",
	"100BaseFX FD",
	"MII"
};

/* default SROM info for cards with no SROMs */
static LEAF_FORMAT leaf_default_100;
static LEAF_FORMAT leaf_asante;
static LEAF_FORMAT leaf_phylegacy;
static LEAF_FORMAT leaf_cogent_100;
static LEAF_FORMAT leaf_21041;
static LEAF_FORMAT leaf_21040;

/* rx buffer size (rounded up to 4) */
int rx_buf_size = (ETHERMAX + ETHERFCSL + VLAN_TAGSZ + 3) & ~3;

int max_rx_desc_21040 = MAX_RX_DESC_21040;
int max_rx_desc_21140 = MAX_RX_DESC_21140;
int max_tx_desc = MAX_TX_DESC;
int dnet_xmit_threshold = MAX_TX_DESC >> 2;	/* XXX need tuning? */

static kmutex_t dnet_rbuf_lock;		/* mutex to protect rbuf_list data */

/* used for buffers allocated by ddi_dma_mem_alloc() */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* dma_attr version */
	0,			/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	0x7FFFFFFF,		/* dma_attr_count_max */
	4,			/* dma_attr_align */
	0x3F,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/* used for buffers allocated for rbuf, allow 2 cookies */
static ddi_dma_attr_t dma_attr_rb = {
	DMA_ATTR_V0,		/* dma_attr version */
	0,			/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	0x7FFFFFFF,		/* dma_attr_count_max */
	4,			/* dma_attr_align */
	0x3F,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	2,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/* used for buffers which are NOT from ddi_dma_mem_alloc() - xmit side */
static ddi_dma_attr_t dma_attr_tx = {
	DMA_ATTR_V0,		/* dma_attr version */
	0,			/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	0x7FFFFFFF,		/* dma_attr_count_max */
	1,			/* dma_attr_align */
	0x3F,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	0x7FFF,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/* register access: never byte-swap, strictly ordered (MMIO) */
static ddi_device_acc_attr_t accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
};

uchar_t dnet_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* Standard Module linkage initialization for a Streams driver */
extern struct mod_ops mod_driverops;

DDI_DEFINE_STREAM_OPS(dnet_devops, nulldev, dnet_probe, dnet_attach,
    dnet_detach, nodev, NULL, D_MP, NULL, dnet_quiesce);

static struct modldrv dnet_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	IDENT,			/* short description */
	&dnet_devops		/* driver specific ops */
};

static struct modlinkage dnet_modlinkage = {
	MODREV_1,		/* ml_rev */
	{ &dnet_modldrv, NULL }	/* ml_linkage */
};

/* GLDv3 entry points (see the dnet_m_* functions below) */
static mac_callbacks_t dnet_m_callbacks = {
	0,			/* mc_callbacks */
	dnet_m_getstat,		/* mc_getstat */
	dnet_m_start,		/* mc_start */
	dnet_m_stop,		/* mc_stop */
	dnet_m_setpromisc,	/* mc_setpromisc */
	dnet_m_multicst,	/* mc_multicst */
	dnet_m_unicst,		/* mc_unicst */
	dnet_m_tx,		/* mc_tx */
	NULL,
	NULL,			/* mc_ioctl */
	NULL,			/* mc_getcapab */
	NULL,			/* mc_open */
	NULL			/* mc_close */
};

/*
 * Passed to the hacked interrupt for multiport Cogent and ZNYX cards with
 * dodgy interrupt routing
 */
#define	MAX_INST 8	/* Maximum instances on a multiport adapter.
 */
struct hackintr_inf
{
	struct dnetinstance *dnetps[MAX_INST];	/* dnetps for each port */
	dev_info_t *devinfo;	/* Devinfo of the primary device */
	kmutex_t lock;
	/* Ensures the interrupt doesn't get called while detaching */
};
static char hackintr_propname[] = "InterruptData";
static char macoffset_propname[] = "MAC_offset";
static char speed_propname[] = "speed";
static char ofloprob_propname[] = "dmaworkaround";
static char duplex_propname[] = "full-duplex";	/* Must agree with MII */
static char printsrom_propname[] = "print-srom";

static uint_t dnet_hack_intr(struct hackintr_inf *);

/*
 * _init(9E) -- module load: initialize the receive-buffer list lock and
 * the fake SROM images for legacy cards, then register with the MAC
 * framework and install the module.  Undoes everything on failure.
 */
int
_init(void)
{
	int i;

	/* Configure fake sroms for legacy cards */
	mutex_init(&dnet_rbuf_lock, NULL, MUTEX_DRIVER, NULL);
	setup_legacy_blocks();

	mac_init_ops(&dnet_devops, "dnet");

	if ((i = mod_install(&dnet_modlinkage)) != 0) {
		mac_fini_ops(&dnet_devops);
		mutex_destroy(&dnet_rbuf_lock);
	}
	return (i);
}

/*
 * _fini(9E) -- module unload: only tears down the rbuf list and lock
 * once mod_remove() has succeeded, so no instance can still be using
 * them.  Spins (with a delay) until all receive buffers are returned.
 */
int
_fini(void)
{
	int i;

	if ((i = mod_remove(&dnet_modlinkage)) == 0) {
		mac_fini_ops(&dnet_devops);

		/* loop until all the receive buffers are freed */
		while (dnet_rbuf_destroy() != 0) {
			delay(drv_usectohz(100000));
#ifdef DNETDEBUG
			if (dnetdebug & DNETDDI)
				cmn_err(CE_WARN, "dnet _fini delay");
#endif
		}
		mutex_destroy(&dnet_rbuf_lock);
	}
	return (i);
}

/* _info(9E) -- report module information */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&dnet_modlinkage, modinfop));
}

/*
 * probe(9E) -- Determine if a device is present
 */
static int
dnet_probe(dev_info_t *devinfo)
{
	ddi_acc_handle_t handle;
	uint16_t vendorid;
	uint16_t deviceid;

	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
		return (DDI_PROBE_FAILURE);

	vendorid = pci_config_get16(handle, PCI_CONF_VENID);

	if (vendorid != DEC_VENDOR_ID) {
		pci_config_teardown(&handle);
		return (DDI_PROBE_FAILURE);
	}

	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
	switch (deviceid) {
	case DEVICE_ID_21040:
	case DEVICE_ID_21041:
	case DEVICE_ID_21140:
	case DEVICE_ID_21143:	/* And 142 */
		break;
	default:
		pci_config_teardown(&handle);
		return (DDI_PROBE_FAILURE);
	}

	pci_config_teardown(&handle);
#ifndef BUG_4010796
	return (DDI_PROBE_SUCCESS);
#else
	/* also verify that the SROM is reachable; see dnet_hack() */
	return (dnet_hack(devinfo));
#endif
}

#ifdef BUG_4010796
/*
 * If we have a device, but we cannot presently access its SROM data,
 * then we return DDI_PROBE_PARTIAL and hope that sometime later we
 * will be able to get at the SROM data. This can only happen if we
 * are a secondary port with no SROM, and the bootstrap failed to set
 * our DNET_SROM property, and our primary sibling has not yet probed.
 */
static int
dnet_hack(dev_info_t *devinfo)
{
	uchar_t vendor_info[SROM_SIZE];
	uint32_t csr;
	uint16_t deviceid;
	ddi_acc_handle_t handle;
	uint32_t retval;
	int secondary;
	ddi_acc_handle_t io_handle;
	caddr_t io_reg;

#define	DNET_PCI_RNUMBER	1

	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
		return (DDI_PROBE_FAILURE);

	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);

	/*
	 * Turn on Master Enable and IO Enable bits.
	 */
	csr = pci_config_get32(handle, PCI_CONF_COMM);
	pci_config_put32(handle, PCI_CONF_COMM, (csr |PCI_COMM_ME|PCI_COMM_IO));

	pci_config_teardown(&handle);

	/* Now map I/O register */
	if (ddi_regs_map_setup(devinfo, DNET_PCI_RNUMBER,
	    &io_reg, 0, 0, &accattr, &io_handle) != DDI_SUCCESS) {
		return (DDI_PROBE_FAILURE);
	}

	/*
	 * Reset the chip
	 */
	ddi_put32(io_handle, REG32(io_reg, BUS_MODE_REG), SW_RESET);
	drv_usecwait(3);
	ddi_put32(io_handle, REG32(io_reg, BUS_MODE_REG), 0);
	drv_usecwait(8);

	secondary = dnet_read_srom(devinfo, deviceid, io_handle,
	    io_reg, vendor_info, sizeof (vendor_info));

	switch (secondary) {
	case -1:
		/* We can't access our SROM data! */
		retval = DDI_PROBE_PARTIAL;
		break;
	case 0:
		retval = DDI_PROBE_SUCCESS;
		break;
	default:
		retval = DDI_PROBE_SUCCESS;
	}

	ddi_regs_map_free(&io_handle);
	return (retval);
}
#endif /* BUG_4010796 */

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
dnet_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	uint16_t revid;
	struct dnetinstance *dnetp;	/* Our private device info */
	mac_register_t *macp;
	uchar_t vendor_info[SROM_SIZE];
	uint32_t csr;
	uint16_t deviceid;
	ddi_acc_handle_t handle;
	int secondary;

/* same value as the identical definition in dnet_hack(); benign */
#define	DNET_PCI_RNUMBER	1

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		/* Get the driver private (dnetinstance) structure */
		dnetp = ddi_get_driver_private(devinfo);

		/* re-init the chip; lock order: intrlock before txlock */
		mutex_enter(&dnetp->intrlock);
		mutex_enter(&dnetp->txlock);
		dnet_reset_board(dnetp);
		dnet_init_board(dnetp);
		dnetp->suspended = B_FALSE;

		if (dnetp->running) {
			dnetp->need_tx_update = B_FALSE;
			mutex_exit(&dnetp->txlock);
			(void) dnet_start(dnetp);
			mutex_exit(&dnetp->intrlock);
			/* tell MAC that transmit may make progress again */
			mac_tx_update(dnetp->mac_handle);
		} else {
			mutex_exit(&dnetp->txlock);
			mutex_exit(&dnetp->intrlock);
		}
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (pci_config_setup(devinfo, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	deviceid = pci_config_get16(handle, PCI_CONF_DEVID);
	switch (deviceid) {
	case DEVICE_ID_21040:
	case DEVICE_ID_21041:
	case DEVICE_ID_21140:
	case DEVICE_ID_21143:	/* And 142 */
		break;
	default:
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	/*
	 * Turn on Master Enable and IO Enable bits.
	 */
	csr = pci_config_get32(handle, PCI_CONF_COMM);
	pci_config_put32(handle, PCI_CONF_COMM, (csr |PCI_COMM_ME|PCI_COMM_IO));

	/* Make sure the device is not asleep */
	csr = pci_config_get32(handle, PCI_DNET_CONF_CFDD);
	pci_config_put32(handle, PCI_DNET_CONF_CFDD,
	    csr & ~(CFDD_SLEEP|CFDD_SNOOZE));

	revid = pci_config_get8(handle, PCI_CONF_REVID);
	pci_config_teardown(&handle);

	dnetp = kmem_zalloc(sizeof (struct dnetinstance), KM_SLEEP);
	ddi_set_driver_private(devinfo, dnetp);

	/* Now map I/O register */
	if (ddi_regs_map_setup(devinfo, DNET_PCI_RNUMBER, &dnetp->io_reg,
	    0, 0, &accattr, &dnetp->io_handle) != DDI_SUCCESS) {
		kmem_free(dnetp, sizeof (struct dnetinstance));
		return (DDI_FAILURE);
	}

	dnetp->devinfo = devinfo;
	dnetp->board_type = deviceid;

	/*
	 * Get the iblock cookie with which to initialize the mutexes.
	 */
	if (ddi_get_iblock_cookie(devinfo, 0, &dnetp->icookie)
	    != DDI_SUCCESS)
		goto fail;

	/*
	 * Initialize mutex's for this device.
	 * Do this before registering the interrupt handler to avoid
	 * condition where interrupt handler can try using uninitialized
	 * mutex.
	 * Lock ordering rules: always lock intrlock first before
	 * txlock if both are required.
	 */
	mutex_init(&dnetp->txlock, NULL, MUTEX_DRIVER, dnetp->icookie);
	mutex_init(&dnetp->intrlock, NULL, MUTEX_DRIVER, dnetp->icookie);

	/*
	 * Get the BNC/TP indicator from the conf file for 21040
	 */
	dnetp->bnc_indicator =
	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "bncaui", -1);

	/*
	 * For 21140 check the data rate set in the conf file. Default is
	 * 100Mb/s. Disallow connections at settings that would conflict
	 * with what's in the conf file
	 */
	dnetp->speed =
	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    speed_propname, 0);
	dnetp->full_duplex =
	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    duplex_propname, -1);

	if (dnetp->speed == 100) {
		dnetp->disallowed_media |= (1UL<<MEDIA_TP) | (1UL<<MEDIA_TP_FD);
	} else if (dnetp->speed == 10) {
		dnetp->disallowed_media |=
		    (1UL<<MEDIA_SYM_SCR) | (1UL<<MEDIA_SYM_SCR_FD);
	}

	if (dnetp->full_duplex == 1) {
		dnetp->disallowed_media |=
		    (1UL<<MEDIA_TP) | (1UL<<MEDIA_SYM_SCR);
	} else if (dnetp->full_duplex == 0) {
		dnetp->disallowed_media |=
		    (1UL<<MEDIA_TP_FD) | (1UL<<MEDIA_SYM_SCR_FD);
	}

	if (dnetp->bnc_indicator == 0) /* Disable BNC and AUI media */
		dnetp->disallowed_media |= (1UL<<MEDIA_BNC) | (1UL<<MEDIA_AUI);
	else if (dnetp->bnc_indicator == 1) /* Force BNC only */
		dnetp->disallowed_media = (uint32_t)~(1U<<MEDIA_BNC);
	else if (dnetp->bnc_indicator == 2) /* Force AUI only */
		dnetp->disallowed_media = (uint32_t)~(1U<<MEDIA_AUI);

	dnet_reset_board(dnetp);

	secondary = dnet_read_srom(devinfo, dnetp->board_type, dnetp->io_handle,
	    dnetp->io_reg, vendor_info, sizeof (vendor_info));

	if (secondary == -1) /* ASSERT (vendor_info not big enough) */
		goto fail1;

	dnet_parse_srom(dnetp, &dnetp->sr, vendor_info);

	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    printsrom_propname, 0))
		dnet_print_srom(&dnetp->sr);

	dnetp->sr.netaddr[ETHERADDRL-1] += secondary;	/* unique ether addr */

	BCOPY((caddr_t)dnetp->sr.netaddr,
	    (caddr_t)dnetp->vendor_addr, ETHERADDRL);

	BCOPY((caddr_t)dnetp->sr.netaddr,
	    (caddr_t)dnetp->curr_macaddr, ETHERADDRL);

	/*
	 * determine whether to implement workaround from DEC
	 * for DMA overrun errata.
	 */
	dnetp->overrun_workaround =
	    ((dnetp->board_type == DEVICE_ID_21140 && revid >= 0x20) ||
	    (dnetp->board_type == DEVICE_ID_21143 && revid <= 0x30)) ? 1 : 0;

	/* the conf-file property overrides the revid heuristic above */
	dnetp->overrun_workaround =
	    ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    ofloprob_propname, dnetp->overrun_workaround);

	/*
	 * Add the interrupt handler if dnet_hack_interrupts() returns 0.
	 * Otherwise dnet_hack_interrupts() itself adds the handler.
	 */
	if (!dnet_hack_interrupts(dnetp, secondary)) {
		(void) ddi_add_intr(devinfo, 0, NULL,
		    NULL, dnet_intr, (caddr_t)dnetp);
	}

	dnetp->max_tx_desc = max_tx_desc;
	dnetp->max_rx_desc = max_rx_desc_21040;
	if (dnetp->board_type != DEVICE_ID_21040 &&
	    dnetp->board_type != DEVICE_ID_21041 &&
	    dnetp->speed != 10)
		dnetp->max_rx_desc = max_rx_desc_21140;

	/* Allocate the TX and RX descriptors/buffers. */
	if (dnet_alloc_bufs(dnetp) == FAILURE) {
		cmn_err(CE_WARN, "DNET: Not enough DMA memory for buffers.");
		goto fail2;
	}

	/*
	 * Register ourselves with the GLDv3 interface
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto fail2;

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = dnetp;
	macp->m_dip = devinfo;
	macp->m_src_addr = dnetp->curr_macaddr;
	macp->m_callbacks = &dnet_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	if (mac_register(macp, &dnetp->mac_handle) == 0) {
		mac_free(macp);

		mutex_enter(&dnetp->intrlock);

		dnetp->phyaddr = -1;
		if (dnetp->board_type == DEVICE_ID_21140 ||
		    dnetp->board_type == DEVICE_ID_21143)
			do_phy(dnetp);	/* Initialize the PHY, if any */
		find_active_media(dnetp);

		/* if the chosen media is non-MII, stop the port monitor */
		if (dnetp->selected_media_block->media_code != MEDIA_MII &&
		    dnetp->mii != NULL) {
			mii_destroy(dnetp->mii);
			dnetp->mii = NULL;
			dnetp->phyaddr = -1;
		}

#ifdef DNETDEBUG
		if (dnetdebug & DNETSENSE)
			cmn_err(CE_NOTE, "dnet: link configured : %s",
			    media_str[dnetp->selected_media_block->media_code]);
#endif
		bzero(dnetp->setup_buf_vaddr, SETUPBUF_SIZE);

		dnet_reset_board(dnetp);
		dnet_init_board(dnetp);

		mutex_exit(&dnetp->intrlock);

		/* program our unicast address and the broadcast filter bit */
		(void) dnet_m_unicst(dnetp, dnetp->curr_macaddr);
		(void) dnet_m_multicst(dnetp, B_TRUE, dnet_broadcastaddr);

		return (DDI_SUCCESS);
	}

	mac_free(macp);
fail2:
	/* XXX function return value ignored */
	/*
	 * dnet_detach_hacked_interrupt() will remove
	 * interrupt for the non-hacked case also.
	 */
	(void) dnet_detach_hacked_interrupt(devinfo);
	dnet_free_bufs(dnetp);
fail1:
	mutex_destroy(&dnetp->txlock);
	mutex_destroy(&dnetp->intrlock);
fail:
	ddi_regs_map_free(&dnetp->io_handle);
	kmem_free(dnetp, sizeof (struct dnetinstance));
	return (DDI_FAILURE);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
dnet_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	int32_t rc;
	struct dnetinstance *dnetp;	/* Our private device info */
	int32_t proplen;

	/* Get the driver private (dnetinstance) structure */
	dnetp = ddi_get_driver_private(devinfo);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		/*
		 * NB: dnetp->suspended can only be modified (marked true)
		 * if both intrlock and txlock are held. This keeps both
		 * tx and rx code paths excluded.
		 */
		mutex_enter(&dnetp->intrlock);
		mutex_enter(&dnetp->txlock);
		dnetp->suspended = B_TRUE;
		dnet_reset_board(dnetp);
		mutex_exit(&dnetp->txlock);
		mutex_exit(&dnetp->intrlock);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Unregister ourselves from the GLDv3 interface
	 */
	if (mac_unregister(dnetp->mac_handle) != 0)
		return (DDI_FAILURE);

	/* stop the board if it is running */
	dnet_reset_board(dnetp);

	if ((rc = dnet_detach_hacked_interrupt(devinfo)) != DDI_SUCCESS)
		return (rc);

	if (dnetp->mii != NULL)
		mii_destroy(dnetp->mii);

	/* Free leaf information */
	set_leaf(&dnetp->sr, NULL);

	ddi_regs_map_free(&dnetp->io_handle);
	dnet_free_bufs(dnetp);
	mutex_destroy(&dnetp->txlock);
	mutex_destroy(&dnetp->intrlock);
	kmem_free(dnetp, sizeof (struct dnetinstance));

#ifdef BUG_4010796
	if (ddi_getproplen(DDI_DEV_T_ANY, devinfo, 0,
	    "DNET_HACK", &proplen) != DDI_PROP_SUCCESS)
		return (DDI_SUCCESS);

	/*
	 * We must remove the properties we added, because if we leave
	 * them in the devinfo nodes and the driver is unloaded, when
	 * the driver is reloaded the info will still be there, causing
	 * nodes which had returned PROBE_PARTIAL the first time to
	 * instead return PROBE_SUCCESS, in turn causing the nodes to be
	 * attached in a different order, causing their PPA numbers to
	 * be different the second time around, which is undesirable.
	 */
	(void) ddi_prop_remove(DDI_DEV_T_NONE, devinfo, "DNET_HACK");
	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
	    "DNET_SROM");
	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
	    "DNET_DEVNUM");
#endif

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) -- quiesce the device for fast reboot; must not block
 * or use mutexes.
 */
int
dnet_quiesce(dev_info_t *dip)
{
	struct dnetinstance *dnetp = ddi_get_driver_private(dip);

	/*
	 * Reset chip (disables interrupts).
	 */
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG), 0);
	ddi_put32(dnetp->io_handle,
	    REG32(dnetp->io_reg, BUS_MODE_REG), SW_RESET);

	return (DDI_SUCCESS);
}

/* stop tx/rx, mask interrupts and software-reset the chip */
static void
dnet_reset_board(struct dnetinstance *dnetp)
{
	uint32_t val;

	/*
	 * before initializing the dnet should be in STOP state
	 */
	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
	    val & ~(START_TRANSMIT | START_RECEIVE));

	/*
	 * Reset the chip
	 */
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG), 0);
	ddi_put32(dnetp->io_handle,
	    REG32(dnetp->io_reg, BUS_MODE_REG), SW_RESET);
	drv_usecwait(5);
}

/*
 * dnet_init_board() -- initialize the specified network board short of
 * actually starting the board.  Call after dnet_reset_board().
 * called with intrlock held.
 */
static void
dnet_init_board(struct dnetinstance *dnetp)
{
	set_opr(dnetp);
	set_gpr(dnetp);
	set_sia(dnetp);
	dnet_chip_init(dnetp);
}

/* dnet_chip_init() - called with intrlock held */
static void
dnet_chip_init(struct dnetinstance *dnetp)
{
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, BUS_MODE_REG),
	    CACHE_ALIGN | BURST_SIZE);	/* CSR0 */

	/*
	 * Initialize the TX and RX descriptors/buffers
	 */
	dnet_init_txrx_bufs(dnetp);

	/*
	 * Set the base address of the Rx descriptor list in CSR3
	 */
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, RX_BASE_ADDR_REG),
	    dnetp->rx_desc_paddr);

	/*
	 * Set the base address of the Tx descriptor list in CSR4
	 */
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, TX_BASE_ADDR_REG),
	    dnetp->tx_desc_paddr);

	dnetp->tx_current_desc = dnetp->rx_current_desc = 0;
	dnetp->transmitted_desc = 0;
	dnetp->free_desc = dnetp->max_tx_desc;
	enable_interrupts(dnetp);
}
/*
 * dnet_start() -- start the board receiving and allow transmits.
 * Called with intrlock held.
 */
static int
dnet_start(struct dnetinstance *dnetp)
{
	uint32_t val;

	ASSERT(MUTEX_HELD(&dnetp->intrlock));
	/*
	 * start the board and enable receiving.  Transmit is enabled
	 * first so that the setup frame written by dnet_set_addr() can
	 * be processed before reception begins.
	 */
	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
	    val | START_TRANSMIT);
	(void) dnet_set_addr(dnetp);
	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
	    val | START_RECEIVE);
	enable_interrupts(dnetp);
	return (0);
}

/* GLDv3 mc_start entry point */
static int
dnet_m_start(void *arg)
{
	struct dnetinstance *dnetp = arg;

	mutex_enter(&dnetp->intrlock);
	dnetp->running = B_TRUE;
	/*
	 * start the board and enable receiving
	 */
	if (!dnetp->suspended)
		(void) dnet_start(dnetp);
	mutex_exit(&dnetp->intrlock);
	return (0);
}

/* GLDv3 mc_stop entry point */
static void
dnet_m_stop(void *arg)
{
	struct dnetinstance *dnetp = arg;
	uint32_t val;

	/*
	 * stop the board and disable transmit/receive
	 */
	mutex_enter(&dnetp->intrlock);
	if (!dnetp->suspended) {
		val = ddi_get32(dnetp->io_handle,
		    REG32(dnetp->io_reg, OPN_MODE_REG));
		ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG),
		    val & ~(START_TRANSMIT | START_RECEIVE));
	}
	mac_link_update(dnetp->mac_handle, LINK_STATE_UNKNOWN);
	dnetp->running = B_FALSE;
	mutex_exit(&dnetp->intrlock);
}

/*
 * dnet_set_addr() -- set the physical network address on the board
 * by queueing a setup frame built in setup_buf_vaddr.
 * Called with intrlock held.
 */
static int
dnet_set_addr(struct dnetinstance *dnetp)
{
	struct tx_desc_type *desc;
	int current_desc;
	uint32_t val;

	ASSERT(MUTEX_HELD(&dnetp->intrlock));

	/* nothing to do until transmit has been started */
	val = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG));
	if (!(val & START_TRANSMIT))
		return (0);

	current_desc = dnetp->tx_current_desc;
	desc = &dnetp->tx_desc[current_desc];

	mutex_enter(&dnetp->txlock);
	dnetp->need_saddr = 0;
	mutex_exit(&dnetp->txlock);

	/* no free descriptor: remember to retry later via need_saddr */
	if ((alloc_descriptor(dnetp)) == FAILURE) {
		mutex_enter(&dnetp->txlock);
		dnetp->need_saddr = 1;
		mutex_exit(&dnetp->txlock);
#ifdef DNETDEBUG
		if (dnetdebug & DNETTRACE)
			cmn_err(CE_WARN, "DNET saddr:alloc descriptor failure");
#endif
		return (0);
	}

	/* build a setup-packet descriptor pointing at the setup buffer */
	desc->buffer1 = dnetp->setup_buf_paddr;
	desc->buffer2 = 0;
	desc->desc1.buffer_size1 = SETUPBUF_SIZE;
	desc->desc1.buffer_size2 = 0;
	desc->desc1.setup_packet = 1;
	desc->desc1.first_desc = 0;
	desc->desc1.last_desc = 0;
	desc->desc1.filter_type0 = 1;
	desc->desc1.filter_type1 = 1;
	desc->desc1.int_on_comp = 1;

	/* hand ownership to the chip, then poke transmit poll demand */
	desc->desc0.own = 1;
	ddi_put8(dnetp->io_handle, REG8(dnetp->io_reg, TX_POLL_REG),
	    TX_POLL_DEMAND);
	return (0);
}

/* GLDv3 mc_unicst entry point: change the station address */
static int
dnet_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct dnetinstance *dnetp = arg;
	uint32_t index;
	uint32_t *hashp;

	mutex_enter(&dnetp->intrlock);

	bcopy(macaddr, dnetp->curr_macaddr, ETHERADDRL);

	/*
	 * As we are using Imperfect filtering, the broadcast address has to
	 * be set explicitly in the 512 bit hash table. Hence the index into
	 * the hash table is calculated and the bit set to enable reception
	 * of broadcast packets.
1066 * 1067 * We also use HASH_ONLY mode, without using the perfect filter for 1068 * our station address, because there appears to be a bug in the 1069 * 21140 where it fails to receive the specified perfect filter 1070 * address. 1071 * 1072 * Since dlsdmult comes through here, it doesn't matter that the count 1073 * is wrong for the two bits that correspond to the cases below. The 1074 * worst that could happen is that we'd leave on a bit for an old 1075 * macaddr, in the case where the macaddr gets changed, which is rare. 1076 * Since filtering is imperfect, it is OK if that happens. 1077 */ 1078 hashp = (uint32_t *)dnetp->setup_buf_vaddr; 1079 index = hashindex((uint8_t *)dnet_broadcastaddr); 1080 hashp[ index / 16 ] |= 1 << (index % 16); 1081 1082 index = hashindex((uint8_t *)dnetp->curr_macaddr); 1083 hashp[ index / 16 ] |= 1 << (index % 16); 1084 1085 if (!dnetp->suspended) 1086 (void) dnet_set_addr(dnetp); 1087 mutex_exit(&dnetp->intrlock); 1088 return (0); 1089 } 1090 1091 static int 1092 dnet_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr) 1093 { 1094 struct dnetinstance *dnetp = arg; 1095 uint32_t index; 1096 uint32_t *hashp; 1097 uint32_t retval; 1098 1099 mutex_enter(&dnetp->intrlock); 1100 index = hashindex(macaddr); 1101 hashp = (uint32_t *)dnetp->setup_buf_vaddr; 1102 if (add) { 1103 if (dnetp->multicast_cnt[index]++) { 1104 mutex_exit(&dnetp->intrlock); 1105 return (0); 1106 } 1107 hashp[ index / 16 ] |= 1 << (index % 16); 1108 } else { 1109 if (--dnetp->multicast_cnt[index]) { 1110 mutex_exit(&dnetp->intrlock); 1111 return (0); 1112 } 1113 hashp[ index / 16 ] &= ~ (1 << (index % 16)); 1114 } 1115 if (!dnetp->suspended) 1116 retval = dnet_set_addr(dnetp); 1117 else 1118 retval = 0; 1119 mutex_exit(&dnetp->intrlock); 1120 return (retval); 1121 } 1122 1123 /* 1124 * A hashing function used for setting the 1125 * node address or a multicast address 1126 */ 1127 static uint32_t 1128 hashindex(const uint8_t *address) 1129 { 1130 uint32_t 
crc = (uint32_t)HASH_CRC; 1131 uint32_t const POLY = HASH_POLY; 1132 uint32_t msb; 1133 int32_t byteslength; 1134 uint8_t currentbyte; 1135 uint32_t index; 1136 int32_t bit; 1137 int32_t shift; 1138 1139 for (byteslength = 0; byteslength < ETHERADDRL; byteslength++) { 1140 currentbyte = address[byteslength]; 1141 for (bit = 0; bit < 8; bit++) { 1142 msb = crc >> 31; 1143 crc <<= 1; 1144 if (msb ^ (currentbyte & 1)) { 1145 crc ^= POLY; 1146 crc |= 0x00000001; 1147 } 1148 currentbyte >>= 1; 1149 } 1150 } 1151 1152 for (index = 0, bit = 23, shift = 8; shift >= 0; bit++, shift--) { 1153 index |= (((crc >> bit) & 1) << shift); 1154 } 1155 return (index); 1156 } 1157 1158 static int 1159 dnet_m_setpromisc(void *arg, boolean_t on) 1160 { 1161 struct dnetinstance *dnetp = arg; 1162 uint32_t val; 1163 1164 mutex_enter(&dnetp->intrlock); 1165 if (dnetp->promisc == on) { 1166 mutex_exit(&dnetp->intrlock); 1167 return (0); 1168 } 1169 dnetp->promisc = on; 1170 1171 if (!dnetp->suspended) { 1172 val = ddi_get32(dnetp->io_handle, 1173 REG32(dnetp->io_reg, OPN_MODE_REG)); 1174 if (on) 1175 ddi_put32(dnetp->io_handle, 1176 REG32(dnetp->io_reg, OPN_MODE_REG), 1177 val | PROM_MODE); 1178 else 1179 ddi_put32(dnetp->io_handle, 1180 REG32(dnetp->io_reg, OPN_MODE_REG), 1181 val & (~PROM_MODE)); 1182 } 1183 mutex_exit(&dnetp->intrlock); 1184 return (0); 1185 } 1186 1187 static int 1188 dnet_m_getstat(void *arg, uint_t stat, uint64_t *val) 1189 { 1190 struct dnetinstance *dnetp = arg; 1191 1192 switch (stat) { 1193 case MAC_STAT_IFSPEED: 1194 if (!dnetp->running) { 1195 *val = 0; 1196 } else { 1197 *val = (dnetp->mii_up ? 
1198 dnetp->mii_speed : dnetp->speed) * 1000000; 1199 } 1200 break; 1201 1202 case MAC_STAT_NORCVBUF: 1203 *val = dnetp->stat_norcvbuf; 1204 break; 1205 1206 case MAC_STAT_IERRORS: 1207 *val = dnetp->stat_errrcv; 1208 break; 1209 1210 case MAC_STAT_OERRORS: 1211 *val = dnetp->stat_errxmt; 1212 break; 1213 1214 case MAC_STAT_COLLISIONS: 1215 *val = dnetp->stat_collisions; 1216 break; 1217 1218 case ETHER_STAT_DEFER_XMTS: 1219 *val = dnetp->stat_defer; 1220 break; 1221 1222 case ETHER_STAT_CARRIER_ERRORS: 1223 *val = dnetp->stat_nocarrier; 1224 break; 1225 1226 case ETHER_STAT_TOOSHORT_ERRORS: 1227 *val = dnetp->stat_short; 1228 break; 1229 1230 case ETHER_STAT_LINK_DUPLEX: 1231 if (!dnetp->running) { 1232 *val = LINK_DUPLEX_UNKNOWN; 1233 1234 } else if (dnetp->mii_up) { 1235 *val = dnetp->mii_duplex ? 1236 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF; 1237 } else { 1238 *val = dnetp->full_duplex ? 1239 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF; 1240 } 1241 break; 1242 1243 case ETHER_STAT_TX_LATE_COLLISIONS: 1244 *val = dnetp->stat_xmtlatecoll; 1245 break; 1246 1247 case ETHER_STAT_EX_COLLISIONS: 1248 *val = dnetp->stat_excoll; 1249 break; 1250 1251 case MAC_STAT_OVERFLOWS: 1252 *val = dnetp->stat_overflow; 1253 break; 1254 1255 case MAC_STAT_UNDERFLOWS: 1256 *val = dnetp->stat_underflow; 1257 break; 1258 1259 default: 1260 return (ENOTSUP); 1261 } 1262 1263 return (0); 1264 } 1265 1266 #define NextTXIndex(index) (((index)+1) % dnetp->max_tx_desc) 1267 #define PrevTXIndex(index) (((index)-1) < 0 ? 
dnetp->max_tx_desc - 1: (index)-1) 1268 1269 static mblk_t * 1270 dnet_m_tx(void *arg, mblk_t *mp) 1271 { 1272 struct dnetinstance *dnetp = arg; 1273 1274 mutex_enter(&dnetp->txlock); 1275 1276 /* if suspended, drop the packet on the floor, we missed it */ 1277 if (dnetp->suspended) { 1278 mutex_exit(&dnetp->txlock); 1279 freemsg(mp); 1280 return (NULL); 1281 } 1282 1283 if (dnetp->need_saddr) { 1284 /* XXX function return value ignored */ 1285 mutex_exit(&dnetp->txlock); 1286 mutex_enter(&dnetp->intrlock); 1287 (void) dnet_set_addr(dnetp); 1288 mutex_exit(&dnetp->intrlock); 1289 mutex_enter(&dnetp->txlock); 1290 } 1291 1292 while (mp != NULL) { 1293 if (!dnet_send(dnetp, mp)) { 1294 mutex_exit(&dnetp->txlock); 1295 return (mp); 1296 } 1297 mp = mp->b_next; 1298 } 1299 1300 mutex_exit(&dnetp->txlock); 1301 1302 /* 1303 * Enable xmit interrupt in case we are running out of xmit descriptors 1304 * or there are more packets on the queue waiting to be transmitted. 1305 */ 1306 mutex_enter(&dnetp->intrlock); 1307 1308 enable_interrupts(dnetp); 1309 1310 /* 1311 * Kick the transmitter 1312 */ 1313 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, TX_POLL_REG), 1314 TX_POLL_DEMAND); 1315 1316 mutex_exit(&dnetp->intrlock); 1317 1318 return (NULL); 1319 } 1320 1321 static boolean_t 1322 dnet_send(struct dnetinstance *dnetp, mblk_t *mp) 1323 { 1324 struct tx_desc_type *ring = dnetp->tx_desc; 1325 int mblen, totlen; 1326 int index, end_index, start_index; 1327 int avail; 1328 int error; 1329 int bufn; 1330 int retval; 1331 mblk_t *bp; 1332 1333 ASSERT(MUTEX_HELD(&dnetp->txlock)); 1334 end_index = 0; 1335 1336 /* reclaim any xmit descriptors completed */ 1337 dnet_reclaim_Tx_desc(dnetp); 1338 1339 /* 1340 * Use the data buffers from the message and construct the 1341 * scatter/gather list by calling ddi_dma_addr_bind_handle(). 
1342 */ 1343 error = 0; 1344 totlen = 0; 1345 bp = mp; 1346 bufn = 0; 1347 index = start_index = dnetp->tx_current_desc; 1348 avail = dnetp->free_desc; 1349 while (bp != NULL) { 1350 uint_t ncookies; 1351 ddi_dma_cookie_t dma_cookie; 1352 1353 mblen = MBLKL(bp); 1354 1355 if (!mblen) { /* skip zero-length message blocks */ 1356 bp = bp->b_cont; 1357 continue; 1358 } 1359 1360 retval = ddi_dma_addr_bind_handle(dnetp->dma_handle_tx, NULL, 1361 (caddr_t)bp->b_rptr, mblen, 1362 DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 0, 1363 &dma_cookie, &ncookies); 1364 1365 switch (retval) { 1366 case DDI_DMA_MAPPED: 1367 break; /* everything's fine */ 1368 1369 case DDI_DMA_NORESOURCES: 1370 error = 1; /* allow retry by gld */ 1371 break; 1372 1373 case DDI_DMA_NOMAPPING: 1374 case DDI_DMA_INUSE: 1375 case DDI_DMA_TOOBIG: 1376 default: 1377 error = 2; /* error, no retry */ 1378 break; 1379 } 1380 1381 /* 1382 * we can use two cookies per descriptor (i.e buffer1 and 1383 * buffer2) so we need at least (ncookies+1)/2 descriptors. 
1384 */ 1385 if (((ncookies + 1) >> 1) > dnetp->free_desc) { 1386 (void) ddi_dma_unbind_handle(dnetp->dma_handle_tx); 1387 error = 1; 1388 break; 1389 } 1390 1391 /* setup the descriptors for this data buffer */ 1392 while (ncookies) { 1393 end_index = index; 1394 if (bufn % 2) { 1395 ring[index].buffer2 = 1396 (uint32_t)dma_cookie.dmac_address; 1397 ring[index].desc1.buffer_size2 = 1398 dma_cookie.dmac_size; 1399 index = NextTXIndex(index); /* goto next desc */ 1400 } else { 1401 /* initialize the descriptor */ 1402 ASSERT(ring[index].desc0.own == 0); 1403 *(uint32_t *)&ring[index].desc0 = 0; 1404 *(uint32_t *)&ring[index].desc1 &= 1405 DNET_END_OF_RING; 1406 ring[index].buffer1 = 1407 (uint32_t)dma_cookie.dmac_address; 1408 ring[index].desc1.buffer_size1 = 1409 dma_cookie.dmac_size; 1410 ring[index].buffer2 = (uint32_t)(0); 1411 dnetp->free_desc--; 1412 ASSERT(dnetp->free_desc >= 0); 1413 } 1414 totlen += dma_cookie.dmac_size; 1415 bufn++; 1416 if (--ncookies) 1417 ddi_dma_nextcookie(dnetp->dma_handle_tx, 1418 &dma_cookie); 1419 } 1420 (void) ddi_dma_unbind_handle(dnetp->dma_handle_tx); 1421 bp = bp->b_cont; 1422 } 1423 1424 if (error == 1) { 1425 dnetp->stat_defer++; 1426 dnetp->free_desc = avail; 1427 dnetp->need_tx_update = B_TRUE; 1428 return (B_FALSE); 1429 } else if (error) { 1430 dnetp->free_desc = avail; 1431 freemsg(mp); 1432 return (B_TRUE); /* Drop packet, don't retry */ 1433 } 1434 1435 if (totlen > ETHERMAX + VLAN_TAGSZ) { 1436 cmn_err(CE_WARN, "DNET: tried to send large %d packet", totlen); 1437 dnetp->free_desc = avail; 1438 freemsg(mp); 1439 return (B_TRUE); /* Don't repeat this attempt */ 1440 } 1441 1442 /* 1443 * Remeber the message buffer pointer to do freemsg() at xmit 1444 * interrupt time. 1445 */ 1446 dnetp->tx_msgbufp[end_index] = mp; 1447 1448 /* 1449 * Now set the first/last buffer and own bits 1450 * Since the 21040 looks for these bits set in the 1451 * first buffer, work backwards in multiple buffers. 
1452 */ 1453 ring[end_index].desc1.last_desc = 1; 1454 ring[end_index].desc1.int_on_comp = 1; 1455 for (index = end_index; index != start_index; 1456 index = PrevTXIndex(index)) 1457 ring[index].desc0.own = 1; 1458 ring[start_index].desc1.first_desc = 1; 1459 ring[start_index].desc0.own = 1; 1460 1461 dnetp->tx_current_desc = NextTXIndex(end_index); 1462 1463 /* 1464 * Safety check: make sure end-of-ring is set in last desc. 1465 */ 1466 ASSERT(ring[dnetp->max_tx_desc-1].desc1.end_of_ring != 0); 1467 1468 return (B_TRUE); 1469 } 1470 1471 /* 1472 * dnet_intr() -- interrupt from board to inform us that a receive or 1473 * transmit has completed. 1474 */ 1475 static uint_t 1476 dnet_intr(caddr_t arg) 1477 { 1478 struct dnetinstance *dnetp = (struct dnetinstance *)arg; 1479 uint32_t int_status; 1480 1481 mutex_enter(&dnetp->intrlock); 1482 1483 if (dnetp->suspended) { 1484 mutex_exit(&dnetp->intrlock); 1485 return (DDI_INTR_UNCLAIMED); 1486 } 1487 1488 int_status = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, 1489 STATUS_REG)); 1490 1491 /* 1492 * If interrupt was not from this board 1493 */ 1494 if (!(int_status & (NORMAL_INTR_SUMM | ABNORMAL_INTR_SUMM))) { 1495 mutex_exit(&dnetp->intrlock); 1496 return (DDI_INTR_UNCLAIMED); 1497 } 1498 1499 dnetp->stat_intr++; 1500 1501 if (int_status & GPTIMER_INTR) { 1502 ddi_put32(dnetp->io_handle, 1503 REG32(dnetp->io_reg, STATUS_REG), GPTIMER_INTR); 1504 if (dnetp->timer.cb) 1505 dnetp->timer.cb(dnetp); 1506 else 1507 cmn_err(CE_WARN, "dnet: unhandled timer interrupt"); 1508 } 1509 1510 if (int_status & TX_INTR) { 1511 ddi_put32(dnetp->io_handle, 1512 REG32(dnetp->io_reg, STATUS_REG), TX_INTR); 1513 mutex_enter(&dnetp->txlock); 1514 if (dnetp->need_tx_update) { 1515 mutex_exit(&dnetp->txlock); 1516 mutex_exit(&dnetp->intrlock); 1517 mac_tx_update(dnetp->mac_handle); 1518 mutex_enter(&dnetp->intrlock); 1519 mutex_enter(&dnetp->txlock); 1520 dnetp->need_tx_update = B_FALSE; 1521 } 1522 /* reclaim any xmit descriptors that are 
completed */ 1523 dnet_reclaim_Tx_desc(dnetp); 1524 mutex_exit(&dnetp->txlock); 1525 } 1526 1527 /* 1528 * Check if receive interrupt bit is set 1529 */ 1530 if (int_status & (RX_INTR | RX_UNAVAIL_INTR)) { 1531 ddi_put32(dnetp->io_handle, 1532 REG32(dnetp->io_reg, STATUS_REG), 1533 int_status & (RX_INTR | RX_UNAVAIL_INTR)); 1534 dnet_getp(dnetp); 1535 } 1536 1537 if (int_status & ABNORMAL_INTR_SUMM) { 1538 /* 1539 * Check for system error 1540 */ 1541 if (int_status & SYS_ERR) { 1542 if ((int_status & SYS_ERR_BITS) == MASTER_ABORT) 1543 cmn_err(CE_WARN, "DNET: Bus Master Abort"); 1544 if ((int_status & SYS_ERR_BITS) == TARGET_ABORT) 1545 cmn_err(CE_WARN, "DNET: Bus Target Abort"); 1546 if ((int_status & SYS_ERR_BITS) == PARITY_ERROR) 1547 cmn_err(CE_WARN, "DNET: Parity error"); 1548 } 1549 1550 /* 1551 * If the jabber has timed out then reset the chip 1552 */ 1553 if (int_status & TX_JABBER_TIMEOUT) 1554 cmn_err(CE_WARN, "DNET: Jabber timeout."); 1555 1556 /* 1557 * If an underflow has occurred, reset the chip 1558 */ 1559 if (int_status & TX_UNDERFLOW) 1560 cmn_err(CE_WARN, "DNET: Tx Underflow."); 1561 1562 #ifdef DNETDEBUG 1563 if (dnetdebug & DNETINT) 1564 cmn_err(CE_NOTE, "Trying to reset..."); 1565 #endif 1566 dnet_reset_board(dnetp); 1567 dnet_init_board(dnetp); 1568 /* XXX function return value ignored */ 1569 (void) dnet_start(dnetp); 1570 } 1571 1572 /* 1573 * Enable the interrupts. Enable xmit interrupt in case we are 1574 * running out of free descriptors or if there are packets 1575 * in the queue waiting to be transmitted. 
1576 */ 1577 enable_interrupts(dnetp); 1578 mutex_exit(&dnetp->intrlock); 1579 return (DDI_INTR_CLAIMED); /* Indicate it was our interrupt */ 1580 } 1581 1582 static void 1583 dnet_getp(struct dnetinstance *dnetp) 1584 { 1585 int packet_length, index; 1586 mblk_t *mp; 1587 caddr_t virtual_address; 1588 struct rx_desc_type *desc = dnetp->rx_desc; 1589 int marker = dnetp->rx_current_desc; 1590 int misses; 1591 1592 if (!dnetp->overrun_workaround) { 1593 /* 1594 * If the workaround is not in place, we must still update 1595 * the missed frame statistic from the on-chip counter. 1596 */ 1597 misses = ddi_get32(dnetp->io_handle, 1598 REG32(dnetp->io_reg, MISSED_FRAME_REG)); 1599 dnetp->stat_missed += (misses & MISSED_FRAME_MASK); 1600 } 1601 1602 /* While host owns the current descriptor */ 1603 while (!(desc[dnetp->rx_current_desc].desc0.own)) { 1604 struct free_ptr *frp; 1605 caddr_t newbuf; 1606 struct rbuf_list *rp; 1607 1608 index = dnetp->rx_current_desc; 1609 ASSERT(desc[index].desc0.first_desc != 0); 1610 1611 /* 1612 * DMA overrun errata from DEC: avoid possible bus hangs 1613 * and data corruption 1614 */ 1615 if (dnetp->overrun_workaround && 1616 marker == dnetp->rx_current_desc) { 1617 int opn; 1618 do { 1619 marker = (marker+1) % dnetp->max_rx_desc; 1620 } while (!(dnetp->rx_desc[marker].desc0.own) && 1621 marker != index); 1622 1623 misses = ddi_get32(dnetp->io_handle, 1624 REG32(dnetp->io_reg, MISSED_FRAME_REG)); 1625 dnetp->stat_missed += 1626 (misses & MISSED_FRAME_MASK); 1627 if (misses & OVERFLOW_COUNTER_MASK) { 1628 /* 1629 * Overflow(s) have occurred : stop receiver, 1630 * and wait until in stopped state 1631 */ 1632 opn = ddi_get32(dnetp->io_handle, 1633 REG32(dnetp->io_reg, OPN_MODE_REG)); 1634 ddi_put32(dnetp->io_handle, 1635 REG32(dnetp->io_reg, OPN_MODE_REG), 1636 opn & ~(START_RECEIVE)); 1637 1638 do { 1639 drv_usecwait(10); 1640 } while ((ddi_get32(dnetp->io_handle, 1641 REG32(dnetp->io_reg, STATUS_REG)) & 1642 RECEIVE_PROCESS_STATE) != 0); 
1643 #ifdef DNETDEBUG 1644 if (dnetdebug & DNETRECV) 1645 cmn_err(CE_CONT, "^*"); 1646 #endif 1647 /* Discard probably corrupt frames */ 1648 while (!(dnetp->rx_desc[index].desc0.own)) { 1649 dnetp->rx_desc[index].desc0.own = 1; 1650 index = (index+1) % dnetp->max_rx_desc; 1651 dnetp->stat_missed++; 1652 } 1653 1654 /* restart the receiver */ 1655 opn = ddi_get32(dnetp->io_handle, 1656 REG32(dnetp->io_reg, OPN_MODE_REG)); 1657 ddi_put32(dnetp->io_handle, 1658 REG32(dnetp->io_reg, OPN_MODE_REG), 1659 opn | START_RECEIVE); 1660 marker = dnetp->rx_current_desc = index; 1661 continue; 1662 } 1663 /* 1664 * At this point, we know that all packets before 1665 * "marker" were received before a dma overrun occurred 1666 */ 1667 } 1668 1669 /* 1670 * If we get an oversized packet it could span multiple 1671 * descriptors. If this happens an error bit should be set. 1672 */ 1673 while (desc[index].desc0.last_desc == 0) { 1674 index = (index + 1) % dnetp->max_rx_desc; 1675 if (desc[index].desc0.own) 1676 return; /* not done receiving large packet */ 1677 } 1678 while (dnetp->rx_current_desc != index) { 1679 desc[dnetp->rx_current_desc].desc0.own = 1; 1680 dnetp->rx_current_desc = 1681 (dnetp->rx_current_desc + 1) % dnetp->max_rx_desc; 1682 #ifdef DNETDEBUG 1683 if (dnetdebug & DNETRECV) 1684 cmn_err(CE_WARN, "dnet: received large packet"); 1685 #endif 1686 } 1687 1688 packet_length = desc[index].desc0.frame_len; 1689 1690 /* 1691 * Remove CRC from received data. This is an artefact of the 1692 * 21x4x chip and should not be passed higher up the network 1693 * stack. 1694 */ 1695 packet_length -= ETHERFCSL; 1696 1697 /* get the virtual address of the packet received */ 1698 virtual_address = 1699 dnetp->rx_buf_vaddr[index]; 1700 1701 /* 1702 * If no packet errors then do: 1703 * 1. Allocate a new receive buffer so that we can 1704 * use the current buffer as streams buffer to 1705 * avoid bcopy. 1706 * 2. 
If we got a new receive buffer then allocate 1707 * an mblk using desballoc(). 1708 * 3. Otherwise use the mblk from allocb() and do 1709 * the bcopy. 1710 */ 1711 frp = NULL; 1712 rp = NULL; 1713 newbuf = NULL; 1714 mp = NULL; 1715 if (!desc[index].desc0.err_summary || 1716 (desc[index].desc0.frame2long && 1717 packet_length < rx_buf_size)) { 1718 ASSERT(packet_length < rx_buf_size); 1719 /* 1720 * Allocate another receive buffer for this descriptor. 1721 * If we fail to allocate then we do the normal bcopy. 1722 */ 1723 rp = dnet_rbuf_alloc(dnetp->devinfo, 0); 1724 if (rp != NULL) { 1725 newbuf = rp->rbuf_vaddr; 1726 frp = kmem_zalloc(sizeof (*frp), KM_NOSLEEP); 1727 if (frp != NULL) { 1728 frp->free_rtn.free_func = 1729 dnet_freemsg_buf; 1730 frp->free_rtn.free_arg = (char *)frp; 1731 frp->buf = virtual_address; 1732 mp = desballoc( 1733 (uchar_t *)virtual_address, 1734 packet_length, 0, &frp->free_rtn); 1735 if (mp == NULL) { 1736 kmem_free(frp, sizeof (*frp)); 1737 dnet_rbuf_free((caddr_t)newbuf); 1738 frp = NULL; 1739 newbuf = NULL; 1740 } 1741 } 1742 } 1743 if (mp == NULL) { 1744 if (newbuf != NULL) 1745 dnet_rbuf_free((caddr_t)newbuf); 1746 mp = allocb(packet_length, 0); 1747 } 1748 } 1749 1750 if ((desc[index].desc0.err_summary && 1751 packet_length >= rx_buf_size) || mp == NULL) { 1752 1753 /* Update gld statistics */ 1754 if (desc[index].desc0.err_summary) 1755 update_rx_stats(dnetp, index); 1756 else 1757 dnetp->stat_norcvbuf++; 1758 1759 /* 1760 * Reset ownership of the descriptor. 
1761 */ 1762 desc[index].desc0.own = 1; 1763 dnetp->rx_current_desc = 1764 (dnetp->rx_current_desc+1) % dnetp->max_rx_desc; 1765 1766 /* Demand receive polling by the chip */ 1767 ddi_put32(dnetp->io_handle, 1768 REG32(dnetp->io_reg, RX_POLL_REG), RX_POLL_DEMAND); 1769 1770 continue; 1771 } 1772 1773 if (newbuf != NULL) { 1774 uint32_t end_paddr; 1775 /* attach the new buffer to the rx descriptor */ 1776 dnetp->rx_buf_vaddr[index] = newbuf; 1777 dnetp->rx_buf_paddr[index] = rp->rbuf_paddr; 1778 desc[index].buffer1 = rp->rbuf_paddr; 1779 desc[index].desc1.buffer_size1 = rx_buf_size; 1780 desc[index].desc1.buffer_size2 = 0; 1781 end_paddr = rp->rbuf_endpaddr; 1782 if ((desc[index].buffer1 & ~dnetp->pgmask) != 1783 (end_paddr & ~dnetp->pgmask)) { 1784 /* discontiguous */ 1785 desc[index].buffer2 = end_paddr&~dnetp->pgmask; 1786 desc[index].desc1.buffer_size2 = 1787 (end_paddr & dnetp->pgmask) + 1; 1788 desc[index].desc1.buffer_size1 = 1789 rx_buf_size-desc[index].desc1.buffer_size2; 1790 } 1791 } else { 1792 /* couldn't allocate another buffer; copy the data */ 1793 BCOPY((caddr_t)virtual_address, (caddr_t)mp->b_wptr, 1794 packet_length); 1795 } 1796 1797 mp->b_wptr += packet_length; 1798 1799 desc[dnetp->rx_current_desc].desc0.own = 1; 1800 1801 /* 1802 * Increment receive desc index. 
This is for the scan of 1803 * next packet 1804 */ 1805 dnetp->rx_current_desc = 1806 (dnetp->rx_current_desc+1) % dnetp->max_rx_desc; 1807 1808 /* Demand polling by chip */ 1809 ddi_put32(dnetp->io_handle, 1810 REG32(dnetp->io_reg, RX_POLL_REG), RX_POLL_DEMAND); 1811 1812 /* send the packet upstream */ 1813 mutex_exit(&dnetp->intrlock); 1814 mac_rx(dnetp->mac_handle, NULL, mp); 1815 mutex_enter(&dnetp->intrlock); 1816 } 1817 } 1818 /* 1819 * Function to update receive statistics 1820 */ 1821 static void 1822 update_rx_stats(struct dnetinstance *dnetp, int index) 1823 { 1824 struct rx_desc_type *descp = &(dnetp->rx_desc[index]); 1825 1826 /* 1827 * Update gld statistics 1828 */ 1829 dnetp->stat_errrcv++; 1830 1831 if (descp->desc0.overflow) { 1832 /* FIFO Overrun */ 1833 dnetp->stat_overflow++; 1834 } 1835 1836 if (descp->desc0.collision) { 1837 /*EMPTY*/ 1838 /* Late Colllision on receive */ 1839 /* no appropriate counter */ 1840 } 1841 1842 if (descp->desc0.crc) { 1843 /* CRC Error */ 1844 dnetp->stat_crc++; 1845 } 1846 1847 if (descp->desc0.runt_frame) { 1848 /* Runt Error */ 1849 dnetp->stat_short++; 1850 } 1851 1852 if (descp->desc0.desc_err) { 1853 /*EMPTY*/ 1854 /* Not enough receive descriptors */ 1855 /* This condition is accounted in dnet_intr() */ 1856 } 1857 1858 if (descp->desc0.frame2long) { 1859 dnetp->stat_frame++; 1860 } 1861 } 1862 1863 /* 1864 * Function to update transmit statistics 1865 */ 1866 static void 1867 update_tx_stats(struct dnetinstance *dnetp, int index) 1868 { 1869 struct tx_desc_type *descp = &(dnetp->tx_desc[index]); 1870 int fd; 1871 media_block_t *block = dnetp->selected_media_block; 1872 1873 1874 /* Update gld statistics */ 1875 dnetp->stat_errxmt++; 1876 1877 /* If we're in full-duplex don't count collisions or carrier loss. 
*/ 1878 if (dnetp->mii_up) { 1879 fd = dnetp->mii_duplex; 1880 } else { 1881 /* Rely on media code */ 1882 fd = block->media_code == MEDIA_TP_FD || 1883 block->media_code == MEDIA_SYM_SCR_FD; 1884 } 1885 1886 if (descp->desc0.collision_count && !fd) { 1887 dnetp->stat_collisions += descp->desc0.collision_count; 1888 } 1889 1890 if (descp->desc0.late_collision && !fd) { 1891 dnetp->stat_xmtlatecoll++; 1892 } 1893 1894 if (descp->desc0.excess_collision && !fd) { 1895 dnetp->stat_excoll++; 1896 } 1897 1898 if (descp->desc0.underflow) { 1899 dnetp->stat_underflow++; 1900 } 1901 1902 #if 0 1903 if (descp->desc0.tx_jabber_to) { 1904 /* no appropriate counter */ 1905 } 1906 #endif 1907 1908 if (descp->desc0.carrier_loss && !fd) { 1909 dnetp->stat_nocarrier++; 1910 } 1911 1912 if (descp->desc0.no_carrier && !fd) { 1913 dnetp->stat_nocarrier++; 1914 } 1915 } 1916 1917 /* 1918 * ========== Media Selection Setup Routines ========== 1919 */ 1920 1921 1922 static void 1923 write_gpr(struct dnetinstance *dnetp, uint32_t val) 1924 { 1925 #ifdef DEBUG 1926 if (dnetdebug & DNETREGCFG) 1927 cmn_err(CE_NOTE, "GPR: %x", val); 1928 #endif 1929 switch (dnetp->board_type) { 1930 case DEVICE_ID_21143: 1931 /* Set the correct bit for a control write */ 1932 if (val & GPR_CONTROL_WRITE) 1933 val |= CWE_21143, val &= ~GPR_CONTROL_WRITE; 1934 /* Write to upper half of CSR15 */ 1935 dnetp->gprsia = (dnetp->gprsia & 0xffff) | (val << 16); 1936 ddi_put32(dnetp->io_handle, 1937 REG32(dnetp->io_reg, SIA_GENERAL_REG), dnetp->gprsia); 1938 break; 1939 default: 1940 /* Set the correct bit for a control write */ 1941 if (val & GPR_CONTROL_WRITE) 1942 val |= CWE_21140, val &= ~GPR_CONTROL_WRITE; 1943 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, GP_REG), val); 1944 break; 1945 } 1946 } 1947 1948 static uint32_t 1949 read_gpr(struct dnetinstance *dnetp) 1950 { 1951 switch (dnetp->board_type) { 1952 case DEVICE_ID_21143: 1953 /* Read upper half of CSR15 */ 1954 return (ddi_get32(dnetp->io_handle, 1955 
REG32(dnetp->io_reg, SIA_GENERAL_REG)) >> 16); 1956 default: 1957 return (ddi_get32(dnetp->io_handle, 1958 REG32(dnetp->io_reg, GP_REG))); 1959 } 1960 } 1961 1962 static void 1963 set_gpr(struct dnetinstance *dnetp) 1964 { 1965 uint32_t *sequence; 1966 int len; 1967 LEAF_FORMAT *leaf = &dnetp->sr.leaf[dnetp->leaf]; 1968 media_block_t *block = dnetp->selected_media_block; 1969 int i; 1970 1971 if (ddi_getlongprop(DDI_DEV_T_ANY, dnetp->devinfo, 1972 DDI_PROP_DONTPASS, "gpr-sequence", (caddr_t)&sequence, 1973 &len) == DDI_PROP_SUCCESS) { 1974 for (i = 0; i < len / sizeof (uint32_t); i++) 1975 write_gpr(dnetp, sequence[i]); 1976 kmem_free(sequence, len); 1977 } else { 1978 /* 1979 * Write the reset sequence if this is the first time this 1980 * block has been selected. 1981 */ 1982 if (block->rstseqlen) { 1983 for (i = 0; i < block->rstseqlen; i++) 1984 write_gpr(dnetp, block->rstseq[i]); 1985 /* 1986 * XXX Legacy blocks do not have reset sequences, so the 1987 * static blocks will never be modified by this 1988 */ 1989 block->rstseqlen = 0; 1990 } 1991 if (leaf->gpr) 1992 write_gpr(dnetp, leaf->gpr | GPR_CONTROL_WRITE); 1993 1994 /* write GPR sequence each time */ 1995 for (i = 0; i < block->gprseqlen; i++) 1996 write_gpr(dnetp, block->gprseq[i]); 1997 } 1998 1999 /* This has possibly caused a PHY to reset. 
Let MII know */ 2000 if (dnetp->phyaddr != -1) 2001 /* XXX function return value ignored */ 2002 (void) mii_sync(dnetp->mii, dnetp->phyaddr); 2003 drv_usecwait(5); 2004 } 2005 2006 /* set_opr() - must be called with intrlock held */ 2007 2008 static void 2009 set_opr(struct dnetinstance *dnetp) 2010 { 2011 uint32_t fd, mb1, sf; 2012 2013 int opnmode_len; 2014 uint32_t val; 2015 media_block_t *block = dnetp->selected_media_block; 2016 2017 ASSERT(block); 2018 2019 /* Check for custom "opnmode_reg" property */ 2020 opnmode_len = sizeof (val); 2021 if (ddi_prop_op(DDI_DEV_T_ANY, dnetp->devinfo, 2022 PROP_LEN_AND_VAL_BUF, DDI_PROP_DONTPASS, "opnmode_reg", 2023 (caddr_t)&val, &opnmode_len) != DDI_PROP_SUCCESS) 2024 opnmode_len = 0; 2025 2026 /* Some bits exist only on 21140 and greater */ 2027 if (dnetp->board_type != DEVICE_ID_21040 && 2028 dnetp->board_type != DEVICE_ID_21041) { 2029 mb1 = OPN_REG_MB1; 2030 sf = STORE_AND_FORWARD; 2031 } else { 2032 mb1 = sf = 0; 2033 mb1 = OPN_REG_MB1; /* Needed for 21040? */ 2034 } 2035 2036 if (opnmode_len) { 2037 ddi_put32(dnetp->io_handle, 2038 REG32(dnetp->io_reg, OPN_MODE_REG), val); 2039 dnet_reset_board(dnetp); 2040 ddi_put32(dnetp->io_handle, 2041 REG32(dnetp->io_reg, OPN_MODE_REG), val); 2042 return; 2043 } 2044 2045 /* 2046 * Set each bit in CSR6 that we want 2047 */ 2048 2049 /* Always want these bits set */ 2050 val = HASH_FILTERING | HASH_ONLY | TX_THRESHOLD_160 | mb1 | sf; 2051 2052 /* Promiscuous mode */ 2053 val |= dnetp->promisc ? PROM_MODE : 0; 2054 2055 /* Scrambler for SYM style media */ 2056 val |= ((block->command & CMD_SCR) && !dnetp->disable_scrambler) ? 2057 SCRAMBLER_MODE : 0; 2058 2059 /* Full duplex */ 2060 if (dnetp->mii_up) { 2061 fd = dnetp->mii_duplex; 2062 } else { 2063 /* Rely on media code */ 2064 fd = block->media_code == MEDIA_TP_FD || 2065 block->media_code == MEDIA_SYM_SCR_FD; 2066 } 2067 2068 /* Port select (and therefore, heartbeat disable) */ 2069 val |= block->command & CMD_PS ? 
(PORT_SELECT | HEARTBEAT_DISABLE) : 0; 2070 2071 /* PCS function */ 2072 val |= (block->command) & CMD_PCS ? PCS_FUNCTION : 0; 2073 val |= fd ? FULL_DUPLEX : 0; 2074 2075 #ifdef DNETDEBUG 2076 if (dnetdebug & DNETREGCFG) 2077 cmn_err(CE_NOTE, "OPN: %x", val); 2078 #endif 2079 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), val); 2080 dnet_reset_board(dnetp); 2081 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, OPN_MODE_REG), val); 2082 } 2083 2084 static void 2085 set_sia(struct dnetinstance *dnetp) 2086 { 2087 media_block_t *block = dnetp->selected_media_block; 2088 2089 ASSERT(MUTEX_HELD(&dnetp->intrlock)); 2090 if (block->type == 2) { 2091 int sia_delay; 2092 #ifdef DNETDEBUG 2093 if (dnetdebug & DNETREGCFG) 2094 cmn_err(CE_NOTE, 2095 "SIA: CSR13: %x, CSR14: %x, CSR15: %x", 2096 block->un.sia.csr13, 2097 block->un.sia.csr14, 2098 block->un.sia.csr15); 2099 #endif 2100 sia_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo, 2101 DDI_PROP_DONTPASS, "sia-delay", 10000); 2102 2103 ddi_put32(dnetp->io_handle, 2104 REG32(dnetp->io_reg, SIA_CONNECT_REG), 0); 2105 2106 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, SIA_TXRX_REG), 2107 block->un.sia.csr14); 2108 2109 /* 2110 * For '143, we need to write through a copy of the register 2111 * to keep the GP half intact 2112 */ 2113 dnetp->gprsia = (dnetp->gprsia&0xffff0000)|block->un.sia.csr15; 2114 ddi_put32(dnetp->io_handle, 2115 REG32(dnetp->io_reg, SIA_GENERAL_REG), 2116 dnetp->gprsia); 2117 2118 ddi_put32(dnetp->io_handle, 2119 REG32(dnetp->io_reg, SIA_CONNECT_REG), 2120 block->un.sia.csr13); 2121 2122 drv_usecwait(sia_delay); 2123 2124 } else if (dnetp->board_type != DEVICE_ID_21140) { 2125 ddi_put32(dnetp->io_handle, 2126 REG32(dnetp->io_reg, SIA_CONNECT_REG), 0); 2127 ddi_put32(dnetp->io_handle, 2128 REG32(dnetp->io_reg, SIA_TXRX_REG), 0); 2129 } 2130 } 2131 2132 /* 2133 * This function (re)allocates the receive and transmit buffers and 2134 * descriptors. 
 * It can be called more than once per instance, though
 * currently it is only called from attach. It should only be called
 * while the device is reset.
 *
 * Allocates (or re-allocates, if the configured ring sizes have changed)
 * the DMA handles, the setup buffer, the transmit and receive descriptor
 * rings and the receive buffers for one instance.  Returns SUCCESS or
 * FAILURE.
 *
 * NOTE(review): on a FAILURE return, resources that were successfully
 * allocated are NOT released here; presumably the caller is expected to
 * invoke dnet_free_bufs() on the failure path -- confirm against attach.
 */
static int
dnet_alloc_bufs(struct dnetinstance *dnetp)
{
	int i;
	size_t len;
	int page_size;
	int realloc = 0;
	int nrecv_desc_old = 0;
	ddi_dma_cookie_t cookie;
	uint_t ncookies;

	/*
	 * check if we are trying to reallocate with different xmit/recv
	 * descriptor ring sizes.
	 */
	if ((dnetp->tx_desc != NULL) &&
	    (dnetp->nxmit_desc != dnetp->max_tx_desc))
		realloc = 1;

	if ((dnetp->rx_desc != NULL) &&
	    (dnetp->nrecv_desc != dnetp->max_rx_desc))
		realloc = 1;

	/* free up the old buffers if we are reallocating them */
	if (realloc) {
		nrecv_desc_old = dnetp->nrecv_desc;
		dnet_free_bufs(dnetp); /* free the old buffers */
	}

	/* Lazily create the four DMA handles used by this instance */
	if (dnetp->dma_handle == NULL)
		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle) != DDI_SUCCESS)
			return (FAILURE);

	if (dnetp->dma_handle_tx == NULL)
		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr_tx,
		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_tx) != DDI_SUCCESS)
			return (FAILURE);

	if (dnetp->dma_handle_txdesc == NULL)
		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_txdesc) != DDI_SUCCESS)
			return (FAILURE);

	if (dnetp->dma_handle_setbuf == NULL)
		if (ddi_dma_alloc_handle(dnetp->devinfo, &dma_attr,
		    DDI_DMA_SLEEP, 0, &dnetp->dma_handle_setbuf) != DDI_SUCCESS)
			return (FAILURE);

	page_size = ddi_ptob(dnetp->devinfo, 1);

	/* page mask; used later to detect receive buffers crossing a page */
	dnetp->pgmask = page_size - 1;

	/* allocate setup buffer if necessary */
	if (dnetp->setup_buf_vaddr == NULL) {
		if (ddi_dma_mem_alloc(dnetp->dma_handle_setbuf,
		    SETUPBUF_SIZE, &accattr, DDI_DMA_STREAMING,
		    DDI_DMA_DONTWAIT, 0, (caddr_t *)&dnetp->setup_buf_vaddr,
		    &len, &dnetp->setup_buf_acchdl) != DDI_SUCCESS)
			return (FAILURE);

		if (ddi_dma_addr_bind_handle(dnetp->dma_handle_setbuf,
		    NULL, dnetp->setup_buf_vaddr, SETUPBUF_SIZE,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
			return (FAILURE);

		dnetp->setup_buf_paddr = cookie.dmac_address;
		bzero(dnetp->setup_buf_vaddr, len);
	}

	/* allocate xmit descriptor array of size dnetp->max_tx_desc */
	if (dnetp->tx_desc == NULL) {
		if (ddi_dma_mem_alloc(dnetp->dma_handle_txdesc,
		    sizeof (struct tx_desc_type) * dnetp->max_tx_desc,
		    &accattr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&dnetp->tx_desc, &len,
		    &dnetp->tx_desc_acchdl) != DDI_SUCCESS)
			return (FAILURE);

		if (ddi_dma_addr_bind_handle(dnetp->dma_handle_txdesc,
		    NULL, (caddr_t)dnetp->tx_desc,
		    sizeof (struct tx_desc_type) * dnetp->max_tx_desc,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
			return (FAILURE);
		dnetp->tx_desc_paddr = cookie.dmac_address;
		bzero(dnetp->tx_desc, len);
		dnetp->nxmit_desc = dnetp->max_tx_desc;

		/* one mblk pointer per TX descriptor; freed on reclaim */
		dnetp->tx_msgbufp =
		    kmem_zalloc(dnetp->max_tx_desc * sizeof (mblk_t **),
		    KM_SLEEP);
	}

	/* allocate receive descriptor array of size dnetp->max_rx_desc */
	if (dnetp->rx_desc == NULL) {
		int ndesc;

		if (ddi_dma_mem_alloc(dnetp->dma_handle,
		    sizeof (struct rx_desc_type) * dnetp->max_rx_desc,
		    &accattr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&dnetp->rx_desc, &len,
		    &dnetp->rx_desc_acchdl) != DDI_SUCCESS)
			return (FAILURE);

		if (ddi_dma_addr_bind_handle(dnetp->dma_handle,
		    NULL, (caddr_t)dnetp->rx_desc,
		    sizeof (struct rx_desc_type) * dnetp->max_rx_desc,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
		    NULL, &cookie, &ncookies) != DDI_DMA_MAPPED)
			return (FAILURE);

		dnetp->rx_desc_paddr = cookie.dmac_address;
		bzero(dnetp->rx_desc, len);
		dnetp->nrecv_desc = dnetp->max_rx_desc;

		/* per-descriptor virtual/physical addresses of rbufs */
		dnetp->rx_buf_vaddr =
		    kmem_zalloc(dnetp->max_rx_desc * sizeof (caddr_t),
		    KM_SLEEP);
		dnetp->rx_buf_paddr =
		    kmem_zalloc(dnetp->max_rx_desc * sizeof (uint32_t),
		    KM_SLEEP);
		/*
		 * Allocate or add to the pool of receive buffers. The pool
		 * is shared among all instances of dnet.
		 *
		 * XXX NEEDSWORK
		 *
		 * We arbitrarily allocate twice as many receive buffers as
		 * receive descriptors because we use the buffers for streams
		 * messages to pass the packets up the stream. We should
		 * instead have initialized constants reflecting
		 * MAX_RX_BUF_2104x and MAX_RX_BUF_2114x, and we should also
		 * probably have a total maximum for the free pool, so that we
		 * don't get out of hand when someone puts in an 8-port board.
		 * The maximum for the entire pool should be the total number
		 * of descriptors for all attached instances together, plus the
		 * total maximum for the free pool. This maximum would only be
		 * reached after some number of instances allocate buffers:
		 * each instance would add (max_rx_buf-max_rx_desc) to the free
		 * pool.
		 */
		ndesc = dnetp->max_rx_desc - nrecv_desc_old;
		if ((ndesc > 0) &&
		    (dnet_rbuf_init(dnetp->devinfo, ndesc * 2) != 0))
			return (FAILURE);

		/* attach one pool buffer to each receive descriptor slot */
		for (i = 0; i < dnetp->max_rx_desc; i++) {
			struct rbuf_list *rp;

			rp = dnet_rbuf_alloc(dnetp->devinfo, 1);
			if (rp == NULL)
				return (FAILURE);
			dnetp->rx_buf_vaddr[i] = rp->rbuf_vaddr;
			dnetp->rx_buf_paddr[i] = rp->rbuf_paddr;
		}
	}

	return (SUCCESS);
}
/*
 * free descriptors/buffers allocated for this device instance. This routine
 * should only be called while the device is reset.
 */
static void
dnet_free_bufs(struct dnetinstance *dnetp)
{
	int i;
	/* free up any xmit descriptors/buffers */
	if (dnetp->tx_desc != NULL) {
		/*
		 * NOTE(review): the descriptor memory is freed here while
		 * dma_handle_txdesc may still be bound to it; the unbind
		 * only happens further below.  The DDI requires
		 * ddi_dma_unbind_handle() before ddi_dma_mem_free() --
		 * confirm and consider reordering.
		 */
		ddi_dma_mem_free(&dnetp->tx_desc_acchdl);
		dnetp->tx_desc = NULL;
		/* we use streams buffers for DMA in xmit process */
		if (dnetp->tx_msgbufp != NULL) {
			/* free up any streams message buffers unclaimed */
			for (i = 0; i < dnetp->nxmit_desc; i++) {
				if (dnetp->tx_msgbufp[i] != NULL) {
					freemsg(dnetp->tx_msgbufp[i]);
				}
			}
			kmem_free(dnetp->tx_msgbufp,
			    dnetp->nxmit_desc * sizeof (mblk_t **));
			dnetp->tx_msgbufp = NULL;
		}
		dnetp->nxmit_desc = 0;
	}

	/* free up any receive descriptors/buffers */
	if (dnetp->rx_desc != NULL) {
		/* NOTE(review): same mem-free-while-bound ordering as above */
		ddi_dma_mem_free(&dnetp->rx_desc_acchdl);
		dnetp->rx_desc = NULL;
		if (dnetp->rx_buf_vaddr != NULL) {
			/* free up the attached rbufs if any */
			for (i = 0; i < dnetp->nrecv_desc; i++) {
				if (dnetp->rx_buf_vaddr[i])
					dnet_rbuf_free(
					    (caddr_t)dnetp->rx_buf_vaddr[i]);
			}
			kmem_free(dnetp->rx_buf_vaddr,
			    dnetp->nrecv_desc * sizeof (caddr_t));
			kmem_free(dnetp->rx_buf_paddr,
			    dnetp->nrecv_desc * sizeof (uint32_t));
			dnetp->rx_buf_vaddr = NULL;
			dnetp->rx_buf_paddr = NULL;
		}
		dnetp->nrecv_desc = 0;
	}

	if (dnetp->setup_buf_vaddr != NULL) {
		ddi_dma_mem_free(&dnetp->setup_buf_acchdl);
		dnetp->setup_buf_vaddr = NULL;
	}

	/*
	 * Unbind and destroy the DMA handles.  The unbind may fail for a
	 * handle that was allocated but never bound; the result is
	 * deliberately ignored.
	 */
	if (dnetp->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(dnetp->dma_handle);
		ddi_dma_free_handle(&dnetp->dma_handle);
		dnetp->dma_handle = NULL;
	}

	if (dnetp->dma_handle_tx != NULL) {
		(void) ddi_dma_unbind_handle(dnetp->dma_handle_tx);
		ddi_dma_free_handle(&dnetp->dma_handle_tx);
		dnetp->dma_handle_tx = NULL;
	}

	if (dnetp->dma_handle_txdesc != NULL) {
		(void) ddi_dma_unbind_handle(dnetp->dma_handle_txdesc);
		ddi_dma_free_handle(&dnetp->dma_handle_txdesc);
		dnetp->dma_handle_txdesc = NULL;
	}

	if (dnetp->dma_handle_setbuf != NULL) {
		(void) ddi_dma_unbind_handle(dnetp->dma_handle_setbuf);
		ddi_dma_free_handle(&dnetp->dma_handle_setbuf);
		dnetp->dma_handle_setbuf = NULL;
	}

}

/*
 * Initialize transmit and receive descriptors.
 *
 * Resets every TX descriptor to empty (releasing any mblk still attached)
 * and arms every RX descriptor with its receive buffer, splitting a buffer
 * across buffer1/buffer2 when it straddles a page boundary.
 *
 * NOTE(review): the trailing [i - 1] accesses assume nxmit_desc and
 * nrecv_desc are both non-zero -- presumably guaranteed by
 * dnet_alloc_bufs(); confirm.
 */
static void
dnet_init_txrx_bufs(struct dnetinstance *dnetp)
{
	int i;

	/*
	 * Initialize all the Tx descriptors
	 */
	for (i = 0; i < dnetp->nxmit_desc; i++) {
		/*
		 * We may be resetting the device due to errors,
		 * so free up any streams message buffer unclaimed.
		 */
		if (dnetp->tx_msgbufp[i] != NULL) {
			freemsg(dnetp->tx_msgbufp[i]);
			dnetp->tx_msgbufp[i] = NULL;
		}
		*(uint32_t *)&dnetp->tx_desc[i].desc0 = 0;
		*(uint32_t *)&dnetp->tx_desc[i].desc1 = 0;
		dnetp->tx_desc[i].buffer1 = 0;
		dnetp->tx_desc[i].buffer2 = 0;
	}
	dnetp->tx_desc[i - 1].desc1.end_of_ring = 1;

	/*
	 * Initialize the Rx descriptors
	 */
	for (i = 0; i < dnetp->nrecv_desc; i++) {
		uint32_t end_paddr;
		*(uint32_t *)&dnetp->rx_desc[i].desc0 = 0;
		*(uint32_t *)&dnetp->rx_desc[i].desc1 = 0;
		/* give the descriptor to the hardware */
		dnetp->rx_desc[i].desc0.own = 1;
		dnetp->rx_desc[i].desc1.buffer_size1 = rx_buf_size;
		dnetp->rx_desc[i].buffer1 = dnetp->rx_buf_paddr[i];
		dnetp->rx_desc[i].buffer2 = 0;
		end_paddr = dnetp->rx_buf_paddr[i]+rx_buf_size-1;

		if ((dnetp->rx_desc[i].buffer1 & ~dnetp->pgmask) !=
		    (end_paddr & ~dnetp->pgmask)) {
			/* discontiguous */
			dnetp->rx_desc[i].buffer2 = end_paddr&~dnetp->pgmask;
			dnetp->rx_desc[i].desc1.buffer_size2 =
			    (end_paddr & dnetp->pgmask) + 1;
			dnetp->rx_desc[i].desc1.buffer_size1 =
			    rx_buf_size-dnetp->rx_desc[i].desc1.buffer_size2;
		}
	}
	dnetp->rx_desc[i - 1].desc1.end_of_ring = 1;
}

/*
 * Claim the next free TX descriptor for the caller.  Called with intrlock
 * held; takes and releases txlock internally.  Returns SUCCESS with
 * tx_current_desc advanced past the claimed slot, or FAILURE if the ring
 * is full or the slot is unexpectedly still owned by the chip.
 */
static int
alloc_descriptor(struct dnetinstance *dnetp)
{
	int index;
	struct tx_desc_type *ring = dnetp->tx_desc;

	ASSERT(MUTEX_HELD(&dnetp->intrlock));
alloctop:
	mutex_enter(&dnetp->txlock);
	index = dnetp->tx_current_desc;

	dnet_reclaim_Tx_desc(dnetp);

	/* we do have free descriptors, right? */
	if (dnetp->free_desc <= 0) {
#ifdef DNETDEBUG
		/* NOTE(review): DNETRECV flag used on a transmit path */
		if (dnetdebug & DNETRECV)
			cmn_err(CE_NOTE, "dnet: Ring buffer is full");
#endif
		mutex_exit(&dnetp->txlock);
		return (FAILURE);
	}

	/* sanity, make sure the next descriptor is free for use (should be) */
	if (ring[index].desc0.own) {
#ifdef DNETDEBUG
		if (dnetdebug & DNETRECV)
			cmn_err(CE_WARN,
			    "dnet: next descriptor is not free for use");
#endif
		mutex_exit(&dnetp->txlock);
		return (FAILURE);
	}
	if (dnetp->need_saddr) {
		/* drop txlock before reprogramming the station address */
		mutex_exit(&dnetp->txlock);
		/* XXX function return value ignored */
		if (!dnetp->suspended)
			(void) dnet_set_addr(dnetp);
		goto alloctop;
	}

	*(uint32_t *)&ring[index].desc0 = 0;	/* init descs */
	*(uint32_t *)&ring[index].desc1 &= DNET_END_OF_RING;

	/* hardware will own this descriptor when poll activated */
	dnetp->free_desc--;

	/* point to next free descriptor to be used */
	dnetp->tx_current_desc = NextTXIndex(index);

#ifdef DNET_NOISY
	cmn_err(CE_WARN, "sfree 0x%x, transmitted 0x%x, tx_current 0x%x",
	    dnetp->free_desc, dnetp->transmitted_desc, dnetp->tx_current_desc);
#endif
	mutex_exit(&dnetp->txlock);
	return (SUCCESS);
}

/*
 * dnet_reclaim_Tx_desc() - called with txlock held.
 *
 * Walk forward from transmitted_desc over descriptors the chip has given
 * back (own bit clear), recording TX error statistics from the last
 * descriptor of each errored frame and freeing the mblk attached to each
 * reclaimed slot.
 */
static void
dnet_reclaim_Tx_desc(struct dnetinstance *dnetp)
{
	struct tx_desc_type *desc = dnetp->tx_desc;
	int index;

	ASSERT(MUTEX_HELD(&dnetp->txlock));

	index = dnetp->transmitted_desc;
	while (((dnetp->free_desc == 0) || (index != dnetp->tx_current_desc)) &&
	    !(desc[index].desc0.own)) {
		/*
		 * Check for Tx Error that gets set
		 * in the last desc.
		 */
		if (desc[index].desc1.setup_packet == 0 &&
		    desc[index].desc1.last_desc &&
		    desc[index].desc0.err_summary)
			update_tx_stats(dnetp, index);

		/*
		 * If we have used the streams message buffer for this
		 * descriptor then free up the message now.
		 */
		if (dnetp->tx_msgbufp[index] != NULL) {
			freemsg(dnetp->tx_msgbufp[index]);
			dnetp->tx_msgbufp[index] = NULL;
		}
		dnetp->free_desc++;
		index = (index+1) % dnetp->max_tx_desc;
	}

	dnetp->transmitted_desc = index;
}

/*
 * Receive buffer allocation/freeing routines.
 *
 * There is a common pool of receive buffers shared by all dnet instances.
 *
 * XXX NEEDSWORK
 *
 * We arbitrarily allocate twice as many receive buffers as
 * receive descriptors because we use the buffers for streams
 * messages to pass the packets up the stream. We should
 * instead have initialized constants reflecting
 * MAX_RX_BUF_2104x and MAX_RX_BUF_2114x, and we should also
 * probably have a total maximum for the free pool, so that we
 * don't get out of hand when someone puts in an 8-port board.
 * The maximum for the entire pool should be the total number
 * of descriptors for all attached instances together, plus the
 * total maximum for the free pool. This maximum would only be
 * reached after some number of instances allocate buffers:
 * each instance would add (max_rx_buf-max_rx_desc) to the free
 * pool.
2547 */ 2548 2549 static struct rbuf_list *rbuf_usedlist_head; 2550 static struct rbuf_list *rbuf_freelist_head; 2551 static struct rbuf_list *rbuf_usedlist_end; /* last buffer allocated */ 2552 2553 static int rbuf_freebufs; /* no. of free buffers in the pool */ 2554 static int rbuf_pool_size; /* total no. of buffers in the pool */ 2555 2556 /* initialize/add 'nbufs' buffers to the rbuf pool */ 2557 /* ARGSUSED */ 2558 static int 2559 dnet_rbuf_init(dev_info_t *dip, int nbufs) 2560 { 2561 int i; 2562 struct rbuf_list *rp; 2563 ddi_dma_cookie_t cookie; 2564 uint_t ncookies; 2565 size_t len; 2566 2567 mutex_enter(&dnet_rbuf_lock); 2568 2569 /* allocate buffers and add them to the pool */ 2570 for (i = 0; i < nbufs; i++) { 2571 /* allocate rbuf_list element */ 2572 rp = kmem_zalloc(sizeof (struct rbuf_list), KM_SLEEP); 2573 if (ddi_dma_alloc_handle(dip, &dma_attr_rb, DDI_DMA_SLEEP, 2574 0, &rp->rbuf_dmahdl) != DDI_SUCCESS) 2575 goto fail_kfree; 2576 2577 /* allocate dma memory for the buffer */ 2578 if (ddi_dma_mem_alloc(rp->rbuf_dmahdl, rx_buf_size, &accattr, 2579 DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0, 2580 &rp->rbuf_vaddr, &len, 2581 &rp->rbuf_acchdl) != DDI_SUCCESS) 2582 goto fail_freehdl; 2583 2584 if (ddi_dma_addr_bind_handle(rp->rbuf_dmahdl, NULL, 2585 rp->rbuf_vaddr, len, DDI_DMA_RDWR | DDI_DMA_STREAMING, 2586 DDI_DMA_SLEEP, NULL, &cookie, 2587 &ncookies) != DDI_DMA_MAPPED) 2588 goto fail_free; 2589 2590 if (ncookies > 2) 2591 goto fail_unbind; 2592 if (ncookies == 1) { 2593 rp->rbuf_endpaddr = 2594 cookie.dmac_address + rx_buf_size - 1; 2595 } else { 2596 ddi_dma_nextcookie(rp->rbuf_dmahdl, &cookie); 2597 rp->rbuf_endpaddr = 2598 cookie.dmac_address + cookie.dmac_size - 1; 2599 } 2600 rp->rbuf_paddr = cookie.dmac_address; 2601 2602 rp->rbuf_next = rbuf_freelist_head; 2603 rbuf_freelist_head = rp; 2604 rbuf_pool_size++; 2605 rbuf_freebufs++; 2606 } 2607 2608 mutex_exit(&dnet_rbuf_lock); 2609 return (0); 2610 fail_unbind: 2611 (void) 
ddi_dma_unbind_handle(rp->rbuf_dmahdl); 2612 fail_free: 2613 ddi_dma_mem_free(&rp->rbuf_acchdl); 2614 fail_freehdl: 2615 ddi_dma_free_handle(&rp->rbuf_dmahdl); 2616 fail_kfree: 2617 kmem_free(rp, sizeof (struct rbuf_list)); 2618 2619 mutex_exit(&dnet_rbuf_lock); 2620 return (-1); 2621 } 2622 2623 /* 2624 * Try to free up all the rbufs in the pool. Returns 0 if it frees up all 2625 * buffers. The buffers in the used list are considered busy so these 2626 * buffers are not freed. 2627 */ 2628 static int 2629 dnet_rbuf_destroy() 2630 { 2631 struct rbuf_list *rp, *next; 2632 2633 mutex_enter(&dnet_rbuf_lock); 2634 2635 for (rp = rbuf_freelist_head; rp; rp = next) { 2636 next = rp->rbuf_next; 2637 ddi_dma_mem_free(&rp->rbuf_acchdl); 2638 (void) ddi_dma_unbind_handle(rp->rbuf_dmahdl); 2639 kmem_free(rp, sizeof (struct rbuf_list)); 2640 rbuf_pool_size--; 2641 rbuf_freebufs--; 2642 } 2643 rbuf_freelist_head = NULL; 2644 2645 if (rbuf_pool_size) { /* pool is still not empty */ 2646 mutex_exit(&dnet_rbuf_lock); 2647 return (-1); 2648 } 2649 mutex_exit(&dnet_rbuf_lock); 2650 return (0); 2651 } 2652 static struct rbuf_list * 2653 dnet_rbuf_alloc(dev_info_t *dip, int cansleep) 2654 { 2655 struct rbuf_list *rp; 2656 size_t len; 2657 ddi_dma_cookie_t cookie; 2658 uint_t ncookies; 2659 2660 mutex_enter(&dnet_rbuf_lock); 2661 2662 if (rbuf_freelist_head == NULL) { 2663 2664 if (!cansleep) { 2665 mutex_exit(&dnet_rbuf_lock); 2666 return (NULL); 2667 } 2668 2669 /* allocate rbuf_list element */ 2670 rp = kmem_zalloc(sizeof (struct rbuf_list), KM_SLEEP); 2671 if (ddi_dma_alloc_handle(dip, &dma_attr_rb, DDI_DMA_SLEEP, 2672 0, &rp->rbuf_dmahdl) != DDI_SUCCESS) 2673 goto fail_kfree; 2674 2675 /* allocate dma memory for the buffer */ 2676 if (ddi_dma_mem_alloc(rp->rbuf_dmahdl, rx_buf_size, &accattr, 2677 DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0, 2678 &rp->rbuf_vaddr, &len, 2679 &rp->rbuf_acchdl) != DDI_SUCCESS) 2680 goto fail_freehdl; 2681 2682 if (ddi_dma_addr_bind_handle(rp->rbuf_dmahdl, 
NULL, 2683 rp->rbuf_vaddr, len, DDI_DMA_RDWR | DDI_DMA_STREAMING, 2684 DDI_DMA_SLEEP, NULL, &cookie, 2685 &ncookies) != DDI_DMA_MAPPED) 2686 goto fail_free; 2687 2688 if (ncookies > 2) 2689 goto fail_unbind; 2690 if (ncookies == 1) { 2691 rp->rbuf_endpaddr = 2692 cookie.dmac_address + rx_buf_size - 1; 2693 } else { 2694 ddi_dma_nextcookie(rp->rbuf_dmahdl, &cookie); 2695 rp->rbuf_endpaddr = 2696 cookie.dmac_address + cookie.dmac_size - 1; 2697 } 2698 rp->rbuf_paddr = cookie.dmac_address; 2699 2700 rbuf_freelist_head = rp; 2701 rbuf_pool_size++; 2702 rbuf_freebufs++; 2703 } 2704 2705 /* take the buffer from the head of the free list */ 2706 rp = rbuf_freelist_head; 2707 rbuf_freelist_head = rbuf_freelist_head->rbuf_next; 2708 2709 /* update the used list; put the entry at the end */ 2710 if (rbuf_usedlist_head == NULL) 2711 rbuf_usedlist_head = rp; 2712 else 2713 rbuf_usedlist_end->rbuf_next = rp; 2714 rp->rbuf_next = NULL; 2715 rbuf_usedlist_end = rp; 2716 rbuf_freebufs--; 2717 2718 mutex_exit(&dnet_rbuf_lock); 2719 2720 return (rp); 2721 fail_unbind: 2722 (void) ddi_dma_unbind_handle(rp->rbuf_dmahdl); 2723 fail_free: 2724 ddi_dma_mem_free(&rp->rbuf_acchdl); 2725 fail_freehdl: 2726 ddi_dma_free_handle(&rp->rbuf_dmahdl); 2727 fail_kfree: 2728 kmem_free(rp, sizeof (struct rbuf_list)); 2729 mutex_exit(&dnet_rbuf_lock); 2730 return (NULL); 2731 } 2732 2733 static void 2734 dnet_rbuf_free(caddr_t vaddr) 2735 { 2736 struct rbuf_list *rp, *prev; 2737 2738 ASSERT(vaddr != NULL); 2739 ASSERT(rbuf_usedlist_head != NULL); 2740 2741 mutex_enter(&dnet_rbuf_lock); 2742 2743 /* find the entry in the used list */ 2744 for (prev = rp = rbuf_usedlist_head; rp; rp = rp->rbuf_next) { 2745 if (rp->rbuf_vaddr == vaddr) 2746 break; 2747 prev = rp; 2748 } 2749 2750 if (rp == NULL) { 2751 cmn_err(CE_WARN, "DNET: rbuf_free: bad addr 0x%p", 2752 (void *)vaddr); 2753 mutex_exit(&dnet_rbuf_lock); 2754 return; 2755 } 2756 2757 /* update the used list and put the buffer back in the free list */ 
2758 if (rbuf_usedlist_head != rp) { 2759 prev->rbuf_next = rp->rbuf_next; 2760 if (rbuf_usedlist_end == rp) 2761 rbuf_usedlist_end = prev; 2762 } else { 2763 rbuf_usedlist_head = rp->rbuf_next; 2764 if (rbuf_usedlist_end == rp) 2765 rbuf_usedlist_end = NULL; 2766 } 2767 rp->rbuf_next = rbuf_freelist_head; 2768 rbuf_freelist_head = rp; 2769 rbuf_freebufs++; 2770 2771 mutex_exit(&dnet_rbuf_lock); 2772 } 2773 2774 /* 2775 * Free the receive buffer used in a stream's message block allocated 2776 * thru desballoc(). 2777 */ 2778 static void 2779 dnet_freemsg_buf(struct free_ptr *frp) 2780 { 2781 dnet_rbuf_free((caddr_t)frp->buf); /* buffer goes back to the pool */ 2782 kmem_free(frp, sizeof (*frp)); /* free up the free_rtn structure */ 2783 } 2784 2785 /* 2786 * ========== SROM Read Routines ========== 2787 */ 2788 2789 /* 2790 * The following code gets the SROM information, either by reading it 2791 * from the device or, failing that, by reading a property. 2792 */ 2793 static int 2794 dnet_read_srom(dev_info_t *devinfo, int board_type, ddi_acc_handle_t io_handle, 2795 caddr_t io_reg, uchar_t *vi, int maxlen) 2796 { 2797 int all_ones, zerocheck, i; 2798 2799 /* 2800 * Load SROM into vendor_info 2801 */ 2802 if (board_type == DEVICE_ID_21040) 2803 dnet_read21040addr(devinfo, io_handle, io_reg, vi, &maxlen); 2804 else 2805 /* 21041/21140 serial rom */ 2806 dnet_read21140srom(io_handle, io_reg, vi, maxlen); 2807 /* 2808 * If the dumpsrom property is present in the conf file, print 2809 * the contents of the SROM to the console 2810 */ 2811 if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS, 2812 "dumpsrom", 0)) 2813 dnet_dumpbin("SROM", vi, 1, maxlen); 2814 2815 for (zerocheck = i = 0, all_ones = 0xff; i < maxlen; i++) { 2816 zerocheck |= vi[i]; 2817 all_ones &= vi[i]; 2818 } 2819 if (zerocheck == 0 || all_ones == 0xff) { 2820 return (get_alternative_srom_image(devinfo, vi, maxlen)); 2821 } else { 2822 #ifdef BUG_4010796 2823 set_alternative_srom_image(devinfo, 
vi, maxlen); 2824 #endif 2825 return (0); /* Primary */ 2826 } 2827 } 2828 2829 /* 2830 * The function reads the ethernet address of the 21040 adapter 2831 */ 2832 static void 2833 dnet_read21040addr(dev_info_t *dip, ddi_acc_handle_t io_handle, caddr_t io_reg, 2834 uchar_t *addr, int *len) 2835 { 2836 uint32_t val; 2837 int i; 2838 2839 /* No point reading more than the ethernet address */ 2840 *len = ddi_getprop(DDI_DEV_T_ANY, dip, 2841 DDI_PROP_DONTPASS, macoffset_propname, 0) + ETHERADDRL; 2842 2843 /* Reset ROM pointer */ 2844 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 0); 2845 for (i = 0; i < *len; i++) { 2846 do { 2847 val = ddi_get32(io_handle, 2848 REG32(io_reg, ETHER_ROM_REG)); 2849 } while (val & 0x80000000); 2850 addr[i] = val & 0xFF; 2851 } 2852 } 2853 2854 #define drv_nsecwait(x) drv_usecwait(((x)+999)/1000) /* XXX */ 2855 2856 /* 2857 * The function reads the SROM of the 21140 adapter 2858 */ 2859 static void 2860 dnet_read21140srom(ddi_acc_handle_t io_handle, caddr_t io_reg, uchar_t *addr, 2861 int maxlen) 2862 { 2863 uint32_t i, j; 2864 uint32_t dout; 2865 uint16_t word; 2866 uint8_t rom_addr; 2867 uint8_t bit; 2868 2869 2870 rom_addr = 0; 2871 for (i = 0; i < maxlen; i += 2) { 2872 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2873 READ_OP | SEL_ROM); 2874 drv_nsecwait(30); 2875 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2876 READ_OP | SEL_ROM | SEL_CHIP); 2877 drv_nsecwait(50); 2878 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2879 READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK); 2880 drv_nsecwait(250); 2881 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2882 READ_OP | SEL_ROM | SEL_CHIP); 2883 drv_nsecwait(100); 2884 2885 /* command */ 2886 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2887 READ_OP | SEL_ROM | SEL_CHIP | DATA_IN); 2888 drv_nsecwait(150); 2889 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2890 READ_OP | SEL_ROM | SEL_CHIP | DATA_IN | SEL_CLK); 2891 drv_nsecwait(250); 2892 ddi_put32(io_handle, 
REG32(io_reg, ETHER_ROM_REG), 2893 READ_OP | SEL_ROM | SEL_CHIP | DATA_IN); 2894 drv_nsecwait(250); 2895 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2896 READ_OP | SEL_ROM | SEL_CHIP | DATA_IN | SEL_CLK); 2897 drv_nsecwait(250); 2898 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2899 READ_OP | SEL_ROM | SEL_CHIP | DATA_IN); 2900 drv_nsecwait(100); 2901 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2902 READ_OP | SEL_ROM | SEL_CHIP); 2903 drv_nsecwait(150); 2904 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2905 READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK); 2906 drv_nsecwait(250); 2907 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2908 READ_OP | SEL_ROM | SEL_CHIP); 2909 drv_nsecwait(100); 2910 2911 /* Address */ 2912 for (j = HIGH_ADDRESS_BIT; j >= 1; j >>= 1) { 2913 bit = (rom_addr & j) ? DATA_IN : 0; 2914 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2915 READ_OP | SEL_ROM | SEL_CHIP | bit); 2916 drv_nsecwait(150); 2917 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2918 READ_OP | SEL_ROM | SEL_CHIP | bit | SEL_CLK); 2919 drv_nsecwait(250); 2920 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2921 READ_OP | SEL_ROM | SEL_CHIP | bit); 2922 drv_nsecwait(100); 2923 } 2924 drv_nsecwait(150); 2925 2926 /* Data */ 2927 word = 0; 2928 for (j = 0x8000; j >= 1; j >>= 1) { 2929 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2930 READ_OP | SEL_ROM | SEL_CHIP | SEL_CLK); 2931 drv_nsecwait(100); 2932 dout = ddi_get32(io_handle, 2933 REG32(io_reg, ETHER_ROM_REG)); 2934 drv_nsecwait(150); 2935 if (dout & DATA_OUT) 2936 word |= j; 2937 ddi_put32(io_handle, 2938 REG32(io_reg, ETHER_ROM_REG), 2939 READ_OP | SEL_ROM | SEL_CHIP); 2940 drv_nsecwait(250); 2941 } 2942 addr[i] = (word & 0x0000FF); 2943 addr[i + 1] = (word >> 8); 2944 rom_addr++; 2945 ddi_put32(io_handle, REG32(io_reg, ETHER_ROM_REG), 2946 READ_OP | SEL_ROM); 2947 drv_nsecwait(100); 2948 } 2949 } 2950 2951 2952 /* 2953 * XXX NEEDSWORK 2954 * 2955 * Some lame multiport cards have only 
 * one SROM, which can be accessed
 * only from the "first" 21x4x chip, whichever that one is. If we can't
 * get at our SROM, we look for its contents in a property instead, which
 * we rely on the bootstrap to have properly set.
 * #ifdef BUG_4010796
 * We also have a hack to try to set it ourselves, when the "first" port
 * attaches, if it has not already been properly set. However, this method
 * is not reliable, since it makes the unwarranted assumption that the
 * "first" port will attach first.
 * #endif
 */

/*
 * Fetch the SROM image from the "DNET_SROM" property on this node or its
 * parent.  Returns -1 if no property is found; otherwise returns this
 * chip's port number relative to the primary port (0 = primary), derived
 * from the PCI device numbers when BUG_4010796 support is compiled in.
 */
static int
get_alternative_srom_image(dev_info_t *devinfo, uchar_t *vi, int len)
{
	int l = len;

	/* ddi_getlongprop_buf overwrites 'len'; retry the parent with l */
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "DNET_SROM", (caddr_t)vi, &len) != DDI_PROP_SUCCESS &&
	    (len = l) && ddi_getlongprop_buf(DDI_DEV_T_ANY,
	    ddi_get_parent(devinfo), DDI_PROP_DONTPASS, "DNET_SROM",
	    (caddr_t)vi, &len) != DDI_PROP_SUCCESS)
		return (-1);	/* Can't find it! */

	/*
	 * The return value from this routine specifies which port number
	 * we are. The primary port is denoted port 0. On a QUAD card we
	 * should return 1, 2, and 3 from this routine. The return value
	 * is used to modify the ethernet address from the SROM data.
	 */

#ifdef BUG_4010796
	{
	/*
	 * For the present, we remember the device number of our primary
	 * sibling and hope we and our other siblings are consecutively
	 * numbered up from there. In the future perhaps the bootstrap
	 * will pass us the necessary information telling us which physical
	 * port we really are.
	 */
	pci_regspec_t *assignp;
	int assign_len;
	int devnum;
	int primary_devnum;

	primary_devnum = ddi_getprop(DDI_DEV_T_ANY, devinfo, 0,
	    "DNET_DEVNUM", -1);
	if (primary_devnum == -1)
		return (1);	/* XXX NEEDSWORK -- We have no better idea */

	if ((ddi_getlongprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&assignp,
	    &assign_len)) != DDI_PROP_SUCCESS)
		return (1);	/* XXX NEEDSWORK -- We have no better idea */

	devnum = PCI_REG_DEV_G(assignp->pci_phys_hi);
	kmem_free(assignp, assign_len);
	return (devnum - primary_devnum);
	}
#else
	return (1);	/* XXX NEEDSWORK -- We have no better idea */
#endif
}


#ifdef BUG_4010796
/*
 * Publish this (readable) SROM image as the "DNET_SROM" property on the
 * parent node, along with our PCI device number as "DNET_DEVNUM", so
 * sibling ports whose SROM is unreadable can recover it (see the comment
 * block above).  Does nothing if the property already exists.
 */
static void
set_alternative_srom_image(dev_info_t *devinfo, uchar_t *vi, int len)
{
	int proplen;
	pci_regspec_t *assignp;
	int assign_len;
	int devnum;

	if (ddi_getproplen(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "DNET_SROM", &proplen) == DDI_PROP_SUCCESS ||
	    ddi_getproplen(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
	    DDI_PROP_DONTPASS, "DNET_SROM", &proplen) == DDI_PROP_SUCCESS)
		return;	/* Already done! */

	/* function return value ignored */
	(void) ddi_prop_update_byte_array(DDI_DEV_T_NONE,
	    ddi_get_parent(devinfo), "DNET_SROM", (uchar_t *)vi, len);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devinfo,
	    "DNET_HACK", "hack");

	if ((ddi_getlongprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&assignp,
	    &assign_len)) == DDI_PROP_SUCCESS) {
		devnum = PCI_REG_DEV_G(assignp->pci_phys_hi);
		kmem_free(assignp, assign_len);
		/* function return value ignored */
		(void) ddi_prop_update_int(DDI_DEV_T_NONE,
		    ddi_get_parent(devinfo), "DNET_DEVNUM", devnum);
	}
}
#endif

/*
 * ========== SROM Parsing Routines ==========
 */

/*
 * Sanity-check an SROM image.  Returns nonzero if the image looks valid.
 * Versions 1 and 3 are accepted on must-be-zero fields alone (changing
 * that would require retesting every supported card); version 4 images
 * are additionally verified with the CRC-8 (polynomial 0x07) over the
 * ID block, per the Intel "21x4 Serial ROM Format" document.
 */
static int
check_srom_valid(uchar_t *vi)
{
	int word, bit;
	uint8_t crc;
	uint16_t *wvi;	/* word16 pointer to vendor info */
	uint16_t bitval;

	/* verify that the number of controllers on the card is within range */
	if (vi[SROM_ADAPTER_CNT] < 1 || vi[SROM_ADAPTER_CNT] > MAX_ADAPTERS)
		return (0);

	/*
	 * version 1 and 3 of this card did not check the id block CRC value
	 * and this can't be changed without retesting every supported card
	 *
	 * however version 4 of the SROM can have this test applied
	 * without fear of breaking something that used to work.
	 * the CRC algorithm is taken from the Intel document
	 *	"21x4 Serial ROM Format"
	 *	version 4.09
	 *	3-Mar-1999
	 */

	switch (vi[SROM_VERSION]) {
	case 1:
	    /* fallthru */
	case 3:
		return (vi[SROM_MBZ] == 0 &&	/* must be zero */
		    vi[SROM_MBZ2] == 0 &&	/* must be zero */
		    vi[SROM_MBZ3] == 0);	/* must be zero */

	case 4:
		wvi = (uint16_t *)vi;
		crc = 0xff;
		/* CRC-8 over bits 0..(8*16+7) of the first 9 words */
		for (word = 0; word < 9; word++)
			for (bit = 15; bit >= 0; bit--) {
				/* stop just before the stored CRC byte */
				if (word == 8 && bit == 7)
					return (crc == vi[16]);
				bitval =
				    ((wvi[word] >> bit) & 1) ^ ((crc >> 7) & 1);
				crc <<= 1;
				if (bitval == 1) {
					crc ^= 7;
				}
			}
		/* FALLTHROUGH */

	default:
		return (0);
	}
}

/*
 * ========== Active Media Determination Routines ==========
 */

/*
 * Nonzero if 'media' is a full-duplex medium.
 * This routine is also called for V3 Compact and extended type 0 SROMs.
 */
static int
is_fdmedia(int media)
{
	if (media == MEDIA_TP_FD || media == MEDIA_SYM_SCR_FD)
		return (1);
	else
		return (0);
}

/*
 * "Linkset" is used to merge media that use the same link test check. So,
 * if the TP link is added to the linkset, so is the TP Full duplex link.
 * Used to avoid checking the same link status twice.
 */
static void
linkset_add(uint32_t *set, int media)
{
	if (media == MEDIA_TP_FD || media == MEDIA_TP)
		*set |= (1UL<<MEDIA_TP_FD) | (1UL<<MEDIA_TP);
	else if (media == MEDIA_SYM_SCR_FD || media == MEDIA_SYM_SCR)
		*set |= (1UL<<MEDIA_SYM_SCR_FD) | (1UL<<MEDIA_SYM_SCR);
	else *set |= 1UL<<media;
}

/* Nonzero if 'media' is already present in the linkset */
static int
linkset_isset(uint32_t linkset, int media)
{
	return (((1UL<<media) & linkset) ? 1:0);
}

/*
 * The following code detects which Media is connected for 21041/21140
 * Expect to change this code to support new 21140 variants.
 * find_active_media() - called with intrlock held.
 *
 * Walks the SROM media blocks for the current leaf, testing each medium
 * (link-sense bits, test packets, or MII link state, depending on block
 * type) and leaves dnetp->selected_media_block pointing at the chosen
 * medium, with the hardware programmed for it via setup_block().
 */
static void
find_active_media(struct dnetinstance *dnetp)
{
	int i;
	media_block_t *block;
	media_block_t *best_allowed = NULL;
	media_block_t *hd_found = NULL;
	media_block_t *fd_found = NULL;
	LEAF_FORMAT *leaf = &dnetp->sr.leaf[dnetp->leaf];
	uint32_t checked = 0, links_up = 0;

	ASSERT(MUTEX_HELD(&dnetp->intrlock));

	dnetp->selected_media_block = leaf->default_block;

	/* a PHY is present: prefer the MII block and start the port monitor */
	if (dnetp->phyaddr != -1) {
		dnetp->selected_media_block = leaf->mii_block;
		setup_block(dnetp);

		if (ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo,
		    DDI_PROP_DONTPASS, "portmon", 1)) {
			/* XXX return value ignored */
			(void) mii_start_portmon(dnetp->mii, dnet_mii_link_cb,
			    &dnetp->intrlock);
			/*
			 * If the port monitor detects the link is already
			 * up, there is no point going through the rest of the
			 * link sense
			 */
			if (dnetp->mii_up) {
				return;
			}
		}
	}

	/*
	 * Media is searched for in order of Precedence. This DEC SROM spec
	 * tells us that the first media entry in the SROM is the lowest
	 * precedence and should be checked last. This is why we go to the last
	 * Media block and work back to the beginning.
	 *
	 * However, some older SROMs (Cogent EM110's etc.) have this the wrong
	 * way around. As a result, following the SROM spec would result in a
	 * 10 link being chosen over a 100 link if both media are available.
	 * So we continue trying the media until we have at least tried the
	 * DEFAULT media.
	 */

	/* Search for an active medium, and select it */
	for (block = leaf->block + leaf->block_count - 1;
	    block >= leaf->block; block--) {
		int media = block->media_code;

		/* User settings disallow selection of this block */
		if (dnetp->disallowed_media & (1UL<<media))
			continue;

		/* We may not be able to pick the default */
		if (best_allowed == NULL || block == leaf->default_block)
			best_allowed = block;
#ifdef DEBUG
		if (dnetdebug & DNETSENSE)
			cmn_err(CE_NOTE, "Testing %s medium (block type %d)",
			    media_str[media], block->type);
#endif

		dnetp->selected_media_block = block;
		switch (block->type) {

		case 2: /* SIA Media block: Best we can do is send a packet */
			setup_block(dnetp);
			if (send_test_packet(dnetp)) {
				if (!is_fdmedia(media))
					return;
				if (!fd_found)
					fd_found = block;
			}
			break;

		/* SYM/SCR or TP block: Use the link-sense bits */
		case 0:
			/* only link-test each linkset once */
			if (!linkset_isset(checked, media)) {
				linkset_add(&checked, media);
				if (((media == MEDIA_BNC ||
				    media == MEDIA_AUI) &&
				    send_test_packet(dnetp)) ||
				    dnet_link_sense(dnetp))
					linkset_add(&links_up, media);
			}

			if (linkset_isset(links_up, media)) {
				/*
				 * Half Duplex is *always* the favoured media.
				 * Full Duplex can be set and forced via the
				 * conf file.
				 */
				if (!is_fdmedia(media) &&
				    dnetp->selected_media_block ==
				    leaf->default_block) {
					/*
					 * Cogent cards have the media in
					 * opposite order to the spec.,
					 * this code forces the media test to
					 * keep going until the default media
					 * is tested.
					 *
					 * In Cogent case, 10, 10FD, 100FD, 100
					 * 100 is the default but 10 could have
					 * been detected and would have been
					 * chosen but now we force it through to
					 * 100.
					 */
					setup_block(dnetp);
					return;
				} else if (!is_fdmedia(media)) {
					/*
					 * This allows all the others to work
					 * properly by remembering the media
					 * that works and not defaulting to
					 * a FD link.
					 */
					if (hd_found == NULL)
						hd_found = block;
				} else if (fd_found == NULL) {
					/*
					 * No media have already been found
					 * so far, this is FD, it works so
					 * remember it and if no others are
					 * detected, use it.
					 */
					fd_found = block;
				}
			}
			break;

		/*
		 * MII block: May take up to a second or so to settle if
		 * setup causes a PHY reset
		 */
		case 1: case 3:
			setup_block(dnetp);
			/* poll the PHY for link, ~1.5s total (10 x 150ms) */
			for (i = 0; ; i++) {
				if (mii_linkup(dnetp->mii, dnetp->phyaddr)) {
					/* XXX function return value ignored */
					(void) mii_getspeed(dnetp->mii,
					    dnetp->phyaddr,
					    &dnetp->mii_speed,
					    &dnetp->mii_duplex);
					dnetp->mii_up = 1;
					leaf->mii_block = block;
					return;
				}
				if (i == 10)
					break;
				delay(drv_usectohz(150000));
			}
			dnetp->mii_up = 0;
			break;
		}
	} /* for loop */
	/* nothing conclusively up: prefer half duplex, then full duplex */
	if (hd_found) {
		dnetp->selected_media_block = hd_found;
	} else if (fd_found) {
		dnetp->selected_media_block = fd_found;
	} else {
		if (best_allowed == NULL)
			best_allowed = leaf->default_block;
		dnetp->selected_media_block = best_allowed;
		cmn_err(CE_WARN, "!dnet: Default media selected\n");
	}
	setup_block(dnetp);
}

/*
 * Do anything necessary to select the selected_media_block.
 * setup_block() - called with intrlock held.
3325 */ 3326 static void 3327 setup_block(struct dnetinstance *dnetp) 3328 { 3329 dnet_reset_board(dnetp); 3330 dnet_init_board(dnetp); 3331 /* XXX function return value ignored */ 3332 (void) dnet_start(dnetp); 3333 } 3334 3335 /* dnet_link_sense() - called with intrlock held */ 3336 static int 3337 dnet_link_sense(struct dnetinstance *dnetp) 3338 { 3339 /* 3340 * This routine makes use of the command word from the srom config. 3341 * Details of the auto-sensing information contained in this can 3342 * be found in the "Digital Semiconductor 21X4 Serial ROM Format v3.03" 3343 * spec. Section 4.3.2.1, and 4.5.2.1.3 3344 */ 3345 media_block_t *block = dnetp->selected_media_block; 3346 uint32_t link, status, mask, polarity; 3347 int settletime, stabletime, waittime, upsamples; 3348 int delay_100, delay_10; 3349 3350 3351 ASSERT(MUTEX_HELD(&dnetp->intrlock)); 3352 /* Don't autosense if the medium does not support it */ 3353 if (block->command & (1 << 15)) { 3354 /* This should be the default block */ 3355 if (block->command & (1UL<<14)) 3356 dnetp->sr.leaf[dnetp->leaf].default_block = block; 3357 return (0); 3358 } 3359 3360 delay_100 = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo, 3361 DDI_PROP_DONTPASS, "autosense-delay-100", 2000); 3362 3363 delay_10 = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo, 3364 DDI_PROP_DONTPASS, "autosense-delay-10", 400); 3365 3366 /* 3367 * Scrambler may need to be disabled for link sensing 3368 * to work 3369 */ 3370 dnetp->disable_scrambler = 1; 3371 setup_block(dnetp); 3372 dnetp->disable_scrambler = 0; 3373 3374 if (block->media_code == MEDIA_TP || block->media_code == MEDIA_TP_FD) 3375 settletime = delay_10; 3376 else 3377 settletime = delay_100; 3378 stabletime = settletime / 4; 3379 3380 mask = 1 << ((block->command & CMD_MEDIABIT_MASK) >> 1); 3381 polarity = block->command & CMD_POL ? 
0xffffffff : 0; 3382 3383 for (waittime = 0, upsamples = 0; 3384 waittime <= settletime + stabletime && upsamples < 8; 3385 waittime += stabletime/8) { 3386 delay(drv_usectohz(stabletime*1000 / 8)); 3387 status = read_gpr(dnetp); 3388 link = (status^polarity) & mask; 3389 if (link) 3390 upsamples++; 3391 else 3392 upsamples = 0; 3393 } 3394 #ifdef DNETDEBUG 3395 if (dnetdebug & DNETSENSE) 3396 cmn_err(CE_NOTE, "%s upsamples:%d stat:%x polarity:%x " 3397 "mask:%x link:%x", 3398 upsamples == 8 ? "UP":"DOWN", 3399 upsamples, status, polarity, mask, link); 3400 #endif 3401 if (upsamples == 8) 3402 return (1); 3403 return (0); 3404 } 3405 3406 static int 3407 send_test_packet(struct dnetinstance *dnetp) 3408 { 3409 int packet_delay; 3410 struct tx_desc_type *desc; 3411 int bufindex; 3412 int media_code = dnetp->selected_media_block->media_code; 3413 uint32_t del; 3414 3415 ASSERT(MUTEX_HELD(&dnetp->intrlock)); 3416 /* 3417 * For a successful test packet, the card must have settled into 3418 * its current setting. Almost all cards we've tested manage to 3419 * do this with all media within 50ms. However, the SMC 8432 3420 * requires 300ms to settle into BNC mode. We now only do this 3421 * from attach, and we do sleeping delay() instead of drv_usecwait() 3422 * so we hope this .2 second delay won't cause too much suffering. 3423 * ALSO: with an autonegotiating hub, an aditional 1 second delay is 3424 * required. 
This is done if the media type is TP 3425 */ 3426 if (media_code == MEDIA_TP || media_code == MEDIA_TP_FD) { 3427 packet_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo, 3428 DDI_PROP_DONTPASS, "test_packet_delay_tp", 1300000); 3429 } else { 3430 packet_delay = ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo, 3431 DDI_PROP_DONTPASS, "test_packet_delay", 300000); 3432 } 3433 delay(drv_usectohz(packet_delay)); 3434 3435 desc = dnetp->tx_desc; 3436 3437 bufindex = dnetp->tx_current_desc; 3438 if (alloc_descriptor(dnetp) == FAILURE) { 3439 cmn_err(CE_WARN, "DNET: send_test_packet: alloc_descriptor" 3440 "failed"); 3441 return (0); 3442 } 3443 3444 /* 3445 * use setup buffer as the buffer for the test packet 3446 * instead of allocating one. 3447 */ 3448 3449 ASSERT(dnetp->setup_buf_vaddr != NULL); 3450 /* Put something decent in dest address so we don't annoy other cards */ 3451 BCOPY((caddr_t)dnetp->curr_macaddr, 3452 (caddr_t)dnetp->setup_buf_vaddr, ETHERADDRL); 3453 BCOPY((caddr_t)dnetp->curr_macaddr, 3454 (caddr_t)dnetp->setup_buf_vaddr+ETHERADDRL, ETHERADDRL); 3455 3456 desc[bufindex].buffer1 = dnetp->setup_buf_paddr; 3457 desc[bufindex].desc1.buffer_size1 = SETUPBUF_SIZE; 3458 desc[bufindex].buffer2 = (uint32_t)(0); 3459 desc[bufindex].desc1.first_desc = 1; 3460 desc[bufindex].desc1.last_desc = 1; 3461 desc[bufindex].desc1.int_on_comp = 1; 3462 desc[bufindex].desc0.own = 1; 3463 3464 ddi_put8(dnetp->io_handle, REG8(dnetp->io_reg, TX_POLL_REG), 3465 TX_POLL_DEMAND); 3466 3467 /* 3468 * Give enough time for the chip to transmit the packet 3469 */ 3470 #if 1 3471 del = 1000; 3472 while (desc[bufindex].desc0.own && --del) 3473 drv_usecwait(10); /* quickly wait up to 10ms */ 3474 if (desc[bufindex].desc0.own) 3475 delay(drv_usectohz(200000)); /* nicely wait a longer time */ 3476 #else 3477 del = 0x10000; 3478 while (desc[bufindex].desc0.own && --del) 3479 drv_usecwait(10); 3480 #endif 3481 3482 #ifdef DNETDEBUG 3483 if (dnetdebug & DNETSENSE) 3484 cmn_err(CE_NOTE, "desc0 
bits = %u, %u, %u, %u, %u, %u", 3485 desc[bufindex].desc0.own, 3486 desc[bufindex].desc0.err_summary, 3487 desc[bufindex].desc0.carrier_loss, 3488 desc[bufindex].desc0.no_carrier, 3489 desc[bufindex].desc0.late_collision, 3490 desc[bufindex].desc0.link_fail); 3491 #endif 3492 if (desc[bufindex].desc0.own) /* it shouldn't take this long, error */ 3493 return (0); 3494 3495 return (!desc[bufindex].desc0.err_summary); 3496 } 3497 3498 /* enable_interrupts - called with intrlock held */ 3499 static void 3500 enable_interrupts(struct dnetinstance *dnetp) 3501 { 3502 ASSERT(MUTEX_HELD(&dnetp->intrlock)); 3503 /* Don't enable interrupts if they have been forced off */ 3504 if (dnetp->interrupts_disabled) 3505 return; 3506 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, INT_MASK_REG), 3507 ABNORMAL_INTR_MASK | NORMAL_INTR_MASK | SYSTEM_ERROR_MASK | 3508 (dnetp->timer.cb ? GPTIMER_INTR : 0) | 3509 RX_INTERRUPT_MASK | 3510 TX_INTERRUPT_MASK | TX_JABBER_MASK | TX_UNDERFLOW_MASK); 3511 } 3512 3513 /* 3514 * Some older multiport cards are non-PCI compliant in their interrupt routing. 3515 * Second and subsequent devices are incorrectly configured by the BIOS 3516 * (either in their ILINE configuration or the MP Configuration Table for PC+MP 3517 * systems). 3518 * The hack stops registering the interrupt routine for the FIRST 3519 * device on the adapter, and registers its own. It builds up a table 3520 * of dnetp structures for each device, and the new interrupt routine 3521 * calls dnet_intr for each of them. 3522 * Known cards that suffer from this problem are: 3523 * All Cogent multiport cards; 3524 * Znyx 314; 3525 * Znyx 315. 3526 * 3527 * XXX NEEDSWORK -- see comments above get_alternative_srom_image(). This 3528 * hack relies on the fact that the offending cards will have only one SROM. 
 * It uses this fact to identify devices that are on the same multiport
 * adapter, as opposed to multiple devices from the same vendor (as
 * indicated by "secondary")
 */
/*
 * Returns -1 to tell the caller to suppress normal interrupt registration
 * (primary device of a known-broken multiport card: we registered our own
 * routing-workaround handler instead), 0 otherwise (including on failure,
 * in which case normal registration proceeds as a best effort).
 */
static int
dnet_hack_interrupts(struct dnetinstance *dnetp, int secondary)
{
	int i;
	struct hackintr_inf *hackintr_inf;
	dev_info_t *devinfo = dnetp->devinfo;
	uint32_t oui = 0;	/* Organizationally Unique ID */

	/* Administrator opt-out: disable the workaround via driver.conf */
	if (ddi_getprop(DDI_DEV_T_ANY, devinfo, DDI_PROP_DONTPASS,
	    "no_INTA_workaround", 0) != 0)
		return (0);

	/* OUI is the first 3 bytes of the vendor's ethernet address */
	for (i = 0; i < 3; i++)
		oui = (oui << 8) | dnetp->vendor_addr[i];

	/* Check whether or not we need to implement the hack */

	switch (oui) {
	case ZNYX_ETHER:
		/* Znyx multiport 21040 cards <<==>> ZX314 or ZX315 */
		if (dnetp->board_type != DEVICE_ID_21040)
			return (0);
		break;

	case COGENT_ETHER:
		/* All known Cogent multiport cards */
		break;

	case ADAPTEC_ETHER:
		/* Adaptec multiport cards */
		break;

	default:
		/* Other cards work correctly */
		return (0);
	}

	/* card is (probably) non-PCI compliant in its interrupt routing */


	if (!secondary) {

		/*
		 * If we have already registered a hacked interrupt, and
		 * this is also a 'primary' adapter, then this is NOT part of
		 * a multiport card, but a second card on the same PCI bus.
		 * BUGID: 4057747
		 */
		if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
		    DDI_PROP_DONTPASS, hackintr_propname, 0) != 0)
			return (0);
		/* ... Primary not part of a multiport device */

#ifdef DNETDEBUG
		if (dnetdebug & DNETTRACE)
			cmn_err(CE_NOTE, "dnet: Implementing hardware "
			    "interrupt flaw workaround");
#endif
		/*
		 * NOTE(review): kmem_zalloc(KM_SLEEP) does not return NULL
		 * per the DDI; the check below is defensive only.
		 */
		dnetp->hackintr_inf = hackintr_inf =
		    kmem_zalloc(sizeof (struct hackintr_inf), KM_SLEEP);
		if (hackintr_inf == NULL)
			goto fail;

		hackintr_inf->dnetps[0] = dnetp;
		hackintr_inf->devinfo = devinfo;

		/*
		 * Add a property to allow successive attaches to find the
		 * table
		 */

		if (ddi_prop_update_byte_array(DDI_DEV_T_NONE,
		    ddi_get_parent(devinfo), hackintr_propname,
		    (uchar_t *)&dnetp->hackintr_inf,
		    sizeof (void *)) != DDI_PROP_SUCCESS)
			goto fail;


		/* Register our hacked interrupt routine */
		if (ddi_add_intr(devinfo, 0, &dnetp->icookie, NULL,
		    (uint_t (*)(char *))dnet_hack_intr,
		    (caddr_t)hackintr_inf) != DDI_SUCCESS) {
			/* XXX function return value ignored */
			(void) ddi_prop_remove(DDI_DEV_T_NONE,
			    ddi_get_parent(devinfo),
			    hackintr_propname);
			goto fail;
		}

		/*
		 * Mutex required to ensure interrupt routine has completed
		 * when detaching devices
		 */
		mutex_init(&hackintr_inf->lock, NULL, MUTEX_DRIVER,
		    dnetp->icookie);

		/* Stop GLD registering an interrupt */
		return (-1);
	} else {

		/* Add the dnetp for this secondary device to the table */

		hackintr_inf = (struct hackintr_inf *)(uintptr_t)
		    ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
		    DDI_PROP_DONTPASS, hackintr_propname, 0);

		if (hackintr_inf == NULL)
			goto fail;

		/* Find an empty slot */
		for (i = 0; i < MAX_INST; i++)
			if (hackintr_inf->dnetps[i] == NULL)
				break;

		/* More than 8 ports on adapter ?! */
		if (i == MAX_INST)
			goto fail;

		hackintr_inf->dnetps[i] = dnetp;

		/*
		 * Allow GLD to register a handler for this
		 * device. If the card is actually broken, as we suspect, this
		 * handler will never get called. However, by registering the
		 * interrupt handler, we can cope gracefully with new multiport
		 * Cogent cards that decide to fix the hardware problem
		 */
		return (0);
	}

fail:
	cmn_err(CE_WARN, "dnet: Could not work around hardware interrupt"
	    " routing problem");
	return (0);
}

/*
 * Call dnet_intr for all adapters on a multiport card
 */
static uint_t
dnet_hack_intr(struct hackintr_inf *hackintr_inf)
{
	int i;
	int claimed = DDI_INTR_UNCLAIMED;

	/* Stop detaches while processing interrupts */
	mutex_enter(&hackintr_inf->lock);

	/* Claim the interrupt if ANY of the devices in the table claims it */
	for (i = 0; i < MAX_INST; i++) {
		if (hackintr_inf->dnetps[i] &&
		    dnet_intr((caddr_t)hackintr_inf->dnetps[i]) ==
		    DDI_INTR_CLAIMED) {
			claimed = DDI_INTR_CLAIMED;
		}
	}
	mutex_exit(&hackintr_inf->lock);
	return (claimed);
}

/*
 * This removes the detaching device from the table processed by the hacked
 * interrupt routine. Because the interrupts from all devices come in to the
 * same interrupt handler, ALL devices must stop interrupting once the
 * primary device detaches. This isn't a problem at present, because all
 * instances of a device are detached when the driver is unloaded.
 */
static int
dnet_detach_hacked_interrupt(dev_info_t *devinfo)
{
	int i;
	struct hackintr_inf *hackintr_inf;
	struct dnetinstance *altdnetp, *dnetp =
	    ddi_get_driver_private(devinfo);

	hackintr_inf = (struct hackintr_inf *)(uintptr_t)
	    ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(devinfo),
	    DDI_PROP_DONTPASS, hackintr_propname, 0);

	/*
	 * No hackintr_inf implies hack was not required or the primary has
	 * detached, and our interrupts are already disabled
	 */
	if (!hackintr_inf) {
		/* remove the interrupt for the non-hacked case */
		ddi_remove_intr(devinfo, 0, dnetp->icookie);
		return (DDI_SUCCESS);
	}

	/* Remove this device from the handled table */
	mutex_enter(&hackintr_inf->lock);
	for (i = 0; i < MAX_INST; i++) {
		if (hackintr_inf->dnetps[i] == dnetp) {
			hackintr_inf->dnetps[i] = NULL;
			break;
		}
	}

	mutex_exit(&hackintr_inf->lock);

	/* Not the primary card, we are done */
	if (devinfo != hackintr_inf->devinfo)
		return (DDI_SUCCESS);

	/*
	 * This is the primary card. All remaining adapters on this device
	 * must have their interrupts disabled before we remove the handler
	 */
	for (i = 0; i < MAX_INST; i++) {
		if ((altdnetp = hackintr_inf->dnetps[i]) != NULL) {
			altdnetp->interrupts_disabled = 1;
			ddi_put32(altdnetp->io_handle,
			    REG32(altdnetp->io_reg, INT_MASK_REG), 0);
		}
	}

	/* It should now be safe to remove the interrupt handler */

	ddi_remove_intr(devinfo, 0, dnetp->icookie);
	mutex_destroy(&hackintr_inf->lock);
	/* XXX function return value ignored */
	(void) ddi_prop_remove(DDI_DEV_T_NONE, ddi_get_parent(devinfo),
	    hackintr_propname);
	kmem_free(hackintr_inf, sizeof (struct hackintr_inf));
	return (DDI_SUCCESS);
}

/* do_phy() - called with intrlock held */
/*
 * Locate the MII media block (SROM type 1 or 3) for this leaf, select it
 * (or fall back to the default block), then probe all 32 PHY addresses,
 * recording the first PHY that probes and initialises successfully.
 */
static void
do_phy(struct dnetinstance *dnetp)
{
	dev_info_t *dip;
	LEAF_FORMAT *leaf = dnetp->sr.leaf + dnetp->leaf;
	media_block_t *block;
	int phy;

	dip = dnetp->devinfo;

	/*
	 * Find and configure the PHY media block. If NO PHY blocks are
	 * found on the SROM, but a PHY device is present, we assume the card
	 * is a legacy device, and that there is ONLY a PHY interface on the
	 * card (ie, no BNC or AUI, and 10BaseT is implemented by the PHY
	 */

	/* Scan media blocks last-to-first for an MII block (type 1 or 3) */
	for (block = leaf->block + leaf->block_count -1;
	    block >= leaf->block; block --) {
		if (block->type == 3 || block->type == 1) {
			leaf->mii_block = block;
			break;
		}
	}

	/*
	 * If no MII block, select default, and hope this configuration will
	 * allow the phy to be read/written if it is present
	 */
	dnetp->selected_media_block = leaf->mii_block ?
	    leaf->mii_block : leaf->default_block;

	setup_block(dnetp);
	/* XXX function return value ignored */
	(void) mii_create(dip, dnet_mii_write, dnet_mii_read, &dnetp->mii);

	/*
	 * We try PHY 0 LAST because it is less likely to be connected
	 * (phy runs 1..32; phy % 32 visits addresses 1..31 then 0)
	 */
	for (phy = 1; phy < 33; phy++)
		if (mii_probe_phy(dnetp->mii, phy % 32) == MII_SUCCESS &&
		    mii_init_phy(dnetp->mii, phy % 32) == MII_SUCCESS) {
#ifdef DNETDEBUG
			if (dnetdebug & DNETSENSE)
				cmn_err(CE_NOTE, "dnet: "
				    "PHY at address %d", phy % 32);
#endif
			dnetp->phyaddr = phy % 32;
			if (!leaf->mii_block) {
				/* Legacy card, change the leaf node */
				set_leaf(&dnetp->sr, &leaf_phylegacy);
			}
			return;
		}
#ifdef DNETDEBUG
	if (dnetdebug & DNETSENSE)
		cmn_err(CE_NOTE, "dnet: No PHY found");
#endif
}

/*
 * Bit-bang one MII management read frame through the 21x4x serial-ROM/MII
 * register and return the 16-bit register value.  If the PHY fails to drive
 * the turnaround bit low, returns -1 (which truncates to 0xffff in the
 * ushort_t return type).  Called with intrlock held.
 */
static ushort_t
dnet_mii_read(dev_info_t *dip, int phy_addr, int reg_num)
{
	struct dnetinstance *dnetp;

	uint32_t command_word;
	uint32_t tmp;
	uint32_t data = 0;
	int i;
	int bits_in_ushort = ((sizeof (ushort_t))*8);
	int turned_around = 0;

	dnetp = ddi_get_driver_private(dip);

	ASSERT(MUTEX_HELD(&dnetp->intrlock));
	/* Write Preamble */
	write_mii(dnetp, MII_PRE, 2*bits_in_ushort);

	/* Prepare command word */
	command_word = (uint32_t)phy_addr << MII_PHY_ADDR_ALIGN;
	command_word |= (uint32_t)reg_num << MII_REG_ADDR_ALIGN;
	command_word |= MII_READ_FRAME;

	write_mii(dnetp, command_word, bits_in_ushort-2);

	mii_tristate(dnetp);

	/* Check that the PHY generated a zero bit the 2nd clock */
	tmp = ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG));

	turned_around = (tmp & MII_DATA_IN) ? 0 : 1;

	/* read data WORD, one bit per clock cycle, MSB first */
	for (i = 0; i < bits_in_ushort; i++) {
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, ETHER_ROM_REG), MII_READ);
		drv_usecwait(MII_DELAY);
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, ETHER_ROM_REG), MII_READ | MII_CLOCK);
		drv_usecwait(MII_DELAY);
		tmp = ddi_get32(dnetp->io_handle,
		    REG32(dnetp->io_reg, ETHER_ROM_REG));
		drv_usecwait(MII_DELAY);
		data = (data << 1) | (tmp >> MII_DATA_IN_POSITION) & 0x0001;
	}

	mii_tristate(dnetp);
	return (turned_around ? data: -1);
}

/*
 * Bit-bang one MII management write frame (preamble, command word with the
 * 16-bit data, then turnaround/tri-state).  Called with intrlock held.
 */
static void
dnet_mii_write(dev_info_t *dip, int phy_addr, int reg_num, int reg_dat)
{
	struct dnetinstance *dnetp;
	uint32_t command_word;
	int bits_in_ushort = ((sizeof (ushort_t))*8);

	dnetp = ddi_get_driver_private(dip);

	ASSERT(MUTEX_HELD(&dnetp->intrlock));
	write_mii(dnetp, MII_PRE, 2*bits_in_ushort);

	/* Prepare command word */
	command_word = ((uint32_t)phy_addr << MII_PHY_ADDR_ALIGN);
	command_word |= ((uint32_t)reg_num << MII_REG_ADDR_ALIGN);
	command_word |= (MII_WRITE_FRAME | (uint32_t)reg_dat);

	write_mii(dnetp, command_word, 2*bits_in_ushort);
	mii_tristate(dnetp);
}

/*
 * Write data size bits from mii_data to the MII control lines.
 */
/*
 * Clock out the top data_size bits of mii_data, MSB first: for each bit,
 * present the data with MDC low, then raise MDC.  Called with intrlock held.
 */
static void
write_mii(struct dnetinstance *dnetp, uint32_t mii_data, int data_size)
{
	int i;
	uint32_t dbit;

	ASSERT(MUTEX_HELD(&dnetp->intrlock));
	for (i = data_size; i > 0; i--) {
		/* Extract the current MSB into the MDIO data-out position */
		dbit = ((mii_data >>
		    (31 - MII_WRITE_DATA_POSITION)) & MII_WRITE_DATA);
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, ETHER_ROM_REG),
		    MII_WRITE | dbit);
		drv_usecwait(MII_DELAY);
		ddi_put32(dnetp->io_handle,
		    REG32(dnetp->io_reg, ETHER_ROM_REG),
		    MII_WRITE | MII_CLOCK | dbit);
		drv_usecwait(MII_DELAY);
		mii_data <<= 1;
	}
}

/*
 * Put the MDIO port in tri-state for the turn around bits
 * in MII read and at end of MII management sequence.
 */
static void
mii_tristate(struct dnetinstance *dnetp)
{
	ASSERT(MUTEX_HELD(&dnetp->intrlock));
	/* One clock cycle with the data line released */
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG),
	    MII_WRITE_TS);
	drv_usecwait(MII_DELAY);
	ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, ETHER_ROM_REG),
	    MII_WRITE_TS | MII_CLOCK);
	drv_usecwait(MII_DELAY);
}


/*
 * Install a new leaf array in the SROM structure, freeing any previous
 * dynamically allocated (non-static) leaf array first.
 */
static void
set_leaf(SROM_FORMAT *sr, LEAF_FORMAT *leaf)
{
	if (sr->leaf && !sr->leaf->is_static)
		kmem_free(sr->leaf, sr->adapters * sizeof (LEAF_FORMAT));
	sr->leaf = leaf;
}

/*
 * Callback from MII module. Makes sure that the CSR registers are
 * configured properly if the PHY changes mode.
 */
/* ARGSUSED */
/* dnet_mii_link_cb - called with intrlock held */
static void
dnet_mii_link_cb(dev_info_t *dip, int phy, enum mii_phy_state state)
{
	struct dnetinstance *dnetp = ddi_get_driver_private(dip);
	LEAF_FORMAT *leaf;

	ASSERT(MUTEX_HELD(&dnetp->intrlock));

	leaf = dnetp->sr.leaf + dnetp->leaf;
	if (state == phy_state_linkup) {
		/* Link came up: record speed/duplex and select the MII block */
		dnetp->mii_up = 1;

		(void) mii_getspeed(dnetp->mii, dnetp->phyaddr,
		    &dnetp->mii_speed, &dnetp->mii_duplex);

		dnetp->selected_media_block = leaf->mii_block;
		setup_block(dnetp);
	} else {
		/* NEEDSWORK: Probably can call find_active_media here */
		dnetp->mii_up = 0;

		if (leaf->default_block->media_code == MEDIA_MII)
			dnetp->selected_media_block = leaf->default_block;
		setup_block(dnetp);
	}

	/* Propagate the new link state to the MAC layer if we are running */
	if (dnetp->running) {
		mac_link_update(dnetp->mac_handle,
		    (dnetp->mii_up ? LINK_STATE_UP : LINK_STATE_DOWN));
	}
}

/*
 * SROM parsing routines.
 * Refer to the Digital 3.03 SROM spec while reading this! (references refer
 * to this document)
 * Where possible ALL vendor specific changes should be localised here.
The 3984 * SROM data should be capable of describing any programmatic irregularities 3985 * of DNET cards (via SIA or GP registers, in particular), so vendor specific 3986 * code elsewhere should not be required 3987 */ 3988 static void 3989 dnet_parse_srom(struct dnetinstance *dnetp, SROM_FORMAT *sr, uchar_t *vi) 3990 { 3991 uint32_t ether_mfg = 0; 3992 int i; 3993 uchar_t *p; 3994 3995 if (!ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo, 3996 DDI_PROP_DONTPASS, "no_sromconfig", 0)) 3997 dnetp->sr.init_from_srom = check_srom_valid(vi); 3998 3999 if (dnetp->sr.init_from_srom && dnetp->board_type != DEVICE_ID_21040) { 4000 /* Section 2/3: General SROM Format/ ID Block */ 4001 p = vi+18; 4002 sr->version = *p++; 4003 sr->adapters = *p++; 4004 4005 sr->leaf = 4006 kmem_zalloc(sr->adapters * sizeof (LEAF_FORMAT), KM_SLEEP); 4007 for (i = 0; i < 6; i++) 4008 sr->netaddr[i] = *p++; 4009 4010 for (i = 0; i < sr->adapters; i++) { 4011 uchar_t devno = *p++; 4012 uint16_t offset = *p++; 4013 offset |= *p++ << 8; 4014 sr->leaf[i].device_number = devno; 4015 parse_controller_leaf(dnetp, sr->leaf+i, vi+offset); 4016 } 4017 /* 4018 * 'Orrible hack for cogent cards. The 6911A board seems to 4019 * have an incorrect SROM. (From the OEMDEMO program 4020 * supplied by cogent, it seems that the ROM matches a setup 4021 * or a board with a QSI or ICS PHY. 4022 */ 4023 for (i = 0; i < 3; i++) 4024 ether_mfg = (ether_mfg << 8) | sr->netaddr[i]; 4025 4026 if (ether_mfg == ADAPTEC_ETHER) { 4027 static uint16_t cogent_gprseq[] = {0x821, 0}; 4028 switch (vi[COGENT_SROM_ID]) { 4029 case COGENT_ANA6911A_C: 4030 case COGENT_ANA6911AC_C: 4031 #ifdef DNETDEBUG 4032 if (dnetdebug & DNETTRACE) 4033 cmn_err(CE_WARN, 4034 "Suspected bad GPR sequence." 
4035 " Making a guess (821,0)"); 4036 #endif 4037 4038 /* XXX function return value ignored */ 4039 (void) ddi_prop_update_byte_array( 4040 DDI_DEV_T_NONE, dnetp->devinfo, 4041 "gpr-sequence", (uchar_t *)cogent_gprseq, 4042 sizeof (cogent_gprseq)); 4043 break; 4044 } 4045 } 4046 } else { 4047 /* 4048 * Adhoc SROM, check for some cards which need special handling 4049 * Assume vendor info contains ether address in first six bytes 4050 */ 4051 4052 uchar_t *mac = vi + ddi_getprop(DDI_DEV_T_ANY, dnetp->devinfo, 4053 DDI_PROP_DONTPASS, macoffset_propname, 0); 4054 4055 for (i = 0; i < 6; i++) 4056 sr->netaddr[i] = mac[i]; 4057 4058 if (dnetp->board_type == DEVICE_ID_21140) { 4059 for (i = 0; i < 3; i++) 4060 ether_mfg = (ether_mfg << 8) | mac[i]; 4061 4062 switch (ether_mfg) { 4063 case ASANTE_ETHER: 4064 dnetp->vendor_21140 = ASANTE_TYPE; 4065 dnetp->vendor_revision = 0; 4066 set_leaf(sr, &leaf_asante); 4067 sr->adapters = 1; 4068 break; 4069 4070 case COGENT_ETHER: 4071 case ADAPTEC_ETHER: 4072 dnetp->vendor_21140 = COGENT_EM_TYPE; 4073 dnetp->vendor_revision = 4074 vi[VENDOR_REVISION_OFFSET]; 4075 set_leaf(sr, &leaf_cogent_100); 4076 sr->adapters = 1; 4077 break; 4078 4079 default: 4080 dnetp->vendor_21140 = DEFAULT_TYPE; 4081 dnetp->vendor_revision = 0; 4082 set_leaf(sr, &leaf_default_100); 4083 sr->adapters = 1; 4084 break; 4085 } 4086 } else if (dnetp->board_type == DEVICE_ID_21041) { 4087 set_leaf(sr, &leaf_21041); 4088 } else if (dnetp->board_type == DEVICE_ID_21040) { 4089 set_leaf(sr, &leaf_21040); 4090 } 4091 } 4092 } 4093 4094 /* Section 4.2, 4.3, 4.4, 4.5 */ 4095 static void 4096 parse_controller_leaf(struct dnetinstance *dnetp, LEAF_FORMAT *leaf, 4097 uchar_t *vi) 4098 { 4099 int i; 4100 4101 leaf->selected_contype = *vi++; 4102 leaf->selected_contype |= *vi++ << 8; 4103 4104 if (dnetp->board_type == DEVICE_ID_21140) /* Sect. 
4.3 */ 4105 leaf->gpr = *vi++; 4106 4107 leaf->block_count = *vi++; 4108 4109 if (leaf->block_count > MAX_MEDIA) { 4110 cmn_err(CE_WARN, "dnet: Too many media in SROM!"); 4111 leaf->block_count = 1; 4112 } 4113 for (i = 0; i <= leaf->block_count; i++) { 4114 vi = parse_media_block(dnetp, leaf->block + i, vi); 4115 if (leaf->block[i].command & CMD_DEFAULT_MEDIUM) 4116 leaf->default_block = leaf->block+i; 4117 } 4118 /* No explicit default block: use last in the ROM */ 4119 if (leaf->default_block == NULL) 4120 leaf->default_block = leaf->block + leaf->block_count -1; 4121 4122 } 4123 4124 static uchar_t * 4125 parse_media_block(struct dnetinstance *dnetp, media_block_t *block, uchar_t *vi) 4126 { 4127 int i; 4128 4129 /* 4130 * There are three kinds of media block we need to worry about: 4131 * The 21041 blocks. 4132 * 21140 blocks from a version 1 SROM 4133 * 2114[023] block from a version 3 SROM 4134 */ 4135 4136 if (dnetp->board_type == DEVICE_ID_21041) { 4137 /* Section 4.2 */ 4138 block->media_code = *vi & 0x3f; 4139 block->type = 2; 4140 if (*vi++ & 0x40) { 4141 block->un.sia.csr13 = *vi++; 4142 block->un.sia.csr13 |= *vi++ << 8; 4143 block->un.sia.csr14 = *vi++; 4144 block->un.sia.csr14 |= *vi++ << 8; 4145 block->un.sia.csr15 = *vi++; 4146 block->un.sia.csr15 |= *vi++ << 8; 4147 } else { 4148 /* No media data (csrs 13,14,15). 
Insert defaults */ 4149 switch (block->media_code) { 4150 case MEDIA_TP: 4151 block->un.sia.csr13 = 0xef01; 4152 block->un.sia.csr14 = 0x7f3f; 4153 block->un.sia.csr15 = 0x0008; 4154 break; 4155 case MEDIA_TP_FD: 4156 block->un.sia.csr13 = 0xef01; 4157 block->un.sia.csr14 = 0x7f3d; 4158 block->un.sia.csr15 = 0x0008; 4159 break; 4160 case MEDIA_BNC: 4161 block->un.sia.csr13 = 0xef09; 4162 block->un.sia.csr14 = 0x0705; 4163 block->un.sia.csr15 = 0x0006; 4164 break; 4165 case MEDIA_AUI: 4166 block->un.sia.csr13 = 0xef09; 4167 block->un.sia.csr14 = 0x0705; 4168 block->un.sia.csr15 = 0x000e; 4169 break; 4170 } 4171 } 4172 } else if (*vi & 0x80) { /* Extended format: Section 4.3.2.2 */ 4173 int blocklen = *vi++ & 0x7f; 4174 block->type = *vi++; 4175 switch (block->type) { 4176 case 0: /* "non-MII": Section 4.3.2.2.1 */ 4177 block->media_code = (*vi++) & 0x3f; 4178 block->gprseqlen = 1; 4179 block->gprseq[0] = *vi++; 4180 block->command = *vi++; 4181 block->command |= *vi++ << 8; 4182 break; 4183 4184 case 1: /* MII/PHY: Section 4.3.2.2.2 */ 4185 block->command = CMD_PS; 4186 block->media_code = MEDIA_MII; 4187 /* This is whats needed in CSR6 */ 4188 4189 block->un.mii.phy_num = *vi++; 4190 block->gprseqlen = *vi++; 4191 4192 for (i = 0; i < block->gprseqlen; i++) 4193 block->gprseq[i] = *vi++; 4194 block->rstseqlen = *vi++; 4195 for (i = 0; i < block->rstseqlen; i++) 4196 block->rstseq[i] = *vi++; 4197 4198 block->un.mii.mediacaps = *vi++; 4199 block->un.mii.mediacaps |= *vi++ << 8; 4200 block->un.mii.nwayadvert = *vi++; 4201 block->un.mii.nwayadvert |= *vi++ << 8; 4202 block->un.mii.fdxmask = *vi++; 4203 block->un.mii.fdxmask |= *vi++ << 8; 4204 block->un.mii.ttmmask = *vi++; 4205 block->un.mii.ttmmask |= *vi++ << 8; 4206 break; 4207 4208 case 2: /* SIA Media: Section 4.4.2.1.1 */ 4209 block->media_code = *vi & 0x3f; 4210 if (*vi++ & 0x40) { 4211 block->un.sia.csr13 = *vi++; 4212 block->un.sia.csr13 |= *vi++ << 8; 4213 block->un.sia.csr14 = *vi++; 4214 
block->un.sia.csr14 |= *vi++ << 8; 4215 block->un.sia.csr15 = *vi++; 4216 block->un.sia.csr15 |= *vi++ << 8; 4217 } else { 4218 /* 4219 * SIA values not provided by SROM; provide 4220 * defaults. See appendix D of 2114[23] manuals. 4221 */ 4222 switch (block->media_code) { 4223 case MEDIA_BNC: 4224 block->un.sia.csr13 = 0x0009; 4225 block->un.sia.csr14 = 0x0705; 4226 block->un.sia.csr15 = 0x0000; 4227 break; 4228 case MEDIA_AUI: 4229 block->un.sia.csr13 = 0x0009; 4230 block->un.sia.csr14 = 0x0705; 4231 block->un.sia.csr15 = 0x0008; 4232 break; 4233 case MEDIA_TP: 4234 block->un.sia.csr13 = 0x0001; 4235 block->un.sia.csr14 = 0x7f3f; 4236 block->un.sia.csr15 = 0x0000; 4237 break; 4238 case MEDIA_TP_FD: 4239 block->un.sia.csr13 = 0x0001; 4240 block->un.sia.csr14 = 0x7f3d; 4241 block->un.sia.csr15 = 0x0000; 4242 break; 4243 default: 4244 block->un.sia.csr13 = 0x0000; 4245 block->un.sia.csr14 = 0x0000; 4246 block->un.sia.csr15 = 0x0000; 4247 } 4248 } 4249 4250 /* Treat GP control/data as a GPR sequence */ 4251 block->gprseqlen = 2; 4252 block->gprseq[0] = *vi++; 4253 block->gprseq[0] |= *vi++ << 8; 4254 block->gprseq[0] |= GPR_CONTROL_WRITE; 4255 block->gprseq[1] = *vi++; 4256 block->gprseq[1] |= *vi++ << 8; 4257 break; 4258 4259 case 3: /* MII/PHY : Section 4.4.2.1.2 */ 4260 block->command = CMD_PS; 4261 block->media_code = MEDIA_MII; 4262 block->un.mii.phy_num = *vi++; 4263 4264 block->gprseqlen = *vi++; 4265 for (i = 0; i < block->gprseqlen; i++) { 4266 block->gprseq[i] = *vi++; 4267 block->gprseq[i] |= *vi++ << 8; 4268 } 4269 4270 block->rstseqlen = *vi++; 4271 for (i = 0; i < block->rstseqlen; i++) { 4272 block->rstseq[i] = *vi++; 4273 block->rstseq[i] |= *vi++ << 8; 4274 } 4275 block->un.mii.mediacaps = *vi++; 4276 block->un.mii.mediacaps |= *vi++ << 8; 4277 block->un.mii.nwayadvert = *vi++; 4278 block->un.mii.nwayadvert |= *vi++ << 8; 4279 block->un.mii.fdxmask = *vi++; 4280 block->un.mii.fdxmask |= *vi++ << 8; 4281 block->un.mii.ttmmask = *vi++; 4282 
block->un.mii.ttmmask |= *vi++ << 8; 4283 block->un.mii.miiintr |= *vi++; 4284 break; 4285 4286 case 4: /* SYM Media: 4.5.2.1.3 */ 4287 block->media_code = *vi++ & 0x3f; 4288 /* Treat GP control and data as a GPR sequence */ 4289 block->gprseqlen = 2; 4290 block->gprseq[0] = *vi++; 4291 block->gprseq[0] |= *vi++ << 8; 4292 block->gprseq[0] |= GPR_CONTROL_WRITE; 4293 block->gprseq[1] = *vi++; 4294 block->gprseq[1] |= *vi++ << 8; 4295 block->command = *vi++; 4296 block->command |= *vi++ << 8; 4297 break; 4298 4299 case 5: /* GPR reset sequence: Section 4.5.2.1.4 */ 4300 block->rstseqlen = *vi++; 4301 for (i = 0; i < block->rstseqlen; i++) 4302 block->rstseq[i] = *vi++; 4303 break; 4304 4305 default: /* Unknown media block. Skip it. */ 4306 cmn_err(CE_WARN, "dnet: Unsupported SROM block."); 4307 vi += blocklen; 4308 break; 4309 } 4310 } else { /* Compact format (or V1 SROM): Section 4.3.2.1 */ 4311 block->type = 0; 4312 block->media_code = *vi++ & 0x3f; 4313 block->gprseqlen = 1; 4314 block->gprseq[0] = *vi++; 4315 block->command = *vi++; 4316 block->command |= (*vi++) << 8; 4317 } 4318 return (vi); 4319 } 4320 4321 4322 /* 4323 * An alternative to doing this would be to store the legacy ROMs in binary 4324 * format in the conf file, and in read_srom, pick out the data. This would 4325 * then allow the parser to continue on as normal. This makes it a little 4326 * easier to read. 
 */
/*
 * Build the static "fake" SROM leaves used for legacy cards that have no
 * parseable SROM (21040/21041 boards, and 21140 boards with V1 or vendor
 * ROMs).  Each leaf is a hand-constructed media-block table equivalent to
 * what parse_media_block() would have produced, so the rest of the driver
 * can treat legacy cards identically to SROM V3/V4 cards.
 * Called once; fills the file-scope leaf_* templates in place.
 */
static void
setup_legacy_blocks()
{
	LEAF_FORMAT *leaf;
	media_block_t *block;

	/* Default FAKE SROM */
	leaf = &leaf_default_100;
	leaf->is_static = 1;
	leaf->default_block = &leaf->block[3];
	leaf->block_count = 4; /* 100 cards are highly unlikely to have BNC */
	block = leaf->block;
	block->media_code = MEDIA_TP_FD;
	block->type = 0;
	block->command = 0x8e; /* PCS, PS off, media sense: bit7, pol=1 */
	block++;
	block->media_code = MEDIA_TP;
	block->type = 0;
	block->command = 0x8e; /* PCS, PS off, media sense: bit7, pol=1 */
	block++;
	block->media_code = MEDIA_SYM_SCR_FD;
	block->type = 0;
	block->command = 0x6d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */
	block++;
	block->media_code = MEDIA_SYM_SCR;
	block->type = 0;
	block->command = 0x406d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */

	/* COGENT FAKE SROM */
	leaf = &leaf_cogent_100;
	leaf->is_static = 1;
	leaf->default_block = &leaf->block[4];
	leaf->block_count = 5; /* 100TX, 100TX-FD, 10T 10T-FD, BNC */
	block = leaf->block; /* BNC */
	block->media_code = MEDIA_BNC;
	block->type = 0;
	block->command = 0x8000; /* No media sense, PCS, SCR, PS all off */
	block->gprseqlen = 2;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x13f;
	block->gprseq[1] = 1;

	block++;
	block->media_code = MEDIA_TP_FD;
	block->type = 0;
	block->command = 0x8e; /* PCS, PS off, media sense: bit7, pol=1 */
	block->gprseqlen = 2;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x13f;
	block->gprseq[1] = 0x26;

	block++; /* 10BaseT */
	block->media_code = MEDIA_TP;
	block->type = 0;
	block->command = 0x8e; /* PCS, PS off, media sense: bit7, pol=1 */
	block->gprseqlen = 2;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x13f;
	block->gprseq[1] = 0x3e;

	block++; /* 100BaseTX-FD */
	block->media_code = MEDIA_SYM_SCR_FD;
	block->type = 0;
	block->command = 0x6d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */
	block->gprseqlen = 2;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x13f;
	block->gprseq[1] = 1;

	block++; /* 100BaseTX */
	block->media_code = MEDIA_SYM_SCR;
	block->type = 0;
	block->command = 0x406d; /* PCS, PS, SCR on, media sense: bit6, pol=0 */
	block->gprseqlen = 2;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x13f;
	block->gprseq[1] = 1;

	/* Generic legacy card with a PHY. */
	leaf = &leaf_phylegacy;
	leaf->block_count = 1;
	leaf->mii_block = leaf->block;
	leaf->default_block = &leaf->block[0];
	leaf->is_static = 1;
	block = leaf->block;
	block->media_code = MEDIA_MII;
	block->type = 1; /* MII Block type 1 */
	block->command = 1; /* Port select */
	block->gprseqlen = 0;
	block->rstseqlen = 0;

	/* ASANTE FAKE SROM */
	leaf = &leaf_asante;
	leaf->is_static = 1;
	leaf->default_block = &leaf->block[0];
	leaf->block_count = 1;
	block = leaf->block;
	block->media_code = MEDIA_MII;
	block->type = 1; /* MII Block type 1 */
	block->command = 1; /* Port select */
	block->gprseqlen = 3;
	block->rstseqlen = 0;
	block->gprseq[0] = 0x180;
	block->gprseq[1] = 0x80;
	block->gprseq[2] = 0x0;

	/* LEGACY 21041 card FAKE SROM */
	leaf = &leaf_21041;
	leaf->is_static = 1;
	leaf->block_count = 4; /* SIA Blocks for TP, TPfd, BNC, AUI */
	leaf->default_block = &leaf->block[3];

	block = leaf->block;
	block->media_code = MEDIA_AUI;
	block->type = 2;
	block->un.sia.csr13 = 0xef09;
	block->un.sia.csr14 = 0x0705;
	block->un.sia.csr15 = 0x000e;

	block++;
	block->media_code = MEDIA_TP_FD;
	block->type = 2;
	block->un.sia.csr13 = 0xef01;
	block->un.sia.csr14 = 0x7f3d;
	block->un.sia.csr15 = 0x0008;

	block++;
	block->media_code = MEDIA_BNC;
	block->type = 2;
	block->un.sia.csr13 = 0xef09;
	block->un.sia.csr14 = 0x0705;
	block->un.sia.csr15 = 0x0006;

	block++;
	block->media_code = MEDIA_TP;
	block->type = 2;
	block->un.sia.csr13 = 0xef01;
	block->un.sia.csr14 = 0x7f3f;
	block->un.sia.csr15 = 0x0008;

	/*
	 * LEGACY 21040 card FAKE SROM
	 * NOTE(review): unlike leaf_21041 above, no default_block is set
	 * here -- presumably the caller falls back to block[0] or picks a
	 * block by media sense; verify against the leaf consumers.
	 */
	leaf = &leaf_21040;
	leaf->is_static = 1;
	leaf->block_count = 4; /* SIA Blocks for TP, TPfd, BNC, AUI */
	block = leaf->block;
	block->media_code = MEDIA_AUI;
	block->type = 2;
	block->un.sia.csr13 = 0x8f09;
	block->un.sia.csr14 = 0x0705;
	block->un.sia.csr15 = 0x000e;
	block++;
	block->media_code = MEDIA_TP_FD;
	block->type = 2;
	block->un.sia.csr13 = 0x0f01;
	block->un.sia.csr14 = 0x7f3d;
	block->un.sia.csr15 = 0x0008;
	block++;
	block->media_code = MEDIA_BNC;
	block->type = 2;
	block->un.sia.csr13 = 0xef09;
	block->un.sia.csr14 = 0x0705;
	block->un.sia.csr15 = 0x0006;
	block++;
	block->media_code = MEDIA_TP;
	block->type = 2;
	block->un.sia.csr13 = 0x8f01;
	block->un.sia.csr14 = 0x7f3f;
	block->un.sia.csr15 = 0x0008;
}

/*
 * Dump a parsed SROM (header plus every adapter leaf) to the console.
 * Debug aid only; all output goes through cmn_err(CE_NOTE).
 */
static void
dnet_print_srom(SROM_FORMAT *sr)
{
	int i;
	uchar_t *a = sr->netaddr;
	cmn_err(CE_NOTE, "SROM Dump: %d. ver %d, Num adapters %d,"
	    "Addr:%x:%x:%x:%x:%x:%x",
	    sr->init_from_srom, sr->version, sr->adapters,
	    a[0], a[1], a[2], a[3], a[4], a[5]);

	for (i = 0; i < sr->adapters; i++)
		dnet_dump_leaf(sr->leaf+i);
}

/* Dump one adapter leaf: device/GPR summary, then each media block. */
static void
dnet_dump_leaf(LEAF_FORMAT *leaf)
{
	int i;
	cmn_err(CE_NOTE, "Leaf: Device %d, block_count %d, gpr: %x",
	    leaf->device_number, leaf->block_count, leaf->gpr);
	for (i = 0; i < leaf->block_count; i++)
		dnet_dump_block(leaf->block+i);
}

/*
 * Dump one media block: type/media/command, the GPR and reset sequences
 * (as 16-bit words), and the type-specific MII or SIA register info.
 */
static void
dnet_dump_block(media_block_t *block)
{
	cmn_err(CE_NOTE, "Block(%p): type %x, media %s, command: %x ",
	    (void *)block,
	    block->type, media_str[block->media_code], block->command);
	dnet_dumpbin("\tGPR Seq", (uchar_t *)block->gprseq, 2,
	    block->gprseqlen *2);
	dnet_dumpbin("\tGPR Reset", (uchar_t *)block->rstseq, 2,
	    block->rstseqlen *2);
	switch (block->type) {
	case 1: case 3:
		cmn_err(CE_NOTE, "\tMII Info: phy %d, nway %x, fdx"
		    "%x, ttm %x, mediacap %x",
		    block->un.mii.phy_num, block->un.mii.nwayadvert,
		    block->un.mii.fdxmask, block->un.mii.ttmmask,
		    block->un.mii.mediacaps);
		break;
	case 2:
		cmn_err(CE_NOTE, "\tSIA Regs: CSR13:%x, CSR14:%x, CSR15:%x",
		    block->un.sia.csr13, block->un.sia.csr14,
		    block->un.sia.csr15);
		break;
	}
}


/* Utility to print out binary info dumps.
Handy for SROMs, etc */ 4550 4551 static int 4552 hexcode(unsigned val) 4553 { 4554 if (val <= 9) 4555 return (val +'0'); 4556 if (val <= 15) 4557 return (val + 'a' - 10); 4558 return (-1); 4559 } 4560 4561 static void 4562 dnet_dumpbin(char *msg, unsigned char *data, int size, int len) 4563 { 4564 char hex[128], *p = hex; 4565 char ascii[128], *q = ascii; 4566 int i, j; 4567 4568 if (!len) 4569 return; 4570 4571 for (i = 0; i < len; i += size) { 4572 for (j = size - 1; j >= 0; j--) { /* PORTABILITY: byte order */ 4573 *p++ = hexcode(data[i+j] >> 4); 4574 *p++ = hexcode(data[i+j] & 0xf); 4575 *q++ = (data[i+j] < 32 || data[i+j] > 127) ? 4576 '.' : data[i]; 4577 } 4578 *p++ = ' '; 4579 if (q-ascii >= 8) { 4580 *p = *q = 0; 4581 cmn_err(CE_NOTE, "%s: %s\t%s", msg, hex, ascii); 4582 p = hex; 4583 q = ascii; 4584 } 4585 } 4586 if (p != hex) { 4587 while ((p - hex) < 8*3) 4588 *p++ = ' '; 4589 *p = *q = 0; 4590 cmn_err(CE_NOTE, "%s: %s\t%s", msg, hex, ascii); 4591 } 4592 } 4593 4594 #ifdef DNETDEBUG 4595 void 4596 dnet_usectimeout(struct dnetinstance *dnetp, uint32_t usecs, int contin, 4597 timercb_t cback) 4598 { 4599 mutex_enter(&dnetp->intrlock); 4600 dnetp->timer.start_ticks = (usecs * 100) / 8192; 4601 dnetp->timer.cb = cback; 4602 ddi_put32(dnetp->io_handle, REG32(dnetp->io_reg, GP_TIMER_REG), 4603 dnetp->timer.start_ticks | (contin ? 
GPTIMER_CONT : 0)); 4604 if (dnetp->timer.cb) 4605 enable_interrupts(dnetp); 4606 mutex_exit(&dnetp->intrlock); 4607 } 4608 4609 uint32_t 4610 dnet_usecelapsed(struct dnetinstance *dnetp) 4611 { 4612 uint32_t ticks = dnetp->timer.start_ticks - 4613 (ddi_get32(dnetp->io_handle, REG32(dnetp->io_reg, GP_TIMER_REG)) & 4614 0xffff); 4615 return ((ticks * 8192) / 100); 4616 } 4617 4618 /* ARGSUSED */ 4619 void 4620 dnet_timestamp(struct dnetinstance *dnetp, char *buf) 4621 { 4622 uint32_t elapsed = dnet_usecelapsed(dnetp); 4623 char loc[32], *p = loc; 4624 int firstdigit = 1; 4625 uint32_t divisor; 4626 4627 while (*p++ = *buf++) 4628 ; 4629 p--; 4630 4631 for (divisor = 1000000000; divisor /= 10; ) { 4632 int digit = (elapsed / divisor); 4633 elapsed -= digit * divisor; 4634 if (!firstdigit || digit) { 4635 *p++ = digit + '0'; 4636 firstdigit = 0; 4637 } 4638 4639 } 4640 4641 /* Actual zero, output it */ 4642 if (firstdigit) 4643 *p++ = '0'; 4644 4645 *p++ = '-'; 4646 *p++ = '>'; 4647 *p++ = 0; 4648 4649 printf(loc); 4650 dnet_usectimeout(dnetp, 1000000, 0, 0); 4651 } 4652 4653 #endif 4654