/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS ERI(PCI) 10/100 Mb Ethernet Device Driver
 */

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/kmem.h>
#include <sys/crc32.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/strsun.h>
#include <sys/stat.h>
#include <sys/cpu.h>
#include <sys/kstat.h>
#include <inet/common.h>
#include <sys/pattr.h>
#include <inet/mi.h>
#include <inet/nd.h>
#include <sys/ethernet.h>
#include <sys/vlan.h>
#include <sys/policy.h>
#include <sys/mac.h>
#include <sys/mac_ether.h>
#include <sys/dlpi.h>

#include <sys/pci.h>

#include "eri_phy.h"
#include "eri_mac.h"
#include "eri.h"
#include "eri_common.h"

#include "eri_msg.h"

/*
 * **** Function Prototypes *****
 */
/*
 * Entry points (man9e)
 */
static int eri_attach(dev_info_t *, ddi_attach_cmd_t);
static int eri_detach(dev_info_t *, ddi_detach_cmd_t);
static uint_t eri_intr(caddr_t);

/*
 * I/O (Input/Output) Functions
 */
static boolean_t eri_send_msg(struct eri *, mblk_t *);
static mblk_t *eri_read_dma(struct eri *, volatile struct rmd *,
    volatile int, uint64_t flags);

/*
 * Initialization Functions
 */
static boolean_t eri_init(struct eri *);
static int eri_allocthings(struct eri *);
static int eri_init_xfer_params(struct eri *);
static void eri_statinit(struct eri *);
static int eri_burstsize(struct eri *);

static void eri_setup_mac_address(struct eri *, dev_info_t *);

static uint32_t eri_init_rx_channel(struct eri *);
static void eri_init_rx(struct eri *);
static void eri_init_txmac(struct eri *);

/*
 * Un-init Functions
 */
static uint32_t eri_txmac_disable(struct eri *);
static uint32_t eri_rxmac_disable(struct eri *);
static int eri_stop(struct eri *);
static void eri_uninit(struct eri *erip);
static int eri_freebufs(struct eri *);
static boolean_t eri_reclaim(struct eri *, uint32_t);

/*
 * Transceiver (xcvr) Functions
 */
static int eri_new_xcvr(struct eri *); /* Initializes & detects xcvrs */
static int eri_reset_xcvr(struct eri *);

#ifdef ERI_10_10_FORCE_SPEED_WORKAROUND
static void eri_xcvr_force_mode(struct eri *, uint32_t *);
#endif

static void eri_mif_poll(struct eri *, soft_mif_enable_t);
static void eri_check_link(struct eri *);
static uint32_t eri_check_link_noind(struct eri *);
static link_state_t eri_mif_check(struct eri *, uint16_t, uint16_t);
static void eri_mii_write(struct eri *, uint8_t, uint16_t);
static uint32_t eri_mii_read(struct eri *, uint8_t, uint16_t *);

/*
 * Reset Functions
 */
static uint32_t eri_etx_reset(struct eri *);
static uint32_t eri_erx_reset(struct eri *);

/*
 * Error Functions
 */
static void eri_fatal_err(struct eri *, uint32_t);
static void eri_nonfatal_err(struct eri *, uint32_t);

#ifdef ERI_TX_HUNG
static int eri_check_txhung(struct eri *);
#endif

/*
 * Hardening Functions
 */
static void eri_fault_msg(struct eri *, uint_t, msg_t, const char *, ...);

/*
 * Misc Functions
 */
static void eri_savecntrs(struct eri *);

static void eri_stop_timer(struct eri *erip);
static void eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec);

static void eri_bb_force_idle(struct eri *);

/*
 * Utility Functions
 */
static mblk_t *eri_allocb(size_t size);
static mblk_t *eri_allocb_sp(size_t size);
static int eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp);
static int eri_param_set(queue_t *, mblk_t *, char *, caddr_t);

/*
 * Functions to support ndd
 */
static void eri_nd_free(caddr_t *nd_pparam);

static boolean_t eri_nd_load(caddr_t *nd_pparam, char *name,
    pfi_t get_pfi, pfi_t set_pfi, caddr_t data);

static int eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp);
static void eri_param_cleanup(struct eri *);
static int eri_param_register(struct eri *, param_t *, int);
static void eri_process_ndd_ioctl(struct eri *, queue_t *, mblk_t *, int);
static int eri_mk_mblk_tail_space(mblk_t *, mblk_t **, size_t);


static void eri_loopback(struct eri *, queue_t *, mblk_t *);

static uint32_t eri_ladrf_bit(const uint8_t *);

/*
 * Nemo (GLDv3) Functions.
 */
static int eri_m_stat(void *, uint_t, uint64_t *);
static int eri_m_start(void *);
static void eri_m_stop(void *);
static int eri_m_promisc(void *, boolean_t);
static int eri_m_multicst(void *, boolean_t, const uint8_t *);
static int eri_m_unicst(void *, const uint8_t *);
static void eri_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t eri_m_getcapab(void *, mac_capab_t, void *);
static mblk_t *eri_m_tx(void *, mblk_t *);

static mac_callbacks_t eri_m_callbacks = {
	MC_IOCTL | MC_GETCAPAB,
	eri_m_stat,
	eri_m_start,
	eri_m_stop,
	eri_m_promisc,
	eri_m_multicst,
	eri_m_unicst,
	eri_m_tx,
	NULL,
	eri_m_ioctl,
	eri_m_getcapab
};

/*
 * Define PHY Vendors: Matches to IEEE
 * Organizationally Unique Identifier (OUI)
 */
/*
 * The first two are supported as Internal XCVRs
 */
#define	PHY_VENDOR_LUCENT	0x601d

#define	PHY_LINK_NONE	0	/* Not attempted yet or retry */
#define	PHY_LINK_DOWN	1	/* Not being used */
#define	PHY_LINK_UP	2	/* Not being used */

#define	AUTO_SPEED	0
#define	FORCE_SPEED	1

/*
 * MIB II broadcast/multicast packets
 */

#define	IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
#define	IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)

#define	BUMP_InNUcast(erip, pkt) \
	if (IS_BROADCAST(pkt)) { \
		HSTAT(erip, brdcstrcv); \
	} else if (IS_MULTICAST(pkt)) { \
		HSTAT(erip, multircv); \
	}

#define	BUMP_OutNUcast(erip, pkt) \
	if (IS_BROADCAST(pkt)) { \
		HSTAT(erip, brdcstxmt); \
	} else if (IS_MULTICAST(pkt)) { \
		HSTAT(erip, multixmt); \
	}

#define	NEXTTMDP(tbasep, tmdlimp, tmdp)	(((tmdp) + 1) == tmdlimp \
	? tbasep : ((tmdp) + 1))

#define	ETHERHEADER_SIZE (sizeof (struct ether_header))

#ifdef	ERI_RCV_CKSUM
#define	ERI_PROCESS_READ(erip, bp, sum) \
{ \
	t_uscalar_t	type; \
	uint_t	start_offset, end_offset; \
\
	*(bp->b_wptr) = 0;	/* pad byte */ \
\
	/* \
	 * update MIB II statistics \
	 */ \
	HSTAT(erip, ipackets64); \
	HSTATN(erip, rbytes64, len); \
	BUMP_InNUcast(erip, bp->b_rptr); \
	type = get_ether_type(bp->b_rptr); \
	if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) { \
		start_offset = 0; \
		end_offset = MBLKL(bp) - ETHERHEADER_SIZE; \
		(void) hcksum_assoc(bp, NULL, NULL, \
		    start_offset, 0, end_offset, sum, \
		    HCK_PARTIALCKSUM, 0); \
	} else { \
		/* \
		 * Strip the PADS for 802.3 \
		 */ \
		if (type <= ETHERMTU) \
			bp->b_wptr = bp->b_rptr + \
			    ETHERHEADER_SIZE + type; \
	} \
}
#else

#define	ERI_PROCESS_READ(erip, bp) \
{ \
	t_uscalar_t	type; \
	type = get_ether_type(bp->b_rptr); \
\
	/* \
	 * update MIB II statistics \
	 */ \
	HSTAT(erip, ipackets64); \
	HSTATN(erip, rbytes64, len); \
	BUMP_InNUcast(erip, bp->b_rptr); \
	/* \
	 * Strip the PADS for 802.3 \
	 */ \
	if (type <= ETHERMTU) \
		bp->b_wptr = bp->b_rptr + ETHERHEADER_SIZE + \
		    type; \
}
#endif	/* ERI_RCV_CKSUM */

/*
 * TX Interrupt Rate
 */
static int tx_interrupt_rate = 16;
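/*
 * Receive-path sketch (the actual caller is eri_read_dma(), outside this
 * section): the hardware deposits a 16-bit one's-complement sum over the
 * frame payload in the completed RX descriptor, and the driver hands it
 * to the macro, e.g.
 *
 *	ERI_PROCESS_READ(erip, bp, sum);	(sum from the RMD flags)
 *
 * hcksum_assoc(..., HCK_PARTIALCKSUM) then lets the IP stack verify the
 * checksum by folding rather than recomputing it in software.  ("sum
 * from the RMD flags" is an assumption about where the value originates;
 * only the macro above is definitive.)
 */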
/*
 * Ethernet broadcast address definition.
 */
static uint8_t etherbroadcastaddr[] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/*
 * The following flags record which resources have been allocated during
 * initialization; they are kept in erip->alloc_flag and consumed by
 * eri_unallocthings() to tear down exactly what was set up.
 */
#define	ERI_DESC_HANDLE_ALLOC	0x0001
#define	ERI_DESC_MEM_ALLOC	0x0002
#define	ERI_DESC_MEM_MAP	0x0004
#define	ERI_RCV_HANDLE_ALLOC	0x0020
#define	ERI_RCV_HANDLE_BIND	0x0040
#define	ERI_XMIT_DVMA_ALLOC	0x0100
#define	ERI_RCV_DVMA_ALLOC	0x0200
#define	ERI_XBUFS_HANDLE_ALLOC	0x0400
#define	ERI_XBUFS_KMEM_ALLOC	0x0800
#define	ERI_XBUFS_KMEM_DMABIND	0x1000


#define	ERI_DONT_STRIP_CRC
/*
 * Translate a kernel virtual address to i/o address.
 */
#define	ERI_IOPBIOADDR(erip, a) \
	((erip)->iopbiobase + ((uintptr_t)a - (erip)->iopbkbase))

/*
 * ERI Configuration Register Value
 * Used to configure parameters that define DMA burst
 * and internal arbitration behavior.
 * For equal TX and RX bursts, set the following in the global
 * configuration register:
 *	static int global_config = 0x42;
 */

/*
 * ERI ERX Interrupt Blanking Time
 * Each count is about 16 us (2048 clocks) for 66 MHz PCI.
 */
static int intr_blank_time = 6;		/* for about 96 us */
static int intr_blank_packets = 8;	/* */

/*
 * ERX PAUSE Threshold Register value
 * The following value is for an OFF Threshold of about 15.5 Kbytes
 * and an ON Threshold of 4K bytes.
 */
static int rx_pause_threshold = 0xf8 | (0x40 << 12);
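/*
 * Worked example, assuming the documented 64-byte granularity of the
 * threshold fields: OFF = 0xf8 = 248 units -> 248 * 64 = 15872 bytes
 * (~15.5 Kbytes); ON = 0x40 = 64 units -> 64 * 64 = 4096 bytes
 * (4 Kbytes), matching the values quoted above.
 */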
static int eri_reinit_fatal = 0;
#ifdef DEBUG
static int noteri = 0;
#endif

#ifdef ERI_TX_HUNG
static int eri_reinit_txhung = 0;
#endif

#ifdef ERI_HDX_BUG_WORKAROUND
/*
 * By default enable padding in hdx mode to 97 bytes.
 * To disable, in /etc/system:
 *	set eri:eri_hdx_pad_enable=0
 */
static uchar_t eri_hdx_pad_enable = 1;
#endif

/*
 * Default values to initialize the cache line size and latency timer
 * registers in the PCI configuration space.
 * ERI_G_CACHE_LINE_SIZE_16 is defined as 16 since RIO expects it in units
 * of 4 bytes.
 */
#ifdef ERI_PM_WORKAROUND_PCI
static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_32; /* 128 bytes */
static int eri_pci_latency_timer = 0xff;		/* 255 PCI cycles */
#else
static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_16; /* 64 bytes */
static int eri_pci_latency_timer = 0x40;		/* 64 PCI cycles */
#endif
#define	ERI_CACHE_LINE_SIZE	(eri_pci_cache_line << ERI_G_CACHE_BIT)

/*
 * Claim the device is ultra-capable of burst in the beginning.  Use
 * the value returned by ddi_dma_burstsizes() to actually set the ERI
 * global configuration register later.
 *
 * PCI_ERI supports Infinite burst or 64-byte-multiple bursts.
 */
#define	ERI_LIMADDRLO	((uint64_t)0x00000000)
#define	ERI_LIMADDRHI	((uint64_t)0xffffffff)

static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* version number */
	(uint64_t)ERI_LIMADDRLO, /* low address */
	(uint64_t)ERI_LIMADDRHI, /* high address */
	(uint64_t)0x00ffffff,	/* address counter max */
	(uint64_t)1,		/* alignment */
	(uint_t)0xe000e0,	/* burstsizes for 32 and 64 bit xfers */
	(uint32_t)0x1,		/* minimum transfer size */
	(uint64_t)0x7fffffff,	/* maximum transfer size */
	(uint64_t)0x00ffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* attribute flags */
};

static ddi_dma_attr_t desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	(uint64_t)ERI_LIMADDRLO, /* low address */
	(uint64_t)ERI_LIMADDRHI, /* high address */
	(uint64_t)0x00ffffff,	/* address counter max */
	(uint64_t)8,		/* alignment */
	(uint_t)0xe000e0,	/* burstsizes for 32 and 64 bit xfers */
	(uint32_t)0x1,		/* minimum transfer size */
	(uint64_t)0x7fffffff,	/* maximum transfer size */
	(uint64_t)0x00ffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	16,			/* granularity */
	0			/* attribute flags */
};

static ddi_device_acc_attr_t buf_attr = {
	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
	DDI_NEVERSWAP_ACC,	/* devacc_attr_endian_flags */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC,	/* devacc_attr_access */
};

ddi_dma_lim_t eri_dma_limits = {
	(uint64_t)ERI_LIMADDRLO, /* dlim_addr_lo */
	(uint64_t)ERI_LIMADDRHI, /* dlim_addr_hi */
	(uint64_t)ERI_LIMADDRHI, /* dlim_cntr_max */
	(uint_t)0x00e000e0,	/* dlim_burstsizes for 32 and 64 bit xfers */
	(uint32_t)0x1,		/* dlim_minxfer */
	1024			/* dlim_speed */
};
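/*
 * The burstsize words above use the standard DDI encoding: bit n set
 * means a 2^n-byte burst is supported, with the low 16 bits applying to
 * 32-bit transfers and the high 16 bits to 64-bit transfers; thus
 * 0xe000e0 advertises 32-, 64- and 128-byte bursts in both modes.
 */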
/*
 * Link Configuration variables
 *
 * On Motherboard implementations, 10/100 Mbps speeds may be supported
 * by using both the Serial Link and the MII on Non-serial-link interface.
 * When both links are present, the driver automatically tries to bring up
 * both.  If both are up, the Gigabit Serial Link is selected for use, by
 * default.  The following configuration variable is used to force the
 * selection of one of the links when both are up.
 * To change the default selection to the MII link when both the Serial
 * Link and the MII link are up, change eri_default_link to 1.
 *
 * Once a link is in use, the driver will continue to use that link till it
 * goes down.  When it goes down, the driver will look at the status of both
 * the links again for link selection.
 *
 * Currently the standard is not stable w.r.t. gigabit link configuration
 * using auto-negotiation procedures.  Meanwhile, the link may be configured
 * in "forced" mode using the "autonegotiation enable" bit (bit-12) in the
 * PCS MII Command Register.  In this mode the PCS sends "idles" until it
 * sees "idles" as initialization, instead of the Link Configuration protocol
 * where a Config register is exchanged.  In this mode, the ERI is programmed
 * for full-duplex operation with both pauseTX and pauseRX (for flow control)
 * enabled.
 */

static int select_link = 0;	/* automatic selection */
static int default_link = 0;	/* Select Serial link if both are up */

/*
 * The following variables are used for configuring link-operation
 * for all the "eri" interfaces in the system.
 * Later these parameters may be changed per interface using the "ndd"
 * command.  These parameters may also be specified as properties using
 * the .conf file mechanism for each interface.
 */

/*
 * The following variable value will be overridden by the
 * "link-pulse-disabled" property, which may be created by OBP or an
 * eri.conf file.  This property is applicable only for 10 Mbps links.
 */
static int link_pulse_disabled = 0;	/* link pulse disabled */

/* For MII-based FastEthernet links */
static int adv_autoneg_cap = 1;
static int adv_100T4_cap = 0;
static int adv_100fdx_cap = 1;
static int adv_100hdx_cap = 1;
static int adv_10fdx_cap = 1;
static int adv_10hdx_cap = 1;
static int adv_pauseTX_cap = 0;
static int adv_pauseRX_cap = 0;

/*
 * The following gap parameters are in terms of byte times.
 */
static int ipg0 = 8;
static int ipg1 = 8;
static int ipg2 = 4;

static int lance_mode = 1;	/* to enable LANCE mode */
static int mifpoll_enable = 0;	/* to enable mif poll */
static int ngu_enable = 0;	/* to enable Never Give Up mode */

static int eri_force_mlf = 0;	/* to enable mif poll */
static int eri_phy_mintrans = 1;	/* Lu3X31T mintrans algorithm */
/*
 * For the MII interface, the External Transceiver is selected when present.
 * The following variable is used to select the Internal Transceiver even
 * when the External Transceiver is present.
 */
static int use_int_xcvr = 0;
static int pace_size = 0;	/* Do not use pacing for now */

static int eri_use_dvma_rx = 0;	/* =1:use dvma */
static int eri_rx_bcopy_max = RX_BCOPY_MAX;	/* =1:use bcopy() */
static int eri_overflow_reset = 1;	/* global reset if rx_fifo_overflow */
static int eri_tx_ring_size = 2048;	/* number of entries in tx ring */
static int eri_rx_ring_size = 1024;	/* number of entries in rx ring */
/*
 * The following parameters may be configured by the user.  If they are not
 * configured by the user, the values will be based on the capabilities of
 * the transceiver.
 * The value "ERI_NOTUSR" is ORed with the parameter value to indicate values
 * which are NOT configured by the user.
 */

#define	ERI_NOTUSR	0x0f000000
#define	ERI_MASK_1BIT	0x1
#define	ERI_MASK_2BIT	0x3
#define	ERI_MASK_8BIT	0xff


/*
 * Note:
 * ERI has all of the above capabilities.
 * Only when an External Transceiver is selected for MII-based FastEthernet
 * link operation do the capabilities depend upon the capabilities of the
 * External Transceiver.
 */
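/*
 * Illustrative use of ERI_NOTUSR: a parameter the user has not
 * configured is stored as (ERI_NOTUSR | default), so a test such as
 *
 *	if ((param & ERI_NOTUSR) == 0)
 *		... the value was explicitly configured ...
 *
 * separates user settings from driver defaults (a sketch only; the
 * actual consumers of these masks live elsewhere in the driver).
 */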
/* ------------------------------------------------------------------------- */

static param_t param_arr[] = {
	/* min	max	value	r/w/hidden+name */
	{ 0,	2,	2,	"-transceiver_inuse"},
	{ 0,	1,	0,	"-link_status"},
	{ 0,	1,	0,	"-link_speed"},
	{ 0,	1,	0,	"-link_mode"},
	{ 0,	255,	8,	"+ipg1"},
	{ 0,	255,	4,	"+ipg2"},
	{ 0,	1,	0,	"+use_int_xcvr"},
	{ 0,	255,	0,	"+pace_size"},
	{ 0,	1,	1,	"+adv_autoneg_cap"},
	{ 0,	1,	1,	"+adv_100T4_cap"},
	{ 0,	1,	1,	"+adv_100fdx_cap"},
	{ 0,	1,	1,	"+adv_100hdx_cap"},
	{ 0,	1,	1,	"+adv_10fdx_cap"},
	{ 0,	1,	1,	"+adv_10hdx_cap"},
	{ 0,	1,	1,	"-autoneg_cap"},
	{ 0,	1,	1,	"-100T4_cap"},
	{ 0,	1,	1,	"-100fdx_cap"},
	{ 0,	1,	1,	"-100hdx_cap"},
	{ 0,	1,	1,	"-10fdx_cap"},
	{ 0,	1,	1,	"-10hdx_cap"},
	{ 0,	1,	0,	"-lp_autoneg_cap"},
	{ 0,	1,	0,	"-lp_100T4_cap"},
	{ 0,	1,	0,	"-lp_100fdx_cap"},
	{ 0,	1,	0,	"-lp_100hdx_cap"},
	{ 0,	1,	0,	"-lp_10fdx_cap"},
	{ 0,	1,	0,	"-lp_10hdx_cap"},
	{ 0,	1,	1,	"+lance_mode"},
	{ 0,	31,	8,	"+ipg0"},
	{ 0,	127,	6,	"+intr_blank_time"},
	{ 0,	255,	8,	"+intr_blank_packets"},
	{ 0,	1,	1,	"!serial-link"},
	{ 0,	2,	1,	"!non-serial-link"},
	{ 0,	1,	0,	"%select-link"},
	{ 0,	1,	0,	"%default-link"},
	{ 0,	2,	0,	"!link-in-use"},
	{ 0,	1,	1,	"%adv_asm_dir_cap"},
	{ 0,	1,	1,	"%adv_pause_cap"},
	{ 0,	1,	0,	"!asm_dir_cap"},
	{ 0,	1,	0,	"!pause_cap"},
	{ 0,	1,	0,	"!lp_asm_dir_cap"},
	{ 0,	1,	0,	"!lp_pause_cap"},
};

DDI_DEFINE_STREAM_OPS(eri_dev_ops, nulldev, nulldev, eri_attach, eri_detach,
    nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module.  This one is a driver */
	"Sun RIO 10/100 Mb Ethernet",
	&eri_dev_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/*
 * Hardware Independent Functions
 * New Section
 */

int
_init(void)
{
	int	status;

	mac_init_ops(&eri_dev_ops, "eri");
	if ((status = mod_install(&modlinkage)) != 0) {
		mac_fini_ops(&eri_dev_ops);
	}
	return (status);
}

int
_fini(void)
{
	int	status;

	status = mod_remove(&modlinkage);
	if (status == 0) {
		mac_fini_ops(&eri_dev_ops);
	}
	return (status);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
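/*
 * Note the ordering in _init() and _fini() above: mac_init_ops() must
 * wrap the dev_ops before mod_install() so the GLDv3 framework can
 * recognize the driver when it attaches, and mac_fini_ops() undoes that
 * both on a failed install and after a successful mod_remove().
 */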
/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
static int
eri_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct eri	*erip = NULL;
	mac_register_t	*macp = NULL;
	int		regno;
	boolean_t	doinit;
	boolean_t	mutex_inited = B_FALSE;
	boolean_t	intr_add = B_FALSE;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		if ((erip = ddi_get_driver_private(dip)) == NULL)
			return (DDI_FAILURE);

		mutex_enter(&erip->intrlock);
		erip->flags &= ~ERI_SUSPENDED;
		erip->init_macregs = 1;
		param_linkup = 0;
		erip->stats.link_up = LINK_STATE_DOWN;
		erip->linkcheck = 0;

		doinit = (erip->flags & ERI_STARTED) ? B_TRUE : B_FALSE;
		mutex_exit(&erip->intrlock);

		if (doinit && !eri_init(erip)) {
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Allocate soft device data structure
	 */
	erip = kmem_zalloc(sizeof (struct eri), KM_SLEEP);

	/*
	 * Initialize as many elements as possible.
	 */
	ddi_set_driver_private(dip, erip);
	erip->dip = dip;			/* dip */
	erip->instance = ddi_get_instance(dip);	/* instance */
	erip->flags = 0;
	erip->multi_refcnt = 0;
	erip->promisc = B_FALSE;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
		    "mac_alloc failed");
		goto attach_fail;
	}
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = erip;
	macp->m_dip = dip;
	macp->m_src_addr = erip->ouraddr;
	macp->m_callbacks = &eri_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Map in the device registers.
	 * Separate pointers will be set up for the following
	 * register groups within the GEM Register Space:
	 *	Global register set
	 *	ETX register set
	 *	ERX register set
	 *	BigMAC register set
	 *	MIF register set
	 */

	if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
		    "ddi_dev_nregs failed, returned %d", regno);
		goto attach_fail;
	}

	/*
	 * Map the PCI config space
	 */
	if (pci_config_setup(dip, &erip->pci_config_handle) != DDI_SUCCESS) {
		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
		    "%s pci_config_setup()", config_space_fatal_msg);
		goto attach_fail;
	}

	/*
	 * Initialize device attributes structure
	 */
	erip->dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	erip->dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	erip->dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;

	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->globregp), 0, 0,
	    &erip->dev_attr, &erip->globregh)) {
		goto attach_fail;
	}
	erip->etxregh = erip->globregh;
	erip->erxregh = erip->globregh;
	erip->bmacregh = erip->globregh;
	erip->mifregh = erip->globregh;

	erip->etxregp = (void *)(((caddr_t)erip->globregp) + 0x2000);
	erip->erxregp = (void *)(((caddr_t)erip->globregp) + 0x4000);
	erip->bmacregp = (void *)(((caddr_t)erip->globregp) + 0x6000);
	erip->mifregp = (void *)(((caddr_t)erip->globregp) + 0x6200);

	/*
	 * Map the software reset register.
	 */
	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->sw_reset_reg),
	    0x1010, 4, &erip->dev_attr, &erip->sw_reset_regh)) {
		ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
		    mregs_4soft_reset_fail_msg);
		goto attach_fail;
	}

	/*
	 * Try to stop the device.
	 * This is done before we set up interrupt handling.
	 */
	if (eri_stop(erip))
		goto attach_fail;

	/*
	 * Set the PCI latency timer register.
	 */
	pci_config_put8(erip->pci_config_handle, PCI_CONF_LATENCY_TIMER,
	    (uchar_t)eri_pci_latency_timer);

	if (ddi_intr_hilevel(dip, 0)) {
		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
		    " high-level interrupts are not supported");
		goto attach_fail;
	}

	/*
	 * Get the interrupt cookie so the mutexes can be initialized.
	 */
	if (ddi_get_iblock_cookie(dip, 0, &erip->cookie) != DDI_SUCCESS)
		goto attach_fail;

	/*
	 * Initialize mutexes for this device.
	 */
	mutex_init(&erip->xmitlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
	mutex_init(&erip->intrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
	mutex_init(&erip->linklock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
	mutex_init(&erip->xcvrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);

	mutex_inited = B_TRUE;

	/*
	 * Add interrupt to system
	 */
	if (ddi_add_intr(dip, 0, &erip->cookie, 0, eri_intr, (caddr_t)erip) ==
	    DDI_SUCCESS)
		intr_add = B_TRUE;
	else {
		goto attach_fail;
	}

	/*
	 * Set up the ethernet mac address.
	 */
	(void) eri_setup_mac_address(erip, dip);

	if (eri_init_xfer_params(erip))
		goto attach_fail;

	if (eri_burstsize(erip) == DDI_FAILURE) {
		goto attach_fail;
	}

	/*
	 * Set up the receive and transmit ring sizes.
	 */
	ERI_RPENDING = eri_rx_ring_size;
	ERI_TPENDING = eri_tx_ring_size;

	erip->rpending_mask = ERI_RPENDING - 1;
	erip->rmdmax_mask = ERI_RPENDING - 1;
	erip->mif_config = (ERI_PHY_BMSR << ERI_MIF_CFGPR_SHIFT);

	erip->stats.pmcap = ERI_PMCAP_NONE;
	if (pci_report_pmcap(dip, PCI_PM_IDLESPEED, (void *)4000) ==
	    DDI_SUCCESS)
		erip->stats.pmcap = ERI_PMCAP_4MHZ;

	if (mac_register(macp, &erip->mh) != 0)
		goto attach_fail;

	mac_free(macp);

	return (DDI_SUCCESS);

attach_fail:
	if (erip->pci_config_handle)
		(void) pci_config_teardown(&erip->pci_config_handle);

	if (mutex_inited) {
		mutex_destroy(&erip->xmitlock);
		mutex_destroy(&erip->intrlock);
		mutex_destroy(&erip->linklock);
		mutex_destroy(&erip->xcvrlock);
	}

	ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, attach_fail_msg);

	if (intr_add)
		ddi_remove_intr(dip, 0, erip->cookie);

	if (erip->globregh)
		ddi_regs_map_free(&erip->globregh);

	if (macp != NULL)
		mac_free(macp);
	if (erip != NULL)
		kmem_free(erip, sizeof (*erip));

	return (DDI_FAILURE);
}
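/*
 * eri_attach() rejects high-level interrupts because every driver data
 * structure is protected by ordinary mutexes initialized with the
 * interrupt's iblock cookie; a handler running above lock level could
 * not take those mutexes.
 */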
static int
eri_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct eri	*erip;
	int		i;

	if ((erip = ddi_get_driver_private(dip)) == NULL) {
		/*
		 * No resources allocated.
		 */
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		erip->flags |= ERI_SUSPENDED;
		eri_uninit(erip);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	if (erip->flags & (ERI_RUNNING | ERI_SUSPENDED)) {
		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, busy_msg);
		return (DDI_FAILURE);
	}

	if (mac_unregister(erip->mh) != 0) {
		return (DDI_FAILURE);
	}

	/*
	 * Make the device quiescent
	 */
	(void) eri_stop(erip);

	/*
	 * Remove instance of the intr
	 */
	ddi_remove_intr(dip, 0, erip->cookie);

	if (erip->pci_config_handle)
		(void) pci_config_teardown(&erip->pci_config_handle);

	/*
	 * Destroy all mutexes and data structures allocated during
	 * attach time.
	 */

	if (erip->globregh)
		ddi_regs_map_free(&erip->globregh);

	erip->etxregh = NULL;
	erip->erxregh = NULL;
	erip->bmacregh = NULL;
	erip->mifregh = NULL;
	erip->globregh = NULL;

	if (erip->sw_reset_regh)
		ddi_regs_map_free(&erip->sw_reset_regh);

	if (erip->ksp)
		kstat_delete(erip->ksp);

	eri_stop_timer(erip); /* acquire linklock */
	eri_start_timer(erip, eri_check_link, 0);
	mutex_destroy(&erip->xmitlock);
	mutex_destroy(&erip->intrlock);
	mutex_destroy(&erip->linklock);
	mutex_destroy(&erip->xcvrlock);

	if (erip->md_h) {
		if (ddi_dma_unbind_handle(erip->md_h) ==
		    DDI_FAILURE)
			return (DDI_FAILURE);
		ddi_dma_mem_free(&erip->mdm_h);
		ddi_dma_free_handle(&erip->md_h);
	}

	if (eri_freebufs(erip))
		return (DDI_FAILURE);

	/* dvma handle case */
	if (erip->eri_dvmarh) {
		(void) dvma_release(erip->eri_dvmarh);
		erip->eri_dvmarh = NULL;
	} else {
		/*
		 * xmit_dma_mode, erip->ndmaxh[i] == NULL for dvma
		 */
		for (i = 0; i < ERI_RPENDING; i++)
			if (erip->ndmarh[i])
				ddi_dma_free_handle(&erip->ndmarh[i]);
	}
	/*
	 * Release TX buffer
	 */
	if (erip->tbuf_ioaddr != 0) {
		(void) ddi_dma_unbind_handle(erip->tbuf_handle);
		erip->tbuf_ioaddr = 0;
	}
	if (erip->tbuf_kaddr != NULL) {
		ddi_dma_mem_free(&erip->tbuf_acch);
		erip->tbuf_kaddr = NULL;
	}
	if (erip->tbuf_handle != NULL) {
		ddi_dma_free_handle(&erip->tbuf_handle);
		erip->tbuf_handle = NULL;
	}

	eri_param_cleanup(erip);

	ddi_set_driver_private(dip, NULL);
	kmem_free((caddr_t)erip, sizeof (struct eri));

	return (DDI_SUCCESS);
}
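/*
 * Receive buffers are mapped through exactly one of two mechanisms: the
 * fast DVMA interface (eri_dvmarh) or per-descriptor DDI DMA handles
 * (ndmarh[]).  That is why the release paths in eri_detach() above are
 * mutually exclusive.
 */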
/*
 * To set up the mac address for the network interface:
 * The adapter card may support a local mac address which is published
 * in a device node property "local-mac-address".  This mac address is
 * treated as the factory-installed mac address for the DLPI interface.
 * If the adapter firmware has used the device for diskless boot
 * operation it publishes a property called "mac-address" for use by
 * inetboot and the device driver.
 * If "mac-address" is not found, the system options property
 * "local-mac-address" is used to select the mac-address.  If this option
 * is set to "true", and "local-mac-address" has been found, then
 * local-mac-address is used; otherwise the system mac address is used
 * by calling the "localetheraddr()" function.
 */

static void
eri_setup_mac_address(struct eri *erip, dev_info_t *dip)
{
	uchar_t		*prop;
	char		*uselocal;
	unsigned	prop_len;
	uint32_t	addrflags = 0;
	struct ether_addr	factaddr;

	/*
	 * Check if it is an adapter with its own local mac address.
	 * If it is present, save it as the "factory-address"
	 * for this adapter.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "local-mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
		if (prop_len == ETHERADDRL) {
			addrflags = ERI_FACTADDR_PRESENT;
			bcopy(prop, &factaddr, ETHERADDRL);
			ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
			    lether_addr_msg, ether_sprintf(&factaddr));
		}
		ddi_prop_free(prop);
	}
	/*
	 * Check if the adapter has published the "mac-address" property.
	 * If it is present, use it as the mac address for this device.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
		if (prop_len >= ETHERADDRL) {
			bcopy(prop, erip->ouraddr, ETHERADDRL);
			ddi_prop_free(prop);
			return;
		}
		ddi_prop_free(prop);
	}

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
	    &uselocal) == DDI_PROP_SUCCESS) {
		if ((strcmp("true", uselocal) == 0) &&
		    (addrflags & ERI_FACTADDR_PRESENT)) {
			addrflags |= ERI_FACTADDR_USE;
			bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
			ddi_prop_free(uselocal);
			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
			    lmac_addr_msg);
			return;
		}
		ddi_prop_free(uselocal);
	}

	/*
	 * Get the system ethernet address.
	 */
	(void) localetheraddr(NULL, &factaddr);
	bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
}
/*
 * Calculate the bit in the multicast address filter that selects the given
 * address.
 * Note: For ERI, the last 8 bits are used.
 */

static uint32_t
eri_ladrf_bit(const uint8_t *addr)
{
	uint32_t crc;

	CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);

	/*
	 * Just want the 8 most significant bits.
	 */
	return ((~crc) >> 24);
}
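/*
 * The value returned above indexes a 256-bit logical address filter
 * spread across sixteen 16-bit hash registers (hash0..hash15): the
 * upper four bits select the register, the lower four the bit within
 * it.  A hypothetical expansion of LADRF_SET() (the real macro lives in
 * the driver headers) would be:
 *
 *	erip->ladrf[bit >> 4] |= 1 << (bit & 0xf);
 */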
static void
eri_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	struct eri	*erip = arg;
	struct iocblk	*iocp = (void *)mp->b_rptr;
	int		err;

	ASSERT(erip != NULL);

	/*
	 * Privilege checks.
	 */
	switch (iocp->ioc_cmd) {
	case ERI_SET_LOOP_MODE:
	case ERI_ND_SET:
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
		break;
	default:
		break;
	}

	switch (iocp->ioc_cmd) {
	case ERI_ND_GET:
	case ERI_ND_SET:
		eri_process_ndd_ioctl(erip, wq, mp, iocp->ioc_cmd);
		break;

	case ERI_SET_LOOP_MODE:
	case ERI_GET_LOOP_MODE:
		/*
		 * XXX: Consider updating this to the new netlb ioctls.
		 */
		eri_loopback(erip, wq, mp);
		break;

	default:
		miocnak(wq, mp, 0, EINVAL);
		break;
	}

	ASSERT(!MUTEX_HELD(&erip->linklock));
}

static void
eri_loopback(struct eri *erip, queue_t *wq, mblk_t *mp)
{
	struct iocblk	*iocp = (void *)mp->b_rptr;
	loopback_t	*al;

	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t)) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	al = (void *)mp->b_cont->b_rptr;

	switch (iocp->ioc_cmd) {
	case ERI_SET_LOOP_MODE:
		switch (al->loopback) {
		case ERI_LOOPBACK_OFF:
			erip->flags &= (~ERI_MACLOOPBACK & ~ERI_SERLOOPBACK);
			/* force link status to go down */
			param_linkup = 0;
			erip->stats.link_up = LINK_STATE_DOWN;
			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
			(void) eri_init(erip);
			break;

		case ERI_MAC_LOOPBACK_ON:
			erip->flags |= ERI_MACLOOPBACK;
			erip->flags &= ~ERI_SERLOOPBACK;
			param_linkup = 0;
			erip->stats.link_up = LINK_STATE_DOWN;
			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
			(void) eri_init(erip);
			break;

		case ERI_PCS_LOOPBACK_ON:
			break;

		case ERI_SER_LOOPBACK_ON:
			erip->flags |= ERI_SERLOOPBACK;
			erip->flags &= ~ERI_MACLOOPBACK;
			/* force link status to go down */
			param_linkup = 0;
			erip->stats.link_up = LINK_STATE_DOWN;
			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
			(void) eri_init(erip);
			break;

		default:
			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
			    loopback_val_default);
			miocnak(wq, mp, 0, EINVAL);
			return;
		}
		miocack(wq, mp, 0, 0);
		break;

	case ERI_GET_LOOP_MODE:
		al->loopback = ERI_MAC_LOOPBACK_ON | ERI_PCS_LOOPBACK_ON |
		    ERI_SER_LOOPBACK_ON;
		miocack(wq, mp, sizeof (loopback_t), 0);
		break;

	default:
		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
		    loopback_cmd_default);
	}
}

static int
eri_m_promisc(void *arg, boolean_t on)
{
	struct eri	*erip = arg;

	mutex_enter(&erip->intrlock);
	erip->promisc = on;
	eri_init_rx(erip);
	mutex_exit(&erip->intrlock);
	return (0);
}

/*
 * This is to support an unlimited number of multicast memberships.
 */
static int
eri_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	struct eri	*erip = arg;
	uint32_t	ladrf_bit;

	/*
	 * If this address's bit was not already set in the local address
	 * filter, add it and re-initialize the hardware.
	 */
	ladrf_bit = eri_ladrf_bit(mca);

	mutex_enter(&erip->intrlock);
	if (add) {
		erip->ladrf_refcnt[ladrf_bit]++;
		if (erip->ladrf_refcnt[ladrf_bit] == 1) {
			LADRF_SET(erip, ladrf_bit);
			erip->multi_refcnt++;
			eri_init_rx(erip);
		}
	} else {
		erip->ladrf_refcnt[ladrf_bit]--;
		if (erip->ladrf_refcnt[ladrf_bit] == 0) {
			LADRF_CLR(erip, ladrf_bit);
			erip->multi_refcnt--;
			eri_init_rx(erip);
		}
	}
	mutex_exit(&erip->intrlock);
	return (0);
}
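/*
 * Several multicast addresses can hash to the same filter bit, so
 * eri_m_multicst() keeps a per-bit reference count in ladrf_refcnt[]
 * and reprograms the hardware hash only on the 0->1 and 1->0
 * transitions.  This is what makes the number of multicast memberships
 * effectively unlimited with a fixed 256-bit filter.
 */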
static int
eri_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct eri	*erip = arg;

	/*
	 * Set new interface local address and re-init device.
	 * This is destructive to any other streams attached
	 * to this device.
	 */
	mutex_enter(&erip->intrlock);
	bcopy(macaddr, &erip->ouraddr, ETHERADDRL);
	eri_init_rx(erip);
	mutex_exit(&erip->intrlock);
	return (0);
}

/*ARGSUSED*/
static boolean_t
eri_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;
		*hcksum_txflags = HCKSUM_INET_PARTIAL;
		return (B_TRUE);
	}
	case MAC_CAPAB_POLL:
	default:
		return (B_FALSE);
	}
}
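/*
 * HCKSUM_INET_PARTIAL advertises partial (one's-complement) checksum
 * offload on transmit: the stack hands down start/stuff offsets with
 * each packet rather than a precomputed checksum.  It is the
 * transmit-side counterpart of the HCK_PARTIALCKSUM flag used by
 * ERI_PROCESS_READ() on the receive side.
 */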
static int
eri_m_start(void *arg)
{
	struct eri	*erip = arg;

	mutex_enter(&erip->intrlock);
	erip->flags |= ERI_STARTED;
	mutex_exit(&erip->intrlock);

	if (!eri_init(erip)) {
		mutex_enter(&erip->intrlock);
		erip->flags &= ~ERI_STARTED;
		mutex_exit(&erip->intrlock);
		return (EIO);
	}
	return (0);
}

static void
eri_m_stop(void *arg)
{
	struct eri	*erip = arg;

	mutex_enter(&erip->intrlock);
	erip->flags &= ~ERI_STARTED;
	mutex_exit(&erip->intrlock);
	eri_uninit(erip);
}

static int
eri_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct eri	*erip = arg;
	struct stats	*esp;
	boolean_t	macupdate = B_FALSE;

	esp = &erip->stats;

	mutex_enter(&erip->xmitlock);
	if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
		erip->tx_completion =
		    GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
		macupdate |= eri_reclaim(erip, erip->tx_completion);
	}
	mutex_exit(&erip->xmitlock);
	if (macupdate)
		mac_tx_update(erip->mh);

	eri_savecntrs(erip);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = esp->ifspeed * 1000000ULL;
		break;
	case MAC_STAT_MULTIRCV:
		*val = esp->multircv;
		break;
	case MAC_STAT_BRDCSTRCV:
		*val = esp->brdcstrcv;
		break;
	case MAC_STAT_IPACKETS:
		*val = esp->ipackets64;
		break;
	case MAC_STAT_RBYTES:
		*val = esp->rbytes64;
		break;
	case MAC_STAT_OBYTES:
		*val = esp->obytes64;
		break;
	case MAC_STAT_OPACKETS:
		*val = esp->opackets64;
		break;
	case MAC_STAT_IERRORS:
		*val = esp->ierrors;
		break;
	case MAC_STAT_OERRORS:
		*val = esp->oerrors;
		break;
	case MAC_STAT_MULTIXMT:
		*val = esp->multixmt;
		break;
	case MAC_STAT_BRDCSTXMT:
		*val = esp->brdcstxmt;
		break;
	case MAC_STAT_NORCVBUF:
		*val = esp->norcvbuf;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = esp->noxmtbuf;
		break;
	case MAC_STAT_UNDERFLOWS:
		*val = esp->txmac_urun;
		break;
	case MAC_STAT_OVERFLOWS:
		*val = esp->rx_overflow;
		break;
	case MAC_STAT_COLLISIONS:
		*val = esp->collisions;
		break;
	case ETHER_STAT_ALIGN_ERRORS:
		*val = esp->rx_align_err;
		break;
	case ETHER_STAT_FCS_ERRORS:
		*val = esp->rx_crc_err;
		break;
	case ETHER_STAT_EX_COLLISIONS:
		*val = esp->excessive_coll;
		break;
	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = esp->late_coll;
		break;
	case ETHER_STAT_FIRST_COLLISIONS:
		*val = esp->first_coll;
		break;
	case ETHER_STAT_LINK_DUPLEX:
		*val = esp->link_duplex;
		break;
	case ETHER_STAT_TOOLONG_ERRORS:
		*val = esp->rx_toolong_pkts;
		break;
	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = esp->rx_runt;
		break;

	case ETHER_STAT_XCVR_ADDR:
		*val = erip->phyad;
		break;

	case ETHER_STAT_XCVR_INUSE:
		*val = XCVR_100X;	/* should always be 100X for now */
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = param_bmsr_100fdx;
		break;
	case ETHER_STAT_CAP_100HDX:
		*val = param_bmsr_100hdx;
		break;
	case ETHER_STAT_CAP_10FDX:
		*val = param_bmsr_10fdx;
		break;
	case ETHER_STAT_CAP_10HDX:
		*val = param_bmsr_10hdx;
		break;
	case ETHER_STAT_CAP_AUTONEG:
		*val = param_bmsr_ancap;
		break;
	case ETHER_STAT_CAP_ASMPAUSE:
		*val = param_bmsr_asm_dir;
		break;
	case ETHER_STAT_CAP_PAUSE:
		*val = param_bmsr_pause;
		break;
	case ETHER_STAT_ADV_CAP_100FDX:
		*val = param_anar_100fdx;
		break;
	case ETHER_STAT_ADV_CAP_100HDX:
		*val = param_anar_100hdx;
		break;
	case ETHER_STAT_ADV_CAP_10FDX:
		*val = param_anar_10fdx;
		break;
	case ETHER_STAT_ADV_CAP_10HDX:
		*val = param_anar_10hdx;
		break;
	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = param_autoneg;
		break;
	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = param_anar_asm_dir;
		break;
	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = param_anar_pause;
		break;
	case ETHER_STAT_LP_CAP_100FDX:
		*val = param_anlpar_100fdx;
		break;
	case ETHER_STAT_LP_CAP_100HDX:
		*val = param_anlpar_100hdx;
		break;
	case ETHER_STAT_LP_CAP_10FDX:
		*val = param_anlpar_10fdx;
		break;
	case ETHER_STAT_LP_CAP_10HDX:
		*val = param_anlpar_10hdx;
		break;
	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = param_aner_lpancap;
		break;
	case ETHER_STAT_LP_CAP_ASMPAUSE:
		*val = param_anlpar_pauseTX;
		break;
	case ETHER_STAT_LP_CAP_PAUSE:
		*val = param_anlpar_pauseRX;
		break;
	case ETHER_STAT_LINK_PAUSE:
		*val = esp->pausing;
		break;
	case ETHER_STAT_LINK_ASMPAUSE:
		*val = param_anar_asm_dir &&
		    param_anlpar_pauseTX &&
		    (param_anar_pause != param_anlpar_pauseRX);
		break;
	case ETHER_STAT_LINK_AUTONEG:
		*val = param_autoneg && param_aner_lpancap;
		break;
	default:
		return (ENOTSUP);	/* unrecognized statistic */
	}
	return (0);
}

/*
 * Hardware Functions
 * New Section
 */

/*
 * Initialize the MAC registers.  Some of the MAC registers are initialized
 * just once since Global Reset or MAC reset doesn't clear them.  Others (like
 * Host MAC Address Registers) are cleared on every reset and have to be
 * reinitialized.
 */
static void
eri_init_macregs_generic(struct eri *erip)
{
	/*
	 * Set up the MAC parameter registers once
	 * after power cycle.  SUSPEND/RESUME also requires
	 * setting these registers.
	 */
	if ((erip->stats.inits == 1) || (erip->init_macregs)) {
		erip->init_macregs = 0;
		PUT_MACREG(ipg0, param_ipg0);
		PUT_MACREG(ipg1, param_ipg1);
		PUT_MACREG(ipg2, param_ipg2);
		PUT_MACREG(macmin, BMAC_MIN_FRAME_SIZE);
#ifdef ERI_RX_TAG_ERROR_WORKAROUND
		PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE_TAG | BMAC_MAX_BURST);
#else
		PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE | BMAC_MAX_BURST);
#endif
		PUT_MACREG(palen, BMAC_PREAMBLE_SIZE);
		PUT_MACREG(jam, BMAC_JAM_SIZE);
		PUT_MACREG(alimit, BMAC_ATTEMPT_LIMIT);
		PUT_MACREG(macctl_type, BMAC_CONTROL_TYPE);
		PUT_MACREG(rseed,
		    ((erip->ouraddr[0] & 0x3) << 8) | erip->ouraddr[1]);

		PUT_MACREG(madd3, BMAC_ADDRESS_3);
		PUT_MACREG(madd4, BMAC_ADDRESS_4);
		PUT_MACREG(madd5, BMAC_ADDRESS_5);

		/* Program MAC Control address */
		PUT_MACREG(madd6, BMAC_ADDRESS_6);
		PUT_MACREG(madd7, BMAC_ADDRESS_7);
		PUT_MACREG(madd8, BMAC_ADDRESS_8);

		PUT_MACREG(afr0, BMAC_AF_0);
		PUT_MACREG(afr1, BMAC_AF_1);
		PUT_MACREG(afr2, BMAC_AF_2);
		PUT_MACREG(afmr1_2, BMAC_AF21_MASK);
		PUT_MACREG(afmr0, BMAC_AF0_MASK);
	}

	/* The counters need to be zeroed */
	PUT_MACREG(nccnt, 0);
	PUT_MACREG(fccnt, 0);
	PUT_MACREG(excnt, 0);
	PUT_MACREG(ltcnt, 0);
	PUT_MACREG(dcnt, 0);
	PUT_MACREG(frcnt, 0);
	PUT_MACREG(lecnt, 0);
	PUT_MACREG(aecnt, 0);
	PUT_MACREG(fecnt, 0);
	PUT_MACREG(rxcv, 0);

	if (erip->pauseTX)
		PUT_MACREG(spcmd, BMAC_SEND_PAUSE_CMD);
	else
		PUT_MACREG(spcmd, 0);

	/*
	 * Program BigMAC with local individual ethernet address.
	 */
	PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
	PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
	PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);

	/*
	 * Install multicast address filter.
	 */
	PUT_MACREG(hash0, erip->ladrf[0]);
	PUT_MACREG(hash1, erip->ladrf[1]);
	PUT_MACREG(hash2, erip->ladrf[2]);
	PUT_MACREG(hash3, erip->ladrf[3]);
	PUT_MACREG(hash4, erip->ladrf[4]);
	PUT_MACREG(hash5, erip->ladrf[5]);
	PUT_MACREG(hash6, erip->ladrf[6]);
	PUT_MACREG(hash7, erip->ladrf[7]);
	PUT_MACREG(hash8, erip->ladrf[8]);
	PUT_MACREG(hash9, erip->ladrf[9]);
	PUT_MACREG(hash10, erip->ladrf[10]);
	PUT_MACREG(hash11, erip->ladrf[11]);
	PUT_MACREG(hash12, erip->ladrf[12]);
	PUT_MACREG(hash13, erip->ladrf[13]);
	PUT_MACREG(hash14, erip->ladrf[14]);
}
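/*
 * The MAC counters zeroed above accumulate in hardware from this point
 * on; eri_savecntrs() later folds them into the soft statistics that
 * eri_m_stat() reports.
 */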
static int
eri_flush_rxbufs(struct eri *erip)
{
	uint_t	i;
	int	status = 0;

	/*
	 * Free and dvma_unload pending recv buffers.
	 * Maintaining the 1-to-1 ordered sequence of
	 * dvma_load() followed by dvma_unload() is critical.
	 * Always unload anything before loading it again.
	 * Never unload anything twice.  Always unload
	 * before freeing the buffer.  We satisfy these
	 * requirements by unloading only those descriptors
	 * which currently have an mblk associated with them.
	 */
	for (i = 0; i < ERI_RPENDING; i++) {
		if (erip->rmblkp[i]) {
			if (erip->eri_dvmarh)
				dvma_unload(erip->eri_dvmarh, 2 * i,
				    DDI_DMA_SYNC_FORCPU);
			else if ((ddi_dma_unbind_handle(erip->ndmarh[i]) ==
			    DDI_FAILURE))
				status = -1;
			freeb(erip->rmblkp[i]);
			erip->rmblkp[i] = NULL;
		}
	}
	return (status);
}

static void
eri_init_txbufs(struct eri *erip)
{
	/*
	 * Clear TX descriptors.
	 */
	bzero((caddr_t)erip->eri_tmdp, ERI_TPENDING * sizeof (struct eri_tmd));

	/*
	 * Sync TXDMA descriptors.
	 */
	ERI_SYNCIOPB(erip, erip->eri_tmdp,
	    (ERI_TPENDING * sizeof (struct eri_tmd)), DDI_DMA_SYNC_FORDEV);
	/*
	 * Reset TMD 'walking' pointers.
	 */
	erip->tcurp = erip->eri_tmdp;
	erip->tnextp = erip->eri_tmdp;
	erip->tx_cur_cnt = 0;
	erip->tx_kick = 0;
	erip->tx_completion = 0;
}

static int
eri_init_rxbufs(struct eri *erip)
{
	ddi_dma_cookie_t	dma_cookie;
	mblk_t		*bp;
	int		i, status = 0;
	uint32_t	ccnt;

	/*
	 * Clear rcv descriptors.
	 */
	bzero((caddr_t)erip->rmdp, ERI_RPENDING * sizeof (struct rmd));

	for (i = 0; i < ERI_RPENDING; i++) {
		if ((bp = eri_allocb(ERI_BUFSIZE)) == NULL) {
			status = -1;
			continue;
		}
		/* Load data buffer to DVMA space */
		if (erip->eri_dvmarh)
			dvma_kaddr_load(erip->eri_dvmarh,
			    (caddr_t)bp->b_rptr, ERI_BUFSIZE,
			    2 * i, &dma_cookie);
		/*
		 * Bind data buffer to DMA handle
		 */
		else if (ddi_dma_addr_bind_handle(erip->ndmarh[i], NULL,
		    (caddr_t)bp->b_rptr, ERI_BUFSIZE,
		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
		    &dma_cookie, &ccnt) != DDI_DMA_MAPPED)
			status = -1;

		PUT_RMD((&erip->rmdp[i]), dma_cookie);
		erip->rmblkp[i] = bp;	/* save for later use */
	}

	/*
	 * Sync RXDMA descriptors.
	 */
	ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
	    DDI_DMA_SYNC_FORDEV);
	/*
	 * Reset RMD 'walking' pointers.
	 */
	erip->rnextp = erip->rmdp;
	erip->rx_completion = 0;
	erip->rx_kick = ERI_RPENDING - 4;
	return (status);
}
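/*
 * The ring above is armed with a kick value of ERI_RPENDING - 4 rather
 * than the full ring.  This presumably reflects an ERX rule that the
 * kick register advances in multiples of four descriptors; treat the
 * exact constraint as an assumption, the hardware manual is definitive.
 */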
static uint32_t
eri_txmac_disable(struct eri *erip)
{
	int	n;

	PUT_MACREG(txcfg, GET_MACREG(txcfg) & ~BMAC_TXCFG_ENAB);
	n = (BMACTXRSTDELAY * 10) / ERI_WAITPERIOD;

	while (--n > 0) {
		drv_usecwait(ERI_WAITPERIOD);
		if ((GET_MACREG(txcfg) & 1) == 0)
			return (0);
	}
	return (1);
}

static uint32_t
eri_rxmac_disable(struct eri *erip)
{
	int	n;

	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) & ~BMAC_RXCFG_ENAB);
	n = BMACRXRSTDELAY / ERI_WAITPERIOD;

	while (--n > 0) {
		drv_usecwait(ERI_WAITPERIOD);
		if ((GET_MACREG(rxcfg) & 1) == 0)
			return (0);
	}
	return (1);
}

/*
 * Return 0 upon success, 1 on failure.
 */
static int
eri_stop(struct eri *erip)
{
	(void) eri_erx_reset(erip);
	(void) eri_etx_reset(erip);

	/*
	 * Set up cache line to 16 for 64 bytes of pci burst size.
	 */
	PUT_SWRSTREG(reset, ERI_G_RESET_GLOBAL | ERI_CACHE_LINE_SIZE);

	if (erip->linkcheck) {
		erip->linkcheck = 0;
		erip->global_reset_issued = 2;
	} else {
		param_linkup = 0;
		erip->stats.link_up = LINK_STATE_DOWN;
		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
		erip->global_reset_issued = -1;
	}

	ERI_DELAY((GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE),
	    ERI_MAX_RST_DELAY);
	erip->rx_reset_issued = -1;
	erip->tx_reset_issued = -1;

	/*
	 * Workaround for RIO not resetting the interrupt mask
	 * register to default value 0xffffffff.
	 */
	PUT_GLOBREG(intmask, ERI_G_MASK_ALL);

	if (GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE) {
		return (0);
	} else {
		return (1);
	}
}
/*
 * Reset just the RX portion.
 * Return 0 upon success, 1 on failure.
 *
 * Resetting the rxdma while there is an rx dma transaction on the bus
 * will cause a bus hang or parity errors.  To avoid this, we first
 * disable the rxdma by clearing the ENABLE bit (bit 0).  To make sure it
 * is disabled, we poll it until it really clears.  Furthermore, to ensure
 * any RX DMA activity has subsided, we delay for 5 msec.
 */
static uint32_t
eri_erx_reset(struct eri *erip)
{
	(void) eri_rxmac_disable(erip); /* Disable the RX MAC */

	/* Disable the RX DMA */
	PUT_ERXREG(config, GET_ERXREG(config) & ~GET_CONFIG_RXDMA_EN);
	ERI_DELAY(((GET_ERXREG(config) & 1) == 0), ERI_MAX_RST_DELAY);
	if ((GET_ERXREG(config) & 1) != 0)
		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
		    disable_erx_msg);

	drv_usecwait(5000); /* Delay to ensure no RX DMA activity */

	PUT_SWRSTREG(reset, ERI_G_RESET_ERX | ERI_CACHE_LINE_SIZE);
	/*
	 * Wait until the reset is completed, which is indicated by
	 * the reset bit being cleared, or time out.
	 */
	ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ==
	    ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
	erip->rx_reset_issued = -1;

	return ((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ? 1 : 0);
}

/*
 * Reset just the TX portion.
 * Return 0 upon success, 1 on failure.
 * Resetting the txdma while there is a tx dma transaction on the bus may
 * cause a bus hang or parity errors.  To avoid this we first disable the
 * txdma by clearing the ENABLE bit (bit 0).  To make sure it is disabled,
 * we poll it until it really clears.  Furthermore, to ensure any TX DMA
 * activity has subsided, we delay for 5 msec.
 */
static uint32_t
eri_etx_reset(struct eri *erip)
{
	(void) eri_txmac_disable(erip);

	/* Disable the TX DMA */
	PUT_ETXREG(config, GET_ETXREG(config) & ~GET_CONFIG_TXDMA_EN);
#ifdef ORIG
	ERI_DELAY(((GET_ETXREG(config) & 1) == 0), ERI_MAX_RST_DELAY);
	if ((GET_ETXREG(config) & 1) != 0)
		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
		    disable_etx_msg);
	drv_usecwait(5000); /* Delay to ensure DMA completed (if any). */
#endif
	drv_usecwait(5000); /* Delay to ensure DMA completed (if any). */
	ERI_DELAY(((GET_ETXREG(config) & 1) == 0), ERI_MAX_RST_DELAY);
	if ((GET_ETXREG(config) & 1) != 0)
		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
		    disable_etx_msg);

	PUT_SWRSTREG(reset, ERI_G_RESET_ETX | ERI_CACHE_LINE_SIZE);

	/*
	 * Wait until the reset is completed, which is indicated by the
	 * reset bit being cleared, or time out.
	 */
	ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ETX)) ==
	    ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
	erip->tx_reset_issued = -1;

	if (GET_SWRSTREG(reset) & (ERI_G_RESET_ETX)) {
		return (1);
	} else {
		return (0);
	}
}
/*
 * Initialize the TX DMA registers and Enable the TX DMA.
 */
static uint32_t
eri_init_txregs(struct eri *erip)
{
	uint32_t	i;
	uint64_t	tx_ring;

	/*
	 * Initialize ETX Registers:
	 * config, txring_lo, txring_hi
	 */
	tx_ring = ERI_IOPBIOADDR(erip, erip->eri_tmdp);
	PUT_ETXREG(txring_lo, (uint32_t)(tx_ring));
	PUT_ETXREG(txring_hi, (uint32_t)(tx_ring >> 32));

	/*
	 * Get TX Ring Size Masks.
	 * The ring size ERI_TPENDING is defined in eri_mac.h.
	 */
	switch (ERI_TPENDING) {
	case 32: i = ETX_RINGSZ_32;
		break;
	case 64: i = ETX_RINGSZ_64;
		break;
	case 128: i = ETX_RINGSZ_128;
		break;
	case 256: i = ETX_RINGSZ_256;
		break;
	case 512: i = ETX_RINGSZ_512;
		break;
	case 1024: i = ETX_RINGSZ_1024;
		break;
	case 2048: i = ETX_RINGSZ_2048;
		break;
	case 4096: i = ETX_RINGSZ_4096;
		break;
	default:
		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
		    unk_tx_descr_sze_msg, ERI_TPENDING);
		return (1);
	}

	i <<= ERI_TX_RINGSZ_SHIFT;
	PUT_ETXREG(config, ETX_CONFIG_THRESHOLD | i);
	ENABLE_TXDMA(erip);
	ENABLE_MAC(erip);
	return (0);
}
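/*
 * Example: with the default eri_tx_ring_size of 2048 (assigned to
 * ERI_TPENDING in eri_attach()), the switch above selects
 * ETX_RINGSZ_2048, which is shifted by ERI_TX_RINGSZ_SHIFT and OR'ed
 * with ETX_CONFIG_THRESHOLD into the ETX config register.
 */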
/*
 * Initialize the RX DMA registers and Enable the RX DMA.
 */
static uint32_t
eri_init_rxregs(struct eri *erip)
{
	int		i;
	uint64_t	rx_ring;

	/*
	 * Initialize ERX Registers:
	 * rxring_lo, rxring_hi, config, rx_blanking, rx_pause_threshold.
	 * Also, rx_kick.
	 * Read and save rxfifo_size.
	 * XXX: Use this to properly configure PAUSE threshold values.
	 */
	rx_ring = ERI_IOPBIOADDR(erip, erip->rmdp);
	PUT_ERXREG(rxring_lo, (uint32_t)(rx_ring));
	PUT_ERXREG(rxring_hi, (uint32_t)(rx_ring >> 32));
	PUT_ERXREG(rx_kick, erip->rx_kick);

	/*
	 * The max ring size, ERI_RMDMAX, is defined in eri_mac.h.
	 * A larger ERI_RPENDING will provide better performance but requires
	 * more system DVMA memory.
	 * eri_rx_ring_size can be used to tune this value from /etc/system.
	 * eri_rx_ring_size is deliberately not NDD-settable, since changing
	 * it at runtime can cause non-recoverable errors which cannot be
	 * detected from NDD operations.
	 */

	/*
	 * Get the rxring size bits.
	 */
	switch (ERI_RPENDING) {
	case 32: i = ERX_RINGSZ_32;
		break;
	case 64: i = ERX_RINGSZ_64;
		break;
	case 128: i = ERX_RINGSZ_128;
		break;
	case 256: i = ERX_RINGSZ_256;
		break;
	case 512: i = ERX_RINGSZ_512;
		break;
	case 1024: i = ERX_RINGSZ_1024;
		break;
	case 2048: i = ERX_RINGSZ_2048;
		break;
	case 4096: i = ERX_RINGSZ_4096;
		break;
	default:
		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
		    unk_rx_descr_sze_msg, ERI_RPENDING);
		return (1);
	}

	i <<= ERI_RX_RINGSZ_SHIFT;
	i |= (ERI_FSTBYTE_OFFSET << ERI_RX_CONFIG_FBO_SHIFT) |
	    (ETHERHEADER_SIZE << ERI_RX_CONFIG_RX_CSSTART_SHIFT) |
	    (ERI_RX_FIFOTH_1024 << ERI_RX_CONFIG_RXFIFOTH_SHIFT);

	PUT_ERXREG(config, i);
	PUT_ERXREG(rx_blanking,
	    (param_intr_blank_time << ERI_RX_BLNK_INTR_TIME_SHIFT) |
	    param_intr_blank_packets);

	PUT_ERXREG(rx_pause_threshold, rx_pause_threshold);
	erip->rxfifo_size = GET_ERXREG(rxfifo_size);
	ENABLE_RXDMA(erip);
	return (0);
}

static int
eri_freebufs(struct eri *erip)
{
	int	status = 0;

	status = eri_flush_rxbufs(erip);
	return (status);
}

static void
eri_update_rxbufs(struct eri *erip)
{
	int		i;
	volatile struct rmd	*rmdp, *rmdpbase;

	/*
	 * Hang out receive buffers.
	 */
	rmdpbase = erip->rmdp;
	for (i = 0; i < ERI_RPENDING; i++) {
		rmdp = rmdpbase + i;
		UPDATE_RMD(rmdp);
	}

	/*
	 * Sync RXDMA descriptors.
	 */
	ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
	    DDI_DMA_SYNC_FORDEV);
	/*
	 * Reset RMD 'walking' pointers.
	 */
	erip->rnextp = erip->rmdp;
	erip->rx_completion = 0;
	erip->rx_kick = ERI_RPENDING - 4;
}

/*
 * This routine is used to reset the RX DMA only.  In the case of RX
 * failures such as RX Tag Error, RX hang etc... we don't want to
 * do a global reset, which takes down the link and clears the FIFOs.
 * By doing an RX-only reset, we leave the TX and the link intact.
 */
static uint32_t
eri_init_rx_channel(struct eri *erip)
{
	erip->flags &= ~ERI_RXINIT;
	(void) eri_erx_reset(erip);
	eri_update_rxbufs(erip);
	if (eri_init_rxregs(erip))
		return (1);
	PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
	erip->rx_reset_issued = 0;
	HSTAT(erip, rx_inits);
	erip->flags |= ERI_RXINIT;
	return (0);
}
2066 */ 2067 2068 PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]); 2069 PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]); 2070 PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]); 2071 2072 /* 2073 * Set up multicast address filter by passing all multicast 2074 * addresses through a crc generator, and then using the 2075 * low order 8 bits as a index into the 256 bit logical 2076 * address filter. The high order four bits select the word, 2077 * while the rest of the bits select the bit within the word. 2078 */ 2079 2080 ladrf = erip->ladrf; 2081 2082 PUT_MACREG(hash0, ladrf[0]); 2083 PUT_MACREG(hash1, ladrf[1]); 2084 PUT_MACREG(hash2, ladrf[2]); 2085 PUT_MACREG(hash3, ladrf[3]); 2086 PUT_MACREG(hash4, ladrf[4]); 2087 PUT_MACREG(hash5, ladrf[5]); 2088 PUT_MACREG(hash6, ladrf[6]); 2089 PUT_MACREG(hash7, ladrf[7]); 2090 PUT_MACREG(hash8, ladrf[8]); 2091 PUT_MACREG(hash9, ladrf[9]); 2092 PUT_MACREG(hash10, ladrf[10]); 2093 PUT_MACREG(hash11, ladrf[11]); 2094 PUT_MACREG(hash12, ladrf[12]); 2095 PUT_MACREG(hash13, ladrf[13]); 2096 PUT_MACREG(hash14, ladrf[14]); 2097 PUT_MACREG(hash15, ladrf[15]); 2098 2099 #ifdef ERI_DONT_STRIP_CRC 2100 PUT_MACREG(rxcfg, 2101 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) | 2102 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) | 2103 BMAC_RXCFG_ENAB)); 2104 #else 2105 PUT_MACREG(rxcfg, 2106 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) | 2107 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) | 2108 BMAC_RXCFG_ENAB | BMAC_RXCFG_STRIP_CRC)); 2109 #endif 2110 /* wait after setting Hash Enable bit */ 2111 /* drv_usecwait(10); */ 2112 2113 HSTAT(erip, rx_inits); 2114 } 2115 2116 /* 2117 * This routine is used to init the TX MAC only. 2118 * &erip->xmitlock is held before calling this routine. 2119 */ 2120 void 2121 eri_init_txmac(struct eri *erip) 2122 { 2123 uint32_t carrier_ext = 0; 2124 2125 erip->flags &= ~ERI_TXINIT; 2126 /* 2127 * Stop the Transmit MAC. 2128 */ 2129 (void) eri_txmac_disable(erip); 2130 2131 /* 2132 * Must be Internal Transceiver 2133 */ 2134 if (param_mode) 2135 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ? 2136 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE)); 2137 else 2138 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ? 2139 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE | 2140 BMAC_XIFC_DIS_ECHO)); 2141 2142 /* 2143 * Initialize the interpacket gap registers 2144 */ 2145 PUT_MACREG(ipg1, param_ipg1); 2146 PUT_MACREG(ipg2, param_ipg2); 2147 2148 if (erip->ngu_enable) 2149 PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) | 2150 ((param_lance_mode && (erip->lance_mode_enable)) ? 2151 BMAC_TXCFG_ENIPG0 : 0) | 2152 (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) | 2153 BMAC_TXCFG_NGU)); 2154 else 2155 PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) | 2156 ((param_lance_mode && (erip->lance_mode_enable)) ? 2157 BMAC_TXCFG_ENIPG0 : 0) | 2158 (carrier_ext ? 
BMAC_TXCFG_CARR_EXT : 0))); 2159 2160 ENABLE_TXDMA(erip); 2161 ENABLE_TXMAC(erip); 2162 2163 HSTAT(erip, tx_inits); 2164 erip->flags |= ERI_TXINIT; 2165 } 2166 2167 static void 2168 eri_unallocthings(struct eri *erip) 2169 { 2170 uint32_t flag; 2171 uint32_t i; 2172 2173 flag = erip->alloc_flag; 2174 2175 if (flag & ERI_DESC_MEM_MAP) 2176 (void) ddi_dma_unbind_handle(erip->md_h); 2177 2178 if (flag & ERI_DESC_MEM_ALLOC) { 2179 ddi_dma_mem_free(&erip->mdm_h); 2180 erip->rmdp = NULL; 2181 erip->eri_tmdp = NULL; 2182 } 2183 2184 if (flag & ERI_DESC_HANDLE_ALLOC) 2185 ddi_dma_free_handle(&erip->md_h); 2186 2187 (void) eri_freebufs(erip); 2188 2189 if (flag & ERI_RCV_HANDLE_ALLOC) 2190 for (i = 0; i < erip->rcv_handle_cnt; i++) 2191 ddi_dma_free_handle(&erip->ndmarh[i]); 2192 2193 if (flag & ERI_RCV_DVMA_ALLOC) { 2194 (void) dvma_release(erip->eri_dvmarh); 2195 erip->eri_dvmarh = NULL; 2196 } 2197 2198 if (flag & ERI_XBUFS_KMEM_DMABIND) { 2199 (void) ddi_dma_unbind_handle(erip->tbuf_handle); 2200 erip->tbuf_ioaddr = 0; 2201 } 2202 2203 if (flag & ERI_XBUFS_KMEM_ALLOC) { 2204 ddi_dma_mem_free(&erip->tbuf_acch); 2205 erip->tbuf_kaddr = NULL; 2206 } 2207 2208 if (flag & ERI_XBUFS_HANDLE_ALLOC) { 2209 ddi_dma_free_handle(&erip->tbuf_handle); 2210 erip->tbuf_handle = NULL; 2211 } 2212 2213 } 2214 2215 /* 2216 * Initialize channel. 2217 * Return true on success, false on error. 2218 * 2219 * The recommended sequence for initialization is: 2220 * 1. Issue a Global Reset command to the Ethernet Channel. 2221 * 2. Poll the Global_Reset bits until the execution of the reset has been 2222 * completed. 2223 * 2(a). Use the MIF Frame/Output register to reset the transceiver. 2224 * Poll Register 0 to till the Resetbit is 0. 2225 * 2(b). Use the MIF Frame/Output register to set the PHY in in Normal-Op, 2226 * 100Mbps and Non-Isolated mode. The main point here is to bring the 2227 * PHY out of Isolate mode so that it can generate the rx_clk and tx_clk 2228 * to the MII interface so that the Bigmac core can correctly reset 2229 * upon a software reset. 2230 * 2(c). Issue another Global Reset command to the Ethernet Channel and poll 2231 * the Global_Reset bits till completion. 2232 * 3. Set up all the data structures in the host memory. 2233 * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration 2234 * Register). 2235 * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration 2236 * Register). 2237 * 6. Program the Transmit Descriptor Ring Base Address in the ETX. 2238 * 7. Program the Receive Descriptor Ring Base Address in the ERX. 2239 * 8. Program the Global Configuration and the Global Interrupt Mask Registers. 2240 * 9. Program the ETX Configuration register (enable the Transmit DMA channel). 2241 * 10. Program the ERX Configuration register (enable the Receive DMA channel). 2242 * 11. Program the XIF Configuration Register (enable the XIF). 2243 * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC). 2244 * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC). 2245 */ 2246 /* 2247 * lock order: 2248 * intrlock->linklock->xmitlock->xcvrlock 2249 */ 2250 static boolean_t 2251 eri_init(struct eri *erip) 2252 { 2253 uint32_t init_stat = 0; 2254 uint32_t partial_init = 0; 2255 uint32_t carrier_ext = 0; 2256 uint32_t mac_ctl = 0; 2257 boolean_t ret; 2258 uint32_t link_timeout = ERI_LINKCHECK_TIMER; 2259 link_state_t linkupdate = LINK_STATE_UNKNOWN; 2260 2261 /* 2262 * Just return successfully if device is suspended. 
2263 * eri_init() will be called again from resume. 2264 */ 2265 ASSERT(erip != NULL); 2266 2267 if (erip->flags & ERI_SUSPENDED) { 2268 ret = B_TRUE; 2269 goto init_exit; 2270 } 2271 2272 mutex_enter(&erip->intrlock); 2273 eri_stop_timer(erip); /* acquire linklock */ 2274 mutex_enter(&erip->xmitlock); 2275 erip->flags &= (ERI_DLPI_LINKUP | ERI_STARTED); 2276 erip->wantw = B_FALSE; 2277 HSTAT(erip, inits); 2278 erip->txhung = 0; 2279 2280 if ((erip->stats.inits > 1) && (erip->init_macregs == 0)) 2281 eri_savecntrs(erip); 2282 2283 mutex_enter(&erip->xcvrlock); 2284 if (!param_linkup || erip->linkcheck) { 2285 if (!erip->linkcheck) 2286 linkupdate = LINK_STATE_DOWN; 2287 (void) eri_stop(erip); 2288 } 2289 if (!(erip->flags & ERI_DLPI_LINKUP) || !param_linkup) { 2290 erip->flags |= ERI_DLPI_LINKUP; 2291 eri_mif_poll(erip, MIF_POLL_STOP); 2292 (void) eri_new_xcvr(erip); 2293 ERI_DEBUG_MSG1(erip, XCVR_MSG, "New transceiver detected."); 2294 if (param_transceiver != NO_XCVR) { 2295 /* 2296 * Reset the new PHY and bring up the 2297 * link 2298 */ 2299 if (eri_reset_xcvr(erip)) { 2300 ERI_FAULT_MSG1(erip, SEVERITY_NONE, 2301 ERI_VERB_MSG, "In Init after reset"); 2302 mutex_exit(&erip->xcvrlock); 2303 link_timeout = 0; 2304 goto done; 2305 } 2306 if (erip->stats.link_up == LINK_STATE_UP) 2307 linkupdate = LINK_STATE_UP; 2308 } else { 2309 erip->flags |= (ERI_RUNNING | ERI_INITIALIZED); 2310 param_linkup = 0; 2311 erip->stats.link_up = LINK_STATE_DOWN; 2312 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN; 2313 linkupdate = LINK_STATE_DOWN; 2314 /* 2315 * Still go on and complete the MAC initialization as 2316 * xcvr might show up later. 2317 * you must return to their mutex ordering. 2318 */ 2319 } 2320 eri_mif_poll(erip, MIF_POLL_START); 2321 } 2322 2323 mutex_exit(&erip->xcvrlock); 2324 2325 /* 2326 * Allocate data structures. 2327 */ 2328 if (erip->global_reset_issued) { 2329 if (erip->global_reset_issued == 2) { /* fast path */ 2330 2331 /* 2332 * Hang out/Initialize descriptors and buffers. 2333 */ 2334 eri_init_txbufs(erip); 2335 2336 eri_update_rxbufs(erip); 2337 } else { 2338 init_stat = eri_allocthings(erip); 2339 if (init_stat) 2340 goto done; 2341 2342 if (eri_freebufs(erip)) 2343 goto done; 2344 /* 2345 * Hang out/Initialize descriptors and buffers. 2346 */ 2347 eri_init_txbufs(erip); 2348 if (eri_init_rxbufs(erip)) 2349 goto done; 2350 } 2351 } 2352 2353 /* 2354 * BigMAC requires that we confirm that tx, rx and hash are in 2355 * quiescent state. 2356 * MAC will not reset successfully if the transceiver is not reset and 2357 * brought out of Isolate mode correctly. TXMAC reset may fail if the 2358 * ext. transceiver is just disconnected. If it fails, try again by 2359 * checking the transceiver. 
2360 */ 2361 if (eri_txmac_disable(erip)) { 2362 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG, 2363 disable_txmac_msg); 2364 param_linkup = 0; /* force init again */ 2365 erip->stats.link_up = LINK_STATE_DOWN; 2366 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN; 2367 linkupdate = LINK_STATE_DOWN; 2368 goto done; 2369 } 2370 2371 if (eri_rxmac_disable(erip)) { 2372 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG, 2373 disable_rxmac_msg); 2374 param_linkup = 0; /* force init again */ 2375 erip->stats.link_up = LINK_STATE_DOWN; 2376 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN; 2377 linkupdate = LINK_STATE_DOWN; 2378 goto done; 2379 } 2380 2381 eri_init_macregs_generic(erip); 2382 2383 /* 2384 * Initialize ERI Global registers : 2385 * config 2386 * For PCI : err_mask, bif_cfg 2387 * 2388 * Use user-configurable parameter for enabling 64-bit transfers. 2389 * Note:For PCI, burst sizes are in multiples of 64-bytes. 2390 */ 2391 2392 /* 2393 * Significant performance improvements can be achieved by 2394 * disabling transmit interrupt. Thus TMD's are reclaimed 2395 * only very infrequently. 2396 * The PCS Interrupt is masked here. It is enabled only when 2397 * a PCS link is brought up because there is no second level 2398 * mask for this interrupt.. 2399 * Init GLOBAL, TXMAC, RXMAC and MACCTL interrupt masks here. 2400 */ 2401 if (! partial_init) { 2402 PUT_GLOBREG(intmask, ERI_G_MASK_INTR); 2403 erip->tx_int_me = 0; 2404 PUT_MACREG(txmask, BMAC_TXINTR_MASK); 2405 PUT_MACREG(rxmask, BMAC_RXINTR_MASK); 2406 PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK); 2407 } 2408 2409 if (erip->global_reset_issued) { 2410 /* 2411 * Initialize ETX Registers: 2412 * config, txring_lo, txring_hi 2413 */ 2414 if (eri_init_txregs(erip)) 2415 goto done; 2416 /* 2417 * Initialize ERX Registers: 2418 * rxring_lo, rxring_hi, config, rx_blanking, 2419 * rx_pause_threshold. Also, rx_kick 2420 * Read and save rxfifo_size. 2421 */ 2422 if (eri_init_rxregs(erip)) 2423 goto done; 2424 } 2425 2426 PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK); 2427 2428 /* 2429 * Set up the slottime,and rxconfig, txconfig without enabling 2430 * the latter two at this time 2431 */ 2432 PUT_MACREG(slot, BMAC_SLOT_TIME); 2433 carrier_ext = 0; 2434 2435 #ifdef ERI_DONT_STRIP_CRC 2436 PUT_MACREG(rxcfg, 2437 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) | 2438 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) | 2439 (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0))); 2440 #else 2441 PUT_MACREG(rxcfg, 2442 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) | 2443 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) | 2444 BMAC_RXCFG_STRIP_CRC | 2445 (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0))); 2446 #endif 2447 drv_usecwait(10); /* wait after setting Hash Enable bit */ 2448 2449 if (erip->ngu_enable) 2450 PUT_MACREG(txcfg, 2451 ((param_mode ? BMAC_TXCFG_FDX: 0) | 2452 ((param_lance_mode && (erip->lance_mode_enable)) ? 2453 BMAC_TXCFG_ENIPG0 : 0) | 2454 (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) | 2455 BMAC_TXCFG_NGU)); 2456 else 2457 PUT_MACREG(txcfg, 2458 ((param_mode ? BMAC_TXCFG_FDX: 0) | 2459 ((param_lance_mode && (erip->lance_mode_enable)) ? 2460 BMAC_TXCFG_ENIPG0 : 0) | 2461 (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0))); 2462 2463 if (erip->pauseRX) 2464 mac_ctl = ERI_MCTLCFG_RXPAUSE; 2465 if (erip->pauseTX) 2466 mac_ctl |= ERI_MCTLCFG_TXPAUSE; 2467 2468 PUT_MACREG(macctl_cfg, mac_ctl); 2469 2470 /* 2471 * Must be Internal Transceiver 2472 */ 2473 if (param_mode) 2474 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ? 
2475 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE)); 2476 else { 2477 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ? 2478 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE | 2479 BMAC_XIFC_DIS_ECHO)); 2480 2481 link_timeout = ERI_CHECK_HANG_TIMER; 2482 } 2483 2484 /* 2485 * if MAC int loopback flag is set, put xifc reg in mii loopback 2486 * mode {DIAG} 2487 */ 2488 if (erip->flags & ERI_MACLOOPBACK) { 2489 PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIILPBK); 2490 } 2491 2492 /* 2493 * Enable TX and RX MACs. 2494 */ 2495 ENABLE_MAC(erip); 2496 erip->flags |= (ERI_RUNNING | ERI_INITIALIZED | 2497 ERI_TXINIT | ERI_RXINIT); 2498 mac_tx_update(erip->mh); 2499 erip->global_reset_issued = 0; 2500 2501 #ifdef ERI_10_10_FORCE_SPEED_WORKAROUND 2502 eri_xcvr_force_mode(erip, &link_timeout); 2503 #endif 2504 2505 done: 2506 if (init_stat) 2507 eri_unallocthings(erip); 2508 2509 mutex_exit(&erip->xmitlock); 2510 eri_start_timer(erip, eri_check_link, link_timeout); 2511 mutex_exit(&erip->intrlock); 2512 2513 if (linkupdate != LINK_STATE_UNKNOWN) 2514 mac_link_update(erip->mh, linkupdate); 2515 2516 ret = (erip->flags & ERI_RUNNING) ? B_TRUE : B_FALSE; 2517 if (!ret) { 2518 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, 2519 "eri_init failed"); 2520 } 2521 2522 init_exit: 2523 ASSERT(!MUTEX_HELD(&erip->linklock)); 2524 return (ret); 2525 } 2526 2527 /* 2528 * 0 as burstsize upon failure as it signifies no burst size. 2529 */ 2530 static int 2531 eri_burstsize(struct eri *erip) 2532 { 2533 ddi_dma_handle_t handle; 2534 2535 if (ddi_dma_alloc_handle(erip->dip, &dma_attr, DDI_DMA_DONTWAIT, 2536 NULL, &handle)) 2537 return (DDI_FAILURE); 2538 2539 erip->burstsizes = ddi_dma_burstsizes(handle); 2540 ddi_dma_free_handle(&handle); 2541 2542 if (erip->burstsizes) 2543 return (DDI_SUCCESS); 2544 2545 return (DDI_FAILURE); 2546 } 2547 2548 /* 2549 * Un-initialize (STOP) ERI channel. 2550 */ 2551 static void 2552 eri_uninit(struct eri *erip) 2553 { 2554 boolean_t needind; 2555 2556 /* 2557 * Allow up to 'ERI_DRAINTIME' for pending xmit's to complete. 2558 */ 2559 ERI_DELAY((erip->tcurp == erip->tnextp), ERI_DRAINTIME); 2560 2561 mutex_enter(&erip->intrlock); 2562 eri_stop_timer(erip); /* acquire linklock */ 2563 mutex_enter(&erip->xmitlock); 2564 mutex_enter(&erip->xcvrlock); 2565 eri_mif_poll(erip, MIF_POLL_STOP); 2566 erip->flags &= ~ERI_DLPI_LINKUP; 2567 mutex_exit(&erip->xcvrlock); 2568 2569 needind = !erip->linkcheck; 2570 (void) eri_stop(erip); 2571 erip->flags &= ~ERI_RUNNING; 2572 2573 mutex_exit(&erip->xmitlock); 2574 eri_start_timer(erip, eri_check_link, 0); 2575 mutex_exit(&erip->intrlock); 2576 2577 if (needind) 2578 mac_link_update(erip->mh, LINK_STATE_DOWN); 2579 } 2580 2581 /* 2582 * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and 2583 * map it in IO space. 2584 * 2585 * The driver allocates STREAMS buffers which will be mapped in DVMA 2586 * space using DDI DMA resources. 2587 * 2588 */ 2589 static int 2590 eri_allocthings(struct eri *erip) 2591 { 2592 2593 uintptr_t a; 2594 int size; 2595 uint32_t rval; 2596 int i; 2597 size_t real_len; 2598 uint32_t cookiec; 2599 int alloc_stat = 0; 2600 ddi_dma_cookie_t dma_cookie; 2601 2602 /* 2603 * Return if resources are already allocated. 2604 */ 2605 if (erip->rmdp) 2606 return (alloc_stat); 2607 2608 erip->alloc_flag = 0; 2609 2610 /* 2611 * Allocate the TMD and RMD descriptors and extra for alignments. 
2612 */ 2613 size = (ERI_RPENDING * sizeof (struct rmd) + 2614 ERI_TPENDING * sizeof (struct eri_tmd)) + ERI_GMDALIGN; 2615 2616 rval = ddi_dma_alloc_handle(erip->dip, &desc_dma_attr, 2617 DDI_DMA_DONTWAIT, 0, &erip->md_h); 2618 if (rval != DDI_SUCCESS) { 2619 return (++alloc_stat); 2620 } 2621 erip->alloc_flag |= ERI_DESC_HANDLE_ALLOC; 2622 2623 rval = ddi_dma_mem_alloc(erip->md_h, size, &erip->dev_attr, 2624 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0, 2625 (caddr_t *)&erip->iopbkbase, &real_len, &erip->mdm_h); 2626 if (rval != DDI_SUCCESS) { 2627 return (++alloc_stat); 2628 } 2629 erip->alloc_flag |= ERI_DESC_MEM_ALLOC; 2630 2631 rval = ddi_dma_addr_bind_handle(erip->md_h, NULL, 2632 (caddr_t)erip->iopbkbase, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2633 DDI_DMA_DONTWAIT, 0, &erip->md_c, &cookiec); 2634 2635 if (rval != DDI_DMA_MAPPED) 2636 return (++alloc_stat); 2637 2638 erip->alloc_flag |= ERI_DESC_MEM_MAP; 2639 2640 if (cookiec != 1) 2641 return (++alloc_stat); 2642 2643 erip->iopbiobase = erip->md_c.dmac_address; 2644 2645 a = erip->iopbkbase; 2646 a = ROUNDUP(a, ERI_GMDALIGN); 2647 erip->rmdp = (struct rmd *)a; 2648 a += ERI_RPENDING * sizeof (struct rmd); 2649 erip->eri_tmdp = (struct eri_tmd *)a; 2650 /* 2651 * Specifically we reserve n (ERI_TPENDING + ERI_RPENDING) 2652 * pagetable entries. Therefore we have 2 ptes for each 2653 * descriptor. Since the ethernet buffers are 1518 bytes 2654 * so they can at most use 2 ptes. 2655 * Will do a ddi_dma_addr_setup for each bufer 2656 */ 2657 /* 2658 * In the current implementation, we use the ddi compliant 2659 * dma interface. We allocate ERI_RPENDING dma handles for receive 2660 * activity. The actual dma mapping is done in the io function 2661 * eri_read_dma(), by calling the ddi_dma_addr_bind_handle. 2662 * Dma resources are deallocated by calling ddi_dma_unbind_handle 2663 * in eri_reclaim() for transmit and eri_read_dma(), for receive io. 2664 */ 2665 2666 if (eri_use_dvma_rx && 2667 (dvma_reserve(erip->dip, &eri_dma_limits, (ERI_RPENDING * 2), 2668 &erip->eri_dvmarh)) == DDI_SUCCESS) { 2669 erip->alloc_flag |= ERI_RCV_DVMA_ALLOC; 2670 } else { 2671 erip->eri_dvmarh = NULL; 2672 2673 for (i = 0; i < ERI_RPENDING; i++) { 2674 rval = ddi_dma_alloc_handle(erip->dip, 2675 &dma_attr, DDI_DMA_DONTWAIT, 2676 0, &erip->ndmarh[i]); 2677 2678 if (rval != DDI_SUCCESS) { 2679 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, 2680 ERI_VERB_MSG, alloc_rx_dmah_msg); 2681 alloc_stat++; 2682 break; 2683 } 2684 } 2685 2686 erip->rcv_handle_cnt = i; 2687 2688 if (i) 2689 erip->alloc_flag |= ERI_RCV_HANDLE_ALLOC; 2690 2691 if (alloc_stat) 2692 return (alloc_stat); 2693 2694 } 2695 2696 /* 2697 * Allocate TX buffer 2698 * Note: buffers must always be allocated in the native 2699 * ordering of the CPU (always big-endian for Sparc). 2700 * ddi_dma_mem_alloc returns memory in the native ordering 2701 * of the bus (big endian for SBus, little endian for PCI). 2702 * So we cannot use ddi_dma_mem_alloc(, &erip->ge_dev_attr) 2703 * because we'll get little endian memory on PCI. 
2704 */ 2705 if (ddi_dma_alloc_handle(erip->dip, &desc_dma_attr, DDI_DMA_DONTWAIT, 2706 0, &erip->tbuf_handle) != DDI_SUCCESS) { 2707 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG, 2708 alloc_tx_dmah_msg); 2709 return (++alloc_stat); 2710 } 2711 erip->alloc_flag |= ERI_XBUFS_HANDLE_ALLOC; 2712 size = ERI_TPENDING * ERI_BUFSIZE; 2713 if (ddi_dma_mem_alloc(erip->tbuf_handle, size, &buf_attr, 2714 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, &erip->tbuf_kaddr, 2715 &real_len, &erip->tbuf_acch) != DDI_SUCCESS) { 2716 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG, 2717 alloc_tx_dmah_msg); 2718 return (++alloc_stat); 2719 } 2720 erip->alloc_flag |= ERI_XBUFS_KMEM_ALLOC; 2721 if (ddi_dma_addr_bind_handle(erip->tbuf_handle, NULL, 2722 erip->tbuf_kaddr, size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT, 2723 DDI_DMA_DONTWAIT, 0, &dma_cookie, &cookiec) != DDI_DMA_MAPPED) { 2724 return (++alloc_stat); 2725 } 2726 erip->tbuf_ioaddr = dma_cookie.dmac_address; 2727 erip->alloc_flag |= ERI_XBUFS_KMEM_DMABIND; 2728 if (cookiec != 1) 2729 return (++alloc_stat); 2730 2731 /* 2732 * Keep handy limit values for RMD, TMD, and Buffers. 2733 */ 2734 erip->rmdlimp = &((erip->rmdp)[ERI_RPENDING]); 2735 erip->eri_tmdlimp = &((erip->eri_tmdp)[ERI_TPENDING]); 2736 2737 /* 2738 * Zero out RCV holders. 2739 */ 2740 bzero((caddr_t)erip->rmblkp, sizeof (erip->rmblkp)); 2741 return (alloc_stat); 2742 } 2743 2744 /* <<<<<<<<<<<<<<<<< INTERRUPT HANDLING FUNCTION >>>>>>>>>>>>>>>>>>>> */ 2745 /* 2746 * First check to see if it is our device interrupting. 2747 */ 2748 static uint_t 2749 eri_intr(caddr_t arg) 2750 { 2751 struct eri *erip = (void *)arg; 2752 uint32_t erisbits; 2753 uint32_t mif_status; 2754 uint32_t serviced = DDI_INTR_UNCLAIMED; 2755 link_state_t linkupdate = LINK_STATE_UNKNOWN; 2756 boolean_t macupdate = B_FALSE; 2757 mblk_t *mp; 2758 mblk_t *head; 2759 mblk_t **tail; 2760 2761 head = NULL; 2762 tail = &head; 2763 2764 mutex_enter(&erip->intrlock); 2765 2766 erisbits = GET_GLOBREG(status); 2767 2768 /* 2769 * Check if it is only the RX_DONE interrupt, which is 2770 * the most frequent one. 
2771 */ 2772 if (((erisbits & ERI_G_STATUS_RX_INT) == ERI_G_STATUS_RX_DONE) && 2773 (erip->flags & ERI_RUNNING)) { 2774 serviced = DDI_INTR_CLAIMED; 2775 goto rx_done_int; 2776 } 2777 2778 /* Claim the first interrupt after initialization */ 2779 if (erip->flags & ERI_INITIALIZED) { 2780 erip->flags &= ~ERI_INITIALIZED; 2781 serviced = DDI_INTR_CLAIMED; 2782 } 2783 2784 /* Check for interesting events */ 2785 if ((erisbits & ERI_G_STATUS_INTR) == 0) { 2786 #ifdef ESTAR_WORKAROUND 2787 uint32_t linkupdate; 2788 #endif 2789 2790 ERI_DEBUG_MSG2(erip, DIAG_MSG, 2791 "eri_intr: Interrupt Not Claimed gsbits %X", erisbits); 2792 #ifdef DEBUG 2793 noteri++; 2794 #endif 2795 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF Config = 0x%X", 2796 GET_MIFREG(mif_cfg)); 2797 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF imask = 0x%X", 2798 GET_MIFREG(mif_imask)); 2799 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:INT imask = 0x%X", 2800 GET_GLOBREG(intmask)); 2801 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:alias %X", 2802 GET_GLOBREG(status_alias)); 2803 #ifdef ESTAR_WORKAROUND 2804 linkupdate = eri_check_link_noind(erip); 2805 #endif 2806 mutex_exit(&erip->intrlock); 2807 #ifdef ESTAR_WORKAROUND 2808 if (linkupdate != LINK_STATE_UNKNOWN) 2809 mac_link_update(erip->mh, linkupdate); 2810 #endif 2811 return (serviced); 2812 } 2813 serviced = DDI_INTR_CLAIMED; 2814 2815 if (!(erip->flags & ERI_RUNNING)) { 2816 mutex_exit(&erip->intrlock); 2817 eri_uninit(erip); 2818 return (serviced); 2819 } 2820 2821 if (erisbits & ERI_G_STATUS_FATAL_ERR) { 2822 ERI_DEBUG_MSG2(erip, INTR_MSG, 2823 "eri_intr: fatal error: erisbits = %X", erisbits); 2824 (void) eri_fatal_err(erip, erisbits); 2825 eri_reinit_fatal++; 2826 2827 if (erip->rx_reset_issued) { 2828 erip->rx_reset_issued = 0; 2829 (void) eri_init_rx_channel(erip); 2830 mutex_exit(&erip->intrlock); 2831 } else { 2832 param_linkup = 0; 2833 erip->stats.link_up = LINK_STATE_DOWN; 2834 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN; 2835 DISABLE_MAC(erip); 2836 mutex_exit(&erip->intrlock); 2837 (void) eri_init(erip); 2838 } 2839 return (serviced); 2840 } 2841 2842 if (erisbits & ERI_G_STATUS_NONFATAL_ERR) { 2843 ERI_DEBUG_MSG2(erip, INTR_MSG, 2844 "eri_intr: non-fatal error: erisbits = %X", erisbits); 2845 (void) eri_nonfatal_err(erip, erisbits); 2846 if (erip->linkcheck) { 2847 mutex_exit(&erip->intrlock); 2848 (void) eri_init(erip); 2849 return (serviced); 2850 } 2851 } 2852 2853 if (erisbits & ERI_G_STATUS_MIF_INT) { 2854 uint16_t stat; 2855 ERI_DEBUG_MSG2(erip, XCVR_MSG, 2856 "eri_intr:MIF Interrupt:mii_status %X", erip->mii_status); 2857 eri_stop_timer(erip); /* acquire linklock */ 2858 2859 mutex_enter(&erip->xmitlock); 2860 mutex_enter(&erip->xcvrlock); 2861 #ifdef ERI_MIF_POLL_STATUS_WORKAROUND 2862 mif_status = GET_MIFREG(mif_bsts); 2863 eri_mif_poll(erip, MIF_POLL_STOP); 2864 ERI_DEBUG_MSG3(erip, XCVR_MSG, 2865 "eri_intr: new MIF interrupt status %X XCVR status %X", 2866 mif_status, erip->mii_status); 2867 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat); 2868 linkupdate = eri_mif_check(erip, stat, stat); 2869 2870 #else 2871 mif_status = GET_MIFREG(mif_bsts); 2872 eri_mif_poll(erip, MIF_POLL_STOP); 2873 linkupdate = eri_mif_check(erip, (uint16_t)mif_status, 2874 (uint16_t)(mif_status >> 16)); 2875 #endif 2876 eri_mif_poll(erip, MIF_POLL_START); 2877 mutex_exit(&erip->xcvrlock); 2878 mutex_exit(&erip->xmitlock); 2879 2880 if (!erip->openloop_autoneg) 2881 eri_start_timer(erip, eri_check_link, 2882 ERI_LINKCHECK_TIMER); 2883 else 2884 eri_start_timer(erip, eri_check_link, 
2885 ERI_P_FAULT_TIMER); 2886 } 2887 2888 ERI_DEBUG_MSG2(erip, INTR_MSG, 2889 "eri_intr:May have Read Interrupt status:status %X", erisbits); 2890 2891 rx_done_int: 2892 if ((erisbits & (ERI_G_STATUS_TX_INT_ME)) || 2893 (erip->tx_cur_cnt >= tx_interrupt_rate)) { 2894 mutex_enter(&erip->xmitlock); 2895 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) & 2896 ETX_COMPLETION_MASK); 2897 2898 macupdate |= eri_reclaim(erip, erip->tx_completion); 2899 mutex_exit(&erip->xmitlock); 2900 } 2901 2902 if (erisbits & ERI_G_STATUS_RX_DONE) { 2903 volatile struct rmd *rmdp, *rmdpbase; 2904 volatile uint32_t rmdi; 2905 uint8_t loop_limit = 0x20; 2906 uint64_t flags; 2907 uint32_t rmdmax_mask = erip->rmdmax_mask; 2908 2909 rmdpbase = erip->rmdp; 2910 rmdi = erip->rx_completion; 2911 rmdp = rmdpbase + rmdi; 2912 2913 /* 2914 * Sync RMD before looking at it. 2915 */ 2916 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd), 2917 DDI_DMA_SYNC_FORCPU); 2918 /* 2919 * Loop through each RMD. 2920 */ 2921 2922 flags = GET_RMD_FLAGS(rmdp); 2923 while (((flags & ERI_RMD_OWN) == 0) && (loop_limit)) { 2924 /* process one packet */ 2925 mp = eri_read_dma(erip, rmdp, rmdi, flags); 2926 rmdi = (rmdi + 1) & rmdmax_mask; 2927 rmdp = rmdpbase + rmdi; 2928 2929 if (mp != NULL) { 2930 *tail = mp; 2931 tail = &mp->b_next; 2932 } 2933 2934 /* 2935 * ERI RCV DMA fetches or updates four descriptors 2936 * a time. Also we don't want to update the desc. 2937 * batch we just received packet on. So we update 2938 * descriptors for every 4 packets and we update 2939 * the group of 4 after the current batch. 2940 */ 2941 2942 if (!(rmdi % 4)) { 2943 if (eri_overflow_reset && 2944 (GET_GLOBREG(status_alias) & 2945 ERI_G_STATUS_NONFATAL_ERR)) { 2946 loop_limit = 1; 2947 } else { 2948 erip->rx_kick = 2949 (rmdi + ERI_RPENDING - 4) & 2950 rmdmax_mask; 2951 PUT_ERXREG(rx_kick, erip->rx_kick); 2952 } 2953 } 2954 2955 /* 2956 * Sync the next RMD before looking at it. 2957 */ 2958 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd), 2959 DDI_DMA_SYNC_FORCPU); 2960 flags = GET_RMD_FLAGS(rmdp); 2961 loop_limit--; 2962 } 2963 erip->rx_completion = rmdi; 2964 } 2965 2966 erip->wantw = B_FALSE; 2967 2968 mutex_exit(&erip->intrlock); 2969 2970 if (head) 2971 mac_rx(erip->mh, NULL, head); 2972 2973 if (macupdate) 2974 mac_tx_update(erip->mh); 2975 2976 if (linkupdate != LINK_STATE_UNKNOWN) 2977 mac_link_update(erip->mh, linkupdate); 2978 2979 return (serviced); 2980 } 2981 2982 /* 2983 * Handle interrupts for fatal errors 2984 * Need reinitialization. 
2985 */ 2986 #define PCI_DATA_PARITY_REP (1 << 8) 2987 #define PCI_SING_TARGET_ABORT (1 << 11) 2988 #define PCI_RCV_TARGET_ABORT (1 << 12) 2989 #define PCI_RCV_MASTER_ABORT (1 << 13) 2990 #define PCI_SING_SYSTEM_ERR (1 << 14) 2991 #define PCI_DATA_PARITY_ERR (1 << 15) 2992 2993 /* called with intrlock held */ 2994 static void 2995 eri_fatal_err(struct eri *erip, uint32_t erisbits) 2996 { 2997 uint16_t pci_status; 2998 uint32_t pci_error_int = 0; 2999 3000 if (erisbits & ERI_G_STATUS_RX_TAG_ERR) { 3001 erip->rx_reset_issued = 1; 3002 HSTAT(erip, rxtag_err); 3003 } else { 3004 erip->global_reset_issued = 1; 3005 if (erisbits & ERI_G_STATUS_BUS_ERR_INT) { 3006 pci_error_int = 1; 3007 HSTAT(erip, pci_error_int); 3008 } else if (erisbits & ERI_G_STATUS_PERR_INT) { 3009 HSTAT(erip, parity_error); 3010 } else { 3011 HSTAT(erip, unknown_fatal); 3012 } 3013 } 3014 3015 /* 3016 * PCI bus error 3017 */ 3018 if (pci_error_int && erip->pci_config_handle) { 3019 pci_status = pci_config_get16(erip->pci_config_handle, 3020 PCI_CONF_STAT); 3021 ERI_DEBUG_MSG2(erip, FATAL_ERR_MSG, "Bus Error Status %x", 3022 pci_status); 3023 if (pci_status & PCI_DATA_PARITY_REP) 3024 HSTAT(erip, pci_data_parity_err); 3025 if (pci_status & PCI_SING_TARGET_ABORT) 3026 HSTAT(erip, pci_signal_target_abort); 3027 if (pci_status & PCI_RCV_TARGET_ABORT) 3028 HSTAT(erip, pci_rcvd_target_abort); 3029 if (pci_status & PCI_RCV_MASTER_ABORT) 3030 HSTAT(erip, pci_rcvd_master_abort); 3031 if (pci_status & PCI_SING_SYSTEM_ERR) 3032 HSTAT(erip, pci_signal_system_err); 3033 if (pci_status & PCI_DATA_PARITY_ERR) 3034 HSTAT(erip, pci_signal_system_err); 3035 /* 3036 * clear it by writing the value that was read back. 3037 */ 3038 pci_config_put16(erip->pci_config_handle, PCI_CONF_STAT, 3039 pci_status); 3040 } 3041 } 3042 3043 /* 3044 * Handle interrupts regarding non-fatal events. 
3045 * TXMAC, RXMAC and MACCTL events 3046 */ 3047 static void 3048 eri_nonfatal_err(struct eri *erip, uint32_t erisbits) 3049 { 3050 3051 uint32_t txmac_sts, rxmac_sts, macctl_sts, pause_time; 3052 3053 #ifdef ERI_PM_WORKAROUND 3054 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED, 3055 PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS) 3056 erip->stats.pmcap = ERI_PMCAP_NONE; 3057 #endif 3058 3059 if (erisbits & ERI_G_STATUS_TX_MAC_INT) { 3060 txmac_sts = GET_MACREG(txsts); 3061 if (txmac_sts & BMAC_TXSTS_TX_URUN) { 3062 erip->linkcheck = 1; 3063 HSTAT(erip, txmac_urun); 3064 HSTAT(erip, oerrors); 3065 } 3066 3067 if (txmac_sts & BMAC_TXSTS_MAXPKT_ERR) { 3068 erip->linkcheck = 1; 3069 HSTAT(erip, txmac_maxpkt_err); 3070 HSTAT(erip, oerrors); 3071 } 3072 if (txmac_sts & BMAC_TXSTS_NCC_EXP) { 3073 erip->stats.collisions += 0x10000; 3074 } 3075 3076 if (txmac_sts & BMAC_TXSTS_ECC_EXP) { 3077 erip->stats.excessive_coll += 0x10000; 3078 } 3079 3080 if (txmac_sts & BMAC_TXSTS_LCC_EXP) { 3081 erip->stats.late_coll += 0x10000; 3082 } 3083 3084 if (txmac_sts & BMAC_TXSTS_FCC_EXP) { 3085 erip->stats.first_coll += 0x10000; 3086 } 3087 3088 if (txmac_sts & BMAC_TXSTS_DEFER_EXP) { 3089 HSTAT(erip, defer_timer_exp); 3090 } 3091 3092 if (txmac_sts & BMAC_TXSTS_PEAK_EXP) { 3093 erip->stats.peak_attempt_cnt += 0x100; 3094 } 3095 } 3096 3097 if (erisbits & ERI_G_STATUS_RX_NO_BUF) { 3098 ERI_DEBUG_MSG1(erip, NONFATAL_MSG, "rx dropped/no free desc"); 3099 3100 if (eri_overflow_reset) 3101 erip->linkcheck = 1; 3102 3103 HSTAT(erip, no_free_rx_desc); 3104 HSTAT(erip, ierrors); 3105 } 3106 if (erisbits & ERI_G_STATUS_RX_MAC_INT) { 3107 rxmac_sts = GET_MACREG(rxsts); 3108 if (rxmac_sts & BMAC_RXSTS_RX_OVF) { 3109 #ifndef ERI_RMAC_HANG_WORKAROUND 3110 eri_stop_timer(erip); /* acquire linklock */ 3111 erip->check_rmac_hang ++; 3112 erip->check2_rmac_hang = 0; 3113 erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr); 3114 erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr); 3115 3116 ERI_DEBUG_MSG5(erip, NONFATAL_MSG, 3117 "overflow intr %d: %8x wr:%2x rd:%2x", 3118 erip->check_rmac_hang, 3119 GET_MACREG(macsm), 3120 GET_ERXREG(rxfifo_wr_ptr), 3121 GET_ERXREG(rxfifo_rd_ptr)); 3122 3123 eri_start_timer(erip, eri_check_link, 3124 ERI_CHECK_HANG_TIMER); 3125 #endif 3126 if (eri_overflow_reset) 3127 erip->linkcheck = 1; 3128 3129 HSTAT(erip, rx_overflow); 3130 HSTAT(erip, ierrors); 3131 } 3132 3133 if (rxmac_sts & BMAC_RXSTS_ALE_EXP) { 3134 erip->stats.rx_align_err += 0x10000; 3135 erip->stats.ierrors += 0x10000; 3136 } 3137 3138 if (rxmac_sts & BMAC_RXSTS_CRC_EXP) { 3139 erip->stats.rx_crc_err += 0x10000; 3140 erip->stats.ierrors += 0x10000; 3141 } 3142 3143 if (rxmac_sts & BMAC_RXSTS_LEN_EXP) { 3144 erip->stats.rx_length_err += 0x10000; 3145 erip->stats.ierrors += 0x10000; 3146 } 3147 3148 if (rxmac_sts & BMAC_RXSTS_CVI_EXP) { 3149 erip->stats.rx_code_viol_err += 0x10000; 3150 erip->stats.ierrors += 0x10000; 3151 } 3152 } 3153 3154 if (erisbits & ERI_G_STATUS_MAC_CTRL_INT) { 3155 3156 macctl_sts = GET_MACREG(macctl_sts); 3157 if (macctl_sts & ERI_MCTLSTS_PAUSE_RCVD) { 3158 pause_time = ((macctl_sts & 3159 ERI_MCTLSTS_PAUSE_TIME) >> 16); 3160 ERI_DEBUG_MSG2(erip, NONFATAL_MSG, 3161 "PAUSE Received. 
pause time = %X slot_times", 3162 pause_time); 3163 HSTAT(erip, pause_rxcount); 3164 erip->stats.pause_time_count += pause_time; 3165 } 3166 3167 if (macctl_sts & ERI_MCTLSTS_PAUSE_STATE) { 3168 HSTAT(erip, pause_oncount); 3169 erip->stats.pausing = 1; 3170 } 3171 3172 if (macctl_sts & ERI_MCTLSTS_NONPAUSE) { 3173 HSTAT(erip, pause_offcount); 3174 erip->stats.pausing = 0; 3175 } 3176 } 3177 3178 } 3179 3180 /* 3181 * if this is the first init do not bother to save the 3182 * counters. 3183 */ 3184 static void 3185 eri_savecntrs(struct eri *erip) 3186 { 3187 uint32_t fecnt, aecnt, lecnt, rxcv; 3188 uint32_t ltcnt, excnt, fccnt; 3189 3190 /* XXX What all gets added in ierrors and oerrors? */ 3191 fecnt = GET_MACREG(fecnt); 3192 HSTATN(erip, rx_crc_err, fecnt); 3193 PUT_MACREG(fecnt, 0); 3194 3195 aecnt = GET_MACREG(aecnt); 3196 HSTATN(erip, rx_align_err, aecnt); 3197 PUT_MACREG(aecnt, 0); 3198 3199 lecnt = GET_MACREG(lecnt); 3200 HSTATN(erip, rx_length_err, lecnt); 3201 PUT_MACREG(lecnt, 0); 3202 3203 rxcv = GET_MACREG(rxcv); 3204 HSTATN(erip, rx_code_viol_err, rxcv); 3205 PUT_MACREG(rxcv, 0); 3206 3207 ltcnt = GET_MACREG(ltcnt); 3208 HSTATN(erip, late_coll, ltcnt); 3209 PUT_MACREG(ltcnt, 0); 3210 3211 erip->stats.collisions += (GET_MACREG(nccnt) + ltcnt); 3212 PUT_MACREG(nccnt, 0); 3213 3214 excnt = GET_MACREG(excnt); 3215 HSTATN(erip, excessive_coll, excnt); 3216 PUT_MACREG(excnt, 0); 3217 3218 fccnt = GET_MACREG(fccnt); 3219 HSTATN(erip, first_coll, fccnt); 3220 PUT_MACREG(fccnt, 0); 3221 3222 /* 3223 * Do not add code violations to input errors. 3224 * They are already counted in CRC errors 3225 */ 3226 HSTATN(erip, ierrors, (fecnt + aecnt + lecnt)); 3227 HSTATN(erip, oerrors, (ltcnt + excnt)); 3228 } 3229 3230 mblk_t * 3231 eri_allocb_sp(size_t size) 3232 { 3233 mblk_t *mp; 3234 3235 size += 128; 3236 if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) { 3237 return (NULL); 3238 } 3239 mp->b_wptr += 128; 3240 mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE); 3241 mp->b_rptr = mp->b_wptr; 3242 3243 return (mp); 3244 } 3245 3246 mblk_t * 3247 eri_allocb(size_t size) 3248 { 3249 mblk_t *mp; 3250 3251 if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) { 3252 return (NULL); 3253 } 3254 mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE); 3255 mp->b_rptr = mp->b_wptr; 3256 3257 return (mp); 3258 } 3259 3260 /* 3261 * Hardware Dependent Functions 3262 * New Section. 3263 */ 3264 3265 /* <<<<<<<<<<<<<<<< Fast Ethernet PHY Bit Bang Operations >>>>>>>>>>>>>>>>>> */ 3266 3267 static void 3268 send_bit(struct eri *erip, uint32_t x) 3269 { 3270 PUT_MIFREG(mif_bbdata, x); 3271 PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW); 3272 PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH); 3273 } 3274 3275 /* 3276 * To read the MII register bits according to the IEEE Standard 3277 */ 3278 static uint32_t 3279 get_bit_std(struct eri *erip) 3280 { 3281 uint32_t x; 3282 3283 PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW); 3284 drv_usecwait(1); /* wait for >330 ns for stable data */ 3285 if (param_transceiver == INTERNAL_XCVR) 3286 x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM0) ? 1 : 0; 3287 else 3288 x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM1) ? 
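 /*
		    * The sampled MDIO data-in bit sits at a different
		    * position in mif_cfg for each transceiver: CFGM0 for the
		    * internal PHY (tested above), CFGM1 for the external PHY
		    * here.
		    */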
1 : 0; 3289 PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH); 3290 return (x); 3291 } 3292 3293 #define SEND_BIT(x) send_bit(erip, x) 3294 #define GET_BIT_STD(x) x = get_bit_std(erip) 3295 3296 3297 static void 3298 eri_bb_mii_write(struct eri *erip, uint8_t regad, uint16_t data) 3299 { 3300 uint8_t phyad; 3301 int i; 3302 3303 PUT_MIFREG(mif_bbopenb, 1); /* Enable the MII driver */ 3304 phyad = erip->phyad; 3305 (void) eri_bb_force_idle(erip); 3306 SEND_BIT(0); SEND_BIT(1); /* <ST> */ 3307 SEND_BIT(0); SEND_BIT(1); /* <OP> */ 3308 for (i = 4; i >= 0; i--) { /* <AAAAA> */ 3309 SEND_BIT((phyad >> i) & 1); 3310 } 3311 for (i = 4; i >= 0; i--) { /* <RRRRR> */ 3312 SEND_BIT((regad >> i) & 1); 3313 } 3314 SEND_BIT(1); SEND_BIT(0); /* <TA> */ 3315 for (i = 0xf; i >= 0; i--) { /* <DDDDDDDDDDDDDDDD> */ 3316 SEND_BIT((data >> i) & 1); 3317 } 3318 PUT_MIFREG(mif_bbopenb, 0); /* Disable the MII driver */ 3319 } 3320 3321 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */ 3322 static uint32_t 3323 eri_bb_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap) 3324 { 3325 uint8_t phyad; 3326 int i; 3327 uint32_t x; 3328 uint32_t y; 3329 3330 *datap = 0; 3331 3332 PUT_MIFREG(mif_bbopenb, 1); /* Enable the MII driver */ 3333 phyad = erip->phyad; 3334 (void) eri_bb_force_idle(erip); 3335 SEND_BIT(0); SEND_BIT(1); /* <ST> */ 3336 SEND_BIT(1); SEND_BIT(0); /* <OP> */ 3337 for (i = 4; i >= 0; i--) { /* <AAAAA> */ 3338 SEND_BIT((phyad >> i) & 1); 3339 } 3340 for (i = 4; i >= 0; i--) { /* <RRRRR> */ 3341 SEND_BIT((regad >> i) & 1); 3342 } 3343 3344 PUT_MIFREG(mif_bbopenb, 0); /* Disable the MII driver */ 3345 3346 GET_BIT_STD(x); 3347 GET_BIT_STD(y); /* <TA> */ 3348 for (i = 0xf; i >= 0; i--) { /* <DDDDDDDDDDDDDDDD> */ 3349 GET_BIT_STD(x); 3350 *datap += (x << i); 3351 } 3352 /* Kludge to get the Transceiver out of hung mode */ 3353 /* XXX: Test if this is still needed */ 3354 GET_BIT_STD(x); 3355 GET_BIT_STD(x); 3356 GET_BIT_STD(x); 3357 3358 return (y); 3359 } 3360 3361 static void 3362 eri_bb_force_idle(struct eri *erip) 3363 { 3364 int i; 3365 3366 for (i = 0; i < 33; i++) { 3367 SEND_BIT(1); 3368 } 3369 } 3370 3371 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */ 3372 3373 3374 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */ 3375 3376 #ifdef ERI_FRM_DEBUG 3377 int frame_flag = 0; 3378 #endif 3379 3380 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */ 3381 static uint32_t 3382 eri_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap) 3383 { 3384 uint32_t frame; 3385 uint8_t phyad; 3386 3387 if (param_transceiver == NO_XCVR) 3388 return (1); /* No xcvr present */ 3389 3390 if (!erip->frame_enable) 3391 return (eri_bb_mii_read(erip, regad, datap)); 3392 3393 phyad = erip->phyad; 3394 #ifdef ERI_FRM_DEBUG 3395 if (!frame_flag) { 3396 eri_errror(erip->dip, "Frame Register used for MII"); 3397 frame_flag = 1; 3398 } 3399 #endif 3400 ERI_DEBUG_MSG3(erip, FRM_MSG, 3401 "Frame Reg :mii_read: phyad = %X reg = %X ", phyad, regad); 3402 3403 PUT_MIFREG(mif_frame, ERI_MIF_FRREAD | 3404 (phyad << ERI_MIF_FRPHYAD_SHIFT) | 3405 (regad << ERI_MIF_FRREGAD_SHIFT)); 3406 MIF_ERIDELAY(300, phyad, regad); 3407 frame = GET_MIFREG(mif_frame); 3408 if ((frame & ERI_MIF_FRTA0) == 0) { 3409 return (1); 3410 } else { 3411 *datap = (uint16_t)(frame & ERI_MIF_FRDATA); 3412 return (0); 3413 } 3414 3415 } 3416 3417 static void 3418 eri_mii_write(struct eri *erip, uint8_t regad, uint16_t data) 3419 { 3420 uint8_t phyad; 3421 3422 if 
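 /* MIF frame mode unavailable: fall back to bit-banged MDIO */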
(!erip->frame_enable) {
		eri_bb_mii_write(erip, regad, data);
		return;
	}

	phyad = erip->phyad;

	PUT_MIFREG(mif_frame, (ERI_MIF_FRWRITE |
	    (phyad << ERI_MIF_FRPHYAD_SHIFT) |
	    (regad << ERI_MIF_FRREGAD_SHIFT) | data));
	MIF_ERIDELAY(300, phyad, regad);
	(void) GET_MIFREG(mif_frame);
}


/* <<<<<<<<<<<<<<<<< PACKET TRANSMIT FUNCTIONS >>>>>>>>>>>>>>>>>>>> */

#define	ERI_CROSS_PAGE_BOUNDRY(i, size, pagesize) \
	((i & pagesize) != ((i + size) & pagesize))

/*
 * Send a single mblk. Returns B_TRUE if the packet is sent, or disposed of
 * by freemsg. Returns B_FALSE if the packet was not sent or queued, and
 * should be retried later (due to tx resource exhaustion).
 */
static boolean_t
eri_send_msg(struct eri *erip, mblk_t *mp)
{
	volatile struct eri_tmd	*tmdp = NULL;
	volatile struct eri_tmd	*tbasep = NULL;
	uint32_t	len_msg = 0;
	uint32_t	i;
	uint64_t	int_me = 0;
	uint_t		tmdcsum = 0;
	uint_t		start_offset = 0;
	uint_t		stuff_offset = 0;
	uint_t		flags = 0;
	boolean_t	macupdate = B_FALSE;

	caddr_t		ptr;
	uint32_t	offset;
	uint64_t	ctrl;
	ddi_dma_cookie_t	c;

	if (!param_linkup) {
		freemsg(mp);
		HSTAT(erip, tnocar);
		HSTAT(erip, oerrors);
		return (B_TRUE);
	}

#ifdef ERI_HWCSUM
	hcksum_retrieve(mp, NULL, NULL, &start_offset, &stuff_offset,
	    NULL, NULL, &flags);

	if (flags & HCK_PARTIALCKSUM) {
		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
			start_offset += ETHERHEADER_SIZE + 4;
			stuff_offset += ETHERHEADER_SIZE + 4;
		} else {
			start_offset += ETHERHEADER_SIZE;
			stuff_offset += ETHERHEADER_SIZE;
		}
		tmdcsum = ERI_TMD_CSENABL;
	}
#endif /* ERI_HWCSUM */

	if ((len_msg = msgsize(mp)) > ERI_BUFSIZE) {
		/*
		 * This shouldn't ever occur, as GLD should not send us
		 * packets that are too big.
		 */
		HSTAT(erip, oerrors);
		freemsg(mp);
		return (B_TRUE);
	}

	/*
	 * Update MIB II statistics.
	 */
	BUMP_OutNUcast(erip, mp->b_rptr);

	mutex_enter(&erip->xmitlock);

	tbasep = erip->eri_tmdp;

	/* Check if there are enough descriptors for this packet */
	tmdp = erip->tnextp;

	if (tmdp >= erip->tcurp) /* check notmds */
		i = tmdp - erip->tcurp;
	else
		i = tmdp + ERI_TPENDING - erip->tcurp;

	if (i > (ERI_TPENDING - 4))
		goto notmds;

	if (i >= (ERI_TPENDING >> 1) && !(erip->starts & 0x7))
		int_me = ERI_TMD_INTME;

	i = tmdp - tbasep; /* index */

	offset = (i * ERI_BUFSIZE);
	ptr = erip->tbuf_kaddr + offset;

	mcopymsg(mp, ptr);

#ifdef	ERI_HDX_BUG_WORKAROUND
	if ((param_mode) || (eri_hdx_pad_enable == 0)) {
		if (len_msg < ETHERMIN) {
			bzero((ptr + len_msg), (ETHERMIN - len_msg));
			len_msg = ETHERMIN;
		}
	} else {
		if (len_msg < 97) {
			bzero((ptr + len_msg), (97 - len_msg));
			len_msg = 97;
		}
	}
#endif
	c.dmac_address = erip->tbuf_ioaddr + offset;
	(void) ddi_dma_sync(erip->tbuf_handle,
	    (off_t)offset, len_msg, DDI_DMA_SYNC_FORDEV);

	/* first and last (and only!)
descr of packet */ 3547 ctrl = ERI_TMD_SOP | ERI_TMD_EOP | int_me | tmdcsum | 3548 (start_offset << ERI_TMD_CSSTART_SHIFT) | 3549 (stuff_offset << ERI_TMD_CSSTUFF_SHIFT); 3550 3551 PUT_TMD(tmdp, c, len_msg, ctrl); 3552 ERI_SYNCIOPB(erip, tmdp, sizeof (struct eri_tmd), 3553 DDI_DMA_SYNC_FORDEV); 3554 3555 tmdp = NEXTTMD(erip, tmdp); 3556 erip->tx_cur_cnt++; 3557 3558 erip->tx_kick = tmdp - tbasep; 3559 PUT_ETXREG(tx_kick, erip->tx_kick); 3560 erip->tnextp = tmdp; 3561 3562 erip->starts++; 3563 3564 if (erip->tx_cur_cnt >= tx_interrupt_rate) { 3565 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) & 3566 ETX_COMPLETION_MASK); 3567 macupdate |= eri_reclaim(erip, erip->tx_completion); 3568 } 3569 mutex_exit(&erip->xmitlock); 3570 3571 if (macupdate) 3572 mac_tx_update(erip->mh); 3573 3574 return (B_TRUE); 3575 3576 notmds: 3577 HSTAT(erip, notmds); 3578 erip->wantw = B_TRUE; 3579 3580 if (!erip->tx_int_me) { 3581 PUT_GLOBREG(intmask, GET_GLOBREG(intmask) & 3582 ~(ERI_G_MASK_TX_INT_ME)); 3583 erip->tx_int_me = 1; 3584 } 3585 3586 if (erip->tx_cur_cnt >= tx_interrupt_rate) { 3587 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) & 3588 ETX_COMPLETION_MASK); 3589 macupdate |= eri_reclaim(erip, erip->tx_completion); 3590 } 3591 3592 mutex_exit(&erip->xmitlock); 3593 3594 if (macupdate) 3595 mac_tx_update(erip->mh); 3596 3597 return (B_FALSE); 3598 } 3599 3600 static mblk_t * 3601 eri_m_tx(void *arg, mblk_t *mp) 3602 { 3603 struct eri *erip = arg; 3604 mblk_t *next; 3605 3606 while (mp != NULL) { 3607 next = mp->b_next; 3608 mp->b_next = NULL; 3609 if (!eri_send_msg(erip, mp)) { 3610 mp->b_next = next; 3611 break; 3612 } 3613 mp = next; 3614 } 3615 3616 return (mp); 3617 } 3618 3619 /* 3620 * Transmit completion reclaiming. 3621 */ 3622 static boolean_t 3623 eri_reclaim(struct eri *erip, uint32_t tx_completion) 3624 { 3625 volatile struct eri_tmd *tmdp; 3626 struct eri_tmd *tcomp; 3627 struct eri_tmd *tbasep; 3628 struct eri_tmd *tlimp; 3629 uint64_t flags; 3630 uint_t reclaimed = 0; 3631 3632 tbasep = erip->eri_tmdp; 3633 tlimp = erip->eri_tmdlimp; 3634 3635 tmdp = erip->tcurp; 3636 tcomp = tbasep + tx_completion; /* pointer to completion tmd */ 3637 3638 /* 3639 * Loop through each TMD starting from tcurp and upto tcomp. 3640 */ 3641 while (tmdp != tcomp) { 3642 flags = GET_TMD_FLAGS(tmdp); 3643 if (flags & (ERI_TMD_SOP)) 3644 HSTAT(erip, opackets64); 3645 3646 HSTATN(erip, obytes64, (flags & ERI_TMD_BUFSIZE)); 3647 3648 tmdp = NEXTTMDP(tbasep, tlimp, tmdp); 3649 reclaimed++; 3650 } 3651 3652 erip->tcurp = tmdp; 3653 erip->tx_cur_cnt -= reclaimed; 3654 3655 return (erip->wantw && reclaimed ? B_TRUE : B_FALSE); 3656 } 3657 3658 3659 /* <<<<<<<<<<<<<<<<<<< PACKET RECEIVE FUNCTIONS >>>>>>>>>>>>>>>>>>> */ 3660 static mblk_t * 3661 eri_read_dma(struct eri *erip, volatile struct rmd *rmdp, 3662 int rmdi, uint64_t flags) 3663 { 3664 mblk_t *bp, *nbp; 3665 int len; 3666 uint_t ccnt; 3667 ddi_dma_cookie_t c; 3668 #ifdef ERI_RCV_CKSUM 3669 ushort_t sum; 3670 #endif /* ERI_RCV_CKSUM */ 3671 mblk_t *retmp = NULL; 3672 3673 bp = erip->rmblkp[rmdi]; 3674 len = (flags & ERI_RMD_BUFSIZE) >> ERI_RMD_BUFSIZE_SHIFT; 3675 #ifdef ERI_DONT_STRIP_CRC 3676 len -= 4; 3677 #endif 3678 /* 3679 * In the event of RX FIFO overflow error, ERI REV 1.0 ASIC can 3680 * corrupt packets following the descriptor corresponding the 3681 * overflow. To detect the corrupted packets, we disable the 3682 * dropping of the "bad" packets at the MAC. The descriptor 3683 * then would have the "BAD" bit set. 
We drop the overflowing
 * packet and the packet following it. We could have done some sort
 * of checking to determine if the second packet was indeed bad
 * (using CRC or checksum), but it would be expensive in this
 * routine, since it is run in interrupt context.
 */
	if ((flags & ERI_RMD_BAD) || (len < ETHERMIN) || (len > ETHERMAX+4)) {

		HSTAT(erip, rx_bad_pkts);
		if ((flags & ERI_RMD_BAD) == 0)
			HSTAT(erip, ierrors);
		if (len < ETHERMIN) {
			HSTAT(erip, rx_runt);
		} else if (len > ETHERMAX+4) {
			HSTAT(erip, rx_toolong_pkts);
		}
		HSTAT(erip, drop);
		UPDATE_RMD(rmdp);

		ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
		    DDI_DMA_SYNC_FORDEV);
		return (NULL);
	}
#ifdef	ERI_DONT_STRIP_CRC
	{
		uint32_t hw_fcs, tail_fcs;
		/*
		 * Since we don't let the hardware strip the CRC in
		 * half-duplex mode, the driver needs to do it.
		 * This works around a hardware bug.
		 */
		bp->b_wptr = bp->b_rptr + ERI_FSTBYTE_OFFSET + len;
		/*
		 * Get the checksum calculated by the hardware.
		 */
		hw_fcs = flags & ERI_RMD_CKSUM;
		/*
		 * Catch the case when the CRC starts on an odd
		 * boundary.
		 */
		tail_fcs = bp->b_wptr[0] << 8 | bp->b_wptr[1];
		tail_fcs += bp->b_wptr[2] << 8 | bp->b_wptr[3];
		tail_fcs = (tail_fcs & 0xffff) + (tail_fcs >> 16);
		if ((uintptr_t)(bp->b_wptr) & 1) {
			tail_fcs = (tail_fcs << 8) & 0xffff | (tail_fcs >> 8);
		}
		hw_fcs += tail_fcs;
		hw_fcs = (hw_fcs & 0xffff) + (hw_fcs >> 16);
		hw_fcs &= 0xffff;
		/*
		 * Now we can replace what the hardware wrote and make
		 * believe it got it right in the first place.
		 */
		flags = (flags & ~(uint64_t)ERI_RMD_CKSUM) | hw_fcs;
	}
#endif
	/*
	 * Packet Processing:
	 * Once we get a packet bp, we try to allocate a new mblk, nbp,
	 * to replace this one. If we succeed, we map it to the current
	 * dma handle and update the descriptor with the new cookie. We
	 * then put bp in our read service queue erip->ipq, if it exists,
	 * or we simply pass bp up to the stream expecting it.
	 * If allocation of the new mblk fails, we implicitly drop the
	 * current packet, i.e. we do not pass up the mblk and re-use it.
	 * Re-mapping is not required.
	 */

	if (len < eri_rx_bcopy_max) {
		if ((nbp = eri_allocb_sp(len + ERI_FSTBYTE_OFFSET))) {
			(void) ddi_dma_sync(erip->ndmarh[rmdi], 0,
			    len + ERI_FSTBYTE_OFFSET, DDI_DMA_SYNC_FORCPU);
			DB_TYPE(nbp) = M_DATA;
			bcopy(bp->b_rptr, nbp->b_rptr,
			    len + ERI_FSTBYTE_OFFSET);
			UPDATE_RMD(rmdp);
			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
			    DDI_DMA_SYNC_FORDEV);

			/* Add the First Byte offset to the b_rptr */
			nbp->b_rptr += ERI_FSTBYTE_OFFSET;
			nbp->b_wptr = nbp->b_rptr + len;

#ifdef ERI_RCV_CKSUM
			sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
			ERI_PROCESS_READ(erip, nbp, sum);
#else
			ERI_PROCESS_READ(erip, nbp);
#endif
			retmp = nbp;
		} else {

			/*
			 * mblk allocation has failed. Re-use the old mblk for
			 * the next packet. Re-mapping is not required since
			 * the same mblk and dma cookie is to be used again.
3779 */ 3780 HSTAT(erip, ierrors); 3781 HSTAT(erip, allocbfail); 3782 HSTAT(erip, norcvbuf); 3783 3784 UPDATE_RMD(rmdp); 3785 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd), 3786 DDI_DMA_SYNC_FORDEV); 3787 ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail"); 3788 } 3789 } else { 3790 /* Use dma unmap/map */ 3791 if ((nbp = eri_allocb_sp(ERI_BUFSIZE))) { 3792 /* 3793 * How do we harden this, specially if unbind 3794 * succeeds and then bind fails? 3795 * If Unbind fails, we can leave without updating 3796 * the descriptor but would it continue to work on 3797 * next round? 3798 */ 3799 (void) ddi_dma_unbind_handle(erip->ndmarh[rmdi]); 3800 (void) ddi_dma_addr_bind_handle(erip->ndmarh[rmdi], 3801 NULL, (caddr_t)nbp->b_rptr, ERI_BUFSIZE, 3802 DDI_DMA_READ | DDI_DMA_CONSISTENT, 3803 DDI_DMA_DONTWAIT, 0, &c, &ccnt); 3804 3805 erip->rmblkp[rmdi] = nbp; 3806 PUT_RMD(rmdp, c); 3807 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd), 3808 DDI_DMA_SYNC_FORDEV); 3809 3810 /* Add the First Byte offset to the b_rptr */ 3811 3812 bp->b_rptr += ERI_FSTBYTE_OFFSET; 3813 bp->b_wptr = bp->b_rptr + len; 3814 3815 #ifdef ERI_RCV_CKSUM 3816 sum = ~(uint16_t)(flags & ERI_RMD_CKSUM); 3817 ERI_PROCESS_READ(erip, bp, sum); 3818 #else 3819 ERI_PROCESS_READ(erip, bp); 3820 #endif 3821 retmp = bp; 3822 } else { 3823 3824 /* 3825 * mblk allocation has failed. Re-use the old mblk for 3826 * the next packet. Re-mapping is not required since 3827 * the same mblk and dma cookie is to be used again. 3828 */ 3829 HSTAT(erip, ierrors); 3830 HSTAT(erip, allocbfail); 3831 HSTAT(erip, norcvbuf); 3832 3833 UPDATE_RMD(rmdp); 3834 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd), 3835 DDI_DMA_SYNC_FORDEV); 3836 ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail"); 3837 } 3838 } 3839 3840 return (retmp); 3841 } 3842 3843 #define LINK_STAT_DISPLAY_TIME 20 3844 3845 static int 3846 eri_init_xfer_params(struct eri *erip) 3847 { 3848 int i; 3849 dev_info_t *dip; 3850 3851 dip = erip->dip; 3852 3853 for (i = 0; i < A_CNT(param_arr); i++) 3854 erip->param_arr[i] = param_arr[i]; 3855 3856 erip->xmit_dma_mode = 0; 3857 erip->rcv_dma_mode = 0; 3858 erip->mifpoll_enable = mifpoll_enable; 3859 erip->lance_mode_enable = lance_mode; 3860 erip->frame_enable = 1; 3861 erip->ngu_enable = ngu_enable; 3862 3863 if (!erip->g_nd && !eri_param_register(erip, 3864 erip->param_arr, A_CNT(param_arr))) { 3865 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG, 3866 param_reg_fail_msg); 3867 return (-1); 3868 } 3869 3870 /* 3871 * Set up the start-up values for user-configurable parameters 3872 * Get the values from the global variables first. 3873 * Use the MASK to limit the value to allowed maximum. 3874 */ 3875 3876 param_transceiver = NO_XCVR; 3877 3878 /* 3879 * The link speed may be forced to either 10 Mbps or 100 Mbps using the 3880 * property "transfer-speed". This may be done in OBP by using the command 3881 * "apply transfer-speed=<speed> <device>". The speed may be either 10 or 100. 3882 */ 3883 i = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "transfer-speed", 0); 3884 if (i != 0) { 3885 param_autoneg = 0; /* force speed */ 3886 param_anar_100T4 = 0; 3887 param_anar_10fdx = 0; 3888 param_anar_10hdx = 0; 3889 param_anar_100fdx = 0; 3890 param_anar_100hdx = 0; 3891 param_anar_asm_dir = 0; 3892 param_anar_pause = 0; 3893 3894 if (i == 10) 3895 param_anar_10hdx = 1; 3896 else if (i == 100) 3897 param_anar_100hdx = 1; 3898 } 3899 3900 /* 3901 * Get the parameter values configured in .conf file. 
3902 */ 3903 param_ipg1 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg1", ipg1) & 3904 ERI_MASK_8BIT; 3905 3906 param_ipg2 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg2", ipg2) & 3907 ERI_MASK_8BIT; 3908 3909 param_use_intphy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3910 "use_int_xcvr", use_int_xcvr) & ERI_MASK_1BIT; 3911 3912 param_use_intphy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3913 "pace_size", pace_size) & ERI_MASK_8BIT; 3914 3915 param_autoneg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3916 "adv_autoneg_cap", adv_autoneg_cap) & ERI_MASK_1BIT; 3917 3918 param_autoneg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3919 "adv_autoneg_cap", adv_autoneg_cap) & ERI_MASK_1BIT; 3920 3921 param_anar_100T4 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3922 "adv_100T4_cap", adv_100T4_cap) & ERI_MASK_1BIT; 3923 3924 param_anar_100fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3925 "adv_100fdx_cap", adv_100fdx_cap) & ERI_MASK_1BIT; 3926 3927 param_anar_100hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3928 "adv_100hdx_cap", adv_100hdx_cap) & ERI_MASK_1BIT; 3929 3930 param_anar_10fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3931 "adv_10fdx_cap", adv_10fdx_cap) & ERI_MASK_1BIT; 3932 3933 param_anar_10hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3934 "adv_10hdx_cap", adv_10hdx_cap) & ERI_MASK_1BIT; 3935 3936 param_ipg0 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg0", ipg0) & 3937 ERI_MASK_8BIT; 3938 3939 param_intr_blank_time = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3940 "intr_blank_time", intr_blank_time) & ERI_MASK_8BIT; 3941 3942 param_intr_blank_packets = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3943 "intr_blank_packets", intr_blank_packets) & ERI_MASK_8BIT; 3944 3945 param_lance_mode = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3946 "lance_mode", lance_mode) & ERI_MASK_1BIT; 3947 3948 param_select_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3949 "select_link", select_link) & ERI_MASK_1BIT; 3950 3951 param_default_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3952 "default_link", default_link) & ERI_MASK_1BIT; 3953 3954 param_anar_asm_dir = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3955 "adv_asm_dir_cap", adv_pauseTX_cap) & ERI_MASK_1BIT; 3956 3957 param_anar_pause = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 3958 "adv_pause_cap", adv_pauseRX_cap) & ERI_MASK_1BIT; 3959 3960 if (link_pulse_disabled) 3961 erip->link_pulse_disabled = 1; 3962 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, 0, "link-pulse-disabled")) 3963 erip->link_pulse_disabled = 1; 3964 3965 eri_statinit(erip); 3966 return (0); 3967 3968 } 3969 3970 static void 3971 eri_process_ndd_ioctl(struct eri *erip, queue_t *wq, mblk_t *mp, int cmd) 3972 { 3973 3974 uint32_t old_ipg1, old_ipg2, old_use_int_xcvr, old_autoneg; 3975 uint32_t old_100T4; 3976 uint32_t old_100fdx, old_100hdx, old_10fdx, old_10hdx; 3977 uint32_t old_ipg0, old_lance_mode; 3978 uint32_t old_intr_blank_time, old_intr_blank_packets; 3979 uint32_t old_asm_dir, old_pause; 3980 uint32_t old_select_link, old_default_link; 3981 3982 switch (cmd) { 3983 case ERI_ND_GET: 3984 3985 old_autoneg = param_autoneg; 3986 old_100T4 = param_anar_100T4; 3987 old_100fdx = param_anar_100fdx; 3988 old_100hdx = param_anar_100hdx; 3989 old_10fdx = param_anar_10fdx; 3990 old_10hdx = param_anar_10hdx; 3991 old_asm_dir = param_anar_asm_dir; 3992 old_pause = param_anar_pause; 3993 3994 param_autoneg = old_autoneg & ~ERI_NOTUSR; 3995 param_anar_100T4 = old_100T4 & ~ERI_NOTUSR; 3996 param_anar_100fdx = old_100fdx & ~ERI_NOTUSR; 3997 param_anar_100hdx = old_100hdx & ~ERI_NOTUSR; 3998 param_anar_10fdx = 
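 /* strip the ERI_NOTUSR marker, as for the other anar bits */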
old_10fdx & ~ERI_NOTUSR; 3999 param_anar_10hdx = old_10hdx & ~ERI_NOTUSR; 4000 param_anar_asm_dir = old_asm_dir & ~ERI_NOTUSR; 4001 param_anar_pause = old_pause & ~ERI_NOTUSR; 4002 4003 if (!eri_nd_getset(wq, erip->g_nd, mp)) { 4004 param_autoneg = old_autoneg; 4005 param_anar_100T4 = old_100T4; 4006 param_anar_100fdx = old_100fdx; 4007 param_anar_100hdx = old_100hdx; 4008 param_anar_10fdx = old_10fdx; 4009 param_anar_10hdx = old_10hdx; 4010 param_anar_asm_dir = old_asm_dir; 4011 param_anar_pause = old_pause; 4012 miocnak(wq, mp, 0, EINVAL); 4013 return; 4014 } 4015 param_autoneg = old_autoneg; 4016 param_anar_100T4 = old_100T4; 4017 param_anar_100fdx = old_100fdx; 4018 param_anar_100hdx = old_100hdx; 4019 param_anar_10fdx = old_10fdx; 4020 param_anar_10hdx = old_10hdx; 4021 param_anar_asm_dir = old_asm_dir; 4022 param_anar_pause = old_pause; 4023 4024 qreply(wq, mp); 4025 break; 4026 4027 case ERI_ND_SET: 4028 old_ipg0 = param_ipg0; 4029 old_intr_blank_time = param_intr_blank_time; 4030 old_intr_blank_packets = param_intr_blank_packets; 4031 old_lance_mode = param_lance_mode; 4032 old_ipg1 = param_ipg1; 4033 old_ipg2 = param_ipg2; 4034 old_use_int_xcvr = param_use_intphy; 4035 old_autoneg = param_autoneg; 4036 old_100T4 = param_anar_100T4; 4037 old_100fdx = param_anar_100fdx; 4038 old_100hdx = param_anar_100hdx; 4039 old_10fdx = param_anar_10fdx; 4040 old_10hdx = param_anar_10hdx; 4041 param_autoneg = 0xff; 4042 old_asm_dir = param_anar_asm_dir; 4043 param_anar_asm_dir = 0xff; 4044 old_pause = param_anar_pause; 4045 param_anar_pause = 0xff; 4046 old_select_link = param_select_link; 4047 old_default_link = param_default_link; 4048 4049 if (!eri_nd_getset(wq, erip->g_nd, mp)) { 4050 param_autoneg = old_autoneg; 4051 miocnak(wq, mp, 0, EINVAL); 4052 return; 4053 } 4054 4055 qreply(wq, mp); 4056 4057 if (param_autoneg != 0xff) { 4058 ERI_DEBUG_MSG2(erip, NDD_MSG, 4059 "ndd_ioctl: new param_autoneg %d", param_autoneg); 4060 param_linkup = 0; 4061 erip->stats.link_up = LINK_STATE_DOWN; 4062 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN; 4063 (void) eri_init(erip); 4064 } else { 4065 param_autoneg = old_autoneg; 4066 if ((old_use_int_xcvr != param_use_intphy) || 4067 (old_default_link != param_default_link) || 4068 (old_select_link != param_select_link)) { 4069 param_linkup = 0; 4070 erip->stats.link_up = LINK_STATE_DOWN; 4071 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN; 4072 (void) eri_init(erip); 4073 } else if ((old_ipg1 != param_ipg1) || 4074 (old_ipg2 != param_ipg2) || 4075 (old_ipg0 != param_ipg0) || 4076 (old_intr_blank_time != param_intr_blank_time) || 4077 (old_intr_blank_packets != 4078 param_intr_blank_packets) || 4079 (old_lance_mode != param_lance_mode)) { 4080 param_linkup = 0; 4081 erip->stats.link_up = LINK_STATE_DOWN; 4082 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN; 4083 (void) eri_init(erip); 4084 } 4085 } 4086 break; 4087 } 4088 } 4089 4090 4091 static int 4092 eri_stat_kstat_update(kstat_t *ksp, int rw) 4093 { 4094 struct eri *erip; 4095 struct erikstat *erikp; 4096 struct stats *esp; 4097 boolean_t macupdate = B_FALSE; 4098 4099 erip = (struct eri *)ksp->ks_private; 4100 erikp = (struct erikstat *)ksp->ks_data; 4101 4102 if (rw != KSTAT_READ) 4103 return (EACCES); 4104 /* 4105 * Update all the stats by reading all the counter registers. 4106 * Counter register stats are not updated till they overflow 4107 * and interrupt. 
4108 */
4109
4110 mutex_enter(&erip->xmitlock);
4111 if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
4112 erip->tx_completion =
4113 GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
4114 macupdate |= eri_reclaim(erip, erip->tx_completion);
4115 }
4116 mutex_exit(&erip->xmitlock);
4117 if (macupdate)
4118 mac_tx_update(erip->mh);
4119
4120 eri_savecntrs(erip);
4121
4122 esp = &erip->stats;
4123
4124 erikp->erik_txmac_maxpkt_err.value.ul = esp->txmac_maxpkt_err;
4125 erikp->erik_defer_timer_exp.value.ul = esp->defer_timer_exp;
4126 erikp->erik_peak_attempt_cnt.value.ul = esp->peak_attempt_cnt;
4127 erikp->erik_tx_hang.value.ul = esp->tx_hang;
4128
4129 erikp->erik_no_free_rx_desc.value.ul = esp->no_free_rx_desc;
4130
4131 erikp->erik_rx_hang.value.ul = esp->rx_hang;
4132 erikp->erik_rx_length_err.value.ul = esp->rx_length_err;
4133 erikp->erik_rx_code_viol_err.value.ul = esp->rx_code_viol_err;
4134 erikp->erik_pause_rxcount.value.ul = esp->pause_rxcount;
4135 erikp->erik_pause_oncount.value.ul = esp->pause_oncount;
4136 erikp->erik_pause_offcount.value.ul = esp->pause_offcount;
4137 erikp->erik_pause_time_count.value.ul = esp->pause_time_count;
4138
4139 erikp->erik_inits.value.ul = esp->inits;
4140 erikp->erik_jab.value.ul = esp->jab;
4141 erikp->erik_notmds.value.ul = esp->notmds;
4142 erikp->erik_allocbfail.value.ul = esp->allocbfail;
4143 erikp->erik_drop.value.ul = esp->drop;
4144 erikp->erik_rx_bad_pkts.value.ul = esp->rx_bad_pkts;
4145 erikp->erik_rx_inits.value.ul = esp->rx_inits;
4146 erikp->erik_tx_inits.value.ul = esp->tx_inits;
4147 erikp->erik_rxtag_err.value.ul = esp->rxtag_err;
4148 erikp->erik_parity_error.value.ul = esp->parity_error;
4149 erikp->erik_pci_error_int.value.ul = esp->pci_error_int;
4150 erikp->erik_unknown_fatal.value.ul = esp->unknown_fatal;
4151 erikp->erik_pci_data_parity_err.value.ul = esp->pci_data_parity_err;
4152 erikp->erik_pci_signal_target_abort.value.ul =
4153 esp->pci_signal_target_abort;
4154 erikp->erik_pci_rcvd_target_abort.value.ul =
4155 esp->pci_rcvd_target_abort;
4156 erikp->erik_pci_rcvd_master_abort.value.ul =
4157 esp->pci_rcvd_master_abort;
4158 erikp->erik_pci_signal_system_err.value.ul =
4159 esp->pci_signal_system_err;
4160 erikp->erik_pci_det_parity_err.value.ul = esp->pci_det_parity_err;
4161
4162 erikp->erik_pmcap.value.ul = esp->pmcap;
4163
4164 return (0);
4165 }
4166
4167 static void
4168 eri_statinit(struct eri *erip)
4169 {
4170 struct kstat *ksp;
4171 struct erikstat *erikp;
4172
4173 if ((ksp = kstat_create("eri", erip->instance, "driver_info", "net",
4174 KSTAT_TYPE_NAMED,
4175 sizeof (struct erikstat) / sizeof (kstat_named_t), 0)) == NULL) {
4176 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
4177 kstat_create_fail_msg);
4178 return;
4179 }
4180
4181 erip->ksp = ksp;
4182 erikp = (struct erikstat *)(ksp->ks_data);
4183 /*
4184 * Driver-private kstat variables
4185 */
4186
4187 kstat_named_init(&erikp->erik_inits, "inits", KSTAT_DATA_ULONG);
4188
4189 kstat_named_init(&erikp->erik_txmac_maxpkt_err, "txmac_maxpkt_err",
4190 KSTAT_DATA_ULONG);
4191 kstat_named_init(&erikp->erik_defer_timer_exp, "defer_timer_exp",
4192 KSTAT_DATA_ULONG);
4193 kstat_named_init(&erikp->erik_peak_attempt_cnt, "peak_attempt_cnt",
4194 KSTAT_DATA_ULONG);
4195 kstat_named_init(&erikp->erik_tx_hang, "tx_hang", KSTAT_DATA_ULONG);
4196
4197 kstat_named_init(&erikp->erik_no_free_rx_desc, "no_free_rx_desc",
4198 KSTAT_DATA_ULONG);
4199 kstat_named_init(&erikp->erik_rx_hang, "rx_hang", KSTAT_DATA_ULONG);
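/* Receive-side error and pause flow-control counters. */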
4200 kstat_named_init(&erikp->erik_rx_length_err, "rx_length_err",
4201 KSTAT_DATA_ULONG);
4202 kstat_named_init(&erikp->erik_rx_code_viol_err, "rx_code_viol_err",
4203 KSTAT_DATA_ULONG);
4204
4205 kstat_named_init(&erikp->erik_pause_rxcount, "pause_rcv_cnt",
4206 KSTAT_DATA_ULONG);
4207
4208 kstat_named_init(&erikp->erik_pause_oncount, "pause_on_cnt",
4209 KSTAT_DATA_ULONG);
4210
4211 kstat_named_init(&erikp->erik_pause_offcount, "pause_off_cnt",
4212 KSTAT_DATA_ULONG);
4213 kstat_named_init(&erikp->erik_pause_time_count, "pause_time_cnt",
4214 KSTAT_DATA_ULONG);
4215
4216 kstat_named_init(&erikp->erik_jab, "jabber", KSTAT_DATA_ULONG);
4217 kstat_named_init(&erikp->erik_notmds, "no_tmds", KSTAT_DATA_ULONG);
4218 kstat_named_init(&erikp->erik_allocbfail, "allocbfail",
4219 KSTAT_DATA_ULONG);
4220
4221 kstat_named_init(&erikp->erik_drop, "drop", KSTAT_DATA_ULONG);
4222
4223 kstat_named_init(&erikp->erik_rx_bad_pkts, "bad_pkts",
4224 KSTAT_DATA_ULONG);
4225
4226 kstat_named_init(&erikp->erik_rx_inits, "rx_inits", KSTAT_DATA_ULONG);
4227
4228 kstat_named_init(&erikp->erik_tx_inits, "tx_inits", KSTAT_DATA_ULONG);
4229
4230 kstat_named_init(&erikp->erik_rxtag_err, "rxtag_error",
4231 KSTAT_DATA_ULONG);
4232
4233 kstat_named_init(&erikp->erik_parity_error, "parity_error",
4234 KSTAT_DATA_ULONG);
4235
4236 kstat_named_init(&erikp->erik_pci_error_int, "pci_error_interrupt",
4237 KSTAT_DATA_ULONG);
4238 kstat_named_init(&erikp->erik_unknown_fatal, "unknown_fatal",
4239 KSTAT_DATA_ULONG);
4240 kstat_named_init(&erikp->erik_pci_data_parity_err,
4241 "pci_data_parity_err", KSTAT_DATA_ULONG);
4242 kstat_named_init(&erikp->erik_pci_signal_target_abort,
4243 "pci_signal_target_abort", KSTAT_DATA_ULONG);
4244 kstat_named_init(&erikp->erik_pci_rcvd_target_abort,
4245 "pci_rcvd_target_abort", KSTAT_DATA_ULONG);
4246 kstat_named_init(&erikp->erik_pci_rcvd_master_abort,
4247 "pci_rcvd_master_abort", KSTAT_DATA_ULONG);
4248 kstat_named_init(&erikp->erik_pci_signal_system_err,
4249 "pci_signal_system_err", KSTAT_DATA_ULONG);
4250 kstat_named_init(&erikp->erik_pci_det_parity_err,
4251 "pci_det_parity_err", KSTAT_DATA_ULONG);
4252
4253 kstat_named_init(&erikp->erik_pmcap, "pmcap", KSTAT_DATA_ULONG);
4254
4255
4256 ksp->ks_update = eri_stat_kstat_update;
4257 ksp->ks_private = (void *)erip;
4258 kstat_install(ksp);
4259 }
4260
4261
4262 /* <<<<<<<<<<<<<<<<<<<<<<< NDD SUPPORT FUNCTIONS >>>>>>>>>>>>>>>>>>> */
4263 /*
4264 * ndd support functions to get/set parameters
4265 */
4266 /* Free the Named Dispatch Table by calling eri_nd_free */
4267 static void
4268 eri_param_cleanup(struct eri *erip)
4269 {
4270 if (erip->g_nd)
4271 (void) eri_nd_free(&erip->g_nd);
4272 }
4273
4274 /*
4275 * Extracts the value from the eri parameter array and prints the
4276 * parameter value. cp points to the required parameter.
4277 */
4278 /* ARGSUSED */
4279 static int
4280 eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp)
4281 {
4282 param_t *eripa = (void *)cp;
4283 int param_len = 1;
4284 uint32_t param_val;
4285 mblk_t *nmp;
4286 int ok;
4287
4288 param_val = eripa->param_val;
4289 /*
4290 * Calculate space required in mblk.
4291 * Remember to include NULL terminator.
4292 */
4293 do {
4294 param_len++;
4295 param_val /= 10;
4296 } while (param_val);
4297
4298 ok = eri_mk_mblk_tail_space(mp, &nmp, param_len);
4299 if (ok == 0) {
4300 (void) sprintf((char *)nmp->b_wptr, "%d", eripa->param_val);
4301 nmp->b_wptr += param_len;
4302 }
4303
4304 return (ok);
4305 }
4306
4307 /*
4308 * Check if there is space for p_val at the end of the mblk.
4309 * If not, allocate a new 1k mblk.
4310 */
4311 static int
4312 eri_mk_mblk_tail_space(mblk_t *mp, mblk_t **nmp, size_t sz)
4313 {
4314 mblk_t *tmp = mp;
4315
4316 while (tmp->b_cont)
4317 tmp = tmp->b_cont;
4318
4319 if (MBLKTAIL(tmp) < sz) {
4320 if ((tmp->b_cont = allocb(1024, BPRI_HI)) == NULL)
4321 return (ENOMEM);
4322 tmp = tmp->b_cont;
4323 }
4324 *nmp = tmp;
4325 return (0);
4326 }
4327
4328 /*
4329 * Register each element of the parameter array with the
4330 * named dispatch handler. Each element is loaded using
4331 * eri_nd_load().
4332 */
4333 static int
4334 eri_param_register(struct eri *erip, param_t *eripa, int cnt)
4335 {
4336 /* cnt is the number of elements present */
4337 /* in the parameter array */
4338
4339 int i;
4340
4341 for (i = 0; i < cnt; i++, eripa++) {
4342 pfi_t setter = (pfi_t)eri_param_set;
4343
4344 switch (eripa->param_name[0]) {
4345 case '+': /* read-write */
4346 setter = (pfi_t)eri_param_set;
4347 break;
4348
4349 case '-': /* read-only */
4350 setter = NULL;
4351 break;
4352
4353 case '!': /* read-only, not displayed */
4354 case '%': /* read-write, not displayed */
4355 continue;
4356 }
4357
4358 if (!eri_nd_load(&erip->g_nd, eripa->param_name + 1,
4359 (pfi_t)eri_param_get, setter, (caddr_t)eripa)) {
4360 (void) eri_nd_free(&erip->g_nd);
4361 return (B_FALSE);
4362 }
4363 }
4364
4365 return (B_TRUE);
4366 }
4367
4368 /*
4369 * Validate the new value and, if it is acceptable, set the eri
4370 * parameter pointed to by 'cp' to that value.
4371 */
4372 /* ARGSUSED */
4373 static int
4374 eri_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp)
4375 {
4376 char *end;
4377 long new_value;
4378 param_t *eripa = (void *)cp;
4379
4380 if (ddi_strtol(value, &end, 10, &new_value) != 0)
4381 return (EINVAL);
4382 if (end == value || new_value < eripa->param_min ||
4383 new_value > eripa->param_max) {
4384 return (EINVAL);
4385 }
4386 eripa->param_val = (uint32_t)new_value;
4387 return (0);
4388
4389 }
4390
4391 /* Free the table pointed to by 'nd_pparam' */
4392 static void
4393 eri_nd_free(caddr_t *nd_pparam)
4394 {
4395 ND *nd;
4396
4397 if ((nd = (void *)(*nd_pparam)) != NULL) {
4398 if (nd->nd_tbl)
4399 kmem_free(nd->nd_tbl, nd->nd_size);
4400 kmem_free(nd, sizeof (ND));
4401 *nd_pparam = NULL;
4402 }
4403 }
4404
4405 static int
4406 eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp)
4407 {
4408 int err;
4409 IOCP iocp;
4410 MBLKP mp1;
4411 ND *nd;
4412 NDE *nde;
4413 char *valp;
4414 size_t avail;
4415 mblk_t *nmp;
4416
4417 if (!nd_param)
4418 return (B_FALSE);
4419
4420 nd = (void *)nd_param;
4421 iocp = (void *)mp->b_rptr;
4422 if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
4423 mp->b_datap->db_type = M_IOCACK;
4424 iocp->ioc_count = 0;
4425 iocp->ioc_error = EINVAL;
4426 return (B_TRUE);
4427 }
4428 /*
4429 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
4430 * However, existing code sends in some big buffers.
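 * Any continuation blocks are therefore freed below, and only the
 * first data block is searched for the parameter name and value.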
4431 */ 4432 avail = iocp->ioc_count; 4433 if (mp1->b_cont) { 4434 freemsg(mp1->b_cont); 4435 mp1->b_cont = NULL; 4436 } 4437 4438 mp1->b_datap->db_lim[-1] = '\0'; /* Force null termination */ 4439 valp = (char *)mp1->b_rptr; 4440 4441 for (nde = nd->nd_tbl; /* */; nde++) { 4442 if (!nde->nde_name) 4443 return (B_FALSE); 4444 if (strcmp(nde->nde_name, valp) == 0) 4445 break; 4446 } 4447 err = EINVAL; 4448 4449 while (*valp++) 4450 ; 4451 4452 if (!*valp || valp >= (char *)mp1->b_wptr) 4453 valp = NULL; 4454 4455 switch (iocp->ioc_cmd) { 4456 case ND_GET: 4457 /* 4458 * (XXX) hack: "*valp" is size of user buffer for copyout. If result 4459 * of action routine is too big, free excess and return ioc_rval as buf 4460 * size needed. Return as many mblocks as will fit, free the rest. For 4461 * backward compatibility, assume size of orig ioctl buffer if "*valp" 4462 * bad or not given. 4463 */ 4464 if (valp) 4465 (void) ddi_strtol(valp, NULL, 10, (long *)&avail); 4466 /* We overwrite the name/value with the reply data */ 4467 { 4468 mblk_t *mp2 = mp1; 4469 4470 while (mp2) { 4471 mp2->b_wptr = mp2->b_rptr; 4472 mp2 = mp2->b_cont; 4473 } 4474 } 4475 err = (*nde->nde_get_pfi)(q, mp1, nde->nde_data, iocp->ioc_cr); 4476 if (!err) { 4477 size_t size_out; 4478 ssize_t excess; 4479 4480 iocp->ioc_rval = 0; 4481 4482 /* Tack on the null */ 4483 err = eri_mk_mblk_tail_space(mp1, &nmp, 1); 4484 if (!err) { 4485 *nmp->b_wptr++ = '\0'; 4486 size_out = msgdsize(mp1); 4487 excess = size_out - avail; 4488 if (excess > 0) { 4489 iocp->ioc_rval = (unsigned)size_out; 4490 size_out -= excess; 4491 (void) adjmsg(mp1, -(excess + 1)); 4492 err = eri_mk_mblk_tail_space(mp1, 4493 &nmp, 1); 4494 if (!err) 4495 *nmp->b_wptr++ = '\0'; 4496 else 4497 size_out = 0; 4498 } 4499 4500 } else 4501 size_out = 0; 4502 4503 iocp->ioc_count = size_out; 4504 } 4505 break; 4506 4507 case ND_SET: 4508 if (valp) { 4509 err = (*nde->nde_set_pfi)(q, mp1, valp, 4510 nde->nde_data, iocp->ioc_cr); 4511 iocp->ioc_count = 0; 4512 freemsg(mp1); 4513 mp->b_cont = NULL; 4514 } 4515 break; 4516 } 4517 4518 iocp->ioc_error = err; 4519 mp->b_datap->db_type = M_IOCACK; 4520 return (B_TRUE); 4521 } 4522 4523 /* 4524 * Load 'name' into the named dispatch table pointed to by 'ndp'. 4525 * 'ndp' should be the address of a char pointer cell. If the table 4526 * does not exist (*ndp == 0), a new table is allocated and 'ndp' 4527 * is stuffed. If there is not enough space in the table for a new 4528 * entry, more space is allocated. 
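 *
 * For illustration, eri_param_register() above loads each ndd
 * parameter with a call of this form ('setter' is NULL for
 * read-only parameters):
 *
 *	if (!eri_nd_load(&erip->g_nd, eripa->param_name + 1,
 *	    (pfi_t)eri_param_get, setter, (caddr_t)eripa)) {
 *		(void) eri_nd_free(&erip->g_nd);
 *		return (B_FALSE);
 *	}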
4529 */ 4530 static boolean_t 4531 eri_nd_load(caddr_t *nd_pparam, char *name, pfi_t get_pfi, 4532 pfi_t set_pfi, caddr_t data) 4533 { 4534 ND *nd; 4535 NDE *nde; 4536 4537 if (!nd_pparam) 4538 return (B_FALSE); 4539 4540 if ((nd = (void *)(*nd_pparam)) == NULL) { 4541 if ((nd = (ND *)kmem_zalloc(sizeof (ND), KM_NOSLEEP)) 4542 == NULL) 4543 return (B_FALSE); 4544 *nd_pparam = (caddr_t)nd; 4545 } 4546 if (nd->nd_tbl) { 4547 for (nde = nd->nd_tbl; nde->nde_name; nde++) { 4548 if (strcmp(name, nde->nde_name) == 0) 4549 goto fill_it; 4550 } 4551 } 4552 if (nd->nd_free_count <= 1) { 4553 if ((nde = (NDE *)kmem_zalloc(nd->nd_size + 4554 NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL) 4555 return (B_FALSE); 4556 4557 nd->nd_free_count += NDE_ALLOC_COUNT; 4558 if (nd->nd_tbl) { 4559 bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size); 4560 kmem_free((char *)nd->nd_tbl, nd->nd_size); 4561 } else { 4562 nd->nd_free_count--; 4563 nde->nde_name = "?"; 4564 nde->nde_get_pfi = nd_get_names; 4565 nde->nde_set_pfi = nd_set_default; 4566 } 4567 nde->nde_data = (caddr_t)nd; 4568 nd->nd_tbl = nde; 4569 nd->nd_size += NDE_ALLOC_SIZE; 4570 } 4571 for (nde = nd->nd_tbl; nde->nde_name; nde++) 4572 ; 4573 nd->nd_free_count--; 4574 fill_it: 4575 nde->nde_name = name; 4576 nde->nde_get_pfi = get_pfi ? get_pfi : nd_get_default; 4577 nde->nde_set_pfi = set_pfi ? set_pfi : nd_set_default; 4578 nde->nde_data = data; 4579 return (B_TRUE); 4580 } 4581 4582 /* 4583 * Hardening Functions 4584 * New Section 4585 */ 4586 #ifdef DEBUG 4587 /*PRINTFLIKE5*/ 4588 static void 4589 eri_debug_msg(const char *file, int line, struct eri *erip, 4590 debug_msg_t type, const char *fmt, ...) 4591 { 4592 char msg_buffer[255]; 4593 va_list ap; 4594 4595 va_start(ap, fmt); 4596 (void) vsprintf(msg_buffer, fmt, ap); 4597 va_end(ap); 4598 4599 if (eri_msg_out & ERI_CON_MSG) { 4600 if (((type <= eri_debug_level) && eri_debug_all) || 4601 ((type == eri_debug_level) && !eri_debug_all)) { 4602 if (erip) 4603 cmn_err(CE_CONT, "D: %s %s%d:(%s%d) %s\n", 4604 debug_msg_string[type], file, line, 4605 ddi_driver_name(erip->dip), erip->instance, 4606 msg_buffer); 4607 else 4608 cmn_err(CE_CONT, "D: %s %s(%d): %s\n", 4609 debug_msg_string[type], file, 4610 line, msg_buffer); 4611 } 4612 } 4613 } 4614 #endif 4615 4616 4617 /*PRINTFLIKE4*/ 4618 static void 4619 eri_fault_msg(struct eri *erip, uint_t severity, msg_t type, 4620 const char *fmt, ...) 
{
4622 char msg_buffer[255];
4623 va_list ap;
4624
4625 va_start(ap, fmt);
4626 (void) vsprintf(msg_buffer, fmt, ap);
4627 va_end(ap);
4628
4629 if (erip == NULL) {
4630 cmn_err(CE_NOTE, "eri : %s", msg_buffer);
4631 return;
4632 }
4633
4634 if (severity == SEVERITY_HIGH) {
4635 cmn_err(CE_WARN, "%s%d : %s", ddi_driver_name(erip->dip),
4636 erip->instance, msg_buffer);
4637 } else switch (type) {
4638 case ERI_VERB_MSG:
4639 cmn_err(CE_CONT, "?%s%d : %s", ddi_driver_name(erip->dip),
4640 erip->instance, msg_buffer);
4641 break;
4642 case ERI_LOG_MSG:
4643 cmn_err(CE_NOTE, "^%s%d : %s", ddi_driver_name(erip->dip),
4644 erip->instance, msg_buffer);
4645 break;
4646 case ERI_BUF_MSG:
4647 cmn_err(CE_NOTE, "!%s%d : %s", ddi_driver_name(erip->dip),
4648 erip->instance, msg_buffer);
4649 break;
4650 case ERI_CON_MSG:
4651 cmn_err(CE_CONT, "%s%d : %s", ddi_driver_name(erip->dip),
4652 erip->instance, msg_buffer);
break;
4653 default:
4654 break;
4655 }
4656 }
4657
4658 /*
4659 * Transceiver (xcvr) Functions
4660 * New Section
4661 */
4662 /*
4663 * eri_stop_timer() is called before doing any link-related
4664 * processing. It takes the "linklock" mutex to protect the link-related
4665 * data structures. The lock is subsequently released in eri_start_timer().
4666 */
4667 static void
4668 eri_stop_timer(struct eri *erip)
4669 {
4670 timeout_id_t id;
4671 mutex_enter(&erip->linklock);
4672 if (erip->timerid) {
4673 erip->flags |= ERI_NOTIMEOUTS; /* prevent multiple timeouts */
4674 id = erip->timerid;
4675 erip->timerid = 0; /* prevent other threads from untimeout */
4676 mutex_exit(&erip->linklock); /* no mutex across untimeout() */
4677
4678 (void) untimeout(id);
4679 mutex_enter(&erip->linklock); /* acquire mutex again */
4680 erip->flags &= ~ERI_NOTIMEOUTS;
4681 }
4682 }
4683
4684 /*
4685 * If the msec parameter is zero, just release "linklock".
4686 */
4687 static void
4688 eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec)
4689 {
4690 if (msec) {
4691 if (!(erip->flags & ERI_NOTIMEOUTS) &&
4692 (erip->flags & ERI_RUNNING)) {
4693 erip->timerid = timeout(func, (caddr_t)erip,
4694 drv_usectohz(1000*msec));
4695 }
4696 }
4697
4698 mutex_exit(&erip->linklock);
4699 }
4700
4701 static int
4702 eri_new_xcvr(struct eri *erip)
4703 {
4704 int status;
4705 uint32_t cfg;
4706 int old_transceiver;
4707
4708 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4709 PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
4710 erip->stats.pmcap = ERI_PMCAP_NONE;
4711
4712 status = B_FALSE; /* no change */
4713 cfg = GET_MIFREG(mif_cfg);
4714 ERI_DEBUG_MSG2(erip, MIF_MSG, "cfg value = %X", cfg);
4715 old_transceiver = param_transceiver;
4716
4717 if ((cfg & ERI_MIF_CFGM1) && !use_int_xcvr) {
4718 ERI_DEBUG_MSG1(erip, PHY_MSG, "Found External XCVR");
4719 /*
4720 * An external transceiver was found, and it takes priority
4721 * over the internal one, provided the use_int_xcvr flag
4722 * is false.
4723 */
4724 if (old_transceiver != EXTERNAL_XCVR) {
4725 /*
4726 * External transceiver has just been plugged
4727 * in. Isolate the internal Transceiver.
4728 */
4729 if (old_transceiver == INTERNAL_XCVR) {
4730 eri_mii_write(erip, ERI_PHY_BMCR,
4731 (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4732 PHY_BMCR_LPBK));
4733 }
4734 status = B_TRUE;
4735 }
4736 /*
4737 * Select the external transceiver.
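 * Selecting it programs the PHY address and the port-select bit in
 * the MIF configuration register, and enables the MII buffer output
 * drivers through the XIF configuration register.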
*/
4739 erip->phyad = ERI_EXTERNAL_PHYAD;
4740 param_transceiver = EXTERNAL_XCVR;
4741 erip->mif_config &= ~ERI_MIF_CFGPD;
4742 erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4743 erip->mif_config |= ERI_MIF_CFGPS;
4744 PUT_MIFREG(mif_cfg, erip->mif_config);
4745
4746 PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIIBUF_OE);
4747 drv_usecwait(ERI_MIF_POLL_DELAY);
4748 } else if (cfg & ERI_MIF_CFGM0) {
4749 ERI_DEBUG_MSG1(erip, PHY_MSG, "Found Internal XCVR");
4750 /*
4751 * An Internal Transceiver was found or the
4752 * use_int_xcvr flag is true.
4753 */
4754 if (old_transceiver != INTERNAL_XCVR) {
4755 /*
4756 * The external transceiver has just been
4757 * disconnected or we're moving from a no
4758 * transceiver state.
4759 */
4760 if ((old_transceiver == EXTERNAL_XCVR) &&
4761 (cfg & ERI_MIF_CFGM0)) {
4762 eri_mii_write(erip, ERI_PHY_BMCR,
4763 (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4764 PHY_BMCR_LPBK));
4765 }
4766 status = B_TRUE;
4767 }
4768 /*
4769 * Select the internal transceiver.
4770 */
4771 erip->phyad = ERI_INTERNAL_PHYAD;
4772 param_transceiver = INTERNAL_XCVR;
4773 erip->mif_config &= ~ERI_MIF_CFGPD;
4774 erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4775 erip->mif_config &= ~ERI_MIF_CFGPS;
4776 PUT_MIFREG(mif_cfg, erip->mif_config);
4777
4778 PUT_MACREG(xifc, GET_MACREG(xifc) & ~BMAC_XIFC_MIIBUF_OE);
4779 drv_usecwait(ERI_MIF_POLL_DELAY);
4780 } else {
4781 /*
4782 * Did not find a valid xcvr.
4783 */
4784 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4785 "Eri_new_xcvr : Select None");
4786 param_transceiver = NO_XCVR;
4787 erip->xcvr_status = PHY_LINK_DOWN;
4788 }
4789
4790 if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4791 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4792 (void *)4000) == DDI_SUCCESS)
4793 erip->stats.pmcap = ERI_PMCAP_4MHZ;
4794 }
4795
4796 return (status);
4797 }
4798
4799 /*
4800 * This function is used for timers. No locks are held on timer expiry.
4801 */
4802 static void
4803 eri_check_link(struct eri *erip)
4804 {
4805 link_state_t linkupdate = eri_check_link_noind(erip);
4806
4807 if (linkupdate != LINK_STATE_UNKNOWN)
4808 mac_link_update(erip->mh, linkupdate);
4809 }
4810
4811 /*
4812 * Compare the xcvr recorded in our structure to the xcvr that we get
4813 * from eri_check_mii_xcvr(). If they are different, mark the link
4814 * down, reset the xcvr, and return.
4815 *
4816 * Note: without the MII connector, conditions can never change such
4817 * that an external PHY would come into use, so this code has been
4818 * cleaned up to no longer call that function or change the xcvr.
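 *
 * Lock order: "linklock" (taken in eri_stop_timer()) is acquired
 * before "xmitlock", which in turn is acquired before "xcvrlock";
 * they are dropped in the reverse order.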
4819 */ 4820 static uint32_t 4821 eri_check_link_noind(struct eri *erip) 4822 { 4823 uint16_t stat, control, mif_ints; 4824 uint32_t link_timeout = ERI_LINKCHECK_TIMER; 4825 uint32_t linkupdate = 0; 4826 4827 eri_stop_timer(erip); /* acquire linklock */ 4828 4829 mutex_enter(&erip->xmitlock); 4830 mutex_enter(&erip->xcvrlock); 4831 eri_mif_poll(erip, MIF_POLL_STOP); 4832 4833 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat); 4834 mif_ints = erip->mii_status ^ stat; 4835 4836 if (erip->openloop_autoneg) { 4837 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat); 4838 ERI_DEBUG_MSG3(erip, XCVR_MSG, 4839 "eri_check_link:openloop stat %X mii_status %X", 4840 stat, erip->mii_status); 4841 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control); 4842 if (!(stat & PHY_BMSR_LNKSTS) && 4843 (erip->openloop_autoneg < 2)) { 4844 if (param_speed) { 4845 control &= ~PHY_BMCR_100M; 4846 param_anlpar_100hdx = 0; 4847 param_anlpar_10hdx = 1; 4848 param_speed = 0; 4849 erip->stats.ifspeed = 10; 4850 4851 } else { 4852 control |= PHY_BMCR_100M; 4853 param_anlpar_100hdx = 1; 4854 param_anlpar_10hdx = 0; 4855 param_speed = 1; 4856 erip->stats.ifspeed = 100; 4857 } 4858 ERI_DEBUG_MSG3(erip, XCVR_MSG, 4859 "eri_check_link: trying speed %X stat %X", 4860 param_speed, stat); 4861 4862 erip->openloop_autoneg ++; 4863 eri_mii_write(erip, ERI_PHY_BMCR, control); 4864 link_timeout = ERI_P_FAULT_TIMER; 4865 } else { 4866 erip->openloop_autoneg = 0; 4867 linkupdate = eri_mif_check(erip, stat, stat); 4868 if (erip->openloop_autoneg) 4869 link_timeout = ERI_P_FAULT_TIMER; 4870 } 4871 eri_mif_poll(erip, MIF_POLL_START); 4872 mutex_exit(&erip->xcvrlock); 4873 mutex_exit(&erip->xmitlock); 4874 4875 eri_start_timer(erip, eri_check_link, link_timeout); 4876 return (linkupdate); 4877 } 4878 4879 linkupdate = eri_mif_check(erip, mif_ints, stat); 4880 eri_mif_poll(erip, MIF_POLL_START); 4881 mutex_exit(&erip->xcvrlock); 4882 mutex_exit(&erip->xmitlock); 4883 4884 #ifdef ERI_RMAC_HANG_WORKAROUND 4885 /* 4886 * Check if rx hung. 
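 * Detection takes two passes: the first pass snapshots the RX FIFO
 * read/write pointers, and the following pass declares a hang and
 * reinitializes the chip if the MAC is in the overflow state while
 * the FIFO pointers appear frozen.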
4887 */ 4888 if ((erip->flags & ERI_RUNNING) && param_linkup) { 4889 if (erip->check_rmac_hang) { 4890 ERI_DEBUG_MSG5(erip, 4891 NONFATAL_MSG, 4892 "check1 %d: macsm:%8x wr:%2x rd:%2x", 4893 erip->check_rmac_hang, 4894 GET_MACREG(macsm), 4895 GET_ERXREG(rxfifo_wr_ptr), 4896 GET_ERXREG(rxfifo_rd_ptr)); 4897 4898 erip->check_rmac_hang = 0; 4899 erip->check2_rmac_hang ++; 4900 4901 erip->rxfifo_wr_ptr_c = GET_ERXREG(rxfifo_wr_ptr); 4902 erip->rxfifo_rd_ptr_c = GET_ERXREG(rxfifo_rd_ptr); 4903 4904 eri_start_timer(erip, eri_check_link, 4905 ERI_CHECK_HANG_TIMER); 4906 return (linkupdate); 4907 } 4908 4909 if (erip->check2_rmac_hang) { 4910 ERI_DEBUG_MSG5(erip, 4911 NONFATAL_MSG, 4912 "check2 %d: macsm:%8x wr:%2x rd:%2x", 4913 erip->check2_rmac_hang, 4914 GET_MACREG(macsm), 4915 GET_ERXREG(rxfifo_wr_ptr), 4916 GET_ERXREG(rxfifo_rd_ptr)); 4917 4918 erip->check2_rmac_hang = 0; 4919 4920 erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr); 4921 erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr); 4922 4923 if (((GET_MACREG(macsm) & BMAC_OVERFLOW_STATE) == 4924 BMAC_OVERFLOW_STATE) && 4925 ((erip->rxfifo_wr_ptr_c == erip->rxfifo_rd_ptr_c) || 4926 ((erip->rxfifo_rd_ptr == erip->rxfifo_rd_ptr_c) && 4927 (erip->rxfifo_wr_ptr == erip->rxfifo_wr_ptr_c)))) { 4928 ERI_DEBUG_MSG1(erip, 4929 NONFATAL_MSG, 4930 "RX hang: Reset mac"); 4931 4932 HSTAT(erip, rx_hang); 4933 erip->linkcheck = 1; 4934 4935 eri_start_timer(erip, eri_check_link, 4936 ERI_LINKCHECK_TIMER); 4937 (void) eri_init(erip); 4938 return (linkupdate); 4939 } 4940 } 4941 } 4942 #endif 4943 4944 /* 4945 * Check if tx hung. 4946 */ 4947 #ifdef ERI_TX_HUNG 4948 if ((erip->flags & ERI_RUNNING) && param_linkup && 4949 (eri_check_txhung(erip))) { 4950 HSTAT(erip, tx_hang); 4951 eri_reinit_txhung++; 4952 erip->linkcheck = 1; 4953 eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER); 4954 (void) eri_init(erip); 4955 return (linkupdate); 4956 } 4957 #endif 4958 4959 #ifdef ERI_PM_WORKAROUND 4960 if (erip->stats.pmcap == ERI_PMCAP_NONE) { 4961 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED, 4962 (void *)4000) == DDI_SUCCESS) 4963 erip->stats.pmcap = ERI_PMCAP_4MHZ; 4964 4965 ERI_DEBUG_MSG2(erip, NONFATAL_MSG, 4966 "eri_check_link: PMCAP %d", erip->stats.pmcap); 4967 } 4968 #endif 4969 if ((!param_mode) && (param_transceiver != NO_XCVR)) 4970 eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER); 4971 else 4972 eri_start_timer(erip, eri_check_link, ERI_LINKCHECK_TIMER); 4973 return (linkupdate); 4974 } 4975 4976 static link_state_t 4977 eri_mif_check(struct eri *erip, uint16_t mif_ints, uint16_t mif_data) 4978 { 4979 uint16_t control, aner, anlpar, anar, an_common; 4980 uint16_t old_mintrans; 4981 int restart_autoneg = 0; 4982 link_state_t retv; 4983 4984 ERI_DEBUG_MSG4(erip, XCVR_MSG, "eri_mif_check: mif_mask: %X, %X, %X", 4985 erip->mif_mask, mif_ints, mif_data); 4986 4987 mif_ints &= ~erip->mif_mask; 4988 erip->mii_status = mif_data; 4989 /* 4990 * Now check if someone has pulled the xcvr or 4991 * a new xcvr has shown up 4992 * If so try to find out what the new xcvr setup is. 
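 * A transceiver that has been pulled reads back as all-ones (0xFFFF)
 * on the MII, which is what the check below tests for.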
*/
4994 if (((mif_ints & PHY_BMSR_RES1) && (mif_data == 0xFFFF)) ||
4995 (param_transceiver == NO_XCVR)) {
4996 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4997 "No status transceiver gone");
4998 if (eri_new_xcvr(erip)) {
4999 if (param_transceiver != NO_XCVR) {
5000 /*
5001 * Reset the new PHY and bring up the link
5002 */
5003 (void) eri_reset_xcvr(erip);
5004 }
5005 }
5006 return (LINK_STATE_UNKNOWN);
5007 }
5008
5009 if (param_autoneg && (mif_ints & PHY_BMSR_LNKSTS) &&
5010 (mif_data & PHY_BMSR_LNKSTS) && (mif_data & PHY_BMSR_ANC)) {
5011 mif_ints |= PHY_BMSR_ANC;
5012 ERI_DEBUG_MSG3(erip, PHY_MSG,
5013 "eri_mif_check: Set ANC bit mif_data %X mif_ints %X",
5014 mif_data, mif_ints);
5015 }
5016
5017 if ((mif_ints & PHY_BMSR_ANC) && (mif_data & PHY_BMSR_ANC)) {
5018 ERI_DEBUG_MSG1(erip, PHY_MSG, "Auto-negotiation interrupt.");
5019
5020 /*
5021 * Switch off Auto-negotiation interrupts and switch on
5022 * Link status interrupts.
5023 */
5024 erip->mif_mask |= PHY_BMSR_ANC;
5025 erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5026 (void) eri_mii_read(erip, ERI_PHY_ANER, &aner);
5027 param_aner_lpancap = 1 && (aner & PHY_ANER_LPNW);
5028 if ((aner & PHY_ANER_MLF) || (eri_force_mlf)) {
5029 ERI_DEBUG_MSG1(erip, XCVR_MSG,
5030 "parallel detection fault");
5031 /*
5032 * Consider doing open loop auto-negotiation.
5033 */
5034 ERI_DEBUG_MSG1(erip, XCVR_MSG,
5035 "Going into Open loop Auto-neg");
5036 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5037
5038 control &= ~(PHY_BMCR_ANE | PHY_BMCR_RAN |
5039 PHY_BMCR_FDX);
5040 if (param_anar_100fdx || param_anar_100hdx) {
5041 control |= PHY_BMCR_100M;
5042 param_anlpar_100hdx = 1;
5043 param_anlpar_10hdx = 0;
5044 param_speed = 1;
5045 erip->stats.ifspeed = 100;
5046
5047 } else if (param_anar_10fdx || param_anar_10hdx) {
5048 control &= ~PHY_BMCR_100M;
5049 param_anlpar_100hdx = 0;
5050 param_anlpar_10hdx = 1;
5051 param_speed = 0;
5052 erip->stats.ifspeed = 10;
5053 } else {
5054 ERI_FAULT_MSG1(erip, SEVERITY_NONE,
5055 ERI_VERB_MSG,
5056 "Transceiver speed set incorrectly.");
5057 return (0);
5058 }
5059
5060 (void) eri_mii_write(erip, ERI_PHY_BMCR, control);
5061 param_anlpar_100fdx = 0;
5062 param_anlpar_10fdx = 0;
5063 param_mode = 0;
5064 erip->openloop_autoneg = 1;
5065 return (0);
5066 }
5067 (void) eri_mii_read(erip, ERI_PHY_ANLPAR, &anlpar);
5068 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5069 an_common = anar & anlpar;
5070
5071 ERI_DEBUG_MSG2(erip, XCVR_MSG, "an_common = 0x%X", an_common);
5072
5073 if (an_common & (PHY_ANLPAR_TXFDX | PHY_ANLPAR_TX)) {
5074 param_speed = 1;
5075 erip->stats.ifspeed = 100;
5076 param_mode = 1 && (an_common & PHY_ANLPAR_TXFDX);
5077
5078 } else if (an_common & (PHY_ANLPAR_10FDX | PHY_ANLPAR_10)) {
5079 param_speed = 0;
5080 erip->stats.ifspeed = 10;
5081 param_mode = 1 && (an_common & PHY_ANLPAR_10FDX);
5082
5083 } else an_common = 0x0;
5084
5085 if (!an_common) {
5086 ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
5087 "Transceiver: anar not set with speed selection");
5088 }
5089 param_anlpar_100T4 = 1 && (anlpar & PHY_ANLPAR_T4);
5090 param_anlpar_100fdx = 1 && (anlpar & PHY_ANLPAR_TXFDX);
5091 param_anlpar_100hdx = 1 && (anlpar & PHY_ANLPAR_TX);
5092 param_anlpar_10fdx = 1 && (anlpar & PHY_ANLPAR_10FDX);
5093 param_anlpar_10hdx = 1 && (anlpar & PHY_ANLPAR_10);
5094
5095 ERI_DEBUG_MSG2(erip, PHY_MSG,
5096 "Link duplex = 0x%X", param_mode);
5097 ERI_DEBUG_MSG2(erip, PHY_MSG,
5098 "Link speed = 0x%X", param_speed);
5099 /* mif_ints |= PHY_BMSR_LNKSTS; prevent double msg */
5100 /* mif_data
|= PHY_BMSR_LNKSTS; prevent double msg */
5101 }
5102 retv = LINK_STATE_UNKNOWN;
5103 if (mif_ints & PHY_BMSR_LNKSTS) {
5104 if (mif_data & PHY_BMSR_LNKSTS) {
5105 ERI_DEBUG_MSG1(erip, PHY_MSG, "Link Up");
5106 /*
5107 * Program Lu3X31T for minimum transition
5108 */
5109 if (eri_phy_mintrans) {
5110 eri_mii_write(erip, 31, 0x8000);
5111 (void) eri_mii_read(erip, 0, &old_mintrans);
5112 eri_mii_write(erip, 0, 0x00F1);
5113 eri_mii_write(erip, 31, 0x0000);
5114 }
5115 /*
5116 * The link is up.
5117 */
5118 eri_init_txmac(erip);
5119 param_linkup = 1;
5120 erip->stats.link_up = LINK_STATE_UP;
5121 if (param_mode)
5122 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5123 else
5124 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5125
5126 retv = LINK_STATE_UP;
5127 } else {
5128 ERI_DEBUG_MSG1(erip, PHY_MSG, "Link down.");
5129 param_linkup = 0;
5130 erip->stats.link_up = LINK_STATE_DOWN;
5131 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5132 retv = LINK_STATE_DOWN;
5133 if (param_autoneg) {
5134 restart_autoneg = 1;
5135 }
5136 }
5137 } else {
5138 if (mif_data & PHY_BMSR_LNKSTS) {
5139 if (!param_linkup) {
5140 ERI_DEBUG_MSG1(erip, PHY_MSG,
5141 "eri_mif_check: MIF data link up");
5142 /*
5143 * Program Lu3X31T for minimum transition
5144 */
5145 if (eri_phy_mintrans) {
5146 eri_mii_write(erip, 31, 0x8000);
5147 (void) eri_mii_read(erip, 0,
5148 &old_mintrans);
5149 eri_mii_write(erip, 0, 0x00F1);
5150 eri_mii_write(erip, 31, 0x0000);
5151 }
5152 /*
5153 * The link is up.
5154 */
5155 eri_init_txmac(erip);
5156
5157 param_linkup = 1;
5158 erip->stats.link_up = LINK_STATE_UP;
5159 if (param_mode)
5160 erip->stats.link_duplex =
5161 LINK_DUPLEX_FULL;
5162 else
5163 erip->stats.link_duplex =
5164 LINK_DUPLEX_HALF;
5165
5166 retv = LINK_STATE_UP;
5167 }
5168 } else if (param_linkup) {
5169 /*
5170 * The link is down now.
5171 */
5172 ERI_DEBUG_MSG1(erip, PHY_MSG,
5173 "eri_mif_check: Link was up and went down");
5174 param_linkup = 0;
5175 erip->stats.link_up = LINK_STATE_DOWN;
5176 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5177 retv = LINK_STATE_DOWN;
5178 if (param_autoneg)
5179 restart_autoneg = 1;
5180 }
5181 }
5182 if (restart_autoneg) {
5183 /*
5184 * Restart normal auto-negotiation.
5185 */
5186 ERI_DEBUG_MSG1(erip, PHY_MSG,
5187 "eri_mif_check: Restart Auto Negotiation");
5188 erip->openloop_autoneg = 0;
5189 param_mode = 0;
5190 param_speed = 0;
5191 param_anlpar_100T4 = 0;
5192 param_anlpar_100fdx = 0;
5193 param_anlpar_100hdx = 0;
5194 param_anlpar_10fdx = 0;
5195 param_anlpar_10hdx = 0;
5196 param_aner_lpancap = 0;
5197 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5198 control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5199 eri_mii_write(erip, ERI_PHY_BMCR, control);
5200 }
5201 if (mif_ints & PHY_BMSR_JABDET) {
5202 if (mif_data & PHY_BMSR_JABDET) {
5203 ERI_DEBUG_MSG1(erip, PHY_MSG, "Jabber detected.");
5204 HSTAT(erip, jab);
5205 /*
5206 * Reset the new PHY and bring up the link
5207 * (Check for failure?)
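 * eri_reset_xcvr() returns nonzero on failure; the return value
 * is currently ignored here.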
*/
5209 (void) eri_reset_xcvr(erip);
5210 }
5211 }
5212 return (retv);
5213 }
5214
5215 #define PHYRST_PERIOD 500
5216 static int
5217 eri_reset_xcvr(struct eri *erip)
5218 {
5219 uint16_t stat;
5220 uint16_t anar;
5221 uint16_t control;
5222 uint16_t idr1;
5223 uint16_t idr2;
5224 uint16_t nicr;
5225 uint32_t speed_100;
5226 uint32_t speed_10;
5227 int n;
5228
5229 #ifdef ERI_10_10_FORCE_SPEED_WORKAROUND
5230 erip->ifspeed_old = erip->stats.ifspeed;
5231 #endif
5232 /*
5233 * Reset open-loop auto-negotiation. This means normal
5234 * auto-negotiation is tried first; on a Multiple Link Fault,
5235 * 100M half duplex and then 10M half duplex are tried in turn
5236 * until the link comes up.
5237 */
5238 erip->openloop_autoneg = 0;
5239
5240 /*
5241 * Reset the xcvr.
5242 */
5243 eri_mii_write(erip, ERI_PHY_BMCR, PHY_BMCR_RESET);
5244
5245 /* Check for transceiver reset completion */
5246
5247 n = 1000;
5248 while (--n > 0) {
5249 drv_usecwait((clock_t)PHYRST_PERIOD);
5250 if (eri_mii_read(erip, ERI_PHY_BMCR, &control) == 1) {
5251 /* Transceiver does not talk MII */
5252 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5253 "eri_reset_xcvr: no mii");
5254 }
5255 if ((control & PHY_BMCR_RESET) == 0)
5256 goto reset_done;
5257 }
5258 ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
5259 "eri_reset_xcvr:reset_failed n == 0, control %x", control);
5260 goto eri_reset_xcvr_failed;
5261
5262 reset_done:
5263
5264 ERI_DEBUG_MSG2(erip, AUTOCONFIG_MSG,
5265 "eri_reset_xcvr: reset complete in %d us",
5266 (1000 - n) * PHYRST_PERIOD);
5267
5268 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5269 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5270 (void) eri_mii_read(erip, ERI_PHY_IDR1, &idr1);
5271 (void) eri_mii_read(erip, ERI_PHY_IDR2, &idr2);
5272
5273 ERI_DEBUG_MSG4(erip, XCVR_MSG,
5274 "eri_reset_xcvr: control %x stat %x anar %x", control, stat, anar);
5275
5276 /*
5277 * Initialize the read-only transceiver ndd information;
5278 * the values are either 0 or 1.
5279 */
5280 param_bmsr_ancap = 1 && (stat & PHY_BMSR_ACFG);
5281 param_bmsr_100T4 = 1 && (stat & PHY_BMSR_100T4);
5282 param_bmsr_100fdx = 1 && (stat & PHY_BMSR_100FDX);
5283 param_bmsr_100hdx = 1 && (stat & PHY_BMSR_100HDX);
5284 param_bmsr_10fdx = 1 && (stat & PHY_BMSR_10FDX);
5285 param_bmsr_10hdx = 1 && (stat & PHY_BMSR_10HDX);
5286
5287 /*
5288 * Match up the ndd capabilities with the transceiver.
5289 */
5290 param_autoneg &= param_bmsr_ancap;
5291 param_anar_100fdx &= param_bmsr_100fdx;
5292 param_anar_100hdx &= param_bmsr_100hdx;
5293 param_anar_10fdx &= param_bmsr_10fdx;
5294 param_anar_10hdx &= param_bmsr_10hdx;
5295
5296 /*
5297 * Select the operation mode of the transceiver.
5298 */
5299 if (param_autoneg) {
5300 /*
5301 * Initialize our auto-negotiation capabilities.
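 *
 * For example, if only adv_100fdx_cap and adv_10fdx_cap are set,
 * the ANAR is written as
 *
 *	PHY_SELECTOR | PHY_ANAR_TXFDX | PHY_ANAR_10FDX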
*/
5303 anar = PHY_SELECTOR;
5304 if (param_anar_100T4)
5305 anar |= PHY_ANAR_T4;
5306 if (param_anar_100fdx)
5307 anar |= PHY_ANAR_TXFDX;
5308 if (param_anar_100hdx)
5309 anar |= PHY_ANAR_TX;
5310 if (param_anar_10fdx)
5311 anar |= PHY_ANAR_10FDX;
5312 if (param_anar_10hdx)
5313 anar |= PHY_ANAR_10;
5314 ERI_DEBUG_MSG2(erip, XCVR_MSG, "anar = %x", anar);
5315 eri_mii_write(erip, ERI_PHY_ANAR, anar);
5316 }
5317
5318 /* Place the Transceiver in normal operation mode */
5319 if ((control & PHY_BMCR_ISOLATE) || (control & PHY_BMCR_LPBK)) {
5320 control &= ~(PHY_BMCR_ISOLATE | PHY_BMCR_LPBK);
5321 eri_mii_write(erip, ERI_PHY_BMCR,
5322 (control & ~PHY_BMCR_ISOLATE));
5323 }
5324
5325 /*
5326 * Allow a nonzero eri_phy_mintrans only for the Lu3X31T PHY.
5327 */
5328 if (eri_phy_mintrans &&
5329 (idr1 != 0x43 || (idr2 & 0xFFF0) != 0x7420)) {
5330 eri_phy_mintrans = 0;
5331 }
5332 /*
5333 * Initialize the mif interrupt mask.
5334 */
5335 erip->mif_mask = (uint16_t)(~PHY_BMSR_RES1);
5336
5337 /*
5338 * Establish the link speeds and do any necessary special handling
5339 * based on the speed.
5340 */
5341 speed_100 = param_anar_100fdx | param_anar_100hdx;
5342 speed_10 = param_anar_10fdx | param_anar_10hdx;
5343
5344 ERI_DEBUG_MSG5(erip, XCVR_MSG, "eri_reset_xcvr: %d %d %d %d",
5345 param_anar_100fdx, param_anar_100hdx, param_anar_10fdx,
5346 param_anar_10hdx);
5347
5348 ERI_DEBUG_MSG3(erip, XCVR_MSG,
5349 "eri_reset_xcvr: speed_100 %d speed_10 %d", speed_100, speed_10);
5350
5351 if ((!speed_100) && (speed_10)) {
5352 erip->mif_mask &= ~PHY_BMSR_JABDET;
5353 if (!(param_anar_10fdx) &&
5354 (param_anar_10hdx) &&
5355 (erip->link_pulse_disabled)) {
5356 param_speed = 0;
5357 param_mode = 0;
5358 (void) eri_mii_read(erip, ERI_PHY_NICR, &nicr);
5359 nicr &= ~PHY_NICR_LD;
5360 eri_mii_write(erip, ERI_PHY_NICR, nicr);
5361 param_linkup = 1;
5362 erip->stats.link_up = LINK_STATE_UP;
5363 if (param_mode)
5364 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5365 else
5366 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5367 }
5368 }
5369
5370 /*
5371 * Clear the auto-negotiation before re-starting.
5372 */
5373 control = PHY_BMCR_100M | PHY_BMCR_FDX;
5374 /* eri_mii_write(erip, ERI_PHY_BMCR, control); */
5375 if (param_autoneg) {
5376 /*
5377 * Set up the transceiver for auto-negotiation.
5378 */
5379 erip->mif_mask &= ~PHY_BMSR_ANC;
5380
5381 /*
5382 * Clear the auto-negotiation before re-starting.
5383 */
5384 eri_mii_write(erip, ERI_PHY_BMCR, control & ~PHY_BMCR_ANE);
5385
5386 /*
5387 * Switch on auto-negotiation.
5388 */
5389 control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5390
5391 eri_mii_write(erip, ERI_PHY_BMCR, control);
5392 } else {
5393 /*
5394 * Force the transceiver.
5395 */
5396 erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5397
5398 /*
5399 * Switch off auto-negotiation
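 * and force the speed and duplex mode from the ndd settings; the
 * link partner must be configured to match.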
5400 */ 5401 control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN); 5402 5403 if (speed_100) { 5404 control |= PHY_BMCR_100M; 5405 param_aner_lpancap = 0; /* Clear LP nway */ 5406 param_anlpar_10fdx = 0; 5407 param_anlpar_10hdx = 0; 5408 param_anlpar_100T4 = param_anar_100T4; 5409 param_anlpar_100fdx = param_anar_100fdx; 5410 param_anlpar_100hdx = param_anar_100hdx; 5411 param_speed = 1; 5412 erip->stats.ifspeed = 100; 5413 param_mode = param_anar_100fdx; 5414 if (param_mode) { 5415 param_anlpar_100hdx = 0; 5416 erip->stats.link_duplex = LINK_DUPLEX_FULL; 5417 } else { 5418 erip->stats.link_duplex = LINK_DUPLEX_HALF; 5419 } 5420 } else if (speed_10) { 5421 control &= ~PHY_BMCR_100M; 5422 param_aner_lpancap = 0; /* Clear LP nway */ 5423 param_anlpar_100fdx = 0; 5424 param_anlpar_100hdx = 0; 5425 param_anlpar_100T4 = 0; 5426 param_anlpar_10fdx = param_anar_10fdx; 5427 param_anlpar_10hdx = param_anar_10hdx; 5428 param_speed = 0; 5429 erip->stats.ifspeed = 10; 5430 param_mode = param_anar_10fdx; 5431 if (param_mode) { 5432 param_anlpar_10hdx = 0; 5433 erip->stats.link_duplex = LINK_DUPLEX_FULL; 5434 } else { 5435 erip->stats.link_duplex = LINK_DUPLEX_HALF; 5436 } 5437 } else { 5438 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, 5439 "Transceiver speed set incorrectly."); 5440 } 5441 5442 if (param_mode) { 5443 control |= PHY_BMCR_FDX; 5444 } 5445 5446 ERI_DEBUG_MSG4(erip, PHY_MSG, 5447 "control = %x status = %x param_mode %d", 5448 control, stat, param_mode); 5449 5450 eri_mii_write(erip, ERI_PHY_BMCR, control); 5451 /* 5452 * if (param_mode) { 5453 * control |= PHY_BMCR_FDX; 5454 * } 5455 * control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN); 5456 * eri_mii_write(erip, ERI_PHY_BMCR, control); 5457 */ 5458 } 5459 5460 #ifdef DEBUG 5461 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control); 5462 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat); 5463 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar); 5464 #endif 5465 ERI_DEBUG_MSG4(erip, PHY_MSG, 5466 "control %X status %X anar %X", control, stat, anar); 5467 5468 eri_reset_xcvr_exit: 5469 return (0); 5470 5471 eri_reset_xcvr_failed: 5472 return (1); 5473 } 5474 5475 #ifdef ERI_10_10_FORCE_SPEED_WORKAROUND 5476 5477 static void 5478 eri_xcvr_force_mode(struct eri *erip, uint32_t *link_timeout) 5479 { 5480 5481 if (!param_autoneg && !param_linkup && (erip->stats.ifspeed == 10) && 5482 (param_anar_10fdx | param_anar_10hdx)) { 5483 *link_timeout = SECOND(1); 5484 return; 5485 } 5486 5487 if (!param_autoneg && !param_linkup && (erip->ifspeed_old == 10) && 5488 (param_anar_100fdx | param_anar_100hdx)) { 5489 /* 5490 * May have to set link partner's speed and mode. 
5491 */ 5492 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_LOG_MSG, 5493 "May have to set link partner's speed and duplex mode."); 5494 } 5495 } 5496 #endif 5497 5498 static void 5499 eri_mif_poll(struct eri *erip, soft_mif_enable_t enable) 5500 { 5501 if (enable == MIF_POLL_START) { 5502 if (erip->mifpoll_enable && !erip->openloop_autoneg) { 5503 erip->mif_config |= ERI_MIF_CFGPE; 5504 PUT_MIFREG(mif_cfg, erip->mif_config); 5505 drv_usecwait(ERI_MIF_POLL_DELAY); 5506 PUT_GLOBREG(intmask, GET_GLOBREG(intmask) & 5507 ~ERI_G_MASK_MIF_INT); 5508 PUT_MIFREG(mif_imask, erip->mif_mask); 5509 } 5510 } else if (enable == MIF_POLL_STOP) { 5511 erip->mif_config &= ~ERI_MIF_CFGPE; 5512 PUT_MIFREG(mif_cfg, erip->mif_config); 5513 drv_usecwait(ERI_MIF_POLL_DELAY); 5514 PUT_GLOBREG(intmask, GET_GLOBREG(intmask) | 5515 ERI_G_MASK_MIF_INT); 5516 PUT_MIFREG(mif_imask, ERI_MIF_INTMASK); 5517 } 5518 ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF Config = 0x%X", 5519 GET_MIFREG(mif_cfg)); 5520 ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF imask = 0x%X", 5521 GET_MIFREG(mif_imask)); 5522 ERI_DEBUG_MSG2(erip, XCVR_MSG, "INT imask = 0x%X", 5523 GET_GLOBREG(intmask)); 5524 ERI_DEBUG_MSG1(erip, XCVR_MSG, "<== mif_poll"); 5525 } 5526 5527 /* Decide if transmitter went dead and reinitialize everything */ 5528 #ifdef ERI_TX_HUNG 5529 static int eri_txhung_limit = 2; 5530 static int 5531 eri_check_txhung(struct eri *erip) 5532 { 5533 boolean_t macupdate = B_FALSE; 5534 5535 mutex_enter(&erip->xmitlock); 5536 if (erip->flags & ERI_RUNNING) 5537 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) & 5538 ETX_COMPLETION_MASK); 5539 macupdate |= eri_reclaim(erip, erip->tx_completion); 5540 5541 /* Something needs to be sent out but it is not going out */ 5542 if ((erip->tcurp != erip->tnextp) && 5543 (erip->stats.opackets64 == erip->erisave.reclaim_opackets) && 5544 (erip->stats.collisions == erip->erisave.starts)) 5545 erip->txhung++; 5546 else 5547 erip->txhung = 0; 5548 5549 erip->erisave.reclaim_opackets = erip->stats.opackets64; 5550 erip->erisave.starts = erip->stats.collisions; 5551 mutex_exit(&erip->xmitlock); 5552 5553 if (macupdate) 5554 mac_tx_update(erip->mh); 5555 5556 return (erip->txhung >= eri_txhung_limit); 5557 } 5558 #endif 5559
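
/*
 * Usage note (illustrative): the ndd parameters registered through
 * eri_param_register()/eri_nd_load() above can be inspected and set
 * from userland with ndd(1M), for example (instance 0 assumed):
 *
 *	# ndd -get /dev/eri adv_autoneg_cap
 *	# ndd -set /dev/eri adv_100fdx_cap 1
 *
 * The resulting ND_GET/ND_SET ioctls are routed through
 * eri_process_ndd_ioctl() and dispatched by eri_nd_getset().
 */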