/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS FEPS(SBus)/Cheerio(PCI) 10/100Mb Ethernet Device Driver
 */

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/stream.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/crc32.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/strsun.h>
#include <sys/kstat.h>
#include <sys/pattr.h>
#include <sys/dlpi.h>
#include <sys/strsubr.h>
#include <sys/mac_provider.h>
#include <sys/mac_ether.h>
#include <sys/mii.h>
#include <sys/ethernet.h>
#include <sys/vlan.h>
#include <sys/pci.h>
#include <sys/policy.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include "hme_phy.h"
#include "hme_mac.h"
#include "hme.h"

typedef void (*fptrv_t)();

typedef enum {
    NO_MSG = 0,
    AUTOCONFIG_MSG,
    DISPLAY_MSG,
    INIT_MSG,
    UNINIT_MSG,
    CONFIG_MSG,
    MII_MSG,
    FATAL_ERR_MSG,
    NFATAL_ERR_MSG,
    XCVR_MSG,
    NOXCVR_MSG,
    ERX_MSG,
    DDI_MSG,
} msg_t;

msg_t hme_debug_level = NO_MSG;

static char *msg_string[] = {
    "NONE ",
    "AUTOCONFIG ",
    "DISPLAY ",
    "INIT ",
    "UNINIT ",
    "CONFIG ",
    "MII ",
    "FATAL_ERR ",
    "NFATAL_ERR ",
    "XCVR ",
    "NOXCVR ",
    "ERX ",
    "DDI ",
};

#define SEVERITY_NONE    0
#define SEVERITY_LOW     0
#define SEVERITY_MID     1
#define SEVERITY_HIGH    2
#define SEVERITY_UNKNOWN 99

#define FEPS_URUN_BUG
#define HME_CODEVIOL_BUG

#define KIOIP KSTAT_INTR_PTR(hmep->hme_intrstats)

/*
 * The following variables are used for checking fixes in Sbus/FEPS 2.0
 */
static int hme_urun_fix = 0;      /* Bug fixed in Sbus/FEPS 2.0 */

/*
 * The following variables are used for configuring various features
 */
static int hme_64bit_enable = 1;  /* Use 64-bit sbus transfers */
static int hme_reject_own = 1;    /* Reject packets with own SA */
static int hme_ngu_enable = 0;    /* Never Give Up mode */

mac_priv_prop_t hme_priv_prop[] = {
    { "_ipg0", MAC_PROP_PERM_RW },
    { "_ipg1", MAC_PROP_PERM_RW },
    { "_ipg2", MAC_PROP_PERM_RW },
    { "_lance_mode", MAC_PROP_PERM_RW },
};

static int hme_lance_mode = 1;    /* to enable lance mode */
static int hme_ipg0 = 16;
static int hme_ipg1 = 8;
static int hme_ipg2 = 4;
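/*
 * The globals above supply the defaults for the "_ipg0", "_ipg1", "_ipg2"
 * and "_lance_mode" private MAC properties exported through hme_priv_prop.
 * Per-instance overrides ("ipg0", "ipg1", "ipg2", "lance_mode") are read
 * from the driver .conf file in hmeinit_xfer_params() below.
 */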
/*
 * The following parameters may be configured by the user. If they are not
 * configured by the user, the values will be based on the capabilities of
 * the transceiver. The value "HME_NOTUSR" is ORed with the parameter value
 * to indicate values which are NOT configured by the user.
 */

#define HME_NOTUSR    0x0f000000
#define HME_MASK_1BIT 0x1
#define HME_MASK_5BIT 0x1f
#define HME_MASK_8BIT 0xff

/*
 * All strings used by hme messaging functions
 */

static char *no_xcvr_msg =
    "No transceiver found.";

static char *burst_size_msg =
    "Could not identify the burst size";

static char *unk_rx_ringsz_msg =
    "Unknown receive RINGSZ";

static char *add_intr_fail_msg =
    "ddi_add_intr(9F) failed";

static char *mregs_4global_reg_fail_msg =
    "ddi_regs_map_setup(9F) for global reg failed";

static char *mregs_4etx_reg_fail_msg =
    "ddi_map_regs for etx reg failed";

static char *mregs_4erx_reg_fail_msg =
    "ddi_map_regs for erx reg failed";

static char *mregs_4bmac_reg_fail_msg =
    "ddi_map_regs for bmac reg failed";

static char *mregs_4mif_reg_fail_msg =
    "ddi_map_regs for mif reg failed";

static char *init_fail_gen_msg =
    "Failed to initialize hardware/driver";

static char *ddi_nregs_fail_msg =
    "ddi_dev_nregs failed(9F), returned %d";

static char *bad_num_regs_msg =
    "Invalid number of registers.";

/* FATAL ERR msgs */
/*
 * Function prototypes.
 */
/* these are global so that qfe can use them */
int hmeattach(dev_info_t *, ddi_attach_cmd_t);
int hmedetach(dev_info_t *, ddi_detach_cmd_t);
int hmequiesce(dev_info_t *);
static boolean_t hmeinit_xfer_params(struct hme *);
static uint_t hmestop(struct hme *);
static void hmestatinit(struct hme *);
static int hmeallocthings(struct hme *);
static void hmefreethings(struct hme *);
static int hmeallocbuf(struct hme *, hmebuf_t *, int);
static int hmeallocbufs(struct hme *);
static void hmefreebufs(struct hme *);
static void hmeget_hm_rev_property(struct hme *);
static boolean_t hmestart(struct hme *, mblk_t *);
static uint_t hmeintr(caddr_t);
static void hmereclaim(struct hme *);
static int hmeinit(struct hme *);
static void hmeuninit(struct hme *hmep);
static mblk_t *hmeread(struct hme *, hmebuf_t *, uint32_t);
static void hmesavecntrs(struct hme *);
static void hme_fatal_err(struct hme *, uint_t);
static void hme_nonfatal_err(struct hme *, uint_t);
static int hmeburstsizes(struct hme *);
static void send_bit(struct hme *, uint16_t);
static uint16_t get_bit_std(uint8_t, struct hme *);
static uint16_t hme_bb_mii_read(struct hme *, uint8_t, uint8_t);
static void hme_bb_mii_write(struct hme *, uint8_t, uint8_t, uint16_t);
static void hme_bb_force_idle(struct hme *);
static uint16_t hme_mii_read(void *, uint8_t, uint8_t);
static void hme_mii_write(void *, uint8_t, uint8_t, uint16_t);
static void hme_setup_mac_address(struct hme *, dev_info_t *);
static void hme_mii_notify(void *, link_state_t);

static void hme_fault_msg(struct hme *, uint_t, msg_t, char *, ...);

static void hme_check_acc_handle(char *, uint_t, struct hme *,
    ddi_acc_handle_t);

/*
 * Nemo (GLDv3) Functions.
 */
static int hme_m_stat(void *, uint_t, uint64_t *);
static int hme_m_start(void *);
static void hme_m_stop(void *);
static int hme_m_promisc(void *, boolean_t);
static int hme_m_multicst(void *, boolean_t, const uint8_t *);
static int hme_m_unicst(void *, const uint8_t *);
static mblk_t *hme_m_tx(void *, mblk_t *);
static boolean_t hme_m_getcapab(void *, mac_capab_t, void *);
static int hme_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
    uint_t, void *, uint_t *);
static int hme_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
    const void *);

static mii_ops_t hme_mii_ops = {
    MII_OPS_VERSION,
    hme_mii_read,
    hme_mii_write,
    hme_mii_notify,
    NULL
};

static mac_callbacks_t hme_m_callbacks = {
    MC_GETCAPAB | MC_SETPROP | MC_GETPROP,
    hme_m_stat,
    hme_m_start,
    hme_m_stop,
    hme_m_promisc,
    hme_m_multicst,
    hme_m_unicst,
    hme_m_tx,
    NULL,
    hme_m_getcapab,
    NULL,
    NULL,
    hme_m_setprop,
    hme_m_getprop,
};

DDI_DEFINE_STREAM_OPS(hme_dev_ops, nulldev, nulldev, hmeattach, hmedetach,
    nodev, NULL, D_MP, NULL, hmequiesce);
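/*
 * The HME_FAULT_MSGn macros below provide one fixed-arity wrapper per
 * message arity because this code predates C99 variadic macros.  Note
 * that each expansion already ends in a semicolon.
 */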
#define HME_FAULT_MSG1(p, s, t, f) \
    hme_fault_msg((p), (s), (t), (f));

#define HME_FAULT_MSG2(p, s, t, f, a) \
    hme_fault_msg((p), (s), (t), (f), (a));

#define HME_FAULT_MSG3(p, s, t, f, a, b) \
    hme_fault_msg((p), (s), (t), (f), (a), (b));

#define HME_FAULT_MSG4(p, s, t, f, a, b, c) \
    hme_fault_msg((p), (s), (t), (f), (a), (b), (c));

#define CHECK_MIFREG() \
    hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_mifregh)
#define CHECK_ETXREG() \
    hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_etxregh)
#define CHECK_ERXREG() \
    hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_erxregh)
#define CHECK_MACREG() \
    hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_bmacregh)
#define CHECK_GLOBREG() \
    hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_globregh)

/*
 * Claim the device is ultra-capable of burst in the beginning. Use
 * the value returned by ddi_dma_burstsizes() to actually set the HME
 * global configuration register later.
 *
 * Sbus/FEPS supports burst sizes of 16, 32 and 64 bytes. Also, it supports
 * 32-bit and 64-bit Sbus transfers. Hence the dlim_burstsizes field contains
 * the burstsizes in both the lo and hi words.
 */
#define HMELIMADDRLO ((uint64_t)0x00000000)
#define HMELIMADDRHI ((uint64_t)0xffffffff)

/*
 * Note that rx and tx data buffers can be arbitrarily aligned, but
 * that the descriptor rings need to be aligned on 2K boundaries, per
 * the spec.
 */
static ddi_dma_attr_t hme_dma_attr = {
    DMA_ATTR_V0,             /* version number */
    (uint64_t)HMELIMADDRLO,  /* low address */
    (uint64_t)HMELIMADDRHI,  /* high address */
    (uint64_t)0x00ffffff,    /* address counter max */
    (uint64_t)HME_HMDALIGN,  /* alignment */
    (uint_t)0x00700070,      /* dlim_burstsizes for 32 and 64 bit xfers */
    (uint32_t)0x1,           /* minimum transfer size */
    (uint64_t)0x7fffffff,    /* maximum transfer size */
    (uint64_t)0x00ffffff,    /* maximum segment size */
    1,                       /* scatter/gather list length */
    512,                     /* granularity */
    0                        /* attribute flags */
};

static ddi_device_acc_attr_t hme_buf_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC,     /* probably could allow merging & caching */
    DDI_DEFAULT_ACC,
};

static uchar_t pci_latency_timer = 0;

/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
    &mod_driverops,          /* Type of module. This one is a driver */
    "Sun HME 10/100 Mb Ethernet",
    &hme_dev_ops,            /* driver ops */
};

static struct modlinkage modlinkage = {
    MODREV_1, &modldrv, NULL
};

/* <<<<<<<<<<<<<<<<<<<<<< Register operations >>>>>>>>>>>>>>>>>>>>> */

#define GET_MIFREG(reg) \
    ddi_get32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg)
#define PUT_MIFREG(reg, value) \
    ddi_put32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg, value)

#define GET_ETXREG(reg) \
    ddi_get32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg)
#define PUT_ETXREG(reg, value) \
    ddi_put32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg, value)
#define GET_ERXREG(reg) \
    ddi_get32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg)
#define PUT_ERXREG(reg, value) \
    ddi_put32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg, value)
#define GET_MACREG(reg) \
    ddi_get32(hmep->hme_bmacregh, (uint32_t *)&hmep->hme_bmacregp->reg)
#define PUT_MACREG(reg, value) \
    ddi_put32(hmep->hme_bmacregh, \
        (uint32_t *)&hmep->hme_bmacregp->reg, value)
#define GET_GLOBREG(reg) \
    ddi_get32(hmep->hme_globregh, (uint32_t *)&hmep->hme_globregp->reg)
#define PUT_GLOBREG(reg, value) \
    ddi_put32(hmep->hme_globregh, \
        (uint32_t *)&hmep->hme_globregp->reg, value)

#define PUT_TMD(ptr, paddr, len, flags) \
    ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_addr, paddr); \
    ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags, \
        len | flags)
#define GET_TMD_FLAGS(ptr) \
    ddi_get32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags)
#define PUT_RMD(ptr, paddr) \
    ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_addr, paddr); \
    ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags, \
        (uint32_t)(HMEBUFSIZE << HMERMD_BUFSIZE_SHIFT) | HMERMD_OWN)
#define GET_RMD_FLAGS(ptr) \
    ddi_get32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags)
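/*
 * Note that PUT_TMD() and PUT_RMD() above expand to two statements, so they
 * must only be used where a full statement is expected.  The flags word is
 * written last on purpose: setting HMETMD_OWN/HMERMD_OWN is what hands the
 * descriptor to the chip, so the buffer address must already be in place.
 */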
#define GET_ROM8(offset) \
    ddi_get8((hmep->hme_romh), (offset))

/*
 * Ether_copy is not endian-correct. Define an endian-correct version.
 */
#define ether_bcopy(a, b) (bcopy(a, b, 6))

/*
 * Ether-type is specifically big-endian, but data region is unknown endian
 */
#define get_ether_type(ptr) \
    (((((uint8_t *)ptr)[12] << 8) | (((uint8_t *)ptr)[13])))

/* <<<<<<<<<<<<<<<<<<<<<< Configuration Parameters >>>>>>>>>>>>>>>>>>>>> */

#define BMAC_DEFAULT_JAMSIZE (0x04)  /* jamsize equals 4 */
#define BMAC_LONG_JAMSIZE    (0x10)  /* jamsize equals 0x10 */
static int jamsize = BMAC_DEFAULT_JAMSIZE;

/*
 * Calculate the bit in the multicast address filter that selects the given
 * address.
 */
static uint32_t
hmeladrf_bit(const uint8_t *addr)
{
    uint32_t crc;

    CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);

    /*
     * Just want the 6 most significant bits.
     */
    return (crc >> 26);
}

/* <<<<<<<<<<<<<<<<<<<<<<<< Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */

static void
send_bit(struct hme *hmep, uint16_t x)
{
    PUT_MIFREG(mif_bbdata, x);
    PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
    PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
}

/*
 * To read the MII register bits according to the IEEE Standard
 */
static uint16_t
get_bit_std(uint8_t phyad, struct hme *hmep)
{
    uint16_t x;

    PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
    drv_usecwait(1);    /* wait for >330 ns for stable data */
    if (phyad == HME_INTERNAL_PHYAD)
        x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM0) ? 1 : 0;
    else
        x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM1) ? 1 : 0;
    PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
    return (x);
}

#define SEND_BIT(x)           send_bit(hmep, x)
#define GET_BIT_STD(phyad, x) x = get_bit_std(phyad, hmep)
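/*
 * The bit-bang routines below shift an IEEE 802.3 (clause 22) MII
 * management frame through the MIF one bit at a time:
 *
 *	<idle> <ST=01> <OP> <PHYAD:5> <REGAD:5> <TA> <DATA:16>
 *
 * where OP is 01 for a write and 10 for a read, and hme_bb_force_idle()
 * supplies the preamble of at least 32 idle (one) bits.  On a read the
 * turnaround and data bits are driven by the PHY, so the MII output
 * driver is disabled before they are clocked in.
 */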
static void
hme_bb_mii_write(struct hme *hmep, uint8_t phyad, uint8_t regad, uint16_t data)
{
    int i;

    PUT_MIFREG(mif_bbopenb, 1);    /* Enable the MII driver */
    hme_bb_force_idle(hmep);
    SEND_BIT(0); SEND_BIT(1);      /* <ST> */
    SEND_BIT(0); SEND_BIT(1);      /* <OP> */

    for (i = 4; i >= 0; i--) {     /* <AAAAA> */
        SEND_BIT((phyad >> i) & 1);
    }

    for (i = 4; i >= 0; i--) {     /* <RRRRR> */
        SEND_BIT((regad >> i) & 1);
    }

    SEND_BIT(1); SEND_BIT(0);      /* <TA> */

    for (i = 0xf; i >= 0; i--) {   /* <DDDDDDDDDDDDDDDD> */
        SEND_BIT((data >> i) & 1);
    }

    PUT_MIFREG(mif_bbopenb, 0);    /* Disable the MII driver */
    CHECK_MIFREG();
}

/*
 * Read a PHY register by bit-banging the MII.  This path cannot detect a
 * transceiver that does not talk management; the shifted-in data is
 * returned as-is.
 */
static uint16_t
hme_bb_mii_read(struct hme *hmep, uint8_t phyad, uint8_t regad)
{
    int i;
    uint32_t x;
    uint16_t data = 0;

    PUT_MIFREG(mif_bbopenb, 1);    /* Enable the MII driver */
    hme_bb_force_idle(hmep);
    SEND_BIT(0); SEND_BIT(1);      /* <ST> */
    SEND_BIT(1); SEND_BIT(0);      /* <OP> */
    for (i = 4; i >= 0; i--) {     /* <AAAAA> */
        SEND_BIT((phyad >> i) & 1);
    }
    for (i = 4; i >= 0; i--) {     /* <RRRRR> */
        SEND_BIT((regad >> i) & 1);
    }

    PUT_MIFREG(mif_bbopenb, 0);    /* Disable the MII driver */

    GET_BIT_STD(phyad, x);
    GET_BIT_STD(phyad, x);         /* <TA> */
    for (i = 0xf; i >= 0; i--) {   /* <DDDDDDDDDDDDDDDD> */
        GET_BIT_STD(phyad, x);
        data += (x << i);
    }
    /*
     * Kludge to get the Transceiver out of hung mode
     */
    GET_BIT_STD(phyad, x);
    GET_BIT_STD(phyad, x);
    GET_BIT_STD(phyad, x);
    CHECK_MIFREG();
    return (data);
}

static void
hme_bb_force_idle(struct hme *hmep)
{
    int i;

    for (i = 0; i < 33; i++) {
        SEND_BIT(1);
    }
}

/* <<<<<<<<<<<<<<<<<<<< End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */

/* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */

/*
 * Read a PHY register through the MIF frame register.  Returns 0xffff if
 * the transceiver does not complete the frame (does not talk management).
 */
static uint16_t
hme_mii_read(void *arg, uint8_t phyad, uint8_t regad)
{
    struct hme *hmep = arg;
    uint32_t frame;

    if (!hmep->hme_frame_enable)
        return (hme_bb_mii_read(hmep, phyad, regad));

    PUT_MIFREG(mif_frame,
        HME_MIF_FRREAD | (phyad << HME_MIF_FRPHYAD_SHIFT) |
        (regad << HME_MIF_FRREGAD_SHIFT));
    HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
    frame = GET_MIFREG(mif_frame);
    CHECK_MIFREG();
    if ((frame & HME_MIF_FRTA0) == 0) {
        HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, MII_MSG,
            "MIF Read failure");
        return (0xffff);
    }
    return ((uint16_t)(frame & HME_MIF_FRDATA));
}

static void
hme_mii_write(void *arg, uint8_t phyad, uint8_t regad, uint16_t data)
{
    struct hme *hmep = arg;
    uint32_t frame;

    if (!hmep->hme_frame_enable) {
        hme_bb_mii_write(hmep, phyad, regad, data);
        return;
    }

    PUT_MIFREG(mif_frame,
        HME_MIF_FRWRITE | (phyad << HME_MIF_FRPHYAD_SHIFT) |
        (regad << HME_MIF_FRREGAD_SHIFT) | data);
    HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
    frame = GET_MIFREG(mif_frame);
    CHECK_MIFREG();
    if ((frame & HME_MIF_FRTA0) == 0) {
        HME_FAULT_MSG1(hmep, SEVERITY_MID, MII_MSG,
            "MIF Write failure");
    }
}

static void
hme_mii_notify(void *arg, link_state_t link)
{
    struct hme *hmep = arg;

    if (link == LINK_STATE_UP) {
        (void) hmeinit(hmep);
    }
    mac_link_update(hmep->hme_mh, link);
}

/* <<<<<<<<<<<<<<<<<<<<<<<<<<< LOADABLE ENTRIES >>>>>>>>>>>>>>>>>>>>>>> */

int
_init(void)
{
    int status;

    mac_init_ops(&hme_dev_ops, "hme");
    if ((status = mod_install(&modlinkage)) != 0) {
        mac_fini_ops(&hme_dev_ops);
    }
    return (status);
}

int
_fini(void)
{
    int status;

    if ((status = mod_remove(&modlinkage)) == 0) {
        mac_fini_ops(&hme_dev_ops);
    }
    return (status);
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

/*
 * ddi_dma_sync() a TMD or RMD descriptor.
 */
#define HMESYNCRMD(num, who) \
    (void) ddi_dma_sync(hmep->hme_rmd_dmah, \
        (num * sizeof (struct hme_rmd)), \
        sizeof (struct hme_rmd), \
        who)

#define HMESYNCTMD(num, who) \
    (void) ddi_dma_sync(hmep->hme_tmd_dmah, \
        (num * sizeof (struct hme_tmd)), \
        sizeof (struct hme_tmd), \
        who)
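/*
 * Descriptors are synced one at a time: DDI_DMA_SYNC_FORDEV before a
 * descriptor is handed to the chip, and DDI_DMA_SYNC_FORKERNEL (or FORCPU)
 * before the host examines one the chip may have written.
 */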
/*
 * Ethernet broadcast address definition.
 */
static struct ether_addr etherbroadcastaddr = {
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/*
 * MIB II broadcast/multicast packets
 */
#define IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
#define IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
#define BUMP_InNUcast(hmep, pkt) \
    if (IS_MULTICAST(pkt)) { \
        if (IS_BROADCAST(pkt)) { \
            hmep->hme_brdcstrcv++; \
        } else { \
            hmep->hme_multircv++; \
        } \
    }
#define BUMP_OutNUcast(hmep, pkt) \
    if (IS_MULTICAST(pkt)) { \
        if (IS_BROADCAST(pkt)) { \
            hmep->hme_brdcstxmt++; \
        } else { \
            hmep->hme_multixmt++; \
        } \
    }

static int
hme_create_prop_from_kw(dev_info_t *dip, char *vpdname, char *vpdstr)
{
    char propstr[80];
    int i, needprop = 0;
    struct ether_addr local_mac;

    if (strcmp(vpdname, "NA") == 0) {
        (void) strcpy(propstr, "local-mac-address");
        needprop = 1;
    } else if (strcmp(vpdname, "Z0") == 0) {
        (void) strcpy(propstr, "model");
        needprop = 1;
    } else if (strcmp(vpdname, "Z1") == 0) {
        (void) strcpy(propstr, "board-model");
        needprop = 1;
    }

    if (needprop == 1) {
        if (strcmp(propstr, "local-mac-address") == 0) {
            for (i = 0; i < ETHERADDRL; i++)
                local_mac.ether_addr_octet[i] =
                    (uchar_t)vpdstr[i];
            if (ddi_prop_create(DDI_DEV_T_NONE, dip,
                DDI_PROP_CANSLEEP, propstr,
                (char *)local_mac.ether_addr_octet, ETHERADDRL)
                != DDI_SUCCESS) {
                return (DDI_FAILURE);
            }
        } else {
            if (ddi_prop_create(DDI_DEV_T_NONE, dip,
                DDI_PROP_CANSLEEP, propstr, vpdstr,
                strlen(vpdstr) + 1) != DDI_SUCCESS) {
                return (DDI_FAILURE);
            }
        }
    }
    return (0);
}
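/*
 * Layout of the VPD resources parsed below: the resource starts with a
 * 0x90 tag byte, keyword data begins at offset 3, and each keyword is a
 * two-character name followed by a one-byte length and that many data
 * bytes.  The "NA" (network address), "Z0" and "Z1" keywords become the
 * "local-mac-address", "model" and "board-model" properties via
 * hme_create_prop_from_kw() above.
 */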
/*
 * Get properties from old VPD
 * for PCI cards
 */
static int
hme_get_oldvpd_props(dev_info_t *dip, int vpd_base)
{
    struct hme *hmep;
    int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
    char kw_namestr[3];
    char kw_fieldstr[256];
    int i;

    hmep = ddi_get_driver_private(dip);

    vpd_start = vpd_base;

    if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
        return (1); /* error */
    } else {
        vpd_len = 9;
    }

    /* Get local-mac-address */
    kw_start = vpd_start + 3;    /* Location of 1st keyword */
    kw_ptr = kw_start;
    while ((kw_ptr - kw_start) < vpd_len) {    /* Get all keywords */
        kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
        kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
        kw_namestr[2] = '\0';
        kw_len = (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
        for (i = 0, kw_ptr += 3; i < kw_len; i++)
            kw_fieldstr[i] = GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
        kw_fieldstr[i] = '\0';
        if (hme_create_prop_from_kw(dip, kw_namestr, kw_fieldstr)) {
            return (DDI_FAILURE);
        }
        kw_ptr += kw_len;
    }    /* next keyword */

    if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, "model",
        "SUNW,cheerio", strlen("SUNW,cheerio") + 1) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }
    return (0);
}

/*
 * Get properties from new VPD
 * for CompactPCI cards
 */
static int
hme_get_newvpd_props(dev_info_t *dip, int vpd_base)
{
    struct hme *hmep;
    int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
    char kw_namestr[3];
    char kw_fieldstr[256];
    int maxvpdsize, i;

    hmep = ddi_get_driver_private(dip);

    maxvpdsize = 1024;    /* Real size not known until after it is read */

    vpd_start = (int)((GET_ROM8(&(hmep->hme_romp[vpd_base+1])) & 0xff) |
        ((GET_ROM8(&hmep->hme_romp[vpd_base+2]) & 0xff) << 8)) + 3;
    vpd_start = vpd_base + vpd_start;
    while (vpd_start < (vpd_base + maxvpdsize)) {    /* Get all VPDs */
        if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
            break;    /* no VPD found */
        } else {
            vpd_len = (int)((GET_ROM8(&hmep->hme_romp[vpd_start
                + 1]) & 0xff) | (GET_ROM8(&hmep->hme_romp[vpd_start
                + 2]) & 0xff) << 8);
        }
        /* Get all keywords in this VPD */
        kw_start = vpd_start + 3;    /* Location of 1st keyword */
        kw_ptr = kw_start;
        while ((kw_ptr - kw_start) < vpd_len) {    /* Get all keywords */
            kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
            kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
            kw_namestr[2] = '\0';
            kw_len =
                (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
            for (i = 0, kw_ptr += 3; i < kw_len; i++)
                kw_fieldstr[i] =
                    GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
            kw_fieldstr[i] = '\0';
            if (hme_create_prop_from_kw(dip, kw_namestr,
                kw_fieldstr)) {
                return (DDI_FAILURE);
            }
            kw_ptr += kw_len;
        }    /* next keyword */
        vpd_start += (vpd_len + 3);
    }    /* next VPD */
    return (0);
}
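/*
 * hme_get_vpd_props() below walks the PCI expansion ROM image: it checks
 * the 0x55AA ROM signature, scans for the "PCIR" data structure, and takes
 * the little-endian VPD pointer at offsets 8-9 of that structure (the VPD
 * pointer location defined by early PCI revisions) to find either a
 * new-style (0x82) or old-style (0x90) VPD resource.
 */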
/*
 * Get properties from VPD
 */
static int
hme_get_vpd_props(dev_info_t *dip)
{
    struct hme *hmep;
    int v0, v1, vpd_base = 0;
    int i, epromsrchlimit;

    hmep = ddi_get_driver_private(dip);

    v0 = (int)(GET_ROM8(&(hmep->hme_romp[0])));
    v1 = (int)(GET_ROM8(&(hmep->hme_romp[1])));
    v0 = ((v0 & 0xff) << 8 | v1);

    if ((v0 & 0xffff) != 0x55aa) {
        cmn_err(CE_NOTE, " Valid pci prom not found \n");
        return (1);
    }

    epromsrchlimit = 4096;
    for (i = 2; i < epromsrchlimit; i++) {
        /* "PCIR" */
        if (((GET_ROM8(&(hmep->hme_romp[i])) & 0xff) == 'P') &&
            ((GET_ROM8(&(hmep->hme_romp[i+1])) & 0xff) == 'C') &&
            ((GET_ROM8(&(hmep->hme_romp[i+2])) & 0xff) == 'I') &&
            ((GET_ROM8(&(hmep->hme_romp[i+3])) & 0xff) == 'R')) {
            vpd_base =
                (int)((GET_ROM8(&(hmep->hme_romp[i+8])) & 0xff) |
                (GET_ROM8(&(hmep->hme_romp[i+9])) & 0xff) << 8);
            break;    /* VPD pointer found */
        }
    }

    /* No VPD found */
    if (vpd_base == 0) {
        cmn_err(CE_NOTE, " Vital Product Data pointer not found \n");
        return (1);
    }

    v0 = (int)(GET_ROM8(&(hmep->hme_romp[vpd_base])));
    if (v0 == 0x82) {
        if (hme_get_newvpd_props(dip, vpd_base))
            return (1);
        return (0);
    } else if (v0 == 0x90) {
        /* If we are a SUNW,qfe card, look for the Nth "NA" descr */
        if ((GET_ROM8(&hmep->hme_romp[vpd_base + 12]) != 0x79) &&
            GET_ROM8(&hmep->hme_romp[vpd_base + 4 * 12]) == 0x79) {
            vpd_base += hmep->hme_devno * 12;
        }
        if (hme_get_oldvpd_props(dip, vpd_base))
            return (1);
        return (0);
    } else
        return (1);    /* unknown start byte in VPD */
}

/*
 * For x86, the BIOS doesn't map the PCI Rom register for the qfe
 * cards, so we have to extract it from the ebus bridge that is
 * function zero of the same device. This is a bit of an ugly hack.
 * (The ebus bridge leaves the entire ROM mapped at base address
 * register 0x10.)
 */

typedef struct {
    struct hme *hmep;
    dev_info_t *parent;
    uint8_t bus, dev;
    ddi_acc_handle_t acch;
    caddr_t romp;
} ebus_rom_t;

static int
hme_mapebusrom(dev_info_t *dip, void *arg)
{
    int *regs;
    unsigned nregs;
    int reg;
    ebus_rom_t *rom = arg;
    struct hme *hmep = rom->hmep;

    /*
     * We only want to look at our peers. Skip our parent.
     */
    if (dip == rom->parent) {
        return (DDI_WALK_PRUNESIB);
    }

    if (ddi_get_parent(dip) != rom->parent)
        return (DDI_WALK_CONTINUE);

    if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
        "reg", &regs, &nregs)) != DDI_PROP_SUCCESS) {
        return (DDI_WALK_PRUNECHILD);
    }

    if (nregs < 1) {
        ddi_prop_free(regs);
        return (DDI_WALK_PRUNECHILD);
    }
    reg = regs[0];
    ddi_prop_free(regs);

    /*
     * Look for function 0 on our bus and device. If the device doesn't
     * match, it might be an alternate peer, in which case we don't want
     * to examine any of its children.
     */
    if ((PCI_REG_BUS_G(reg) != rom->bus) ||
        (PCI_REG_DEV_G(reg) != rom->dev) ||
        (PCI_REG_FUNC_G(reg) != 0)) {
        return (DDI_WALK_PRUNECHILD);
    }

    (void) ddi_regs_map_setup(dip, 1, &rom->romp, 0, 0, &hmep->hme_dev_attr,
        &rom->acch);
    /*
     * If we can't map the registers, the caller will notice that
     * the acch is NULL.
     */
    return (DDI_WALK_TERMINATE);
}

static int
hmeget_promebus(dev_info_t *dip)
{
    ebus_rom_t rom;
    int *regs;
    unsigned nregs;
    struct hme *hmep;

    hmep = ddi_get_driver_private(dip);

    bzero(&rom, sizeof (rom));

    /*
     * For x86, the BIOS doesn't map the PCI Rom register for the qfe
     * cards, so we have to extract it from the eBus bridge that is
     * function zero. This is a bit of an ugly hack.
     */
    if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
        "reg", &regs, &nregs)) != DDI_PROP_SUCCESS) {
        return (DDI_FAILURE);
    }

    if (nregs < 5) {
        ddi_prop_free(regs);
        return (DDI_FAILURE);
    }
    rom.hmep = hmep;
    rom.bus = PCI_REG_BUS_G(regs[0]);
    rom.dev = PCI_REG_DEV_G(regs[0]);
    hmep->hme_devno = rom.dev;
    rom.parent = ddi_get_parent(dip);
    ddi_prop_free(regs);

    /*
     * The implementation of ddi_walk_devs says that we must not
     * be called during autoconfiguration. However, it turns out
     * that it is safe to call this during our attach routine,
     * because we are not a nexus device.
     *
     * Previously we rooted our search at our immediate parent,
     * but this triggered an assertion panic in debug kernels.
     */
    ddi_walk_devs(ddi_root_node(), hme_mapebusrom, &rom);

    if (rom.acch) {
        hmep->hme_romh = rom.acch;
        hmep->hme_romp = (unsigned char *)rom.romp;
        return (DDI_SUCCESS);
    }
    return (DDI_FAILURE);
}

static int
hmeget_promprops(dev_info_t *dip)
{
    struct hme *hmep;
    int rom_bar;
    ddi_acc_handle_t cfg_handle;
    struct {
        uint16_t vendorid;
        uint16_t devid;
        uint16_t command;
        uint16_t status;
        uint32_t junk1;
        uint8_t cache_line;
        uint8_t latency;
        uint8_t header;
        uint8_t bist;
        uint32_t base;
        uint32_t base14;
        uint32_t base18;
        uint32_t base1c;
        uint32_t base20;
        uint32_t base24;
        uint32_t base28;
        uint32_t base2c;
        uint32_t base30;
    } *cfg_ptr;

    hmep = ddi_get_driver_private(dip);

    /*
     * map configuration space
     */
    if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
        0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
        return (DDI_FAILURE);
    }

    /*
     * Enable bus-master and memory accesses
     */
    ddi_put16(cfg_handle, &cfg_ptr->command,
        PCI_COMM_SERR_ENABLE | PCI_COMM_PARITY_DETECT |
        PCI_COMM_MAE | PCI_COMM_ME);

    /*
     * Enable rom accesses
     */
    rom_bar = ddi_get32(cfg_handle, &cfg_ptr->base30);
    ddi_put32(cfg_handle, &cfg_ptr->base30, rom_bar | 1);

    if ((ddi_regs_map_setup(dip, 2, (caddr_t *)&(hmep->hme_romp), 0, 0,
        &hmep->hme_dev_attr, &hmep->hme_romh) != DDI_SUCCESS) &&
        (hmeget_promebus(dip) != DDI_SUCCESS)) {
        if (cfg_ptr)
            ddi_regs_map_free(&cfg_handle);
        return (DDI_FAILURE);
    } else {
        if (hme_get_vpd_props(dip))
            return (DDI_FAILURE);
    }
    if (hmep->hme_romp)
        ddi_regs_map_free(&hmep->hme_romh);
    if (cfg_ptr)
        ddi_regs_map_free(&cfg_handle);
    return (DDI_SUCCESS);
}

static void
hmeget_hm_rev_property(struct hme *hmep)
{
    int hm_rev;

    hm_rev = hmep->asic_rev;
    switch (hm_rev) {
    case HME_2P1_REVID:
    case HME_2P1_REVID_OBP:
        HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
            "SBus 2.1 Found (Rev Id = %x)", hm_rev);
        hmep->hme_frame_enable = 1;
        break;

    case HME_2P0_REVID:
        HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
            "SBus 2.0 Found (Rev Id = %x)", hm_rev);
        break;

    case HME_1C0_REVID:
        HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
            "PCI IO 1.0 Found (Rev Id = %x)", hm_rev);
        break;

    default:
        HME_FAULT_MSG3(hmep, SEVERITY_NONE, DISPLAY_MSG,
            "%s (Rev Id = %x) Found",
            (hm_rev == HME_2C0_REVID) ? "PCI IO 2.0" : "Sbus", hm_rev);
        hmep->hme_frame_enable = 1;
        hmep->hme_lance_mode_enable = 1;
        hmep->hme_rxcv_enable = 1;
        break;
    }
}

/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
 */
int
hmeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    struct hme *hmep;
    mac_register_t *macp = NULL;
    int regno;
    int hm_rev = 0;
    int prop_len = sizeof (int);
    ddi_acc_handle_t cfg_handle;
    struct {
        uint16_t vendorid;
        uint16_t devid;
        uint16_t command;
        uint16_t status;
        uint8_t revid;
        uint8_t j1;
        uint16_t j2;
    } *cfg_ptr;

    switch (cmd) {
    case DDI_ATTACH:
        break;

    case DDI_RESUME:
        if ((hmep = ddi_get_driver_private(dip)) == NULL)
            return (DDI_FAILURE);

        hmep->hme_flags &= ~HMESUSPENDED;

        mii_resume(hmep->hme_mii);

        if (hmep->hme_started)
            (void) hmeinit(hmep);
        return (DDI_SUCCESS);

    default:
        return (DDI_FAILURE);
    }

    /*
     * Allocate soft device data structure
     */
    hmep = kmem_zalloc(sizeof (*hmep), KM_SLEEP);

    /*
     * Might as well set up elements of data structure
     */
    hmep->dip = dip;
    hmep->instance = ddi_get_instance(dip);
    hmep->pagesize = ddi_ptob(dip, (ulong_t)1);    /* IOMMU PSize */

    /*
     * Might as well setup the driver private
     * structure as part of the dip.
     */
    ddi_set_driver_private(dip, hmep);

    /*
     * Reject this device if it's in a slave-only slot.
     */
    if (ddi_slaveonly(dip) == DDI_SUCCESS) {
        HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
            "Dev not used - dev in slave only slot");
        goto error_state;
    }

    /*
     * Map in the device registers.
     *
     * Reg # 0 is the Global register set
     * Reg # 1 is the ETX register set
     * Reg # 2 is the ERX register set
     * Reg # 3 is the BigMAC register set.
     * Reg # 4 is the MIF register set
     */
    if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
        HME_FAULT_MSG2(hmep, SEVERITY_HIGH, INIT_MSG,
            ddi_nregs_fail_msg, regno);
        goto error_state;
    }

    switch (regno) {
    case 5:
        hmep->hme_cheerio_mode = 0;
        break;
    case 2:
    case 3:    /* for hot swap/plug, there will be 3 entries in "reg" prop */
        hmep->hme_cheerio_mode = 1;
        break;
    default:
        HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
            bad_num_regs_msg);
        goto error_state;
    }

    /* Initialize device attributes structure */
    hmep->hme_dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;

    if (hmep->hme_cheerio_mode)
        hmep->hme_dev_attr.devacc_attr_endian_flags =
            DDI_STRUCTURE_LE_ACC;
    else
        hmep->hme_dev_attr.devacc_attr_endian_flags =
            DDI_STRUCTURE_BE_ACC;

    hmep->hme_dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

    if (hmep->hme_cheerio_mode) {
        uint8_t oldLT;
        uint8_t newLT = 0;
        dev_info_t *pdip;
        const char *pdrvname;

        /*
         * Map the PCI config space
         */
        if (pci_config_setup(dip, &hmep->pci_config_handle) !=
            DDI_SUCCESS) {
            HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
                "pci_config_setup() failed..");
            goto error_state;
        }

        if (ddi_regs_map_setup(dip, 1,
            (caddr_t *)&(hmep->hme_globregp), 0, 0,
            &hmep->hme_dev_attr, &hmep->hme_globregh)) {
            HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
                mregs_4global_reg_fail_msg);
            goto error_unmap;
        }
        hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
            hmep->hme_mifregh = hmep->hme_globregh;
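        /*
         * Cheerio exposes every register group through the single
         * mapping made above; the individual register pointers are
         * just fixed offsets into it: ETX at +0x2000, ERX at +0x4000,
         * BigMAC at +0x6000 and MIF at +0x7000.
         */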
        hmep->hme_etxregp =
            (void *)(((caddr_t)hmep->hme_globregp) + 0x2000);
        hmep->hme_erxregp =
            (void *)(((caddr_t)hmep->hme_globregp) + 0x4000);
        hmep->hme_bmacregp =
            (void *)(((caddr_t)hmep->hme_globregp) + 0x6000);
        hmep->hme_mifregp =
            (void *)(((caddr_t)hmep->hme_globregp) + 0x7000);

        /*
         * Get parent pci bridge info.
         */
        pdip = ddi_get_parent(dip);
        pdrvname = ddi_driver_name(pdip);

        oldLT = pci_config_get8(hmep->pci_config_handle,
            PCI_CONF_LATENCY_TIMER);
        /*
         * Honor value set in /etc/system
         * "set hme:pci_latency_timer=0xYY"
         */
        if (pci_latency_timer)
            newLT = pci_latency_timer;
        /*
         * Modify LT for simba
         */
        else if (strcmp("simba", pdrvname) == 0)
            newLT = 0xf0;
        /*
         * Ensure minimum cheerio latency timer of 0x50.
         * Usually OBP or the pci bridge should set this value
         * based on cheerio:
         *     min_grant * 8(33MHz) = 0x50 = 0xa * 0x8
         * Some systems set the cheerio LT at 0x40.
         */
        else if (oldLT < 0x40)
            newLT = 0x50;

        /*
         * Now program cheerio's pci latency timer with newLT
         */
        if (newLT)
            pci_config_put8(hmep->pci_config_handle,
                PCI_CONF_LATENCY_TIMER, (uchar_t)newLT);
    } else {    /* Map register sets */
        if (ddi_regs_map_setup(dip, 0,
            (caddr_t *)&(hmep->hme_globregp), 0, 0,
            &hmep->hme_dev_attr, &hmep->hme_globregh)) {
            HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
                mregs_4global_reg_fail_msg);
            goto error_state;
        }
        if (ddi_regs_map_setup(dip, 1,
            (caddr_t *)&(hmep->hme_etxregp), 0, 0,
            &hmep->hme_dev_attr, &hmep->hme_etxregh)) {
            HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
                mregs_4etx_reg_fail_msg);
            goto error_unmap;
        }
        if (ddi_regs_map_setup(dip, 2,
            (caddr_t *)&(hmep->hme_erxregp), 0, 0,
            &hmep->hme_dev_attr, &hmep->hme_erxregh)) {
            HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
                mregs_4erx_reg_fail_msg);
            goto error_unmap;
        }
        if (ddi_regs_map_setup(dip, 3,
            (caddr_t *)&(hmep->hme_bmacregp), 0, 0,
            &hmep->hme_dev_attr, &hmep->hme_bmacregh)) {
            HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
                mregs_4bmac_reg_fail_msg);
            goto error_unmap;
        }

        if (ddi_regs_map_setup(dip, 4,
            (caddr_t *)&(hmep->hme_mifregp), 0, 0,
            &hmep->hme_dev_attr, &hmep->hme_mifregh)) {
            HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
                mregs_4mif_reg_fail_msg);
            goto error_unmap;
        }
    }    /* Endif cheerio_mode */

    /*
     * Based on the hm-rev, set some capabilities
     * Set up default capabilities for HM 2.0
     */
    hmep->hme_frame_enable = 0;
    hmep->hme_lance_mode_enable = 0;
    hmep->hme_rxcv_enable = 0;

    /* NEW routine to get the properties */

    if (ddi_getlongprop_buf(DDI_DEV_T_ANY, hmep->dip, 0, "hm-rev",
        (caddr_t)&hm_rev, &prop_len) == DDI_PROP_SUCCESS) {
        hmep->asic_rev = hm_rev;
        hmeget_hm_rev_property(hmep);
    } else {
        /*
         * The hm-rev property was not found, so this is a case of
         * hot insertion of a card without the fcode having been
         * interpreted.  Get the revision from the revid in config
         * space after mapping it.
         */
1345 */ 1346 if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr, 1347 0, 0, &hmep->hme_dev_attr, &cfg_handle)) { 1348 return (DDI_FAILURE); 1349 } 1350 /* 1351 * Since this is cheerio-based PCI card, we write 0xC in the 1352 * top 4 bits(4-7) of hm-rev and retain the bottom(0-3) bits 1353 * for Cheerio version(1.0 or 2.0 = 0xC0 or 0xC1) 1354 */ 1355 hm_rev = ddi_get8(cfg_handle, &cfg_ptr->revid); 1356 hm_rev = HME_1C0_REVID | (hm_rev & HME_REV_VERS_MASK); 1357 hmep->asic_rev = hm_rev; 1358 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 1359 "hm-rev", (caddr_t)&hm_rev, sizeof (hm_rev)) != 1360 DDI_SUCCESS) { 1361 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG, 1362 "ddi_prop_create error for hm_rev"); 1363 } 1364 ddi_regs_map_free(&cfg_handle); 1365 1366 hmeget_hm_rev_property(hmep); 1367 1368 /* get info via VPD */ 1369 if (hmeget_promprops(dip) != DDI_SUCCESS) { 1370 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG, 1371 "no promprops"); 1372 } 1373 } 1374 1375 if (ddi_intr_hilevel(dip, 0)) { 1376 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, NFATAL_ERR_MSG, 1377 " high-level interrupts are not supported"); 1378 goto error_unmap; 1379 } 1380 1381 /* 1382 * Get intr. block cookie so that mutex locks can be initialized. 1383 */ 1384 if (ddi_get_iblock_cookie(dip, 0, &hmep->hme_cookie) != DDI_SUCCESS) 1385 goto error_unmap; 1386 1387 /* 1388 * Initialize mutex's for this device. 1389 */ 1390 mutex_init(&hmep->hme_xmitlock, NULL, MUTEX_DRIVER, hmep->hme_cookie); 1391 mutex_init(&hmep->hme_intrlock, NULL, MUTEX_DRIVER, hmep->hme_cookie); 1392 1393 /* 1394 * Quiesce the hardware. 1395 */ 1396 (void) hmestop(hmep); 1397 1398 /* 1399 * Add interrupt to system 1400 */ 1401 if (ddi_add_intr(dip, 0, (ddi_iblock_cookie_t *)NULL, 1402 (ddi_idevice_cookie_t *)NULL, hmeintr, (caddr_t)hmep)) { 1403 HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG, 1404 add_intr_fail_msg); 1405 goto error_mutex; 1406 } 1407 1408 /* 1409 * Set up the ethernet mac address. 
1410 */ 1411 hme_setup_mac_address(hmep, dip); 1412 1413 if (!hmeinit_xfer_params(hmep)) 1414 goto error_intr; 1415 1416 if (hmeburstsizes(hmep) == DDI_FAILURE) { 1417 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG, burst_size_msg); 1418 goto error_intr; 1419 } 1420 1421 if (hmeallocthings(hmep) != DDI_SUCCESS) { 1422 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG, 1423 "resource allocation failed"); 1424 goto error_intr; 1425 } 1426 1427 if (hmeallocbufs(hmep) != DDI_SUCCESS) { 1428 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG, 1429 "buffer allocation failed"); 1430 goto error_intr; 1431 } 1432 1433 hmestatinit(hmep); 1434 1435 hmep->hme_mii = mii_alloc(hmep, dip, &hme_mii_ops); 1436 if (hmep->hme_mii == NULL) { 1437 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG, 1438 "mii_alloc failed"); 1439 goto error_intr; 1440 } 1441 /* force a probe for the PHY */ 1442 mii_probe(hmep->hme_mii); 1443 1444 if ((macp = mac_alloc(MAC_VERSION)) == NULL) { 1445 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG, 1446 "mac_alloc failed"); 1447 goto error_intr; 1448 } 1449 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 1450 macp->m_driver = hmep; 1451 macp->m_dip = dip; 1452 macp->m_src_addr = hmep->hme_ouraddr.ether_addr_octet; 1453 macp->m_callbacks = &hme_m_callbacks; 1454 macp->m_min_sdu = 0; 1455 macp->m_max_sdu = ETHERMTU; 1456 macp->m_margin = VLAN_TAGSZ; 1457 macp->m_priv_props = hme_priv_prop; 1458 macp->m_priv_prop_count = 1459 sizeof (hme_priv_prop) / sizeof (hme_priv_prop[0]); 1460 if (mac_register(macp, &hmep->hme_mh) != 0) { 1461 mac_free(macp); 1462 goto error_intr; 1463 } 1464 1465 mac_free(macp); 1466 1467 ddi_report_dev(dip); 1468 return (DDI_SUCCESS); 1469 1470 /* 1471 * Failure Exit 1472 */ 1473 1474 error_intr: 1475 if (hmep->hme_cookie) 1476 ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0); 1477 1478 if (hmep->hme_mii) 1479 mii_free(hmep->hme_mii); 1480 1481 error_mutex: 1482 mutex_destroy(&hmep->hme_xmitlock); 1483 mutex_destroy(&hmep->hme_intrlock); 1484 1485 error_unmap: 1486 if (hmep->hme_globregh) 1487 ddi_regs_map_free(&hmep->hme_globregh); 1488 if (hmep->hme_cheerio_mode == 0) { 1489 if (hmep->hme_etxregh) 1490 ddi_regs_map_free(&hmep->hme_etxregh); 1491 if (hmep->hme_erxregh) 1492 ddi_regs_map_free(&hmep->hme_erxregh); 1493 if (hmep->hme_bmacregh) 1494 ddi_regs_map_free(&hmep->hme_bmacregh); 1495 if (hmep->hme_mifregh) 1496 ddi_regs_map_free(&hmep->hme_mifregh); 1497 } else { 1498 if (hmep->pci_config_handle) 1499 (void) pci_config_teardown(&hmep->pci_config_handle); 1500 hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh = 1501 hmep->hme_mifregh = hmep->hme_globregh = NULL; 1502 } 1503 1504 error_state: 1505 hmefreethings(hmep); 1506 hmefreebufs(hmep); 1507 1508 if (hmep) { 1509 kmem_free((caddr_t)hmep, sizeof (*hmep)); 1510 ddi_set_driver_private(dip, NULL); 1511 } 1512 1513 return (DDI_FAILURE); 1514 } 1515 1516 int 1517 hmedetach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1518 { 1519 struct hme *hmep; 1520 1521 if ((hmep = ddi_get_driver_private(dip)) == NULL) 1522 return (DDI_FAILURE); 1523 1524 switch (cmd) { 1525 case DDI_DETACH: 1526 break; 1527 1528 case DDI_SUSPEND: 1529 mii_suspend(hmep->hme_mii); 1530 hmep->hme_flags |= HMESUSPENDED; 1531 hmeuninit(hmep); 1532 return (DDI_SUCCESS); 1533 1534 default: 1535 return (DDI_FAILURE); 1536 } 1537 1538 1539 if (mac_unregister(hmep->hme_mh) != 0) { 1540 return (DDI_FAILURE); 1541 } 1542 1543 /* 1544 * Make driver quiescent, we don't want to prevent the 1545 * detach on failure. 
    if (!(hmep->hme_flags & HMESUSPENDED)) {
        (void) hmestop(hmep);
    }

    if (hmep->hme_mii)
        mii_free(hmep->hme_mii);

    /*
     * Remove instance of the intr
     */
    ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);

    /*
     * Unregister kstats.
     */
    if (hmep->hme_ksp != NULL)
        kstat_delete(hmep->hme_ksp);
    if (hmep->hme_intrstats != NULL)
        kstat_delete(hmep->hme_intrstats);

    hmep->hme_ksp = NULL;
    hmep->hme_intrstats = NULL;

    /*
     * Destroy all mutexes and data structures allocated during
     * attach time.
     *
     * Note: at this time we should be the only thread accessing
     * the structures for this instance.
     */
    if (hmep->hme_globregh)
        ddi_regs_map_free(&hmep->hme_globregh);
    if (hmep->hme_cheerio_mode == 0) {
        if (hmep->hme_etxregh)
            ddi_regs_map_free(&hmep->hme_etxregh);
        if (hmep->hme_erxregh)
            ddi_regs_map_free(&hmep->hme_erxregh);
        if (hmep->hme_bmacregh)
            ddi_regs_map_free(&hmep->hme_bmacregh);
        if (hmep->hme_mifregh)
            ddi_regs_map_free(&hmep->hme_mifregh);
    } else {
        if (hmep->pci_config_handle)
            (void) pci_config_teardown(&hmep->pci_config_handle);
        hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
            hmep->hme_mifregh = hmep->hme_globregh = NULL;
    }

    mutex_destroy(&hmep->hme_xmitlock);
    mutex_destroy(&hmep->hme_intrlock);

    hmefreethings(hmep);
    hmefreebufs(hmep);

    ddi_set_driver_private(dip, NULL);
    kmem_free(hmep, sizeof (struct hme));

    return (DDI_SUCCESS);
}

int
hmequiesce(dev_info_t *dip)
{
    struct hme *hmep;

    if ((hmep = ddi_get_driver_private(dip)) == NULL)
        return (DDI_FAILURE);

    (void) hmestop(hmep);
    return (DDI_SUCCESS);
}

static boolean_t
hmeinit_xfer_params(struct hme *hmep)
{
    int hme_ipg1_conf, hme_ipg2_conf;
    int hme_ipg0_conf, hme_lance_mode_conf;
    int prop_len = sizeof (int);
    dev_info_t *dip;

    dip = hmep->dip;

    /*
     * Set up the start-up values for user-configurable parameters.
     * Get the values from the global variables first.
     * Use the MASK to limit the value to the allowed maximum.
     */
    hmep->hme_ipg1 = hme_ipg1 & HME_MASK_8BIT;
    hmep->hme_ipg2 = hme_ipg2 & HME_MASK_8BIT;
    hmep->hme_ipg0 = hme_ipg0 & HME_MASK_5BIT;

    /*
     * Get the parameter values configured in the .conf file.
     */
    if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg1",
        (caddr_t)&hme_ipg1_conf, &prop_len) == DDI_PROP_SUCCESS) {
        hmep->hme_ipg1 = hme_ipg1_conf & HME_MASK_8BIT;
    }

    if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg2",
        (caddr_t)&hme_ipg2_conf, &prop_len) == DDI_PROP_SUCCESS) {
        hmep->hme_ipg2 = hme_ipg2_conf & HME_MASK_8BIT;
    }

    if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg0",
        (caddr_t)&hme_ipg0_conf, &prop_len) == DDI_PROP_SUCCESS) {
        hmep->hme_ipg0 = hme_ipg0_conf & HME_MASK_5BIT;
    }

    if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "lance_mode",
        (caddr_t)&hme_lance_mode_conf, &prop_len) == DDI_PROP_SUCCESS) {
        hmep->hme_lance_mode = hme_lance_mode_conf & HME_MASK_1BIT;
    }

    return (B_TRUE);
}

/*
 * Return 0 upon success, 1 on failure.
 */
static uint_t
hmestop(struct hme *hmep)
{
    /*
     * Disable the Tx dma engine.
     */
    PUT_ETXREG(config, (GET_ETXREG(config) & ~HMET_CONFIG_TXDMA_EN));
    HMEDELAY(((GET_ETXREG(state_mach) & 0x1f) == 0x1), HMEMAXRSTDELAY);

    /*
     * Disable the Rx dma engine.
     */
    PUT_ERXREG(config, (GET_ERXREG(config) & ~HMER_CONFIG_RXDMA_EN));
    HMEDELAY(((GET_ERXREG(state_mach) & 0x3f) == 0), HMEMAXRSTDELAY);

    /*
     * By this time all things should be quiet, so hit the
     * chip with a reset.
     */
    PUT_GLOBREG(reset, HMEG_RESET_GLOBAL);

    HMEDELAY((GET_GLOBREG(reset) == 0), HMEMAXRSTDELAY);
    if (GET_GLOBREG(reset)) {
        return (1);
    }

    CHECK_GLOBREG();
    return (0);
}

static int
hmestat_kstat_update(kstat_t *ksp, int rw)
{
    struct hme *hmep;
    struct hmekstat *hkp;

    hmep = (struct hme *)ksp->ks_private;
    hkp = (struct hmekstat *)ksp->ks_data;

    if (rw != KSTAT_READ)
        return (EACCES);

    /*
     * Update all the stats by reading all the counter registers.
     * Counter register stats are not updated till they overflow
     * and interrupt.
     */
    mutex_enter(&hmep->hme_xmitlock);
    if (hmep->hme_flags & HMERUNNING) {
        hmereclaim(hmep);
        hmesavecntrs(hmep);
    }
    mutex_exit(&hmep->hme_xmitlock);

    hkp->hk_cvc.value.ul = hmep->hme_cvc;
    hkp->hk_lenerr.value.ul = hmep->hme_lenerr;
    hkp->hk_buff.value.ul = hmep->hme_buff;
    hkp->hk_missed.value.ul = hmep->hme_missed;
    hkp->hk_allocbfail.value.ul = hmep->hme_allocbfail;
    hkp->hk_babl.value.ul = hmep->hme_babl;
    hkp->hk_tmder.value.ul = hmep->hme_tmder;
    hkp->hk_txlaterr.value.ul = hmep->hme_txlaterr;
    hkp->hk_rxlaterr.value.ul = hmep->hme_rxlaterr;
    hkp->hk_slvparerr.value.ul = hmep->hme_slvparerr;
    hkp->hk_txparerr.value.ul = hmep->hme_txparerr;
    hkp->hk_rxparerr.value.ul = hmep->hme_rxparerr;
    hkp->hk_slverrack.value.ul = hmep->hme_slverrack;
    hkp->hk_txerrack.value.ul = hmep->hme_txerrack;
    hkp->hk_rxerrack.value.ul = hmep->hme_rxerrack;
    hkp->hk_txtagerr.value.ul = hmep->hme_txtagerr;
    hkp->hk_rxtagerr.value.ul = hmep->hme_rxtagerr;
    hkp->hk_eoperr.value.ul = hmep->hme_eoperr;
    hkp->hk_notmds.value.ul = hmep->hme_notmds;
    hkp->hk_notbufs.value.ul = hmep->hme_notbufs;
    hkp->hk_norbufs.value.ul = hmep->hme_norbufs;

    /*
     * Debug kstats
     */
    hkp->hk_inits.value.ul = hmep->inits;
    hkp->hk_phyfail.value.ul = hmep->phyfail;

    /*
     * xcvr kstats
     */
    hkp->hk_asic_rev.value.ul = hmep->asic_rev;

    return (0);
}

static void
hmestatinit(struct hme *hmep)
{
    struct kstat *ksp;
    struct hmekstat *hkp;
    const char *driver;
    int instance;
    char buf[16];

    instance = hmep->instance;
    driver = ddi_driver_name(hmep->dip);

    if ((ksp = kstat_create(driver, instance,
        "driver_info", "net", KSTAT_TYPE_NAMED,
        sizeof (struct hmekstat) / sizeof (kstat_named_t), 0)) == NULL) {
        HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, INIT_MSG,
            "kstat_create failed");
        return;
    }

    (void) snprintf(buf, sizeof (buf), "%sc%d", driver, instance);
    hmep->hme_intrstats = kstat_create(driver, instance, buf, "controller",
        KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
    if (hmep->hme_intrstats)
        kstat_install(hmep->hme_intrstats);

    hmep->hme_ksp = ksp;
    hkp = (struct hmekstat *)ksp->ks_data;
    kstat_named_init(&hkp->hk_cvc, "code_violations", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_lenerr, "len_errors", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_buff, "buff", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_missed, "missed", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_nocanput, "nocanput", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_allocbfail, "allocbfail", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_babl, "babble", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_tmder, "tmd_error", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_txlaterr, "tx_late_error", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_rxlaterr, "rx_late_error", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_slvparerr, "slv_parity_error",
        KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_txparerr, "tx_parity_error",
        KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_rxparerr, "rx_parity_error",
        KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_slverrack, "slv_error_ack", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_txerrack, "tx_error_ack", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_rxerrack, "rx_error_ack", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_txtagerr, "tx_tag_error", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_rxtagerr, "rx_tag_error", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_eoperr, "eop_error", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_notmds, "no_tmds", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_notbufs, "no_tbufs", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_norbufs, "no_rbufs", KSTAT_DATA_ULONG);

    /*
     * Debugging kstats
     */
    kstat_named_init(&hkp->hk_inits, "inits", KSTAT_DATA_ULONG);
    kstat_named_init(&hkp->hk_phyfail, "phy_failures", KSTAT_DATA_ULONG);

    /*
     * xcvr kstats
     */
    kstat_named_init(&hkp->hk_asic_rev, "asic_rev", KSTAT_DATA_ULONG);

    ksp->ks_update = hmestat_kstat_update;
    ksp->ks_private = (void *)hmep;
    kstat_install(ksp);
}
int
hme_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t flags,
    uint_t sz, void *val, uint_t *perm)
{
    struct hme *hmep = arg;
    int value;
    boolean_t is_default;
    int rv;

    rv = mii_m_getprop(hmep->hme_mii, name, num, flags, sz, val, perm);
    if (rv != ENOTSUP)
        return (rv);

    switch (num) {
    case MAC_PROP_PRIVATE:
        break;
    default:
        return (ENOTSUP);
    }

    *perm = MAC_PROP_PERM_RW;

    is_default = (flags & MAC_PROP_DEFAULT) ? B_TRUE : B_FALSE;
    if (strcmp(name, "_ipg0") == 0) {
        value = is_default ? hme_ipg0 : hmep->hme_ipg0;
    } else if (strcmp(name, "_ipg1") == 0) {
        value = is_default ? hme_ipg1 : hmep->hme_ipg1;
    } else if (strcmp(name, "_ipg2") == 0) {
        value = is_default ? hme_ipg2 : hmep->hme_ipg2;
    } else if (strcmp(name, "_lance_mode") == 0) {
        value = is_default ? hme_lance_mode : hmep->hme_lance_mode;
    } else {
        return (ENOTSUP);
    }
    (void) snprintf(val, sz, "%d", value);
    return (0);
}
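/*
 * GLDv3 private properties are exchanged as strings, hence the snprintf()
 * above and the ddi_strtol() below.  When MAC_PROP_DEFAULT is set the
 * driver reports its built-in default rather than the current value.
 */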
int
hme_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    const void *val)
{
    struct hme *hmep = arg;
    int rv;
    long lval;
    boolean_t init = B_FALSE;

    rv = mii_m_setprop(hmep->hme_mii, name, num, sz, val);
    if (rv != ENOTSUP)
        return (rv);
    rv = 0;

    switch (num) {
    case MAC_PROP_PRIVATE:
        break;
    default:
        return (ENOTSUP);
    }

    (void) ddi_strtol(val, NULL, 0, &lval);

    if (strcmp(name, "_ipg1") == 0) {
        if ((lval >= 0) && (lval <= 255)) {
            hmep->hme_ipg1 = lval & 0xff;
            init = B_TRUE;
        } else {
            return (EINVAL);
        }
    } else if (strcmp(name, "_ipg2") == 0) {
        if ((lval >= 0) && (lval <= 255)) {
            hmep->hme_ipg2 = lval & 0xff;
            init = B_TRUE;
        } else {
            return (EINVAL);
        }
    } else if (strcmp(name, "_ipg0") == 0) {
        if ((lval >= 0) && (lval <= 31)) {
            hmep->hme_ipg0 = lval & 0xff;
            init = B_TRUE;
        } else {
            return (EINVAL);
        }
    } else if (strcmp(name, "_lance_mode") == 0) {
        if ((lval >= 0) && (lval <= 1)) {
            hmep->hme_lance_mode = lval & 0xff;
            init = B_TRUE;
        } else {
            return (EINVAL);
        }
    } else {
        rv = ENOTSUP;
    }

    if (init) {
        (void) hmeinit(hmep);
    }
    return (rv);
}

/*ARGSUSED*/
static boolean_t
hme_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
    switch (cap) {
    case MAC_CAPAB_HCKSUM:
        *(uint32_t *)cap_data = HCKSUM_INET_PARTIAL;
        return (B_TRUE);
    default:
        return (B_FALSE);
    }
}

static int
hme_m_promisc(void *arg, boolean_t on)
{
    struct hme *hmep = arg;

    hmep->hme_promisc = on;
    (void) hmeinit(hmep);
    return (0);
}

static int
hme_m_unicst(void *arg, const uint8_t *macaddr)
{
    struct hme *hmep = arg;

    /*
     * Set new interface local address and re-init device.
     * This is destructive to any other streams attached
     * to this device.
     */
    mutex_enter(&hmep->hme_intrlock);
    bcopy(macaddr, &hmep->hme_ouraddr, ETHERADDRL);
    mutex_exit(&hmep->hme_intrlock);
    (void) hmeinit(hmep);
    return (0);
}
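/*
 * The 64-bit multicast hash filter is kept as four 16-bit words:
 * hmeladrf_bit() selects the filter bit, hme_ladrf[ladrf_bit >> 4] the
 * word and (ladrf_bit & 0xf) the bit within it.  A per-bit reference
 * count is maintained so a bit is only cleared when the last address
 * hashing to it is removed, since several addresses can map to the
 * same filter bit.
 */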
static int
hme_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
{
    struct hme *hmep = arg;
    uint32_t ladrf_bit;
    boolean_t doinit = B_FALSE;

    /*
     * If this address's bit was not already set in the local address
     * filter, add it and re-initialize the hardware.
     */
    ladrf_bit = hmeladrf_bit(macaddr);

    mutex_enter(&hmep->hme_intrlock);
    if (add) {
        hmep->hme_ladrf_refcnt[ladrf_bit]++;
        if (hmep->hme_ladrf_refcnt[ladrf_bit] == 1) {
            hmep->hme_ladrf[ladrf_bit >> 4] |=
                1 << (ladrf_bit & 0xf);
            hmep->hme_multi++;
            doinit = B_TRUE;
        }
    } else {
        hmep->hme_ladrf_refcnt[ladrf_bit]--;
        if (hmep->hme_ladrf_refcnt[ladrf_bit] == 0) {
            hmep->hme_ladrf[ladrf_bit >> 4] &=
                ~(1 << (ladrf_bit & 0xf));
            doinit = B_TRUE;
        }
    }
    mutex_exit(&hmep->hme_intrlock);

    if (doinit) {
        (void) hmeinit(hmep);
    }

    return (0);
}

static int
hme_m_start(void *arg)
{
    struct hme *hmep = arg;

    if (hmeinit(hmep) != 0) {
        /* initialization failed -- really want DL_INITFAILED */
        return (EIO);
    } else {
        hmep->hme_started = B_TRUE;
        mii_start(hmep->hme_mii);
        return (0);
    }
}

static void
hme_m_stop(void *arg)
{
    struct hme *hmep = arg;

    mii_stop(hmep->hme_mii);
    hmep->hme_started = B_FALSE;
    hmeuninit(hmep);
}

static int
hme_m_stat(void *arg, uint_t stat, uint64_t *val)
{
    struct hme *hmep = arg;

    mutex_enter(&hmep->hme_xmitlock);
    if (hmep->hme_flags & HMERUNNING) {
        hmereclaim(hmep);
        hmesavecntrs(hmep);
    }
    mutex_exit(&hmep->hme_xmitlock);

    if (mii_m_getstat(hmep->hme_mii, stat, val) == 0) {
        return (0);
    }
    switch (stat) {
    case MAC_STAT_IPACKETS:
        *val = hmep->hme_ipackets;
        break;
    case MAC_STAT_RBYTES:
        *val = hmep->hme_rbytes;
        break;
    case MAC_STAT_IERRORS:
        *val = hmep->hme_ierrors;
        break;
    case MAC_STAT_OPACKETS:
        *val = hmep->hme_opackets;
        break;
    case MAC_STAT_OBYTES:
        *val = hmep->hme_obytes;
        break;
    case MAC_STAT_OERRORS:
        *val = hmep->hme_oerrors;
        break;
    case MAC_STAT_MULTIRCV:
        *val = hmep->hme_multircv;
        break;
    case MAC_STAT_MULTIXMT:
        *val = hmep->hme_multixmt;
        break;
    case MAC_STAT_BRDCSTRCV:
        *val = hmep->hme_brdcstrcv;
        break;
    case MAC_STAT_BRDCSTXMT:
        *val = hmep->hme_brdcstxmt;
        break;
    case MAC_STAT_UNDERFLOWS:
        *val = hmep->hme_uflo;
        break;
    case MAC_STAT_OVERFLOWS:
        *val = hmep->hme_oflo;
        break;
    case MAC_STAT_COLLISIONS:
        *val = hmep->hme_coll;
        break;
    case MAC_STAT_NORCVBUF:
        *val = hmep->hme_norcvbuf;
        break;
    case MAC_STAT_NOXMTBUF:
        *val = hmep->hme_noxmtbuf;
        break;
    case ETHER_STAT_LINK_DUPLEX:
        *val = hmep->hme_duplex;
        break;
    case ETHER_STAT_ALIGN_ERRORS:
        *val = hmep->hme_align_errors;
        break;
    case ETHER_STAT_FCS_ERRORS:
        *val = hmep->hme_fcs_errors;
        break;
    case ETHER_STAT_EX_COLLISIONS:
        *val = hmep->hme_excol;
        break;
    case ETHER_STAT_DEFER_XMTS:
        *val = hmep->hme_defer_xmts;
        break;
    case ETHER_STAT_SQE_ERRORS:
        *val = hmep->hme_sqe_errors;
        break;
    case ETHER_STAT_FIRST_COLLISIONS:
        *val = hmep->hme_fstcol;
        break;
    case ETHER_STAT_TX_LATE_COLLISIONS:
        *val = hmep->hme_tlcol;
        break;
    case ETHER_STAT_TOOLONG_ERRORS:
        *val = hmep->hme_toolong_errors;
        break;
    case ETHER_STAT_TOOSHORT_ERRORS:
        *val = hmep->hme_runt;
        break;
    case ETHER_STAT_CARRIER_ERRORS:
        *val = hmep->hme_carrier_errors;
        break;
    default:
        return (EINVAL);
    }
    return (0);
}
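/*
 * GLDv3 transmit entry point.  Any mblks that cannot be sent are chained
 * back to the MAC layer, which holds off further transmits until the
 * driver indicates that transmit resources are available again
 * (typically via mac_tx_update() once descriptors are reclaimed).
 */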
(EINVAL); 2157 } 2158 return (0); 2159 } 2160 2161 static mblk_t * 2162 hme_m_tx(void *arg, mblk_t *mp) 2163 { 2164 struct hme *hmep = arg; 2165 mblk_t *next; 2166 2167 while (mp != NULL) { 2168 next = mp->b_next; 2169 mp->b_next = NULL; 2170 if (!hmestart(hmep, mp)) { 2171 mp->b_next = next; 2172 break; 2173 } 2174 mp = next; 2175 } 2176 return (mp); 2177 } 2178 2179 /* 2180 * Software IP checksum, for the edge cases that the 2181 * hardware can't handle. See hmestart for more info. 2182 */ 2183 static uint16_t 2184 hme_cksum(void *data, int len) 2185 { 2186 uint16_t *words = data; 2187 int i, nwords = len / 2; 2188 uint32_t sum = 0; 2189 2190 /* just add up the words */ 2191 for (i = 0; i < nwords; i++) { 2192 sum += *words++; 2193 } 2194 2195 /* pick up residual byte ... assume even half-word allocations */ 2196 if (len % 2) { 2197 sum += (*words & htons(0xff00)); 2198 } 2199 2200 sum = (sum >> 16) + (sum & 0xffff); 2201 sum = (sum >> 16) + (sum & 0xffff); 2202 2203 return (~(sum & 0xffff)); 2204 } 2205 2206 static boolean_t 2207 hmestart(struct hme *hmep, mblk_t *mp) 2208 { 2209 uint32_t len; 2210 boolean_t retval = B_TRUE; 2211 hmebuf_t *tbuf; 2212 uint32_t txptr; 2213 2214 uint32_t csflags = 0; 2215 uint32_t flags; 2216 uint32_t start_offset; 2217 uint32_t stuff_offset; 2218 2219 hcksum_retrieve(mp, NULL, NULL, &start_offset, &stuff_offset, 2220 NULL, NULL, &flags); 2221 2222 if (flags & HCK_PARTIALCKSUM) { 2223 if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) { 2224 start_offset += sizeof (struct ether_header) + 4; 2225 stuff_offset += sizeof (struct ether_header) + 4; 2226 } else { 2227 start_offset += sizeof (struct ether_header); 2228 stuff_offset += sizeof (struct ether_header); 2229 } 2230 csflags = HMETMD_CSENABL | 2231 (start_offset << HMETMD_CSSTART_SHIFT) | 2232 (stuff_offset << HMETMD_CSSTUFF_SHIFT); 2233 } 2234 2235 mutex_enter(&hmep->hme_xmitlock); 2236 2237 if (hmep->hme_flags & HMESUSPENDED) { 2238 hmep->hme_carrier_errors++; 2239 hmep->hme_oerrors++; 2240 goto bad; 2241 } 2242 2243 if (hmep->hme_txindex != hmep->hme_txreclaim) { 2244 hmereclaim(hmep); 2245 } 2246 if ((hmep->hme_txindex - HME_TMDMAX) == hmep->hme_txreclaim) 2247 goto notmds; 2248 txptr = hmep->hme_txindex % HME_TMDMAX; 2249 tbuf = &hmep->hme_tbuf[txptr]; 2250 2251 /* 2252 * Note that for checksum offload, the hardware cannot 2253 * generate correct checksums if the packet is smaller than 2254 * 64-bytes. In such a case, we bcopy the packet and use 2255 * a software checksum. 
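 *
 * (The software path is the hme_cksum() routine above. As a worked
 * example of its final fold: a 32-bit running sum of 0x1abcd folds
 * to 0x1 + 0xabcd = 0xabce, a second fold leaves that unchanged, and
 * the one's complement, 0x5431, is the stored checksum, since
 * 0xabce + 0x5431 == 0xffff.)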
2256 */
2257
2258 len = msgsize(mp);
2259 if (len < 64) {
2260 /* zero fill the padding */
2261 bzero(tbuf->kaddr, 64);
2262 }
2263 mcopymsg(mp, tbuf->kaddr);
2264
2265 if ((csflags != 0) && (len < 64)) {
2266 uint16_t sum;
2267 sum = hme_cksum(tbuf->kaddr + start_offset,
2268 len - start_offset);
2269 bcopy(&sum, tbuf->kaddr + stuff_offset, sizeof (sum));
2270 csflags = 0;
2271 }
2272
2273 if (ddi_dma_sync(tbuf->dmah, 0, len, DDI_DMA_SYNC_FORDEV) ==
2274 DDI_FAILURE) {
2275 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, DDI_MSG,
2276 "ddi_dma_sync failed");
2277 }
2278
2279 /*
2280 * update MIB II statistics
2281 */
2282 BUMP_OutNUcast(hmep, tbuf->kaddr);
2283
2284 PUT_TMD(txptr, tbuf->paddr, len,
2285 HMETMD_OWN | HMETMD_SOP | HMETMD_EOP | csflags);
2286
2287 HMESYNCTMD(txptr, DDI_DMA_SYNC_FORDEV);
2288 hmep->hme_txindex++;
2289
2290 PUT_ETXREG(txpend, HMET_TXPEND_TDMD);
2291 CHECK_ETXREG();
2292
2293 mutex_exit(&hmep->hme_xmitlock);
2294
2295 hmep->hme_starts++;
2296 return (B_TRUE);
2297
2298 bad:
2299 mutex_exit(&hmep->hme_xmitlock);
2300 freemsg(mp);
2301 return (B_TRUE);
2302
2303 notmds:
2304 hmep->hme_notmds++;
2305 hmep->hme_wantw = B_TRUE;
2306 hmereclaim(hmep);
2307 retval = B_FALSE;
2308 done:
2309 mutex_exit(&hmep->hme_xmitlock);
2310
2311 return (retval);
2312 }
2313
2314 /*
2315 * Initialize channel.
2316 * Return 0 on success, nonzero on error.
2317 *
2318 * The recommended sequence for initialization is:
2319 * 1. Issue a Global Reset command to the Ethernet Channel.
2320 * 2. Poll the Global_Reset bits until the execution of the reset has been
2321 * completed.
2322 * 2(a). Use the MIF Frame/Output register to reset the transceiver.
2323 * Poll Register 0 until the Reset bit is 0.
2324 * 2(b). Use the MIF Frame/Output register to set the PHY in Normal-Op,
2325 * 100Mbps and Non-Isolated mode. The main point here is to bring the
2326 * PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2327 * to the MII interface so that the Bigmac core can correctly reset
2328 * upon a software reset.
2329 * 2(c). Issue another Global Reset command to the Ethernet Channel and poll
2330 * the Global_Reset bits until completion.
2331 * 3. Set up all the data structures in the host memory.
2332 * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2333 * Register).
2334 * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2335 * Register).
2336 * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2337 * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2338 * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2339 * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2340 * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2341 * 11. Program the XIF Configuration Register (enable the XIF).
2342 * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2343 * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2344 */
2345
2346
2347 #ifdef FEPS_URUN_BUG
2348 static int hme_palen = 32;
2349 #endif
2350
2351 static int
2352 hmeinit(struct hme *hmep)
2353 {
2354 uint32_t i;
2355 int ret;
2356 boolean_t fdx;
2357 int phyad;
2358
2359 /*
2360 * Lock sequence:
2361 * hme_intrlock, hme_xmitlock.
2362 */
2363 mutex_enter(&hmep->hme_intrlock);
2364
2365 /*
2366 * Don't touch the hardware if we are suspended. But don't
2367 * fail either.
Some time later we may be resumed, and then
2368 * we'll be back here to program the device using the settings
2369 * in the soft state.
2370 */
2371 if (hmep->hme_flags & HMESUSPENDED) {
2372 mutex_exit(&hmep->hme_intrlock);
2373 return (0);
2374 }
2375
2376 /*
2377 * Temporarily mask all interrupts except the MIF interrupt, so
2378 * that we do not clear any interrupts that occur while the
2379 * device is being reprogrammed. The interrupt mask is updated
2380 * again later in this function.
2381 */
2382 PUT_GLOBREG(intmask, ~HMEG_MASK_MIF_INTR);
2383
2384
2385 /*
2386 * Rearranged the mutex acquisition order to solve the deadlock
2387 * situation as described in bug ID 4065896.
2388 */
2389
2390 mutex_enter(&hmep->hme_xmitlock);
2391
2392 hmep->hme_flags = 0;
2393 hmep->hme_wantw = B_FALSE;
2394
2395 if (hmep->inits)
2396 hmesavecntrs(hmep);
2397
2398 /*
2399 * Perform Global reset of the Sbus/FEPS ENET channel.
2400 */
2401 (void) hmestop(hmep);
2402
2403 /*
2404 * Clear all descriptors.
2405 */
2406 bzero(hmep->hme_rmdp, HME_RMDMAX * sizeof (struct hme_rmd));
2407 bzero(hmep->hme_tmdp, HME_TMDMAX * sizeof (struct hme_tmd));
2408
2409 /*
2410 * Hang out receive buffers.
2411 */
2412 for (i = 0; i < HME_RMDMAX; i++) {
2413 PUT_RMD(i, hmep->hme_rbuf[i].paddr);
2414 }
2415
2416 /*
2417 * DMA sync descriptors.
2418 */
2419 (void) ddi_dma_sync(hmep->hme_rmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2420 (void) ddi_dma_sync(hmep->hme_tmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2421
2422 /*
2423 * Reset RMD and TMD 'walking' pointers.
2424 */
2425 hmep->hme_rxindex = 0;
2426 hmep->hme_txindex = hmep->hme_txreclaim = 0;
2427
2428 /*
2429 * This is the right place to initialize MIF !!!
2430 */
2431
2432 PUT_MIFREG(mif_imask, HME_MIF_INTMASK); /* mask all interrupts */
2433
2434 if (!hmep->hme_frame_enable)
2435 PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) | HME_MIF_CFGBB);
2436 else
2437 PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) & ~HME_MIF_CFGBB);
2438 /* enable frame mode */
2439
2440 /*
2441 * Depending on the transceiver detected, select the source
2442 * of the clocks for the MAC. Without the clocks, TX_MAC does
2443 * not reset. When the Global Reset is issued to the Sbus/FEPS
2444 * ASIC, it selects Internal by default.
2445 */
2446
2447 switch ((phyad = mii_get_addr(hmep->hme_mii))) {
2448 case -1:
2449 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG, no_xcvr_msg);
2450 goto init_fail; /* abort initialization */
2451
2452 case HME_INTERNAL_PHYAD:
2453 PUT_MACREG(xifc, 0);
2454 break;
2455 case HME_EXTERNAL_PHYAD:
2456 /* Isolate the Int. xcvr */
2457 PUT_MACREG(xifc, BMAC_XIFC_MIIBUFDIS);
2458 break;
2459 }
2460
2461 hmep->inits++;
2462
2463 /*
2464 * Initialize BigMAC registers.
2465 * First set the tx enable bit in tx config reg to 0 and poll on
2466 * it until it turns to 0. Same for rx config, hash and address
2467 * filter reg.
2468 * Here is the sequence per the spec.
2469 * MADD2 - MAC Address 2
2470 * MADD1 - MAC Address 1
2471 * MADD0 - MAC Address 0
2472 * HASH3, HASH2, HASH1, HASH0 for group address
2473 * AFR2, AFR1, AFR0 and AFMR for address filter mask
2474 * Program RXMIN and RXMAX for packet length if not 802.3
2475 * RXCFG - Rx config for not stripping CRC
2476 * XXX Anything else to hme configured in RXCFG
2477 * IPG1, IPG2, ALIMIT, SLOT, PALEN, PAPAT, TXSFD, JAM, TXMAX, TXMIN
2478 * if not 802.3 compliant
2479 * XIF register for speed selection
2480 * MASK - Interrupt mask
2481 * Set bit 0 of TXCFG
2482 * Set bit 0 of RXCFG
2483 */
2484
2485 /*
2486 * Initialize the TX_MAC registers.
2487 * jamsize is initialized to work around the rx crc bug.
2488 */
2489 PUT_MACREG(jam, jamsize);
2490
2491 #ifdef FEPS_URUN_BUG
2492 if (hme_urun_fix)
2493 PUT_MACREG(palen, hme_palen);
2494 #endif
2495
2496 PUT_MACREG(ipg1, hmep->hme_ipg1);
2497 PUT_MACREG(ipg2, hmep->hme_ipg2);
2498
2499 PUT_MACREG(rseed,
2500 ((hmep->hme_ouraddr.ether_addr_octet[0] << 8) & 0x3) |
2501 hmep->hme_ouraddr.ether_addr_octet[1]);
2502
2503 /* Initialize the RX_MAC registers */
2504
2505 /*
2506 * Program BigMAC with local individual ethernet address.
2507 */
2508 PUT_MACREG(madd2, (hmep->hme_ouraddr.ether_addr_octet[4] << 8) |
2509 hmep->hme_ouraddr.ether_addr_octet[5]);
2510 PUT_MACREG(madd1, (hmep->hme_ouraddr.ether_addr_octet[2] << 8) |
2511 hmep->hme_ouraddr.ether_addr_octet[3]);
2512 PUT_MACREG(madd0, (hmep->hme_ouraddr.ether_addr_octet[0] << 8) |
2513 hmep->hme_ouraddr.ether_addr_octet[1]);
2514
2515 /*
2516 * Set up the multicast address filter by passing all multicast
2517 * addresses through a crc generator, and then using six bits of
2518 * the result as an index into the 64-bit logical
2519 * address filter. The high order two bits select the word,
2520 * while the low order four bits select the bit within the word.
2521 */
2522 PUT_MACREG(hash0, hmep->hme_ladrf[0]);
2523 PUT_MACREG(hash1, hmep->hme_ladrf[1]);
2524 PUT_MACREG(hash2, hmep->hme_ladrf[2]);
2525 PUT_MACREG(hash3, hmep->hme_ladrf[3]);
2526
2527 /*
2528 * Configure parameters to support VLAN. (VLAN encapsulation adds
2529 * four bytes.)
2530 */
2531 PUT_MACREG(txmax, ETHERMAX + ETHERFCSL + 4);
2532 PUT_MACREG(rxmax, ETHERMAX + ETHERFCSL + 4);
2533
2534 /*
2535 * Initialize HME Global registers, ETX registers and ERX registers.
2536 */
2537
2538 PUT_ETXREG(txring, hmep->hme_tmd_paddr);
2539 PUT_ERXREG(rxring, hmep->hme_rmd_paddr);
2540
2541 /*
2542 * ERX registers can be written only if they have an even number of
2543 * bits set. So, if the value written is not read back, set the lsb
2544 * and write again.
2545 * static int hme_erx_fix = 1; : Use the fix for erx bug
2546 */
2547 {
2548 uint32_t temp;
2549 temp = hmep->hme_rmd_paddr;
2550
2551 if (GET_ERXREG(rxring) != temp)
2552 PUT_ERXREG(rxring, (temp | 4));
2553 }
2554
2555 PUT_GLOBREG(config, (hmep->hme_config |
2556 (hmep->hme_64bit_xfer << HMEG_CONFIG_64BIT_SHIFT)));
2557
2558 /*
2559 * Significant performance improvements can be achieved by
2560 * disabling transmit interrupt. Thus TMD's are reclaimed only
2561 * when we run out of them in hmestart().
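 *
 * In other words, TINT normally stays masked; hmereclaim() unmasks
 * the transmit interrupts only when hmestart() has run out of TMDs
 * and set hme_wantw, as in hmereclaim() below:
 *
 *     PUT_GLOBREG(intmask,
 *         GET_GLOBREG(intmask) & ~HMEG_MASK_TX_ALL);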
2562 */
2563 PUT_GLOBREG(intmask,
2564 HMEG_MASK_INTR | HMEG_MASK_TINT | HMEG_MASK_TX_ALL);
2565
2566 PUT_ETXREG(txring_size, ((HME_TMDMAX - 1) >> HMET_RINGSZ_SHIFT));
2567 PUT_ETXREG(config, (GET_ETXREG(config) | HMET_CONFIG_TXDMA_EN
2568 | HMET_CONFIG_TXFIFOTH));
2569 /* get the rxring size bits */
2570 switch (HME_RMDMAX) {
2571 case 32:
2572 i = HMER_CONFIG_RXRINGSZ32;
2573 break;
2574 case 64:
2575 i = HMER_CONFIG_RXRINGSZ64;
2576 break;
2577 case 128:
2578 i = HMER_CONFIG_RXRINGSZ128;
2579 break;
2580 case 256:
2581 i = HMER_CONFIG_RXRINGSZ256;
2582 break;
2583 default:
2584 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2585 unk_rx_ringsz_msg);
2586 goto init_fail;
2587 }
2588 i |= (HME_FSTBYTE_OFFSET << HMER_CONFIG_FBO_SHIFT)
2589 | HMER_CONFIG_RXDMA_EN;
2590
2591 /* h/w checks start offset in half words */
2592 i |= ((sizeof (struct ether_header) / 2) << HMER_RX_CSSTART_SHIFT);
2593
2594 PUT_ERXREG(config, i);
2595
2596 /*
2597 * Work around a bug in the parity handling in ERX: when
2598 * erxp->config is read back,
2599 * Sbus/FEPS drives the parity bit, and that value is then used
2600 * when writing the register again.
2601 * This fixes the RECV problem on the SS5.
2602 * static int hme_erx_fix = 1; : Use the fix for erx bug
2603 */
2604 {
2605 uint32_t temp;
2606 temp = GET_ERXREG(config);
2607 PUT_ERXREG(config, i);
2608
2609 if (GET_ERXREG(config) != i)
2610 HME_FAULT_MSG4(hmep, SEVERITY_UNKNOWN, ERX_MSG,
2611 "error:temp = %x erxp->config = %x, should be %x",
2612 temp, GET_ERXREG(config), i);
2613 }
2614
2615 /*
2616 * Set up the rxconfig, txconfig and seed registers, without
2617 * enabling the former two at this time.
2618 *
2619 * BigMAC strips the CRC bytes by default. Since this is
2620 * contrary to other pieces of hardware, this bit needs to be
2621 * enabled to tell BigMAC not to strip the CRC bytes.
2622 * Do not filter this node's own packets.
2623 */
2624
2625 if (hme_reject_own) {
2626 PUT_MACREG(rxcfg,
2627 ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
2628 BMAC_RXCFG_MYOWN | BMAC_RXCFG_HASH));
2629 } else {
2630 PUT_MACREG(rxcfg,
2631 ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
2632 BMAC_RXCFG_HASH));
2633 }
2634
2635 drv_usecwait(10); /* wait after setting Hash Enable bit */
2636
2637 fdx = (mii_get_duplex(hmep->hme_mii) == LINK_DUPLEX_FULL);
2638
2639 if (hme_ngu_enable)
2640 PUT_MACREG(txcfg, (fdx ? BMAC_TXCFG_FDX : 0) |
2641 BMAC_TXCFG_NGU);
2642 else
2643 PUT_MACREG(txcfg, (fdx ? BMAC_TXCFG_FDX : 0));
2644
2645 i = 0;
2646 if ((hmep->hme_lance_mode) && (hmep->hme_lance_mode_enable))
2647 i = ((hmep->hme_ipg0 & HME_MASK_5BIT) << BMAC_XIFC_IPG0_SHIFT)
2648 | BMAC_XIFC_LANCE_ENAB;
2649 if (phyad == HME_INTERNAL_PHYAD)
2650 PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB));
2651 else
2652 PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB | BMAC_XIFC_MIIBUFDIS));
2653
2654 PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
2655 PUT_MACREG(txcfg, GET_MACREG(txcfg) | BMAC_TXCFG_ENAB);
2656
2657 hmep->hme_flags |= (HMERUNNING | HMEINITIALIZED);
2658 /*
2659 * Update the interrupt mask: this re-allows interrupts to occur.
2660 */
2661 PUT_GLOBREG(intmask, HMEG_MASK_INTR);
2662 mac_tx_update(hmep->hme_mh);
2663
2664 init_fail:
2665 /*
2666 * Release the locks in reverse order.
2667 */
2668 mutex_exit(&hmep->hme_xmitlock);
2669 mutex_exit(&hmep->hme_intrlock);
2670
2671 ret = !(hmep->hme_flags & HMERUNNING);
2672 if (ret) {
2673 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2674 init_fail_gen_msg);
2675 }
2676
2677 /*
2678 * Hardware checks.
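 *
 * (The CHECK_*REG() macros presumably funnel into
 * hme_check_acc_handle() -- note its (file, line, hmep, handle)
 * signature and the empty stub at the bottom of this file. A sketch
 * of what such a check conventionally does, assuming DDI
 * access-fault checking, would be:
 *
 *     if (ddi_check_acc_handle(handle) != DDI_SUCCESS)
 *         HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, DDI_MSG,
 *             "register access fault");
 *
 * This is illustrative only, not the macros' actual definition.)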
2679 */
2680 CHECK_GLOBREG();
2681 CHECK_MIFREG();
2682 CHECK_MACREG();
2683 CHECK_ERXREG();
2684 CHECK_ETXREG();
2685
2686 init_exit:
2687 return (ret);
2688 }
2689
2690 /*
2691 * Calculate the dvma burstsize by setting up a dvma temporarily. Return
2692 * 0 as burstsize upon failure as it signifies no burst size.
2693 * Requests 64-bit transfer setup if the platform supports it.
2694 * NOTE: Do not use ddi_dma_alloc_handle(9f) then ddi_dma_burstsize(9f);
2695 * sun4u Ultra-2 incorrectly returns a 32-bit transfer.
2696 */
2697 static int
2698 hmeburstsizes(struct hme *hmep)
2699 {
2700 int burstsizes;
2701 ddi_dma_handle_t handle;
2702
2703 if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
2704 DDI_DMA_DONTWAIT, NULL, &handle)) {
2705 return (0);
2706 }
2707
2708 hmep->hme_burstsizes = burstsizes = ddi_dma_burstsizes(handle);
2709 ddi_dma_free_handle(&handle);
2710
2711 /*
2712 * Use the user-configurable parameter for enabling 64-bit transfers.
2713 */
2714 burstsizes = (hmep->hme_burstsizes >> 16);
2715 if (burstsizes)
2716 hmep->hme_64bit_xfer = hme_64bit_enable; /* user config value */
2717 else
2718 burstsizes = hmep->hme_burstsizes;
2719
2720 if (hmep->hme_cheerio_mode)
2721 hmep->hme_64bit_xfer = 0; /* Disable for cheerio */
2722
2723 if (burstsizes & 0x40)
2724 hmep->hme_config = HMEG_CONFIG_BURST64;
2725 else if (burstsizes & 0x20)
2726 hmep->hme_config = HMEG_CONFIG_BURST32;
2727 else
2728 hmep->hme_config = HMEG_CONFIG_BURST16;
2729
2730 return (DDI_SUCCESS);
2731 }
2732
2733 static int
2734 hmeallocbuf(struct hme *hmep, hmebuf_t *buf, int dir)
2735 {
2736 ddi_dma_cookie_t dmac;
2737 size_t len;
2738 unsigned ccnt;
2739
2740 if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
2741 DDI_DMA_DONTWAIT, NULL, &buf->dmah) != DDI_SUCCESS) {
2742 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2743 "cannot allocate buf dma handle - failed");
2744 return (DDI_FAILURE);
2745 }
2746
2747 if (ddi_dma_mem_alloc(buf->dmah, ROUNDUP(HMEBUFSIZE, 512),
2748 &hme_buf_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
2749 &buf->kaddr, &len, &buf->acch) != DDI_SUCCESS) {
2750 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2751 "cannot allocate buf memory - failed");
2752 return (DDI_FAILURE);
2753 }
2754
2755 if (ddi_dma_addr_bind_handle(buf->dmah, NULL, buf->kaddr,
2756 len, dir | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
2757 &dmac, &ccnt) != DDI_DMA_MAPPED) {
2758 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2759 "cannot map buf for dma - failed");
2760 return (DDI_FAILURE);
2761 }
2762 buf->paddr = dmac.dmac_address;
2763
2764 /* apparently the hardware cannot handle multiple cookies */
2765 if (ccnt > 1) {
2766 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2767 "too many buf dma cookies");
2768 return (DDI_FAILURE);
2769 }
2770 return (DDI_SUCCESS);
2771 }
2772
2773 static int
2774 hmeallocbufs(struct hme *hmep)
2775 {
2776 hmep->hme_tbuf = kmem_zalloc(HME_TMDMAX * sizeof (hmebuf_t), KM_SLEEP);
2777 hmep->hme_rbuf = kmem_zalloc(HME_RMDMAX * sizeof (hmebuf_t), KM_SLEEP);
2778
2779 /* Alloc RX buffers. */
2780 for (int i = 0; i < HME_RMDMAX; i++) {
2781 if (hmeallocbuf(hmep, &hmep->hme_rbuf[i], DDI_DMA_READ) !=
2782 DDI_SUCCESS) {
2783 return (DDI_FAILURE);
2784 }
2785 }
2786
2787 /* Alloc TX buffers.
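 * Same handle -> memory -> bind sequence as the RX loop above, but
 * with DDI_DMA_WRITE, since the device only reads transmit buffers.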
*/
2788 for (int i = 0; i < HME_TMDMAX; i++) {
2789 if (hmeallocbuf(hmep, &hmep->hme_tbuf[i], DDI_DMA_WRITE) !=
2790 DDI_SUCCESS) {
2791 return (DDI_FAILURE);
2792 }
2793 }
2794 return (DDI_SUCCESS);
2795 }
2796
2797 static void
2798 hmefreebufs(struct hme *hmep)
2799 {
2800 int i;
2801
2802 if (hmep->hme_rbuf == NULL)
2803 return;
2804
2805 /*
2806 * Free and unload pending xmit and recv buffers, maintaining
2807 * the ordered unbind/free sequence for each one.
2808 * The routine is written to be idempotent.
2809 */
2810
2811 for (i = 0; i < HME_TMDMAX; i++) {
2812 hmebuf_t *tbuf = &hmep->hme_tbuf[i];
2813 if (tbuf->paddr) {
2814 (void) ddi_dma_unbind_handle(tbuf->dmah);
2815 }
2816 if (tbuf->kaddr) {
2817 ddi_dma_mem_free(&tbuf->acch);
2818 }
2819 if (tbuf->dmah) {
2820 ddi_dma_free_handle(&tbuf->dmah);
2821 }
2822 }
2823 for (i = 0; i < HME_RMDMAX; i++) {
2824 hmebuf_t *rbuf = &hmep->hme_rbuf[i];
2825 if (rbuf->paddr) {
2826 (void) ddi_dma_unbind_handle(rbuf->dmah);
2827 }
2828 if (rbuf->kaddr) {
2829 ddi_dma_mem_free(&rbuf->acch);
2830 }
2831 if (rbuf->dmah) {
2832 ddi_dma_free_handle(&rbuf->dmah);
2833 }
2834 }
2835 kmem_free(hmep->hme_rbuf, HME_RMDMAX * sizeof (hmebuf_t));
2836 kmem_free(hmep->hme_tbuf, HME_TMDMAX * sizeof (hmebuf_t));
2837 }
2838
2839 /*
2840 * Un-initialize (STOP) HME channel.
2841 */
2842 static void
2843 hmeuninit(struct hme *hmep)
2844 {
2845 /*
2846 * Allow up to 'HMEDRAINTIME' for pending xmits to complete.
2847 */
2848 HMEDELAY((hmep->hme_txindex == hmep->hme_txreclaim), HMEDRAINTIME);
2849
2850 mutex_enter(&hmep->hme_intrlock);
2851 mutex_enter(&hmep->hme_xmitlock);
2852
2853 hmep->hme_flags &= ~HMERUNNING;
2854
2855 (void) hmestop(hmep);
2856
2857 mutex_exit(&hmep->hme_xmitlock);
2858 mutex_exit(&hmep->hme_intrlock);
2859 }
2860
2861 /*
2862 * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment,
2863 * and map it into IO space. Allocate space for transmit and receive
2864 * ddi_dma_handle structures to use the DMA interface.
2865 */
2866 static int
2867 hmeallocthings(struct hme *hmep)
2868 {
2869 int size;
2870 int rval;
2871 size_t real_len;
2872 uint_t cookiec;
2873 ddi_dma_cookie_t dmac;
2874 dev_info_t *dip = hmep->dip;
2875
2876 /*
2877 * Allocate the TMD and RMD descriptors and extra for page alignment.
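 *
 * Each ring must bind to a single DMA cookie (cookiec == 1 is
 * enforced below), since the hardware takes one base address per
 * ring. The size of a ring is simply entries times descriptor size,
 * for example:
 *
 *     size = HME_RMDMAX * sizeof (struct hme_rmd);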
2878 */
2879
2880 rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
2881 &hmep->hme_rmd_dmah);
2882 if (rval != DDI_SUCCESS) {
2883 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2884 "cannot allocate rmd handle - failed");
2885 return (DDI_FAILURE);
2886 }
2887 size = HME_RMDMAX * sizeof (struct hme_rmd);
2888 rval = ddi_dma_mem_alloc(hmep->hme_rmd_dmah, size,
2889 &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
2890 &hmep->hme_rmd_kaddr, &real_len, &hmep->hme_rmd_acch);
2891 if (rval != DDI_SUCCESS) {
2892 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2893 "cannot allocate rmd dma mem - failed");
2894 return (DDI_FAILURE);
2895 }
2896 hmep->hme_rmdp = (void *)(hmep->hme_rmd_kaddr);
2897 rval = ddi_dma_addr_bind_handle(hmep->hme_rmd_dmah, NULL,
2898 hmep->hme_rmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2899 DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
2900 if (rval != DDI_DMA_MAPPED) {
2901 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2902 "cannot allocate rmd dma - failed");
2903 return (DDI_FAILURE);
2904 }
2905 hmep->hme_rmd_paddr = dmac.dmac_address;
2906 if (cookiec != 1) {
2907 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2908 "too many rmd cookies - failed");
2909 return (DDI_FAILURE);
2910 }
2911
2912 rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
2913 &hmep->hme_tmd_dmah);
2914 if (rval != DDI_SUCCESS) {
2915 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2916 "cannot allocate tmd handle - failed");
2917 return (DDI_FAILURE);
2918 }
2919 size = HME_TMDMAX * sizeof (struct hme_tmd);
2920 rval = ddi_dma_mem_alloc(hmep->hme_tmd_dmah, size,
2921 &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
2922 &hmep->hme_tmd_kaddr, &real_len, &hmep->hme_tmd_acch);
2923 if (rval != DDI_SUCCESS) {
2924 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2925 "cannot allocate tmd dma mem - failed");
2926 return (DDI_FAILURE);
2927 }
2928 hmep->hme_tmdp = (void *)(hmep->hme_tmd_kaddr);
2929 rval = ddi_dma_addr_bind_handle(hmep->hme_tmd_dmah, NULL,
2930 hmep->hme_tmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2931 DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
2932 if (rval != DDI_DMA_MAPPED) {
2933 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2934 "cannot allocate tmd dma - failed");
2935 return (DDI_FAILURE);
2936 }
2937 hmep->hme_tmd_paddr = dmac.dmac_address;
2938 if (cookiec != 1) {
2939 HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2940 "too many tmd cookies - failed");
2941 return (DDI_FAILURE);
2942 }
2943
2944 return (DDI_SUCCESS);
2945 }
2946
2947 static void
2948 hmefreethings(struct hme *hmep)
2949 {
2950 if (hmep->hme_rmd_paddr) {
2951 (void) ddi_dma_unbind_handle(hmep->hme_rmd_dmah);
2952 hmep->hme_rmd_paddr = 0;
2953 }
2954 if (hmep->hme_rmd_acch)
2955 ddi_dma_mem_free(&hmep->hme_rmd_acch);
2956 if (hmep->hme_rmd_dmah)
2957 ddi_dma_free_handle(&hmep->hme_rmd_dmah);
2958
2959 if (hmep->hme_tmd_paddr) {
2960 (void) ddi_dma_unbind_handle(hmep->hme_tmd_dmah);
2961 hmep->hme_tmd_paddr = 0;
2962 }
2963 if (hmep->hme_tmd_acch)
2964 ddi_dma_mem_free(&hmep->hme_tmd_acch);
2965 if (hmep->hme_tmd_dmah)
2966 ddi_dma_free_handle(&hmep->hme_tmd_dmah);
2967 }
2968
2969 /*
2970 * First check to see if it is our device that is interrupting.
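 *
 * The handler returns DDI_INTR_CLAIMED when the status register
 * shows one of our interrupt bits (or for the first interrupt after
 * initialization; see the bugid 1227832 note below), and
 * DDI_INTR_UNCLAIMED otherwise, so that the framework can offer the
 * interrupt to any other device sharing the line.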
2971 */
2972 static uint_t
2973 hmeintr(caddr_t arg)
2974 {
2975 struct hme *hmep = (void *)arg;
2976 uint32_t hmesbits;
2977 uint32_t serviced = DDI_INTR_UNCLAIMED;
2978 uint32_t num_reads = 0;
2979 uint32_t rflags;
2980 mblk_t *mp, *head, **tail;
2981
2982
2983 head = NULL;
2984 tail = &head;
2985
2986 mutex_enter(&hmep->hme_intrlock);
2987
2988 /*
2989 * The status register auto-clears on read except for the
2990 * MIF Interrupt bit.
2991 */
2992 hmesbits = GET_GLOBREG(status);
2993 CHECK_GLOBREG();
2994
2995 /*
2996 * Note: TINT is sometimes enabled in hmereclaim().
2997 */
2998
2999 /*
3000 * Bugid 1227832 - to handle spurious interrupts on fusion systems,
3001 * claim the first interrupt after initialization.
3002 */
3003 if (hmep->hme_flags & HMEINITIALIZED) {
3004 hmep->hme_flags &= ~HMEINITIALIZED;
3005 serviced = DDI_INTR_CLAIMED;
3006 }
3007
3008 if ((hmesbits & (HMEG_STATUS_INTR | HMEG_STATUS_TINT)) == 0) {
3009 /* No interesting interrupt */
3010 if (hmep->hme_intrstats) {
3011 if (serviced == DDI_INTR_UNCLAIMED)
3012 KIOIP->intrs[KSTAT_INTR_SPURIOUS]++;
3013 else
3014 KIOIP->intrs[KSTAT_INTR_HARD]++;
3015 }
3016 mutex_exit(&hmep->hme_intrlock);
3017 return (serviced);
3018 }
3019
3020 serviced = DDI_INTR_CLAIMED;
3021
3022 if (!(hmep->hme_flags & HMERUNNING)) {
3023 if (hmep->hme_intrstats)
3024 KIOIP->intrs[KSTAT_INTR_HARD]++;
3025 mutex_exit(&hmep->hme_intrlock);
3026 hmeuninit(hmep);
3027 return (serviced);
3028 }
3029
3030 if (hmesbits & (HMEG_STATUS_FATAL_ERR | HMEG_STATUS_NONFATAL_ERR)) {
3031 if (hmesbits & HMEG_STATUS_FATAL_ERR) {
3032
3033 if (hmep->hme_intrstats)
3034 KIOIP->intrs[KSTAT_INTR_HARD]++;
3035 hme_fatal_err(hmep, hmesbits);
3036
3037 mutex_exit(&hmep->hme_intrlock);
3038 (void) hmeinit(hmep);
3039 return (serviced);
3040 }
3041 hme_nonfatal_err(hmep, hmesbits);
3042 }
3043
3044 if (hmesbits & (HMEG_STATUS_TX_ALL | HMEG_STATUS_TINT)) {
3045 mutex_enter(&hmep->hme_xmitlock);
3046
3047 hmereclaim(hmep);
3048 mutex_exit(&hmep->hme_xmitlock);
3049 }
3050
3051 if (hmesbits & HMEG_STATUS_RINT) {
3052
3053 /*
3054 * This dummy PIO is required to flush the SBus
3055 * Bridge buffers in QFE.
3056 */
3057 (void) GET_GLOBREG(config);
3058
3059 /*
3060 * Loop through each RMD no more than once.
3061 */
3062 while (num_reads++ < HME_RMDMAX) {
3063 hmebuf_t *rbuf;
3064 int rxptr;
3065
3066 rxptr = hmep->hme_rxindex % HME_RMDMAX;
3067 HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORKERNEL);
3068
3069 rflags = GET_RMD_FLAGS(rxptr);
3070 if (rflags & HMERMD_OWN) {
3071 /*
3072 * Chip still owns it. We're done.
3073 */
3074 break;
3075 }
3076
3077 /*
3078 * Retrieve the packet.
3079 */
3080 rbuf = &hmep->hme_rbuf[rxptr];
3081 mp = hmeread(hmep, rbuf, rflags);
3082
3083 /*
3084 * Return ownership of the RMD.
3085 */
3086 PUT_RMD(rxptr, rbuf->paddr);
3087 HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORDEV);
3088
3089 if (mp != NULL) {
3090 *tail = mp;
3091 tail = &mp->b_next;
3092 }
3093
3094 /*
3095 * Advance to the next RMD.
3096 */
3097 hmep->hme_rxindex++;
3098 }
3099 }
3100
3101 if (hmep->hme_intrstats)
3102 KIOIP->intrs[KSTAT_INTR_HARD]++;
3103
3104 mutex_exit(&hmep->hme_intrlock);
3105
3106 if (head != NULL)
3107 mac_rx(hmep->hme_mh, NULL, head);
3108
3109 return (serviced);
3110 }
3111
3112 /*
3113 * Transmit completion reclaiming.
3114 */
3115 static void
3116 hmereclaim(struct hme *hmep)
3117 {
3118 boolean_t reclaimed = B_FALSE;
3119
3120 /*
3121 * Loop through each TMD.
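 *
 * hme_txindex and hme_txreclaim only ever increase; the ring slot is
 * the counter modulo HME_TMDMAX. For example, if the ring held 64
 * TMDs, txreclaim == 126 and txindex == 130 would mean descriptors
 * 126..129 (ring slots 62, 63, 0 and 1) are still outstanding.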
3122 */
3123 while (hmep->hme_txindex > hmep->hme_txreclaim) {
3124
3125 int reclaim;
3126 uint32_t flags;
3127
3128 reclaim = hmep->hme_txreclaim % HME_TMDMAX;
3129 HMESYNCTMD(reclaim, DDI_DMA_SYNC_FORKERNEL);
3130
3131 flags = GET_TMD_FLAGS(reclaim);
3132 if (flags & HMETMD_OWN) {
3133 /*
3134 * Chip still owns it. We're done.
3135 */
3136 break;
3137 }
3138
3139 /*
3140 * Count a chained packet only once.
3141 */
3142 if (flags & HMETMD_SOP) {
3143 hmep->hme_opackets++;
3144 }
3145
3146 /*
3147 * MIB II
3148 */
3149 hmep->hme_obytes += flags & HMETMD_BUFSIZE;
3150
3151 reclaimed = B_TRUE;
3152 hmep->hme_txreclaim++;
3153 }
3154
3155 if (reclaimed) {
3156 /*
3157 * We could reclaim some TMDs, so turn transmit interrupts off.
3158 */
3159 if (hmep->hme_wantw) {
3160 PUT_GLOBREG(intmask,
3161 HMEG_MASK_INTR | HMEG_MASK_TINT |
3162 HMEG_MASK_TX_ALL);
3163 hmep->hme_wantw = B_FALSE;
3164 mac_tx_update(hmep->hme_mh);
3165 }
3166 } else {
3167 /*
3168 * Enable TINT so that even if there is no further activity
3169 * hmereclaim() will get called.
3170 */
3171 if (hmep->hme_wantw)
3172 PUT_GLOBREG(intmask,
3173 GET_GLOBREG(intmask) & ~HMEG_MASK_TX_ALL);
3174 }
3175 CHECK_GLOBREG();
3176 }
3177
3178 /*
3179 * Handle interrupts for fatal errors.
3180 * Need reinitialization of the ENET channel.
3181 */
3182 static void
3183 hme_fatal_err(struct hme *hmep, uint_t hmesbits)
3184 {
3185
3186 if (hmesbits & HMEG_STATUS_SLV_PAR_ERR) {
3187 hmep->hme_slvparerr++;
3188 }
3189
3190 if (hmesbits & HMEG_STATUS_SLV_ERR_ACK) {
3191 hmep->hme_slverrack++;
3192 }
3193
3194 if (hmesbits & HMEG_STATUS_TX_TAG_ERR) {
3195 hmep->hme_txtagerr++;
3196 hmep->hme_oerrors++;
3197 }
3198
3199 if (hmesbits & HMEG_STATUS_TX_PAR_ERR) {
3200 hmep->hme_txparerr++;
3201 hmep->hme_oerrors++;
3202 }
3203
3204 if (hmesbits & HMEG_STATUS_TX_LATE_ERR) {
3205 hmep->hme_txlaterr++;
3206 hmep->hme_oerrors++;
3207 }
3208
3209 if (hmesbits & HMEG_STATUS_TX_ERR_ACK) {
3210 hmep->hme_txerrack++;
3211 hmep->hme_oerrors++;
3212 }
3213
3214 if (hmesbits & HMEG_STATUS_EOP_ERR) {
3215 hmep->hme_eoperr++;
3216 }
3217
3218 if (hmesbits & HMEG_STATUS_RX_TAG_ERR) {
3219 hmep->hme_rxtagerr++;
3220 hmep->hme_ierrors++;
3221 }
3222
3223 if (hmesbits & HMEG_STATUS_RX_PAR_ERR) {
3224 hmep->hme_rxparerr++;
3225 hmep->hme_ierrors++;
3226 }
3227
3228 if (hmesbits & HMEG_STATUS_RX_LATE_ERR) {
3229 hmep->hme_rxlaterr++;
3230 hmep->hme_ierrors++;
3231 }
3232
3233 if (hmesbits & HMEG_STATUS_RX_ERR_ACK) {
3234 hmep->hme_rxerrack++;
3235 hmep->hme_ierrors++;
3236 }
3237 }
3238
3239 /*
3240 * Handle interrupts regarding non-fatal errors.
3241 */
3242 static void
3243 hme_nonfatal_err(struct hme *hmep, uint_t hmesbits)
3244 {
3245
3246 if (hmesbits & HMEG_STATUS_RX_DROP) {
3247 hmep->hme_missed++;
3248 hmep->hme_ierrors++;
3249 }
3250
3251 if (hmesbits & HMEG_STATUS_DEFTIMR_EXP) {
3252 hmep->hme_defer_xmts++;
3253 }
3254
3255 if (hmesbits & HMEG_STATUS_FSTCOLC_EXP) {
3256 hmep->hme_fstcol += 256;
3257 }
3258
3259 if (hmesbits & HMEG_STATUS_LATCOLC_EXP) {
3260 hmep->hme_tlcol += 256;
3261 hmep->hme_oerrors += 256;
3262 }
3263
3264 if (hmesbits & HMEG_STATUS_EXCOLC_EXP) {
3265 hmep->hme_excol += 256;
3266 hmep->hme_oerrors += 256;
3267 }
3268
3269 if (hmesbits & HMEG_STATUS_NRMCOLC_EXP) {
3270 hmep->hme_coll += 256;
3271 }
3272
3273 if (hmesbits & HMEG_STATUS_MXPKTSZ_ERR) {
3274 hmep->hme_babl++;
3275 hmep->hme_oerrors++;
3276 }
3277
3278 /*
3279 * This error is fatal and the board needs to
3280 * be reinitialized. Comments?
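 *
 * (On the *_EXP status bits handled above: the MAC apparently keeps
 * 8-bit event counters that raise a status bit on rollover, which is
 * why each such bit is accounted for as 256 events.)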
3281 */
3282 if (hmesbits & HMEG_STATUS_TXFIFO_UNDR) {
3283 hmep->hme_uflo++;
3284 hmep->hme_oerrors++;
3285 }
3286
3287 if (hmesbits & HMEG_STATUS_SQE_TST_ERR) {
3288 hmep->hme_sqe_errors++;
3289 }
3290
3291 if (hmesbits & HMEG_STATUS_RCV_CNT_EXP) {
3292 if (hmep->hme_rxcv_enable) {
3293 hmep->hme_cvc += 256;
3294 }
3295 }
3296
3297 if (hmesbits & HMEG_STATUS_RXFIFO_OVFL) {
3298 hmep->hme_oflo++;
3299 hmep->hme_ierrors++;
3300 }
3301
3302 if (hmesbits & HMEG_STATUS_LEN_CNT_EXP) {
3303 hmep->hme_lenerr += 256;
3304 hmep->hme_ierrors += 256;
3305 }
3306
3307 if (hmesbits & HMEG_STATUS_ALN_CNT_EXP) {
3308 hmep->hme_align_errors += 256;
3309 hmep->hme_ierrors += 256;
3310 }
3311
3312 if (hmesbits & HMEG_STATUS_CRC_CNT_EXP) {
3313 hmep->hme_fcs_errors += 256;
3314 hmep->hme_ierrors += 256;
3315 }
3316 }
3317
3318 static mblk_t *
3319 hmeread(struct hme *hmep, hmebuf_t *rbuf, uint32_t rflags)
3320 {
3321 mblk_t *bp;
3322 uint32_t len;
3323 t_uscalar_t type;
3324
3325 len = (rflags & HMERMD_BUFSIZE) >> HMERMD_BUFSIZE_SHIFT;
3326
3327 /*
3328 * Check for a short packet, and also for an overflowed or
3329 * overlong packet. The processing is the same in all of these
3330 * cases: reuse the buffer and update the appropriate error
3331 * counters.
3332 */
3333 if ((len < ETHERMIN) || (rflags & HMERMD_OVFLOW) ||
3334 (len > (ETHERMAX + 4))) {
3335 if (len < ETHERMIN)
3336 hmep->hme_runt++;
3337
3338 else {
3339 hmep->hme_buff++;
3340 hmep->hme_toolong_errors++;
3341 }
3342 hmep->hme_ierrors++;
3343 return (NULL);
3344 }
3345
3346 /*
3347 * Sync the received buffer before looking at it.
3348 */
3349
3350 (void) ddi_dma_sync(rbuf->dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL);
3351
3352 /*
3353 * Copy the packet data and then recycle the descriptor.
3354 */
3355
3356 if ((bp = allocb(len + HME_FSTBYTE_OFFSET, BPRI_HI)) == NULL) {
3357
3358 hmep->hme_allocbfail++;
3359 hmep->hme_norcvbuf++;
3360
3361 return (NULL);
3362 }
3363
3364 bcopy(rbuf->kaddr, bp->b_rptr, len + HME_FSTBYTE_OFFSET);
3365
3366 hmep->hme_ipackets++;
3367
3368 /* Advance b_rptr past the First Byte offset */
3369 bp->b_rptr += HME_FSTBYTE_OFFSET;
3370 bp->b_wptr = bp->b_rptr + len;
3371
3372 /*
3373 * update MIB II statistics
3374 */
3375 BUMP_InNUcast(hmep, bp->b_rptr);
3376 hmep->hme_rbytes += len;
3377
3378 type = get_ether_type(bp->b_rptr);
3379
3380 /*
3381 * TCP partial checksum in hardware
3382 */
3383 if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {
3384 uint16_t cksum = ~rflags & HMERMD_CKSUM;
3385 uint_t end = len - sizeof (struct ether_header);
3386 (void) hcksum_assoc(bp, NULL, NULL, 0,
3387 0, end, htons(cksum), HCK_PARTIALCKSUM, 0);
3388 }
3389
3390 return (bp);
3391 }
3392
3393 /*VARARGS*/
3394 static void
3395 hme_fault_msg(struct hme *hmep, uint_t severity, msg_t type, char *fmt, ...)
3396 { 3397 char msg_buffer[255]; 3398 va_list ap; 3399 3400 va_start(ap, fmt); 3401 (void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap); 3402 3403 if (hmep == NULL) { 3404 cmn_err(CE_NOTE, "hme : %s", msg_buffer); 3405 3406 } else if (type == DISPLAY_MSG) { 3407 cmn_err(CE_CONT, "?%s%d : %s\n", ddi_driver_name(hmep->dip), 3408 hmep->instance, msg_buffer); 3409 } else if (severity == SEVERITY_HIGH) { 3410 cmn_err(CE_WARN, "%s%d : %s, SEVERITY_HIGH, %s\n", 3411 ddi_driver_name(hmep->dip), hmep->instance, 3412 msg_buffer, msg_string[type]); 3413 } else { 3414 cmn_err(CE_CONT, "%s%d : %s\n", ddi_driver_name(hmep->dip), 3415 hmep->instance, msg_buffer); 3416 } 3417 va_end(ap); 3418 } 3419 3420 /* 3421 * if this is the first init do not bother to save the 3422 * counters. They should be 0, but do not count on it. 3423 */ 3424 static void 3425 hmesavecntrs(struct hme *hmep) 3426 { 3427 uint32_t fecnt, aecnt, lecnt, rxcv; 3428 uint32_t ltcnt, excnt; 3429 3430 /* XXX What all gets added in ierrors and oerrors? */ 3431 fecnt = GET_MACREG(fecnt); 3432 PUT_MACREG(fecnt, 0); 3433 3434 aecnt = GET_MACREG(aecnt); 3435 hmep->hme_align_errors += aecnt; 3436 PUT_MACREG(aecnt, 0); 3437 3438 lecnt = GET_MACREG(lecnt); 3439 hmep->hme_lenerr += lecnt; 3440 PUT_MACREG(lecnt, 0); 3441 3442 rxcv = GET_MACREG(rxcv); 3443 #ifdef HME_CODEVIOL_BUG 3444 /* 3445 * Ignore rxcv errors for Sbus/FEPS 2.1 or earlier 3446 */ 3447 if (!hmep->hme_rxcv_enable) { 3448 rxcv = 0; 3449 } 3450 #endif 3451 hmep->hme_cvc += rxcv; 3452 PUT_MACREG(rxcv, 0); 3453 3454 ltcnt = GET_MACREG(ltcnt); 3455 hmep->hme_tlcol += ltcnt; 3456 PUT_MACREG(ltcnt, 0); 3457 3458 excnt = GET_MACREG(excnt); 3459 hmep->hme_excol += excnt; 3460 PUT_MACREG(excnt, 0); 3461 3462 hmep->hme_fcs_errors += fecnt; 3463 hmep->hme_ierrors += (fecnt + aecnt + lecnt); 3464 hmep->hme_oerrors += (ltcnt + excnt); 3465 hmep->hme_coll += (GET_MACREG(nccnt) + ltcnt); 3466 3467 PUT_MACREG(nccnt, 0); 3468 CHECK_MACREG(); 3469 } 3470 3471 /* 3472 * To set up the mac address for the network interface: 3473 * The adapter card may support a local mac address which is published 3474 * in a device node property "local-mac-address". This mac address is 3475 * treated as the factory-installed mac address for DLPI interface. 3476 * If the adapter firmware has used the device for diskless boot 3477 * operation it publishes a property called "mac-address" for use by 3478 * inetboot and the device driver. 3479 * If "mac-address" is not found, the system options property 3480 * "local-mac-address" is used to select the mac-address. If this option 3481 * is set to "true", and "local-mac-address" has been found, then 3482 * local-mac-address is used; otherwise the system mac address is used 3483 * by calling the "localetheraddr()" function. 3484 */ 3485 static void 3486 hme_setup_mac_address(struct hme *hmep, dev_info_t *dip) 3487 { 3488 char *prop; 3489 int prop_len = sizeof (int); 3490 3491 hmep->hme_addrflags = 0; 3492 3493 /* 3494 * Check if it is an adapter with its own local mac address 3495 * If it is present, save it as the "factory-address" 3496 * for this adapter. 
3497 */
3498 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3499 "local-mac-address",
3500 (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3501 if (prop_len == ETHERADDRL) {
3502 hmep->hme_addrflags = HME_FACTADDR_PRESENT;
3503 ether_bcopy(prop, &hmep->hme_factaddr);
3504 HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
3505 "Local Ethernet address = %s",
3506 ether_sprintf(&hmep->hme_factaddr));
3507 }
3508 kmem_free(prop, prop_len);
3509 }
3510
3511 /*
3512 * Check if the adapter has published a "mac-address" property.
3513 * If it is present, use it as the mac address for this device.
3514 */
3515 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3516 "mac-address", (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3517 if (prop_len >= ETHERADDRL) {
3518 ether_bcopy(prop, &hmep->hme_ouraddr);
3519 kmem_free(prop, prop_len);
3520 return;
3521 }
3522 kmem_free(prop, prop_len);
3523 }
3524
3525 #ifdef __sparc
3526 /*
3527 * On sparc, we may be able to fall back on the system mac
3528 * address. On all other platforms, the factory address set up
3529 * above is all we have.
3530 */
3531 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
3532 (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3533 if ((strncmp("true", prop, prop_len) == 0) &&
3534 (hmep->hme_addrflags & HME_FACTADDR_PRESENT)) {
3535 hmep->hme_addrflags |= HME_FACTADDR_USE;
3536 ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
3537 kmem_free(prop, prop_len);
3538 HME_FAULT_MSG1(hmep, SEVERITY_NONE, DISPLAY_MSG,
3539 "Using local MAC address");
3540 return;
3541 }
3542 kmem_free(prop, prop_len);
3543 }
3544
3545 /*
3546 * Get the system ethernet address.
3547 */
3548 (void) localetheraddr((struct ether_addr *)NULL, &hmep->hme_ouraddr);
3549 #else
3550 ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
3551 #endif
3552 }
3553
3554 /* ARGSUSED */
3555 static void
3556 hme_check_acc_handle(char *file, uint_t line, struct hme *hmep,
3557 ddi_acc_handle_t handle)
3558 {
3559 }
3560