/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */


#include "nge.h"

/*
 * Describes the chip's DMA engine
 */

static ddi_dma_attr_t hot_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	0
};

static ddi_dma_attr_t hot_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen	*/
	1,				/* dma_attr_granular	*/
	0
};

static ddi_dma_attr_t sum_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	0
};

static ddi_dma_attr_t sum_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen	*/
	1,				/* dma_attr_granular	*/
	0
};

/*
 * DMA access attributes for data.
 */
ddi_device_acc_attr_t nge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
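/*
 * Note: the "hot" attributes above allow 40-bit DMA addresses and, on
 * the TX side, up to NGE_MAX_COOKIES cookies per bind, while the "sum"
 * attributes are restricted to 32-bit addresses.  The pair actually
 * used is selected at run time through ngep->desc_attr; see
 * nge_check_desc_prop() below.
 */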
/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t nge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t nge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * NIC DESC MODE 2
 */
static const nge_desc_attr_t nge_sum_desc = {
	sizeof (sum_rx_bd),
	sizeof (sum_tx_bd),
	&sum_dma_attr,
	&sum_tx_dma_attr,
	nge_sum_rxd_fill,
	nge_sum_rxd_check,
	nge_sum_txd_fill,
	nge_sum_txd_check,
};

/*
 * NIC DESC MODE 3
 */
static const nge_desc_attr_t nge_hot_desc = {
	sizeof (hot_rx_bd),
	sizeof (hot_tx_bd),
	&hot_dma_attr,
	&hot_tx_dma_attr,
	nge_hot_rxd_fill,
	nge_hot_rxd_check,
	nge_hot_txd_fill,
	nge_hot_txd_check,
};

static char nge_ident[] = "nVidia 1Gb Ethernet";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char debug_propname[] = "nge-debug-flags";
static char intr_moderation[] = "intr-moderation";
static char rx_data_hw[] = "rx-data-hw";
static char rx_prd_lw[] = "rx-prd-lw";
static char rx_prd_hw[] = "rx-prd-hw";
static char sw_intr_intv[] = "sw-intr-intvl";
static char nge_desc_mode[] = "desc-mode";
static char default_mtu[] = "default_mtu";
static char low_memory_mode[] = "minimal-memory-usage";
static char mac_addr_reversion[] = "mac-addr-reversion";
extern kmutex_t nge_log_mutex[1];

static int		nge_m_start(void *);
static void		nge_m_stop(void *);
static int		nge_m_promisc(void *, boolean_t);
static int		nge_m_multicst(void *, boolean_t, const uint8_t *);
static int		nge_m_unicst(void *, const uint8_t *);
static void		nge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	nge_m_getcapab(void *, mac_capab_t, void *);
static int		nge_m_setprop(void *, const char *, mac_prop_id_t,
			    uint_t, const void *);
static int		nge_m_getprop(void *, const char *, mac_prop_id_t,
			    uint_t, uint_t, void *, uint_t *);
static int		nge_set_priv_prop(nge_t *, const char *, uint_t,
			    const void *);
static int		nge_get_priv_prop(nge_t *, const char *, uint_t,
			    uint_t, void *);

#define	NGE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t nge_m_callbacks = {
	NGE_M_CALLBACK_FLAGS,
	nge_m_stat,
	nge_m_start,
	nge_m_stop,
	nge_m_promisc,
	nge_m_multicst,
	nge_m_unicst,
	nge_m_tx,
	nge_m_ioctl,
	nge_m_getcapab,
	NULL,
	NULL,
	nge_m_setprop,
	nge_m_getprop
};

mac_priv_prop_t nge_priv_props[] = {
	{"_tx_bcopy_threshold", MAC_PROP_PERM_RW},
	{"_rx_bcopy_threshold", MAC_PROP_PERM_RW},
	{"_recv_max_packet", MAC_PROP_PERM_RW},
	{"_poll_quiet_time", MAC_PROP_PERM_RW},
	{"_poll_busy_time", MAC_PROP_PERM_RW},
	{"_rx_intr_hwater", MAC_PROP_PERM_RW},
	{"_rx_intr_lwater", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_adv_asym_pause_cap", MAC_PROP_PERM_RW},
	{"_tx_n_intr", MAC_PROP_PERM_RW}
};

#define	NGE_MAX_PRIV_PROPS \
	(sizeof (nge_priv_props)/sizeof (mac_priv_prop_t))

static int nge_add_intrs(nge_t *, int);
static void nge_rem_intrs(nge_t *);
static int nge_register_intrs_and_init_locks(nge_t *);
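/*
 * Note: nge_priv_props[] above advertises the driver-private properties
 * (typically tuned with dladm); the matching handlers are
 * nge_set_priv_prop() and nge_get_priv_prop() further down in this file.
 */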
/*
 * NGE MSI tunable:
 */
boolean_t nge_enable_msi = B_FALSE;

static enum ioc_reply
nge_set_loop_mode(nge_t *ngep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == ngep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case NGE_LOOP_NONE:
	case NGE_LOOP_EXTERNAL_100:
	case NGE_LOOP_EXTERNAL_10:
	case NGE_LOOP_INTERNAL_PHY:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	ngep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_INIT

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly. The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
void
nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	size_t totsize;

	totsize = qty*size;
	ASSERT(size > 0);
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
int
nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	int err;
	caddr_t va;

	NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)ngep, memsize, attr_p, dma_flags, dma_p));
	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		goto fail;

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->offset = 0;

	return (DDI_SUCCESS);

fail:
	nge_free_dma_mem(dma_p);
	NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!"));

	return (DDI_FAILURE);
}

/*
 * Free one allocated area of DMAable memory
 */
void
nge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
	}
	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
	if (dma_p->dma_hdl != NULL) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}
}
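/*
 * Note: nge_free_dma_mem() tolerates a partially set-up dma_area_t
 * (each handle is checked for NULL before it is torn down), which is
 * why the failure path of nge_alloc_dma_mem() above can simply call it
 * to unwind whatever had been allocated so far.
 */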
#define	ALLOC_TX_BUF	0x1
#define	ALLOC_TX_DESC	0x2
#define	ALLOC_RX_DESC	0x4

int
nge_alloc_bufs(nge_t *ngep)
{
	int err;
	int split;
	int progress;
	size_t txbuffsize;
	size_t rxdescsize;
	size_t txdescsize;

	txbuffsize = ngep->tx_desc * ngep->buf_size;
	rxdescsize = ngep->rx_desc;
	txdescsize = ngep->tx_desc;
	rxdescsize *= ngep->desc_attr.rxd_size;
	txdescsize *= ngep->desc_attr.txd_size;
	progress = 0;

	NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep));
	/*
	 * Allocate memory & handles for TX buffers
	 */
	ASSERT((txbuffsize % ngep->nge_split) == 0);
	for (split = 0; split < ngep->nge_split; ++split) {
		err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split,
		    &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE,
		    &ngep->send->buf[split]);
		if (err != DDI_SUCCESS)
			goto fail;
	}

	progress |= ALLOC_TX_BUF;

	/*
	 * Allocate memory & handles for receive return rings and
	 * buffer (producer) descriptor rings
	 */
	err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc);
	if (err != DDI_SUCCESS)
		goto fail;
	progress |= ALLOC_RX_DESC;

	/*
	 * Allocate memory & handles for TX descriptor rings
	 */
	err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc);
	if (err != DDI_SUCCESS)
		goto fail;
	return (DDI_SUCCESS);

fail:
	if (progress & ALLOC_RX_DESC)
		nge_free_dma_mem(&ngep->recv->desc);
	if (progress & ALLOC_TX_BUF) {
		for (split = 0; split < ngep->nge_split; ++split)
			nge_free_dma_mem(&ngep->send->buf[split]);
	}

	return (DDI_FAILURE);
}

/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
 */
void
nge_free_bufs(nge_t *ngep)
{
	int split;

	NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep));

	nge_free_dma_mem(&ngep->recv->desc);
	nge_free_dma_mem(&ngep->send->desc);

	for (split = 0; split < ngep->nge_split; ++split)
		nge_free_dma_mem(&ngep->send->buf[split]);
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
nge_fini_send_ring(nge_t *ngep)
{
	uint32_t slot;
	size_t dmah_num;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;

	srp = ngep->send;
	ssbdp = srp->sw_sbds;

	NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep));

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	for (slot = 0; slot < dmah_num; ++slot) {
		if (srp->dmahndl[slot].hndl) {
			(void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl);
			ddi_dma_free_handle(&srp->dmahndl[slot].hndl);
			srp->dmahndl[slot].hndl = NULL;
			srp->dmahndl[slot].next = NULL;
		}
	}

	srp->dmah_free.head = NULL;
	srp->dmah_free.tail = NULL;

	kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp));
}
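/*
 * Note: the send ring keeps a small pool of preallocated TX DMA handles
 * in srp->dmahndl[], chained into the singly-linked free list headed by
 * srp->dmah_free; nge_fini_send_ring() above tears down only the handles
 * that were actually allocated.
 */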
/*
 * Initialise the specified Send Ring, using the information in the
 * <dma_area> descriptors that it contains to set up all the other
 * fields. This routine should be called only once for each ring.
 */
static int
nge_init_send_ring(nge_t *ngep)
{
	size_t dmah_num;
	uint32_t nslots;
	uint32_t err;
	uint32_t slot;
	uint32_t split;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	dma_area_t desc;
	dma_area_t pbuf;

	srp = ngep->send;
	srp->desc.nslots = ngep->tx_desc;
	nslots = srp->desc.nslots;

	NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep));
	/*
	 * Other one-off initialisation of per-ring data
	 */
	srp->ngep = ngep;

	/*
	 * Allocate the array of s/w Send Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
	srp->sw_sbds = ssbdp;

	/*
	 * Now initialise each array element once and for all
	 */
	desc = srp->desc;
	for (split = 0; split < ngep->nge_split; ++split) {
		pbuf = srp->buf[split];
		for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) {
			nge_slice_chunk(&ssbdp->desc, &desc, 1,
			    ngep->desc_attr.txd_size);
			nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
			    ngep->buf_size);
		}
		ASSERT(pbuf.alength == 0);
	}
	ASSERT(desc.alength == 0);

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	/* preallocate dma handles for tx buffer */
	for (slot = 0; slot < dmah_num; ++slot) {

		err = ddi_dma_alloc_handle(ngep->devinfo,
		    ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &srp->dmahndl[slot].hndl);

		if (err != DDI_SUCCESS) {
			nge_fini_send_ring(ngep);
			nge_error(ngep,
			    "nge_init_send_ring: alloc dma handle fails");
			return (DDI_FAILURE);
		}
		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
	}

	srp->dmah_free.head = srp->dmahndl;
	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
	srp->dmah_free.tail->next = NULL;

	return (DDI_SUCCESS);
}

/*
 * Initialize the tx recycle pointer and tx sending pointer of the tx ring
 * and set the type of the tx data descriptor to its default.
 */
static void
nge_reinit_send_ring(nge_t *ngep)
{
	size_t dmah_num;
	uint32_t slot;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;

	srp = ngep->send;

	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_hwmark = NGE_DESC_MIN;
	srp->tx_lwmark = NGE_DESC_MIN;

	srp->tx_next = 0;
	srp->tx_free = srp->desc.nslots;
	srp->tc_next = 0;

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	for (slot = 0; slot < dmah_num; ++slot)
		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;

	srp->dmah_free.head = srp->dmahndl;
	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
	srp->dmah_free.tail->next = NULL;

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	for (slot = 0; slot < srp->desc.nslots; ++slot) {
		ssbdp = &srp->sw_sbds[slot];
		ssbdp->flags = HOST_OWN;
	}

	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
}

/*
 * Initialize the slot number of the rx ring
 */
static void
nge_init_recv_ring(nge_t *ngep)
{
	recv_ring_t *rrp;

	rrp = ngep->recv;
	rrp->desc.nslots = ngep->rx_desc;
	rrp->ngep = ngep;
}
/*
 * Initialize the rx recycle pointer and rx sending pointer of the rx ring
 */
static void
nge_reinit_recv_ring(nge_t *ngep)
{
	recv_ring_t *rrp;

	rrp = ngep->recv;

	/*
	 * Reinitialise control variables ...
	 */
	rrp->prod_index = 0;
	/*
	 * Zero and sync all the h/w Receive Buffer Descriptors
	 */
	DMA_ZERO(rrp->desc);
	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV);
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
nge_fini_buff_ring(nge_t *ngep)
{
	uint32_t i;
	buff_ring_t *brp;
	dma_area_t *bufp;
	sw_rx_sbd_t *bsbdp;

	brp = ngep->buff;
	bsbdp = brp->sw_rbds;

	NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep));

	mutex_enter(brp->recycle_lock);
	brp->buf_sign++;
	mutex_exit(brp->recycle_lock);
	for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) {
		if (bsbdp->bufp) {
			if (bsbdp->bufp->mp)
				freemsg(bsbdp->bufp->mp);
			nge_free_dma_mem(bsbdp->bufp);
			kmem_free(bsbdp->bufp, sizeof (dma_area_t));
			bsbdp->bufp = NULL;
		}
	}
	while (brp->free_list != NULL) {
		bufp = brp->free_list;
		brp->free_list = bufp->next;
		bufp->next = NULL;
		if (bufp->mp)
			freemsg(bufp->mp);
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
	}
	while (brp->recycle_list != NULL) {
		bufp = brp->recycle_list;
		brp->recycle_list = bufp->next;
		bufp->next = NULL;
		if (bufp->mp)
			freemsg(bufp->mp);
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
	}

	kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp)));
	brp->sw_rbds = NULL;
}

/*
 * Initialize the Rx data ring and free ring
 */
static int
nge_init_buff_ring(nge_t *ngep)
{
	uint32_t err;
	uint32_t slot;
	uint32_t nslots_buff;
	uint32_t nslots_recv;
	buff_ring_t *brp;
	recv_ring_t *rrp;
	dma_area_t desc;
	dma_area_t *bufp;
	sw_rx_sbd_t *bsbdp;

	rrp = ngep->recv;
	brp = ngep->buff;
	brp->nslots = ngep->rx_buf;
	brp->rx_bcopy = B_FALSE;
	nslots_recv = rrp->desc.nslots;
	nslots_buff = brp->nslots;
	brp->ngep = ngep;

	NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep));

	/*
	 * Allocate the array of s/w Recv Buffer Descriptors
	 */
	bsbdp = kmem_zalloc(nslots_recv *sizeof (*bsbdp), KM_SLEEP);
	brp->sw_rbds = bsbdp;
	brp->free_list = NULL;
	brp->recycle_list = NULL;
	for (slot = 0; slot < nslots_buff; ++slot) {
		bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP);
		err = nge_alloc_dma_mem(ngep, (ngep->buf_size
		    + NGE_HEADROOM),
		    &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp);
		if (err != DDI_SUCCESS) {
			kmem_free(bufp, sizeof (dma_area_t));
			return (DDI_FAILURE);
		}

		bufp->alength -= NGE_HEADROOM;
		bufp->offset += NGE_HEADROOM;
		bufp->private = (caddr_t)ngep;
		bufp->rx_recycle.free_func = nge_recv_recycle;
		bufp->rx_recycle.free_arg = (caddr_t)bufp;
		bufp->signature = brp->buf_sign;
		bufp->rx_delivered = B_FALSE;
		bufp->mp = desballoc(DMA_VPTR(*bufp),
		    ngep->buf_size + NGE_HEADROOM,
		    0, &bufp->rx_recycle);

		if (bufp->mp == NULL) {
			return (DDI_FAILURE);
		}
		bufp->next = brp->free_list;
		brp->free_list = bufp;
	}

	/*
	 * Now initialise each array element once and for all
	 */
	desc = rrp->desc;
	for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) {
		nge_slice_chunk(&bsbdp->desc, &desc, 1,
		    ngep->desc_attr.rxd_size);
		bufp = brp->free_list;
		brp->free_list = bufp->next;
		bsbdp->bufp = bufp;
		bsbdp->flags = CONTROLER_OWN;
		bufp->next = NULL;
	}

	ASSERT(desc.alength == 0);
	return (DDI_SUCCESS);
}
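/*
 * Note: each receive buffer above is wrapped in an mblk via
 * desballoc(9F), with nge_recv_recycle() registered as the free routine,
 * so buffers loaned upstream come back to the driver's free list rather
 * than being destroyed.  NGE_HEADROOM bytes are reserved at the front of
 * each buffer (alength and offset are adjusted accordingly).
 */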
/*
 * Fill the host address of the data into each rx descriptor
 * and initialize the free pointers of the rx free ring
 */
static int
nge_reinit_buff_ring(nge_t *ngep)
{
	uint32_t slot;
	uint32_t nslots_recv;
	buff_ring_t *brp;
	recv_ring_t *rrp;
	sw_rx_sbd_t *bsbdp;
	void *hw_bd_p;

	brp = ngep->buff;
	rrp = ngep->recv;
	bsbdp = brp->sw_rbds;
	nslots_recv = rrp->desc.nslots;
	for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) {
		hw_bd_p = DMA_VPTR(bsbdp->desc);
		/*
		 * There is a scenario: when small-packet tcp traffic is
		 * heavy and is then suspended, the preallocated rx
		 * buffers may not be released back by tcp in time, so
		 * the rx buffer pointers cannot be refilled in time.
		 *
		 * If we reinitialize the driver at that point, the bufp
		 * pointer for the rx traffic will be NULL, so the
		 * reinitialization must fail.
		 */
		if (bsbdp->bufp == NULL)
			return (DDI_FAILURE);

		ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie,
		    bsbdp->bufp->alength);
	}
	return (DDI_SUCCESS);
}

static void
nge_init_ring_param_lock(nge_t *ngep)
{
	buff_ring_t *brp;
	send_ring_t *srp;

	srp = ngep->send;
	brp = ngep->buff;

	/* Init the locks for send ring */
	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));

	/* Init parameters of buffer ring */
	brp->free_list = NULL;
	brp->recycle_list = NULL;
	brp->rx_hold = 0;
	brp->buf_sign = 0;

	/* Init recycle list lock */
	mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
}

int
nge_init_rings(nge_t *ngep)
{
	uint32_t err;

	err = nge_init_send_ring(ngep);
	if (err != DDI_SUCCESS) {
		return (err);
	}
	nge_init_recv_ring(ngep);

	err = nge_init_buff_ring(ngep);
	if (err != DDI_SUCCESS) {
		nge_fini_send_ring(ngep);
		return (DDI_FAILURE);
	}

	return (err);
}

static int
nge_reinit_ring(nge_t *ngep)
{
	int err;

	nge_reinit_recv_ring(ngep);
	nge_reinit_send_ring(ngep);
	err = nge_reinit_buff_ring(ngep);
	return (err);
}
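/*
 * Note: the ring routines above are used in three places: nge_m_start()
 * builds the rings with nge_init_rings(), nge_reset_dev() and
 * nge_resume() re-arm them with nge_reinit_ring(), and nge_m_stop()
 * tears them down again with nge_fini_rings().
 */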
void
nge_fini_rings(nge_t *ngep)
{
	/*
	 * For the receive ring, nothing needs to be finished,
	 * so only finish the buffer ring and the send ring here.
	 */
	nge_fini_buff_ring(ngep);
	nge_fini_send_ring(ngep);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	NGE_LOOP_NONE		},
	{ external,	"100Mbps",	NGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	NGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		NGE_LOOP_INTERNAL_PHY	},
};

enum ioc_reply
nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;
	uint32_t *lbmp;
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = ngep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (nge_set_loop_mode(ngep, *lbmp));
	}
}

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_NEMO

static void
nge_check_desc_prop(nge_t *ngep)
{
	if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD)
		ngep->desc_mode = DESC_HOT;

	if (ngep->desc_mode == DESC_OFFLOAD) {

		ngep->desc_attr = nge_sum_desc;

	} else if (ngep->desc_mode == DESC_HOT) {

		ngep->desc_attr = nge_hot_desc;
	}
}

/*
 * nge_get_props -- get the parameters to tune the driver
 */
static void
nge_get_props(nge_t *ngep)
{
	chip_info_t *infop;
	dev_info_t *devinfo;
	nge_dev_spec_param_t *dev_param_p;

	devinfo = ngep->devinfo;
	infop = (chip_info_t *)&ngep->chipinfo;
	dev_param_p = &ngep->dev_spec_param;

	infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, clsize_propname, 32);

	infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, latency_propname, 64);
	ngep->intr_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, intr_moderation, NGE_SET);
	ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_data_hw, 0x20);
	ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_lw, 0x4);
	ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_hw, 0xc);

	ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC);
	ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP);
	ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type);
	ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, low_memory_mode, 0);
	ngep->mac_addr_reversion = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, mac_addr_reversion, 0);

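	/*
	 * Note: the MTU determined below, together with the low-memory
	 * property, selects the buffer size, descriptor counts and
	 * buffer split factor; the same mapping is repeated in
	 * nge_m_setprop() when MAC_PROP_MTU is changed.
	 */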
	if (dev_param_p->jumbo) {
		ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
		    DDI_PROP_DONTPASS, default_mtu, ETHERMTU);
	} else
		ngep->default_mtu = ETHERMTU;

	if (ngep->default_mtu > ETHERMTU &&
	    ngep->default_mtu <= NGE_MTU_2500) {
		ngep->buf_size = NGE_JB2500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_2500 &&
	    ngep->default_mtu <= NGE_MTU_4500) {
		ngep->buf_size = NGE_JB4500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_4500 &&
	    ngep->default_mtu <= NGE_MAX_MTU) {
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MAX_MTU) {
		ngep->default_mtu = NGE_MAX_MTU;
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->lowmem_mode != 0) {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_32;
	} else {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = dev_param_p->tx_desc_num;
		ngep->rx_desc = dev_param_p->rx_desc_num;
		ngep->rx_buf = dev_param_p->rx_desc_num * 2;
		ngep->nge_split = dev_param_p->nge_split;
	}

	nge_check_desc_prop(ngep);
}

static int
nge_reset_dev(nge_t *ngep)
{
	int err;
	nge_mul_addr1 maddr1;
	nge_sw_statistics_t *sw_stp;
	send_ring_t *srp = ngep->send;

	sw_stp = &ngep->statistics.sw_statistics;

	ASSERT(mutex_owned(ngep->genlock));
	mutex_enter(srp->tc_lock);
	mutex_enter(srp->tx_lock);

	nge_tx_recycle_all(ngep);
	err = nge_reinit_ring(ngep);
	if (err == DDI_FAILURE) {
		mutex_exit(srp->tx_lock);
		mutex_exit(srp->tc_lock);
		return (err);
	}
	err = nge_chip_reset(ngep);
	/*
	 * Clear the Multicast mac address table
	 */
	nge_reg_put32(ngep, NGE_MUL_ADDR0, 0);
	maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1);
	maddr1.addr_bits.addr = 0;
	nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val);

	mutex_exit(srp->tx_lock);
	mutex_exit(srp->tc_lock);
	if (err == DDI_FAILURE)
		return (err);
	ngep->watchdog = 0;
	ngep->resched_needed = B_FALSE;
	ngep->promisc = B_FALSE;
	ngep->param_loop_mode = NGE_LOOP_NONE;
	ngep->factotum_flag = 0;
	ngep->resched_needed = 0;
	ngep->nge_mac_state = NGE_MAC_RESET;
	ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL;
	ngep->max_sdu += VTAG_SIZE;
	ngep->rx_def = 0x16;

	/* Clear the software statistics */
	sw_stp->recv_count = 0;
	sw_stp->xmit_count = 0;
	sw_stp->rbytes = 0;
	sw_stp->obytes = 0;

	return (DDI_SUCCESS);
}
static void
nge_m_stop(void *arg)
{
	nge_t *ngep = arg;		/* private device info	*/

	NGE_TRACE(("nge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(ngep->genlock);
	/* If suspended, the adapter is already stopped, just return. */
	if (ngep->suspended) {
		ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED);
		mutex_exit(ngep->genlock);
		return;
	}
	rw_enter(ngep->rwlock, RW_WRITER);

	(void) nge_chip_stop(ngep, B_FALSE);
	ngep->nge_mac_state = NGE_MAC_STOPPED;

	/* Recycle all the TX BD */
	nge_tx_recycle_all(ngep);
	nge_fini_rings(ngep);
	nge_free_bufs(ngep);

	NGE_DEBUG(("nge_m_stop($%p) done", arg));

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);
}

static int
nge_m_start(void *arg)
{
	int err;
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_start($%p)", arg));

	/*
	 * Start processing and record new MAC state
	 */
	mutex_enter(ngep->genlock);
	/*
	 * If suspended, don't start, as the resume processing
	 * will recall this function with the suspended flag off.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (EIO);
	}
	rw_enter(ngep->rwlock, RW_WRITER);
	err = nge_alloc_bufs(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_m_start: DMA buffer allocation failed");
		goto finish;
	}
	err = nge_init_rings(ngep);
	if (err != DDI_SUCCESS) {
		nge_free_bufs(ngep);
		nge_problem(ngep, "nge_init_rings() failed,err=%x", err);
		goto finish;
	}
	err = nge_restart(ngep);

	NGE_DEBUG(("nge_m_start($%p) done", arg));
finish:
	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);

	return (err == DDI_SUCCESS ? 0 : EIO);
}

static int
nge_m_unicst(void *arg, const uint8_t *macaddr)
{
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_unicst($%p)", arg));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(ngep->genlock);

	ethaddr_copy(macaddr, ngep->cur_uni_addr.addr);
	ngep->cur_uni_addr.set = 1;

	/*
	 * If we are suspended, we want to quit now, and not update
	 * the chip. Doing so might put it in a bad state, but the
	 * resume will get the unicast address installed.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (DDI_SUCCESS);
	}
	nge_chip_sync(ngep);

	NGE_DEBUG(("nge_m_unicst($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}

static int
nge_m_promisc(void *arg, boolean_t on)
{
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_promisc($%p)", arg));

	/*
	 * Store specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(ngep->genlock);
	/*
	 * If suspended, there is no need to do anything, not even
	 * recording the promiscuous mode, as it won't be properly set
	 * on resume. Just return failure.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (DDI_FAILURE);
	}
	if (ngep->promisc == on) {
		mutex_exit(ngep->genlock);
		NGE_DEBUG(("nge_m_promisc($%p) done", arg));
		return (0);
	}
	ngep->promisc = on;
	ngep->record_promisc = ngep->promisc;
	nge_chip_sync(ngep);
	NGE_DEBUG(("nge_m_promisc($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}

static void nge_mulparam(nge_t *ngep)
{
	uint8_t number;
	ether_addr_t pand;
	ether_addr_t por;
	mul_item *plist;

	for (number = 0; number < ETHERADDRL; number++) {
		pand[number] = 0x00;
		por[number] = 0x00;
	}
	for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) {
		for (number = 0; number < ETHERADDRL; number++) {
			pand[number] &= plist->mul_addr[number];
			por[number] |= plist->mul_addr[number];
		}
	}
	for (number = 0; number < ETHERADDRL; number++) {
		ngep->cur_mul_addr.addr[number]
		    = pand[number] & por[number];
		ngep->cur_mul_mask.addr[number]
		    = pand[number] | (~por[number]);
	}
}

static int
nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	boolean_t update;
	boolean_t b_eq;
	nge_t *ngep = arg;
	mul_item *plist;
	mul_item *plist_prev;
	mul_item *pitem;

	NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	update = B_FALSE;
	plist = plist_prev = NULL;
	mutex_enter(ngep->genlock);
	if (add) {
		if (ngep->pcur_mulist != NULL) {
			for (plist = ngep->pcur_mulist; plist != NULL;
			    plist = plist->next) {
				b_eq = ether_eq(plist->mul_addr, mca);
				if (b_eq) {
					plist->ref_cnt++;
					break;
				}
				plist_prev = plist;
			}
		}

		if (plist == NULL) {
			pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP);
			ether_copy(mca, pitem->mul_addr);
			pitem->ref_cnt++;
			pitem->next = NULL;
			if (plist_prev == NULL)
				ngep->pcur_mulist = pitem;
			else
				plist_prev->next = pitem;
			update = B_TRUE;
		}
	} else {
		if (ngep->pcur_mulist != NULL) {
			for (plist = ngep->pcur_mulist; plist != NULL;
			    plist = plist->next) {
				b_eq = ether_eq(plist->mul_addr, mca);
				if (b_eq) {
					update = B_TRUE;
					break;
				}
				plist_prev = plist;
			}

			if (update) {
				if ((plist_prev == NULL) &&
				    (plist->next == NULL))
					ngep->pcur_mulist = NULL;
				else if ((plist_prev == NULL) &&
				    (plist->next != NULL))
					ngep->pcur_mulist = plist->next;
				else
					plist_prev->next = plist->next;
				kmem_free(plist, sizeof (mul_item));
			}
		}
	}

	if (update && !ngep->suspended) {
		nge_mulparam(ngep);
		nge_chip_sync(ngep);
	}
	NGE_DEBUG(("nge_m_multicst($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}
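/*
 * Note: nge_mulparam() above folds the whole multicast list into a
 * single (cur_mul_addr, cur_mul_mask) pair built from the bitwise AND
 * and OR of all the addresses; nge_chip_sync() is then expected to
 * program that pair into the hardware multicast filter.
 */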
static void
nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	int err;
	int cmd;
	nge_t *ngep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesirable
	 * state. So just NAK it.
	 */
	mutex_enter(ngep->genlock);
	if (ngep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(ngep->genlock);
		return;
	}
	mutex_exit(ngep->genlock);

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x", cmd));
	switch (cmd) {
	default:
		NGE_LDB(NGE_DBG_BADIOC,
		    ("nge_m_ioctl: unknown cmd 0x%x", cmd));

		miocnak(wq, mp, 0, EINVAL);
		return;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d",
			    cmd, err));
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(ngep->genlock);

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		status = nge_chip_ioctl(ngep, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = nge_loop_ioctl(ngep, mp, iocp);
		break;
	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status));

	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

	default:
		break;
	}

	mutex_exit(ngep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {

	default:
	case IOC_INVAL:
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}
static boolean_t
nge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
nge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	nge_t *ngep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	link_flowctrl_t fl;

	mutex_enter(ngep->genlock);
	if (ngep->param_loop_mode != NGE_LOOP_NONE &&
	    nge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(ngep->genlock);
		return (EBUSY);
	}
	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		ngep->param_en_1000fdx = *(uint8_t *)pr_val;
		ngep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100FDX_CAP:
		ngep->param_en_100fdx = *(uint8_t *)pr_val;
		ngep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100HDX_CAP:
		ngep->param_en_100hdx = *(uint8_t *)pr_val;
		ngep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10FDX_CAP:
		ngep->param_en_10fdx = *(uint8_t *)pr_val;
		ngep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10HDX_CAP:
		ngep->param_en_10hdx = *(uint8_t *)pr_val;
		ngep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
	case MAC_PROP_EN_1000HDX_CAP:
		err = ENOTSUP;	/* read-only prop. Can't set this */
		break;
	case MAC_PROP_AUTONEG:
		ngep->param_adv_autoneg = *(uint8_t *)pr_val;
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;
	case MAC_PROP_MTU:
		cur_mtu = ngep->default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < ETHERMTU ||
		    new_mtu > NGE_MAX_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > ETHERMTU) &&
		    (!ngep->dev_spec_param.jumbo)) {
			err = EINVAL;
			break;
		}
		if (ngep->nge_mac_state == NGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}

		ngep->default_mtu = new_mtu;
		if (ngep->default_mtu > ETHERMTU &&
		    ngep->default_mtu <= NGE_MTU_2500) {
			ngep->buf_size = NGE_JB2500_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->default_mtu > NGE_MTU_2500 &&
		    ngep->default_mtu <= NGE_MTU_4500) {
			ngep->buf_size = NGE_JB4500_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->default_mtu > NGE_MTU_4500 &&
		    ngep->default_mtu <= NGE_MAX_MTU) {
			ngep->buf_size = NGE_JB9000_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->default_mtu > NGE_MAX_MTU) {
			ngep->default_mtu = NGE_MAX_MTU;
			ngep->buf_size = NGE_JB9000_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->lowmem_mode != 0) {
			ngep->default_mtu = ETHERMTU;
			ngep->buf_size = NGE_STD_BUFSZ;
			ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_32;
		} else {
			ngep->default_mtu = ETHERMTU;
			ngep->buf_size = NGE_STD_BUFSZ;
			ngep->tx_desc =
			    ngep->dev_spec_param.tx_desc_num;
			ngep->rx_desc =
			    ngep->dev_spec_param.rx_desc_num;
			ngep->rx_buf =
			    ngep->dev_spec_param.rx_desc_num * 2;
			ngep->nge_split =
			    ngep->dev_spec_param.nge_split;
		}

		err = mac_maxsdu_update(ngep->mh, ngep->default_mtu);

		break;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			ngep->param_adv_pause = 0;
			ngep->param_adv_asym_pause = 0;

			ngep->param_link_rx_pause = B_FALSE;
			ngep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			if (!((ngep->param_lp_pause == 0) &&
			    (ngep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			ngep->param_adv_pause = 1;
			ngep->param_adv_asym_pause = 1;

			ngep->param_link_rx_pause = B_TRUE;
			ngep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			if (!((ngep->param_lp_pause == 1) &&
			    (ngep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			ngep->param_adv_pause = 0;
			ngep->param_adv_asym_pause = 1;

			ngep->param_link_rx_pause = B_FALSE;
			ngep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			if (ngep->param_lp_pause != 1) {
				err = EINVAL;
				break;
			}
			ngep->param_adv_pause = 1;

			ngep->param_link_rx_pause = B_TRUE;
			ngep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			(*ngep->physops->phys_update)(ngep);
			nge_chip_sync(ngep);
		}

		break;
	case MAC_PROP_PRIVATE:
		err = nge_set_priv_prop(ngep, pr_name, pr_valsize,
		    pr_val);
		if (err == 0) {
			(*ngep->physops->phys_update)(ngep);
			nge_chip_sync(ngep);
		}
		break;
	default:
		err = ENOTSUP;
	}
	mutex_exit(ngep->genlock);
	return (err);
}

static int
nge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	nge_t *ngep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);

	*perm = MAC_PROP_PERM_RW;

	bzero(pr_val, pr_valsize);

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize >= sizeof (link_duplex_t)) {
			bcopy(&ngep->param_link_duplex, pr_val,
			    sizeof (link_duplex_t));
		} else
			err = EINVAL;
		break;
	case MAC_PROP_SPEED:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize >= sizeof (uint64_t)) {
			speed = ngep->param_link_speed * 1000000ull;
			bcopy(&speed, pr_val, sizeof (speed));
		} else
			err = EINVAL;
		break;
	case MAC_PROP_AUTONEG:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_autoneg;
		}
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize >= sizeof (link_flowctrl_t)) {
			if (pr_flags & MAC_PROP_DEFAULT) {
				fl = LINK_FLOWCTRL_BI;
				bcopy(&fl, pr_val, sizeof (fl));
				break;
			}
			if (ngep->param_link_rx_pause &&
			    !ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_RX;

			if (!ngep->param_link_rx_pause &&
			    !ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_NONE;

			if (!ngep->param_link_rx_pause &&
			    ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_TX;

			if (ngep->param_link_rx_pause &&
			    ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_BI;
			bcopy(&fl, pr_val, sizeof (fl));
		} else
			err = EINVAL;
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_1000fdx;
		}
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_1000fdx;
		}
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 0;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_1000hdx;
		}
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 0;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_1000hdx;
		}
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_100fdx;
		}
		break;
	case MAC_PROP_EN_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_100fdx;
		}
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_100hdx;
		}
		break;
	case MAC_PROP_EN_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_100hdx;
		}
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_10fdx;
		}
		break;
	case MAC_PROP_EN_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_10fdx;
		}
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_10hdx;
		}
		break;
	case MAC_PROP_EN_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_10hdx;
		}
		break;
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val = 0;
		break;
	case MAC_PROP_PRIVATE:
		err = nge_get_priv_prop(ngep, pr_name, pr_flags,
		    pr_valsize, pr_val);
		break;
	case MAC_PROP_MTU: {
		mac_propval_range_t range;

		if (!(pr_flags & MAC_PROP_POSSIBLE))
			return (ENOTSUP);
		if (pr_valsize < sizeof (mac_propval_range_t))
			return (EINVAL);
		range.mpr_count = 1;
		range.mpr_type = MAC_PROPVAL_UINT32;
		range.range_uint32[0].mpur_min =
		    range.range_uint32[0].mpur_max = ETHERMTU;
		if (ngep->dev_spec_param.jumbo)
			range.range_uint32[0].mpur_max = NGE_MAX_MTU;
		bcopy(&range, pr_val, sizeof (range));
		break;
	}
	default:
		err = ENOTSUP;
	}
	return (err);
}

/* ARGSUSED */
static int
nge_set_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_MAX_SDU) {
			err = EINVAL;
		} else {
			ngep->param_txbcopy_threshold = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_MAX_SDU) {
			err = EINVAL;
		} else {
			ngep->param_rxbcopy_threshold = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_recv_max_packet") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
			err = EINVAL;
		} else {
			ngep->param_recv_max_packet = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_poll_quiet_time") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > 10000) {
			err = EINVAL;
		} else {
			ngep->param_poll_quiet_time = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_poll_busy_time") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > 10000) {
			err = EINVAL;
		} else {
			ngep->param_poll_busy_time = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
			err = EINVAL;
		} else {
			ngep->param_rx_intr_hwater = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
			err = EINVAL;
		} else {
			ngep->param_rx_intr_lwater = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_tx_n_intr") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 1 || result > 10000) {
			err = EINVAL;
		} else {
			ngep->param_tx_n_intr = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}

	err = ENOTSUP;
	return (err);

reprogram:
	if (err == 0) {
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
	}

	return (err);
}

static int
nge_get_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	int err = ENOTSUP;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = (is_default ? 1 : ngep->param_adv_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = (is_default ? 1 : ngep->param_adv_asym_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
		value = (is_default ? NGE_TX_COPY_SIZE :
		    ngep->param_txbcopy_threshold);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
		value = (is_default ? NGE_RX_COPY_SIZE :
		    ngep->param_rxbcopy_threshold);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_recv_max_packet") == 0) {
		value = (is_default ? 128 : ngep->param_recv_max_packet);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_poll_quiet_time") == 0) {
		value = (is_default ? NGE_POLL_QUIET_TIME :
		    ngep->param_poll_quiet_time);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_poll_busy_time") == 0) {
		value = (is_default ? NGE_POLL_BUSY_TIME :
		    ngep->param_poll_busy_time);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
		value = (is_default ? 1 : ngep->param_rx_intr_hwater);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
8 : ngep->param_rx_intr_lwater); 2119 err = 0; 2120 goto done; 2121 } 2122 if (strcmp(pr_name, "_tx_n_intr") == 0) { 2123 value = (is_default ? NGE_TX_N_INTR : 2124 ngep->param_tx_n_intr); 2125 err = 0; 2126 goto done; 2127 } 2128 2129 done: 2130 if (err == 0) { 2131 (void) snprintf(pr_val, pr_valsize, "%d", value); 2132 } 2133 return (err); 2134 } 2135 2136 /* ARGSUSED */ 2137 static boolean_t 2138 nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 2139 { 2140 nge_t *ngep = arg; 2141 nge_dev_spec_param_t *dev_param_p; 2142 2143 dev_param_p = &ngep->dev_spec_param; 2144 2145 switch (cap) { 2146 case MAC_CAPAB_HCKSUM: { 2147 uint32_t *hcksum_txflags = cap_data; 2148 2149 if (dev_param_p->tx_hw_checksum) { 2150 *hcksum_txflags = dev_param_p->tx_hw_checksum; 2151 } else 2152 return (B_FALSE); 2153 break; 2154 } 2155 default: 2156 return (B_FALSE); 2157 } 2158 return (B_TRUE); 2159 } 2160 2161 #undef NGE_DBG 2162 #define NGE_DBG NGE_DBG_INIT /* debug flag for this code */ 2163 int 2164 nge_restart(nge_t *ngep) 2165 { 2166 int err = 0; 2167 err = nge_reset_dev(ngep); 2168 /* write back the promisc setting */ 2169 ngep->promisc = ngep->record_promisc; 2170 nge_chip_sync(ngep); 2171 if (!err) 2172 err = nge_chip_start(ngep); 2173 2174 if (err) { 2175 ngep->nge_mac_state = NGE_MAC_STOPPED; 2176 return (DDI_FAILURE); 2177 } else { 2178 ngep->nge_mac_state = NGE_MAC_STARTED; 2179 return (DDI_SUCCESS); 2180 } 2181 } 2182 2183 void 2184 nge_wake_factotum(nge_t *ngep) 2185 { 2186 mutex_enter(ngep->softlock); 2187 if (ngep->factotum_flag == 0) { 2188 ngep->factotum_flag = 1; 2189 (void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL); 2190 } 2191 mutex_exit(ngep->softlock); 2192 } 2193 2194 /* 2195 * High-level cyclic handler 2196 * 2197 * This routine schedules a (low-level) softint callback to the 2198 * factotum. 2199 */ 2200 2201 static void 2202 nge_chip_cyclic(void *arg) 2203 { 2204 nge_t *ngep; 2205 2206 ngep = (nge_t *)arg; 2207 2208 switch (ngep->nge_chip_state) { 2209 default: 2210 return; 2211 2212 case NGE_CHIP_RUNNING: 2213 break; 2214 2215 case NGE_CHIP_FAULT: 2216 case NGE_CHIP_ERROR: 2217 break; 2218 } 2219 2220 nge_wake_factotum(ngep); 2221 } 2222 2223 static void 2224 nge_unattach(nge_t *ngep) 2225 { 2226 send_ring_t *srp; 2227 buff_ring_t *brp; 2228 2229 srp = ngep->send; 2230 brp = ngep->buff; 2231 NGE_TRACE(("nge_unattach($%p)", (void *)ngep)); 2232 2233 /* 2234 * Flag that no more activity may be initiated 2235 */ 2236 ngep->progress &= ~PROGRESS_READY; 2237 ngep->nge_mac_state = NGE_MAC_UNATTACH; 2238 2239 /* 2240 * Quiesce the PHY and MAC (leave it reset but still powered). 
2241 * Clean up and free all NGE data structures 2242 */ 2243 if (ngep->periodic_id != NULL) { 2244 ddi_periodic_delete(ngep->periodic_id); 2245 ngep->periodic_id = NULL; 2246 } 2247 2248 if (ngep->progress & PROGRESS_KSTATS) 2249 nge_fini_kstats(ngep); 2250 2251 if (ngep->progress & PROGRESS_HWINT) { 2252 mutex_enter(ngep->genlock); 2253 nge_restore_mac_addr(ngep); 2254 (void) nge_chip_stop(ngep, B_FALSE); 2255 mutex_exit(ngep->genlock); 2256 } 2257 2258 if (ngep->progress & PROGRESS_SWINT) 2259 nge_rem_intrs(ngep); 2260 2261 if (ngep->progress & PROGRESS_FACTOTUM) 2262 (void) ddi_intr_remove_softint(ngep->factotum_hdl); 2263 2264 if (ngep->progress & PROGRESS_RESCHED) 2265 (void) ddi_intr_remove_softint(ngep->resched_hdl); 2266 2267 if (ngep->progress & PROGRESS_INTR) { 2268 mutex_destroy(srp->tx_lock); 2269 mutex_destroy(srp->tc_lock); 2270 mutex_destroy(&srp->dmah_lock); 2271 mutex_destroy(brp->recycle_lock); 2272 2273 mutex_destroy(ngep->genlock); 2274 mutex_destroy(ngep->softlock); 2275 rw_destroy(ngep->rwlock); 2276 } 2277 2278 if (ngep->progress & PROGRESS_REGS) 2279 ddi_regs_map_free(&ngep->io_handle); 2280 2281 if (ngep->progress & PROGRESS_CFG) 2282 pci_config_teardown(&ngep->cfg_handle); 2283 2284 ddi_remove_minor_node(ngep->devinfo, NULL); 2285 2286 kmem_free(ngep, sizeof (*ngep)); 2287 } 2288 2289 static int 2290 nge_resume(dev_info_t *devinfo) 2291 { 2292 nge_t *ngep; 2293 chip_info_t *infop; 2294 int err; 2295 2296 ASSERT(devinfo != NULL); 2297 2298 ngep = ddi_get_driver_private(devinfo); 2299 err = 0; 2300 2301 /* 2302 * If there are state inconsistencies, this is bad. Returning 2303 * DDI_FAILURE here will eventually cause the machine to panic, 2304 * so it is best done here so that there is a possibility of 2305 * debugging the problem. 2306 */ 2307 if (ngep == NULL) 2308 cmn_err(CE_PANIC, 2309 "nge: ngep returned from ddi_get_driver_private was NULL"); 2310 infop = (chip_info_t *)&ngep->chipinfo; 2311 2312 if (ngep->devinfo != devinfo) 2313 cmn_err(CE_PANIC, 2314 "nge: passed devinfo not the same as saved devinfo"); 2315 2316 mutex_enter(ngep->genlock); 2317 rw_enter(ngep->rwlock, RW_WRITER); 2318 2319 /* 2320 * Fetch the config space. Even though we have most of it cached, 2321 * some values *might* change across a suspend/resume. 2322 */ 2323 nge_chip_cfg_init(ngep, infop, B_FALSE); 2324 2325 /* 2326 * This branch can be taken in only one case: the port 2327 * hasn't been plumbed. 2328 */ 2329 if (ngep->suspended == B_FALSE) { 2330 rw_exit(ngep->rwlock); 2331 mutex_exit(ngep->genlock); 2332 return (DDI_SUCCESS); 2333 } 2334 2335 nge_tx_recycle_all(ngep); 2336 err = nge_reinit_ring(ngep); 2337 if (!err) { 2338 err = nge_chip_reset(ngep); 2339 if (!err) 2340 err = nge_chip_start(ngep); 2341 } 2342 2343 if (err) { 2344 /* 2345 * We note the failure, but return success, as the 2346 * system is still usable without this controller. 2347 */ 2348 cmn_err(CE_WARN, "nge: resume: failed to restart controller"); 2349 } else { 2350 ngep->nge_mac_state = NGE_MAC_STARTED; 2351 } 2352 ngep->suspended = B_FALSE; 2353 2354 rw_exit(ngep->rwlock); 2355 mutex_exit(ngep->genlock); 2356 2357 return (DDI_SUCCESS); 2358 } 2359 2360 /* 2361 * attach(9E) -- Attach a device to the system 2362 * 2363 * Called once for each board successfully probed.
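 *
 * The body below proceeds in order: set up PCI config access, seed the default values of the private tunables, map the device registers, register interrupts and initialize the locks, reset and sync the chip, register the NDD parameters, create the kstats, register with the MAC layer, and finally start the periodic nge_chip_cyclic() handler.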
2364 */ 2365 static int 2366 nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 2367 { 2368 int err; 2369 int i; 2370 int instance; 2371 caddr_t regs; 2372 nge_t *ngep; 2373 chip_info_t *infop; 2374 mac_register_t *macp; 2375 2376 switch (cmd) { 2377 default: 2378 return (DDI_FAILURE); 2379 2380 case DDI_RESUME: 2381 return (nge_resume(devinfo)); 2382 2383 case DDI_ATTACH: 2384 break; 2385 } 2386 2387 ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP); 2388 instance = ddi_get_instance(devinfo); 2389 ddi_set_driver_private(devinfo, ngep); 2390 ngep->devinfo = devinfo; 2391 2392 (void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d", 2393 NGE_DRIVER_NAME, instance); 2394 err = pci_config_setup(devinfo, &ngep->cfg_handle); 2395 if (err != DDI_SUCCESS) { 2396 nge_problem(ngep, "nge_attach: pci_config_setup() failed"); 2397 goto attach_fail; 2398 } 2399 /* 2400 * param_txbcopy_threshold and param_rxbcopy_threshold are tx/rx bcopy 2401 * thresholds. Bounds: min 0, max NGE_MAX_SDU 2402 */ 2403 ngep->param_txbcopy_threshold = NGE_TX_COPY_SIZE; 2404 ngep->param_rxbcopy_threshold = NGE_RX_COPY_SIZE; 2405 2406 /* 2407 * param_recv_max_packet is the maximum number of packets received per 2408 * interrupt. Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024 2409 */ 2410 ngep->param_recv_max_packet = 128; 2411 2412 /* 2413 * param_poll_quiet_time and param_poll_busy_time are the quiet/busy times 2414 * used to switch between per-packet interrupts and polling mode. 2415 * Bounds: min 0, max 10000 2416 */ 2417 ngep->param_poll_quiet_time = NGE_POLL_QUIET_TIME; 2418 ngep->param_poll_busy_time = NGE_POLL_BUSY_TIME; 2419 2420 /* 2421 * param_rx_intr_hwater/param_rx_intr_lwater: packets received 2422 * to trigger the poll_quiet_time/poll_busy_time counter. 2423 * Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024. 2424 */ 2425 ngep->param_rx_intr_hwater = 1; 2426 ngep->param_rx_intr_lwater = 8; 2427 2428 /* 2429 * param_tx_n_intr: do a tx recycle after every N tx packets in poll mode. 2430 * Bounds: min 1, max 10000. 2431 */ 2432 ngep->param_tx_n_intr = NGE_TX_N_INTR; 2433 2434 infop = (chip_info_t *)&ngep->chipinfo; 2435 nge_chip_cfg_init(ngep, infop, B_FALSE); 2436 nge_init_dev_spec_param(ngep); 2437 nge_get_props(ngep); 2438 ngep->progress |= PROGRESS_CFG; 2439 2440 err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER, 2441 &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle); 2442 if (err != DDI_SUCCESS) { 2443 nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed"); 2444 goto attach_fail; 2445 } 2446 ngep->io_regs = regs; 2447 ngep->progress |= PROGRESS_REGS; 2448 2449 err = nge_register_intrs_and_init_locks(ngep); 2450 if (err != DDI_SUCCESS) { 2451 nge_problem(ngep, "nge_attach:" 2452 " register intrs and init locks failed"); 2453 goto attach_fail; 2454 } 2455 nge_init_ring_param_lock(ngep); 2456 ngep->progress |= PROGRESS_INTR; 2457 2458 mutex_enter(ngep->genlock); 2459 2460 /* 2461 * Initialise link state variables 2462 * Stop, reset & reinitialise the chip. 2463 * Initialise the (internal) PHY. 2464 */ 2465 nge_phys_init(ngep); 2466 ngep->nge_chip_state = NGE_CHIP_INITIAL; 2467 err = nge_chip_reset(ngep); 2468 if (err != DDI_SUCCESS) { 2469 nge_problem(ngep, "nge_attach: nge_chip_reset() failed"); 2470 mutex_exit(ngep->genlock); 2471 goto attach_fail; 2472 } 2473 nge_chip_sync(ngep); 2474 2475 /* 2476 * Now that mutex locks are initialized, enable interrupts.
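 * If the interrupt capability includes DDI_INTR_FLAG_BLOCK, all vectors are enabled with a single ddi_intr_block_enable() call; otherwise each handle is enabled individually with ddi_intr_enable().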
2477 */ 2478 if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) { 2479 /* Call ddi_intr_block_enable() for MSI interrupts */ 2480 (void) ddi_intr_block_enable(ngep->htable, 2481 ngep->intr_actual_cnt); 2482 } else { 2483 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 2484 for (i = 0; i < ngep->intr_actual_cnt; i++) { 2485 (void) ddi_intr_enable(ngep->htable[i]); 2486 } 2487 } 2488 2489 ngep->link_state = LINK_STATE_UNKNOWN; 2490 ngep->progress |= PROGRESS_HWINT; 2491 2492 /* 2493 * Register NDD-tweakable parameters 2494 */ 2495 if (nge_nd_init(ngep)) { 2496 nge_problem(ngep, "nge_attach: nge_nd_init() failed"); 2497 mutex_exit(ngep->genlock); 2498 goto attach_fail; 2499 } 2500 ngep->progress |= PROGRESS_NDD; 2501 2502 /* 2503 * Create & initialise named kstats 2504 */ 2505 nge_init_kstats(ngep, instance); 2506 ngep->progress |= PROGRESS_KSTATS; 2507 2508 mutex_exit(ngep->genlock); 2509 2510 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 2511 goto attach_fail; 2512 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 2513 macp->m_driver = ngep; 2514 macp->m_dip = devinfo; 2515 macp->m_src_addr = infop->vendor_addr.addr; 2516 macp->m_callbacks = &nge_m_callbacks; 2517 macp->m_min_sdu = 0; 2518 macp->m_max_sdu = ngep->default_mtu; 2519 macp->m_margin = VTAG_SIZE; 2520 macp->m_priv_props = nge_priv_props; 2521 macp->m_priv_prop_count = NGE_MAX_PRIV_PROPS; 2522 /* 2523 * Finally, we're ready to register ourselves with the mac 2524 * interface; if this succeeds, we're all ready to start() 2525 */ 2526 err = mac_register(macp, &ngep->mh); 2527 mac_free(macp); 2528 if (err != 0) 2529 goto attach_fail; 2530 2531 /* 2532 * Register a periodic handler. 2533 * nge_chip_cyclic() is invoked in kernel context. 2534 */ 2535 ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep, 2536 NGE_CYCLIC_PERIOD, DDI_IPL_0); 2537 2538 ngep->progress |= PROGRESS_READY; 2539 return (DDI_SUCCESS); 2540 2541 attach_fail: 2542 nge_unattach(ngep); 2543 return (DDI_FAILURE); 2544 } 2545 2546 static int 2547 nge_suspend(nge_t *ngep) 2548 { 2549 mutex_enter(ngep->genlock); 2550 rw_enter(ngep->rwlock, RW_WRITER); 2551 2552 /* if the port hasn't been plumbed, just return */ 2553 if (ngep->nge_mac_state != NGE_MAC_STARTED) { 2554 rw_exit(ngep->rwlock); 2555 mutex_exit(ngep->genlock); 2556 return (DDI_SUCCESS); 2557 } 2558 ngep->suspended = B_TRUE; 2559 (void) nge_chip_stop(ngep, B_FALSE); 2560 ngep->nge_mac_state = NGE_MAC_STOPPED; 2561 2562 rw_exit(ngep->rwlock); 2563 mutex_exit(ngep->genlock); 2564 return (DDI_SUCCESS); 2565 } 2566 2567 /* 2568 * detach(9E) -- Detach a device from the system 2569 */ 2570 static int 2571 nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 2572 { 2573 int i; 2574 nge_t *ngep; 2575 mul_item *p, *nextp; 2576 buff_ring_t *brp; 2577 2578 NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd)); 2579 2580 ngep = ddi_get_driver_private(devinfo); 2581 brp = ngep->buff; 2582 2583 switch (cmd) { 2584 default: 2585 return (DDI_FAILURE); 2586 2587 case DDI_SUSPEND: 2588 /* 2589 * Stop the NIC 2590 * Note: This driver doesn't currently support WOL, but 2591 * should it in the future, it is important to 2592 * make sure the PHY remains powered so that the 2593 * wakeup packet can actually be received.
2594 */ 2595 return (nge_suspend(ngep)); 2596 2597 case DDI_DETACH: 2598 break; 2599 } 2600 2601 /* Wait for all buffers posted to the upper layer to be released */ 2602 for (i = 0; i < 1000; i++) { 2603 if (brp->rx_hold == 0) 2604 break; 2605 drv_usecwait(1000); 2606 } 2607 2608 /* If any buffers are still held by the upper layer, refuse to detach */ 2609 if (brp->rx_hold != 0) 2610 return (DDI_FAILURE); 2611 2612 /* 2613 * Unregister from the GLD subsystem. This can fail, in 2614 * particular if there are DLPI style-2 streams still open - 2615 * in which case we just return failure without shutting 2616 * down chip operations. 2617 */ 2618 if (mac_unregister(ngep->mh) != DDI_SUCCESS) 2619 return (DDI_FAILURE); 2620 2621 /* 2622 * Recycle the multicast table. mac_unregister() must be called 2623 * first so that the multicast table is still usable if 2624 * mac_unregister() fails. 2625 */ 2626 for (p = ngep->pcur_mulist; p != NULL; p = nextp) { 2627 nextp = p->next; 2628 kmem_free(p, sizeof (mul_item)); 2629 } 2630 ngep->pcur_mulist = NULL; 2631 2632 /* 2633 * All activity stopped, so we can clean up & exit 2634 */ 2635 nge_unattach(ngep); 2636 return (DDI_SUCCESS); 2637 } 2638 2639 /* 2640 * quiesce(9E) entry point. 2641 * 2642 * This function is called when the system is single-threaded at high 2643 * PIL with preemption disabled. Therefore, this function must not 2644 * block. 2645 * 2646 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 2647 * DDI_FAILURE indicates an error condition and should almost never happen. 2648 */ 2649 static int 2650 nge_quiesce(dev_info_t *devinfo) 2651 { 2652 nge_t *ngep; 2653 2654 ngep = ddi_get_driver_private(devinfo); 2655 2656 if (ngep == NULL) 2657 return (DDI_FAILURE); 2658 2659 /* 2660 * Turn off debug tracing 2661 */ 2662 nge_debug = 0; 2663 ngep->debug = 0; 2664 2665 nge_restore_mac_addr(ngep); 2666 (void) nge_chip_stop(ngep, B_FALSE); 2667 2668 return (DDI_SUCCESS); 2669 } 2670 2671 2672 2673 /* 2674 * ========== Module Loading Data & Entry Points ========== 2675 */ 2676 2677 DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach, 2678 NULL, NULL, D_MP, NULL, nge_quiesce); 2679 2680 2681 static struct modldrv nge_modldrv = { 2682 &mod_driverops, /* Type of module.
This one is a driver */ 2683 nge_ident, /* short description */ 2684 &nge_dev_ops /* driver specific ops */ 2685 }; 2686 2687 static struct modlinkage modlinkage = { 2688 MODREV_1, (void *)&nge_modldrv, NULL 2689 }; 2690 2691 2692 int 2693 _info(struct modinfo *modinfop) 2694 { 2695 return (mod_info(&modlinkage, modinfop)); 2696 } 2697 2698 int 2699 _init(void) 2700 { 2701 int status; 2702 2703 mac_init_ops(&nge_dev_ops, "nge"); 2704 status = mod_install(&modlinkage); 2705 if (status != DDI_SUCCESS) 2706 mac_fini_ops(&nge_dev_ops); 2707 else 2708 mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL); 2709 2710 return (status); 2711 } 2712 2713 int 2714 _fini(void) 2715 { 2716 int status; 2717 2718 status = mod_remove(&modlinkage); 2719 if (status == DDI_SUCCESS) { 2720 mac_fini_ops(&nge_dev_ops); 2721 mutex_destroy(nge_log_mutex); 2722 } 2723 2724 return (status); 2725 } 2726 2727 /* 2728 * ============ Init MSI/Fixed/SoftInterrupt routines ============== 2729 */ 2730 2731 /* 2732 * Register interrupts and initialize the mutexes and condition variables 2733 */ 2734 2735 static int 2736 nge_register_intrs_and_init_locks(nge_t *ngep) 2737 { 2738 int err; 2739 int intr_types; 2740 uint_t soft_prip; 2741 nge_msi_mask msi_mask; 2742 nge_msi_map0_vec map0_vec; 2743 nge_msi_map1_vec map1_vec; 2744 2745 /* 2746 * Add the softint handlers: 2747 * 2748 * Both of these handlers are used to avoid restrictions on the 2749 * context and/or mutexes required for some operations. In 2750 * particular, the hardware interrupt handler and its subfunctions 2751 * can detect a number of conditions that we don't want to handle 2752 * in that context or with that set of mutexes held. So, these 2753 * softints are triggered instead: 2754 * 2755 * the <resched> softint is triggered if we have previously 2756 * had to refuse to send a packet because of resource shortage 2757 * (we've run out of transmit buffers), but the send completion 2758 * interrupt handler has now detected that more buffers have 2759 * become available. Its only purpose is to call gld_sched() 2760 * to retry the pending transmits (we're not allowed to hold 2761 * driver-defined mutexes across gld_sched()). 2762 * 2763 * the <factotum> is triggered if the h/w interrupt handler 2764 * sees the <link state changed> or <error> bits in the status 2765 * block. It's also triggered periodically to poll the link 2766 * state, just in case we aren't getting link status change 2767 * interrupts ...
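 *
 * Concretely, the two handlers registered below via ddi_intr_add_softint() are nge_reschedule() for <resched> and nge_chip_factotum() for <factotum>.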
2768 */ 2769 err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl, 2770 DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep); 2771 if (err != DDI_SUCCESS) { 2772 nge_problem(ngep, 2773 "nge_attach: add nge_reschedule softintr failed"); 2774 2775 return (DDI_FAILURE); 2776 } 2777 ngep->progress |= PROGRESS_RESCHED; 2778 err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl, 2779 DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep); 2780 if (err != DDI_SUCCESS) { 2781 nge_problem(ngep, 2782 "nge_attach: add nge_chip_factotum softintr failed!"); 2783 2784 return (DDI_FAILURE); 2785 } 2786 if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip) 2787 != DDI_SUCCESS) { 2788 nge_problem(ngep, "nge_attach: get softintr priority failed\n"); 2789 2790 return (DDI_FAILURE); 2791 } 2792 ngep->soft_pri = soft_prip; 2793 2794 ngep->progress |= PROGRESS_FACTOTUM; 2795 /* Get supported interrupt types */ 2796 if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types) 2797 != DDI_SUCCESS) { 2798 nge_error(ngep, "ddi_intr_get_supported_types failed\n"); 2799 2800 return (DDI_FAILURE); 2801 } 2802 2803 NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x", 2804 intr_types)); 2805 2806 if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) { 2807 2808 /* MSI configuration for the MCP55 chipset */ 2809 if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 || 2810 ngep->chipinfo.device == DEVICE_ID_MCP55_372) { 2811 2812 2813 /* Enable the 8 vectors */ 2814 msi_mask.msi_mask_val = 2815 nge_reg_get32(ngep, NGE_MSI_MASK); 2816 msi_mask.msi_msk_bits.vec0 = NGE_SET; 2817 msi_mask.msi_msk_bits.vec1 = NGE_SET; 2818 msi_mask.msi_msk_bits.vec2 = NGE_SET; 2819 msi_mask.msi_msk_bits.vec3 = NGE_SET; 2820 msi_mask.msi_msk_bits.vec4 = NGE_SET; 2821 msi_mask.msi_msk_bits.vec5 = NGE_SET; 2822 msi_mask.msi_msk_bits.vec6 = NGE_SET; 2823 msi_mask.msi_msk_bits.vec7 = NGE_SET; 2824 nge_reg_put32(ngep, NGE_MSI_MASK, 2825 msi_mask.msi_mask_val); 2826 2827 /* 2828 * Remap the MSI MAP0 and MAP1 registers. By default the MCP55 2829 * maps all interrupts to vector 0, so software 2830 * must remap them here. 2831 * This mapping is the same as on the CK804.
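 * The assignments below place reint/rcint/rfint on vector 0, stint on vector 2, miss on vector 3, teint/tcint/tfint on vector 5, and mint/feint on vector 6.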
2832 */ 2833 map0_vec.msi_map0_val = 2834 nge_reg_get32(ngep, NGE_MSI_MAP0); 2835 map1_vec.msi_map1_val = 2836 nge_reg_get32(ngep, NGE_MSI_MAP1); 2837 map0_vec.vecs_bits.reint_vec = 0; 2838 map0_vec.vecs_bits.rcint_vec = 0; 2839 map0_vec.vecs_bits.miss_vec = 3; 2840 map0_vec.vecs_bits.teint_vec = 5; 2841 map0_vec.vecs_bits.tcint_vec = 5; 2842 map0_vec.vecs_bits.stint_vec = 2; 2843 map0_vec.vecs_bits.mint_vec = 6; 2844 map0_vec.vecs_bits.rfint_vec = 0; 2845 map1_vec.vecs_bits.tfint_vec = 5; 2846 map1_vec.vecs_bits.feint_vec = 6; 2847 map1_vec.vecs_bits.resv8_11 = 3; 2848 map1_vec.vecs_bits.resv12_15 = 1; 2849 map1_vec.vecs_bits.resv16_19 = 0; 2850 map1_vec.vecs_bits.resv20_23 = 7; 2851 map1_vec.vecs_bits.resv24_31 = 0xff; 2852 nge_reg_put32(ngep, NGE_MSI_MAP0, 2853 map0_vec.msi_map0_val); 2854 nge_reg_put32(ngep, NGE_MSI_MAP1, 2855 map1_vec.msi_map1_val); 2856 } 2857 if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 2858 NGE_DEBUG(("MSI registration failed, " 2859 "trying FIXED interrupt type\n")); 2860 } else { 2861 nge_log(ngep, "Using MSI interrupt type\n"); 2862 2863 ngep->intr_type = DDI_INTR_TYPE_MSI; 2864 ngep->progress |= PROGRESS_SWINT; 2865 } 2866 } 2867 2868 if (!(ngep->progress & PROGRESS_SWINT) && 2869 (intr_types & DDI_INTR_TYPE_FIXED)) { 2870 if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 2871 nge_error(ngep, "FIXED interrupt " 2872 "registration failed\n"); 2873 2874 return (DDI_FAILURE); 2875 } 2876 2877 nge_log(ngep, "Using FIXED interrupt type\n"); 2878 2879 ngep->intr_type = DDI_INTR_TYPE_FIXED; 2880 ngep->progress |= PROGRESS_SWINT; 2881 } 2882 2883 2884 if (!(ngep->progress & PROGRESS_SWINT)) { 2885 nge_error(ngep, "No interrupts registered\n"); 2886 2887 return (DDI_FAILURE); 2888 } 2889 mutex_init(ngep->genlock, NULL, MUTEX_DRIVER, 2890 DDI_INTR_PRI(ngep->intr_pri)); 2891 mutex_init(ngep->softlock, NULL, MUTEX_DRIVER, 2892 DDI_INTR_PRI(ngep->soft_pri)); 2893 rw_init(ngep->rwlock, NULL, RW_DRIVER, 2894 DDI_INTR_PRI(ngep->intr_pri)); 2895 2896 return (DDI_SUCCESS); 2897 } 2898 2899 /* 2900 * nge_add_intrs: 2901 * 2902 * Register FIXED or MSI interrupts. 
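 *
 * The sequence is: query the supported and available counts with ddi_intr_get_nintrs() and ddi_intr_get_navail(), allocate the handles, fetch the priority of the first handle, attach nge_chip_intr() to each handle, and read the interrupt capabilities; any failure frees whatever has been set up so far.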
2903 */ 2904 static int 2905 nge_add_intrs(nge_t *ngep, int intr_type) 2906 { 2907 dev_info_t *dip = ngep->devinfo; 2908 int avail, actual, intr_size, count = 0; 2909 int i, flag, ret; 2910 2911 NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type)); 2912 2913 /* Get number of interrupts */ 2914 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 2915 if ((ret != DDI_SUCCESS) || (count == 0)) { 2916 nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, " 2917 "count: %d", ret, count); 2918 2919 return (DDI_FAILURE); 2920 } 2921 2922 /* Get number of available interrupts */ 2923 ret = ddi_intr_get_navail(dip, intr_type, &avail); 2924 if ((ret != DDI_SUCCESS) || (avail == 0)) { 2925 nge_error(ngep, "ddi_intr_get_navail() failure, " 2926 "ret: %d, avail: %d\n", ret, avail); 2927 2928 return (DDI_FAILURE); 2929 } 2930 2931 if (avail < count) { 2932 NGE_DEBUG(("nintrs() returned %d, navail returned %d\n", 2933 count, avail)); 2934 } 2935 flag = DDI_INTR_ALLOC_NORMAL; 2936 2937 /* Allocate an array of interrupt handles */ 2938 intr_size = count * sizeof (ddi_intr_handle_t); 2939 ngep->htable = kmem_alloc(intr_size, KM_SLEEP); 2940 2941 /* Call ddi_intr_alloc() */ 2942 ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0, 2943 count, &actual, flag); 2944 2945 if ((ret != DDI_SUCCESS) || (actual == 0)) { 2946 nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret); 2947 2948 kmem_free(ngep->htable, intr_size); 2949 return (DDI_FAILURE); 2950 } 2951 2952 if (actual < count) { 2953 NGE_DEBUG(("Requested: %d, Received: %d\n", 2954 count, actual)); 2955 } 2956 2957 ngep->intr_actual_cnt = actual; 2958 ngep->intr_req_cnt = count; 2959 2960 /* 2961 * Get priority for the first MSI, assume the remaining are all the same 2962 */ 2963 if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) != 2964 DDI_SUCCESS) { 2965 nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret); 2966 2967 /* Free already allocated intr */ 2968 for (i = 0; i < actual; i++) { 2969 (void) ddi_intr_free(ngep->htable[i]); 2970 } 2971 2972 kmem_free(ngep->htable, intr_size); 2973 2974 return (DDI_FAILURE); 2975 } 2976 /* Test for high level mutex */ 2977 if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) { 2978 nge_error(ngep, "nge_add_intrs: " 2979 "High level interrupt not supported"); 2980 2981 for (i = 0; i < actual; i++) 2982 (void) ddi_intr_free(ngep->htable[i]); 2983 2984 kmem_free(ngep->htable, intr_size); 2985 2986 return (DDI_FAILURE); 2987 } 2988 2989 2990 /* Call ddi_intr_add_handler() */ 2991 for (i = 0; i < actual; i++) { 2992 if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr, 2993 (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 2994 nge_error(ngep, "ddi_intr_add_handler() " 2995 "failed %d\n", ret); 2996 2997 /* Free already allocated intr */ 2998 for (i = 0; i < actual; i++) { 2999 (void) ddi_intr_free(ngep->htable[i]); 3000 } 3001 3002 kmem_free(ngep->htable, intr_size); 3003 3004 return (DDI_FAILURE); 3005 } 3006 } 3007 3008 if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap)) 3009 != DDI_SUCCESS) { 3010 nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret); 3011 3012 for (i = 0; i < actual; i++) { 3013 (void) ddi_intr_remove_handler(ngep->htable[i]); 3014 (void) ddi_intr_free(ngep->htable[i]); 3015 } 3016 3017 kmem_free(ngep->htable, intr_size); 3018 3019 return (DDI_FAILURE); 3020 } 3021 3022 return (DDI_SUCCESS); 3023 } 3024 3025 /* 3026 * nge_rem_intrs: 3027 * 3028 * Unregister FIXED or MSI interrupts 3029 */ 3030 static void 3031 nge_rem_intrs(nge_t *ngep) 3032 { 3033 int i; 3034
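/* Teardown mirrors nge_add_intrs(): disable the vectors (block-disable if supported), remove each handler, free each handle, then free the handle table. */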
3035 NGE_DEBUG(("nge_rem_intrs\n")); 3036 3037 /* Disable all interrupts */ 3038 if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3039 /* Call ddi_intr_block_disable() */ 3040 (void) ddi_intr_block_disable(ngep->htable, 3041 ngep->intr_actual_cnt); 3042 } else { 3043 for (i = 0; i < ngep->intr_actual_cnt; i++) { 3044 (void) ddi_intr_disable(ngep->htable[i]); 3045 } 3046 } 3047 3048 /* Call ddi_intr_remove_handler() */ 3049 for (i = 0; i < ngep->intr_actual_cnt; i++) { 3050 (void) ddi_intr_remove_handler(ngep->htable[i]); 3051 (void) ddi_intr_free(ngep->htable[i]); 3052 } 3053 3054 kmem_free(ngep->htable, 3055 ngep->intr_req_cnt * sizeof (ddi_intr_handle_t)); 3056 } 3057