1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 28 #include "nge.h" 29 30 /* 31 * Describes the chip's DMA engine 32 */ 33 34 static ddi_dma_attr_t hot_dma_attr = { 35 DMA_ATTR_V0, /* dma_attr version */ 36 0x0000000000000000ull, /* dma_attr_addr_lo */ 37 0x000000FFFFFFFFFFull, /* dma_attr_addr_hi */ 38 0x000000007FFFFFFFull, /* dma_attr_count_max */ 39 0x0000000000000010ull, /* dma_attr_align */ 40 0x00000FFF, /* dma_attr_burstsizes */ 41 0x00000001, /* dma_attr_minxfer */ 42 0x000000000000FFFFull, /* dma_attr_maxxfer */ 43 0x000000FFFFFFFFFFull, /* dma_attr_seg */ 44 1, /* dma_attr_sgllen */ 45 0x00000001, /* dma_attr_granular */ 46 0 47 }; 48 49 static ddi_dma_attr_t hot_tx_dma_attr = { 50 DMA_ATTR_V0, /* dma_attr version */ 51 0x0000000000000000ull, /* dma_attr_addr_lo */ 52 0x000000FFFFFFFFFFull, /* dma_attr_addr_hi */ 53 0x0000000000003FFFull, /* dma_attr_count_max */ 54 0x0000000000000010ull, /* dma_attr_align */ 55 0x00000FFF, /* dma_attr_burstsizes */ 56 0x00000001, /* dma_attr_minxfer */ 57 0x0000000000003FFFull, /* dma_attr_maxxfer */ 58 0x000000FFFFFFFFFFull, /* dma_attr_seg */ 59 NGE_MAX_COOKIES, /* dma_attr_sgllen */ 60 1, /* dma_attr_granular */ 61 0 62 }; 63 64 static ddi_dma_attr_t sum_dma_attr = { 65 DMA_ATTR_V0, /* dma_attr version */ 66 0x0000000000000000ull, /* dma_attr_addr_lo */ 67 0x00000000FFFFFFFFull, /* dma_attr_addr_hi */ 68 0x000000007FFFFFFFull, /* dma_attr_count_max */ 69 0x0000000000000010ull, /* dma_attr_align */ 70 0x00000FFF, /* dma_attr_burstsizes */ 71 0x00000001, /* dma_attr_minxfer */ 72 0x000000000000FFFFull, /* dma_attr_maxxfer */ 73 0x00000000FFFFFFFFull, /* dma_attr_seg */ 74 1, /* dma_attr_sgllen */ 75 0x00000001, /* dma_attr_granular */ 76 0 77 }; 78 79 static ddi_dma_attr_t sum_tx_dma_attr = { 80 DMA_ATTR_V0, /* dma_attr version */ 81 0x0000000000000000ull, /* dma_attr_addr_lo */ 82 0x00000000FFFFFFFFull, /* dma_attr_addr_hi */ 83 0x0000000000003FFFull, /* dma_attr_count_max */ 84 0x0000000000000010ull, /* dma_attr_align */ 85 0x00000FFF, /* dma_attr_burstsizes */ 86 0x00000001, /* dma_attr_minxfer */ 87 0x0000000000003FFFull, /* dma_attr_maxxfer */ 88 0x00000000FFFFFFFFull, /* dma_attr_seg */ 89 NGE_MAX_COOKIES, /* dma_attr_sgllen */ 90 1, /* dma_attr_granular */ 91 0 92 }; 93 94 /* 95 * DMA access attributes for data. 96 */ 97 ddi_device_acc_attr_t nge_data_accattr = { 98 DDI_DEVICE_ATTR_V0, 99 DDI_STRUCTURE_LE_ACC, 100 DDI_STRICTORDER_ACC, 101 DDI_DEFAULT_ACC 102 }; 103 104 /* 105 * DMA access attributes for descriptors. 
106 */ 107 static ddi_device_acc_attr_t nge_desc_accattr = { 108 DDI_DEVICE_ATTR_V0, 109 DDI_STRUCTURE_LE_ACC, 110 DDI_STRICTORDER_ACC, 111 DDI_DEFAULT_ACC 112 }; 113 114 /* 115 * PIO access attributes for registers 116 */ 117 static ddi_device_acc_attr_t nge_reg_accattr = { 118 DDI_DEVICE_ATTR_V0, 119 DDI_STRUCTURE_LE_ACC, 120 DDI_STRICTORDER_ACC, 121 DDI_DEFAULT_ACC 122 }; 123 124 /* 125 * NIC DESC MODE 2 126 */ 127 128 static const nge_desc_attr_t nge_sum_desc = { 129 130 sizeof (sum_rx_bd), 131 sizeof (sum_tx_bd), 132 &sum_dma_attr, 133 &sum_tx_dma_attr, 134 nge_sum_rxd_fill, 135 nge_sum_rxd_check, 136 nge_sum_txd_fill, 137 nge_sum_txd_check, 138 }; 139 140 /* 141 * NIC DESC MODE 3 142 */ 143 144 static const nge_desc_attr_t nge_hot_desc = { 145 146 sizeof (hot_rx_bd), 147 sizeof (hot_tx_bd), 148 &hot_dma_attr, 149 &hot_tx_dma_attr, 150 nge_hot_rxd_fill, 151 nge_hot_rxd_check, 152 nge_hot_txd_fill, 153 nge_hot_txd_check, 154 }; 155 156 static char nge_ident[] = "nVidia 1Gb Ethernet"; 157 static char clsize_propname[] = "cache-line-size"; 158 static char latency_propname[] = "latency-timer"; 159 static char debug_propname[] = "nge-debug-flags"; 160 static char intr_moderation[] = "intr-moderation"; 161 static char rx_data_hw[] = "rx-data-hw"; 162 static char rx_prd_lw[] = "rx-prd-lw"; 163 static char rx_prd_hw[] = "rx-prd-hw"; 164 static char sw_intr_intv[] = "sw-intr-intvl"; 165 static char nge_desc_mode[] = "desc-mode"; 166 static char default_mtu[] = "default_mtu"; 167 static char low_memory_mode[] = "minimal-memory-usage"; 168 extern kmutex_t nge_log_mutex[1]; 169 170 static int nge_m_start(void *); 171 static void nge_m_stop(void *); 172 static int nge_m_promisc(void *, boolean_t); 173 static int nge_m_multicst(void *, boolean_t, const uint8_t *); 174 static int nge_m_unicst(void *, const uint8_t *); 175 static void nge_m_ioctl(void *, queue_t *, mblk_t *); 176 static boolean_t nge_m_getcapab(void *, mac_capab_t, void *); 177 static int nge_m_setprop(void *, const char *, mac_prop_id_t, 178 uint_t, const void *); 179 static int nge_m_getprop(void *, const char *, mac_prop_id_t, 180 uint_t, void *); 181 static void nge_m_propinfo(void *, const char *, mac_prop_id_t, 182 mac_prop_info_handle_t); 183 static int nge_set_priv_prop(nge_t *, const char *, uint_t, 184 const void *); 185 static int nge_get_priv_prop(nge_t *, const char *, uint_t, 186 void *); 187 188 #define NGE_M_CALLBACK_FLAGS\ 189 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | \ 190 MC_PROPINFO) 191 192 static mac_callbacks_t nge_m_callbacks = { 193 NGE_M_CALLBACK_FLAGS, 194 nge_m_stat, 195 nge_m_start, 196 nge_m_stop, 197 nge_m_promisc, 198 nge_m_multicst, 199 nge_m_unicst, 200 nge_m_tx, 201 NULL, 202 nge_m_ioctl, 203 nge_m_getcapab, 204 NULL, 205 NULL, 206 nge_m_setprop, 207 nge_m_getprop, 208 nge_m_propinfo 209 }; 210 211 char *nge_priv_props[] = { 212 "_tx_bcopy_threshold", 213 "_rx_bcopy_threshold", 214 "_recv_max_packet", 215 "_poll_quiet_time", 216 "_poll_busy_time", 217 "_rx_intr_hwater", 218 "_rx_intr_lwater", 219 NULL 220 }; 221 222 static int nge_add_intrs(nge_t *, int); 223 static void nge_rem_intrs(nge_t *); 224 static int nge_register_intrs_and_init_locks(nge_t *); 225 226 /* 227 * NGE MSI tunable: 228 */ 229 boolean_t nge_enable_msi = B_FALSE; 230 231 static enum ioc_reply 232 nge_set_loop_mode(nge_t *ngep, uint32_t mode) 233 { 234 /* 235 * If the mode isn't being changed, there's nothing to do ... 
236 */ 237 if (mode == ngep->param_loop_mode) 238 return (IOC_ACK); 239 240 /* 241 * Validate the requested mode and prepare a suitable message 242 * to explain the link down/up cycle that the change will 243 * probably induce ... 244 */ 245 switch (mode) { 246 default: 247 return (IOC_INVAL); 248 249 case NGE_LOOP_NONE: 250 case NGE_LOOP_EXTERNAL_100: 251 case NGE_LOOP_EXTERNAL_10: 252 case NGE_LOOP_INTERNAL_PHY: 253 break; 254 } 255 256 /* 257 * All OK; tell the caller to reprogram 258 * the PHY and/or MAC for the new mode ... 259 */ 260 ngep->param_loop_mode = mode; 261 return (IOC_RESTART_ACK); 262 } 263 264 #undef NGE_DBG 265 #define NGE_DBG NGE_DBG_INIT 266 267 /* 268 * Utility routine to carve a slice off a chunk of allocated memory, 269 * updating the chunk descriptor accordingly. The size of the slice 270 * is given by the product of the <qty> and <size> parameters. 271 */ 272 void 273 nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk, 274 uint32_t qty, uint32_t size) 275 { 276 size_t totsize; 277 278 totsize = qty*size; 279 ASSERT(size > 0); 280 ASSERT(totsize <= chunk->alength); 281 282 *slice = *chunk; 283 slice->nslots = qty; 284 slice->size = size; 285 slice->alength = totsize; 286 287 chunk->mem_va = (caddr_t)chunk->mem_va + totsize; 288 chunk->alength -= totsize; 289 chunk->offset += totsize; 290 chunk->cookie.dmac_laddress += totsize; 291 chunk->cookie.dmac_size -= totsize; 292 } 293 294 /* 295 * Allocate an area of memory and a DMA handle for accessing it 296 */ 297 int 298 nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p, 299 uint_t dma_flags, dma_area_t *dma_p) 300 { 301 int err; 302 caddr_t va; 303 304 NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)", 305 (void *)ngep, memsize, attr_p, dma_flags, dma_p)); 306 /* 307 * Allocate handle 308 */ 309 err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr, 310 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl); 311 if (err != DDI_SUCCESS) 312 goto fail; 313 314 /* 315 * Allocate memory 316 */ 317 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p, 318 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING), 319 DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl); 320 if (err != DDI_SUCCESS) 321 goto fail; 322 323 /* 324 * Bind the two together 325 */ 326 dma_p->mem_va = va; 327 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 328 va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL, 329 &dma_p->cookie, &dma_p->ncookies); 330 331 if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) 332 goto fail; 333 334 dma_p->nslots = ~0U; 335 dma_p->size = ~0U; 336 dma_p->offset = 0; 337 338 return (DDI_SUCCESS); 339 340 fail: 341 nge_free_dma_mem(dma_p); 342 NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!")); 343 344 return (DDI_FAILURE); 345 } 346 347 /* 348 * Free one allocated area of DMAable memory 349 */ 350 void 351 nge_free_dma_mem(dma_area_t *dma_p) 352 { 353 if (dma_p->dma_hdl != NULL) { 354 if (dma_p->ncookies) { 355 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 356 dma_p->ncookies = 0; 357 } 358 } 359 if (dma_p->acc_hdl != NULL) { 360 ddi_dma_mem_free(&dma_p->acc_hdl); 361 dma_p->acc_hdl = NULL; 362 } 363 if (dma_p->dma_hdl != NULL) { 364 ddi_dma_free_handle(&dma_p->dma_hdl); 365 dma_p->dma_hdl = NULL; 366 } 367 } 368 369 #define ALLOC_TX_BUF 0x1 370 #define ALLOC_TX_DESC 0x2 371 #define ALLOC_RX_DESC 0x4 372 373 int 374 nge_alloc_bufs(nge_t *ngep) 375 { 376 int err; 377 int split; 378 int progress; 379 size_t txbuffsize; 380 size_t rxdescsize; 381 size_t 
txdescsize; 382 383 txbuffsize = ngep->tx_desc * ngep->buf_size; 384 rxdescsize = ngep->rx_desc; 385 txdescsize = ngep->tx_desc; 386 rxdescsize *= ngep->desc_attr.rxd_size; 387 txdescsize *= ngep->desc_attr.txd_size; 388 progress = 0; 389 390 NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep)); 391 /* 392 * Allocate memory & handles for TX buffers 393 */ 394 ASSERT((txbuffsize % ngep->nge_split) == 0); 395 for (split = 0; split < ngep->nge_split; ++split) { 396 err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split, 397 &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE, 398 &ngep->send->buf[split]); 399 if (err != DDI_SUCCESS) 400 goto fail; 401 } 402 403 progress |= ALLOC_TX_BUF; 404 405 /* 406 * Allocate memory & handles for receive return rings and 407 * buffer (producer) descriptor rings 408 */ 409 err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr, 410 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc); 411 if (err != DDI_SUCCESS) 412 goto fail; 413 progress |= ALLOC_RX_DESC; 414 415 /* 416 * Allocate memory & handles for TX descriptor rings, 417 */ 418 err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr, 419 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc); 420 if (err != DDI_SUCCESS) 421 goto fail; 422 return (DDI_SUCCESS); 423 424 fail: 425 if (progress & ALLOC_RX_DESC) 426 nge_free_dma_mem(&ngep->recv->desc); 427 if (progress & ALLOC_TX_BUF) { 428 for (split = 0; split < ngep->nge_split; ++split) 429 nge_free_dma_mem(&ngep->send->buf[split]); 430 } 431 432 return (DDI_FAILURE); 433 } 434 435 /* 436 * This routine frees the transmit and receive buffers and descriptors. 437 * Make sure the chip is stopped before calling it! 438 */ 439 void 440 nge_free_bufs(nge_t *ngep) 441 { 442 int split; 443 444 NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep)); 445 446 nge_free_dma_mem(&ngep->recv->desc); 447 nge_free_dma_mem(&ngep->send->desc); 448 449 for (split = 0; split < ngep->nge_split; ++split) 450 nge_free_dma_mem(&ngep->send->buf[split]); 451 } 452 453 /* 454 * Clean up initialisation done above before the memory is freed 455 */ 456 static void 457 nge_fini_send_ring(nge_t *ngep) 458 { 459 uint32_t slot; 460 size_t dmah_num; 461 send_ring_t *srp; 462 sw_tx_sbd_t *ssbdp; 463 464 srp = ngep->send; 465 ssbdp = srp->sw_sbds; 466 467 NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep)); 468 469 dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]); 470 471 for (slot = 0; slot < dmah_num; ++slot) { 472 if (srp->dmahndl[slot].hndl) { 473 (void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl); 474 ddi_dma_free_handle(&srp->dmahndl[slot].hndl); 475 srp->dmahndl[slot].hndl = NULL; 476 srp->dmahndl[slot].next = NULL; 477 } 478 } 479 480 srp->dmah_free.head = NULL; 481 srp->dmah_free.tail = NULL; 482 483 kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp)); 484 485 } 486 487 /* 488 * Initialise the specified Send Ring, using the information in the 489 * <dma_area> descriptors that it contains to set up all the other 490 * fields. This routine should be called only once for each ring. 
491 */ 492 static int 493 nge_init_send_ring(nge_t *ngep) 494 { 495 size_t dmah_num; 496 uint32_t nslots; 497 uint32_t err; 498 uint32_t slot; 499 uint32_t split; 500 send_ring_t *srp; 501 sw_tx_sbd_t *ssbdp; 502 dma_area_t desc; 503 dma_area_t pbuf; 504 505 srp = ngep->send; 506 srp->desc.nslots = ngep->tx_desc; 507 nslots = srp->desc.nslots; 508 509 NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep)); 510 /* 511 * Other one-off initialisation of per-ring data 512 */ 513 srp->ngep = ngep; 514 515 /* 516 * Allocate the array of s/w Send Buffer Descriptors 517 */ 518 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 519 srp->sw_sbds = ssbdp; 520 521 /* 522 * Now initialise each array element once and for all 523 */ 524 desc = srp->desc; 525 for (split = 0; split < ngep->nge_split; ++split) { 526 pbuf = srp->buf[split]; 527 for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) { 528 nge_slice_chunk(&ssbdp->desc, &desc, 1, 529 ngep->desc_attr.txd_size); 530 nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1, 531 ngep->buf_size); 532 } 533 ASSERT(pbuf.alength == 0); 534 } 535 ASSERT(desc.alength == 0); 536 537 dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]); 538 539 /* preallocate dma handles for tx buffer */ 540 for (slot = 0; slot < dmah_num; ++slot) { 541 542 err = ddi_dma_alloc_handle(ngep->devinfo, 543 ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT, 544 NULL, &srp->dmahndl[slot].hndl); 545 546 if (err != DDI_SUCCESS) { 547 nge_fini_send_ring(ngep); 548 nge_error(ngep, 549 "nge_init_send_ring: alloc dma handle fails"); 550 return (DDI_FAILURE); 551 } 552 srp->dmahndl[slot].next = srp->dmahndl + slot + 1; 553 } 554 555 srp->dmah_free.head = srp->dmahndl; 556 srp->dmah_free.tail = srp->dmahndl + dmah_num - 1; 557 srp->dmah_free.tail->next = NULL; 558 559 return (DDI_SUCCESS); 560 } 561 562 /* 563 * Intialize the tx recycle pointer and tx sending pointer of tx ring 564 * and set the type of tx's data descriptor by default. 565 */ 566 static void 567 nge_reinit_send_ring(nge_t *ngep) 568 { 569 size_t dmah_num; 570 uint32_t slot; 571 send_ring_t *srp; 572 sw_tx_sbd_t *ssbdp; 573 574 srp = ngep->send; 575 576 /* 577 * Reinitialise control variables ... 578 */ 579 580 srp->tx_hwmark = NGE_DESC_MIN; 581 srp->tx_lwmark = NGE_DESC_MIN; 582 583 srp->tx_next = 0; 584 srp->tx_free = srp->desc.nslots; 585 srp->tc_next = 0; 586 587 dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]); 588 589 for (slot = 0; slot - dmah_num != 0; ++slot) 590 srp->dmahndl[slot].next = srp->dmahndl + slot + 1; 591 592 srp->dmah_free.head = srp->dmahndl; 593 srp->dmah_free.tail = srp->dmahndl + dmah_num - 1; 594 srp->dmah_free.tail->next = NULL; 595 596 /* 597 * Zero and sync all the h/w Send Buffer Descriptors 598 */ 599 for (slot = 0; slot < srp->desc.nslots; ++slot) { 600 ssbdp = &srp->sw_sbds[slot]; 601 ssbdp->flags = HOST_OWN; 602 } 603 604 DMA_ZERO(srp->desc); 605 DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV); 606 } 607 608 /* 609 * Initialize the slot number of rx's ring 610 */ 611 static void 612 nge_init_recv_ring(nge_t *ngep) 613 { 614 recv_ring_t *rrp; 615 616 rrp = ngep->recv; 617 rrp->desc.nslots = ngep->rx_desc; 618 rrp->ngep = ngep; 619 } 620 621 /* 622 * Intialize the rx recycle pointer and rx sending pointer of rx ring 623 */ 624 static void 625 nge_reinit_recv_ring(nge_t *ngep) 626 { 627 recv_ring_t *rrp; 628 629 rrp = ngep->recv; 630 631 /* 632 * Reinitialise control variables ... 
633 */ 634 rrp->prod_index = 0; 635 /* 636 * Zero and sync all the h/w Send Buffer Descriptors 637 */ 638 DMA_ZERO(rrp->desc); 639 DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV); 640 } 641 642 /* 643 * Clean up initialisation done above before the memory is freed 644 */ 645 static void 646 nge_fini_buff_ring(nge_t *ngep) 647 { 648 uint32_t i; 649 buff_ring_t *brp; 650 dma_area_t *bufp; 651 sw_rx_sbd_t *bsbdp; 652 653 brp = ngep->buff; 654 bsbdp = brp->sw_rbds; 655 656 NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep)); 657 658 mutex_enter(brp->recycle_lock); 659 brp->buf_sign++; 660 mutex_exit(brp->recycle_lock); 661 for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) { 662 if (bsbdp->bufp) { 663 if (bsbdp->bufp->mp) 664 freemsg(bsbdp->bufp->mp); 665 nge_free_dma_mem(bsbdp->bufp); 666 kmem_free(bsbdp->bufp, sizeof (dma_area_t)); 667 bsbdp->bufp = NULL; 668 } 669 } 670 while (brp->free_list != NULL) { 671 bufp = brp->free_list; 672 brp->free_list = bufp->next; 673 bufp->next = NULL; 674 if (bufp->mp) 675 freemsg(bufp->mp); 676 nge_free_dma_mem(bufp); 677 kmem_free(bufp, sizeof (dma_area_t)); 678 } 679 while (brp->recycle_list != NULL) { 680 bufp = brp->recycle_list; 681 brp->recycle_list = bufp->next; 682 bufp->next = NULL; 683 if (bufp->mp) 684 freemsg(bufp->mp); 685 nge_free_dma_mem(bufp); 686 kmem_free(bufp, sizeof (dma_area_t)); 687 } 688 689 690 kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp))); 691 brp->sw_rbds = NULL; 692 } 693 694 /* 695 * Intialize the Rx's data ring and free ring 696 */ 697 static int 698 nge_init_buff_ring(nge_t *ngep) 699 { 700 uint32_t err; 701 uint32_t slot; 702 uint32_t nslots_buff; 703 uint32_t nslots_recv; 704 buff_ring_t *brp; 705 recv_ring_t *rrp; 706 dma_area_t desc; 707 dma_area_t *bufp; 708 sw_rx_sbd_t *bsbdp; 709 710 rrp = ngep->recv; 711 brp = ngep->buff; 712 brp->nslots = ngep->rx_buf; 713 brp->rx_bcopy = B_FALSE; 714 nslots_recv = rrp->desc.nslots; 715 nslots_buff = brp->nslots; 716 brp->ngep = ngep; 717 718 NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep)); 719 720 /* 721 * Allocate the array of s/w Recv Buffer Descriptors 722 */ 723 bsbdp = kmem_zalloc(nslots_recv *sizeof (*bsbdp), KM_SLEEP); 724 brp->sw_rbds = bsbdp; 725 brp->free_list = NULL; 726 brp->recycle_list = NULL; 727 for (slot = 0; slot < nslots_buff; ++slot) { 728 bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP); 729 err = nge_alloc_dma_mem(ngep, (ngep->buf_size 730 + NGE_HEADROOM), 731 &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp); 732 if (err != DDI_SUCCESS) { 733 kmem_free(bufp, sizeof (dma_area_t)); 734 return (DDI_FAILURE); 735 } 736 737 bufp->alength -= NGE_HEADROOM; 738 bufp->offset += NGE_HEADROOM; 739 bufp->private = (caddr_t)ngep; 740 bufp->rx_recycle.free_func = nge_recv_recycle; 741 bufp->rx_recycle.free_arg = (caddr_t)bufp; 742 bufp->signature = brp->buf_sign; 743 bufp->rx_delivered = B_FALSE; 744 bufp->mp = desballoc(DMA_VPTR(*bufp), 745 ngep->buf_size + NGE_HEADROOM, 746 0, &bufp->rx_recycle); 747 748 if (bufp->mp == NULL) { 749 return (DDI_FAILURE); 750 } 751 bufp->next = brp->free_list; 752 brp->free_list = bufp; 753 } 754 755 /* 756 * Now initialise each array element once and for all 757 */ 758 desc = rrp->desc; 759 for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) { 760 nge_slice_chunk(&bsbdp->desc, &desc, 1, 761 ngep->desc_attr.rxd_size); 762 bufp = brp->free_list; 763 brp->free_list = bufp->next; 764 bsbdp->bufp = bufp; 765 bsbdp->flags = CONTROLER_OWN; 766 bufp->next = NULL; 767 } 768 769 ASSERT(desc.alength == 0); 770 return (DDI_SUCCESS); 771 } 772 
773 /* 774 * Fill the host address of data in rx' descriptor 775 * and initialize free pointers of rx free ring 776 */ 777 static int 778 nge_reinit_buff_ring(nge_t *ngep) 779 { 780 uint32_t slot; 781 uint32_t nslots_recv; 782 buff_ring_t *brp; 783 recv_ring_t *rrp; 784 sw_rx_sbd_t *bsbdp; 785 void *hw_bd_p; 786 787 brp = ngep->buff; 788 rrp = ngep->recv; 789 bsbdp = brp->sw_rbds; 790 nslots_recv = rrp->desc.nslots; 791 for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) { 792 hw_bd_p = DMA_VPTR(bsbdp->desc); 793 /* 794 * There is a scenario: When the traffic of small tcp 795 * packet is heavy, suspending the tcp traffic will 796 * cause the preallocated buffers for rx not to be 797 * released in time by tcp taffic and cause rx's buffer 798 * pointers not to be refilled in time. 799 * 800 * At this point, if we reinitialize the driver, the bufp 801 * pointer for rx's traffic will be NULL. 802 * So the result of the reinitializion fails. 803 */ 804 if (bsbdp->bufp == NULL) 805 return (DDI_FAILURE); 806 807 ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie, 808 bsbdp->bufp->alength); 809 } 810 return (DDI_SUCCESS); 811 } 812 813 static void 814 nge_init_ring_param_lock(nge_t *ngep) 815 { 816 buff_ring_t *brp; 817 send_ring_t *srp; 818 819 srp = ngep->send; 820 brp = ngep->buff; 821 822 /* Init the locks for send ring */ 823 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 824 DDI_INTR_PRI(ngep->intr_pri)); 825 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 826 DDI_INTR_PRI(ngep->intr_pri)); 827 mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER, 828 DDI_INTR_PRI(ngep->intr_pri)); 829 830 /* Init parameters of buffer ring */ 831 brp->free_list = NULL; 832 brp->recycle_list = NULL; 833 brp->rx_hold = 0; 834 brp->buf_sign = 0; 835 836 /* Init recycle list lock */ 837 mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER, 838 DDI_INTR_PRI(ngep->intr_pri)); 839 } 840 841 int 842 nge_init_rings(nge_t *ngep) 843 { 844 uint32_t err; 845 846 err = nge_init_send_ring(ngep); 847 if (err != DDI_SUCCESS) { 848 return (err); 849 } 850 nge_init_recv_ring(ngep); 851 852 err = nge_init_buff_ring(ngep); 853 if (err != DDI_SUCCESS) { 854 nge_fini_send_ring(ngep); 855 return (DDI_FAILURE); 856 } 857 858 return (err); 859 } 860 861 static int 862 nge_reinit_ring(nge_t *ngep) 863 { 864 int err; 865 866 nge_reinit_recv_ring(ngep); 867 nge_reinit_send_ring(ngep); 868 err = nge_reinit_buff_ring(ngep); 869 return (err); 870 } 871 872 873 void 874 nge_fini_rings(nge_t *ngep) 875 { 876 /* 877 * For receive ring, nothing need to be finished. 878 * So only finish buffer ring and send ring here. 
879 */ 880 nge_fini_buff_ring(ngep); 881 nge_fini_send_ring(ngep); 882 } 883 884 /* 885 * Loopback ioctl code 886 */ 887 888 static lb_property_t loopmodes[] = { 889 { normal, "normal", NGE_LOOP_NONE }, 890 { external, "100Mbps", NGE_LOOP_EXTERNAL_100 }, 891 { external, "10Mbps", NGE_LOOP_EXTERNAL_10 }, 892 { internal, "PHY", NGE_LOOP_INTERNAL_PHY }, 893 }; 894 895 enum ioc_reply 896 nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp) 897 { 898 int cmd; 899 uint32_t *lbmp; 900 lb_info_sz_t *lbsp; 901 lb_property_t *lbpp; 902 903 /* 904 * Validate format of ioctl 905 */ 906 if (mp->b_cont == NULL) 907 return (IOC_INVAL); 908 909 cmd = iocp->ioc_cmd; 910 911 switch (cmd) { 912 default: 913 return (IOC_INVAL); 914 915 case LB_GET_INFO_SIZE: 916 if (iocp->ioc_count != sizeof (lb_info_sz_t)) 917 return (IOC_INVAL); 918 lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr; 919 *lbsp = sizeof (loopmodes); 920 return (IOC_REPLY); 921 922 case LB_GET_INFO: 923 if (iocp->ioc_count != sizeof (loopmodes)) 924 return (IOC_INVAL); 925 lbpp = (lb_property_t *)mp->b_cont->b_rptr; 926 bcopy(loopmodes, lbpp, sizeof (loopmodes)); 927 return (IOC_REPLY); 928 929 case LB_GET_MODE: 930 if (iocp->ioc_count != sizeof (uint32_t)) 931 return (IOC_INVAL); 932 lbmp = (uint32_t *)mp->b_cont->b_rptr; 933 *lbmp = ngep->param_loop_mode; 934 return (IOC_REPLY); 935 936 case LB_SET_MODE: 937 if (iocp->ioc_count != sizeof (uint32_t)) 938 return (IOC_INVAL); 939 lbmp = (uint32_t *)mp->b_cont->b_rptr; 940 return (nge_set_loop_mode(ngep, *lbmp)); 941 } 942 } 943 944 #undef NGE_DBG 945 #define NGE_DBG NGE_DBG_NEMO 946 947 948 static void 949 nge_check_desc_prop(nge_t *ngep) 950 { 951 if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD) 952 ngep->desc_mode = DESC_HOT; 953 954 if (ngep->desc_mode == DESC_OFFLOAD) { 955 956 ngep->desc_attr = nge_sum_desc; 957 958 } else if (ngep->desc_mode == DESC_HOT) { 959 960 ngep->desc_attr = nge_hot_desc; 961 } 962 } 963 964 /* 965 * nge_get_props -- get the parameters to tune the driver 966 */ 967 static void 968 nge_get_props(nge_t *ngep) 969 { 970 chip_info_t *infop; 971 dev_info_t *devinfo; 972 nge_dev_spec_param_t *dev_param_p; 973 974 devinfo = ngep->devinfo; 975 infop = (chip_info_t *)&ngep->chipinfo; 976 dev_param_p = &ngep->dev_spec_param; 977 978 infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 979 DDI_PROP_DONTPASS, clsize_propname, 32); 980 981 infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 982 DDI_PROP_DONTPASS, latency_propname, 64); 983 ngep->intr_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 984 DDI_PROP_DONTPASS, intr_moderation, NGE_SET); 985 ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 986 DDI_PROP_DONTPASS, rx_data_hw, 0x20); 987 ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 988 DDI_PROP_DONTPASS, rx_prd_lw, 0x4); 989 ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 990 DDI_PROP_DONTPASS, rx_prd_hw, 0xc); 991 992 ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 993 DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC); 994 ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 995 DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP); 996 ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 997 DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type); 998 ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 999 DDI_PROP_DONTPASS, low_memory_mode, 0); 1000 1001 if (dev_param_p->jumbo) { 1002 ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 1003 DDI_PROP_DONTPASS, 
default_mtu, ETHERMTU); 1004 } else 1005 ngep->default_mtu = ETHERMTU; 1006 if (dev_param_p->tx_pause_frame) 1007 ngep->param_link_tx_pause = B_TRUE; 1008 else 1009 ngep->param_link_tx_pause = B_FALSE; 1010 1011 if (dev_param_p->rx_pause_frame) 1012 ngep->param_link_rx_pause = B_TRUE; 1013 else 1014 ngep->param_link_rx_pause = B_FALSE; 1015 1016 if (ngep->default_mtu > ETHERMTU && 1017 ngep->default_mtu <= NGE_MTU_2500) { 1018 ngep->buf_size = NGE_JB2500_BUFSZ; 1019 ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC; 1020 ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC; 1021 ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2; 1022 ngep->nge_split = NGE_SPLIT_256; 1023 } else if (ngep->default_mtu > NGE_MTU_2500 && 1024 ngep->default_mtu <= NGE_MTU_4500) { 1025 ngep->buf_size = NGE_JB4500_BUFSZ; 1026 ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC; 1027 ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC; 1028 ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2; 1029 ngep->nge_split = NGE_SPLIT_256; 1030 } else if (ngep->default_mtu > NGE_MTU_4500 && 1031 ngep->default_mtu <= NGE_MAX_MTU) { 1032 ngep->buf_size = NGE_JB9000_BUFSZ; 1033 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC; 1034 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC; 1035 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2; 1036 ngep->nge_split = NGE_SPLIT_256; 1037 } else if (ngep->default_mtu > NGE_MAX_MTU) { 1038 ngep->default_mtu = NGE_MAX_MTU; 1039 ngep->buf_size = NGE_JB9000_BUFSZ; 1040 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC; 1041 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC; 1042 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2; 1043 ngep->nge_split = NGE_SPLIT_256; 1044 } else if (ngep->lowmem_mode != 0) { 1045 ngep->default_mtu = ETHERMTU; 1046 ngep->buf_size = NGE_STD_BUFSZ; 1047 ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC; 1048 ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC; 1049 ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2; 1050 ngep->nge_split = NGE_SPLIT_32; 1051 } else { 1052 ngep->default_mtu = ETHERMTU; 1053 ngep->buf_size = NGE_STD_BUFSZ; 1054 ngep->tx_desc = dev_param_p->tx_desc_num; 1055 ngep->rx_desc = dev_param_p->rx_desc_num; 1056 ngep->rx_buf = dev_param_p->rx_desc_num * 2; 1057 ngep->nge_split = dev_param_p->nge_split; 1058 } 1059 1060 nge_check_desc_prop(ngep); 1061 } 1062 1063 1064 static int 1065 nge_reset_dev(nge_t *ngep) 1066 { 1067 int err; 1068 nge_mul_addr1 maddr1; 1069 nge_sw_statistics_t *sw_stp; 1070 sw_stp = &ngep->statistics.sw_statistics; 1071 send_ring_t *srp = ngep->send; 1072 1073 ASSERT(mutex_owned(ngep->genlock)); 1074 mutex_enter(srp->tc_lock); 1075 mutex_enter(srp->tx_lock); 1076 1077 nge_tx_recycle_all(ngep); 1078 err = nge_reinit_ring(ngep); 1079 if (err == DDI_FAILURE) { 1080 mutex_exit(srp->tx_lock); 1081 mutex_exit(srp->tc_lock); 1082 return (err); 1083 } 1084 err = nge_chip_reset(ngep); 1085 /* 1086 * Clear the Multicast mac address table 1087 */ 1088 nge_reg_put32(ngep, NGE_MUL_ADDR0, 0); 1089 maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1); 1090 maddr1.addr_bits.addr = 0; 1091 nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val); 1092 1093 mutex_exit(srp->tx_lock); 1094 mutex_exit(srp->tc_lock); 1095 if (err == DDI_FAILURE) 1096 return (err); 1097 ngep->watchdog = 0; 1098 ngep->resched_needed = B_FALSE; 1099 ngep->promisc = B_FALSE; 1100 ngep->param_loop_mode = NGE_LOOP_NONE; 1101 ngep->factotum_flag = 0; 1102 ngep->resched_needed = 0; 1103 ngep->nge_mac_state = NGE_MAC_RESET; 1104 ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL; 1105 ngep->max_sdu += VTAG_SIZE; 1106 ngep->rx_def = 0x16; 1107 1108 /* Clear 
the software statistics */ 1109 sw_stp->recv_count = 0; 1110 sw_stp->xmit_count = 0; 1111 sw_stp->rbytes = 0; 1112 sw_stp->obytes = 0; 1113 1114 return (DDI_SUCCESS); 1115 } 1116 1117 static void 1118 nge_m_stop(void *arg) 1119 { 1120 nge_t *ngep = arg; /* private device info */ 1121 int err; 1122 1123 NGE_TRACE(("nge_m_stop($%p)", arg)); 1124 1125 /* 1126 * Just stop processing, then record new MAC state 1127 */ 1128 mutex_enter(ngep->genlock); 1129 /* If suspended, the adapter is already stopped, just return. */ 1130 if (ngep->suspended) { 1131 ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED); 1132 mutex_exit(ngep->genlock); 1133 return; 1134 } 1135 rw_enter(ngep->rwlock, RW_WRITER); 1136 1137 err = nge_chip_stop(ngep, B_FALSE); 1138 if (err == DDI_FAILURE) 1139 err = nge_chip_reset(ngep); 1140 if (err == DDI_FAILURE) 1141 nge_problem(ngep, "nge_m_stop: stop chip failed"); 1142 ngep->nge_mac_state = NGE_MAC_STOPPED; 1143 1144 /* Recycle all the TX BD */ 1145 nge_tx_recycle_all(ngep); 1146 nge_fini_rings(ngep); 1147 nge_free_bufs(ngep); 1148 1149 NGE_DEBUG(("nge_m_stop($%p) done", arg)); 1150 1151 rw_exit(ngep->rwlock); 1152 mutex_exit(ngep->genlock); 1153 } 1154 1155 static int 1156 nge_m_start(void *arg) 1157 { 1158 int err; 1159 nge_t *ngep = arg; 1160 1161 NGE_TRACE(("nge_m_start($%p)", arg)); 1162 1163 /* 1164 * Start processing and record new MAC state 1165 */ 1166 mutex_enter(ngep->genlock); 1167 /* 1168 * If suspended, don't start, as the resume processing 1169 * will recall this function with the suspended flag off. 1170 */ 1171 if (ngep->suspended) { 1172 mutex_exit(ngep->genlock); 1173 return (EIO); 1174 } 1175 rw_enter(ngep->rwlock, RW_WRITER); 1176 err = nge_alloc_bufs(ngep); 1177 if (err != DDI_SUCCESS) { 1178 nge_problem(ngep, "nge_m_start: DMA buffer allocation failed"); 1179 goto finish; 1180 } 1181 err = nge_init_rings(ngep); 1182 if (err != DDI_SUCCESS) { 1183 nge_free_bufs(ngep); 1184 nge_problem(ngep, "nge_init_rings() failed,err=%x", err); 1185 goto finish; 1186 } 1187 err = nge_restart(ngep); 1188 1189 NGE_DEBUG(("nge_m_start($%p) done", arg)); 1190 finish: 1191 rw_exit(ngep->rwlock); 1192 mutex_exit(ngep->genlock); 1193 1194 return (err == DDI_SUCCESS ? 0 : EIO); 1195 } 1196 1197 static int 1198 nge_m_unicst(void *arg, const uint8_t *macaddr) 1199 { 1200 nge_t *ngep = arg; 1201 1202 NGE_TRACE(("nge_m_unicst($%p)", arg)); 1203 /* 1204 * Remember the new current address in the driver state 1205 * Sync the chip's idea of the address too ... 1206 */ 1207 mutex_enter(ngep->genlock); 1208 1209 ethaddr_copy(macaddr, ngep->cur_uni_addr.addr); 1210 ngep->cur_uni_addr.set = 1; 1211 1212 /* 1213 * If we are suspended, we want to quit now, and not update 1214 * the chip. Doing so might put it in a bad state, but the 1215 * resume will get the unicast address installed. 1216 */ 1217 if (ngep->suspended) { 1218 mutex_exit(ngep->genlock); 1219 return (DDI_SUCCESS); 1220 } 1221 nge_chip_sync(ngep); 1222 1223 NGE_DEBUG(("nge_m_unicst($%p) done", arg)); 1224 mutex_exit(ngep->genlock); 1225 1226 return (0); 1227 } 1228 1229 static int 1230 nge_m_promisc(void *arg, boolean_t on) 1231 { 1232 nge_t *ngep = arg; 1233 1234 NGE_TRACE(("nge_m_promisc($%p)", arg)); 1235 1236 /* 1237 * Store specified mode and pass to chip layer to update h/w 1238 */ 1239 mutex_enter(ngep->genlock); 1240 /* 1241 * If suspended, there is no need to do anything, even 1242 * recording the promiscuious mode is not neccessary, as 1243 * it won't be properly set on resume. Just return failing. 
1244 */ 1245 if (ngep->suspended) { 1246 mutex_exit(ngep->genlock); 1247 return (DDI_FAILURE); 1248 } 1249 if (ngep->promisc == on) { 1250 mutex_exit(ngep->genlock); 1251 NGE_DEBUG(("nge_m_promisc($%p) done", arg)); 1252 return (0); 1253 } 1254 ngep->promisc = on; 1255 ngep->record_promisc = ngep->promisc; 1256 nge_chip_sync(ngep); 1257 NGE_DEBUG(("nge_m_promisc($%p) done", arg)); 1258 mutex_exit(ngep->genlock); 1259 1260 return (0); 1261 } 1262 1263 static void nge_mulparam(nge_t *ngep) 1264 { 1265 uint8_t number; 1266 ether_addr_t pand; 1267 ether_addr_t por; 1268 mul_item *plist; 1269 1270 for (number = 0; number < ETHERADDRL; number++) { 1271 pand[number] = 0x00; 1272 por[number] = 0x00; 1273 } 1274 for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) { 1275 for (number = 0; number < ETHERADDRL; number++) { 1276 pand[number] &= plist->mul_addr[number]; 1277 por[number] |= plist->mul_addr[number]; 1278 } 1279 } 1280 for (number = 0; number < ETHERADDRL; number++) { 1281 ngep->cur_mul_addr.addr[number] 1282 = pand[number] & por[number]; 1283 ngep->cur_mul_mask.addr[number] 1284 = pand [number] | (~por[number]); 1285 } 1286 } 1287 static int 1288 nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1289 { 1290 boolean_t update; 1291 boolean_t b_eq; 1292 nge_t *ngep = arg; 1293 mul_item *plist; 1294 mul_item *plist_prev; 1295 mul_item *pitem; 1296 1297 NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg, 1298 (add) ? "add" : "remove", ether_sprintf((void *)mca))); 1299 1300 update = B_FALSE; 1301 plist = plist_prev = NULL; 1302 mutex_enter(ngep->genlock); 1303 if (add) { 1304 if (ngep->pcur_mulist != NULL) { 1305 for (plist = ngep->pcur_mulist; plist != NULL; 1306 plist = plist->next) { 1307 b_eq = ether_eq(plist->mul_addr, mca); 1308 if (b_eq) { 1309 plist->ref_cnt++; 1310 break; 1311 } 1312 plist_prev = plist; 1313 } 1314 } 1315 1316 if (plist == NULL) { 1317 pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP); 1318 ether_copy(mca, pitem->mul_addr); 1319 pitem ->ref_cnt++; 1320 pitem ->next = NULL; 1321 if (plist_prev == NULL) 1322 ngep->pcur_mulist = pitem; 1323 else 1324 plist_prev->next = pitem; 1325 update = B_TRUE; 1326 } 1327 } else { 1328 if (ngep->pcur_mulist != NULL) { 1329 for (plist = ngep->pcur_mulist; plist != NULL; 1330 plist = plist->next) { 1331 b_eq = ether_eq(plist->mul_addr, mca); 1332 if (b_eq) { 1333 update = B_TRUE; 1334 break; 1335 } 1336 plist_prev = plist; 1337 } 1338 1339 if (update) { 1340 if ((plist_prev == NULL) && 1341 (plist->next == NULL)) 1342 ngep->pcur_mulist = NULL; 1343 else if ((plist_prev == NULL) && 1344 (plist->next != NULL)) 1345 ngep->pcur_mulist = plist->next; 1346 else 1347 plist_prev->next = plist->next; 1348 kmem_free(plist, sizeof (mul_item)); 1349 } 1350 } 1351 } 1352 1353 if (update && !ngep->suspended) { 1354 nge_mulparam(ngep); 1355 nge_chip_sync(ngep); 1356 } 1357 NGE_DEBUG(("nge_m_multicst($%p) done", arg)); 1358 mutex_exit(ngep->genlock); 1359 1360 return (0); 1361 } 1362 1363 static void 1364 nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 1365 { 1366 int err; 1367 int cmd; 1368 nge_t *ngep = arg; 1369 struct iocblk *iocp; 1370 enum ioc_reply status; 1371 boolean_t need_privilege; 1372 1373 /* 1374 * If suspended, we might actually be able to do some of 1375 * these ioctls, but it is harder to make sure they occur 1376 * without actually putting the hardware in an undesireable 1377 * state. So just NAK it. 
1378 */ 1379 mutex_enter(ngep->genlock); 1380 if (ngep->suspended) { 1381 miocnak(wq, mp, 0, EINVAL); 1382 mutex_exit(ngep->genlock); 1383 return; 1384 } 1385 mutex_exit(ngep->genlock); 1386 1387 /* 1388 * Validate the command before bothering with the mutex ... 1389 */ 1390 iocp = (struct iocblk *)mp->b_rptr; 1391 iocp->ioc_error = 0; 1392 need_privilege = B_TRUE; 1393 cmd = iocp->ioc_cmd; 1394 1395 NGE_DEBUG(("nge_m_ioctl: cmd 0x%x", cmd)); 1396 switch (cmd) { 1397 default: 1398 NGE_LDB(NGE_DBG_BADIOC, 1399 ("nge_m_ioctl: unknown cmd 0x%x", cmd)); 1400 1401 miocnak(wq, mp, 0, EINVAL); 1402 return; 1403 1404 case NGE_MII_READ: 1405 case NGE_MII_WRITE: 1406 case NGE_SEE_READ: 1407 case NGE_SEE_WRITE: 1408 case NGE_DIAG: 1409 case NGE_PEEK: 1410 case NGE_POKE: 1411 case NGE_PHY_RESET: 1412 case NGE_SOFT_RESET: 1413 case NGE_HARD_RESET: 1414 break; 1415 1416 case LB_GET_INFO_SIZE: 1417 case LB_GET_INFO: 1418 case LB_GET_MODE: 1419 need_privilege = B_FALSE; 1420 break; 1421 case LB_SET_MODE: 1422 break; 1423 } 1424 1425 if (need_privilege) { 1426 /* 1427 * Check for specific net_config privilege. 1428 */ 1429 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 1430 if (err != 0) { 1431 NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d", 1432 cmd, err)); 1433 miocnak(wq, mp, 0, err); 1434 return; 1435 } 1436 } 1437 1438 mutex_enter(ngep->genlock); 1439 1440 switch (cmd) { 1441 default: 1442 _NOTE(NOTREACHED) 1443 status = IOC_INVAL; 1444 break; 1445 1446 case NGE_MII_READ: 1447 case NGE_MII_WRITE: 1448 case NGE_SEE_READ: 1449 case NGE_SEE_WRITE: 1450 case NGE_DIAG: 1451 case NGE_PEEK: 1452 case NGE_POKE: 1453 case NGE_PHY_RESET: 1454 case NGE_SOFT_RESET: 1455 case NGE_HARD_RESET: 1456 status = nge_chip_ioctl(ngep, mp, iocp); 1457 break; 1458 1459 case LB_GET_INFO_SIZE: 1460 case LB_GET_INFO: 1461 case LB_GET_MODE: 1462 case LB_SET_MODE: 1463 status = nge_loop_ioctl(ngep, mp, iocp); 1464 break; 1465 1466 } 1467 1468 /* 1469 * Do we need to reprogram the PHY and/or the MAC? 1470 * Do it now, while we still have the mutex. 1471 * 1472 * Note: update the PHY first, 'cos it controls the 1473 * speed/duplex parameters that the MAC code uses. 1474 */ 1475 1476 NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status)); 1477 1478 switch (status) { 1479 case IOC_RESTART_REPLY: 1480 case IOC_RESTART_ACK: 1481 (*ngep->physops->phys_update)(ngep); 1482 nge_chip_sync(ngep); 1483 break; 1484 1485 default: 1486 break; 1487 } 1488 1489 mutex_exit(ngep->genlock); 1490 1491 /* 1492 * Finally, decide how to reply 1493 */ 1494 switch (status) { 1495 1496 default: 1497 case IOC_INVAL: 1498 miocnak(wq, mp, 0, iocp->ioc_error == 0 ? 1499 EINVAL : iocp->ioc_error); 1500 break; 1501 1502 case IOC_DONE: 1503 break; 1504 1505 case IOC_RESTART_ACK: 1506 case IOC_ACK: 1507 miocack(wq, mp, 0, 0); 1508 break; 1509 1510 case IOC_RESTART_REPLY: 1511 case IOC_REPLY: 1512 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1513 M_IOCACK : M_IOCNAK; 1514 qreply(wq, mp); 1515 break; 1516 } 1517 } 1518 1519 static boolean_t 1520 nge_param_locked(mac_prop_id_t pr_num) 1521 { 1522 /* 1523 * All adv_* parameters are locked (read-only) while 1524 * the device is in any sort of loopback mode ... 
1525 */ 1526 switch (pr_num) { 1527 case MAC_PROP_ADV_1000FDX_CAP: 1528 case MAC_PROP_EN_1000FDX_CAP: 1529 case MAC_PROP_ADV_1000HDX_CAP: 1530 case MAC_PROP_EN_1000HDX_CAP: 1531 case MAC_PROP_ADV_100FDX_CAP: 1532 case MAC_PROP_EN_100FDX_CAP: 1533 case MAC_PROP_ADV_100HDX_CAP: 1534 case MAC_PROP_EN_100HDX_CAP: 1535 case MAC_PROP_ADV_10FDX_CAP: 1536 case MAC_PROP_EN_10FDX_CAP: 1537 case MAC_PROP_ADV_10HDX_CAP: 1538 case MAC_PROP_EN_10HDX_CAP: 1539 case MAC_PROP_AUTONEG: 1540 case MAC_PROP_FLOWCTRL: 1541 return (B_TRUE); 1542 } 1543 return (B_FALSE); 1544 } 1545 1546 /* 1547 * callback functions for set/get of properties 1548 */ 1549 static int 1550 nge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 1551 uint_t pr_valsize, const void *pr_val) 1552 { 1553 nge_t *ngep = barg; 1554 int err = 0; 1555 uint32_t cur_mtu, new_mtu; 1556 link_flowctrl_t fl; 1557 1558 mutex_enter(ngep->genlock); 1559 if (ngep->param_loop_mode != NGE_LOOP_NONE && 1560 nge_param_locked(pr_num)) { 1561 /* 1562 * All adv_* parameters are locked (read-only) 1563 * while the device is in any sort of loopback mode. 1564 */ 1565 mutex_exit(ngep->genlock); 1566 return (EBUSY); 1567 } 1568 switch (pr_num) { 1569 case MAC_PROP_EN_1000FDX_CAP: 1570 ngep->param_en_1000fdx = *(uint8_t *)pr_val; 1571 ngep->param_adv_1000fdx = *(uint8_t *)pr_val; 1572 goto reprogram; 1573 case MAC_PROP_EN_100FDX_CAP: 1574 ngep->param_en_100fdx = *(uint8_t *)pr_val; 1575 ngep->param_adv_100fdx = *(uint8_t *)pr_val; 1576 goto reprogram; 1577 case MAC_PROP_EN_100HDX_CAP: 1578 ngep->param_en_100hdx = *(uint8_t *)pr_val; 1579 ngep->param_adv_100hdx = *(uint8_t *)pr_val; 1580 goto reprogram; 1581 case MAC_PROP_EN_10FDX_CAP: 1582 ngep->param_en_10fdx = *(uint8_t *)pr_val; 1583 ngep->param_adv_10fdx = *(uint8_t *)pr_val; 1584 goto reprogram; 1585 case MAC_PROP_EN_10HDX_CAP: 1586 ngep->param_en_10hdx = *(uint8_t *)pr_val; 1587 ngep->param_adv_10hdx = *(uint8_t *)pr_val; 1588 reprogram: 1589 (*ngep->physops->phys_update)(ngep); 1590 nge_chip_sync(ngep); 1591 break; 1592 1593 case MAC_PROP_ADV_1000FDX_CAP: 1594 case MAC_PROP_ADV_1000HDX_CAP: 1595 case MAC_PROP_ADV_100FDX_CAP: 1596 case MAC_PROP_ADV_100HDX_CAP: 1597 case MAC_PROP_ADV_10FDX_CAP: 1598 case MAC_PROP_ADV_10HDX_CAP: 1599 case MAC_PROP_STATUS: 1600 case MAC_PROP_SPEED: 1601 case MAC_PROP_DUPLEX: 1602 case MAC_PROP_EN_1000HDX_CAP: 1603 err = ENOTSUP; /* read-only prop. 
Can't set this */ 1604 break; 1605 case MAC_PROP_AUTONEG: 1606 ngep->param_adv_autoneg = *(uint8_t *)pr_val; 1607 (*ngep->physops->phys_update)(ngep); 1608 nge_chip_sync(ngep); 1609 break; 1610 case MAC_PROP_MTU: 1611 cur_mtu = ngep->default_mtu; 1612 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 1613 if (new_mtu == cur_mtu) { 1614 err = 0; 1615 break; 1616 } 1617 if (new_mtu < ETHERMTU || 1618 new_mtu > NGE_MAX_MTU) { 1619 err = EINVAL; 1620 break; 1621 } 1622 if ((new_mtu > ETHERMTU) && 1623 (!ngep->dev_spec_param.jumbo)) { 1624 err = EINVAL; 1625 break; 1626 } 1627 if (ngep->nge_mac_state == NGE_MAC_STARTED) { 1628 err = EBUSY; 1629 break; 1630 } 1631 1632 ngep->default_mtu = new_mtu; 1633 if (ngep->default_mtu > ETHERMTU && 1634 ngep->default_mtu <= NGE_MTU_2500) { 1635 ngep->buf_size = NGE_JB2500_BUFSZ; 1636 ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC; 1637 ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC; 1638 ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2; 1639 ngep->nge_split = NGE_SPLIT_256; 1640 } else if (ngep->default_mtu > NGE_MTU_2500 && 1641 ngep->default_mtu <= NGE_MTU_4500) { 1642 ngep->buf_size = NGE_JB4500_BUFSZ; 1643 ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC; 1644 ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC; 1645 ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2; 1646 ngep->nge_split = NGE_SPLIT_256; 1647 } else if (ngep->default_mtu > NGE_MTU_4500 && 1648 ngep->default_mtu <= NGE_MAX_MTU) { 1649 ngep->buf_size = NGE_JB9000_BUFSZ; 1650 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC; 1651 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC; 1652 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2; 1653 ngep->nge_split = NGE_SPLIT_256; 1654 } else if (ngep->default_mtu > NGE_MAX_MTU) { 1655 ngep->default_mtu = NGE_MAX_MTU; 1656 ngep->buf_size = NGE_JB9000_BUFSZ; 1657 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC; 1658 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC; 1659 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2; 1660 ngep->nge_split = NGE_SPLIT_256; 1661 } else if (ngep->lowmem_mode != 0) { 1662 ngep->default_mtu = ETHERMTU; 1663 ngep->buf_size = NGE_STD_BUFSZ; 1664 ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC; 1665 ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC; 1666 ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2; 1667 ngep->nge_split = NGE_SPLIT_32; 1668 } else { 1669 ngep->default_mtu = ETHERMTU; 1670 ngep->buf_size = NGE_STD_BUFSZ; 1671 ngep->tx_desc = 1672 ngep->dev_spec_param.tx_desc_num; 1673 ngep->rx_desc = 1674 ngep->dev_spec_param.rx_desc_num; 1675 ngep->rx_buf = 1676 ngep->dev_spec_param.rx_desc_num * 2; 1677 ngep->nge_split = 1678 ngep->dev_spec_param.nge_split; 1679 } 1680 1681 err = mac_maxsdu_update(ngep->mh, ngep->default_mtu); 1682 1683 break; 1684 case MAC_PROP_FLOWCTRL: 1685 bcopy(pr_val, &fl, sizeof (fl)); 1686 switch (fl) { 1687 default: 1688 err = ENOTSUP; 1689 break; 1690 case LINK_FLOWCTRL_NONE: 1691 ngep->param_adv_pause = 0; 1692 ngep->param_adv_asym_pause = 0; 1693 1694 ngep->param_link_rx_pause = B_FALSE; 1695 ngep->param_link_tx_pause = B_FALSE; 1696 break; 1697 case LINK_FLOWCTRL_RX: 1698 if (!((ngep->param_lp_pause == 0) && 1699 (ngep->param_lp_asym_pause == 1))) { 1700 err = EINVAL; 1701 break; 1702 } 1703 ngep->param_adv_pause = 1; 1704 ngep->param_adv_asym_pause = 1; 1705 1706 ngep->param_link_rx_pause = B_TRUE; 1707 ngep->param_link_tx_pause = B_FALSE; 1708 break; 1709 case LINK_FLOWCTRL_TX: 1710 if (!((ngep->param_lp_pause == 1) && 1711 (ngep->param_lp_asym_pause == 1))) { 1712 err = EINVAL; 1713 break; 1714 } 1715 ngep->param_adv_pause = 0; 1716 ngep->param_adv_asym_pause = 1; 
1717 1718 ngep->param_link_rx_pause = B_FALSE; 1719 ngep->param_link_tx_pause = B_TRUE; 1720 break; 1721 case LINK_FLOWCTRL_BI: 1722 if (ngep->param_lp_pause != 1) { 1723 err = EINVAL; 1724 break; 1725 } 1726 ngep->param_adv_pause = 1; 1727 1728 ngep->param_link_rx_pause = B_TRUE; 1729 ngep->param_link_tx_pause = B_TRUE; 1730 break; 1731 } 1732 1733 if (err == 0) { 1734 (*ngep->physops->phys_update)(ngep); 1735 nge_chip_sync(ngep); 1736 } 1737 1738 break; 1739 case MAC_PROP_PRIVATE: 1740 err = nge_set_priv_prop(ngep, pr_name, pr_valsize, 1741 pr_val); 1742 if (err == 0) { 1743 (*ngep->physops->phys_update)(ngep); 1744 nge_chip_sync(ngep); 1745 } 1746 break; 1747 default: 1748 err = ENOTSUP; 1749 } 1750 mutex_exit(ngep->genlock); 1751 return (err); 1752 } 1753 1754 static int 1755 nge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 1756 uint_t pr_valsize, void *pr_val) 1757 { 1758 nge_t *ngep = barg; 1759 int err = 0; 1760 link_flowctrl_t fl; 1761 uint64_t speed; 1762 1763 switch (pr_num) { 1764 case MAC_PROP_DUPLEX: 1765 ASSERT(pr_valsize >= sizeof (link_duplex_t)); 1766 bcopy(&ngep->param_link_duplex, pr_val, 1767 sizeof (link_duplex_t)); 1768 break; 1769 case MAC_PROP_SPEED: 1770 ASSERT(pr_valsize >= sizeof (uint64_t)); 1771 speed = ngep->param_link_speed * 1000000ull; 1772 bcopy(&speed, pr_val, sizeof (speed)); 1773 break; 1774 case MAC_PROP_AUTONEG: 1775 *(uint8_t *)pr_val = ngep->param_adv_autoneg; 1776 break; 1777 case MAC_PROP_FLOWCTRL: 1778 ASSERT(pr_valsize >= sizeof (link_flowctrl_t)); 1779 if (ngep->param_link_rx_pause && 1780 !ngep->param_link_tx_pause) 1781 fl = LINK_FLOWCTRL_RX; 1782 1783 if (!ngep->param_link_rx_pause && 1784 !ngep->param_link_tx_pause) 1785 fl = LINK_FLOWCTRL_NONE; 1786 1787 if (!ngep->param_link_rx_pause && 1788 ngep->param_link_tx_pause) 1789 fl = LINK_FLOWCTRL_TX; 1790 1791 if (ngep->param_link_rx_pause && 1792 ngep->param_link_tx_pause) 1793 fl = LINK_FLOWCTRL_BI; 1794 bcopy(&fl, pr_val, sizeof (fl)); 1795 break; 1796 case MAC_PROP_ADV_1000FDX_CAP: 1797 *(uint8_t *)pr_val = ngep->param_adv_1000fdx; 1798 break; 1799 case MAC_PROP_EN_1000FDX_CAP: 1800 *(uint8_t *)pr_val = ngep->param_en_1000fdx; 1801 break; 1802 case MAC_PROP_ADV_1000HDX_CAP: 1803 *(uint8_t *)pr_val = ngep->param_adv_1000hdx; 1804 break; 1805 case MAC_PROP_EN_1000HDX_CAP: 1806 *(uint8_t *)pr_val = ngep->param_en_1000hdx; 1807 break; 1808 case MAC_PROP_ADV_100FDX_CAP: 1809 *(uint8_t *)pr_val = ngep->param_adv_100fdx; 1810 break; 1811 case MAC_PROP_EN_100FDX_CAP: 1812 *(uint8_t *)pr_val = ngep->param_en_100fdx; 1813 break; 1814 case MAC_PROP_ADV_100HDX_CAP: 1815 *(uint8_t *)pr_val = ngep->param_adv_100hdx; 1816 break; 1817 case MAC_PROP_EN_100HDX_CAP: 1818 *(uint8_t *)pr_val = ngep->param_en_100hdx; 1819 break; 1820 case MAC_PROP_ADV_10FDX_CAP: 1821 *(uint8_t *)pr_val = ngep->param_adv_10fdx; 1822 break; 1823 case MAC_PROP_EN_10FDX_CAP: 1824 *(uint8_t *)pr_val = ngep->param_en_10fdx; 1825 break; 1826 case MAC_PROP_ADV_10HDX_CAP: 1827 *(uint8_t *)pr_val = ngep->param_adv_10hdx; 1828 break; 1829 case MAC_PROP_EN_10HDX_CAP: 1830 *(uint8_t *)pr_val = ngep->param_en_10hdx; 1831 break; 1832 case MAC_PROP_ADV_100T4_CAP: 1833 case MAC_PROP_EN_100T4_CAP: 1834 *(uint8_t *)pr_val = 0; 1835 break; 1836 case MAC_PROP_PRIVATE: 1837 err = nge_get_priv_prop(ngep, pr_name, 1838 pr_valsize, pr_val); 1839 break; 1840 default: 1841 err = ENOTSUP; 1842 } 1843 return (err); 1844 } 1845 1846 static void 1847 nge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num, 1848 
mac_prop_info_handle_t prh) 1849 { 1850 nge_t *ngep = barg; 1851 1852 switch (pr_num) { 1853 case MAC_PROP_DUPLEX: 1854 case MAC_PROP_SPEED: 1855 case MAC_PROP_ADV_1000FDX_CAP: 1856 case MAC_PROP_ADV_1000HDX_CAP: 1857 case MAC_PROP_ADV_100FDX_CAP: 1858 case MAC_PROP_EN_1000HDX_CAP: 1859 case MAC_PROP_ADV_100HDX_CAP: 1860 case MAC_PROP_ADV_10FDX_CAP: 1861 case MAC_PROP_ADV_10HDX_CAP: 1862 case MAC_PROP_ADV_100T4_CAP: 1863 case MAC_PROP_EN_100T4_CAP: 1864 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 1865 break; 1866 1867 case MAC_PROP_EN_1000FDX_CAP: 1868 case MAC_PROP_EN_100FDX_CAP: 1869 case MAC_PROP_EN_100HDX_CAP: 1870 case MAC_PROP_EN_10FDX_CAP: 1871 case MAC_PROP_EN_10HDX_CAP: 1872 mac_prop_info_set_default_uint8(prh, 1); 1873 break; 1874 1875 case MAC_PROP_AUTONEG: 1876 mac_prop_info_set_default_uint8(prh, 1); 1877 break; 1878 1879 case MAC_PROP_FLOWCTRL: 1880 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI); 1881 break; 1882 1883 case MAC_PROP_MTU: 1884 mac_prop_info_set_range_uint32(prh, ETHERMTU, 1885 ngep->dev_spec_param.jumbo ? NGE_MAX_MTU : ETHERMTU); 1886 break; 1887 1888 case MAC_PROP_PRIVATE: { 1889 char valstr[64]; 1890 int value; 1891 1892 bzero(valstr, sizeof (valstr)); 1893 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 1894 value = NGE_TX_COPY_SIZE; 1895 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 1896 value = NGE_RX_COPY_SIZE; 1897 } else if (strcmp(pr_name, "_recv_max_packet") == 0) { 1898 value = 128; 1899 } else if (strcmp(pr_name, "_poll_quiet_time") == 0) { 1900 value = NGE_POLL_QUIET_TIME; 1901 } else if (strcmp(pr_name, "_poll_busy_time") == 0) { 1902 value = NGE_POLL_BUSY_TIME; 1903 } else if (strcmp(pr_name, "_rx_intr_hwater") == 0) { 1904 value = 1; 1905 } else if (strcmp(pr_name, "_rx_intr_lwater") == 0) { 1906 value = 8; 1907 } else { 1908 return; 1909 } 1910 1911 (void) snprintf(valstr, sizeof (valstr), "%d", value); 1912 } 1913 } 1914 1915 } 1916 1917 /* ARGSUSED */ 1918 static int 1919 nge_set_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_valsize, 1920 const void *pr_val) 1921 { 1922 int err = 0; 1923 long result; 1924 1925 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 1926 if (pr_val == NULL) { 1927 err = EINVAL; 1928 return (err); 1929 } 1930 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1931 if (result < 0 || result > NGE_MAX_SDU) { 1932 err = EINVAL; 1933 } else { 1934 ngep->param_txbcopy_threshold = (uint32_t)result; 1935 goto reprogram; 1936 } 1937 return (err); 1938 } 1939 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 1940 if (pr_val == NULL) { 1941 err = EINVAL; 1942 return (err); 1943 } 1944 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1945 if (result < 0 || result > NGE_MAX_SDU) { 1946 err = EINVAL; 1947 } else { 1948 ngep->param_rxbcopy_threshold = (uint32_t)result; 1949 goto reprogram; 1950 } 1951 return (err); 1952 } 1953 if (strcmp(pr_name, "_recv_max_packet") == 0) { 1954 if (pr_val == NULL) { 1955 err = EINVAL; 1956 return (err); 1957 } 1958 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1959 if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) { 1960 err = EINVAL; 1961 } else { 1962 ngep->param_recv_max_packet = (uint32_t)result; 1963 goto reprogram; 1964 } 1965 return (err); 1966 } 1967 if (strcmp(pr_name, "_poll_quiet_time") == 0) { 1968 if (pr_val == NULL) { 1969 err = EINVAL; 1970 return (err); 1971 } 1972 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1973 if (result < 0 || result > 10000) { 1974 err = EINVAL; 1975 } else { 1976 
ngep->param_poll_quiet_time = (uint32_t)result; 1977 goto reprogram; 1978 } 1979 return (err); 1980 } 1981 if (strcmp(pr_name, "_poll_busy_time") == 0) { 1982 if (pr_val == NULL) { 1983 err = EINVAL; 1984 return (err); 1985 } 1986 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1987 if (result < 0 || result > 10000) { 1988 err = EINVAL; 1989 } else { 1990 ngep->param_poll_busy_time = (uint32_t)result; 1991 goto reprogram; 1992 } 1993 return (err); 1994 } 1995 if (strcmp(pr_name, "_rx_intr_hwater") == 0) { 1996 if (pr_val == NULL) { 1997 err = EINVAL; 1998 return (err); 1999 } 2000 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 2001 if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) { 2002 err = EINVAL; 2003 } else { 2004 ngep->param_rx_intr_hwater = (uint32_t)result; 2005 goto reprogram; 2006 } 2007 return (err); 2008 } 2009 if (strcmp(pr_name, "_rx_intr_lwater") == 0) { 2010 if (pr_val == NULL) { 2011 err = EINVAL; 2012 return (err); 2013 } 2014 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 2015 if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) { 2016 err = EINVAL; 2017 } else { 2018 ngep->param_rx_intr_lwater = (uint32_t)result; 2019 goto reprogram; 2020 } 2021 return (err); 2022 } 2023 err = ENOTSUP; 2024 return (err); 2025 2026 reprogram: 2027 if (err == 0) { 2028 (*ngep->physops->phys_update)(ngep); 2029 nge_chip_sync(ngep); 2030 } 2031 2032 return (err); 2033 } 2034 2035 static int 2036 nge_get_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_valsize, 2037 void *pr_val) 2038 { 2039 int err = ENOTSUP; 2040 int value; 2041 2042 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 2043 value = ngep->param_txbcopy_threshold; 2044 err = 0; 2045 goto done; 2046 } 2047 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 2048 value = ngep->param_rxbcopy_threshold; 2049 err = 0; 2050 goto done; 2051 } 2052 if (strcmp(pr_name, "_recv_max_packet") == 0) { 2053 value = ngep->param_recv_max_packet; 2054 err = 0; 2055 goto done; 2056 } 2057 if (strcmp(pr_name, "_poll_quiet_time") == 0) { 2058 value = ngep->param_poll_quiet_time; 2059 err = 0; 2060 goto done; 2061 } 2062 if (strcmp(pr_name, "_poll_busy_time") == 0) { 2063 value = ngep->param_poll_busy_time; 2064 err = 0; 2065 goto done; 2066 } 2067 if (strcmp(pr_name, "_rx_intr_hwater") == 0) { 2068 value = ngep->param_rx_intr_hwater; 2069 err = 0; 2070 goto done; 2071 } 2072 if (strcmp(pr_name, "_rx_intr_lwater") == 0) { 2073 value = ngep->param_rx_intr_lwater; 2074 err = 0; 2075 goto done; 2076 } 2077 2078 done: 2079 if (err == 0) { 2080 (void) snprintf(pr_val, pr_valsize, "%d", value); 2081 } 2082 return (err); 2083 } 2084 2085 /* ARGSUSED */ 2086 static boolean_t 2087 nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 2088 { 2089 nge_t *ngep = arg; 2090 nge_dev_spec_param_t *dev_param_p; 2091 2092 dev_param_p = &ngep->dev_spec_param; 2093 2094 switch (cap) { 2095 case MAC_CAPAB_HCKSUM: { 2096 uint32_t *hcksum_txflags = cap_data; 2097 2098 if (dev_param_p->tx_hw_checksum) { 2099 *hcksum_txflags = dev_param_p->tx_hw_checksum; 2100 } else 2101 return (B_FALSE); 2102 break; 2103 } 2104 default: 2105 return (B_FALSE); 2106 } 2107 return (B_TRUE); 2108 } 2109 2110 #undef NGE_DBG 2111 #define NGE_DBG NGE_DBG_INIT /* debug flag for this code */ 2112 int 2113 nge_restart(nge_t *ngep) 2114 { 2115 int err = 0; 2116 err = nge_reset_dev(ngep); 2117 /* write back the promisc setting */ 2118 ngep->promisc = ngep->record_promisc; 2119 nge_chip_sync(ngep); 2120 if (!err) 2121 err = nge_chip_start(ngep); 2122 2123 if (err) { 
2124 ngep->nge_mac_state = NGE_MAC_STOPPED; 2125 return (DDI_FAILURE); 2126 } else { 2127 ngep->nge_mac_state = NGE_MAC_STARTED; 2128 return (DDI_SUCCESS); 2129 } 2130 } 2131 2132 void 2133 nge_wake_factotum(nge_t *ngep) 2134 { 2135 mutex_enter(ngep->softlock); 2136 if (ngep->factotum_flag == 0) { 2137 ngep->factotum_flag = 1; 2138 (void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL); 2139 } 2140 mutex_exit(ngep->softlock); 2141 } 2142 2143 void 2144 nge_interrupt_optimize(nge_t *ngep) 2145 { 2146 uint32_t tx_pkts; 2147 tx_pkts = ngep->statistics.sw_statistics.xmit_count - ngep->tpkts_last; 2148 ngep->tpkts_last = ngep->statistics.sw_statistics.xmit_count; 2149 if ((tx_pkts > NGE_POLL_TUNE) && 2150 (tx_pkts <= NGE_POLL_MAX)) 2151 ngep->tfint_threshold = (tx_pkts / NGE_POLL_ENTER); 2152 else 2153 ngep->tfint_threshold = NGE_TFINT_DEFAULT; 2154 } 2155 2156 /* 2157 * High-level cyclic handler 2158 * 2159 * This routine schedules a (low-level) softint callback to the 2160 * factotum. 2161 */ 2162 2163 static void 2164 nge_chip_cyclic(void *arg) 2165 { 2166 nge_t *ngep; 2167 2168 ngep = (nge_t *)arg; 2169 2170 switch (ngep->nge_chip_state) { 2171 default: 2172 return; 2173 2174 case NGE_CHIP_RUNNING: 2175 nge_interrupt_optimize(ngep); 2176 break; 2177 2178 case NGE_CHIP_FAULT: 2179 case NGE_CHIP_ERROR: 2180 break; 2181 } 2182 2183 nge_wake_factotum(ngep); 2184 } 2185 2186 /* 2187 * Get/Release semaphore of SMU 2188 * For SMU enabled chipset 2189 * When nge driver is attached, driver should acquire 2190 * semaphore before PHY init and accessing MAC registers. 2191 * When nge driver is unattached, driver should release 2192 * semaphore. 2193 */ 2194 2195 static int 2196 nge_smu_sema(nge_t *ngep, boolean_t acquire) 2197 { 2198 nge_tx_en tx_en; 2199 uint32_t tries; 2200 2201 if (acquire) { 2202 for (tries = 0; tries < 5; tries++) { 2203 tx_en.val = nge_reg_get32(ngep, NGE_TX_EN); 2204 if (tx_en.bits.smu2mac == NGE_SMU_FREE) 2205 break; 2206 delay(drv_usectohz(1000000)); 2207 } 2208 if (tx_en.bits.smu2mac != NGE_SMU_FREE) 2209 return (DDI_FAILURE); 2210 for (tries = 0; tries < 5; tries++) { 2211 tx_en.val = nge_reg_get32(ngep, NGE_TX_EN); 2212 tx_en.bits.mac2smu = NGE_SMU_GET; 2213 nge_reg_put32(ngep, NGE_TX_EN, tx_en.val); 2214 tx_en.val = nge_reg_get32(ngep, NGE_TX_EN); 2215 2216 if (tx_en.bits.mac2smu == NGE_SMU_GET && 2217 tx_en.bits.smu2mac == NGE_SMU_FREE) 2218 return (DDI_SUCCESS); 2219 drv_usecwait(10); 2220 } 2221 return (DDI_FAILURE); 2222 } else 2223 nge_reg_put32(ngep, NGE_TX_EN, 0x0); 2224 2225 return (DDI_SUCCESS); 2226 2227 } 2228 static void 2229 nge_unattach(nge_t *ngep) 2230 { 2231 send_ring_t *srp; 2232 buff_ring_t *brp; 2233 2234 srp = ngep->send; 2235 brp = ngep->buff; 2236 NGE_TRACE(("nge_unattach($%p)", (void *)ngep)); 2237 2238 /* 2239 * Flag that no more activity may be initiated 2240 */ 2241 ngep->progress &= ~PROGRESS_READY; 2242 ngep->nge_mac_state = NGE_MAC_UNATTACH; 2243 2244 /* 2245 * Quiesce the PHY and MAC (leave it reset but still powered). 
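	 * The PROGRESS_* flags recorded during attach tell us which of the
	 * teardown steps below actually need to be undone.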
	 * Clean up and free all NGE data structures
	 */
	if (ngep->periodic_id != NULL) {
		ddi_periodic_delete(ngep->periodic_id);
		ngep->periodic_id = NULL;
	}

	if (ngep->progress & PROGRESS_KSTATS)
		nge_fini_kstats(ngep);

	if (ngep->progress & PROGRESS_HWINT) {
		mutex_enter(ngep->genlock);
		nge_restore_mac_addr(ngep);
		(void) nge_chip_stop(ngep, B_FALSE);
		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
			(void) nge_smu_sema(ngep, B_FALSE);
		}
		mutex_exit(ngep->genlock);
	}

	if (ngep->progress & PROGRESS_SWINT)
		nge_rem_intrs(ngep);

	if (ngep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(ngep->factotum_hdl);

	if (ngep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(ngep->resched_hdl);

	if (ngep->progress & PROGRESS_INTR) {
		mutex_destroy(srp->tx_lock);
		mutex_destroy(srp->tc_lock);
		mutex_destroy(&srp->dmah_lock);
		mutex_destroy(brp->recycle_lock);

		mutex_destroy(ngep->genlock);
		mutex_destroy(ngep->softlock);
		rw_destroy(ngep->rwlock);
	}

	if (ngep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&ngep->io_handle);

	if (ngep->progress & PROGRESS_CFG)
		pci_config_teardown(&ngep->cfg_handle);

	ddi_remove_minor_node(ngep->devinfo, NULL);

	kmem_free(ngep, sizeof (*ngep));
}

static int
nge_resume(dev_info_t *devinfo)
{
	nge_t *ngep;
	chip_info_t *infop;
	int err;

	ASSERT(devinfo != NULL);

	ngep = ddi_get_driver_private(devinfo);
	err = 0;

	/*
	 * If there are state inconsistencies, this is bad. Returning
	 * DDI_FAILURE here would eventually cause the machine to panic
	 * anyway, so it is best to panic right here, where there is at
	 * least a chance of debugging the problem.
	 */
	if (ngep == NULL)
		cmn_err(CE_PANIC,
		    "nge: ngep returned from ddi_get_driver_private was NULL");
	infop = (chip_info_t *)&ngep->chipinfo;

	if (ngep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "nge: passed devinfo not the same as saved devinfo");

	mutex_enter(ngep->genlock);
	rw_enter(ngep->rwlock, RW_WRITER);

	/*
	 * Fetch the config space. Even though we have most of it cached,
	 * some values *might* change across a suspend/resume.
	 */
	nge_chip_cfg_init(ngep, infop, B_FALSE);

	/*
	 * This early return is taken in only one case: the port had not
	 * been plumbed when the system was suspended.
	 */
	if (ngep->suspended == B_FALSE) {
		rw_exit(ngep->rwlock);
		mutex_exit(ngep->genlock);
		return (DDI_SUCCESS);
	}

	nge_tx_recycle_all(ngep);
	err = nge_reinit_ring(ngep);
	if (!err) {
		err = nge_chip_reset(ngep);
		if (!err)
			err = nge_chip_start(ngep);
	}

	if (err) {
		/*
		 * We note the failure, but return success, as the
		 * system is still usable without this controller.
		 */
		cmn_err(CE_WARN, "nge: resume: failed to restart controller");
	} else {
		ngep->nge_mac_state = NGE_MAC_STARTED;
	}
	ngep->suspended = B_FALSE;

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);

	return (DDI_SUCCESS);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
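 *
 * A DDI_RESUME command is handed straight to nge_resume(); only a full
 * DDI_ATTACH falls through to the allocation and setup code below.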
 */
static int
nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	int err;
	int i;
	int instance;
	caddr_t regs;
	nge_t *ngep;
	chip_info_t *infop;
	mac_register_t *macp;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (nge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP);
	instance = ddi_get_instance(devinfo);
	ddi_set_driver_private(devinfo, ngep);
	ngep->devinfo = devinfo;

	(void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d",
	    NGE_DRIVER_NAME, instance);
	err = pci_config_setup(devinfo, &ngep->cfg_handle);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: pci_config_setup() failed");
		goto attach_fail;
	}
	/*
	 * param_txbcopy_threshold and param_rxbcopy_threshold are tx/rx bcopy
	 * thresholds. Bounds: min 0, max NGE_MAX_SDU
	 */
	ngep->param_txbcopy_threshold = NGE_TX_COPY_SIZE;
	ngep->param_rxbcopy_threshold = NGE_RX_COPY_SIZE;

	/*
	 * param_recv_max_packet is the maximum number of packets received
	 * per interrupt. Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024
	 */
	ngep->param_recv_max_packet = 128;

	/*
	 * param_poll_quiet_time and param_poll_busy_time are the quiet/busy
	 * times used to switch between per-packet interrupts and polling.
	 * Bounds: min 0, max 10000
	 */
	ngep->param_poll_quiet_time = NGE_POLL_QUIET_TIME;
	ngep->param_poll_busy_time = NGE_POLL_BUSY_TIME;
	ngep->tfint_threshold = NGE_TFINT_DEFAULT;
	ngep->poll = B_FALSE;
	ngep->ch_intr_mode = B_FALSE;

	/*
	 * param_rx_intr_hwater/param_rx_intr_lwater: packets received
	 * to trigger the poll_quiet_time/poll_busy_time counter.
	 * Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024.
	 */
	ngep->param_rx_intr_hwater = 1;
	ngep->param_rx_intr_lwater = 8;


	infop = (chip_info_t *)&ngep->chipinfo;
	nge_chip_cfg_init(ngep, infop, B_FALSE);
	nge_init_dev_spec_param(ngep);
	nge_get_props(ngep);
	ngep->progress |= PROGRESS_CFG;

	err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER,
	    &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	ngep->io_regs = regs;
	ngep->progress |= PROGRESS_REGS;

	err = nge_register_intrs_and_init_locks(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach:"
		    " register intrs and init locks failed");
		goto attach_fail;
	}
	nge_init_ring_param_lock(ngep);
	ngep->progress |= PROGRESS_INTR;

	mutex_enter(ngep->genlock);

	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
		err = nge_smu_sema(ngep, B_TRUE);
		if (err != DDI_SUCCESS) {
			nge_problem(ngep, "nge_attach: nge_smu_sema() failed");
			goto attach_fail;
		}
	}
	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
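	 * All of this is done while holding genlock, before the hardware
	 * interrupt vectors are enabled further down.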
	 */
	nge_phys_init(ngep);
	ngep->nge_chip_state = NGE_CHIP_INITIAL;
	err = nge_chip_reset(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: nge_chip_reset() failed");
		mutex_exit(ngep->genlock);
		goto attach_fail;
	}
	nge_chip_sync(ngep);

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(ngep->htable,
		    ngep->intr_actual_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < ngep->intr_actual_cnt; i++) {
			(void) ddi_intr_enable(ngep->htable[i]);
		}
	}

	ngep->link_state = LINK_STATE_UNKNOWN;
	ngep->progress |= PROGRESS_HWINT;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (nge_nd_init(ngep)) {
		nge_problem(ngep, "nge_attach: nge_nd_init() failed");
		mutex_exit(ngep->genlock);
		goto attach_fail;
	}
	ngep->progress |= PROGRESS_NDD;

	/*
	 * Create & initialise named kstats
	 */
	nge_init_kstats(ngep, instance);
	ngep->progress |= PROGRESS_KSTATS;

	mutex_exit(ngep->genlock);

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = ngep;
	macp->m_dip = devinfo;
	macp->m_src_addr = infop->vendor_addr.addr;
	macp->m_callbacks = &nge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ngep->default_mtu;
	macp->m_margin = VTAG_SIZE;
	macp->m_priv_props = nge_priv_props;
	/*
	 * Finally, we're ready to register ourselves with the mac
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &ngep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodic handler.
	 * nge_chip_cyclic() is invoked in kernel context.
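	 * The cyclic fires every NGE_CYCLIC_PERIOD, retuning the tx interrupt
	 * threshold while the chip is RUNNING and waking the factotum so the
	 * link state gets (re)checked.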
	 */
	ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
	    NGE_CYCLIC_PERIOD, DDI_IPL_0);

	ngep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	nge_unattach(ngep);
	return (DDI_FAILURE);
}

static int
nge_suspend(nge_t *ngep)
{
	mutex_enter(ngep->genlock);
	rw_enter(ngep->rwlock, RW_WRITER);

	/* if the port hasn't been plumbed, just return */
	if (ngep->nge_mac_state != NGE_MAC_STARTED) {
		rw_exit(ngep->rwlock);
		mutex_exit(ngep->genlock);
		return (DDI_SUCCESS);
	}
	ngep->suspended = B_TRUE;
	(void) nge_chip_stop(ngep, B_FALSE);
	ngep->nge_mac_state = NGE_MAC_STOPPED;

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);
	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	int i;
	nge_t *ngep;
	mul_item *p, *nextp;
	buff_ring_t *brp;

	NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd));

	ngep = ddi_get_driver_private(devinfo);
	brp = ngep->buff;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		/*
		 * Stop the NIC
		 * Note: This driver doesn't currently support WOL, but
		 *	should it in the future, it is important to
		 *	make sure the PHY remains powered so that the
		 *	wakeup packet can actually be received.
		 */
		return (nge_suspend(ngep));

	case DDI_DETACH:
		break;
	}

	/* Wait for all buffers posted to the upper layer to be released */
	for (i = 0; i < 1000; i++) {
		if (brp->rx_hold == 0)
			break;
		drv_usecwait(1000);
	}

	/* If the upper layer still holds any buffers, refuse to detach */
	if (brp->rx_hold != 0)
		return (DDI_FAILURE);

	/*
	 * Unregister from the GLD subsystem. This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(ngep->mh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Free the multicast table. This is done only after
	 * mac_unregister() has succeeded, so that the table remains
	 * usable if mac_unregister() fails and the driver stays attached.
	 */
	for (p = ngep->pcur_mulist; p != NULL; p = nextp) {
		nextp = p->next;
		kmem_free(p, sizeof (mul_item));
	}
	ngep->pcur_mulist = NULL;

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	nge_unattach(ngep);
	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
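 *
 * Here the only failure case is a missing per-instance soft state
 * pointer; otherwise the MAC address is restored and the chip is
 * stopped without taking any locks.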
 */
static int
nge_quiesce(dev_info_t *devinfo)
{
	nge_t *ngep;

	ngep = ddi_get_driver_private(devinfo);

	if (ngep == NULL)
		return (DDI_FAILURE);

	/*
	 * Turn off debug tracing
	 */
	nge_debug = 0;
	ngep->debug = 0;

	nge_restore_mac_addr(ngep);
	(void) nge_chip_stop(ngep, B_FALSE);

	return (DDI_SUCCESS);
}



/*
 * ========== Module Loading Data & Entry Points ==========
 */

DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach,
    NULL, NULL, D_MP, NULL, nge_quiesce);


static struct modldrv nge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	nge_ident,		/* short description */
	&nge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&nge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&nge_dev_ops, "nge");
	status = mod_install(&modlinkage);
	if (status != DDI_SUCCESS)
		mac_fini_ops(&nge_dev_ops);
	else
		mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL);

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&nge_dev_ops);
		mutex_destroy(nge_log_mutex);
	}

	return (status);
}

/*
 * ============ Init MSI/Fixed/SoftInterrupt routines ==============
 */

/*
 * Register interrupts and initialize the mutexes and condition variables
 */

static int
nge_register_intrs_and_init_locks(nge_t *ngep)
{
	int err;
	int intr_types;
	uint_t soft_prip;
	nge_msi_mask msi_mask;
	nge_msi_map0_vec map0_vec;
	nge_msi_map1_vec map1_vec;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations. In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held. So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available. Its only purpose is to call mac_tx_update()
	 * to retry the pending transmits (we're not allowed to hold
	 * driver-defined mutexes across mac_tx_update()).
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block. It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
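	 *
	 * Both softints are added at DDI_INTR_SOFTPRI_MIN, below the
	 * priority of the hardware interrupt registered later on.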
	 */
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_reschedule softintr failed");

		return (DDI_FAILURE);
	}
	ngep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_chip_factotum softintr failed!");

		return (DDI_FAILURE);
	}
	if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip)
	    != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: get softintr priority failed\n");

		return (DDI_FAILURE);
	}
	ngep->soft_pri = soft_prip;

	ngep->progress |= PROGRESS_FACTOTUM;
	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types)
	    != DDI_SUCCESS) {
		nge_error(ngep, "ddi_intr_get_supported_types failed\n");

		return (DDI_FAILURE);
	}

	NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x",
	    intr_types));

	if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) {

		/* MSI configuration for the MCP55 chipset */
		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {


			/* Enable the 8 vectors */
			msi_mask.msi_mask_val =
			    nge_reg_get32(ngep, NGE_MSI_MASK);
			msi_mask.msi_msk_bits.vec0 = NGE_SET;
			msi_mask.msi_msk_bits.vec1 = NGE_SET;
			msi_mask.msi_msk_bits.vec2 = NGE_SET;
			msi_mask.msi_msk_bits.vec3 = NGE_SET;
			msi_mask.msi_msk_bits.vec4 = NGE_SET;
			msi_mask.msi_msk_bits.vec5 = NGE_SET;
			msi_mask.msi_msk_bits.vec6 = NGE_SET;
			msi_mask.msi_msk_bits.vec7 = NGE_SET;
			nge_reg_put32(ngep, NGE_MSI_MASK,
			    msi_mask.msi_mask_val);

			/*
			 * Remap MSI MAP0 and MAP1. By default the MCP55
			 * maps all interrupt sources to vector 0, so
			 * software needs to remap them. This mapping is
			 * the same as on the CK804.
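			 * With this remapping the receive-related sources
			 * stay on vector 0, the transmit-related sources
			 * move to vector 5, and the remaining status/error
			 * sources are spread over vectors 2, 3 and 6.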
2846 */ 2847 map0_vec.msi_map0_val = 2848 nge_reg_get32(ngep, NGE_MSI_MAP0); 2849 map1_vec.msi_map1_val = 2850 nge_reg_get32(ngep, NGE_MSI_MAP1); 2851 map0_vec.vecs_bits.reint_vec = 0; 2852 map0_vec.vecs_bits.rcint_vec = 0; 2853 map0_vec.vecs_bits.miss_vec = 3; 2854 map0_vec.vecs_bits.teint_vec = 5; 2855 map0_vec.vecs_bits.tcint_vec = 5; 2856 map0_vec.vecs_bits.stint_vec = 2; 2857 map0_vec.vecs_bits.mint_vec = 6; 2858 map0_vec.vecs_bits.rfint_vec = 0; 2859 map1_vec.vecs_bits.tfint_vec = 5; 2860 map1_vec.vecs_bits.feint_vec = 6; 2861 map1_vec.vecs_bits.resv8_11 = 3; 2862 map1_vec.vecs_bits.resv12_15 = 1; 2863 map1_vec.vecs_bits.resv16_19 = 0; 2864 map1_vec.vecs_bits.resv20_23 = 7; 2865 map1_vec.vecs_bits.resv24_31 = 0xff; 2866 nge_reg_put32(ngep, NGE_MSI_MAP0, 2867 map0_vec.msi_map0_val); 2868 nge_reg_put32(ngep, NGE_MSI_MAP1, 2869 map1_vec.msi_map1_val); 2870 } 2871 if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 2872 NGE_DEBUG(("MSI registration failed, " 2873 "trying FIXED interrupt type\n")); 2874 } else { 2875 nge_log(ngep, "Using MSI interrupt type\n"); 2876 2877 ngep->intr_type = DDI_INTR_TYPE_MSI; 2878 ngep->progress |= PROGRESS_SWINT; 2879 } 2880 } 2881 2882 if (!(ngep->progress & PROGRESS_SWINT) && 2883 (intr_types & DDI_INTR_TYPE_FIXED)) { 2884 if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 2885 nge_error(ngep, "FIXED interrupt " 2886 "registration failed\n"); 2887 2888 return (DDI_FAILURE); 2889 } 2890 2891 nge_log(ngep, "Using FIXED interrupt type\n"); 2892 2893 ngep->intr_type = DDI_INTR_TYPE_FIXED; 2894 ngep->progress |= PROGRESS_SWINT; 2895 } 2896 2897 2898 if (!(ngep->progress & PROGRESS_SWINT)) { 2899 nge_error(ngep, "No interrupts registered\n"); 2900 2901 return (DDI_FAILURE); 2902 } 2903 mutex_init(ngep->genlock, NULL, MUTEX_DRIVER, 2904 DDI_INTR_PRI(ngep->intr_pri)); 2905 mutex_init(ngep->softlock, NULL, MUTEX_DRIVER, 2906 DDI_INTR_PRI(ngep->soft_pri)); 2907 rw_init(ngep->rwlock, NULL, RW_DRIVER, 2908 DDI_INTR_PRI(ngep->intr_pri)); 2909 2910 return (DDI_SUCCESS); 2911 } 2912 2913 /* 2914 * nge_add_intrs: 2915 * 2916 * Register FIXED or MSI interrupts. 
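 *
 * The sequence is: query how many vectors the device supports and how
 * many are available, allocate the handles, check that the priority is
 * below the high-level threshold, attach nge_chip_intr to each vector,
 * and finally fetch the capability flags.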
2917 */ 2918 static int 2919 nge_add_intrs(nge_t *ngep, int intr_type) 2920 { 2921 dev_info_t *dip = ngep->devinfo; 2922 int avail, actual, intr_size, count = 0; 2923 int i, flag, ret; 2924 2925 NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type)); 2926 2927 /* Get number of interrupts */ 2928 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 2929 if ((ret != DDI_SUCCESS) || (count == 0)) { 2930 nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, " 2931 "count: %d", ret, count); 2932 2933 return (DDI_FAILURE); 2934 } 2935 2936 /* Get number of available interrupts */ 2937 ret = ddi_intr_get_navail(dip, intr_type, &avail); 2938 if ((ret != DDI_SUCCESS) || (avail == 0)) { 2939 nge_error(ngep, "ddi_intr_get_navail() failure, " 2940 "ret: %d, avail: %d\n", ret, avail); 2941 2942 return (DDI_FAILURE); 2943 } 2944 2945 if (avail < count) { 2946 NGE_DEBUG(("nitrs() returned %d, navail returned %d\n", 2947 count, avail)); 2948 } 2949 flag = DDI_INTR_ALLOC_NORMAL; 2950 2951 /* Allocate an array of interrupt handles */ 2952 intr_size = count * sizeof (ddi_intr_handle_t); 2953 ngep->htable = kmem_alloc(intr_size, KM_SLEEP); 2954 2955 /* Call ddi_intr_alloc() */ 2956 ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0, 2957 count, &actual, flag); 2958 2959 if ((ret != DDI_SUCCESS) || (actual == 0)) { 2960 nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret); 2961 2962 kmem_free(ngep->htable, intr_size); 2963 return (DDI_FAILURE); 2964 } 2965 2966 if (actual < count) { 2967 NGE_DEBUG(("Requested: %d, Received: %d\n", 2968 count, actual)); 2969 } 2970 2971 ngep->intr_actual_cnt = actual; 2972 ngep->intr_req_cnt = count; 2973 2974 /* 2975 * Get priority for first msi, assume remaining are all the same 2976 */ 2977 if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) != 2978 DDI_SUCCESS) { 2979 nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret); 2980 2981 /* Free already allocated intr */ 2982 for (i = 0; i < actual; i++) { 2983 (void) ddi_intr_free(ngep->htable[i]); 2984 } 2985 2986 kmem_free(ngep->htable, intr_size); 2987 2988 return (DDI_FAILURE); 2989 } 2990 /* Test for high level mutex */ 2991 if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) { 2992 nge_error(ngep, "nge_add_intrs:" 2993 "Hi level interrupt not supported"); 2994 2995 for (i = 0; i < actual; i++) 2996 (void) ddi_intr_free(ngep->htable[i]); 2997 2998 kmem_free(ngep->htable, intr_size); 2999 3000 return (DDI_FAILURE); 3001 } 3002 3003 3004 /* Call ddi_intr_add_handler() */ 3005 for (i = 0; i < actual; i++) { 3006 if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr, 3007 (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 3008 nge_error(ngep, "ddi_intr_add_handler() " 3009 "failed %d\n", ret); 3010 3011 /* Free already allocated intr */ 3012 for (i = 0; i < actual; i++) { 3013 (void) ddi_intr_free(ngep->htable[i]); 3014 } 3015 3016 kmem_free(ngep->htable, intr_size); 3017 3018 return (DDI_FAILURE); 3019 } 3020 } 3021 3022 if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap)) 3023 != DDI_SUCCESS) { 3024 nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret); 3025 3026 for (i = 0; i < actual; i++) { 3027 (void) ddi_intr_remove_handler(ngep->htable[i]); 3028 (void) ddi_intr_free(ngep->htable[i]); 3029 } 3030 3031 kmem_free(ngep->htable, intr_size); 3032 3033 return (DDI_FAILURE); 3034 } 3035 3036 return (DDI_SUCCESS); 3037 } 3038 3039 /* 3040 * nge_rem_intrs: 3041 * 3042 * Unregister FIXED or MSI interrupts 3043 */ 3044 static void 3045 nge_rem_intrs(nge_t *ngep) 3046 { 3047 int i; 3048 
3049 NGE_DEBUG(("nge_rem_intrs\n")); 3050 3051 /* Disable all interrupts */ 3052 if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3053 /* Call ddi_intr_block_disable() */ 3054 (void) ddi_intr_block_disable(ngep->htable, 3055 ngep->intr_actual_cnt); 3056 } else { 3057 for (i = 0; i < ngep->intr_actual_cnt; i++) { 3058 (void) ddi_intr_disable(ngep->htable[i]); 3059 } 3060 } 3061 3062 /* Call ddi_intr_remove_handler() */ 3063 for (i = 0; i < ngep->intr_actual_cnt; i++) { 3064 (void) ddi_intr_remove_handler(ngep->htable[i]); 3065 (void) ddi_intr_free(ngep->htable[i]); 3066 } 3067 3068 kmem_free(ngep->htable, 3069 ngep->intr_req_cnt * sizeof (ddi_intr_handle_t)); 3070 } 3071
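
/*
 * Tuning note: the private properties handled by nge_set_priv_prop() and
 * nge_get_priv_prop() above are exposed as driver-private link properties,
 * so they can be examined or adjusted from userland with dladm(1M), for
 * example (assuming the instance is nge0):
 *
 *	dladm show-linkprop -p _rx_intr_hwater nge0
 *	dladm set-linkprop -p _tx_bcopy_threshold=1024 nge0
 *
 * Values outside the bounds enforced in nge_set_priv_prop() are rejected
 * with EINVAL.
 */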