/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#include "nge.h"

/*
 * Describes the chip's DMA engine
 */

static ddi_dma_attr_t hot_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	0
};

static ddi_dma_attr_t hot_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen	*/
	1,				/* dma_attr_granular	*/
	0
};

static ddi_dma_attr_t sum_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	0
};

static ddi_dma_attr_t sum_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen	*/
	1,				/* dma_attr_granular	*/
	0
};

/*
 * DMA access attributes for data.
 */
ddi_device_acc_attr_t nge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t nge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t nge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * NIC DESC MODE 2
 */

static const nge_desc_attr_t nge_sum_desc = {

	sizeof (sum_rx_bd),
	sizeof (sum_tx_bd),
	&sum_dma_attr,
	&sum_tx_dma_attr,
	nge_sum_rxd_fill,
	nge_sum_rxd_check,
	nge_sum_txd_fill,
	nge_sum_txd_check,
};

/*
 * NIC DESC MODE 3
 */

static const nge_desc_attr_t nge_hot_desc = {

	sizeof (hot_rx_bd),
	sizeof (hot_tx_bd),
	&hot_dma_attr,
	&hot_tx_dma_attr,
	nge_hot_rxd_fill,
	nge_hot_rxd_check,
	nge_hot_txd_fill,
	nge_hot_txd_check,
};

static char nge_ident[] = "nVidia 1Gb Ethernet";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char debug_propname[] = "nge-debug-flags";
static char intr_moderation[] = "intr-moderation";
static char rx_data_hw[] = "rx-data-hw";
static char rx_prd_lw[] = "rx-prd-lw";
static char rx_prd_hw[] = "rx-prd-hw";
static char sw_intr_intv[] = "sw-intr-intvl";
static char nge_desc_mode[] = "desc-mode";
static char default_mtu[] = "default_mtu";
static char low_memory_mode[] = "minimal-memory-usage";
static char mac_addr_reversion[] = "mac-addr-reversion";
extern kmutex_t nge_log_mutex[1];

static int nge_m_start(void *);
static void nge_m_stop(void *);
static int nge_m_promisc(void *, boolean_t);
static int nge_m_multicst(void *, boolean_t, const uint8_t *);
static int nge_m_unicst(void *, const uint8_t *);
static void nge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t nge_m_getcapab(void *, mac_capab_t, void *);
static int nge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int nge_set_priv_prop(nge_t *, const char *, uint_t,
    const void *);
static int nge_get_priv_prop(nge_t *, const char *, uint_t,
    uint_t, void *);

#define	NGE_M_CALLBACK_FLAGS\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t nge_m_callbacks = {
	NGE_M_CALLBACK_FLAGS,
	nge_m_stat,
	nge_m_start,
	nge_m_stop,
	nge_m_promisc,
	nge_m_multicst,
	nge_m_unicst,
	nge_m_tx,
	NULL,
	nge_m_ioctl,
	nge_m_getcapab,
	NULL,
	NULL,
	nge_m_setprop,
	nge_m_getprop
};

mac_priv_prop_t nge_priv_props[] = {
	{"_tx_bcopy_threshold", MAC_PROP_PERM_RW},
	{"_rx_bcopy_threshold", MAC_PROP_PERM_RW},
	{"_recv_max_packet", MAC_PROP_PERM_RW},
	{"_poll_quiet_time", MAC_PROP_PERM_RW},
	{"_poll_busy_time", MAC_PROP_PERM_RW},
	{"_rx_intr_hwater", MAC_PROP_PERM_RW},
	{"_rx_intr_lwater", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_adv_asym_pause_cap", MAC_PROP_PERM_RW},
	{"_tx_n_intr", MAC_PROP_PERM_RW}
};

#define	NGE_MAX_PRIV_PROPS \
	(sizeof (nge_priv_props)/sizeof (mac_priv_prop_t))

static int nge_add_intrs(nge_t *, int);
static void nge_rem_intrs(nge_t *);
static int nge_register_intrs_and_init_locks(nge_t *);

/*
 * NGE MSI tunable:
 */
boolean_t nge_enable_msi = B_FALSE;

static enum ioc_reply
nge_set_loop_mode(nge_t *ngep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == ngep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case NGE_LOOP_NONE:
	case NGE_LOOP_EXTERNAL_100:
	case NGE_LOOP_EXTERNAL_10:
	case NGE_LOOP_INTERNAL_PHY:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	ngep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_INIT

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
void
nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	size_t totsize;

	totsize = qty*size;
	ASSERT(size > 0);
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
int
nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	int err;
	caddr_t va;

	NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)ngep, memsize, attr_p, dma_flags, dma_p));
	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		goto fail;

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->offset = 0;

	return (DDI_SUCCESS);

fail:
	nge_free_dma_mem(dma_p);
	NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!"));

	return (DDI_FAILURE);
}

/*
 * Free one allocated area of DMAable memory
 */
void
nge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
	}
	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
	if (dma_p->dma_hdl != NULL) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}
}

#define	ALLOC_TX_BUF	0x1
#define	ALLOC_TX_DESC	0x2
#define	ALLOC_RX_DESC	0x4

int
nge_alloc_bufs(nge_t *ngep)
{
	int err;
	int split;
	int progress;
	size_t txbuffsize;
	size_t rxdescsize;
	size_t txdescsize;

	txbuffsize = ngep->tx_desc * ngep->buf_size;
	rxdescsize = ngep->rx_desc;
	txdescsize = ngep->tx_desc;
	rxdescsize *= ngep->desc_attr.rxd_size;
	txdescsize *= ngep->desc_attr.txd_size;
	progress = 0;

	NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep));
	/*
	 * Allocate memory & handles for TX buffers
	 */
	ASSERT((txbuffsize % ngep->nge_split) == 0);
	for (split = 0; split < ngep->nge_split; ++split) {
		err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split,
		    &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE,
		    &ngep->send->buf[split]);
		if (err != DDI_SUCCESS)
			goto fail;
	}

	progress |= ALLOC_TX_BUF;

	/*
	 * Allocate memory & handles for receive return rings and
	 * buffer (producer) descriptor rings
	 */
	err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc);
	if (err != DDI_SUCCESS)
		goto fail;
	progress |= ALLOC_RX_DESC;

	/*
	 * Allocate memory & handles for TX descriptor rings,
	 */
	err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc);
	if (err != DDI_SUCCESS)
		goto fail;
	return (DDI_SUCCESS);

fail:
	if (progress & ALLOC_RX_DESC)
		nge_free_dma_mem(&ngep->recv->desc);
	if (progress & ALLOC_TX_BUF) {
		for (split = 0; split < ngep->nge_split; ++split)
			nge_free_dma_mem(&ngep->send->buf[split]);
	}

	return (DDI_FAILURE);
}

/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
 */
void
nge_free_bufs(nge_t *ngep)
{
	int split;

	NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep));

	nge_free_dma_mem(&ngep->recv->desc);
	nge_free_dma_mem(&ngep->send->desc);

	for (split = 0; split < ngep->nge_split; ++split)
		nge_free_dma_mem(&ngep->send->buf[split]);
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
nge_fini_send_ring(nge_t *ngep)
{
	uint32_t slot;
	size_t dmah_num;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;

	srp = ngep->send;
	ssbdp = srp->sw_sbds;

	NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep));

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	for (slot = 0; slot < dmah_num; ++slot) {
		if (srp->dmahndl[slot].hndl) {
			(void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl);
			ddi_dma_free_handle(&srp->dmahndl[slot].hndl);
			srp->dmahndl[slot].hndl = NULL;
			srp->dmahndl[slot].next = NULL;
		}
	}

	srp->dmah_free.head = NULL;
	srp->dmah_free.tail = NULL;

	kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp));

}

/*
 * Initialise the specified Send Ring, using the information in the
 * <dma_area> descriptors that it contains to set up all the other
 * fields. This routine should be called only once for each ring.
 */
static int
nge_init_send_ring(nge_t *ngep)
{
	size_t dmah_num;
	uint32_t nslots;
	uint32_t err;
	uint32_t slot;
	uint32_t split;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	dma_area_t desc;
	dma_area_t pbuf;

	srp = ngep->send;
	srp->desc.nslots = ngep->tx_desc;
	nslots = srp->desc.nslots;

	NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep));
	/*
	 * Other one-off initialisation of per-ring data
	 */
	srp->ngep = ngep;

	/*
	 * Allocate the array of s/w Send Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
	srp->sw_sbds = ssbdp;

	/*
	 * Now initialise each array element once and for all
	 */
	desc = srp->desc;
	for (split = 0; split < ngep->nge_split; ++split) {
		pbuf = srp->buf[split];
		for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) {
			nge_slice_chunk(&ssbdp->desc, &desc, 1,
			    ngep->desc_attr.txd_size);
			nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
			    ngep->buf_size);
		}
		ASSERT(pbuf.alength == 0);
	}
	ASSERT(desc.alength == 0);

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	/* preallocate dma handles for tx buffer */
	for (slot = 0; slot < dmah_num; ++slot) {

		err = ddi_dma_alloc_handle(ngep->devinfo,
		    ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &srp->dmahndl[slot].hndl);

		if (err != DDI_SUCCESS) {
			nge_fini_send_ring(ngep);
			nge_error(ngep,
			    "nge_init_send_ring: alloc dma handle fails");
			return (DDI_FAILURE);
		}
		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
	}

	srp->dmah_free.head = srp->dmahndl;
	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
	srp->dmah_free.tail->next = NULL;

	return (DDI_SUCCESS);
}

/*
 * Initialize the tx recycle pointer and tx sending pointer of tx ring
 * and set the type of tx's data descriptor by default.
 */
static void
nge_reinit_send_ring(nge_t *ngep)
{
	size_t dmah_num;
	uint32_t slot;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;

	srp = ngep->send;

	/*
	 * Reinitialise control variables ...
	 */

	srp->tx_hwmark = NGE_DESC_MIN;
	srp->tx_lwmark = NGE_DESC_MIN;

	srp->tx_next = 0;
	srp->tx_free = srp->desc.nslots;
	srp->tc_next = 0;

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	/* Re-link the preallocated dma handles into the free list */
	for (slot = 0; slot - dmah_num != 0; ++slot)
		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;

	srp->dmah_free.head = srp->dmahndl;
	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
	srp->dmah_free.tail->next = NULL;

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	for (slot = 0; slot < srp->desc.nslots; ++slot) {
		ssbdp = &srp->sw_sbds[slot];
		ssbdp->flags = HOST_OWN;
	}

	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
}

/*
 * Initialize the slot number of rx's ring
 */
static void
nge_init_recv_ring(nge_t *ngep)
{
	recv_ring_t *rrp;

	rrp = ngep->recv;
	rrp->desc.nslots = ngep->rx_desc;
	rrp->ngep = ngep;
}

/*
 * Initialize the rx recycle pointer and rx sending pointer of rx ring
 */
static void
nge_reinit_recv_ring(nge_t *ngep)
{
	recv_ring_t *rrp;

	rrp = ngep->recv;

	/*
	 * Reinitialise control variables ...
	 */
	rrp->prod_index = 0;
	/*
	 * Zero and sync all the h/w Receive Buffer Descriptors
	 */
	DMA_ZERO(rrp->desc);
	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV);
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
nge_fini_buff_ring(nge_t *ngep)
{
	uint32_t i;
	buff_ring_t *brp;
	dma_area_t *bufp;
	sw_rx_sbd_t *bsbdp;

	brp = ngep->buff;
	bsbdp = brp->sw_rbds;

	NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep));

	mutex_enter(brp->recycle_lock);
	brp->buf_sign++;
	mutex_exit(brp->recycle_lock);
	for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) {
		if (bsbdp->bufp) {
			if (bsbdp->bufp->mp)
				freemsg(bsbdp->bufp->mp);
			nge_free_dma_mem(bsbdp->bufp);
			kmem_free(bsbdp->bufp, sizeof (dma_area_t));
			bsbdp->bufp = NULL;
		}
	}
	while (brp->free_list != NULL) {
		bufp = brp->free_list;
		brp->free_list = bufp->next;
		bufp->next = NULL;
		if (bufp->mp)
			freemsg(bufp->mp);
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
	}
	while (brp->recycle_list != NULL) {
		bufp = brp->recycle_list;
		brp->recycle_list = bufp->next;
		bufp->next = NULL;
		if (bufp->mp)
			freemsg(bufp->mp);
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
	}


	kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp)));
	brp->sw_rbds = NULL;
}

/*
 * Initialize the Rx's data ring and free ring
 */
static int
nge_init_buff_ring(nge_t *ngep)
{
	uint32_t err;
	uint32_t slot;
	uint32_t nslots_buff;
	uint32_t nslots_recv;
	buff_ring_t *brp;
	recv_ring_t *rrp;
	dma_area_t desc;
	dma_area_t *bufp;
	sw_rx_sbd_t *bsbdp;

	rrp = ngep->recv;
	brp = ngep->buff;
	brp->nslots = ngep->rx_buf;
	brp->rx_bcopy = B_FALSE;
	nslots_recv = rrp->desc.nslots;
	nslots_buff = brp->nslots;
	brp->ngep = ngep;

	NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep));

	/*
	 * Allocate the array of s/w Recv Buffer Descriptors
	 */
	bsbdp = kmem_zalloc(nslots_recv * sizeof (*bsbdp), KM_SLEEP);
	brp->sw_rbds = bsbdp;
	brp->free_list = NULL;
	brp->recycle_list = NULL;
	for (slot = 0; slot < nslots_buff; ++slot) {
		bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP);
		err = nge_alloc_dma_mem(ngep, (ngep->buf_size
		    + NGE_HEADROOM),
		    &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp);
		if (err != DDI_SUCCESS) {
			kmem_free(bufp, sizeof (dma_area_t));
			return (DDI_FAILURE);
		}

		bufp->alength -= NGE_HEADROOM;
		bufp->offset += NGE_HEADROOM;
		bufp->private = (caddr_t)ngep;
		bufp->rx_recycle.free_func = nge_recv_recycle;
		bufp->rx_recycle.free_arg = (caddr_t)bufp;
		bufp->signature = brp->buf_sign;
		bufp->rx_delivered = B_FALSE;
		bufp->mp = desballoc(DMA_VPTR(*bufp),
		    ngep->buf_size + NGE_HEADROOM,
		    0, &bufp->rx_recycle);

		if (bufp->mp == NULL) {
			return (DDI_FAILURE);
		}
		bufp->next = brp->free_list;
		brp->free_list = bufp;
	}

	/*
	 * Now initialise each array element once and for all
	 */
	desc = rrp->desc;
	for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) {
		nge_slice_chunk(&bsbdp->desc, &desc, 1,
		    ngep->desc_attr.rxd_size);
		bufp = brp->free_list;
		brp->free_list = bufp->next;
		bsbdp->bufp = bufp;
		bsbdp->flags = CONTROLER_OWN;
		bufp->next = NULL;
	}

	ASSERT(desc.alength == 0);
	return (DDI_SUCCESS);
}

/*
 * Fill the host address of data in rx descriptors
 * and initialize the free pointers of the rx free ring
 */
static int
nge_reinit_buff_ring(nge_t *ngep)
{
	uint32_t slot;
	uint32_t nslots_recv;
	buff_ring_t *brp;
	recv_ring_t *rrp;
	sw_rx_sbd_t *bsbdp;
	void *hw_bd_p;

	brp = ngep->buff;
	rrp = ngep->recv;
	bsbdp = brp->sw_rbds;
	nslots_recv = rrp->desc.nslots;
	for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) {
		hw_bd_p = DMA_VPTR(bsbdp->desc);
		/*
		 * There is a scenario: when the traffic of small tcp
		 * packets is heavy, suspending the tcp traffic will
		 * cause the preallocated rx buffers not to be
		 * released in time by the tcp traffic, so rx's buffer
		 * pointers are not refilled in time.
		 *
		 * At this point, if we reinitialize the driver, the bufp
		 * pointer for rx's traffic will be NULL, and the
		 * reinitialization fails.
		 */
		if (bsbdp->bufp == NULL)
			return (DDI_FAILURE);

		ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie,
		    bsbdp->bufp->alength);
	}
	return (DDI_SUCCESS);
}

static void
nge_init_ring_param_lock(nge_t *ngep)
{
	buff_ring_t *brp;
	send_ring_t *srp;

	srp = ngep->send;
	brp = ngep->buff;

	/* Init the locks for send ring */
	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));

	/* Init parameters of buffer ring */
	brp->free_list = NULL;
	brp->recycle_list = NULL;
	brp->rx_hold = 0;
	brp->buf_sign = 0;

	/* Init recycle list lock */
	mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
}

int
nge_init_rings(nge_t *ngep)
{
	uint32_t err;

	err = nge_init_send_ring(ngep);
	if (err != DDI_SUCCESS) {
		return (err);
	}
	nge_init_recv_ring(ngep);

	err = nge_init_buff_ring(ngep);
	if (err != DDI_SUCCESS) {
		nge_fini_send_ring(ngep);
		return (DDI_FAILURE);
	}

	return (err);
}

static int
nge_reinit_ring(nge_t *ngep)
{
	int err;

	nge_reinit_recv_ring(ngep);
	nge_reinit_send_ring(ngep);
	err = nge_reinit_buff_ring(ngep);
	return (err);
}


void
nge_fini_rings(nge_t *ngep)
{
	/*
	 * For the receive ring, nothing needs to be finished,
	 * so only finish the buffer ring and send ring here.
	 */
	nge_fini_buff_ring(ngep);
	nge_fini_send_ring(ngep);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	NGE_LOOP_NONE		},
	{ external,	"100Mbps",	NGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	NGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		NGE_LOOP_INTERNAL_PHY	},
};

enum ioc_reply
nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;
	uint32_t *lbmp;
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = ngep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (nge_set_loop_mode(ngep, *lbmp));
	}
}

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_NEMO


static void
nge_check_desc_prop(nge_t *ngep)
{
	if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD)
		ngep->desc_mode = DESC_HOT;

	if (ngep->desc_mode == DESC_OFFLOAD) {

		ngep->desc_attr = nge_sum_desc;

	} else if (ngep->desc_mode == DESC_HOT) {

		ngep->desc_attr = nge_hot_desc;
	}
}

/*
 * nge_get_props -- get the parameters to tune the driver
 */
static void
nge_get_props(nge_t *ngep)
{
	chip_info_t *infop;
	dev_info_t *devinfo;
	nge_dev_spec_param_t *dev_param_p;

	devinfo = ngep->devinfo;
	infop = (chip_info_t *)&ngep->chipinfo;
	dev_param_p = &ngep->dev_spec_param;

	infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, clsize_propname, 32);

	infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, latency_propname, 64);
	ngep->intr_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, intr_moderation, NGE_SET);
	ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_data_hw, 0x20);
	ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_lw, 0x4);
	ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_hw, 0xc);

	ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC);
	ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP);
	ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type);
	ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, low_memory_mode, 0);
	ngep->mac_addr_reversion = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, mac_addr_reversion, 0);

	if (dev_param_p->jumbo) {
		ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
		    DDI_PROP_DONTPASS, default_mtu, ETHERMTU);
	} else
		ngep->default_mtu = ETHERMTU;

	if (ngep->default_mtu > ETHERMTU &&
	    ngep->default_mtu <= NGE_MTU_2500) {
		ngep->buf_size = NGE_JB2500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_2500 &&
	    ngep->default_mtu <= NGE_MTU_4500) {
		ngep->buf_size = NGE_JB4500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_4500 &&
	    ngep->default_mtu <= NGE_MAX_MTU) {
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MAX_MTU) {
		ngep->default_mtu = NGE_MAX_MTU;
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->lowmem_mode != 0) {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_32;
	} else {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = dev_param_p->tx_desc_num;
		ngep->rx_desc = dev_param_p->rx_desc_num;
		ngep->rx_buf = dev_param_p->rx_desc_num * 2;
		ngep->nge_split = dev_param_p->nge_split;
	}

	nge_check_desc_prop(ngep);
}


static int
nge_reset_dev(nge_t *ngep)
{
	int err;
	nge_mul_addr1 maddr1;
	nge_sw_statistics_t *sw_stp;
	sw_stp = &ngep->statistics.sw_statistics;
	send_ring_t *srp = ngep->send;

	ASSERT(mutex_owned(ngep->genlock));
	mutex_enter(srp->tc_lock);
	mutex_enter(srp->tx_lock);

	nge_tx_recycle_all(ngep);
	err = nge_reinit_ring(ngep);
	if (err == DDI_FAILURE) {
		mutex_exit(srp->tx_lock);
		mutex_exit(srp->tc_lock);
		return (err);
	}
	err = nge_chip_reset(ngep);
	/*
	 * Clear the Multicast mac address table
	 */
	nge_reg_put32(ngep, NGE_MUL_ADDR0, 0);
	maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1);
	maddr1.addr_bits.addr = 0;
	nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val);

	mutex_exit(srp->tx_lock);
	mutex_exit(srp->tc_lock);
	if (err == DDI_FAILURE)
		return (err);
	ngep->watchdog = 0;
	ngep->resched_needed = B_FALSE;
	ngep->promisc = B_FALSE;
	ngep->param_loop_mode = NGE_LOOP_NONE;
	ngep->factotum_flag = 0;
	ngep->resched_needed = 0;
	ngep->nge_mac_state = NGE_MAC_RESET;
	ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL;
	ngep->max_sdu += VTAG_SIZE;
	ngep->rx_def = 0x16;

	/* Clear the software statistics */
	sw_stp->recv_count = 0;
	sw_stp->xmit_count = 0;
	sw_stp->rbytes = 0;
	sw_stp->obytes = 0;

	return (DDI_SUCCESS);
}

static void
nge_m_stop(void *arg)
{
	nge_t *ngep = arg;		/* private device info	*/

	NGE_TRACE(("nge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(ngep->genlock);
	/* If suspended, the adapter is already stopped, just return. */
	if (ngep->suspended) {
		ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED);
		mutex_exit(ngep->genlock);
		return;
	}
	rw_enter(ngep->rwlock, RW_WRITER);

	(void) nge_chip_stop(ngep, B_FALSE);
	ngep->nge_mac_state = NGE_MAC_STOPPED;

	/* Recycle all the TX BD */
	nge_tx_recycle_all(ngep);
	nge_fini_rings(ngep);
	nge_free_bufs(ngep);

	NGE_DEBUG(("nge_m_stop($%p) done", arg));

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);
}

static int
nge_m_start(void *arg)
{
	int err;
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_start($%p)", arg));

	/*
	 * Start processing and record new MAC state
	 */
	mutex_enter(ngep->genlock);
	/*
	 * If suspended, don't start, as the resume processing
	 * will recall this function with the suspended flag off.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (EIO);
	}
	rw_enter(ngep->rwlock, RW_WRITER);
	err = nge_alloc_bufs(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_m_start: DMA buffer allocation failed");
		goto finish;
	}
	err = nge_init_rings(ngep);
	if (err != DDI_SUCCESS) {
		nge_free_bufs(ngep);
		nge_problem(ngep, "nge_init_rings() failed,err=%x", err);
		goto finish;
	}
	err = nge_restart(ngep);

	NGE_DEBUG(("nge_m_start($%p) done", arg));
finish:
	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);

	return (err == DDI_SUCCESS ? 0 : EIO);
}

static int
nge_m_unicst(void *arg, const uint8_t *macaddr)
{
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_unicst($%p)", arg));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(ngep->genlock);

	ethaddr_copy(macaddr, ngep->cur_uni_addr.addr);
	ngep->cur_uni_addr.set = 1;

	/*
	 * If we are suspended, we want to quit now, and not update
	 * the chip.  Doing so might put it in a bad state, but the
	 * resume will get the unicast address installed.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (DDI_SUCCESS);
	}
	nge_chip_sync(ngep);

	NGE_DEBUG(("nge_m_unicst($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}

static int
nge_m_promisc(void *arg, boolean_t on)
{
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_promisc($%p)", arg));

	/*
	 * Store specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(ngep->genlock);
	/*
	 * If suspended, there is no need to do anything; even
	 * recording the promiscuous mode is not necessary, as
	 * it won't be properly set on resume.  Just return failure.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (DDI_FAILURE);
	}
	if (ngep->promisc == on) {
		mutex_exit(ngep->genlock);
		NGE_DEBUG(("nge_m_promisc($%p) done", arg));
		return (0);
	}
	ngep->promisc = on;
	ngep->record_promisc = ngep->promisc;
	nge_chip_sync(ngep);
	NGE_DEBUG(("nge_m_promisc($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}

static void nge_mulparam(nge_t *ngep)
{
	uint8_t number;
	ether_addr_t pand;
	ether_addr_t por;
	mul_item *plist;

	for (number = 0; number < ETHERADDRL; number++) {
		pand[number] = 0x00;
		por[number] = 0x00;
	}
	for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) {
		for (number = 0; number < ETHERADDRL; number++) {
			pand[number] &= plist->mul_addr[number];
			por[number] |= plist->mul_addr[number];
		}
	}
	for (number = 0; number < ETHERADDRL; number++) {
		ngep->cur_mul_addr.addr[number]
		    = pand[number] & por[number];
		ngep->cur_mul_mask.addr[number]
		    = pand[number] | (~por[number]);
	}
}

static int
nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	boolean_t update;
	boolean_t b_eq;
	nge_t *ngep = arg;
	mul_item *plist;
	mul_item *plist_prev;
	mul_item *pitem;

	NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	update = B_FALSE;
	plist = plist_prev = NULL;
	mutex_enter(ngep->genlock);
	if (add) {
		if (ngep->pcur_mulist != NULL) {
			for (plist = ngep->pcur_mulist; plist != NULL;
			    plist = plist->next) {
				b_eq = ether_eq(plist->mul_addr, mca);
				if (b_eq) {
					plist->ref_cnt++;
					break;
				}
				plist_prev = plist;
			}
		}

		if (plist == NULL) {
			pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP);
			ether_copy(mca, pitem->mul_addr);
			pitem->ref_cnt++;
			pitem->next = NULL;
			if (plist_prev == NULL)
				ngep->pcur_mulist = pitem;
			else
				plist_prev->next = pitem;
			update = B_TRUE;
		}
	} else {
		if (ngep->pcur_mulist != NULL) {
			for (plist = ngep->pcur_mulist; plist != NULL;
			    plist = plist->next) {
				b_eq = ether_eq(plist->mul_addr, mca);
				if (b_eq) {
					update = B_TRUE;
					break;
				}
				plist_prev = plist;
			}

			if (update) {
				if ((plist_prev == NULL) &&
				    (plist->next == NULL))
					ngep->pcur_mulist = NULL;
				else if ((plist_prev == NULL) &&
				    (plist->next != NULL))
					ngep->pcur_mulist = plist->next;
				else
					plist_prev->next = plist->next;
				kmem_free(plist, sizeof (mul_item));
			}
		}
	}

	if (update && !ngep->suspended) {
		nge_mulparam(ngep);
		nge_chip_sync(ngep);
	}
	NGE_DEBUG(("nge_m_multicst($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}

static void
nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	int err;
	int cmd;
	nge_t *ngep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesirable
	 * state.  So just NAK it.
	 */
	mutex_enter(ngep->genlock);
	if (ngep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(ngep->genlock);
		return;
	}
	mutex_exit(ngep->genlock);

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x", cmd));
	switch (cmd) {
	default:
		NGE_LDB(NGE_DBG_BADIOC,
		    ("nge_m_ioctl: unknown cmd 0x%x", cmd));

		miocnak(wq, mp, 0, EINVAL);
		return;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d",
			    cmd, err));
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(ngep->genlock);

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		status = nge_chip_ioctl(ngep, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = nge_loop_ioctl(ngep, mp, iocp);
		break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status));

	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

	default:
		break;
	}

	mutex_exit(ngep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {

	default:
	case IOC_INVAL:
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static boolean_t
nge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
nge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	nge_t *ngep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	link_flowctrl_t fl;

	mutex_enter(ngep->genlock);
	if (ngep->param_loop_mode != NGE_LOOP_NONE &&
	    nge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(ngep->genlock);
		return (EBUSY);
	}
	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		ngep->param_en_1000fdx = *(uint8_t *)pr_val;
		ngep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100FDX_CAP:
		ngep->param_en_100fdx = *(uint8_t *)pr_val;
		ngep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100HDX_CAP:
		ngep->param_en_100hdx = *(uint8_t *)pr_val;
		ngep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10FDX_CAP:
		ngep->param_en_10fdx = *(uint8_t *)pr_val;
		ngep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10HDX_CAP:
		ngep->param_en_10hdx = *(uint8_t *)pr_val;
		ngep->param_adv_10hdx = *(uint8_t *)pr_val;
	reprogram:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
	case MAC_PROP_EN_1000HDX_CAP:
		err = ENOTSUP; /* read-only prop. Can't set this */
		break;
	case MAC_PROP_AUTONEG:
		ngep->param_adv_autoneg = *(uint8_t *)pr_val;
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;
	case MAC_PROP_MTU:
		cur_mtu = ngep->default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < ETHERMTU ||
		    new_mtu > NGE_MAX_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > ETHERMTU) &&
		    (!ngep->dev_spec_param.jumbo)) {
			err = EINVAL;
			break;
		}
		if (ngep->nge_mac_state == NGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}

		ngep->default_mtu = new_mtu;
		if (ngep->default_mtu > ETHERMTU &&
		    ngep->default_mtu <= NGE_MTU_2500) {
			ngep->buf_size = NGE_JB2500_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->default_mtu > NGE_MTU_2500 &&
		    ngep->default_mtu <= NGE_MTU_4500) {
			ngep->buf_size = NGE_JB4500_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->default_mtu > NGE_MTU_4500 &&
		    ngep->default_mtu <= NGE_MAX_MTU) {
			ngep->buf_size = NGE_JB9000_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->default_mtu > NGE_MAX_MTU) {
			ngep->default_mtu = NGE_MAX_MTU;
			ngep->buf_size = NGE_JB9000_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->lowmem_mode != 0) {
			ngep->default_mtu = ETHERMTU;
			ngep->buf_size = NGE_STD_BUFSZ;
			ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_32;
		} else {
			ngep->default_mtu = ETHERMTU;
			ngep->buf_size = NGE_STD_BUFSZ;
			ngep->tx_desc =
			    ngep->dev_spec_param.tx_desc_num;
			ngep->rx_desc =
			    ngep->dev_spec_param.rx_desc_num;
			ngep->rx_buf =
			    ngep->dev_spec_param.rx_desc_num * 2;
			ngep->nge_split =
			    ngep->dev_spec_param.nge_split;
		}

		err = mac_maxsdu_update(ngep->mh, ngep->default_mtu);

		break;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			ngep->param_adv_pause = 0;
			ngep->param_adv_asym_pause = 0;

			ngep->param_link_rx_pause = B_FALSE;
			ngep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			if (!((ngep->param_lp_pause == 0) &&
			    (ngep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			ngep->param_adv_pause = 1;
			ngep->param_adv_asym_pause = 1;

			ngep->param_link_rx_pause = B_TRUE;
			ngep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			if (!((ngep->param_lp_pause == 1) &&
			    (ngep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			ngep->param_adv_pause = 0;
			ngep->param_adv_asym_pause = 1;

			ngep->param_link_rx_pause = B_FALSE;
			ngep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			if (ngep->param_lp_pause != 1) {
				err = EINVAL;
				break;
			}
			ngep->param_adv_pause = 1;

			ngep->param_link_rx_pause = B_TRUE;
			ngep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			(*ngep->physops->phys_update)(ngep);
			nge_chip_sync(ngep);
		}

		break;
	case MAC_PROP_PRIVATE:
		err = nge_set_priv_prop(ngep, pr_name, pr_valsize,
		    pr_val);
		if (err == 0) {
			(*ngep->physops->phys_update)(ngep);
			nge_chip_sync(ngep);
		}
		break;
	default:
		err = ENOTSUP;
	}
	mutex_exit(ngep->genlock);
	return (err);
}

static int
nge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	nge_t *ngep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);

	*perm = MAC_PROP_PERM_RW;

	bzero(pr_val, pr_valsize);

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize >= sizeof (link_duplex_t)) {
			bcopy(&ngep->param_link_duplex, pr_val,
			    sizeof (link_duplex_t));
		} else
			err = EINVAL;
		break;
	case MAC_PROP_SPEED:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize >= sizeof (uint64_t)) {
			speed = ngep->param_link_speed * 1000000ull;
			bcopy(&speed, pr_val, sizeof (speed));
		} else
			err = EINVAL;
		break;
	case MAC_PROP_AUTONEG:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_autoneg;
		}
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize >= sizeof (link_flowctrl_t)) {
			if (pr_flags & MAC_PROP_DEFAULT) {
				fl = LINK_FLOWCTRL_BI;
				bcopy(&fl, pr_val, sizeof (fl));
				break;
			}
			if (ngep->param_link_rx_pause &&
			    !ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_RX;

			if (!ngep->param_link_rx_pause &&
			    !ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_NONE;

			if (!ngep->param_link_rx_pause &&
			    ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_TX;

			if (ngep->param_link_rx_pause &&
			    ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_BI;
			bcopy(&fl, pr_val, sizeof (fl));
		} else
			err = EINVAL;
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_1000fdx;
		}
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_1000fdx;
		}
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 0;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_1000hdx;
		}
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 0;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_1000hdx;
		}
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_100fdx;
		}
		break;
	case MAC_PROP_EN_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_100fdx;
		}
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_100hdx;
		}
		break;
	case MAC_PROP_EN_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_100hdx;
		}
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_10fdx;
		}
		break;
	case MAC_PROP_EN_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_10fdx;
		}
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_10hdx;
		}
		break;
	case MAC_PROP_EN_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_10hdx;
		}
		break;
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val = 0;
		break;
	case MAC_PROP_PRIVATE:
		err = nge_get_priv_prop(ngep, pr_name, pr_flags,
		    pr_valsize, pr_val);
		break;
	default:
		err = ENOTSUP;
	}
	return (err);
}

/* ARGSUSED */
static int
nge_set_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_MAX_SDU) {
			err = EINVAL;
		} else {
			ngep->param_txbcopy_threshold = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_MAX_SDU) {
			err = EINVAL;
		} else {
			ngep->param_rxbcopy_threshold = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_recv_max_packet") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
			err = EINVAL;
		} else {
			ngep->param_recv_max_packet = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_poll_quiet_time") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > 10000) {
			err = EINVAL;
		} else {
			ngep->param_poll_quiet_time = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_poll_busy_time") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > 10000) {
			err = EINVAL;
		} else {
			ngep->param_poll_busy_time = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
			err = EINVAL;
		} else {
			ngep->param_rx_intr_hwater = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
			err = EINVAL;
		} else {
			ngep->param_rx_intr_lwater = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_tx_n_intr") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 1 || result > 10000) {
			err = EINVAL;
		} else {
			ngep->param_tx_n_intr = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}

	err = ENOTSUP;
	return (err);

reprogram:
	if (err == 0) {
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
	}

	return (err);
}

static int
nge_get_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	int err = ENOTSUP;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = (is_default ? 1 : ngep->param_adv_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = (is_default ? 1 : ngep->param_adv_asym_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
		value = (is_default ? NGE_TX_COPY_SIZE :
		    ngep->param_txbcopy_threshold);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
		value = (is_default ? NGE_RX_COPY_SIZE :
		    ngep->param_rxbcopy_threshold);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_recv_max_packet") == 0) {
		value = (is_default ? 128 : ngep->param_recv_max_packet);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_poll_quiet_time") == 0) {
		value = (is_default ? NGE_POLL_QUIET_TIME :
		    ngep->param_poll_quiet_time);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_poll_busy_time") == 0) {
		value = (is_default ? NGE_POLL_BUSY_TIME :
		    ngep->param_poll_busy_time);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
		value = (is_default ? 1 : ngep->param_rx_intr_hwater);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
		value = (is_default ? 8 : ngep->param_rx_intr_lwater);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_tx_n_intr") == 0) {
		value = (is_default ? NGE_TX_N_INTR :
		    ngep->param_tx_n_intr);
		err = 0;
		goto done;
	}

done:
	if (err == 0) {
		(void) snprintf(pr_val, pr_valsize, "%d", value);
	}
	return (err);
}

/* ARGSUSED */
static boolean_t
nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nge_t *ngep = arg;
	nge_dev_spec_param_t *dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;

		if (dev_param_p->tx_hw_checksum) {
			*hcksum_txflags = dev_param_p->tx_hw_checksum;
		} else
			return (B_FALSE);
		break;
	}
	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE, stating that we support polling is sufficient.
		 */
		break;
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

#undef	NGE_DBG
#define	NGE_DBG	NGE_DBG_INIT	/* debug flag for this code	*/
int
nge_restart(nge_t *ngep)
{
	int err = 0;
	err = nge_reset_dev(ngep);
	/* write back the promisc setting */
	ngep->promisc = ngep->record_promisc;
	nge_chip_sync(ngep);
	if (!err)
		err = nge_chip_start(ngep);

	if (err) {
		ngep->nge_mac_state = NGE_MAC_STOPPED;
		return (DDI_FAILURE);
	} else {
		ngep->nge_mac_state = NGE_MAC_STARTED;
		return (DDI_SUCCESS);
	}
}

void
nge_wake_factotum(nge_t *ngep)
{
	mutex_enter(ngep->softlock);
	if (ngep->factotum_flag == 0) {
		ngep->factotum_flag = 1;
		(void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL);
	}
	mutex_exit(ngep->softlock);
}

/*
 * High-level cyclic handler
 *
 * This routine schedules a (low-level) softint callback to the
 * factotum.
 */

static void
nge_chip_cyclic(void *arg)
{
	nge_t *ngep;

	ngep = (nge_t *)arg;

	switch (ngep->nge_chip_state) {
	default:
		return;

	case NGE_CHIP_RUNNING:
		break;

	case NGE_CHIP_FAULT:
	case NGE_CHIP_ERROR:
		break;
	}

	nge_wake_factotum(ngep);
}

static void
nge_unattach(nge_t *ngep)
{
	send_ring_t *srp;
	buff_ring_t *brp;

	srp = ngep->send;
	brp = ngep->buff;
	NGE_TRACE(("nge_unattach($%p)", (void *)ngep));

	/*
	 * Flag that no more activity may be initiated
	 */
	ngep->progress &= ~PROGRESS_READY;
	ngep->nge_mac_state = NGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
2232 * Clean up and free all NGE data structures
2233 */
2234 if (ngep->periodic_id != NULL) {
2235 ddi_periodic_delete(ngep->periodic_id);
2236 ngep->periodic_id = NULL;
2237 }
2238
2239 if (ngep->progress & PROGRESS_KSTATS)
2240 nge_fini_kstats(ngep);
2241
2242 if (ngep->progress & PROGRESS_HWINT) {
2243 mutex_enter(ngep->genlock);
2244 nge_restore_mac_addr(ngep);
2245 (void) nge_chip_stop(ngep, B_FALSE);
2246 mutex_exit(ngep->genlock);
2247 }
2248
2249 if (ngep->progress & PROGRESS_SWINT)
2250 nge_rem_intrs(ngep);
2251
2252 if (ngep->progress & PROGRESS_FACTOTUM)
2253 (void) ddi_intr_remove_softint(ngep->factotum_hdl);
2254
2255 if (ngep->progress & PROGRESS_RESCHED)
2256 (void) ddi_intr_remove_softint(ngep->resched_hdl);
2257
2258 if (ngep->progress & PROGRESS_INTR) {
2259 mutex_destroy(srp->tx_lock);
2260 mutex_destroy(srp->tc_lock);
2261 mutex_destroy(&srp->dmah_lock);
2262 mutex_destroy(brp->recycle_lock);
2263
2264 mutex_destroy(ngep->genlock);
2265 mutex_destroy(ngep->softlock);
2266 rw_destroy(ngep->rwlock);
2267 }
2268
2269 if (ngep->progress & PROGRESS_REGS)
2270 ddi_regs_map_free(&ngep->io_handle);
2271
2272 if (ngep->progress & PROGRESS_CFG)
2273 pci_config_teardown(&ngep->cfg_handle);
2274
2275 ddi_remove_minor_node(ngep->devinfo, NULL);
2276
2277 kmem_free(ngep, sizeof (*ngep));
2278 }
2279
2280 static int
2281 nge_resume(dev_info_t *devinfo)
2282 {
2283 nge_t *ngep;
2284 chip_info_t *infop;
2285 int err;
2286
2287 ASSERT(devinfo != NULL);
2288
2289 ngep = ddi_get_driver_private(devinfo);
2290 err = 0;
2291
2292 /*
2293 * If there are state inconsistencies, this is bad. Returning
2294 * DDI_FAILURE here will eventually cause the machine to panic,
2295 * so it is better to panic here, where there is some chance of
2296 * debugging the problem.
2297 */
2298 if (ngep == NULL)
2299 cmn_err(CE_PANIC,
2300 "nge: ngep returned from ddi_get_driver_private was NULL");
2301 infop = (chip_info_t *)&ngep->chipinfo;
2302
2303 if (ngep->devinfo != devinfo)
2304 cmn_err(CE_PANIC,
2305 "nge: passed devinfo not the same as saved devinfo");
2306
2307 mutex_enter(ngep->genlock);
2308 rw_enter(ngep->rwlock, RW_WRITER);
2309
2310 /*
2311 * Fetch the config space. Even though we have most of it cached,
2312 * some values *might* change across a suspend/resume.
2313 */
2314 nge_chip_cfg_init(ngep, infop, B_FALSE);
2315
2316 /*
2317 * This branch is taken in only one case: the port has not
2318 * been plumbed.
2319 */
2320 if (ngep->suspended == B_FALSE) {
2321 rw_exit(ngep->rwlock);
2322 mutex_exit(ngep->genlock);
2323 return (DDI_SUCCESS);
2324 }
2325
2326 nge_tx_recycle_all(ngep);
2327 err = nge_reinit_ring(ngep);
2328 if (!err) {
2329 err = nge_chip_reset(ngep);
2330 if (!err)
2331 err = nge_chip_start(ngep);
2332 }
2333
2334 if (err) {
2335 /*
2336 * We note the failure, but return success, as the
2337 * system is still usable without this controller.
2338 */
2339 cmn_err(CE_WARN, "nge: resume: failed to restart controller");
2340 } else {
2341 ngep->nge_mac_state = NGE_MAC_STARTED;
2342 }
2343 ngep->suspended = B_FALSE;
2344
2345 rw_exit(ngep->rwlock);
2346 mutex_exit(ngep->genlock);
2347
2348 return (DDI_SUCCESS);
2349 }
2350
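/*
 * Note on the teardown/attach pairing (illustrative commentary): each
 * successful attach step records a PROGRESS_* bit in ngep->progress, and
 * nge_unattach() above undoes only the steps whose bits are set, so it is
 * safe to call from any partially-completed attach, including the
 * attach_fail path in nge_attach() below. A minimal sketch of the same
 * idiom, using hypothetical step/undo helpers that are not part of this
 * driver, would look like:
 *
 *	if (setup_regs(ngep) == DDI_SUCCESS)
 *		ngep->progress |= PROGRESS_REGS;
 *	...
 *	if (ngep->progress & PROGRESS_REGS)
 *		teardown_regs(ngep);
 */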
2351 /*
2352 * attach(9E) -- Attach a device to the system
2353 *
2354 * Called once for each board successfully probed.
2355 */
2356 static int
2357 nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2358 {
2359 int err;
2360 int i;
2361 int instance;
2362 caddr_t regs;
2363 nge_t *ngep;
2364 chip_info_t *infop;
2365 mac_register_t *macp;
2366
2367 switch (cmd) {
2368 default:
2369 return (DDI_FAILURE);
2370
2371 case DDI_RESUME:
2372 return (nge_resume(devinfo));
2373
2374 case DDI_ATTACH:
2375 break;
2376 }
2377
2378 ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP);
2379 instance = ddi_get_instance(devinfo);
2380 ddi_set_driver_private(devinfo, ngep);
2381 ngep->devinfo = devinfo;
2382
2383 (void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d",
2384 NGE_DRIVER_NAME, instance);
2385 err = pci_config_setup(devinfo, &ngep->cfg_handle);
2386 if (err != DDI_SUCCESS) {
2387 nge_problem(ngep, "nge_attach: pci_config_setup() failed");
2388 goto attach_fail;
2389 }
2390 /*
2391 * param_txbcopy_threshold and param_rxbcopy_threshold are the tx/rx
2392 * bcopy thresholds. Bounds: min 0, max NGE_MAX_SDU
2393 */
2394 ngep->param_txbcopy_threshold = NGE_TX_COPY_SIZE;
2395 ngep->param_rxbcopy_threshold = NGE_RX_COPY_SIZE;
2396
2397 /*
2398 * param_recv_max_packet is the maximum number of packets received
2399 * per interrupt. Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024
2400 */
2401 ngep->param_recv_max_packet = 128;
2402
2403 /*
2404 * param_poll_quiet_time and param_poll_busy_time are the quiet/busy
2405 * times used to switch between per-packet interrupts and polling.
2406 * Bounds: min 0, max 10000
2407 */
2408 ngep->param_poll_quiet_time = NGE_POLL_QUIET_TIME;
2409 ngep->param_poll_busy_time = NGE_POLL_BUSY_TIME;
2410
2411 /*
2412 * param_rx_intr_hwater/param_rx_intr_lwater: packets received
2413 * to trigger the poll_quiet_time/poll_busy_time counter.
2414 * Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024.
2415 */
2416 ngep->param_rx_intr_hwater = 1;
2417 ngep->param_rx_intr_lwater = 8;
2418
2419 /*
2420 * param_tx_n_intr: recycle tx descriptors after every N tx packets
2421 * when in poll mode. Bounds: min 1, max 10000.
2422 */
2423 ngep->param_tx_n_intr = NGE_TX_N_INTR;
2424
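/*
 * The defaults above are exported as the driver-private MAC properties
 * ("_tx_bcopy_threshold", "_rx_intr_hwater", ...) handled by
 * nge_set_priv_prop()/nge_get_priv_prop() earlier in this file. For
 * illustration only, assuming the standard dladm private-property
 * syntax and a hypothetical instance nge0:
 *
 *	# dladm show-linkprop -p _rx_intr_hwater nge0
 *	# dladm set-linkprop -p _rx_intr_hwater=32 nge0
 */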
2425 infop = (chip_info_t *)&ngep->chipinfo;
2426 nge_chip_cfg_init(ngep, infop, B_FALSE);
2427 nge_init_dev_spec_param(ngep);
2428 nge_get_props(ngep);
2429 ngep->progress |= PROGRESS_CFG;
2430
2431 err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER,
2432 &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle);
2433 if (err != DDI_SUCCESS) {
2434 nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed");
2435 goto attach_fail;
2436 }
2437 ngep->io_regs = regs;
2438 ngep->progress |= PROGRESS_REGS;
2439
2440 err = nge_register_intrs_and_init_locks(ngep);
2441 if (err != DDI_SUCCESS) {
2442 nge_problem(ngep, "nge_attach:"
2443 " register intrs and init locks failed");
2444 goto attach_fail;
2445 }
2446 nge_init_ring_param_lock(ngep);
2447 ngep->progress |= PROGRESS_INTR;
2448
2449 mutex_enter(ngep->genlock);
2450
2451 /*
2452 * Initialise link state variables.
2453 * Stop, reset & reinitialise the chip.
2454 * Initialise the (internal) PHY.
2455 */
2456 nge_phys_init(ngep);
2457 ngep->nge_chip_state = NGE_CHIP_INITIAL;
2458 err = nge_chip_reset(ngep);
2459 if (err != DDI_SUCCESS) {
2460 nge_problem(ngep, "nge_attach: nge_chip_reset() failed");
2461 mutex_exit(ngep->genlock);
2462 goto attach_fail;
2463 }
2464 nge_chip_sync(ngep);
2465
2466 /*
2467 * Now that mutex locks are initialized, enable interrupts.
2468 */
2469 if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
2470 /* Call ddi_intr_block_enable() for MSI interrupts */
2471 (void) ddi_intr_block_enable(ngep->htable,
2472 ngep->intr_actual_cnt);
2473 } else {
2474 /* Call ddi_intr_enable for MSI or FIXED interrupts */
2475 for (i = 0; i < ngep->intr_actual_cnt; i++) {
2476 (void) ddi_intr_enable(ngep->htable[i]);
2477 }
2478 }
2479
2480 ngep->link_state = LINK_STATE_UNKNOWN;
2481 ngep->progress |= PROGRESS_HWINT;
2482
2483 /*
2484 * Register NDD-tweakable parameters
2485 */
2486 if (nge_nd_init(ngep)) {
2487 nge_problem(ngep, "nge_attach: nge_nd_init() failed");
2488 mutex_exit(ngep->genlock);
2489 goto attach_fail;
2490 }
2491 ngep->progress |= PROGRESS_NDD;
2492
2493 /*
2494 * Create & initialise named kstats
2495 */
2496 nge_init_kstats(ngep, instance);
2497 ngep->progress |= PROGRESS_KSTATS;
2498
2499 mutex_exit(ngep->genlock);
2500
2501 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2502 goto attach_fail;
2503 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2504 macp->m_driver = ngep;
2505 macp->m_dip = devinfo;
2506 macp->m_src_addr = infop->vendor_addr.addr;
2507 macp->m_callbacks = &nge_m_callbacks;
2508 macp->m_min_sdu = 0;
2509 macp->m_max_sdu = ngep->default_mtu;
2510 macp->m_margin = VTAG_SIZE;
2511 macp->m_priv_props = nge_priv_props;
2512 macp->m_priv_prop_count = NGE_MAX_PRIV_PROPS;
2513 /*
2514 * Finally, we're ready to register ourselves with the mac
2515 * interface; if this succeeds, we're all ready to start()
2516 */
2517 err = mac_register(macp, &ngep->mh);
2518 mac_free(macp);
2519 if (err != 0)
2520 goto attach_fail;
2521
2522 /*
2523 * Register a periodic handler.
2524 * nge_chip_cyclic() is invoked in kernel context.
2525 */
2526 ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
2527 NGE_CYCLIC_PERIOD, DDI_IPL_0);
2528
2529 ngep->progress |= PROGRESS_READY;
2530 return (DDI_SUCCESS);
2531
2532 attach_fail:
2533 nge_unattach(ngep);
2534 return (DDI_FAILURE);
2535 }
2536
2537 static int
2538 nge_suspend(nge_t *ngep)
2539 {
2540 mutex_enter(ngep->genlock);
2541 rw_enter(ngep->rwlock, RW_WRITER);
2542
2543 /* if the port hasn't been plumbed, just return */
2544 if (ngep->nge_mac_state != NGE_MAC_STARTED) {
2545 rw_exit(ngep->rwlock);
2546 mutex_exit(ngep->genlock);
2547 return (DDI_SUCCESS);
2548 }
2549 ngep->suspended = B_TRUE;
2550 (void) nge_chip_stop(ngep, B_FALSE);
2551 ngep->nge_mac_state = NGE_MAC_STOPPED;
2552
2553 rw_exit(ngep->rwlock);
2554 mutex_exit(ngep->genlock);
2555 return (DDI_SUCCESS);
2556 }
2557
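/*
 * nge_suspend() and nge_resume() are intended to be symmetric: suspend
 * only stops the chip when the port is actually plumbed (nge_mac_state ==
 * NGE_MAC_STARTED) and records that in ngep->suspended; resume only
 * reinitialises the rings and restarts the chip when that flag is set,
 * otherwise it simply refreshes the cached config space and returns.
 */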
2558 /*
2559 * detach(9E) -- Detach a device from the system
2560 */
2561 static int
2562 nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
2563 {
2564 int i;
2565 nge_t *ngep;
2566 mul_item *p, *nextp;
2567 buff_ring_t *brp;
2568
2569 NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd));
2570
2571 ngep = ddi_get_driver_private(devinfo);
2572 brp = ngep->buff;
2573
2574 switch (cmd) {
2575 default:
2576 return (DDI_FAILURE);
2577
2578 case DDI_SUSPEND:
2579 /*
2580 * Stop the NIC
2581 * Note: This driver doesn't currently support WOL, but
2582 * should it in the future, it is important to
2583 * make sure the PHY remains powered so that the
2584 * wakeup packet can actually be received.
2585 */
2586 return (nge_suspend(ngep));
2587
2588 case DDI_DETACH:
2589 break;
2590 }
2591
2592 /* Wait for all buffers posted to the upper layer to be released */
2593 for (i = 0; i < 1000; i++) {
2594 if (brp->rx_hold == 0)
2595 break;
2596 drv_usecwait(1000);
2597 }
2598
2599 /* If any buffers are still posted, refuse to detach */
2600 if (brp->rx_hold != 0)
2601 return (DDI_FAILURE);
2602
2603 /*
2604 * Unregister from the GLD subsystem. This can fail, in
2605 * particular if there are DLPI style-2 streams still open -
2606 * in which case we just return failure without shutting
2607 * down chip operations.
2608 */
2609 if (mac_unregister(ngep->mh) != DDI_SUCCESS)
2610 return (DDI_FAILURE);
2611
2612 /*
2613 * Recycle the multicast table. mac_unregister() is called first
2614 * so that the multicast table remains usable if mac_unregister()
2615 * fails.
2616 */
2617 for (p = ngep->pcur_mulist; p != NULL; p = nextp) {
2618 nextp = p->next;
2619 kmem_free(p, sizeof (mul_item));
2620 }
2621 ngep->pcur_mulist = NULL;
2622
2623 /*
2624 * All activity stopped, so we can clean up & exit
2625 */
2626 nge_unattach(ngep);
2627 return (DDI_SUCCESS);
2628 }
2629
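/*
 * Timing note for the rx_hold loop in nge_detach() above: the loop polls
 * up to 1000 times with drv_usecwait(1000), i.e. it waits for at most
 * roughly 1000 * 1000us = 1 second for receive buffers loaned to the
 * upper layer to be returned before giving up and failing the detach.
 */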
2630 /*
2631 * quiesce(9E) entry point.
2632 *
2633 * This function is called when the system is single-threaded at high
2634 * PIL with preemption disabled. Therefore, this function must not
2635 * block.
2636 *
2637 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
2638 * DDI_FAILURE indicates an error condition and should almost never happen.
2639 */
2640 static int
2641 nge_quiesce(dev_info_t *devinfo)
2642 {
2643 nge_t *ngep;
2644
2645 ngep = ddi_get_driver_private(devinfo);
2646
2647 if (ngep == NULL)
2648 return (DDI_FAILURE);
2649
2650 /*
2651 * Turn off debug tracing
2652 */
2653 nge_debug = 0;
2654 ngep->debug = 0;
2655
2656 nge_restore_mac_addr(ngep);
2657 (void) nge_chip_stop(ngep, B_FALSE);
2658
2659 return (DDI_SUCCESS);
2660 }
2661
2662
2663
2664 /*
2665 * ========== Module Loading Data & Entry Points ==========
2666 */
2667
2668 DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach,
2669 NULL, NULL, D_MP, NULL, nge_quiesce);
2670
2671
2672 static struct modldrv nge_modldrv = {
2673 &mod_driverops, /* Type of module. This one is a driver */
2674 nge_ident, /* short description */
2675 &nge_dev_ops /* driver specific ops */
2676 };
2677
2678 static struct modlinkage modlinkage = {
2679 MODREV_1, (void *)&nge_modldrv, NULL
2680 };
2681
2682
2683 int
2684 _info(struct modinfo *modinfop)
2685 {
2686 return (mod_info(&modlinkage, modinfop));
2687 }
2688
2689 int
2690 _init(void)
2691 {
2692 int status;
2693
2694 mac_init_ops(&nge_dev_ops, "nge");
2695 status = mod_install(&modlinkage);
2696 if (status != DDI_SUCCESS)
2697 mac_fini_ops(&nge_dev_ops);
2698 else
2699 mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL);
2700
2701 return (status);
2702 }
2703
2704 int
2705 _fini(void)
2706 {
2707 int status;
2708
2709 status = mod_remove(&modlinkage);
2710 if (status == DDI_SUCCESS) {
2711 mac_fini_ops(&nge_dev_ops);
2712 mutex_destroy(nge_log_mutex);
2713 }
2714
2715 return (status);
2716 }
2717
2718 /*
2719 * ============ Init MSI/Fixed/SoftInterrupt routines ==============
2720 */
2721
2722 /*
2723 * Register interrupts and initialize the mutexes and condition variables
2724 */
2725
2726 static int
2727 nge_register_intrs_and_init_locks(nge_t *ngep)
2728 {
2729 int err;
2730 int intr_types;
2731 uint_t soft_prip;
2732 nge_msi_mask msi_mask;
2733 nge_msi_map0_vec map0_vec;
2734 nge_msi_map1_vec map1_vec;
2735
2736 /*
2737 * Add the softint handlers:
2738 *
2739 * Both of these handlers are used to avoid restrictions on the
2740 * context and/or mutexes required for some operations. In
2741 * particular, the hardware interrupt handler and its subfunctions
2742 * can detect a number of conditions that we don't want to handle
2743 * in that context or with that set of mutexes held. So, these
2744 * softints are triggered instead:
2745 *
2746 * the <resched> softint is triggered if we have previously
2747 * had to refuse to send a packet because of resource shortage
2748 * (we've run out of transmit buffers), but the send completion
2749 * interrupt handler has now detected that more buffers have
2750 * become available. Its only purpose is to call gld_sched()
2751 * to retry the pending transmits (we're not allowed to hold
2752 * driver-defined mutexes across gld_sched()).
2753 *
2754 * the <factotum> is triggered if the h/w interrupt handler
2755 * sees the <link state changed> or <error> bits in the status
2756 * block. It's also triggered periodically to poll the link
2757 * state, just in case we aren't getting link status change
2758 * interrupts ...
2759 */
2760 err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl,
2761 DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep);
2762 if (err != DDI_SUCCESS) {
2763 nge_problem(ngep,
2764 "nge_attach: add nge_reschedule softintr failed");
2765
2766 return (DDI_FAILURE);
2767 }
2768 ngep->progress |= PROGRESS_RESCHED;
2769 err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl,
2770 DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep);
2771 if (err != DDI_SUCCESS) {
2772 nge_problem(ngep,
2773 "nge_attach: add nge_chip_factotum softintr failed!");
2774
2775 return (DDI_FAILURE);
2776 }
2777 if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip)
2778 != DDI_SUCCESS) {
2779 nge_problem(ngep, "nge_attach: get softintr priority failed\n");
2780
2781 return (DDI_FAILURE);
2782 }
2783 ngep->soft_pri = soft_prip;
2784
2785 ngep->progress |= PROGRESS_FACTOTUM;
2786 /* Get supported interrupt types */
2787 if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types)
2788 != DDI_SUCCESS) {
2789 nge_error(ngep, "ddi_intr_get_supported_types failed\n");
2790
2791 return (DDI_FAILURE);
2792 }
2793
2794 NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x",
2795 intr_types));
2796
2797 if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) {
2798
2799 /* MSI configuration for the MCP55 chipset */
2800 if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
2801 ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
2802
2803
2804 /* Enable the 8 vectors */
2805 msi_mask.msi_mask_val =
2806 nge_reg_get32(ngep, NGE_MSI_MASK);
2807 msi_mask.msi_msk_bits.vec0 = NGE_SET;
2808 msi_mask.msi_msk_bits.vec1 = NGE_SET;
2809 msi_mask.msi_msk_bits.vec2 = NGE_SET;
2810 msi_mask.msi_msk_bits.vec3 = NGE_SET;
2811 msi_mask.msi_msk_bits.vec4 = NGE_SET;
2812 msi_mask.msi_msk_bits.vec5 = NGE_SET;
2813 msi_mask.msi_msk_bits.vec6 = NGE_SET;
2814 msi_mask.msi_msk_bits.vec7 = NGE_SET;
2815 nge_reg_put32(ngep, NGE_MSI_MASK,
2816 msi_mask.msi_mask_val);
2817
2818 /*
2819 * Remap MSI MAP0 and MAP1. By default the MCP55
2820 * maps all interrupts to vector 0, so software
2821 * must remap them. This mapping is the same as
2822 * on the CK804.
2823 */ 2824 map0_vec.msi_map0_val = 2825 nge_reg_get32(ngep, NGE_MSI_MAP0); 2826 map1_vec.msi_map1_val = 2827 nge_reg_get32(ngep, NGE_MSI_MAP1); 2828 map0_vec.vecs_bits.reint_vec = 0; 2829 map0_vec.vecs_bits.rcint_vec = 0; 2830 map0_vec.vecs_bits.miss_vec = 3; 2831 map0_vec.vecs_bits.teint_vec = 5; 2832 map0_vec.vecs_bits.tcint_vec = 5; 2833 map0_vec.vecs_bits.stint_vec = 2; 2834 map0_vec.vecs_bits.mint_vec = 6; 2835 map0_vec.vecs_bits.rfint_vec = 0; 2836 map1_vec.vecs_bits.tfint_vec = 5; 2837 map1_vec.vecs_bits.feint_vec = 6; 2838 map1_vec.vecs_bits.resv8_11 = 3; 2839 map1_vec.vecs_bits.resv12_15 = 1; 2840 map1_vec.vecs_bits.resv16_19 = 0; 2841 map1_vec.vecs_bits.resv20_23 = 7; 2842 map1_vec.vecs_bits.resv24_31 = 0xff; 2843 nge_reg_put32(ngep, NGE_MSI_MAP0, 2844 map0_vec.msi_map0_val); 2845 nge_reg_put32(ngep, NGE_MSI_MAP1, 2846 map1_vec.msi_map1_val); 2847 } 2848 if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 2849 NGE_DEBUG(("MSI registration failed, " 2850 "trying FIXED interrupt type\n")); 2851 } else { 2852 nge_log(ngep, "Using MSI interrupt type\n"); 2853 2854 ngep->intr_type = DDI_INTR_TYPE_MSI; 2855 ngep->progress |= PROGRESS_SWINT; 2856 } 2857 } 2858 2859 if (!(ngep->progress & PROGRESS_SWINT) && 2860 (intr_types & DDI_INTR_TYPE_FIXED)) { 2861 if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 2862 nge_error(ngep, "FIXED interrupt " 2863 "registration failed\n"); 2864 2865 return (DDI_FAILURE); 2866 } 2867 2868 nge_log(ngep, "Using FIXED interrupt type\n"); 2869 2870 ngep->intr_type = DDI_INTR_TYPE_FIXED; 2871 ngep->progress |= PROGRESS_SWINT; 2872 } 2873 2874 2875 if (!(ngep->progress & PROGRESS_SWINT)) { 2876 nge_error(ngep, "No interrupts registered\n"); 2877 2878 return (DDI_FAILURE); 2879 } 2880 mutex_init(ngep->genlock, NULL, MUTEX_DRIVER, 2881 DDI_INTR_PRI(ngep->intr_pri)); 2882 mutex_init(ngep->softlock, NULL, MUTEX_DRIVER, 2883 DDI_INTR_PRI(ngep->soft_pri)); 2884 rw_init(ngep->rwlock, NULL, RW_DRIVER, 2885 DDI_INTR_PRI(ngep->intr_pri)); 2886 2887 return (DDI_SUCCESS); 2888 } 2889 2890 /* 2891 * nge_add_intrs: 2892 * 2893 * Register FIXED or MSI interrupts. 
2894 */ 2895 static int 2896 nge_add_intrs(nge_t *ngep, int intr_type) 2897 { 2898 dev_info_t *dip = ngep->devinfo; 2899 int avail, actual, intr_size, count = 0; 2900 int i, flag, ret; 2901 2902 NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type)); 2903 2904 /* Get number of interrupts */ 2905 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 2906 if ((ret != DDI_SUCCESS) || (count == 0)) { 2907 nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, " 2908 "count: %d", ret, count); 2909 2910 return (DDI_FAILURE); 2911 } 2912 2913 /* Get number of available interrupts */ 2914 ret = ddi_intr_get_navail(dip, intr_type, &avail); 2915 if ((ret != DDI_SUCCESS) || (avail == 0)) { 2916 nge_error(ngep, "ddi_intr_get_navail() failure, " 2917 "ret: %d, avail: %d\n", ret, avail); 2918 2919 return (DDI_FAILURE); 2920 } 2921 2922 if (avail < count) { 2923 NGE_DEBUG(("nitrs() returned %d, navail returned %d\n", 2924 count, avail)); 2925 } 2926 flag = DDI_INTR_ALLOC_NORMAL; 2927 2928 /* Allocate an array of interrupt handles */ 2929 intr_size = count * sizeof (ddi_intr_handle_t); 2930 ngep->htable = kmem_alloc(intr_size, KM_SLEEP); 2931 2932 /* Call ddi_intr_alloc() */ 2933 ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0, 2934 count, &actual, flag); 2935 2936 if ((ret != DDI_SUCCESS) || (actual == 0)) { 2937 nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret); 2938 2939 kmem_free(ngep->htable, intr_size); 2940 return (DDI_FAILURE); 2941 } 2942 2943 if (actual < count) { 2944 NGE_DEBUG(("Requested: %d, Received: %d\n", 2945 count, actual)); 2946 } 2947 2948 ngep->intr_actual_cnt = actual; 2949 ngep->intr_req_cnt = count; 2950 2951 /* 2952 * Get priority for first msi, assume remaining are all the same 2953 */ 2954 if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) != 2955 DDI_SUCCESS) { 2956 nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret); 2957 2958 /* Free already allocated intr */ 2959 for (i = 0; i < actual; i++) { 2960 (void) ddi_intr_free(ngep->htable[i]); 2961 } 2962 2963 kmem_free(ngep->htable, intr_size); 2964 2965 return (DDI_FAILURE); 2966 } 2967 /* Test for high level mutex */ 2968 if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) { 2969 nge_error(ngep, "nge_add_intrs:" 2970 "Hi level interrupt not supported"); 2971 2972 for (i = 0; i < actual; i++) 2973 (void) ddi_intr_free(ngep->htable[i]); 2974 2975 kmem_free(ngep->htable, intr_size); 2976 2977 return (DDI_FAILURE); 2978 } 2979 2980 2981 /* Call ddi_intr_add_handler() */ 2982 for (i = 0; i < actual; i++) { 2983 if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr, 2984 (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 2985 nge_error(ngep, "ddi_intr_add_handler() " 2986 "failed %d\n", ret); 2987 2988 /* Free already allocated intr */ 2989 for (i = 0; i < actual; i++) { 2990 (void) ddi_intr_free(ngep->htable[i]); 2991 } 2992 2993 kmem_free(ngep->htable, intr_size); 2994 2995 return (DDI_FAILURE); 2996 } 2997 } 2998 2999 if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap)) 3000 != DDI_SUCCESS) { 3001 nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret); 3002 3003 for (i = 0; i < actual; i++) { 3004 (void) ddi_intr_remove_handler(ngep->htable[i]); 3005 (void) ddi_intr_free(ngep->htable[i]); 3006 } 3007 3008 kmem_free(ngep->htable, intr_size); 3009 3010 return (DDI_FAILURE); 3011 } 3012 3013 return (DDI_SUCCESS); 3014 } 3015 3016 /* 3017 * nge_rem_intrs: 3018 * 3019 * Unregister FIXED or MSI interrupts 3020 */ 3021 static void 3022 nge_rem_intrs(nge_t *ngep) 3023 { 3024 int i; 3025 
3026 NGE_DEBUG(("nge_rem_intrs\n")); 3027 3028 /* Disable all interrupts */ 3029 if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3030 /* Call ddi_intr_block_disable() */ 3031 (void) ddi_intr_block_disable(ngep->htable, 3032 ngep->intr_actual_cnt); 3033 } else { 3034 for (i = 0; i < ngep->intr_actual_cnt; i++) { 3035 (void) ddi_intr_disable(ngep->htable[i]); 3036 } 3037 } 3038 3039 /* Call ddi_intr_remove_handler() */ 3040 for (i = 0; i < ngep->intr_actual_cnt; i++) { 3041 (void) ddi_intr_remove_handler(ngep->htable[i]); 3042 (void) ddi_intr_free(ngep->htable[i]); 3043 } 3044 3045 kmem_free(ngep->htable, 3046 ngep->intr_req_cnt * sizeof (ddi_intr_handle_t)); 3047 } 3048
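/*
 * Bookkeeping note for nge_add_intrs()/nge_rem_intrs() above: the handle
 * array ngep->htable is allocated for intr_req_cnt entries (the count
 * reported by ddi_intr_get_nintrs()), while ddi_intr_alloc() may return
 * fewer handles (intr_actual_cnt). The enable, disable, remove-handler
 * and free loops therefore iterate over intr_actual_cnt, but the final
 * kmem_free() must use intr_req_cnt so that its size matches the
 * original kmem_alloc().
 */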