/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */


#include "nge.h"

/*
 * Describes the chip's DMA engine
 */

static ddi_dma_attr_t hot_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	0
};

static ddi_dma_attr_t hot_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen	*/
	1,				/* dma_attr_granular	*/
	0
};

static ddi_dma_attr_t sum_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	0
};

static ddi_dma_attr_t sum_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen	*/
	1,				/* dma_attr_granular	*/
	0
};

/*
 * DMA access attributes for data.
 */
ddi_device_acc_attr_t nge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t nge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t nge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * NIC DESC MODE 2
 */

static const nge_desc_attr_t nge_sum_desc = {

	sizeof (sum_rx_bd),
	sizeof (sum_tx_bd),
	&sum_dma_attr,
	&sum_tx_dma_attr,
	nge_sum_rxd_fill,
	nge_sum_rxd_check,
	nge_sum_txd_fill,
	nge_sum_txd_check,
};

/*
 * NIC DESC MODE 3
 */

static const nge_desc_attr_t nge_hot_desc = {

	sizeof (hot_rx_bd),
	sizeof (hot_tx_bd),
	&hot_dma_attr,
	&hot_tx_dma_attr,
	nge_hot_rxd_fill,
	nge_hot_rxd_check,
	nge_hot_txd_fill,
	nge_hot_txd_check,
};

static char nge_ident[] = "nVidia 1Gb Ethernet";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char debug_propname[] = "nge-debug-flags";
static char intr_moderation[] = "intr-moderation";
static char rx_data_hw[] = "rx-data-hw";
static char rx_prd_lw[] = "rx-prd-lw";
static char rx_prd_hw[] = "rx-prd-hw";
static char sw_intr_intv[] = "sw-intr-intvl";
static char nge_desc_mode[] = "desc-mode";
static char default_mtu[] = "default_mtu";
static char low_memory_mode[] = "minimal-memory-usage";
extern kmutex_t nge_log_mutex[1];

static int nge_m_start(void *);
static void nge_m_stop(void *);
static int nge_m_promisc(void *, boolean_t);
static int nge_m_multicst(void *, boolean_t, const uint8_t *);
static int nge_m_unicst(void *, const uint8_t *);
static void nge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t nge_m_getcapab(void *, mac_capab_t, void *);
static int nge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int nge_set_priv_prop(nge_t *, const char *, uint_t,
    const void *);
static int nge_get_priv_prop(nge_t *, const char *, uint_t,
    uint_t, void *);

#define	NGE_M_CALLBACK_FLAGS\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t nge_m_callbacks = {
	NGE_M_CALLBACK_FLAGS,
	nge_m_stat,
	nge_m_start,
	nge_m_stop,
	nge_m_promisc,
	nge_m_multicst,
	nge_m_unicst,
	nge_m_tx,
	nge_m_ioctl,
	nge_m_getcapab,
	NULL,
	NULL,
	nge_m_setprop,
	nge_m_getprop
};

mac_priv_prop_t nge_priv_props[] = {
	{"_tx_bcopy_threshold", MAC_PROP_PERM_RW},
	{"_rx_bcopy_threshold", MAC_PROP_PERM_RW},
	{"_recv_max_packet", MAC_PROP_PERM_RW},
	{"_poll_quiet_time", MAC_PROP_PERM_RW},
	{"_poll_busy_time", MAC_PROP_PERM_RW},
	{"_rx_intr_hwater", MAC_PROP_PERM_RW},
	{"_rx_intr_lwater", MAC_PROP_PERM_RW},
	{"_tx_n_intr", MAC_PROP_PERM_RW}
};

#define	NGE_MAX_PRIV_PROPS \
	(sizeof (nge_priv_props)/sizeof (mac_priv_prop_t))

static int nge_add_intrs(nge_t *, int);
static void nge_rem_intrs(nge_t *);
static int nge_register_intrs_and_init_locks(nge_t *);

/*
 * NGE MSI tunable:
 */
boolean_t nge_enable_msi = B_FALSE;

static enum ioc_reply
nge_set_loop_mode(nge_t *ngep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == ngep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case NGE_LOOP_NONE:
	case NGE_LOOP_EXTERNAL_100:
	case NGE_LOOP_EXTERNAL_10:
	case NGE_LOOP_INTERNAL_PHY:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	ngep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_INIT

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly. The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
void
nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	size_t totsize;

	totsize = qty*size;
	ASSERT(size > 0);
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
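
/*
 * Illustrative sketch only (not part of the driver): carving a
 * descriptor slice out of a previously allocated chunk might look
 * like this, where <area> is a dma_area_t filled in by
 * nge_alloc_dma_mem() and <nslots> is a hypothetical slot count:
 *
 *	dma_area_t slice;
 *
 *	nge_slice_chunk(&slice, &area, nslots, ngep->desc_attr.rxd_size);
 *
 * After the call <slice> describes the first nslots * rxd_size bytes,
 * and <area>'s mem_va, offset, cookie and alength have been advanced
 * past them, ready for the next slice.
 */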

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
int
nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	int err;
	caddr_t va;

	NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)ngep, memsize, attr_p, dma_flags, dma_p));
	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		goto fail;

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->offset = 0;

	return (DDI_SUCCESS);

fail:
	nge_free_dma_mem(dma_p);
	NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!"));

	return (DDI_FAILURE);
}

/*
 * Free one allocated area of DMAable memory
 */
void
nge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
	}
	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
	if (dma_p->dma_hdl != NULL) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}
}
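
/*
 * Illustrative sketch only (not part of the driver): a typical caller
 * pairs the two routines above, and a partial bind (ncookies != 1) is
 * already treated as a failure inside nge_alloc_dma_mem(), e.g.
 *
 *	dma_area_t area;
 *
 *	if (nge_alloc_dma_mem(ngep, size, &nge_desc_accattr,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &area) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	...
 *	nge_free_dma_mem(&area);
 *
 * Here <size> is a hypothetical byte count; nge_alloc_bufs() below is
 * the real example of this pattern.
 */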

#define	ALLOC_TX_BUF	0x1
#define	ALLOC_TX_DESC	0x2
#define	ALLOC_RX_DESC	0x4

int
nge_alloc_bufs(nge_t *ngep)
{
	int err;
	int split;
	int progress;
	size_t txbuffsize;
	size_t rxdescsize;
	size_t txdescsize;

	txbuffsize = ngep->tx_desc * ngep->buf_size;
	rxdescsize = ngep->rx_desc;
	txdescsize = ngep->tx_desc;
	rxdescsize *= ngep->desc_attr.rxd_size;
	txdescsize *= ngep->desc_attr.txd_size;
	progress = 0;

	NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep));
	/*
	 * Allocate memory & handles for TX buffers
	 */
	ASSERT((txbuffsize % ngep->nge_split) == 0);
	for (split = 0; split < ngep->nge_split; ++split) {
		err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split,
		    &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE,
		    &ngep->send->buf[split]);
		if (err != DDI_SUCCESS)
			goto fail;
	}

	progress |= ALLOC_TX_BUF;

	/*
	 * Allocate memory & handles for receive return rings and
	 * buffer (producer) descriptor rings
	 */
	err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc);
	if (err != DDI_SUCCESS)
		goto fail;
	progress |= ALLOC_RX_DESC;

	/*
	 * Allocate memory & handles for TX descriptor rings
	 */
	err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc);
	if (err != DDI_SUCCESS)
		goto fail;
	return (DDI_SUCCESS);

fail:
	if (progress & ALLOC_RX_DESC)
		nge_free_dma_mem(&ngep->recv->desc);
	if (progress & ALLOC_TX_BUF) {
		for (split = 0; split < ngep->nge_split; ++split)
			nge_free_dma_mem(&ngep->send->buf[split]);
	}

	return (DDI_FAILURE);
}

/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
 */
void
nge_free_bufs(nge_t *ngep)
{
	int split;

	NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep));

	nge_free_dma_mem(&ngep->recv->desc);
	nge_free_dma_mem(&ngep->send->desc);

	for (split = 0; split < ngep->nge_split; ++split)
		nge_free_dma_mem(&ngep->send->buf[split]);
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
nge_fini_send_ring(nge_t *ngep)
{
	uint32_t slot;
	size_t dmah_num;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;

	srp = ngep->send;
	ssbdp = srp->sw_sbds;

	NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep));

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	for (slot = 0; slot < dmah_num; ++slot) {
		if (srp->dmahndl[slot].hndl) {
			(void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl);
			ddi_dma_free_handle(&srp->dmahndl[slot].hndl);
			srp->dmahndl[slot].hndl = NULL;
			srp->dmahndl[slot].next = NULL;
		}
	}

	srp->dmah_free.head = NULL;
	srp->dmah_free.tail = NULL;

	kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp));
}

/*
 * Initialise the specified Send Ring, using the information in the
 * <dma_area> descriptors that it contains to set up all the other
 * fields. This routine should be called only once for each ring.
 */
static int
nge_init_send_ring(nge_t *ngep)
{
	size_t dmah_num;
	uint32_t nslots;
	uint32_t err;
	uint32_t slot;
	uint32_t split;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	dma_area_t desc;
	dma_area_t pbuf;

	srp = ngep->send;
	srp->desc.nslots = ngep->tx_desc;
	nslots = srp->desc.nslots;

	NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep));
	/*
	 * Other one-off initialisation of per-ring data
	 */
	srp->ngep = ngep;

	/*
	 * Allocate the array of s/w Send Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
	srp->sw_sbds = ssbdp;

	/*
	 * Now initialise each array element once and for all
	 */
	desc = srp->desc;
	for (split = 0; split < ngep->nge_split; ++split) {
		pbuf = srp->buf[split];
		for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) {
			nge_slice_chunk(&ssbdp->desc, &desc, 1,
			    ngep->desc_attr.txd_size);
			nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
			    ngep->buf_size);
		}
		ASSERT(pbuf.alength == 0);
	}
	ASSERT(desc.alength == 0);

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	/* preallocate dma handles for tx buffer */
	for (slot = 0; slot < dmah_num; ++slot) {

		err = ddi_dma_alloc_handle(ngep->devinfo,
		    ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &srp->dmahndl[slot].hndl);

		if (err != DDI_SUCCESS) {
			nge_fini_send_ring(ngep);
			nge_error(ngep,
			    "nge_init_send_ring: alloc dma handle fails");
			return (DDI_FAILURE);
		}
		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
	}

	srp->dmah_free.head = srp->dmahndl;
	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
	srp->dmah_free.tail->next = NULL;

	return (DDI_SUCCESS);
}

/*
 * Initialize the tx recycle pointer and tx sending pointer of the tx ring
 * and set the type of tx's data descriptor by default.
 */
static void
nge_reinit_send_ring(nge_t *ngep)
{
	size_t dmah_num;
	uint32_t slot;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;

	srp = ngep->send;

	/*
	 * Reinitialise control variables ...
	 */

	srp->tx_hwmark = NGE_DESC_MIN;
	srp->tx_lwmark = NGE_DESC_MIN;

	srp->tx_next = 0;
	srp->tx_free = srp->desc.nslots;
	srp->tc_next = 0;

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	for (slot = 0; slot - dmah_num != 0; ++slot)
		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;

	srp->dmah_free.head = srp->dmahndl;
	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
	srp->dmah_free.tail->next = NULL;

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	for (slot = 0; slot < srp->desc.nslots; ++slot) {
		ssbdp = &srp->sw_sbds[slot];
		ssbdp->flags = HOST_OWN;
	}

	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
}

/*
 * Initialize the slot number of the rx ring
 */
static void
nge_init_recv_ring(nge_t *ngep)
{
	recv_ring_t *rrp;

	rrp = ngep->recv;
	rrp->desc.nslots = ngep->rx_desc;
	rrp->ngep = ngep;
}

/*
 * Initialize the rx recycle pointer and rx sending pointer of the rx ring
 */
static void
nge_reinit_recv_ring(nge_t *ngep)
{
	recv_ring_t *rrp;

	rrp = ngep->recv;

	/*
	 * Reinitialise control variables ...
	 */
	rrp->prod_index = 0;
	/*
	 * Zero and sync all the h/w Receive Buffer Descriptors
	 */
	DMA_ZERO(rrp->desc);
	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV);
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
nge_fini_buff_ring(nge_t *ngep)
{
	uint32_t i;
	buff_ring_t *brp;
	dma_area_t *bufp;
	sw_rx_sbd_t *bsbdp;

	brp = ngep->buff;
	bsbdp = brp->sw_rbds;

	NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep));

	mutex_enter(brp->recycle_lock);
	brp->buf_sign++;
	mutex_exit(brp->recycle_lock);
	for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) {
		if (bsbdp->bufp) {
			if (bsbdp->bufp->mp)
				freemsg(bsbdp->bufp->mp);
			nge_free_dma_mem(bsbdp->bufp);
			kmem_free(bsbdp->bufp, sizeof (dma_area_t));
			bsbdp->bufp = NULL;
		}
	}
	while (brp->free_list != NULL) {
		bufp = brp->free_list;
		brp->free_list = bufp->next;
		bufp->next = NULL;
		if (bufp->mp)
			freemsg(bufp->mp);
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
	}
	while (brp->recycle_list != NULL) {
		bufp = brp->recycle_list;
		brp->recycle_list = bufp->next;
		bufp->next = NULL;
		if (bufp->mp)
			freemsg(bufp->mp);
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
	}

	kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp)));
	brp->sw_rbds = NULL;
}

/*
 * Initialize the Rx's data ring and free ring
 */
static int
nge_init_buff_ring(nge_t *ngep)
{
	uint32_t err;
	uint32_t slot;
	uint32_t nslots_buff;
	uint32_t nslots_recv;
	buff_ring_t *brp;
	recv_ring_t *rrp;
	dma_area_t desc;
	dma_area_t *bufp;
	sw_rx_sbd_t *bsbdp;

	rrp = ngep->recv;
	brp = ngep->buff;
	brp->nslots = ngep->rx_buf;
	brp->rx_bcopy = B_FALSE;
	nslots_recv = rrp->desc.nslots;
	nslots_buff = brp->nslots;
	brp->ngep = ngep;

	NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep));

	/*
	 * Allocate the array of s/w Recv Buffer Descriptors
	 */
	bsbdp = kmem_zalloc(nslots_recv * sizeof (*bsbdp), KM_SLEEP);
	brp->sw_rbds = bsbdp;
	brp->free_list = NULL;
	brp->recycle_list = NULL;
	for (slot = 0; slot < nslots_buff; ++slot) {
		bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP);
		err = nge_alloc_dma_mem(ngep, (ngep->buf_size
		    + NGE_HEADROOM),
		    &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp);
		if (err != DDI_SUCCESS) {
			kmem_free(bufp, sizeof (dma_area_t));
			return (DDI_FAILURE);
		}

		bufp->alength -= NGE_HEADROOM;
		bufp->offset += NGE_HEADROOM;
		bufp->private = (caddr_t)ngep;
		bufp->rx_recycle.free_func = nge_recv_recycle;
		bufp->rx_recycle.free_arg = (caddr_t)bufp;
		bufp->signature = brp->buf_sign;
		bufp->rx_delivered = B_FALSE;
		bufp->mp = desballoc(DMA_VPTR(*bufp),
		    ngep->buf_size + NGE_HEADROOM,
		    0, &bufp->rx_recycle);

		if (bufp->mp == NULL) {
			return (DDI_FAILURE);
		}
		bufp->next = brp->free_list;
		brp->free_list = bufp;
	}

	/*
	 * Now initialise each array element once and for all
	 */
	desc = rrp->desc;
	for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) {
		nge_slice_chunk(&bsbdp->desc, &desc, 1,
		    ngep->desc_attr.rxd_size);
		bufp = brp->free_list;
		brp->free_list = bufp->next;
		bsbdp->bufp = bufp;
		bsbdp->flags = CONTROLER_OWN;
		bufp->next = NULL;
	}

	ASSERT(desc.alength == 0);
	return (DDI_SUCCESS);
}

/*
 * Fill the host address of data into the rx descriptors
 * and initialize the free pointers of the rx free ring
 */
static int
nge_reinit_buff_ring(nge_t *ngep)
{
	uint32_t slot;
	uint32_t nslots_recv;
	buff_ring_t *brp;
	recv_ring_t *rrp;
	sw_rx_sbd_t *bsbdp;
	void *hw_bd_p;

	brp = ngep->buff;
	rrp = ngep->recv;
	bsbdp = brp->sw_rbds;
	nslots_recv = rrp->desc.nslots;
	for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) {
		hw_bd_p = DMA_VPTR(bsbdp->desc);
		/*
		 * There is a scenario: when small-packet TCP traffic is
		 * heavy, suspending the TCP traffic can keep the
		 * preallocated rx buffers from being released in time,
		 * so the rx buffer pointers are not refilled in time.
		 *
		 * At this point, if we reinitialize the driver, the bufp
		 * pointer for rx traffic will be NULL, so the
		 * reinitialization fails.
		 */
		if (bsbdp->bufp == NULL)
			return (DDI_FAILURE);

		ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie,
		    bsbdp->bufp->alength);
	}
	return (DDI_SUCCESS);
}

static void
nge_init_ring_param_lock(nge_t *ngep)
{
	buff_ring_t *brp;
	send_ring_t *srp;

	srp = ngep->send;
	brp = ngep->buff;

	/* Init the locks for send ring */
	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));

	/* Init parameters of buffer ring */
	brp->free_list = NULL;
	brp->recycle_list = NULL;
	brp->rx_hold = 0;
	brp->buf_sign = 0;

	/* Init recycle list lock */
	mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
}

int
nge_init_rings(nge_t *ngep)
{
	uint32_t err;

	err = nge_init_send_ring(ngep);
	if (err != DDI_SUCCESS) {
		return (err);
	}
	nge_init_recv_ring(ngep);

	err = nge_init_buff_ring(ngep);
	if (err != DDI_SUCCESS) {
		nge_fini_send_ring(ngep);
		return (DDI_FAILURE);
	}

	return (err);
}

static int
nge_reinit_ring(nge_t *ngep)
{
	int err;

	nge_reinit_recv_ring(ngep);
	nge_reinit_send_ring(ngep);
	err = nge_reinit_buff_ring(ngep);
	return (err);
}


void
nge_fini_rings(nge_t *ngep)
{
	/*
	 * For the receive ring, nothing needs to be finished.
	 * So only finish the buffer ring and send ring here.
	 */
	nge_fini_buff_ring(ngep);
	nge_fini_send_ring(ngep);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	NGE_LOOP_NONE		},
	{ external,	"100Mbps",	NGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	NGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		NGE_LOOP_INTERNAL_PHY	},
};

enum ioc_reply
nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;
	uint32_t *lbmp;
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = ngep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (nge_set_loop_mode(ngep, *lbmp));
	}
}

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_NEMO


static void
nge_check_desc_prop(nge_t *ngep)
{
	if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD)
		ngep->desc_mode = DESC_HOT;

	if (ngep->desc_mode == DESC_OFFLOAD) {

		ngep->desc_attr = nge_sum_desc;

	} else if (ngep->desc_mode == DESC_HOT) {

		ngep->desc_attr = nge_hot_desc;
	}
}

/*
 * nge_get_props -- get the parameters to tune the driver
 */
static void
nge_get_props(nge_t *ngep)
{
	chip_info_t *infop;
	dev_info_t *devinfo;
	nge_dev_spec_param_t *dev_param_p;

	devinfo = ngep->devinfo;
	infop = (chip_info_t *)&ngep->chipinfo;
	dev_param_p = &ngep->dev_spec_param;

	infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, clsize_propname, 32);

	infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, latency_propname, 64);
	ngep->intr_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, intr_moderation, NGE_SET);
	ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_data_hw, 0x20);
	ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_lw, 0x4);
	ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_hw, 0xc);

	ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC);
	ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP);
	ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type);
	ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, low_memory_mode, 0);

	if (dev_param_p->jumbo) {
		ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
		    DDI_PROP_DONTPASS, default_mtu, ETHERMTU);
	} else
		ngep->default_mtu = ETHERMTU;
	if (dev_param_p->tx_pause_frame)
		ngep->param_link_tx_pause = B_TRUE;
	else
		ngep->param_link_tx_pause = B_FALSE;

	if (dev_param_p->rx_pause_frame)
		ngep->param_link_rx_pause = B_TRUE;
	else
		ngep->param_link_rx_pause = B_FALSE;

	if (ngep->default_mtu > ETHERMTU &&
	    ngep->default_mtu <= NGE_MTU_2500) {
		ngep->buf_size = NGE_JB2500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_2500 &&
	    ngep->default_mtu <= NGE_MTU_4500) {
		ngep->buf_size = NGE_JB4500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_4500 &&
	    ngep->default_mtu <= NGE_MAX_MTU) {
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MAX_MTU) {
		ngep->default_mtu = NGE_MAX_MTU;
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->lowmem_mode != 0) {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_32;
	} else {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = dev_param_p->tx_desc_num;
		ngep->rx_desc = dev_param_p->rx_desc_num;
		ngep->rx_buf = dev_param_p->rx_desc_num * 2;
		ngep->nge_split = dev_param_p->nge_split;
	}

	nge_check_desc_prop(ngep);
}
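
/*
 * Illustrative sketch only: the properties read above are normally
 * supplied through the driver's .conf file (e.g. nge.conf); a
 * hypothetical entry overriding a couple of the defaults could look
 * like
 *
 *	default_mtu = 9000;
 *	intr-moderation = 0;
 *
 * The values shown are purely illustrative. Any property not present
 * in the .conf file falls back to the default passed as the last
 * argument of the corresponding ddi_prop_get_int() call above.
 */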

static int
nge_reset_dev(nge_t *ngep)
{
	int err;
	nge_mul_addr1 maddr1;
	nge_sw_statistics_t *sw_stp;
	sw_stp = &ngep->statistics.sw_statistics;
	send_ring_t *srp = ngep->send;

	ASSERT(mutex_owned(ngep->genlock));
	mutex_enter(srp->tc_lock);
	mutex_enter(srp->tx_lock);

	nge_tx_recycle_all(ngep);
	err = nge_reinit_ring(ngep);
	if (err == DDI_FAILURE) {
		mutex_exit(srp->tx_lock);
		mutex_exit(srp->tc_lock);
		return (err);
	}
	err = nge_chip_reset(ngep);
	/*
	 * Clear the Multicast mac address table
	 */
	nge_reg_put32(ngep, NGE_MUL_ADDR0, 0);
	maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1);
	maddr1.addr_bits.addr = 0;
	nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val);

	mutex_exit(srp->tx_lock);
	mutex_exit(srp->tc_lock);
	if (err == DDI_FAILURE)
		return (err);
	ngep->watchdog = 0;
	ngep->resched_needed = B_FALSE;
	ngep->promisc = B_FALSE;
	ngep->param_loop_mode = NGE_LOOP_NONE;
	ngep->factotum_flag = 0;
	ngep->resched_needed = 0;
	ngep->nge_mac_state = NGE_MAC_RESET;
	ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL;
	ngep->max_sdu += VTAG_SIZE;
	ngep->rx_def = 0x16;

	/* Clear the software statistics */
	sw_stp->recv_count = 0;
	sw_stp->xmit_count = 0;
	sw_stp->rbytes = 0;
	sw_stp->obytes = 0;

	return (DDI_SUCCESS);
}
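
/*
 * Worked example (assuming the conventional values ETHER_HEAD_LEN = 14,
 * ETHERFCSL = 4 and VTAG_SIZE = 4): with the standard 1500-byte MTU the
 * max_sdu computation above gives
 *
 *	max_sdu = 1500 + 14 + 4 + 4 = 1522
 *
 * i.e. enough room for one full VLAN-tagged frame.
 */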

static void
nge_m_stop(void *arg)
{
	nge_t *ngep = arg;		/* private device info	*/
	int err;

	NGE_TRACE(("nge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(ngep->genlock);
	/* If suspended, the adapter is already stopped, just return. */
	if (ngep->suspended) {
		ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED);
		mutex_exit(ngep->genlock);
		return;
	}
	rw_enter(ngep->rwlock, RW_WRITER);

	err = nge_chip_stop(ngep, B_FALSE);
	if (err == DDI_FAILURE)
		err = nge_chip_reset(ngep);
	if (err == DDI_FAILURE)
		nge_problem(ngep, "nge_m_stop: stop chip failed");
	ngep->nge_mac_state = NGE_MAC_STOPPED;

	/* Recycle all the TX BD */
	nge_tx_recycle_all(ngep);
	nge_fini_rings(ngep);
	nge_free_bufs(ngep);

	NGE_DEBUG(("nge_m_stop($%p) done", arg));

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);
}

static int
nge_m_start(void *arg)
{
	int err;
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_start($%p)", arg));

	/*
	 * Start processing and record new MAC state
	 */
	mutex_enter(ngep->genlock);
	/*
	 * If suspended, don't start, as the resume processing
	 * will recall this function with the suspended flag off.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (EIO);
	}
	rw_enter(ngep->rwlock, RW_WRITER);
	err = nge_alloc_bufs(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_m_start: DMA buffer allocation failed");
		goto finish;
	}
	err = nge_init_rings(ngep);
	if (err != DDI_SUCCESS) {
		nge_free_bufs(ngep);
		nge_problem(ngep, "nge_init_rings() failed,err=%x", err);
		goto finish;
	}
	err = nge_restart(ngep);

	NGE_DEBUG(("nge_m_start($%p) done", arg));
finish:
	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);

	return (err == DDI_SUCCESS ? 0 : EIO);
}

static int
nge_m_unicst(void *arg, const uint8_t *macaddr)
{
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_unicst($%p)", arg));
	/*
	 * Remember the new current address in the driver state.
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(ngep->genlock);

	ethaddr_copy(macaddr, ngep->cur_uni_addr.addr);
	ngep->cur_uni_addr.set = 1;

	/*
	 * If we are suspended, we want to quit now, and not update
	 * the chip. Doing so might put it in a bad state, but the
	 * resume will get the unicast address installed.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (DDI_SUCCESS);
	}
	nge_chip_sync(ngep);

	NGE_DEBUG(("nge_m_unicst($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}

static int
nge_m_promisc(void *arg, boolean_t on)
{
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_promisc($%p)", arg));

	/*
	 * Store specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(ngep->genlock);
	/*
	 * If suspended, there is no need to do anything, even
	 * recording the promiscuous mode is not necessary, as
	 * it won't be properly set on resume. Just return failure.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (DDI_FAILURE);
	}
	if (ngep->promisc == on) {
		mutex_exit(ngep->genlock);
		NGE_DEBUG(("nge_m_promisc($%p) done", arg));
		return (0);
	}
	ngep->promisc = on;
	ngep->record_promisc = ngep->promisc;
	nge_chip_sync(ngep);
	NGE_DEBUG(("nge_m_promisc($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}

static void nge_mulparam(nge_t *ngep)
{
	uint8_t number;
	ether_addr_t pand;
	ether_addr_t por;
	mul_item *plist;

	for (number = 0; number < ETHERADDRL; number++) {
		pand[number] = 0x00;
		por[number] = 0x00;
	}
	for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) {
		for (number = 0; number < ETHERADDRL; number++) {
			pand[number] &= plist->mul_addr[number];
			por[number] |= plist->mul_addr[number];
		}
	}
	for (number = 0; number < ETHERADDRL; number++) {
		ngep->cur_mul_addr.addr[number]
		    = pand[number] & por[number];
		ngep->cur_mul_mask.addr[number]
		    = pand[number] | (~por[number]);
	}
}
static int
nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	boolean_t update;
	boolean_t b_eq;
	nge_t *ngep = arg;
	mul_item *plist;
	mul_item *plist_prev;
	mul_item *pitem;

	NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	update = B_FALSE;
	plist = plist_prev = NULL;
	mutex_enter(ngep->genlock);
	if (add) {
		if (ngep->pcur_mulist != NULL) {
			for (plist = ngep->pcur_mulist; plist != NULL;
			    plist = plist->next) {
				b_eq = ether_eq(plist->mul_addr, mca);
				if (b_eq) {
					plist->ref_cnt++;
					break;
				}
				plist_prev = plist;
			}
		}

		if (plist == NULL) {
			pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP);
			ether_copy(mca, pitem->mul_addr);
			pitem->ref_cnt++;
			pitem->next = NULL;
			if (plist_prev == NULL)
				ngep->pcur_mulist = pitem;
			else
				plist_prev->next = pitem;
			update = B_TRUE;
		}
	} else {
		if (ngep->pcur_mulist != NULL) {
			for (plist = ngep->pcur_mulist; plist != NULL;
			    plist = plist->next) {
				b_eq = ether_eq(plist->mul_addr, mca);
				if (b_eq) {
					update = B_TRUE;
					break;
				}
				plist_prev = plist;
			}

			if (update) {
				if ((plist_prev == NULL) &&
				    (plist->next == NULL))
					ngep->pcur_mulist = NULL;
				else if ((plist_prev == NULL) &&
				    (plist->next != NULL))
					ngep->pcur_mulist = plist->next;
				else
					plist_prev->next = plist->next;
				kmem_free(plist, sizeof (mul_item));
			}
		}
	}

	if (update && !ngep->suspended) {
		nge_mulparam(ngep);
		nge_chip_sync(ngep);
	}
	NGE_DEBUG(("nge_m_multicst($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}

static void
nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	int err;
	int cmd;
	nge_t *ngep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesirable
	 * state. So just NAK it.
	 */
	mutex_enter(ngep->genlock);
	if (ngep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(ngep->genlock);
		return;
	}
	mutex_exit(ngep->genlock);

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x", cmd));
	switch (cmd) {
	default:
		NGE_LDB(NGE_DBG_BADIOC,
		    ("nge_m_ioctl: unknown cmd 0x%x", cmd));

		miocnak(wq, mp, 0, EINVAL);
		return;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d",
			    cmd, err));
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(ngep->genlock);

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		status = nge_chip_ioctl(ngep, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = nge_loop_ioctl(ngep, mp, iocp);
		break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status));

	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

	default:
		break;
	}

	mutex_exit(ngep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {

	default:
	case IOC_INVAL:
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static boolean_t
nge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
nge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	nge_t *ngep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	link_flowctrl_t fl;

	mutex_enter(ngep->genlock);
	if (ngep->param_loop_mode != NGE_LOOP_NONE &&
	    nge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(ngep->genlock);
		return (EBUSY);
	}
	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		ngep->param_en_1000fdx = *(uint8_t *)pr_val;
		ngep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100FDX_CAP:
		ngep->param_en_100fdx = *(uint8_t *)pr_val;
		ngep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100HDX_CAP:
		ngep->param_en_100hdx = *(uint8_t *)pr_val;
		ngep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10FDX_CAP:
		ngep->param_en_10fdx = *(uint8_t *)pr_val;
		ngep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10HDX_CAP:
		ngep->param_en_10hdx = *(uint8_t *)pr_val;
		ngep->param_adv_10hdx = *(uint8_t *)pr_val;
	reprogram:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
	case MAC_PROP_EN_1000HDX_CAP:
		err = ENOTSUP; /* read-only prop. Can't set this */
		break;
	case MAC_PROP_AUTONEG:
		ngep->param_adv_autoneg = *(uint8_t *)pr_val;
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;
	case MAC_PROP_MTU:
		cur_mtu = ngep->default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < ETHERMTU ||
		    new_mtu > NGE_MAX_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > ETHERMTU) &&
		    (!ngep->dev_spec_param.jumbo)) {
			err = EINVAL;
			break;
		}
		if (ngep->nge_mac_state == NGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}

		ngep->default_mtu = new_mtu;
		if (ngep->default_mtu > ETHERMTU &&
		    ngep->default_mtu <= NGE_MTU_2500) {
			ngep->buf_size = NGE_JB2500_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->default_mtu > NGE_MTU_2500 &&
		    ngep->default_mtu <= NGE_MTU_4500) {
			ngep->buf_size = NGE_JB4500_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->default_mtu > NGE_MTU_4500 &&
		    ngep->default_mtu <= NGE_MAX_MTU) {
			ngep->buf_size = NGE_JB9000_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->default_mtu > NGE_MAX_MTU) {
			ngep->default_mtu = NGE_MAX_MTU;
			ngep->buf_size = NGE_JB9000_BUFSZ;
			ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_256;
		} else if (ngep->lowmem_mode != 0) {
			ngep->default_mtu = ETHERMTU;
			ngep->buf_size = NGE_STD_BUFSZ;
			ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
			ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
			ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
			ngep->nge_split = NGE_SPLIT_32;
		} else {
			ngep->default_mtu = ETHERMTU;
			ngep->buf_size = NGE_STD_BUFSZ;
			ngep->tx_desc =
			    ngep->dev_spec_param.tx_desc_num;
			ngep->rx_desc =
			    ngep->dev_spec_param.rx_desc_num;
			ngep->rx_buf =
			    ngep->dev_spec_param.rx_desc_num * 2;
			ngep->nge_split =
			    ngep->dev_spec_param.nge_split;
		}

		err = mac_maxsdu_update(ngep->mh, ngep->default_mtu);

		break;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			ngep->param_adv_pause = 0;
			ngep->param_adv_asym_pause = 0;

			ngep->param_link_rx_pause = B_FALSE;
			ngep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			if (!((ngep->param_lp_pause == 0) &&
			    (ngep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			ngep->param_adv_pause = 1;
			ngep->param_adv_asym_pause = 1;

			ngep->param_link_rx_pause = B_TRUE;
			ngep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			if (!((ngep->param_lp_pause == 1) &&
			    (ngep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			ngep->param_adv_pause = 0;
			ngep->param_adv_asym_pause = 1;

			ngep->param_link_rx_pause = B_FALSE;
			ngep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			if (ngep->param_lp_pause != 1) {
				err = EINVAL;
				break;
			}
			ngep->param_adv_pause = 1;

			ngep->param_link_rx_pause = B_TRUE;
			ngep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			(*ngep->physops->phys_update)(ngep);
			nge_chip_sync(ngep);
		}

		break;
	case MAC_PROP_PRIVATE:
		err = nge_set_priv_prop(ngep, pr_name, pr_valsize,
		    pr_val);
		if (err == 0) {
			(*ngep->physops->phys_update)(ngep);
			nge_chip_sync(ngep);
		}
		break;
	default:
		err = ENOTSUP;
	}
	mutex_exit(ngep->genlock);
	return (err);
}

static int
nge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	nge_t *ngep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);

	*perm = MAC_PROP_PERM_RW;

	bzero(pr_val, pr_valsize);

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize >= sizeof (link_duplex_t)) {
			bcopy(&ngep->param_link_duplex, pr_val,
			    sizeof (link_duplex_t));
		} else
			err = EINVAL;
		break;
	case MAC_PROP_SPEED:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize >= sizeof (uint64_t)) {
			speed = ngep->param_link_speed * 1000000ull;
			bcopy(&speed, pr_val, sizeof (speed));
		} else
			err = EINVAL;
		break;
	case MAC_PROP_AUTONEG:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_autoneg;
		}
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize >= sizeof (link_flowctrl_t)) {
			if (pr_flags & MAC_PROP_DEFAULT) {
				fl = LINK_FLOWCTRL_BI;
				bcopy(&fl, pr_val, sizeof (fl));
				break;
			}
			if (ngep->param_link_rx_pause &&
			    !ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_RX;

			if (!ngep->param_link_rx_pause &&
			    !ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_NONE;

			if (!ngep->param_link_rx_pause &&
			    ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_TX;

			if (ngep->param_link_rx_pause &&
			    ngep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_BI;
			bcopy(&fl, pr_val, sizeof (fl));
		} else
			err = EINVAL;
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_1000fdx;
		}
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_1000fdx;
		}
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 0;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_1000hdx;
		}
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 0;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_1000hdx;
		}
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_100fdx;
		}
		break;
	case MAC_PROP_EN_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_100fdx;
		}
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_100hdx;
		}
		break;
	case MAC_PROP_EN_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_100hdx;
		}
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_10fdx;
		}
		break;
	case MAC_PROP_EN_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_10fdx;
		}
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_adv_10hdx;
		}
		break;
	case MAC_PROP_EN_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val = 1;
		} else {
			*(uint8_t *)pr_val = ngep->param_en_10hdx;
		}
		break;
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val = 0;
		break;
	case MAC_PROP_PRIVATE:
		err = nge_get_priv_prop(ngep, pr_name, pr_flags,
		    pr_valsize, pr_val);
		break;
	case MAC_PROP_MTU: {
		mac_propval_range_t range;

		if (!(pr_flags & MAC_PROP_POSSIBLE))
			return (ENOTSUP);
		if (pr_valsize < sizeof (mac_propval_range_t))
			return (EINVAL);
		range.mpr_count = 1;
		range.mpr_type = MAC_PROPVAL_UINT32;
		range.range_uint32[0].mpur_min =
		    range.range_uint32[0].mpur_max = ETHERMTU;
		if (ngep->dev_spec_param.jumbo)
			range.range_uint32[0].mpur_max = NGE_MAX_MTU;
		bcopy(&range, pr_val, sizeof (range));
		break;
	}
	default:
		err = ENOTSUP;
	}
	return (err);
}

/* ARGSUSED */
static int
nge_set_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_MAX_SDU) {
			err = EINVAL;
		} else {
			ngep->param_txbcopy_threshold = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_MAX_SDU) {
			err = EINVAL;
		} else {
			ngep->param_rxbcopy_threshold = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_recv_max_packet") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
			err = EINVAL;
		} else {
			ngep->param_recv_max_packet = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_poll_quiet_time") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > 10000) {
			err = EINVAL;
		} else {
			ngep->param_poll_quiet_time = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_poll_busy_time") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > 10000) {
			err = EINVAL;
		} else {
			ngep->param_poll_busy_time = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
			err = EINVAL;
		} else {
			ngep->param_rx_intr_hwater = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
			err = EINVAL;
		} else {
			ngep->param_rx_intr_lwater = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}
	if (strcmp(pr_name, "_tx_n_intr") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < 1 || result > 10000) {
			err = EINVAL;
		} else {
			ngep->param_tx_n_intr = (uint32_t)result;
			goto reprogram;
		}
		return (err);
	}

	err = ENOTSUP;
	return (err);

reprogram:
	if (err == 0) {
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
	}

	return (err);
}
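
/*
 * Illustrative sketch only: the private properties handled above are
 * the ones exported through nge_priv_props[], so from userland a
 * hypothetical tuning session could look like
 *
 *	# dladm set-linkprop -p _tx_bcopy_threshold=512 nge0
 *	# dladm show-linkprop -p _tx_bcopy_threshold nge0
 *
 * where nge0 is an example link name and 512 an example value within
 * the range checked above.
 */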

static int
nge_get_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	int err = ENOTSUP;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
	int value;

	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
		value = (is_default ? NGE_TX_COPY_SIZE :
		    ngep->param_txbcopy_threshold);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
		value = (is_default ? NGE_RX_COPY_SIZE :
		    ngep->param_rxbcopy_threshold);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_recv_max_packet") == 0) {
		value = (is_default ? 128 : ngep->param_recv_max_packet);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_poll_quiet_time") == 0) {
		value = (is_default ? NGE_POLL_QUIET_TIME :
		    ngep->param_poll_quiet_time);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_poll_busy_time") == 0) {
		value = (is_default ? NGE_POLL_BUSY_TIME :
		    ngep->param_poll_busy_time);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
		value = (is_default ? 1 : ngep->param_rx_intr_hwater);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
		value = (is_default ? 8 : ngep->param_rx_intr_lwater);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_tx_n_intr") == 0) {
		value = (is_default ? NGE_TX_N_INTR :
		    ngep->param_tx_n_intr);
		err = 0;
		goto done;
	}

done:
	if (err == 0) {
		(void) snprintf(pr_val, pr_valsize, "%d", value);
	}
	return (err);
}

/* ARGSUSED */
static boolean_t
nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nge_t *ngep = arg;
	nge_dev_spec_param_t *dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;

		if (dev_param_p->tx_hw_checksum) {
			*hcksum_txflags = dev_param_p->tx_hw_checksum;
		} else
			return (B_FALSE);
		break;
	}
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}
		    NGE_TX_N_INTR : ngep->param_tx_n_intr);
		err = 0;
		goto done;
	}

done:
	if (err == 0) {
		(void) snprintf(pr_val, pr_valsize, "%d", value);
	}
	return (err);
}

/* ARGSUSED */
static boolean_t
nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nge_t *ngep = arg;
	nge_dev_spec_param_t *dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;

		if (dev_param_p->tx_hw_checksum) {
			*hcksum_txflags = dev_param_p->tx_hw_checksum;
		} else
			return (B_FALSE);
		break;
	}
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

#undef	NGE_DBG
#define	NGE_DBG	NGE_DBG_INIT	/* debug flag for this code	*/

int
nge_restart(nge_t *ngep)
{
	int err = 0;

	err = nge_reset_dev(ngep);
	/* write back the promisc setting */
	ngep->promisc = ngep->record_promisc;
	nge_chip_sync(ngep);
	if (!err)
		err = nge_chip_start(ngep);

	if (err) {
		ngep->nge_mac_state = NGE_MAC_STOPPED;
		return (DDI_FAILURE);
	} else {
		ngep->nge_mac_state = NGE_MAC_STARTED;
		return (DDI_SUCCESS);
	}
}

void
nge_wake_factotum(nge_t *ngep)
{
	mutex_enter(ngep->softlock);
	if (ngep->factotum_flag == 0) {
		ngep->factotum_flag = 1;
		(void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL);
	}
	mutex_exit(ngep->softlock);
}

/*
 * High-level cyclic handler
 *
 * This routine schedules a (low-level) softint callback to the
 * factotum.
 */
static void
nge_chip_cyclic(void *arg)
{
	nge_t *ngep;

	ngep = (nge_t *)arg;

	switch (ngep->nge_chip_state) {
	default:
		return;

	case NGE_CHIP_RUNNING:
		break;

	case NGE_CHIP_FAULT:
	case NGE_CHIP_ERROR:
		break;
	}

	nge_wake_factotum(ngep);
}

/*
 * Acquire/release the SMU semaphore (SMU-enabled chipsets only).
 *
 * When the nge driver attaches, it must acquire the semaphore before
 * initializing the PHY and accessing the MAC registers; when the driver
 * detaches, it must release the semaphore again.
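 *
 * The handshake implemented below polls the smu2mac field of NGE_TX_EN
 * (once a second, up to five times) until the SMU reports the semaphore
 * free, then writes mac2smu = NGE_SMU_GET and re-reads the register to
 * confirm that the claim took effect; releasing the semaphore simply
 * clears NGE_TX_EN.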
 */
static int
nge_smu_sema(nge_t *ngep, boolean_t acquire)
{
	nge_tx_en tx_en;
	uint32_t tries;

	if (acquire) {
		for (tries = 0; tries < 5; tries++) {
			tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);
			if (tx_en.bits.smu2mac == NGE_SMU_FREE)
				break;
			delay(drv_usectohz(1000000));
		}
		if (tx_en.bits.smu2mac != NGE_SMU_FREE)
			return (DDI_FAILURE);
		for (tries = 0; tries < 5; tries++) {
			tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);
			tx_en.bits.mac2smu = NGE_SMU_GET;
			nge_reg_put32(ngep, NGE_TX_EN, tx_en.val);
			tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);

			if (tx_en.bits.mac2smu == NGE_SMU_GET &&
			    tx_en.bits.smu2mac == NGE_SMU_FREE)
				return (DDI_SUCCESS);
			drv_usecwait(10);
		}
		return (DDI_FAILURE);
	} else
		nge_reg_put32(ngep, NGE_TX_EN, 0x0);

	return (DDI_SUCCESS);
}

static void
nge_unattach(nge_t *ngep)
{
	send_ring_t *srp;
	buff_ring_t *brp;

	srp = ngep->send;
	brp = ngep->buff;
	NGE_TRACE(("nge_unattach($%p)", (void *)ngep));

	/*
	 * Flag that no more activity may be initiated
	 */
	ngep->progress &= ~PROGRESS_READY;
	ngep->nge_mac_state = NGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all NGE data structures
	 */
	if (ngep->periodic_id != NULL) {
		ddi_periodic_delete(ngep->periodic_id);
		ngep->periodic_id = NULL;
	}

	if (ngep->progress & PROGRESS_KSTATS)
		nge_fini_kstats(ngep);

	if (ngep->progress & PROGRESS_HWINT) {
		mutex_enter(ngep->genlock);
		nge_restore_mac_addr(ngep);
		(void) nge_chip_stop(ngep, B_FALSE);
		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
			(void) nge_smu_sema(ngep, B_FALSE);
		}
		mutex_exit(ngep->genlock);
	}

	if (ngep->progress & PROGRESS_SWINT)
		nge_rem_intrs(ngep);

	if (ngep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(ngep->factotum_hdl);

	if (ngep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(ngep->resched_hdl);

	if (ngep->progress & PROGRESS_INTR) {
		mutex_destroy(srp->tx_lock);
		mutex_destroy(srp->tc_lock);
		mutex_destroy(&srp->dmah_lock);
		mutex_destroy(brp->recycle_lock);

		mutex_destroy(ngep->genlock);
		mutex_destroy(ngep->softlock);
		rw_destroy(ngep->rwlock);
	}

	if (ngep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&ngep->io_handle);

	if (ngep->progress & PROGRESS_CFG)
		pci_config_teardown(&ngep->cfg_handle);

	ddi_remove_minor_node(ngep->devinfo, NULL);

	kmem_free(ngep, sizeof (*ngep));
}

static int
nge_resume(dev_info_t *devinfo)
{
	nge_t *ngep;
	chip_info_t *infop;
	int err;

	ASSERT(devinfo != NULL);

	ngep = ddi_get_driver_private(devinfo);
	err = 0;

	/*
	 * If there are state inconsistencies, this is bad. Returning
	 * DDI_FAILURE here will eventually cause the machine to panic,
	 * so it is best done here so that there is a possibility of
	 * debugging the problem.
	 */
	if (ngep == NULL)
		cmn_err(CE_PANIC,
		    "nge: ngep returned from ddi_get_driver_private was NULL");
	infop = (chip_info_t *)&ngep->chipinfo;

	if (ngep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "nge: passed devinfo not the same as saved devinfo");

	mutex_enter(ngep->genlock);
	rw_enter(ngep->rwlock, RW_WRITER);

	/*
	 * Fetch the config space. Even though we have most of it cached,
	 * some values *might* change across a suspend/resume.
	 */
	nge_chip_cfg_init(ngep, infop, B_FALSE);

	/*
	 * This branch is taken only when the port had not been plumbed
	 * at suspend time; in that case there is nothing to restart.
	 */
	if (ngep->suspended == B_FALSE) {
		rw_exit(ngep->rwlock);
		mutex_exit(ngep->genlock);
		return (DDI_SUCCESS);
	}

	nge_tx_recycle_all(ngep);
	err = nge_reinit_ring(ngep);
	if (!err) {
		err = nge_chip_reset(ngep);
		if (!err)
			err = nge_chip_start(ngep);
	}

	if (err) {
		/*
		 * We note the failure, but return success, as the
		 * system is still usable without this controller.
		 */
		cmn_err(CE_WARN, "nge: resume: failed to restart controller");
	} else {
		ngep->nge_mac_state = NGE_MAC_STARTED;
	}
	ngep->suspended = B_FALSE;

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);

	return (DDI_SUCCESS);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	int err;
	int i;
	int instance;
	caddr_t regs;
	nge_t *ngep;
	chip_info_t *infop;
	mac_register_t *macp;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (nge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP);
	instance = ddi_get_instance(devinfo);
	ddi_set_driver_private(devinfo, ngep);
	ngep->devinfo = devinfo;

	(void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d",
	    NGE_DRIVER_NAME, instance);
	err = pci_config_setup(devinfo, &ngep->cfg_handle);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: pci_config_setup() failed");
		goto attach_fail;
	}

	/*
	 * param_txbcopy_threshold and param_rxbcopy_threshold are the
	 * tx/rx bcopy thresholds.  Bounds: min 0, max NGE_MAX_SDU.
	 */
	ngep->param_txbcopy_threshold = NGE_TX_COPY_SIZE;
	ngep->param_rxbcopy_threshold = NGE_RX_COPY_SIZE;

	/*
	 * param_recv_max_packet is the maximum number of packets processed
	 * per receive interrupt.  Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024.
	 */
	ngep->param_recv_max_packet = 128;

	/*
	 * param_poll_quiet_time and param_poll_busy_time are the quiet/busy
	 * times that control switching between per-packet interrupts and
	 * polling mode.  Bounds: min 0, max 10000.
	 */
	ngep->param_poll_quiet_time = NGE_POLL_QUIET_TIME;
	ngep->param_poll_busy_time = NGE_POLL_BUSY_TIME;

	/*
	 * param_rx_intr_hwater/param_rx_intr_lwater: number of packets
	 * received before the poll_quiet_time/poll_busy_time counter is
	 * triggered.  Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024.
	 */
	ngep->param_rx_intr_hwater = 1;
	ngep->param_rx_intr_lwater = 8;

	/*
	 * param_tx_n_intr: in poll mode, tx recycling is done once every
	 * N transmitted packets.  Bounds: min 1, max 10000.
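	 *
	 * These defaults correspond to the "_..." private MAC properties
	 * handled by nge_set_priv_prop()/nge_get_priv_prop() above, so
	 * each of them can also be changed at run time through the MAC
	 * property interfaces rather than only at attach time.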
	 */
	ngep->param_tx_n_intr = NGE_TX_N_INTR;

	infop = (chip_info_t *)&ngep->chipinfo;
	nge_chip_cfg_init(ngep, infop, B_FALSE);
	nge_init_dev_spec_param(ngep);
	nge_get_props(ngep);
	ngep->progress |= PROGRESS_CFG;

	err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER,
	    &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	ngep->io_regs = regs;
	ngep->progress |= PROGRESS_REGS;

	err = nge_register_intrs_and_init_locks(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach:"
		    " register intrs and init locks failed");
		goto attach_fail;
	}
	nge_init_ring_param_lock(ngep);
	ngep->progress |= PROGRESS_INTR;

	mutex_enter(ngep->genlock);

	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
		err = nge_smu_sema(ngep, B_TRUE);
		if (err != DDI_SUCCESS) {
			nge_problem(ngep, "nge_attach: nge_smu_sema() failed");
			goto attach_fail;
		}
	}

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	nge_phys_init(ngep);
	ngep->nge_chip_state = NGE_CHIP_INITIAL;
	err = nge_chip_reset(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: nge_chip_reset() failed");
		mutex_exit(ngep->genlock);
		goto attach_fail;
	}
	nge_chip_sync(ngep);

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(ngep->htable,
		    ngep->intr_actual_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < ngep->intr_actual_cnt; i++) {
			(void) ddi_intr_enable(ngep->htable[i]);
		}
	}

	ngep->link_state = LINK_STATE_UNKNOWN;
	ngep->progress |= PROGRESS_HWINT;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (nge_nd_init(ngep)) {
		nge_problem(ngep, "nge_attach: nge_nd_init() failed");
		mutex_exit(ngep->genlock);
		goto attach_fail;
	}
	ngep->progress |= PROGRESS_NDD;

	/*
	 * Create & initialise named kstats
	 */
	nge_init_kstats(ngep, instance);
	ngep->progress |= PROGRESS_KSTATS;

	mutex_exit(ngep->genlock);

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = ngep;
	macp->m_dip = devinfo;
	macp->m_src_addr = infop->vendor_addr.addr;
	macp->m_callbacks = &nge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ngep->default_mtu;
	macp->m_margin = VTAG_SIZE;
	macp->m_priv_props = nge_priv_props;
	macp->m_priv_prop_count = NGE_MAX_PRIV_PROPS;

	/*
	 * Finally, we're ready to register ourselves with the MAC
	 * interface; if this succeeds, we're all ready to start().
	 */
	err = mac_register(macp, &ngep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodic handler.
	 * nge_chip_cyclic() is invoked in kernel context.
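	 * It fires every NGE_CYCLIC_PERIOD and, while the chip is in the
	 * RUNNING, FAULT or ERROR state, simply calls nge_wake_factotum()
	 * so that the factotum softint re-checks the link and chip state.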
	 */
	ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
	    NGE_CYCLIC_PERIOD, DDI_IPL_0);

	ngep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	nge_unattach(ngep);
	return (DDI_FAILURE);
}

static int
nge_suspend(nge_t *ngep)
{
	mutex_enter(ngep->genlock);
	rw_enter(ngep->rwlock, RW_WRITER);

	/* if the port hasn't been plumbed, just return */
	if (ngep->nge_mac_state != NGE_MAC_STARTED) {
		rw_exit(ngep->rwlock);
		mutex_exit(ngep->genlock);
		return (DDI_SUCCESS);
	}
	ngep->suspended = B_TRUE;
	(void) nge_chip_stop(ngep, B_FALSE);
	ngep->nge_mac_state = NGE_MAC_STOPPED;

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);
	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	int i;
	nge_t *ngep;
	mul_item *p, *nextp;
	buff_ring_t *brp;

	NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd));

	ngep = ddi_get_driver_private(devinfo);
	brp = ngep->buff;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		/*
		 * Stop the NIC.
		 * Note: This driver doesn't currently support WOL, but
		 *	should it in the future, it is important to
		 *	make sure the PHY remains powered so that the
		 *	wakeup packet can actually be received.
		 */
		return (nge_suspend(ngep));

	case DDI_DETACH:
		break;
	}

	/* Wait for any buffers posted to the upper layer to be released */
	for (i = 0; i < 1000; i++) {
		if (brp->rx_hold == 0)
			break;
		drv_usecwait(1000);
	}

	/* If any buffers are still outstanding, refuse to detach */
	if (brp->rx_hold != 0)
		return (DDI_FAILURE);

	/*
	 * Unregister from the GLD subsystem. This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(ngep->mh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Recycle the multicast table. mac_unregister() must be called
	 * first, so that the multicast table remains usable if
	 * mac_unregister() fails.
	 */
	for (p = ngep->pcur_mulist; p != NULL; p = nextp) {
		nextp = p->next;
		kmem_free(p, sizeof (mul_item));
	}
	ngep->pcur_mulist = NULL;

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	nge_unattach(ngep);
	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
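 *
 * Accordingly, the implementation below takes no locks and schedules no
 * callbacks: it only disables debug tracing, restores the hardware MAC
 * address and stops the chip.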
 */
static int
nge_quiesce(dev_info_t *devinfo)
{
	nge_t *ngep;

	ngep = ddi_get_driver_private(devinfo);

	if (ngep == NULL)
		return (DDI_FAILURE);

	/*
	 * Turn off debug tracing
	 */
	nge_debug = 0;
	ngep->debug = 0;

	nge_restore_mac_addr(ngep);
	(void) nge_chip_stop(ngep, B_FALSE);

	return (DDI_SUCCESS);
}

/*
 * ========== Module Loading Data & Entry Points ==========
 */

DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach,
    NULL, NULL, D_MP, NULL, nge_quiesce);

static struct modldrv nge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	nge_ident,		/* short description */
	&nge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&nge_modldrv, NULL
};

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&nge_dev_ops, "nge");
	status = mod_install(&modlinkage);
	if (status != DDI_SUCCESS)
		mac_fini_ops(&nge_dev_ops);
	else
		mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL);

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&nge_dev_ops);
		mutex_destroy(nge_log_mutex);
	}

	return (status);
}

/*
 * ============ Init MSI/Fixed/SoftInterrupt routines ==============
 */

/*
 * Register interrupts and initialize the driver's mutexes and locks
 */
static int
nge_register_intrs_and_init_locks(nge_t *ngep)
{
	int err;
	int intr_types;
	uint_t soft_prip;
	nge_msi_mask msi_mask;
	nge_msi_map0_vec map0_vec;
	nge_msi_map1_vec map1_vec;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations. In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held. So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available. Its only purpose is to call gld_sched()
	 * to retry the pending transmits (we're not allowed to hold
	 * driver-defined mutexes across gld_sched()).
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block. It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
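	 * (the periodic nge_chip_cyclic() handler registered in
	 * nge_attach() wakes the factotum for exactly this purpose).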
	 */
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_reschedule softintr failed");

		return (DDI_FAILURE);
	}
	ngep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_chip_factotum softintr failed!");

		return (DDI_FAILURE);
	}
	if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip)
	    != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: get softintr priority failed\n");

		return (DDI_FAILURE);
	}
	ngep->soft_pri = soft_prip;

	ngep->progress |= PROGRESS_FACTOTUM;

	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types)
	    != DDI_SUCCESS) {
		nge_error(ngep, "ddi_intr_get_supported_types failed\n");

		return (DDI_FAILURE);
	}

	NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x",
	    intr_types));

	if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) {

		/* MSI Configurations for mcp55 chipset */
		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {

			/* Enable the 8 vectors */
			msi_mask.msi_mask_val =
			    nge_reg_get32(ngep, NGE_MSI_MASK);
			msi_mask.msi_msk_bits.vec0 = NGE_SET;
			msi_mask.msi_msk_bits.vec1 = NGE_SET;
			msi_mask.msi_msk_bits.vec2 = NGE_SET;
			msi_mask.msi_msk_bits.vec3 = NGE_SET;
			msi_mask.msi_msk_bits.vec4 = NGE_SET;
			msi_mask.msi_msk_bits.vec5 = NGE_SET;
			msi_mask.msi_msk_bits.vec6 = NGE_SET;
			msi_mask.msi_msk_bits.vec7 = NGE_SET;
			nge_reg_put32(ngep, NGE_MSI_MASK,
			    msi_mask.msi_mask_val);

			/*
			 * Remap MSI MAP0 and MAP1: by default the MCP55
			 * maps every interrupt source to vector 0, so the
			 * software has to remap them here. This mapping
			 * is the same as the one used for the CK804.
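			 *
			 * Each *_vec field in MAP0/MAP1 selects which of
			 * the eight vectors enabled above (0 through 7)
			 * the corresponding interrupt source is routed to.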
			 */
			map0_vec.msi_map0_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP0);
			map1_vec.msi_map1_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP1);
			map0_vec.vecs_bits.reint_vec = 0;
			map0_vec.vecs_bits.rcint_vec = 0;
			map0_vec.vecs_bits.miss_vec = 3;
			map0_vec.vecs_bits.teint_vec = 5;
			map0_vec.vecs_bits.tcint_vec = 5;
			map0_vec.vecs_bits.stint_vec = 2;
			map0_vec.vecs_bits.mint_vec = 6;
			map0_vec.vecs_bits.rfint_vec = 0;
			map1_vec.vecs_bits.tfint_vec = 5;
			map1_vec.vecs_bits.feint_vec = 6;
			map1_vec.vecs_bits.resv8_11 = 3;
			map1_vec.vecs_bits.resv12_15 = 1;
			map1_vec.vecs_bits.resv16_19 = 0;
			map1_vec.vecs_bits.resv20_23 = 7;
			map1_vec.vecs_bits.resv24_31 = 0xff;
			nge_reg_put32(ngep, NGE_MSI_MAP0,
			    map0_vec.msi_map0_val);
			nge_reg_put32(ngep, NGE_MSI_MAP1,
			    map1_vec.msi_map1_val);
		}
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			NGE_DEBUG(("MSI registration failed, "
			    "trying FIXED interrupt type\n"));
		} else {
			nge_log(ngep, "Using MSI interrupt type\n");

			ngep->intr_type = DDI_INTR_TYPE_MSI;
			ngep->progress |= PROGRESS_SWINT;
		}
	}

	if (!(ngep->progress & PROGRESS_SWINT) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			nge_error(ngep, "FIXED interrupt "
			    "registration failed\n");

			return (DDI_FAILURE);
		}

		nge_log(ngep, "Using FIXED interrupt type\n");

		ngep->intr_type = DDI_INTR_TYPE_FIXED;
		ngep->progress |= PROGRESS_SWINT;
	}

	if (!(ngep->progress & PROGRESS_SWINT)) {
		nge_error(ngep, "No interrupts registered\n");

		return (DDI_FAILURE);
	}
	mutex_init(ngep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(ngep->softlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->soft_pri));
	rw_init(ngep->rwlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));

	return (DDI_SUCCESS);
}

/*
 * nge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
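 *
 * The function follows the usual DDI sequence: ddi_intr_get_nintrs() and
 * ddi_intr_get_navail() to size the request, ddi_intr_alloc() to allocate
 * the handles, ddi_intr_get_pri() to fetch the priority (rejecting
 * high-level interrupts), ddi_intr_add_handler() for each vector, and
 * finally ddi_intr_get_cap().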
 */
static int
nge_add_intrs(nge_t *ngep, int intr_type)
{
	dev_info_t *dip = ngep->devinfo;
	int avail, actual, intr_size, count = 0;
	int i, flag, ret;

	NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type));

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d", ret, count);

		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		nge_error(ngep, "ddi_intr_get_navail() failure, "
		    "ret: %d, avail: %d\n", ret, avail);

		return (DDI_FAILURE);
	}

	if (avail < count) {
		NGE_DEBUG(("nintrs() returned %d, navail returned %d\n",
		    count, avail));
	}
	flag = DDI_INTR_ALLOC_NORMAL;

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	ngep->htable = kmem_alloc(intr_size, KM_SLEEP);

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret);

		kmem_free(ngep->htable, intr_size);
		return (DDI_FAILURE);
	}

	if (actual < count) {
		NGE_DEBUG(("Requested: %d, Received: %d\n",
		    count, actual));
	}

	ngep->intr_actual_cnt = actual;
	ngep->intr_req_cnt = count;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) !=
	    DDI_SUCCESS) {
		nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(ngep->htable[i]);
		}

		kmem_free(ngep->htable, intr_size);

		return (DDI_FAILURE);
	}

	/* Test for high level mutex */
	if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) {
		nge_error(ngep, "nge_add_intrs: "
		    "Hi level interrupt not supported");

		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(ngep->htable[i]);

		kmem_free(ngep->htable, intr_size);

		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr,
		    (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			nge_error(ngep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);

			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(ngep->htable[i]);
			}

			kmem_free(ngep->htable, intr_size);

			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap))
	    != DDI_SUCCESS) {
		nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret);

		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(ngep->htable[i]);
			(void) ddi_intr_free(ngep->htable[i]);
		}

		kmem_free(ngep->htable, intr_size);

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * nge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
nge_rem_intrs(nge_t *ngep)
{
	int i;

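	/*
	 * Tear down in the reverse order of nge_add_intrs(): disable the
	 * interrupts (block-disable if supported), remove each handler,
	 * free each handle, and then free the handle table itself.
	 */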
	NGE_DEBUG(("nge_rem_intrs\n"));

	/* Disable all interrupts */
	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(ngep->htable,
		    ngep->intr_actual_cnt);
	} else {
		for (i = 0; i < ngep->intr_actual_cnt; i++) {
			(void) ddi_intr_disable(ngep->htable[i]);
		}
	}

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < ngep->intr_actual_cnt; i++) {
		(void) ddi_intr_remove_handler(ngep->htable[i]);
		(void) ddi_intr_free(ngep->htable[i]);
	}

	kmem_free(ngep->htable,
	    ngep->intr_req_cnt * sizeof (ddi_intr_handle_t));
}
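
/*
 * Administrative notes (illustrative only; nothing below is compiled):
 *
 * The "_..." private properties registered via nge_priv_props[] are
 * normally adjusted through the MAC property interfaces. With the
 * dladm(1M) private-property syntax that would look something like
 *
 *	dladm set-linkprop -p _tx_bcopy_threshold=256 nge0
 *	dladm show-linkprop -p _rx_intr_hwater nge0
 *
 * and the nge_enable_msi tunable that gates the MSI path above can be
 * set from /etc/system, e.g.
 *
 *	set nge:nge_enable_msi = 1
 *
 * The property names follow nge_priv_props[]; the command lines and the
 * instance name "nge0" are given only as a usage sketch, not taken from
 * this file.
 */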