1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include "nge.h" 30 31 /* 32 * Describes the chip's DMA engine 33 */ 34 35 static ddi_dma_attr_t hot_dma_attr = { 36 DMA_ATTR_V0, /* dma_attr version */ 37 0x0000000000000000ull, /* dma_attr_addr_lo */ 38 0x000000FFFFFFFFFFull, /* dma_attr_addr_hi */ 39 0x000000007FFFFFFFull, /* dma_attr_count_max */ 40 0x0000000000000010ull, /* dma_attr_align */ 41 0x00000FFF, /* dma_attr_burstsizes */ 42 0x00000001, /* dma_attr_minxfer */ 43 0x000000000000FFFFull, /* dma_attr_maxxfer */ 44 0x000000FFFFFFFFFFull, /* dma_attr_seg */ 45 1, /* dma_attr_sgllen */ 46 0x00000001, /* dma_attr_granular */ 47 0 48 }; 49 50 static ddi_dma_attr_t hot_tx_dma_attr = { 51 DMA_ATTR_V0, /* dma_attr version */ 52 0x0000000000000000ull, /* dma_attr_addr_lo */ 53 0x000000FFFFFFFFFFull, /* dma_attr_addr_hi */ 54 0x0000000000003FFFull, /* dma_attr_count_max */ 55 0x0000000000000010ull, /* dma_attr_align */ 56 0x00000FFF, /* dma_attr_burstsizes */ 57 0x00000001, /* dma_attr_minxfer */ 58 0x0000000000003FFFull, /* dma_attr_maxxfer */ 59 0x000000FFFFFFFFFFull, /* dma_attr_seg */ 60 NGE_MAX_COOKIES, /* dma_attr_sgllen */ 61 1, /* dma_attr_granular */ 62 0 63 }; 64 65 static ddi_dma_attr_t sum_dma_attr = { 66 DMA_ATTR_V0, /* dma_attr version */ 67 0x0000000000000000ull, /* dma_attr_addr_lo */ 68 0x00000000FFFFFFFFull, /* dma_attr_addr_hi */ 69 0x000000007FFFFFFFull, /* dma_attr_count_max */ 70 0x0000000000000010ull, /* dma_attr_align */ 71 0x00000FFF, /* dma_attr_burstsizes */ 72 0x00000001, /* dma_attr_minxfer */ 73 0x000000000000FFFFull, /* dma_attr_maxxfer */ 74 0x00000000FFFFFFFFull, /* dma_attr_seg */ 75 1, /* dma_attr_sgllen */ 76 0x00000001, /* dma_attr_granular */ 77 0 78 }; 79 80 static ddi_dma_attr_t sum_tx_dma_attr = { 81 DMA_ATTR_V0, /* dma_attr version */ 82 0x0000000000000000ull, /* dma_attr_addr_lo */ 83 0x00000000FFFFFFFFull, /* dma_attr_addr_hi */ 84 0x0000000000003FFFull, /* dma_attr_count_max */ 85 0x0000000000000010ull, /* dma_attr_align */ 86 0x00000FFF, /* dma_attr_burstsizes */ 87 0x00000001, /* dma_attr_minxfer */ 88 0x0000000000003FFFull, /* dma_attr_maxxfer */ 89 0x00000000FFFFFFFFull, /* dma_attr_seg */ 90 NGE_MAX_COOKIES, /* dma_attr_sgllen */ 91 1, /* dma_attr_granular */ 92 0 93 }; 94 95 /* 96 * DMA access attributes for data. 97 */ 98 ddi_device_acc_attr_t nge_data_accattr = { 99 DDI_DEVICE_ATTR_V0, 100 DDI_STRUCTURE_LE_ACC, 101 DDI_STRICTORDER_ACC, 102 DDI_DEFAULT_ACC 103 }; 104 105 /* 106 * DMA access attributes for descriptors. 107 */ 108 static ddi_device_acc_attr_t nge_desc_accattr = { 109 DDI_DEVICE_ATTR_V0, 110 DDI_STRUCTURE_LE_ACC, 111 DDI_STRICTORDER_ACC, 112 DDI_DEFAULT_ACC 113 }; 114 115 /* 116 * PIO access attributes for registers 117 */ 118 static ddi_device_acc_attr_t nge_reg_accattr = { 119 DDI_DEVICE_ATTR_V0, 120 DDI_STRUCTURE_LE_ACC, 121 DDI_STRICTORDER_ACC, 122 DDI_DEFAULT_ACC 123 }; 124 125 /* 126 * NIC DESC MODE 2 127 */ 128 129 static const nge_desc_attr_t nge_sum_desc = { 130 131 sizeof (sum_rx_bd), 132 sizeof (sum_tx_bd), 133 &sum_dma_attr, 134 &sum_tx_dma_attr, 135 nge_sum_rxd_fill, 136 nge_sum_rxd_check, 137 nge_sum_txd_fill, 138 nge_sum_txd_check, 139 }; 140 141 /* 142 * NIC DESC MODE 3 143 */ 144 145 static const nge_desc_attr_t nge_hot_desc = { 146 147 sizeof (hot_rx_bd), 148 sizeof (hot_tx_bd), 149 &hot_dma_attr, 150 &hot_tx_dma_attr, 151 nge_hot_rxd_fill, 152 nge_hot_rxd_check, 153 nge_hot_txd_fill, 154 nge_hot_txd_check, 155 }; 156 157 static char nge_ident[] = "nVidia 1Gb Ethernet %I%"; 158 static char clsize_propname[] = "cache-line-size"; 159 static char latency_propname[] = "latency-timer"; 160 static char debug_propname[] = "nge-debug-flags"; 161 static char intr_moderation[] = "intr-moderation"; 162 static char rx_data_hw[] = "rx-data-hw"; 163 static char rx_prd_lw[] = "rx-prd-lw"; 164 static char rx_prd_hw[] = "rx-prd-hw"; 165 static char sw_intr_intv[] = "sw-intr-intvl"; 166 static char nge_desc_mode[] = "desc-mode"; 167 static char default_mtu[] = "default_mtu"; 168 static char low_memory_mode[] = "minimal-memory-usage"; 169 extern kmutex_t nge_log_mutex[1]; 170 171 static int nge_m_start(void *); 172 static void nge_m_stop(void *); 173 static int nge_m_promisc(void *, boolean_t); 174 static int nge_m_multicst(void *, boolean_t, const uint8_t *); 175 static int nge_m_unicst(void *, const uint8_t *); 176 static void nge_m_ioctl(void *, queue_t *, mblk_t *); 177 static boolean_t nge_m_getcapab(void *, mac_capab_t, void *); 178 static int nge_m_setprop(void *, const char *, mac_prop_id_t, 179 uint_t, const void *); 180 static int nge_m_getprop(void *, const char *, mac_prop_id_t, 181 uint_t, void *); 182 static int nge_set_priv_prop(nge_t *, const char *, uint_t, 183 const void *); 184 static int nge_get_priv_prop(nge_t *, const char *, uint_t, 185 void *); 186 187 #define NGE_M_CALLBACK_FLAGS\ 188 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 189 190 static mac_callbacks_t nge_m_callbacks = { 191 NGE_M_CALLBACK_FLAGS, 192 nge_m_stat, 193 nge_m_start, 194 nge_m_stop, 195 nge_m_promisc, 196 nge_m_multicst, 197 nge_m_unicst, 198 nge_m_tx, 199 NULL, 200 nge_m_ioctl, 201 nge_m_getcapab, 202 NULL, 203 NULL, 204 nge_m_setprop, 205 nge_m_getprop 206 }; 207 208 static int nge_add_intrs(nge_t *, int); 209 static void nge_rem_intrs(nge_t *); 210 static int nge_register_intrs_and_init_locks(nge_t *); 211 212 /* 213 * NGE MSI tunable: 214 */ 215 boolean_t nge_enable_msi = B_FALSE; 216 217 static enum ioc_reply 218 nge_set_loop_mode(nge_t *ngep, uint32_t mode) 219 { 220 /* 221 * If the mode isn't being changed, there's nothing to do ... 222 */ 223 if (mode == ngep->param_loop_mode) 224 return (IOC_ACK); 225 226 /* 227 * Validate the requested mode and prepare a suitable message 228 * to explain the link down/up cycle that the change will 229 * probably induce ... 230 */ 231 switch (mode) { 232 default: 233 return (IOC_INVAL); 234 235 case NGE_LOOP_NONE: 236 case NGE_LOOP_EXTERNAL_100: 237 case NGE_LOOP_EXTERNAL_10: 238 case NGE_LOOP_INTERNAL_PHY: 239 break; 240 } 241 242 /* 243 * All OK; tell the caller to reprogram 244 * the PHY and/or MAC for the new mode ... 245 */ 246 ngep->param_loop_mode = mode; 247 return (IOC_RESTART_ACK); 248 } 249 250 #undef NGE_DBG 251 #define NGE_DBG NGE_DBG_INIT 252 253 /* 254 * Utility routine to carve a slice off a chunk of allocated memory, 255 * updating the chunk descriptor accordingly. The size of the slice 256 * is given by the product of the <qty> and <size> parameters. 257 */ 258 void 259 nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk, 260 uint32_t qty, uint32_t size) 261 { 262 size_t totsize; 263 264 totsize = qty*size; 265 ASSERT(size > 0); 266 ASSERT(totsize <= chunk->alength); 267 268 *slice = *chunk; 269 slice->nslots = qty; 270 slice->size = size; 271 slice->alength = totsize; 272 273 chunk->mem_va = (caddr_t)chunk->mem_va + totsize; 274 chunk->alength -= totsize; 275 chunk->offset += totsize; 276 chunk->cookie.dmac_laddress += totsize; 277 chunk->cookie.dmac_size -= totsize; 278 } 279 280 /* 281 * Allocate an area of memory and a DMA handle for accessing it 282 */ 283 int 284 nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p, 285 uint_t dma_flags, dma_area_t *dma_p) 286 { 287 int err; 288 caddr_t va; 289 290 NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)", 291 (void *)ngep, memsize, attr_p, dma_flags, dma_p)); 292 /* 293 * Allocate handle 294 */ 295 err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr, 296 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl); 297 if (err != DDI_SUCCESS) 298 goto fail; 299 300 /* 301 * Allocate memory 302 */ 303 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p, 304 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING), 305 DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl); 306 if (err != DDI_SUCCESS) 307 goto fail; 308 309 /* 310 * Bind the two together 311 */ 312 dma_p->mem_va = va; 313 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 314 va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL, 315 &dma_p->cookie, &dma_p->ncookies); 316 317 if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) 318 goto fail; 319 320 dma_p->nslots = ~0U; 321 dma_p->size = ~0U; 322 dma_p->offset = 0; 323 324 return (DDI_SUCCESS); 325 326 fail: 327 nge_free_dma_mem(dma_p); 328 NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!")); 329 330 return (DDI_FAILURE); 331 } 332 333 /* 334 * Free one allocated area of DMAable memory 335 */ 336 void 337 nge_free_dma_mem(dma_area_t *dma_p) 338 { 339 if (dma_p->dma_hdl != NULL) { 340 if (dma_p->ncookies) { 341 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 342 dma_p->ncookies = 0; 343 } 344 } 345 if (dma_p->acc_hdl != NULL) { 346 ddi_dma_mem_free(&dma_p->acc_hdl); 347 dma_p->acc_hdl = NULL; 348 } 349 if (dma_p->dma_hdl != NULL) { 350 ddi_dma_free_handle(&dma_p->dma_hdl); 351 dma_p->dma_hdl = NULL; 352 } 353 } 354 355 #define ALLOC_TX_BUF 0x1 356 #define ALLOC_TX_DESC 0x2 357 #define ALLOC_RX_DESC 0x4 358 359 int 360 nge_alloc_bufs(nge_t *ngep) 361 { 362 int err; 363 int split; 364 int progress; 365 size_t txbuffsize; 366 size_t rxdescsize; 367 size_t txdescsize; 368 369 txbuffsize = ngep->tx_desc * ngep->buf_size; 370 rxdescsize = ngep->rx_desc; 371 txdescsize = ngep->tx_desc; 372 rxdescsize *= ngep->desc_attr.rxd_size; 373 txdescsize *= ngep->desc_attr.txd_size; 374 progress = 0; 375 376 NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep)); 377 /* 378 * Allocate memory & handles for TX buffers 379 */ 380 ASSERT((txbuffsize % ngep->nge_split) == 0); 381 for (split = 0; split < ngep->nge_split; ++split) { 382 err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split, 383 &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE, 384 &ngep->send->buf[split]); 385 if (err != DDI_SUCCESS) 386 goto fail; 387 } 388 389 progress |= ALLOC_TX_BUF; 390 391 /* 392 * Allocate memory & handles for receive return rings and 393 * buffer (producer) descriptor rings 394 */ 395 err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr, 396 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc); 397 if (err != DDI_SUCCESS) 398 goto fail; 399 progress |= ALLOC_RX_DESC; 400 401 /* 402 * Allocate memory & handles for TX descriptor rings, 403 */ 404 err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr, 405 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc); 406 if (err != DDI_SUCCESS) 407 goto fail; 408 return (DDI_SUCCESS); 409 410 fail: 411 if (progress & ALLOC_RX_DESC) 412 nge_free_dma_mem(&ngep->recv->desc); 413 if (progress & ALLOC_TX_BUF) { 414 for (split = 0; split < ngep->nge_split; ++split) 415 nge_free_dma_mem(&ngep->send->buf[split]); 416 } 417 418 return (DDI_FAILURE); 419 } 420 421 /* 422 * This routine frees the transmit and receive buffers and descriptors. 423 * Make sure the chip is stopped before calling it! 424 */ 425 void 426 nge_free_bufs(nge_t *ngep) 427 { 428 int split; 429 430 NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep)); 431 432 nge_free_dma_mem(&ngep->recv->desc); 433 nge_free_dma_mem(&ngep->send->desc); 434 435 for (split = 0; split < ngep->nge_split; ++split) 436 nge_free_dma_mem(&ngep->send->buf[split]); 437 } 438 439 /* 440 * Clean up initialisation done above before the memory is freed 441 */ 442 static void 443 nge_fini_send_ring(nge_t *ngep) 444 { 445 uint32_t slot; 446 size_t dmah_num; 447 send_ring_t *srp; 448 sw_tx_sbd_t *ssbdp; 449 450 srp = ngep->send; 451 ssbdp = srp->sw_sbds; 452 453 NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep)); 454 455 dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]); 456 457 for (slot = 0; slot < dmah_num; ++slot) { 458 if (srp->dmahndl[slot].hndl) { 459 (void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl); 460 ddi_dma_free_handle(&srp->dmahndl[slot].hndl); 461 srp->dmahndl[slot].hndl = NULL; 462 srp->dmahndl[slot].next = NULL; 463 } 464 } 465 466 srp->dmah_free.head = NULL; 467 srp->dmah_free.tail = NULL; 468 469 kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp)); 470 471 } 472 473 /* 474 * Initialise the specified Send Ring, using the information in the 475 * <dma_area> descriptors that it contains to set up all the other 476 * fields. This routine should be called only once for each ring. 477 */ 478 static int 479 nge_init_send_ring(nge_t *ngep) 480 { 481 size_t dmah_num; 482 uint32_t nslots; 483 uint32_t err; 484 uint32_t slot; 485 uint32_t split; 486 send_ring_t *srp; 487 sw_tx_sbd_t *ssbdp; 488 dma_area_t desc; 489 dma_area_t pbuf; 490 491 srp = ngep->send; 492 srp->desc.nslots = ngep->tx_desc; 493 nslots = srp->desc.nslots; 494 495 NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep)); 496 /* 497 * Other one-off initialisation of per-ring data 498 */ 499 srp->ngep = ngep; 500 501 /* 502 * Allocate the array of s/w Send Buffer Descriptors 503 */ 504 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 505 srp->sw_sbds = ssbdp; 506 507 /* 508 * Now initialise each array element once and for all 509 */ 510 desc = srp->desc; 511 for (split = 0; split < ngep->nge_split; ++split) { 512 pbuf = srp->buf[split]; 513 for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) { 514 nge_slice_chunk(&ssbdp->desc, &desc, 1, 515 ngep->desc_attr.txd_size); 516 nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1, 517 ngep->buf_size); 518 } 519 ASSERT(pbuf.alength == 0); 520 } 521 ASSERT(desc.alength == 0); 522 523 dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]); 524 525 /* preallocate dma handles for tx buffer */ 526 for (slot = 0; slot < dmah_num; ++slot) { 527 528 err = ddi_dma_alloc_handle(ngep->devinfo, 529 ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT, 530 NULL, &srp->dmahndl[slot].hndl); 531 532 if (err != DDI_SUCCESS) { 533 nge_fini_send_ring(ngep); 534 nge_error(ngep, 535 "nge_init_send_ring: alloc dma handle fails"); 536 return (DDI_FAILURE); 537 } 538 srp->dmahndl[slot].next = srp->dmahndl + slot + 1; 539 } 540 541 srp->dmah_free.head = srp->dmahndl; 542 srp->dmah_free.tail = srp->dmahndl + dmah_num - 1; 543 srp->dmah_free.tail->next = NULL; 544 545 return (DDI_SUCCESS); 546 } 547 548 /* 549 * Intialize the tx recycle pointer and tx sending pointer of tx ring 550 * and set the type of tx's data descriptor by default. 551 */ 552 static void 553 nge_reinit_send_ring(nge_t *ngep) 554 { 555 size_t dmah_num; 556 uint32_t slot; 557 send_ring_t *srp; 558 sw_tx_sbd_t *ssbdp; 559 560 srp = ngep->send; 561 562 /* 563 * Reinitialise control variables ... 564 */ 565 566 srp->tx_hwmark = NGE_DESC_MIN; 567 srp->tx_lwmark = NGE_DESC_MIN; 568 569 srp->tx_next = 0; 570 srp->tx_free = srp->desc.nslots; 571 srp->tc_next = 0; 572 573 dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]); 574 575 for (slot = 0; slot - dmah_num != 0; ++slot) 576 srp->dmahndl[slot].next = srp->dmahndl + slot + 1; 577 578 srp->dmah_free.head = srp->dmahndl; 579 srp->dmah_free.tail = srp->dmahndl + dmah_num - 1; 580 srp->dmah_free.tail->next = NULL; 581 582 /* 583 * Zero and sync all the h/w Send Buffer Descriptors 584 */ 585 for (slot = 0; slot < srp->desc.nslots; ++slot) { 586 ssbdp = &srp->sw_sbds[slot]; 587 ssbdp->flags = HOST_OWN; 588 } 589 590 DMA_ZERO(srp->desc); 591 DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV); 592 } 593 594 /* 595 * Initialize the slot number of rx's ring 596 */ 597 static void 598 nge_init_recv_ring(nge_t *ngep) 599 { 600 recv_ring_t *rrp; 601 602 rrp = ngep->recv; 603 rrp->desc.nslots = ngep->rx_desc; 604 rrp->ngep = ngep; 605 } 606 607 /* 608 * Intialize the rx recycle pointer and rx sending pointer of rx ring 609 */ 610 static void 611 nge_reinit_recv_ring(nge_t *ngep) 612 { 613 recv_ring_t *rrp; 614 615 rrp = ngep->recv; 616 617 /* 618 * Reinitialise control variables ... 619 */ 620 rrp->prod_index = 0; 621 /* 622 * Zero and sync all the h/w Send Buffer Descriptors 623 */ 624 DMA_ZERO(rrp->desc); 625 DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV); 626 } 627 628 /* 629 * Clean up initialisation done above before the memory is freed 630 */ 631 static void 632 nge_fini_buff_ring(nge_t *ngep) 633 { 634 uint32_t i; 635 buff_ring_t *brp; 636 dma_area_t *bufp; 637 sw_rx_sbd_t *bsbdp; 638 639 brp = ngep->buff; 640 bsbdp = brp->sw_rbds; 641 642 NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep)); 643 644 mutex_enter(brp->recycle_lock); 645 brp->buf_sign++; 646 mutex_exit(brp->recycle_lock); 647 for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) { 648 if (bsbdp->bufp) { 649 if (bsbdp->bufp->mp) 650 freemsg(bsbdp->bufp->mp); 651 nge_free_dma_mem(bsbdp->bufp); 652 kmem_free(bsbdp->bufp, sizeof (dma_area_t)); 653 bsbdp->bufp = NULL; 654 } 655 } 656 while (brp->free_list != NULL) { 657 bufp = brp->free_list; 658 brp->free_list = bufp->next; 659 bufp->next = NULL; 660 if (bufp->mp) 661 freemsg(bufp->mp); 662 nge_free_dma_mem(bufp); 663 kmem_free(bufp, sizeof (dma_area_t)); 664 } 665 while (brp->recycle_list != NULL) { 666 bufp = brp->recycle_list; 667 brp->recycle_list = bufp->next; 668 bufp->next = NULL; 669 if (bufp->mp) 670 freemsg(bufp->mp); 671 nge_free_dma_mem(bufp); 672 kmem_free(bufp, sizeof (dma_area_t)); 673 } 674 675 676 kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp))); 677 brp->sw_rbds = NULL; 678 } 679 680 /* 681 * Intialize the Rx's data ring and free ring 682 */ 683 static int 684 nge_init_buff_ring(nge_t *ngep) 685 { 686 uint32_t err; 687 uint32_t slot; 688 uint32_t nslots_buff; 689 uint32_t nslots_recv; 690 buff_ring_t *brp; 691 recv_ring_t *rrp; 692 dma_area_t desc; 693 dma_area_t *bufp; 694 sw_rx_sbd_t *bsbdp; 695 696 rrp = ngep->recv; 697 brp = ngep->buff; 698 brp->nslots = ngep->rx_buf; 699 brp->rx_bcopy = B_FALSE; 700 nslots_recv = rrp->desc.nslots; 701 nslots_buff = brp->nslots; 702 brp->ngep = ngep; 703 704 NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep)); 705 706 /* 707 * Allocate the array of s/w Recv Buffer Descriptors 708 */ 709 bsbdp = kmem_zalloc(nslots_recv *sizeof (*bsbdp), KM_SLEEP); 710 brp->sw_rbds = bsbdp; 711 brp->free_list = NULL; 712 brp->recycle_list = NULL; 713 for (slot = 0; slot < nslots_buff; ++slot) { 714 bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP); 715 err = nge_alloc_dma_mem(ngep, (ngep->buf_size 716 + NGE_HEADROOM), 717 &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp); 718 if (err != DDI_SUCCESS) { 719 kmem_free(bufp, sizeof (dma_area_t)); 720 return (DDI_FAILURE); 721 } 722 723 bufp->alength -= NGE_HEADROOM; 724 bufp->offset += NGE_HEADROOM; 725 bufp->private = (caddr_t)ngep; 726 bufp->rx_recycle.free_func = nge_recv_recycle; 727 bufp->rx_recycle.free_arg = (caddr_t)bufp; 728 bufp->signature = brp->buf_sign; 729 bufp->rx_delivered = B_FALSE; 730 bufp->mp = desballoc(DMA_VPTR(*bufp), 731 ngep->buf_size + NGE_HEADROOM, 732 0, &bufp->rx_recycle); 733 734 if (bufp->mp == NULL) { 735 return (DDI_FAILURE); 736 } 737 bufp->next = brp->free_list; 738 brp->free_list = bufp; 739 } 740 741 /* 742 * Now initialise each array element once and for all 743 */ 744 desc = rrp->desc; 745 for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) { 746 nge_slice_chunk(&bsbdp->desc, &desc, 1, 747 ngep->desc_attr.rxd_size); 748 bufp = brp->free_list; 749 brp->free_list = bufp->next; 750 bsbdp->bufp = bufp; 751 bsbdp->flags = CONTROLER_OWN; 752 bufp->next = NULL; 753 } 754 755 ASSERT(desc.alength == 0); 756 return (DDI_SUCCESS); 757 } 758 759 /* 760 * Fill the host address of data in rx' descriptor 761 * and initialize free pointers of rx free ring 762 */ 763 static int 764 nge_reinit_buff_ring(nge_t *ngep) 765 { 766 uint32_t slot; 767 uint32_t nslots_recv; 768 buff_ring_t *brp; 769 recv_ring_t *rrp; 770 sw_rx_sbd_t *bsbdp; 771 void *hw_bd_p; 772 773 brp = ngep->buff; 774 rrp = ngep->recv; 775 bsbdp = brp->sw_rbds; 776 nslots_recv = rrp->desc.nslots; 777 for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) { 778 hw_bd_p = DMA_VPTR(bsbdp->desc); 779 /* 780 * There is a scenario: When the traffic of small tcp 781 * packet is heavy, suspending the tcp traffic will 782 * cause the preallocated buffers for rx not to be 783 * released in time by tcp taffic and cause rx's buffer 784 * pointers not to be refilled in time. 785 * 786 * At this point, if we reinitialize the driver, the bufp 787 * pointer for rx's traffic will be NULL. 788 * So the result of the reinitializion fails. 789 */ 790 if (bsbdp->bufp == NULL) 791 return (DDI_FAILURE); 792 793 ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie, 794 bsbdp->bufp->alength); 795 } 796 return (DDI_SUCCESS); 797 } 798 799 static void 800 nge_init_ring_param_lock(nge_t *ngep) 801 { 802 buff_ring_t *brp; 803 send_ring_t *srp; 804 805 srp = ngep->send; 806 brp = ngep->buff; 807 808 /* Init the locks for send ring */ 809 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 810 DDI_INTR_PRI(ngep->intr_pri)); 811 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 812 DDI_INTR_PRI(ngep->intr_pri)); 813 mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER, 814 DDI_INTR_PRI(ngep->intr_pri)); 815 816 /* Init parameters of buffer ring */ 817 brp->free_list = NULL; 818 brp->recycle_list = NULL; 819 brp->rx_hold = 0; 820 brp->buf_sign = 0; 821 822 /* Init recycle list lock */ 823 mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER, 824 DDI_INTR_PRI(ngep->intr_pri)); 825 } 826 827 int 828 nge_init_rings(nge_t *ngep) 829 { 830 uint32_t err; 831 832 err = nge_init_send_ring(ngep); 833 if (err != DDI_SUCCESS) { 834 return (err); 835 } 836 nge_init_recv_ring(ngep); 837 838 err = nge_init_buff_ring(ngep); 839 if (err != DDI_SUCCESS) { 840 nge_fini_send_ring(ngep); 841 return (DDI_FAILURE); 842 } 843 844 return (err); 845 } 846 847 static int 848 nge_reinit_ring(nge_t *ngep) 849 { 850 int err; 851 852 nge_reinit_recv_ring(ngep); 853 nge_reinit_send_ring(ngep); 854 err = nge_reinit_buff_ring(ngep); 855 return (err); 856 } 857 858 859 void 860 nge_fini_rings(nge_t *ngep) 861 { 862 /* 863 * For receive ring, nothing need to be finished. 864 * So only finish buffer ring and send ring here. 865 */ 866 nge_fini_buff_ring(ngep); 867 nge_fini_send_ring(ngep); 868 } 869 870 /* 871 * Loopback ioctl code 872 */ 873 874 static lb_property_t loopmodes[] = { 875 { normal, "normal", NGE_LOOP_NONE }, 876 { external, "100Mbps", NGE_LOOP_EXTERNAL_100 }, 877 { external, "10Mbps", NGE_LOOP_EXTERNAL_10 }, 878 { internal, "PHY", NGE_LOOP_INTERNAL_PHY }, 879 }; 880 881 enum ioc_reply 882 nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp) 883 { 884 int cmd; 885 uint32_t *lbmp; 886 lb_info_sz_t *lbsp; 887 lb_property_t *lbpp; 888 889 /* 890 * Validate format of ioctl 891 */ 892 if (mp->b_cont == NULL) 893 return (IOC_INVAL); 894 895 cmd = iocp->ioc_cmd; 896 897 switch (cmd) { 898 default: 899 return (IOC_INVAL); 900 901 case LB_GET_INFO_SIZE: 902 if (iocp->ioc_count != sizeof (lb_info_sz_t)) 903 return (IOC_INVAL); 904 lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr; 905 *lbsp = sizeof (loopmodes); 906 return (IOC_REPLY); 907 908 case LB_GET_INFO: 909 if (iocp->ioc_count != sizeof (loopmodes)) 910 return (IOC_INVAL); 911 lbpp = (lb_property_t *)mp->b_cont->b_rptr; 912 bcopy(loopmodes, lbpp, sizeof (loopmodes)); 913 return (IOC_REPLY); 914 915 case LB_GET_MODE: 916 if (iocp->ioc_count != sizeof (uint32_t)) 917 return (IOC_INVAL); 918 lbmp = (uint32_t *)mp->b_cont->b_rptr; 919 *lbmp = ngep->param_loop_mode; 920 return (IOC_REPLY); 921 922 case LB_SET_MODE: 923 if (iocp->ioc_count != sizeof (uint32_t)) 924 return (IOC_INVAL); 925 lbmp = (uint32_t *)mp->b_cont->b_rptr; 926 return (nge_set_loop_mode(ngep, *lbmp)); 927 } 928 } 929 930 #undef NGE_DBG 931 #define NGE_DBG NGE_DBG_NEMO 932 933 934 static void 935 nge_check_desc_prop(nge_t *ngep) 936 { 937 if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD) 938 ngep->desc_mode = DESC_HOT; 939 940 if (ngep->desc_mode == DESC_OFFLOAD) { 941 942 ngep->desc_attr = nge_sum_desc; 943 944 } else if (ngep->desc_mode == DESC_HOT) { 945 946 ngep->desc_attr = nge_hot_desc; 947 } 948 } 949 950 /* 951 * nge_get_props -- get the parameters to tune the driver 952 */ 953 static void 954 nge_get_props(nge_t *ngep) 955 { 956 chip_info_t *infop; 957 dev_info_t *devinfo; 958 nge_dev_spec_param_t *dev_param_p; 959 960 devinfo = ngep->devinfo; 961 infop = (chip_info_t *)&ngep->chipinfo; 962 dev_param_p = &ngep->dev_spec_param; 963 964 infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 965 DDI_PROP_DONTPASS, clsize_propname, 32); 966 967 infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 968 DDI_PROP_DONTPASS, latency_propname, 64); 969 ngep->intr_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 970 DDI_PROP_DONTPASS, intr_moderation, NGE_SET); 971 ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 972 DDI_PROP_DONTPASS, rx_data_hw, 0x20); 973 ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 974 DDI_PROP_DONTPASS, rx_prd_lw, 0x4); 975 ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 976 DDI_PROP_DONTPASS, rx_prd_hw, 0xc); 977 978 ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 979 DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC); 980 ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 981 DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP); 982 ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 983 DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type); 984 ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 985 DDI_PROP_DONTPASS, low_memory_mode, 0); 986 987 if (dev_param_p->jumbo) { 988 ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 989 DDI_PROP_DONTPASS, default_mtu, ETHERMTU); 990 } else 991 ngep->default_mtu = ETHERMTU; 992 993 if (ngep->default_mtu > ETHERMTU && 994 ngep->default_mtu <= NGE_MTU_2500) { 995 ngep->buf_size = NGE_JB2500_BUFSZ; 996 ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC; 997 ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC; 998 ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2; 999 ngep->nge_split = NGE_SPLIT_256; 1000 } else if (ngep->default_mtu > NGE_MTU_2500 && 1001 ngep->default_mtu <= NGE_MTU_4500) { 1002 ngep->buf_size = NGE_JB4500_BUFSZ; 1003 ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC; 1004 ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC; 1005 ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2; 1006 ngep->nge_split = NGE_SPLIT_256; 1007 } else if (ngep->default_mtu > NGE_MTU_4500 && 1008 ngep->default_mtu <= NGE_MAX_MTU) { 1009 ngep->buf_size = NGE_JB9000_BUFSZ; 1010 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC; 1011 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC; 1012 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2; 1013 ngep->nge_split = NGE_SPLIT_256; 1014 } else if (ngep->default_mtu > NGE_MAX_MTU) { 1015 ngep->default_mtu = NGE_MAX_MTU; 1016 ngep->buf_size = NGE_JB9000_BUFSZ; 1017 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC; 1018 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC; 1019 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2; 1020 ngep->nge_split = NGE_SPLIT_256; 1021 } else if (ngep->lowmem_mode != 0) { 1022 ngep->default_mtu = ETHERMTU; 1023 ngep->buf_size = NGE_STD_BUFSZ; 1024 ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC; 1025 ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC; 1026 ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2; 1027 ngep->nge_split = NGE_SPLIT_32; 1028 } else { 1029 ngep->default_mtu = ETHERMTU; 1030 ngep->buf_size = NGE_STD_BUFSZ; 1031 ngep->tx_desc = dev_param_p->tx_desc_num; 1032 ngep->rx_desc = dev_param_p->rx_desc_num; 1033 ngep->rx_buf = dev_param_p->rx_desc_num * 2; 1034 ngep->nge_split = dev_param_p->nge_split; 1035 } 1036 1037 nge_check_desc_prop(ngep); 1038 } 1039 1040 1041 static int 1042 nge_reset(nge_t *ngep) 1043 { 1044 int err; 1045 nge_mul_addr1 maddr1; 1046 nge_sw_statistics_t *sw_stp; 1047 sw_stp = &ngep->statistics.sw_statistics; 1048 send_ring_t *srp = ngep->send; 1049 1050 ASSERT(mutex_owned(ngep->genlock)); 1051 mutex_enter(srp->tc_lock); 1052 mutex_enter(srp->tx_lock); 1053 1054 nge_tx_recycle_all(ngep); 1055 err = nge_reinit_ring(ngep); 1056 if (err == DDI_FAILURE) { 1057 mutex_exit(srp->tx_lock); 1058 mutex_exit(srp->tc_lock); 1059 return (err); 1060 } 1061 err = nge_chip_reset(ngep); 1062 /* 1063 * Clear the Multicast mac address table 1064 */ 1065 nge_reg_put32(ngep, NGE_MUL_ADDR0, 0); 1066 maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1); 1067 maddr1.addr_bits.addr = 0; 1068 nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val); 1069 1070 mutex_exit(srp->tx_lock); 1071 mutex_exit(srp->tc_lock); 1072 if (err == DDI_FAILURE) 1073 return (err); 1074 ngep->watchdog = 0; 1075 ngep->resched_needed = B_FALSE; 1076 ngep->promisc = B_FALSE; 1077 ngep->param_loop_mode = NGE_LOOP_NONE; 1078 ngep->factotum_flag = 0; 1079 ngep->resched_needed = 0; 1080 ngep->nge_mac_state = NGE_MAC_RESET; 1081 ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL; 1082 ngep->max_sdu += VTAG_SIZE; 1083 ngep->rx_def = 0x16; 1084 1085 /* Clear the software statistics */ 1086 sw_stp->recv_count = 0; 1087 sw_stp->xmit_count = 0; 1088 sw_stp->rbytes = 0; 1089 sw_stp->obytes = 0; 1090 1091 return (DDI_SUCCESS); 1092 } 1093 1094 static void 1095 nge_m_stop(void *arg) 1096 { 1097 nge_t *ngep = arg; /* private device info */ 1098 1099 NGE_TRACE(("nge_m_stop($%p)", arg)); 1100 1101 /* 1102 * Just stop processing, then record new MAC state 1103 */ 1104 mutex_enter(ngep->genlock); 1105 /* If suspended, the adapter is already stopped, just return. */ 1106 if (ngep->suspended) { 1107 ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED); 1108 mutex_exit(ngep->genlock); 1109 return; 1110 } 1111 rw_enter(ngep->rwlock, RW_WRITER); 1112 1113 (void) nge_chip_stop(ngep, B_FALSE); 1114 ngep->nge_mac_state = NGE_MAC_STOPPED; 1115 1116 /* Recycle all the TX BD */ 1117 nge_tx_recycle_all(ngep); 1118 nge_fini_rings(ngep); 1119 nge_free_bufs(ngep); 1120 1121 NGE_DEBUG(("nge_m_stop($%p) done", arg)); 1122 1123 rw_exit(ngep->rwlock); 1124 mutex_exit(ngep->genlock); 1125 } 1126 1127 static int 1128 nge_m_start(void *arg) 1129 { 1130 int err; 1131 nge_t *ngep = arg; 1132 1133 NGE_TRACE(("nge_m_start($%p)", arg)); 1134 1135 /* 1136 * Start processing and record new MAC state 1137 */ 1138 mutex_enter(ngep->genlock); 1139 /* 1140 * If suspended, don't start, as the resume processing 1141 * will recall this function with the suspended flag off. 1142 */ 1143 if (ngep->suspended) { 1144 mutex_exit(ngep->genlock); 1145 return (EIO); 1146 } 1147 rw_enter(ngep->rwlock, RW_WRITER); 1148 err = nge_alloc_bufs(ngep); 1149 if (err != DDI_SUCCESS) { 1150 nge_problem(ngep, "nge_m_start: DMA buffer allocation failed"); 1151 goto finish; 1152 } 1153 err = nge_init_rings(ngep); 1154 if (err != DDI_SUCCESS) { 1155 nge_free_bufs(ngep); 1156 nge_problem(ngep, "nge_init_rings() failed,err=%x", err); 1157 goto finish; 1158 } 1159 err = nge_restart(ngep); 1160 1161 NGE_DEBUG(("nge_m_start($%p) done", arg)); 1162 finish: 1163 rw_exit(ngep->rwlock); 1164 mutex_exit(ngep->genlock); 1165 1166 return (err == DDI_SUCCESS ? 0 : EIO); 1167 } 1168 1169 static int 1170 nge_m_unicst(void *arg, const uint8_t *macaddr) 1171 { 1172 nge_t *ngep = arg; 1173 1174 NGE_TRACE(("nge_m_unicst($%p)", arg)); 1175 /* 1176 * Remember the new current address in the driver state 1177 * Sync the chip's idea of the address too ... 1178 */ 1179 mutex_enter(ngep->genlock); 1180 1181 ethaddr_copy(macaddr, ngep->cur_uni_addr.addr); 1182 ngep->cur_uni_addr.set = 1; 1183 1184 /* 1185 * If we are suspended, we want to quit now, and not update 1186 * the chip. Doing so might put it in a bad state, but the 1187 * resume will get the unicast address installed. 1188 */ 1189 if (ngep->suspended) { 1190 mutex_exit(ngep->genlock); 1191 return (DDI_SUCCESS); 1192 } 1193 nge_chip_sync(ngep); 1194 1195 NGE_DEBUG(("nge_m_unicst($%p) done", arg)); 1196 mutex_exit(ngep->genlock); 1197 1198 return (0); 1199 } 1200 1201 static int 1202 nge_m_promisc(void *arg, boolean_t on) 1203 { 1204 nge_t *ngep = arg; 1205 1206 NGE_TRACE(("nge_m_promisc($%p)", arg)); 1207 1208 /* 1209 * Store specified mode and pass to chip layer to update h/w 1210 */ 1211 mutex_enter(ngep->genlock); 1212 /* 1213 * If suspended, there is no need to do anything, even 1214 * recording the promiscuious mode is not neccessary, as 1215 * it won't be properly set on resume. Just return failing. 1216 */ 1217 if (ngep->suspended) { 1218 mutex_exit(ngep->genlock); 1219 return (DDI_FAILURE); 1220 } 1221 if (ngep->promisc == on) { 1222 mutex_exit(ngep->genlock); 1223 NGE_DEBUG(("nge_m_promisc($%p) done", arg)); 1224 return (0); 1225 } 1226 ngep->promisc = on; 1227 nge_chip_sync(ngep); 1228 NGE_DEBUG(("nge_m_promisc($%p) done", arg)); 1229 mutex_exit(ngep->genlock); 1230 1231 return (0); 1232 } 1233 1234 static void nge_mulparam(nge_t *ngep) 1235 { 1236 uint8_t number; 1237 ether_addr_t pand; 1238 ether_addr_t por; 1239 mul_item *plist; 1240 1241 for (number = 0; number < ETHERADDRL; number++) { 1242 pand[number] = 0x00; 1243 por[number] = 0x00; 1244 } 1245 for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) { 1246 for (number = 0; number < ETHERADDRL; number++) { 1247 pand[number] &= plist->mul_addr[number]; 1248 por[number] |= plist->mul_addr[number]; 1249 } 1250 } 1251 for (number = 0; number < ETHERADDRL; number++) { 1252 ngep->cur_mul_addr.addr[number] 1253 = pand[number] & por[number]; 1254 ngep->cur_mul_mask.addr[number] 1255 = pand [number] | (~por[number]); 1256 } 1257 } 1258 static int 1259 nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1260 { 1261 boolean_t update; 1262 boolean_t b_eq; 1263 nge_t *ngep = arg; 1264 mul_item *plist; 1265 mul_item *plist_prev; 1266 mul_item *pitem; 1267 1268 NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg, 1269 (add) ? "add" : "remove", ether_sprintf((void *)mca))); 1270 1271 update = B_FALSE; 1272 plist = plist_prev = NULL; 1273 mutex_enter(ngep->genlock); 1274 if (add) { 1275 if (ngep->pcur_mulist != NULL) { 1276 for (plist = ngep->pcur_mulist; plist != NULL; 1277 plist = plist->next) { 1278 b_eq = ether_eq(plist->mul_addr, mca); 1279 if (b_eq) { 1280 plist->ref_cnt++; 1281 break; 1282 } 1283 plist_prev = plist; 1284 } 1285 } 1286 1287 if (plist == NULL) { 1288 pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP); 1289 ether_copy(mca, pitem->mul_addr); 1290 pitem ->ref_cnt++; 1291 pitem ->next = NULL; 1292 if (plist_prev == NULL) 1293 ngep->pcur_mulist = pitem; 1294 else 1295 plist_prev->next = pitem; 1296 update = B_TRUE; 1297 } 1298 } else { 1299 if (ngep->pcur_mulist != NULL) { 1300 for (plist = ngep->pcur_mulist; plist != NULL; 1301 plist = plist->next) { 1302 b_eq = ether_eq(plist->mul_addr, mca); 1303 if (b_eq) { 1304 update = B_TRUE; 1305 break; 1306 } 1307 plist_prev = plist; 1308 } 1309 1310 if (update) { 1311 if ((plist_prev == NULL) && 1312 (plist->next == NULL)) 1313 ngep->pcur_mulist = NULL; 1314 else if ((plist_prev == NULL) && 1315 (plist->next != NULL)) 1316 ngep->pcur_mulist = plist->next; 1317 else 1318 plist_prev->next = plist->next; 1319 kmem_free(plist, sizeof (mul_item)); 1320 } 1321 } 1322 } 1323 1324 if (update && !ngep->suspended) { 1325 nge_mulparam(ngep); 1326 nge_chip_sync(ngep); 1327 } 1328 NGE_DEBUG(("nge_m_multicst($%p) done", arg)); 1329 mutex_exit(ngep->genlock); 1330 1331 return (0); 1332 } 1333 1334 static void 1335 nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 1336 { 1337 int err; 1338 int cmd; 1339 nge_t *ngep = arg; 1340 struct iocblk *iocp; 1341 enum ioc_reply status; 1342 boolean_t need_privilege; 1343 1344 /* 1345 * If suspended, we might actually be able to do some of 1346 * these ioctls, but it is harder to make sure they occur 1347 * without actually putting the hardware in an undesireable 1348 * state. So just NAK it. 1349 */ 1350 mutex_enter(ngep->genlock); 1351 if (ngep->suspended) { 1352 miocnak(wq, mp, 0, EINVAL); 1353 mutex_exit(ngep->genlock); 1354 return; 1355 } 1356 mutex_exit(ngep->genlock); 1357 1358 /* 1359 * Validate the command before bothering with the mutex ... 1360 */ 1361 iocp = (struct iocblk *)mp->b_rptr; 1362 iocp->ioc_error = 0; 1363 need_privilege = B_TRUE; 1364 cmd = iocp->ioc_cmd; 1365 1366 NGE_DEBUG(("nge_m_ioctl: cmd 0x%x", cmd)); 1367 switch (cmd) { 1368 default: 1369 NGE_LDB(NGE_DBG_BADIOC, 1370 ("nge_m_ioctl: unknown cmd 0x%x", cmd)); 1371 1372 miocnak(wq, mp, 0, EINVAL); 1373 return; 1374 1375 case NGE_MII_READ: 1376 case NGE_MII_WRITE: 1377 case NGE_SEE_READ: 1378 case NGE_SEE_WRITE: 1379 case NGE_DIAG: 1380 case NGE_PEEK: 1381 case NGE_POKE: 1382 case NGE_PHY_RESET: 1383 case NGE_SOFT_RESET: 1384 case NGE_HARD_RESET: 1385 break; 1386 1387 case LB_GET_INFO_SIZE: 1388 case LB_GET_INFO: 1389 case LB_GET_MODE: 1390 need_privilege = B_FALSE; 1391 break; 1392 case LB_SET_MODE: 1393 break; 1394 1395 case ND_GET: 1396 need_privilege = B_FALSE; 1397 break; 1398 case ND_SET: 1399 break; 1400 } 1401 1402 if (need_privilege) { 1403 /* 1404 * Check for specific net_config privilege. 1405 */ 1406 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 1407 if (err != 0) { 1408 NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d", 1409 cmd, err)); 1410 miocnak(wq, mp, 0, err); 1411 return; 1412 } 1413 } 1414 1415 mutex_enter(ngep->genlock); 1416 1417 switch (cmd) { 1418 default: 1419 _NOTE(NOTREACHED) 1420 status = IOC_INVAL; 1421 break; 1422 1423 case NGE_MII_READ: 1424 case NGE_MII_WRITE: 1425 case NGE_SEE_READ: 1426 case NGE_SEE_WRITE: 1427 case NGE_DIAG: 1428 case NGE_PEEK: 1429 case NGE_POKE: 1430 case NGE_PHY_RESET: 1431 case NGE_SOFT_RESET: 1432 case NGE_HARD_RESET: 1433 status = nge_chip_ioctl(ngep, mp, iocp); 1434 break; 1435 1436 case LB_GET_INFO_SIZE: 1437 case LB_GET_INFO: 1438 case LB_GET_MODE: 1439 case LB_SET_MODE: 1440 status = nge_loop_ioctl(ngep, mp, iocp); 1441 break; 1442 1443 case ND_GET: 1444 case ND_SET: 1445 status = nge_nd_ioctl(ngep, wq, mp, iocp); 1446 break; 1447 1448 } 1449 1450 /* 1451 * Do we need to reprogram the PHY and/or the MAC? 1452 * Do it now, while we still have the mutex. 1453 * 1454 * Note: update the PHY first, 'cos it controls the 1455 * speed/duplex parameters that the MAC code uses. 1456 */ 1457 1458 NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status)); 1459 1460 switch (status) { 1461 case IOC_RESTART_REPLY: 1462 case IOC_RESTART_ACK: 1463 (*ngep->physops->phys_update)(ngep); 1464 nge_chip_sync(ngep); 1465 break; 1466 1467 default: 1468 break; 1469 } 1470 1471 mutex_exit(ngep->genlock); 1472 1473 /* 1474 * Finally, decide how to reply 1475 */ 1476 switch (status) { 1477 1478 default: 1479 case IOC_INVAL: 1480 miocnak(wq, mp, 0, iocp->ioc_error == 0 ? 1481 EINVAL : iocp->ioc_error); 1482 break; 1483 1484 case IOC_DONE: 1485 break; 1486 1487 case IOC_RESTART_ACK: 1488 case IOC_ACK: 1489 miocack(wq, mp, 0, 0); 1490 break; 1491 1492 case IOC_RESTART_REPLY: 1493 case IOC_REPLY: 1494 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1495 M_IOCACK : M_IOCNAK; 1496 qreply(wq, mp); 1497 break; 1498 } 1499 } 1500 1501 static boolean_t 1502 nge_param_locked(mac_prop_id_t pr_num) 1503 { 1504 /* 1505 * All adv_* parameters are locked (read-only) while 1506 * the device is in any sort of loopback mode ... 1507 */ 1508 switch (pr_num) { 1509 case DLD_PROP_ADV_1000FDX_CAP: 1510 case DLD_PROP_EN_1000FDX_CAP: 1511 case DLD_PROP_ADV_1000HDX_CAP: 1512 case DLD_PROP_EN_1000HDX_CAP: 1513 case DLD_PROP_ADV_100FDX_CAP: 1514 case DLD_PROP_EN_100FDX_CAP: 1515 case DLD_PROP_ADV_100HDX_CAP: 1516 case DLD_PROP_EN_100HDX_CAP: 1517 case DLD_PROP_ADV_10FDX_CAP: 1518 case DLD_PROP_EN_10FDX_CAP: 1519 case DLD_PROP_ADV_10HDX_CAP: 1520 case DLD_PROP_EN_10HDX_CAP: 1521 case DLD_PROP_AUTONEG: 1522 case DLD_PROP_FLOWCTRL: 1523 return (B_TRUE); 1524 } 1525 return (B_FALSE); 1526 } 1527 1528 /* 1529 * callback functions for set/get of properties 1530 */ 1531 static int 1532 nge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 1533 uint_t pr_valsize, const void *pr_val) 1534 { 1535 nge_t *ngep = barg; 1536 int err = 0; 1537 uint64_t cur_mtu, new_mtu; 1538 link_flowctrl_t fl; 1539 1540 mutex_enter(ngep->genlock); 1541 if (ngep->param_loop_mode != NGE_LOOP_NONE && 1542 nge_param_locked(pr_num)) { 1543 /* 1544 * All adv_* parameters are locked (read-only) 1545 * while the device is in any sort of loopback mode. 1546 */ 1547 mutex_exit(ngep->genlock); 1548 return (EBUSY); 1549 } 1550 switch (pr_num) { 1551 case DLD_PROP_EN_1000FDX_CAP: 1552 ngep->param_en_1000fdx = *(uint8_t *)pr_val; 1553 ngep->param_adv_1000fdx = *(uint8_t *)pr_val; 1554 goto reprogram; 1555 case DLD_PROP_EN_1000HDX_CAP: 1556 ngep->param_en_1000hdx = *(uint8_t *)pr_val; 1557 ngep->param_adv_1000hdx = *(uint8_t *)pr_val; 1558 goto reprogram; 1559 case DLD_PROP_EN_100FDX_CAP: 1560 ngep->param_en_100fdx = *(uint8_t *)pr_val; 1561 ngep->param_adv_100fdx = *(uint8_t *)pr_val; 1562 goto reprogram; 1563 case DLD_PROP_EN_100HDX_CAP: 1564 ngep->param_en_100hdx = *(uint8_t *)pr_val; 1565 ngep->param_adv_100hdx = *(uint8_t *)pr_val; 1566 goto reprogram; 1567 case DLD_PROP_EN_10FDX_CAP: 1568 ngep->param_en_10fdx = *(uint8_t *)pr_val; 1569 ngep->param_adv_10fdx = *(uint8_t *)pr_val; 1570 goto reprogram; 1571 case DLD_PROP_EN_10HDX_CAP: 1572 ngep->param_en_10hdx = *(uint8_t *)pr_val; 1573 ngep->param_adv_10hdx = *(uint8_t *)pr_val; 1574 reprogram: 1575 (*ngep->physops->phys_update)(ngep); 1576 nge_chip_sync(ngep); 1577 break; 1578 1579 case DLD_PROP_ADV_1000FDX_CAP: 1580 case DLD_PROP_ADV_1000HDX_CAP: 1581 case DLD_PROP_ADV_100FDX_CAP: 1582 case DLD_PROP_ADV_100HDX_CAP: 1583 case DLD_PROP_ADV_10FDX_CAP: 1584 case DLD_PROP_ADV_10HDX_CAP: 1585 case DLD_PROP_STATUS: 1586 case DLD_PROP_SPEED: 1587 case DLD_PROP_DUPLEX: 1588 err = ENOTSUP; /* read-only prop. Can't set this */ 1589 break; 1590 case DLD_PROP_AUTONEG: 1591 ngep->param_adv_autoneg = *(uint8_t *)pr_val; 1592 (*ngep->physops->phys_update)(ngep); 1593 nge_chip_sync(ngep); 1594 break; 1595 case DLD_PROP_DEFMTU: 1596 cur_mtu = ngep->default_mtu; 1597 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 1598 if (new_mtu == cur_mtu) { 1599 err = 0; 1600 break; 1601 } 1602 if (new_mtu < ETHERMTU || 1603 new_mtu > NGE_MAX_MTU) { 1604 err = EINVAL; 1605 break; 1606 } 1607 if ((new_mtu > ETHERMTU) && 1608 (!ngep->dev_spec_param.jumbo)) { 1609 err = EINVAL; 1610 break; 1611 } 1612 if (ngep->nge_mac_state == NGE_MAC_STARTED) { 1613 err = EBUSY; 1614 break; 1615 } 1616 1617 ngep->default_mtu = new_mtu; 1618 if (ngep->default_mtu > ETHERMTU && 1619 ngep->default_mtu <= NGE_MTU_2500) { 1620 ngep->buf_size = NGE_JB2500_BUFSZ; 1621 ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC; 1622 ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC; 1623 ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2; 1624 ngep->nge_split = NGE_SPLIT_256; 1625 } else if (ngep->default_mtu > NGE_MTU_2500 && 1626 ngep->default_mtu <= NGE_MTU_4500) { 1627 ngep->buf_size = NGE_JB4500_BUFSZ; 1628 ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC; 1629 ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC; 1630 ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2; 1631 ngep->nge_split = NGE_SPLIT_256; 1632 } else if (ngep->default_mtu > NGE_MTU_4500 && 1633 ngep->default_mtu <= NGE_MAX_MTU) { 1634 ngep->buf_size = NGE_JB9000_BUFSZ; 1635 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC; 1636 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC; 1637 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2; 1638 ngep->nge_split = NGE_SPLIT_256; 1639 } else if (ngep->default_mtu > NGE_MAX_MTU) { 1640 ngep->default_mtu = NGE_MAX_MTU; 1641 ngep->buf_size = NGE_JB9000_BUFSZ; 1642 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC; 1643 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC; 1644 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2; 1645 ngep->nge_split = NGE_SPLIT_256; 1646 } else if (ngep->lowmem_mode != 0) { 1647 ngep->default_mtu = ETHERMTU; 1648 ngep->buf_size = NGE_STD_BUFSZ; 1649 ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC; 1650 ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC; 1651 ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2; 1652 ngep->nge_split = NGE_SPLIT_32; 1653 } else { 1654 ngep->default_mtu = ETHERMTU; 1655 ngep->buf_size = NGE_STD_BUFSZ; 1656 ngep->tx_desc = 1657 ngep->dev_spec_param.tx_desc_num; 1658 ngep->rx_desc = 1659 ngep->dev_spec_param.rx_desc_num; 1660 ngep->rx_buf = 1661 ngep->dev_spec_param.rx_desc_num * 2; 1662 ngep->nge_split = 1663 ngep->dev_spec_param.nge_split; 1664 } 1665 1666 err = mac_maxsdu_update(ngep->mh, ngep->default_mtu); 1667 1668 break; 1669 case DLD_PROP_FLOWCTRL: 1670 bcopy(pr_val, &fl, sizeof (fl)); 1671 switch (fl) { 1672 default: 1673 err = ENOTSUP; 1674 break; 1675 case LINK_FLOWCTRL_NONE: 1676 ngep->param_adv_pause = 0; 1677 ngep->param_adv_asym_pause = 0; 1678 1679 ngep->param_link_rx_pause = B_FALSE; 1680 ngep->param_link_tx_pause = B_FALSE; 1681 break; 1682 case LINK_FLOWCTRL_RX: 1683 if (!((ngep->param_lp_pause == 0) && 1684 (ngep->param_lp_asym_pause == 1))) { 1685 err = EINVAL; 1686 break; 1687 } 1688 ngep->param_adv_pause = 1; 1689 ngep->param_adv_asym_pause = 1; 1690 1691 ngep->param_link_rx_pause = B_TRUE; 1692 ngep->param_link_tx_pause = B_FALSE; 1693 break; 1694 case LINK_FLOWCTRL_TX: 1695 if (!((ngep->param_lp_pause == 1) && 1696 (ngep->param_lp_asym_pause == 1))) { 1697 err = EINVAL; 1698 break; 1699 } 1700 ngep->param_adv_pause = 0; 1701 ngep->param_adv_asym_pause = 1; 1702 1703 ngep->param_link_rx_pause = B_FALSE; 1704 ngep->param_link_tx_pause = B_TRUE; 1705 break; 1706 case LINK_FLOWCTRL_BI: 1707 if (ngep->param_lp_pause != 1) { 1708 err = EINVAL; 1709 break; 1710 } 1711 ngep->param_adv_pause = 1; 1712 1713 ngep->param_link_rx_pause = B_TRUE; 1714 ngep->param_link_tx_pause = B_TRUE; 1715 break; 1716 } 1717 1718 if (err == 0) { 1719 (*ngep->physops->phys_update)(ngep); 1720 nge_chip_sync(ngep); 1721 } 1722 1723 break; 1724 case DLD_PROP_PRIVATE: 1725 err = nge_set_priv_prop(ngep, pr_name, pr_valsize, 1726 pr_val); 1727 if (err == 0) { 1728 (*ngep->physops->phys_update)(ngep); 1729 nge_chip_sync(ngep); 1730 } 1731 break; 1732 default: 1733 err = ENOTSUP; 1734 } 1735 mutex_exit(ngep->genlock); 1736 return (err); 1737 } 1738 1739 static int 1740 nge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 1741 uint_t pr_valsize, void *pr_val) 1742 { 1743 nge_t *ngep = barg; 1744 int err = EINVAL; 1745 link_flowctrl_t fl; 1746 uint64_t tmp = 0; 1747 1748 bzero(pr_val, pr_valsize); 1749 switch (pr_num) { 1750 case DLD_PROP_DUPLEX: 1751 if (pr_valsize >= sizeof (uint8_t)) { 1752 *(uint8_t *)pr_val = ngep->param_link_duplex; 1753 err = 0; 1754 } 1755 break; 1756 case DLD_PROP_SPEED: 1757 if (pr_valsize >= sizeof (uint64_t)) { 1758 tmp = ngep->param_link_speed * 1000000ull; 1759 bcopy(&tmp, pr_val, sizeof (tmp)); 1760 err = 0; 1761 } 1762 break; 1763 case DLD_PROP_STATUS: 1764 if (pr_valsize >= sizeof (uint8_t)) { 1765 *(uint8_t *)pr_val = ngep->param_link_up; 1766 err = 0; 1767 } 1768 break; 1769 case DLD_PROP_AUTONEG: 1770 if (pr_valsize >= sizeof (uint8_t)) { 1771 *(uint8_t *)pr_val = ngep->param_adv_autoneg; 1772 err = 0; 1773 } 1774 break; 1775 case DLD_PROP_DEFMTU: { 1776 if (pr_valsize >= sizeof (uint64_t)) { 1777 tmp = ngep->default_mtu; 1778 bcopy(&tmp, pr_val, sizeof (tmp)); 1779 err = 0; 1780 } 1781 break; 1782 } 1783 case DLD_PROP_FLOWCTRL: 1784 if (pr_valsize >= sizeof (link_flowctrl_t)) { 1785 if (ngep->param_link_rx_pause && 1786 !ngep->param_link_tx_pause) 1787 fl = LINK_FLOWCTRL_RX; 1788 1789 if (!ngep->param_link_rx_pause && 1790 !ngep->param_link_tx_pause) 1791 fl = LINK_FLOWCTRL_NONE; 1792 1793 if (!ngep->param_link_rx_pause && 1794 ngep->param_link_tx_pause) 1795 fl = LINK_FLOWCTRL_TX; 1796 1797 if (ngep->param_link_rx_pause && 1798 ngep->param_link_tx_pause) 1799 fl = LINK_FLOWCTRL_BI; 1800 bcopy(&fl, pr_val, sizeof (fl)); 1801 err = 0; 1802 } 1803 break; 1804 case DLD_PROP_ADV_1000FDX_CAP: 1805 if (pr_valsize >= sizeof (uint8_t)) { 1806 *(uint8_t *)pr_val = ngep->param_adv_1000fdx; 1807 err = 0; 1808 } 1809 break; 1810 case DLD_PROP_EN_1000FDX_CAP: 1811 if (pr_valsize >= sizeof (uint8_t)) { 1812 *(uint8_t *)pr_val = ngep->param_en_1000fdx; 1813 err = 0; 1814 } 1815 break; 1816 case DLD_PROP_ADV_1000HDX_CAP: 1817 if (pr_valsize >= sizeof (uint8_t)) { 1818 *(uint8_t *)pr_val = ngep->param_adv_1000hdx; 1819 err = 0; 1820 } 1821 break; 1822 case DLD_PROP_EN_1000HDX_CAP: 1823 if (pr_valsize >= sizeof (uint8_t)) { 1824 *(uint8_t *)pr_val = ngep->param_en_1000hdx; 1825 err = 0; 1826 } 1827 break; 1828 case DLD_PROP_ADV_100FDX_CAP: 1829 if (pr_valsize >= sizeof (uint8_t)) { 1830 *(uint8_t *)pr_val = ngep->param_adv_100fdx; 1831 err = 0; 1832 } 1833 break; 1834 case DLD_PROP_EN_100FDX_CAP: 1835 if (pr_valsize >= sizeof (uint8_t)) { 1836 *(uint8_t *)pr_val = ngep->param_en_100fdx; 1837 err = 0; 1838 } 1839 break; 1840 case DLD_PROP_ADV_100HDX_CAP: 1841 if (pr_valsize >= sizeof (uint8_t)) { 1842 *(uint8_t *)pr_val = ngep->param_adv_100hdx; 1843 err = 0; 1844 } 1845 break; 1846 case DLD_PROP_EN_100HDX_CAP: 1847 if (pr_valsize >= sizeof (uint8_t)) { 1848 *(uint8_t *)pr_val = ngep->param_en_100hdx; 1849 err = 0; 1850 } 1851 break; 1852 case DLD_PROP_ADV_10FDX_CAP: 1853 if (pr_valsize >= sizeof (uint8_t)) { 1854 *(uint8_t *)pr_val = ngep->param_adv_10fdx; 1855 err = 0; 1856 } 1857 break; 1858 case DLD_PROP_EN_10FDX_CAP: 1859 if (pr_valsize >= sizeof (uint8_t)) { 1860 *(uint8_t *)pr_val = ngep->param_en_10fdx; 1861 err = 0; 1862 } 1863 break; 1864 case DLD_PROP_ADV_10HDX_CAP: 1865 if (pr_valsize >= sizeof (uint8_t)) { 1866 *(uint8_t *)pr_val = ngep->param_adv_10hdx; 1867 err = 0; 1868 } 1869 break; 1870 case DLD_PROP_EN_10HDX_CAP: 1871 if (pr_valsize >= sizeof (uint8_t)) { 1872 *(uint8_t *)pr_val = ngep->param_en_10hdx; 1873 err = 0; 1874 } 1875 break; 1876 case DLD_PROP_PRIVATE: 1877 err = nge_get_priv_prop(ngep, pr_name, pr_valsize, 1878 pr_val); 1879 break; 1880 default: 1881 err = ENOTSUP; 1882 } 1883 return (err); 1884 } 1885 1886 /* ARGSUSED */ 1887 static int 1888 nge_set_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_valsize, 1889 const void *pr_val) 1890 { 1891 int err = 0; 1892 long result; 1893 1894 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 1895 if (pr_val == NULL) { 1896 err = EINVAL; 1897 return (err); 1898 } 1899 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1900 if (result < 0 || result > NGE_MAX_SDU) { 1901 err = EINVAL; 1902 } else { 1903 ngep->param_txbcopy_threshold = (uint32_t)result; 1904 goto reprogram; 1905 } 1906 return (err); 1907 } 1908 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 1909 if (pr_val == NULL) { 1910 err = EINVAL; 1911 return (err); 1912 } 1913 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1914 if (result < 0 || result > NGE_MAX_SDU) { 1915 err = EINVAL; 1916 } else { 1917 ngep->param_rxbcopy_threshold = (uint32_t)result; 1918 goto reprogram; 1919 } 1920 return (err); 1921 } 1922 if (strcmp(pr_name, "_recv_max_packet") == 0) { 1923 if (pr_val == NULL) { 1924 err = EINVAL; 1925 return (err); 1926 } 1927 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1928 if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) { 1929 err = EINVAL; 1930 } else { 1931 ngep->param_recv_max_packet = (uint32_t)result; 1932 goto reprogram; 1933 } 1934 return (err); 1935 } 1936 if (strcmp(pr_name, "_poll_quiet_time") == 0) { 1937 if (pr_val == NULL) { 1938 err = EINVAL; 1939 return (err); 1940 } 1941 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1942 if (result < 0 || result > 10000) { 1943 err = EINVAL; 1944 } else { 1945 ngep->param_poll_quiet_time = (uint32_t)result; 1946 goto reprogram; 1947 } 1948 return (err); 1949 } 1950 if (strcmp(pr_name, "_poll_busy_time") == 0) { 1951 if (pr_val == NULL) { 1952 err = EINVAL; 1953 return (err); 1954 } 1955 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1956 if (result < 0 || result > 10000) { 1957 err = EINVAL; 1958 } else { 1959 ngep->param_poll_busy_time = (uint32_t)result; 1960 goto reprogram; 1961 } 1962 return (err); 1963 } 1964 if (strcmp(pr_name, "_rx_intr_hwater") == 0) { 1965 if (pr_val == NULL) { 1966 err = EINVAL; 1967 return (err); 1968 } 1969 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1970 if (result < 0 || result > PARAM_RECV_MAX_PACKET) { 1971 err = EINVAL; 1972 } else { 1973 ngep->param_rx_intr_hwater = (uint32_t)result; 1974 goto reprogram; 1975 } 1976 return (err); 1977 } 1978 if (strcmp(pr_name, "_rx_intr_lwater") == 0) { 1979 if (pr_val == NULL) { 1980 err = EINVAL; 1981 return (err); 1982 } 1983 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1984 if (result < 0 || result > PARAM_RECV_MAX_PACKET) { 1985 err = EINVAL; 1986 } else { 1987 ngep->param_rx_intr_lwater = (uint32_t)result; 1988 goto reprogram; 1989 } 1990 return (err); 1991 } 1992 if (strcmp(pr_name, "_tx_n_intr") == 0) { 1993 if (pr_val == NULL) { 1994 err = EINVAL; 1995 return (err); 1996 } 1997 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1998 if (result < 1 || result > 10000) { 1999 err = EINVAL; 2000 } else { 2001 ngep->param_tx_n_intr = (uint32_t)result; 2002 goto reprogram; 2003 } 2004 return (err); 2005 } 2006 2007 err = ENOTSUP; 2008 return (err); 2009 2010 reprogram: 2011 if (err == 0) { 2012 (*ngep->physops->phys_update)(ngep); 2013 nge_chip_sync(ngep); 2014 } 2015 2016 return (err); 2017 } 2018 2019 static int 2020 nge_get_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_valsize, 2021 void *pr_val) 2022 { 2023 char valstr[MAXNAMELEN]; 2024 int err = ENOTSUP; 2025 uint_t strsize; 2026 2027 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 2028 (void) sprintf(valstr, "%d", ngep->param_txbcopy_threshold); 2029 err = 0; 2030 goto done; 2031 } 2032 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 2033 (void) sprintf(valstr, "%d", ngep->param_rxbcopy_threshold); 2034 err = 0; 2035 goto done; 2036 } 2037 if (strcmp(pr_name, "_recv_max_packet") == 0) { 2038 (void) sprintf(valstr, "%d", ngep->param_recv_max_packet); 2039 err = 0; 2040 goto done; 2041 } 2042 if (strcmp(pr_name, "_poll_quiet_time") == 0) { 2043 (void) sprintf(valstr, "%d", ngep->param_poll_quiet_time); 2044 err = 0; 2045 goto done; 2046 } 2047 if (strcmp(pr_name, "_poll_busy_time") == 0) { 2048 (void) sprintf(valstr, "%d", ngep->param_poll_busy_time); 2049 err = 0; 2050 goto done; 2051 } 2052 if (strcmp(pr_name, "_rx_intr_hwater") == 0) { 2053 (void) sprintf(valstr, "%d", ngep->param_rx_intr_hwater); 2054 err = 0; 2055 goto done; 2056 } 2057 if (strcmp(pr_name, "_rx_intr_lwater") == 0) { 2058 (void) sprintf(valstr, "%d", ngep->param_rx_intr_lwater); 2059 err = 0; 2060 goto done; 2061 } 2062 if (strcmp(pr_name, "_tx_n_intr") == 0) { 2063 (void) sprintf(valstr, "%d", ngep->param_tx_n_intr); 2064 err = 0; 2065 goto done; 2066 } 2067 2068 done: 2069 if (err == 0) { 2070 strsize = (uint_t)strlen(valstr); 2071 if (pr_valsize < strsize) { 2072 err = ENOBUFS; 2073 } else { 2074 (void) strlcpy(pr_val, valstr, strsize); 2075 } 2076 } 2077 return (err); 2078 } 2079 2080 /* ARGSUSED */ 2081 static boolean_t 2082 nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 2083 { 2084 nge_t *ngep = arg; 2085 nge_dev_spec_param_t *dev_param_p; 2086 2087 dev_param_p = &ngep->dev_spec_param; 2088 2089 switch (cap) { 2090 case MAC_CAPAB_HCKSUM: { 2091 uint32_t *hcksum_txflags = cap_data; 2092 2093 if (dev_param_p->tx_hw_checksum) { 2094 *hcksum_txflags = dev_param_p->tx_hw_checksum; 2095 } else 2096 return (B_FALSE); 2097 break; 2098 } 2099 case MAC_CAPAB_POLL: 2100 /* 2101 * There's nothing for us to fill in, simply returning 2102 * B_TRUE, stating that we support polling is sufficient. 2103 */ 2104 break; 2105 default: 2106 return (B_FALSE); 2107 } 2108 return (B_TRUE); 2109 } 2110 2111 #undef NGE_DBG 2112 #define NGE_DBG NGE_DBG_INIT /* debug flag for this code */ 2113 int 2114 nge_restart(nge_t *ngep) 2115 { 2116 int err = 0; 2117 err = nge_reset(ngep); 2118 nge_chip_sync(ngep); 2119 if (!err) 2120 err = nge_chip_start(ngep); 2121 2122 if (err) { 2123 ngep->nge_mac_state = NGE_MAC_STOPPED; 2124 return (DDI_FAILURE); 2125 } else { 2126 ngep->nge_mac_state = NGE_MAC_STARTED; 2127 return (DDI_SUCCESS); 2128 } 2129 } 2130 2131 void 2132 nge_wake_factotum(nge_t *ngep) 2133 { 2134 mutex_enter(ngep->softlock); 2135 if (ngep->factotum_flag == 0) { 2136 ngep->factotum_flag = 1; 2137 (void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL); 2138 } 2139 mutex_exit(ngep->softlock); 2140 } 2141 2142 /* 2143 * High-level cyclic handler 2144 * 2145 * This routine schedules a (low-level) softint callback to the 2146 * factotum. 2147 */ 2148 2149 static void 2150 nge_chip_cyclic(void *arg) 2151 { 2152 nge_t *ngep; 2153 2154 ngep = (nge_t *)arg; 2155 2156 switch (ngep->nge_chip_state) { 2157 default: 2158 return; 2159 2160 case NGE_CHIP_RUNNING: 2161 break; 2162 2163 case NGE_CHIP_FAULT: 2164 case NGE_CHIP_ERROR: 2165 break; 2166 } 2167 2168 nge_wake_factotum(ngep); 2169 } 2170 2171 static void 2172 nge_unattach(nge_t *ngep) 2173 { 2174 send_ring_t *srp; 2175 buff_ring_t *brp; 2176 2177 srp = ngep->send; 2178 brp = ngep->buff; 2179 NGE_TRACE(("nge_unattach($%p)", (void *)ngep)); 2180 2181 /* 2182 * Flag that no more activity may be initiated 2183 */ 2184 ngep->progress &= ~PROGRESS_READY; 2185 ngep->nge_mac_state = NGE_MAC_UNATTACH; 2186 2187 /* 2188 * Quiesce the PHY and MAC (leave it reset but still powered). 2189 * Clean up and free all NGE data structures 2190 */ 2191 if (ngep->periodic_id != NULL) { 2192 ddi_periodic_delete(ngep->periodic_id); 2193 ngep->periodic_id = NULL; 2194 } 2195 2196 if (ngep->progress & PROGRESS_KSTATS) 2197 nge_fini_kstats(ngep); 2198 2199 if (ngep->progress & PROGRESS_NDD) 2200 nge_nd_cleanup(ngep); 2201 2202 if (ngep->progress & PROGRESS_HWINT) { 2203 mutex_enter(ngep->genlock); 2204 nge_restore_mac_addr(ngep); 2205 (void) nge_chip_stop(ngep, B_FALSE); 2206 mutex_exit(ngep->genlock); 2207 } 2208 2209 if (ngep->progress & PROGRESS_SWINT) 2210 nge_rem_intrs(ngep); 2211 2212 if (ngep->progress & PROGRESS_FACTOTUM) 2213 (void) ddi_intr_remove_softint(ngep->factotum_hdl); 2214 2215 if (ngep->progress & PROGRESS_RESCHED) 2216 (void) ddi_intr_remove_softint(ngep->resched_hdl); 2217 2218 if (ngep->progress & PROGRESS_INTR) { 2219 mutex_destroy(srp->tx_lock); 2220 mutex_destroy(srp->tc_lock); 2221 mutex_destroy(&srp->dmah_lock); 2222 mutex_destroy(brp->recycle_lock); 2223 2224 mutex_destroy(ngep->genlock); 2225 mutex_destroy(ngep->softlock); 2226 rw_destroy(ngep->rwlock); 2227 } 2228 2229 if (ngep->progress & PROGRESS_REGS) 2230 ddi_regs_map_free(&ngep->io_handle); 2231 2232 if (ngep->progress & PROGRESS_CFG) 2233 pci_config_teardown(&ngep->cfg_handle); 2234 2235 ddi_remove_minor_node(ngep->devinfo, NULL); 2236 2237 kmem_free(ngep, sizeof (*ngep)); 2238 } 2239 2240 static int 2241 nge_resume(dev_info_t *devinfo) 2242 { 2243 nge_t *ngep; 2244 chip_info_t *infop; 2245 int err; 2246 2247 ASSERT(devinfo != NULL); 2248 2249 ngep = ddi_get_driver_private(devinfo); 2250 err = 0; 2251 2252 /* 2253 * If there are state inconsistancies, this is bad. Returning 2254 * DDI_FAILURE here will eventually cause the machine to panic, 2255 * so it is best done here so that there is a possibility of 2256 * debugging the problem. 2257 */ 2258 if (ngep == NULL) 2259 cmn_err(CE_PANIC, 2260 "nge: ngep returned from ddi_get_driver_private was NULL"); 2261 infop = (chip_info_t *)&ngep->chipinfo; 2262 2263 if (ngep->devinfo != devinfo) 2264 cmn_err(CE_PANIC, 2265 "nge: passed devinfo not the same as saved devinfo"); 2266 2267 mutex_enter(ngep->genlock); 2268 rw_enter(ngep->rwlock, RW_WRITER); 2269 2270 /* 2271 * Fetch the config space. Even though we have most of it cached, 2272 * some values *might* change across a suspend/resume. 2273 */ 2274 nge_chip_cfg_init(ngep, infop, B_FALSE); 2275 2276 /* 2277 * Only in one case, this conditional branch can be executed: the port 2278 * hasn't been plumbed. 2279 */ 2280 if (ngep->suspended == B_FALSE) { 2281 rw_exit(ngep->rwlock); 2282 mutex_exit(ngep->genlock); 2283 return (DDI_SUCCESS); 2284 } 2285 2286 nge_tx_recycle_all(ngep); 2287 err = nge_reinit_ring(ngep); 2288 if (!err) { 2289 err = nge_chip_reset(ngep); 2290 if (!err) 2291 err = nge_chip_start(ngep); 2292 } 2293 2294 if (err) { 2295 /* 2296 * We note the failure, but return success, as the 2297 * system is still usable without this controller. 2298 */ 2299 cmn_err(CE_WARN, "nge: resume: failed to restart controller"); 2300 } else { 2301 ngep->nge_mac_state = NGE_MAC_STARTED; 2302 } 2303 ngep->suspended = B_FALSE; 2304 2305 rw_exit(ngep->rwlock); 2306 mutex_exit(ngep->genlock); 2307 2308 return (DDI_SUCCESS); 2309 } 2310 2311 /* 2312 * attach(9E) -- Attach a device to the system 2313 * 2314 * Called once for each board successfully probed. 2315 */ 2316 static int 2317 nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 2318 { 2319 int err; 2320 int i; 2321 int instance; 2322 caddr_t regs; 2323 nge_t *ngep; 2324 chip_info_t *infop; 2325 mac_register_t *macp; 2326 2327 switch (cmd) { 2328 default: 2329 return (DDI_FAILURE); 2330 2331 case DDI_RESUME: 2332 return (nge_resume(devinfo)); 2333 2334 case DDI_ATTACH: 2335 break; 2336 } 2337 2338 ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP); 2339 instance = ddi_get_instance(devinfo); 2340 ddi_set_driver_private(devinfo, ngep); 2341 ngep->devinfo = devinfo; 2342 2343 (void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d", 2344 NGE_DRIVER_NAME, instance); 2345 err = pci_config_setup(devinfo, &ngep->cfg_handle); 2346 if (err != DDI_SUCCESS) { 2347 nge_problem(ngep, "nge_attach: pci_config_setup() failed"); 2348 goto attach_fail; 2349 } 2350 infop = (chip_info_t *)&ngep->chipinfo; 2351 nge_chip_cfg_init(ngep, infop, B_FALSE); 2352 nge_init_dev_spec_param(ngep); 2353 nge_get_props(ngep); 2354 ngep->progress |= PROGRESS_CFG; 2355 2356 err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER, 2357 ®s, 0, 0, &nge_reg_accattr, &ngep->io_handle); 2358 if (err != DDI_SUCCESS) { 2359 nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed"); 2360 goto attach_fail; 2361 } 2362 ngep->io_regs = regs; 2363 ngep->progress |= PROGRESS_REGS; 2364 2365 err = nge_register_intrs_and_init_locks(ngep); 2366 if (err != DDI_SUCCESS) { 2367 nge_problem(ngep, "nge_attach:" 2368 " register intrs and init locks failed"); 2369 goto attach_fail; 2370 } 2371 nge_init_ring_param_lock(ngep); 2372 ngep->progress |= PROGRESS_INTR; 2373 2374 mutex_enter(ngep->genlock); 2375 2376 /* 2377 * Initialise link state variables 2378 * Stop, reset & reinitialise the chip. 2379 * Initialise the (internal) PHY. 2380 */ 2381 nge_phys_init(ngep); 2382 err = nge_chip_reset(ngep); 2383 if (err != DDI_SUCCESS) { 2384 nge_problem(ngep, "nge_attach: nge_chip_reset() failed"); 2385 mutex_exit(ngep->genlock); 2386 goto attach_fail; 2387 } 2388 nge_chip_sync(ngep); 2389 2390 /* 2391 * Now that mutex locks are initialized, enable interrupts. 2392 */ 2393 if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) { 2394 /* Call ddi_intr_block_enable() for MSI interrupts */ 2395 (void) ddi_intr_block_enable(ngep->htable, 2396 ngep->intr_actual_cnt); 2397 } else { 2398 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 2399 for (i = 0; i < ngep->intr_actual_cnt; i++) { 2400 (void) ddi_intr_enable(ngep->htable[i]); 2401 } 2402 } 2403 2404 ngep->link_state = LINK_STATE_UNKNOWN; 2405 ngep->progress |= PROGRESS_HWINT; 2406 2407 /* 2408 * Register NDD-tweakable parameters 2409 */ 2410 if (nge_nd_init(ngep)) { 2411 nge_problem(ngep, "nge_attach: nge_nd_init() failed"); 2412 mutex_exit(ngep->genlock); 2413 goto attach_fail; 2414 } 2415 ngep->progress |= PROGRESS_NDD; 2416 2417 /* 2418 * Create & initialise named kstats 2419 */ 2420 nge_init_kstats(ngep, instance); 2421 ngep->progress |= PROGRESS_KSTATS; 2422 2423 mutex_exit(ngep->genlock); 2424 2425 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 2426 goto attach_fail; 2427 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 2428 macp->m_driver = ngep; 2429 macp->m_dip = devinfo; 2430 macp->m_src_addr = infop->vendor_addr.addr; 2431 macp->m_callbacks = &nge_m_callbacks; 2432 macp->m_min_sdu = 0; 2433 macp->m_max_sdu = ngep->default_mtu; 2434 macp->m_margin = VTAG_SIZE; 2435 /* 2436 * Finally, we're ready to register ourselves with the mac 2437 * interface; if this succeeds, we're all ready to start() 2438 */ 2439 err = mac_register(macp, &ngep->mh); 2440 mac_free(macp); 2441 if (err != 0) 2442 goto attach_fail; 2443 2444 /* 2445 * Register a periodical handler. 2446 * nge_chip_cyclic() is invoked in kernel context. 2447 */ 2448 ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep, 2449 NGE_CYCLIC_PERIOD, DDI_IPL_0); 2450 2451 ngep->progress |= PROGRESS_READY; 2452 return (DDI_SUCCESS); 2453 2454 attach_fail: 2455 nge_unattach(ngep); 2456 return (DDI_FAILURE); 2457 } 2458 2459 static int 2460 nge_suspend(nge_t *ngep) 2461 { 2462 mutex_enter(ngep->genlock); 2463 rw_enter(ngep->rwlock, RW_WRITER); 2464 2465 /* if the port hasn't been plumbed, just return */ 2466 if (ngep->nge_mac_state != NGE_MAC_STARTED) { 2467 rw_exit(ngep->rwlock); 2468 mutex_exit(ngep->genlock); 2469 return (DDI_SUCCESS); 2470 } 2471 ngep->suspended = B_TRUE; 2472 (void) nge_chip_stop(ngep, B_FALSE); 2473 ngep->nge_mac_state = NGE_MAC_STOPPED; 2474 2475 rw_exit(ngep->rwlock); 2476 mutex_exit(ngep->genlock); 2477 return (DDI_SUCCESS); 2478 } 2479 2480 /* 2481 * detach(9E) -- Detach a device from the system 2482 */ 2483 static int 2484 nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 2485 { 2486 int i; 2487 nge_t *ngep; 2488 mul_item *p, *nextp; 2489 buff_ring_t *brp; 2490 2491 NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd)); 2492 2493 ngep = ddi_get_driver_private(devinfo); 2494 brp = ngep->buff; 2495 2496 switch (cmd) { 2497 default: 2498 return (DDI_FAILURE); 2499 2500 case DDI_SUSPEND: 2501 /* 2502 * Stop the NIC 2503 * Note: This driver doesn't currently support WOL, but 2504 * should it in the future, it is important to 2505 * make sure the PHY remains powered so that the 2506 * wakeup packet can actually be recieved. 2507 */ 2508 return (nge_suspend(ngep)); 2509 2510 case DDI_DETACH: 2511 break; 2512 } 2513 2514 /* Try to wait all the buffer post to upper layer be released */ 2515 for (i = 0; i < 1000; i++) { 2516 if (brp->rx_hold == 0) 2517 break; 2518 drv_usecwait(1000); 2519 } 2520 2521 /* If there is any posted buffer, reject to detach */ 2522 if (brp->rx_hold != 0) 2523 return (DDI_FAILURE); 2524 2525 /* 2526 * Unregister from the GLD subsystem. This can fail, in 2527 * particular if there are DLPI style-2 streams still open - 2528 * in which case we just return failure without shutting 2529 * down chip operations. 2530 */ 2531 if (mac_unregister(ngep->mh) != DDI_SUCCESS) 2532 return (DDI_FAILURE); 2533 2534 /* 2535 * Recycle the multicast table. mac_unregister() should be called 2536 * before it to ensure the multicast table can be used even if 2537 * mac_unregister() fails. 2538 */ 2539 for (p = ngep->pcur_mulist; p != NULL; p = nextp) { 2540 nextp = p->next; 2541 kmem_free(p, sizeof (mul_item)); 2542 } 2543 ngep->pcur_mulist = NULL; 2544 2545 /* 2546 * All activity stopped, so we can clean up & exit 2547 */ 2548 nge_unattach(ngep); 2549 return (DDI_SUCCESS); 2550 } 2551 2552 2553 /* 2554 * ========== Module Loading Data & Entry Points ========== 2555 */ 2556 2557 DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach, 2558 nodev, NULL, D_MP, NULL); 2559 2560 2561 static struct modldrv nge_modldrv = { 2562 &mod_driverops, /* Type of module. This one is a driver */ 2563 nge_ident, /* short description */ 2564 &nge_dev_ops /* driver specific ops */ 2565 }; 2566 2567 static struct modlinkage modlinkage = { 2568 MODREV_1, (void *)&nge_modldrv, NULL 2569 }; 2570 2571 2572 int 2573 _info(struct modinfo *modinfop) 2574 { 2575 return (mod_info(&modlinkage, modinfop)); 2576 } 2577 2578 int 2579 _init(void) 2580 { 2581 int status; 2582 2583 mac_init_ops(&nge_dev_ops, "nge"); 2584 status = mod_install(&modlinkage); 2585 if (status != DDI_SUCCESS) 2586 mac_fini_ops(&nge_dev_ops); 2587 else 2588 mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL); 2589 2590 return (status); 2591 } 2592 2593 int 2594 _fini(void) 2595 { 2596 int status; 2597 2598 status = mod_remove(&modlinkage); 2599 if (status == DDI_SUCCESS) { 2600 mac_fini_ops(&nge_dev_ops); 2601 mutex_destroy(nge_log_mutex); 2602 } 2603 2604 return (status); 2605 } 2606 2607 /* 2608 * ============ Init MSI/Fixed/SoftInterrupt routines ============== 2609 */ 2610 2611 /* 2612 * Register interrupts and initialize each mutex and condition variables 2613 */ 2614 2615 static int 2616 nge_register_intrs_and_init_locks(nge_t *ngep) 2617 { 2618 int err; 2619 int intr_types; 2620 uint_t soft_prip; 2621 nge_msi_mask msi_mask; 2622 nge_msi_map0_vec map0_vec; 2623 nge_msi_map1_vec map1_vec; 2624 2625 /* 2626 * Add the softint handlers: 2627 * 2628 * Both of these handlers are used to avoid restrictions on the 2629 * context and/or mutexes required for some operations. In 2630 * particular, the hardware interrupt handler and its subfunctions 2631 * can detect a number of conditions that we don't want to handle 2632 * in that context or with that set of mutexes held. So, these 2633 * softints are triggered instead: 2634 * 2635 * the <resched> softint is triggered if if we have previously 2636 * had to refuse to send a packet because of resource shortage 2637 * (we've run out of transmit buffers), but the send completion 2638 * interrupt handler has now detected that more buffers have 2639 * become available. Its only purpose is to call gld_sched() 2640 * to retry the pending transmits (we're not allowed to hold 2641 * driver-defined mutexes across gld_sched()). 2642 * 2643 * the <factotum> is triggered if the h/w interrupt handler 2644 * sees the <link state changed> or <error> bits in the status 2645 * block. It's also triggered periodically to poll the link 2646 * state, just in case we aren't getting link status change 2647 * interrupts ... 2648 */ 2649 err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl, 2650 DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep); 2651 if (err != DDI_SUCCESS) { 2652 nge_problem(ngep, 2653 "nge_attach: add nge_reschedule softintr failed"); 2654 2655 return (DDI_FAILURE); 2656 } 2657 ngep->progress |= PROGRESS_RESCHED; 2658 err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl, 2659 DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep); 2660 if (err != DDI_SUCCESS) { 2661 nge_problem(ngep, 2662 "nge_attach: add nge_chip_factotum softintr failed!"); 2663 2664 return (DDI_FAILURE); 2665 } 2666 if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip) 2667 != DDI_SUCCESS) { 2668 nge_problem(ngep, "nge_attach: get softintr priority failed\n"); 2669 2670 return (DDI_FAILURE); 2671 } 2672 ngep->soft_pri = soft_prip; 2673 2674 ngep->progress |= PROGRESS_FACTOTUM; 2675 /* Get supported interrupt types */ 2676 if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types) 2677 != DDI_SUCCESS) { 2678 nge_error(ngep, "ddi_intr_get_supported_types failed\n"); 2679 2680 return (DDI_FAILURE); 2681 } 2682 2683 NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x", 2684 intr_types)); 2685 2686 if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) { 2687 2688 /* MSI Configurations for mcp55 chipset */ 2689 if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 || 2690 ngep->chipinfo.device == DEVICE_ID_MCP55_372) { 2691 2692 2693 /* Enable the 8 vectors */ 2694 msi_mask.msi_mask_val = 2695 nge_reg_get32(ngep, NGE_MSI_MASK); 2696 msi_mask.msi_msk_bits.vec0 = NGE_SET; 2697 msi_mask.msi_msk_bits.vec1 = NGE_SET; 2698 msi_mask.msi_msk_bits.vec2 = NGE_SET; 2699 msi_mask.msi_msk_bits.vec3 = NGE_SET; 2700 msi_mask.msi_msk_bits.vec4 = NGE_SET; 2701 msi_mask.msi_msk_bits.vec5 = NGE_SET; 2702 msi_mask.msi_msk_bits.vec6 = NGE_SET; 2703 msi_mask.msi_msk_bits.vec7 = NGE_SET; 2704 nge_reg_put32(ngep, NGE_MSI_MASK, 2705 msi_mask.msi_mask_val); 2706 2707 /* 2708 * Remapping the MSI MAP0 and MAP1. MCP55 2709 * is default mapping all the interrupt to 0 vector. 2710 * Software needs to remapping this. 2711 * This mapping is same as CK804. 2712 */ 2713 map0_vec.msi_map0_val = 2714 nge_reg_get32(ngep, NGE_MSI_MAP0); 2715 map1_vec.msi_map1_val = 2716 nge_reg_get32(ngep, NGE_MSI_MAP1); 2717 map0_vec.vecs_bits.reint_vec = 0; 2718 map0_vec.vecs_bits.rcint_vec = 0; 2719 map0_vec.vecs_bits.miss_vec = 3; 2720 map0_vec.vecs_bits.teint_vec = 5; 2721 map0_vec.vecs_bits.tcint_vec = 5; 2722 map0_vec.vecs_bits.stint_vec = 2; 2723 map0_vec.vecs_bits.mint_vec = 6; 2724 map0_vec.vecs_bits.rfint_vec = 0; 2725 map1_vec.vecs_bits.tfint_vec = 5; 2726 map1_vec.vecs_bits.feint_vec = 6; 2727 map1_vec.vecs_bits.resv8_11 = 3; 2728 map1_vec.vecs_bits.resv12_15 = 1; 2729 map1_vec.vecs_bits.resv16_19 = 0; 2730 map1_vec.vecs_bits.resv20_23 = 7; 2731 map1_vec.vecs_bits.resv24_31 = 0xff; 2732 nge_reg_put32(ngep, NGE_MSI_MAP0, 2733 map0_vec.msi_map0_val); 2734 nge_reg_put32(ngep, NGE_MSI_MAP1, 2735 map1_vec.msi_map1_val); 2736 } 2737 if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 2738 NGE_DEBUG(("MSI registration failed, " 2739 "trying FIXED interrupt type\n")); 2740 } else { 2741 nge_log(ngep, "Using MSI interrupt type\n"); 2742 2743 ngep->intr_type = DDI_INTR_TYPE_MSI; 2744 ngep->progress |= PROGRESS_SWINT; 2745 } 2746 } 2747 2748 if (!(ngep->progress & PROGRESS_SWINT) && 2749 (intr_types & DDI_INTR_TYPE_FIXED)) { 2750 if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 2751 nge_error(ngep, "FIXED interrupt " 2752 "registration failed\n"); 2753 2754 return (DDI_FAILURE); 2755 } 2756 2757 nge_log(ngep, "Using FIXED interrupt type\n"); 2758 2759 ngep->intr_type = DDI_INTR_TYPE_FIXED; 2760 ngep->progress |= PROGRESS_SWINT; 2761 } 2762 2763 2764 if (!(ngep->progress & PROGRESS_SWINT)) { 2765 nge_error(ngep, "No interrupts registered\n"); 2766 2767 return (DDI_FAILURE); 2768 } 2769 mutex_init(ngep->genlock, NULL, MUTEX_DRIVER, 2770 DDI_INTR_PRI(ngep->intr_pri)); 2771 mutex_init(ngep->softlock, NULL, MUTEX_DRIVER, 2772 DDI_INTR_PRI(ngep->soft_pri)); 2773 rw_init(ngep->rwlock, NULL, RW_DRIVER, 2774 DDI_INTR_PRI(ngep->intr_pri)); 2775 2776 return (DDI_SUCCESS); 2777 } 2778 2779 /* 2780 * nge_add_intrs: 2781 * 2782 * Register FIXED or MSI interrupts. 2783 */ 2784 static int 2785 nge_add_intrs(nge_t *ngep, int intr_type) 2786 { 2787 dev_info_t *dip = ngep->devinfo; 2788 int avail, actual, intr_size, count = 0; 2789 int i, flag, ret; 2790 2791 NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type)); 2792 2793 /* Get number of interrupts */ 2794 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 2795 if ((ret != DDI_SUCCESS) || (count == 0)) { 2796 nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, " 2797 "count: %d", ret, count); 2798 2799 return (DDI_FAILURE); 2800 } 2801 2802 /* Get number of available interrupts */ 2803 ret = ddi_intr_get_navail(dip, intr_type, &avail); 2804 if ((ret != DDI_SUCCESS) || (avail == 0)) { 2805 nge_error(ngep, "ddi_intr_get_navail() failure, " 2806 "ret: %d, avail: %d\n", ret, avail); 2807 2808 return (DDI_FAILURE); 2809 } 2810 2811 if (avail < count) { 2812 NGE_DEBUG(("nitrs() returned %d, navail returned %d\n", 2813 count, avail)); 2814 } 2815 flag = DDI_INTR_ALLOC_NORMAL; 2816 2817 /* Allocate an array of interrupt handles */ 2818 intr_size = count * sizeof (ddi_intr_handle_t); 2819 ngep->htable = kmem_alloc(intr_size, KM_SLEEP); 2820 2821 /* Call ddi_intr_alloc() */ 2822 ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0, 2823 count, &actual, flag); 2824 2825 if ((ret != DDI_SUCCESS) || (actual == 0)) { 2826 nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret); 2827 2828 kmem_free(ngep->htable, intr_size); 2829 return (DDI_FAILURE); 2830 } 2831 2832 if (actual < count) { 2833 NGE_DEBUG(("Requested: %d, Received: %d\n", 2834 count, actual)); 2835 } 2836 2837 ngep->intr_actual_cnt = actual; 2838 ngep->intr_req_cnt = count; 2839 2840 /* 2841 * Get priority for first msi, assume remaining are all the same 2842 */ 2843 if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) != 2844 DDI_SUCCESS) { 2845 nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret); 2846 2847 /* Free already allocated intr */ 2848 for (i = 0; i < actual; i++) { 2849 (void) ddi_intr_free(ngep->htable[i]); 2850 } 2851 2852 kmem_free(ngep->htable, intr_size); 2853 2854 return (DDI_FAILURE); 2855 } 2856 /* Test for high level mutex */ 2857 if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) { 2858 nge_error(ngep, "nge_add_intrs:" 2859 "Hi level interrupt not supported"); 2860 2861 for (i = 0; i < actual; i++) 2862 (void) ddi_intr_free(ngep->htable[i]); 2863 2864 kmem_free(ngep->htable, intr_size); 2865 2866 return (DDI_FAILURE); 2867 } 2868 2869 2870 /* Call ddi_intr_add_handler() */ 2871 for (i = 0; i < actual; i++) { 2872 if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr, 2873 (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 2874 nge_error(ngep, "ddi_intr_add_handler() " 2875 "failed %d\n", ret); 2876 2877 /* Free already allocated intr */ 2878 for (i = 0; i < actual; i++) { 2879 (void) ddi_intr_free(ngep->htable[i]); 2880 } 2881 2882 kmem_free(ngep->htable, intr_size); 2883 2884 return (DDI_FAILURE); 2885 } 2886 } 2887 2888 if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap)) 2889 != DDI_SUCCESS) { 2890 nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret); 2891 2892 for (i = 0; i < actual; i++) { 2893 (void) ddi_intr_remove_handler(ngep->htable[i]); 2894 (void) ddi_intr_free(ngep->htable[i]); 2895 } 2896 2897 kmem_free(ngep->htable, intr_size); 2898 2899 return (DDI_FAILURE); 2900 } 2901 2902 return (DDI_SUCCESS); 2903 } 2904 2905 /* 2906 * nge_rem_intrs: 2907 * 2908 * Unregister FIXED or MSI interrupts 2909 */ 2910 static void 2911 nge_rem_intrs(nge_t *ngep) 2912 { 2913 int i; 2914 2915 NGE_DEBUG(("nge_rem_intrs\n")); 2916 2917 /* Disable all interrupts */ 2918 if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) { 2919 /* Call ddi_intr_block_disable() */ 2920 (void) ddi_intr_block_disable(ngep->htable, 2921 ngep->intr_actual_cnt); 2922 } else { 2923 for (i = 0; i < ngep->intr_actual_cnt; i++) { 2924 (void) ddi_intr_disable(ngep->htable[i]); 2925 } 2926 } 2927 2928 /* Call ddi_intr_remove_handler() */ 2929 for (i = 0; i < ngep->intr_actual_cnt; i++) { 2930 (void) ddi_intr_remove_handler(ngep->htable[i]); 2931 (void) ddi_intr_free(ngep->htable[i]); 2932 } 2933 2934 kmem_free(ngep->htable, 2935 ngep->intr_req_cnt * sizeof (ddi_intr_handle_t)); 2936 } 2937