1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include "nge.h" 30 31 /* 32 * Describes the chip's DMA engine 33 */ 34 35 static ddi_dma_attr_t hot_dma_attr = { 36 DMA_ATTR_V0, /* dma_attr version */ 37 0x0000000000000000ull, /* dma_attr_addr_lo */ 38 0x000000FFFFFFFFFFull, /* dma_attr_addr_hi */ 39 0x000000007FFFFFFFull, /* dma_attr_count_max */ 40 0x0000000000000010ull, /* dma_attr_align */ 41 0x00000FFF, /* dma_attr_burstsizes */ 42 0x00000001, /* dma_attr_minxfer */ 43 0x000000000000FFFFull, /* dma_attr_maxxfer */ 44 0x000000FFFFFFFFFFull, /* dma_attr_seg */ 45 1, /* dma_attr_sgllen */ 46 0x00000001, /* dma_attr_granular */ 47 0 48 }; 49 50 static ddi_dma_attr_t hot_tx_dma_attr = { 51 DMA_ATTR_V0, /* dma_attr version */ 52 0x0000000000000000ull, /* dma_attr_addr_lo */ 53 0x000000FFFFFFFFFFull, /* dma_attr_addr_hi */ 54 0x0000000000003FFFull, /* dma_attr_count_max */ 55 0x0000000000000010ull, /* dma_attr_align */ 56 0x00000FFF, /* dma_attr_burstsizes */ 57 0x00000001, /* dma_attr_minxfer */ 58 
0x0000000000003FFFull, /* dma_attr_maxxfer */ 59 0x000000FFFFFFFFFFull, /* dma_attr_seg */ 60 NGE_MAX_COOKIES, /* dma_attr_sgllen */ 61 1, /* dma_attr_granular */ 62 0 63 }; 64 65 static ddi_dma_attr_t sum_dma_attr = { 66 DMA_ATTR_V0, /* dma_attr version */ 67 0x0000000000000000ull, /* dma_attr_addr_lo */ 68 0x00000000FFFFFFFFull, /* dma_attr_addr_hi */ 69 0x000000007FFFFFFFull, /* dma_attr_count_max */ 70 0x0000000000000010ull, /* dma_attr_align */ 71 0x00000FFF, /* dma_attr_burstsizes */ 72 0x00000001, /* dma_attr_minxfer */ 73 0x000000000000FFFFull, /* dma_attr_maxxfer */ 74 0x00000000FFFFFFFFull, /* dma_attr_seg */ 75 1, /* dma_attr_sgllen */ 76 0x00000001, /* dma_attr_granular */ 77 0 78 }; 79 80 static ddi_dma_attr_t sum_tx_dma_attr = { 81 DMA_ATTR_V0, /* dma_attr version */ 82 0x0000000000000000ull, /* dma_attr_addr_lo */ 83 0x00000000FFFFFFFFull, /* dma_attr_addr_hi */ 84 0x0000000000003FFFull, /* dma_attr_count_max */ 85 0x0000000000000010ull, /* dma_attr_align */ 86 0x00000FFF, /* dma_attr_burstsizes */ 87 0x00000001, /* dma_attr_minxfer */ 88 0x0000000000003FFFull, /* dma_attr_maxxfer */ 89 0x00000000FFFFFFFFull, /* dma_attr_seg */ 90 NGE_MAX_COOKIES, /* dma_attr_sgllen */ 91 1, /* dma_attr_granular */ 92 0 93 }; 94 95 /* 96 * DMA access attributes for data. 97 */ 98 ddi_device_acc_attr_t nge_data_accattr = { 99 DDI_DEVICE_ATTR_V0, 100 DDI_STRUCTURE_LE_ACC, 101 DDI_STRICTORDER_ACC, 102 DDI_DEFAULT_ACC 103 }; 104 105 /* 106 * DMA access attributes for descriptors. 
107 */ 108 static ddi_device_acc_attr_t nge_desc_accattr = { 109 DDI_DEVICE_ATTR_V0, 110 DDI_STRUCTURE_LE_ACC, 111 DDI_STRICTORDER_ACC, 112 DDI_DEFAULT_ACC 113 }; 114 115 /* 116 * PIO access attributes for registers 117 */ 118 static ddi_device_acc_attr_t nge_reg_accattr = { 119 DDI_DEVICE_ATTR_V0, 120 DDI_STRUCTURE_LE_ACC, 121 DDI_STRICTORDER_ACC, 122 DDI_DEFAULT_ACC 123 }; 124 125 /* 126 * NIC DESC MODE 2 127 */ 128 129 static const nge_desc_attr_t nge_sum_desc = { 130 131 sizeof (sum_rx_bd), 132 sizeof (sum_tx_bd), 133 &sum_dma_attr, 134 &sum_tx_dma_attr, 135 nge_sum_rxd_fill, 136 nge_sum_rxd_check, 137 nge_sum_txd_fill, 138 nge_sum_txd_check, 139 }; 140 141 /* 142 * NIC DESC MODE 3 143 */ 144 145 static const nge_desc_attr_t nge_hot_desc = { 146 147 sizeof (hot_rx_bd), 148 sizeof (hot_tx_bd), 149 &hot_dma_attr, 150 &hot_tx_dma_attr, 151 nge_hot_rxd_fill, 152 nge_hot_rxd_check, 153 nge_hot_txd_fill, 154 nge_hot_txd_check, 155 }; 156 157 static char nge_ident[] = "nVidia 1Gb Ethernet %I%"; 158 static char clsize_propname[] = "cache-line-size"; 159 static char latency_propname[] = "latency-timer"; 160 static char debug_propname[] = "nge-debug-flags"; 161 static char intr_moderation[] = "intr-moderation"; 162 static char rx_data_hw[] = "rx-data-hw"; 163 static char rx_prd_lw[] = "rx-prd-lw"; 164 static char rx_prd_hw[] = "rx-prd-hw"; 165 static char sw_intr_intv[] = "sw-intr-intvl"; 166 static char nge_desc_mode[] = "desc-mode"; 167 static char default_mtu[] = "default_mtu"; 168 static char low_memory_mode[] = "minimal-memory-usage"; 169 extern kmutex_t nge_log_mutex[1]; 170 171 static int nge_m_start(void *); 172 static void nge_m_stop(void *); 173 static int nge_m_promisc(void *, boolean_t); 174 static int nge_m_multicst(void *, boolean_t, const uint8_t *); 175 static int nge_m_unicst(void *, const uint8_t *); 176 static void nge_m_ioctl(void *, queue_t *, mblk_t *); 177 static boolean_t nge_m_getcapab(void *, mac_capab_t, void *); 178 179 #define 
NGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB) 180 181 static mac_callbacks_t nge_m_callbacks = { 182 NGE_M_CALLBACK_FLAGS, 183 nge_m_stat, 184 nge_m_start, 185 nge_m_stop, 186 nge_m_promisc, 187 nge_m_multicst, 188 nge_m_unicst, 189 nge_m_tx, 190 NULL, 191 nge_m_ioctl, 192 nge_m_getcapab 193 }; 194 195 static int nge_add_intrs(nge_t *, int); 196 static void nge_rem_intrs(nge_t *); 197 static int nge_register_intrs_and_init_locks(nge_t *); 198 199 /* 200 * NGE MSI tunable: 201 */ 202 boolean_t nge_enable_msi = B_FALSE; 203 204 static enum ioc_reply 205 nge_set_loop_mode(nge_t *ngep, uint32_t mode) 206 { 207 /* 208 * If the mode isn't being changed, there's nothing to do ... 209 */ 210 if (mode == ngep->param_loop_mode) 211 return (IOC_ACK); 212 213 /* 214 * Validate the requested mode and prepare a suitable message 215 * to explain the link down/up cycle that the change will 216 * probably induce ... 217 */ 218 switch (mode) { 219 default: 220 return (IOC_INVAL); 221 222 case NGE_LOOP_NONE: 223 case NGE_LOOP_EXTERNAL_100: 224 case NGE_LOOP_EXTERNAL_10: 225 case NGE_LOOP_INTERNAL_PHY: 226 break; 227 } 228 229 /* 230 * All OK; tell the caller to reprogram 231 * the PHY and/or MAC for the new mode ... 232 */ 233 ngep->param_loop_mode = mode; 234 return (IOC_RESTART_ACK); 235 } 236 237 #undef NGE_DBG 238 #define NGE_DBG NGE_DBG_INIT 239 240 /* 241 * Utility routine to carve a slice off a chunk of allocated memory, 242 * updating the chunk descriptor accordingly. The size of the slice 243 * is given by the product of the <qty> and <size> parameters. 
244 */ 245 void 246 nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk, 247 uint32_t qty, uint32_t size) 248 { 249 size_t totsize; 250 251 totsize = qty*size; 252 ASSERT(size > 0); 253 ASSERT(totsize <= chunk->alength); 254 255 *slice = *chunk; 256 slice->nslots = qty; 257 slice->size = size; 258 slice->alength = totsize; 259 260 chunk->mem_va = (caddr_t)chunk->mem_va + totsize; 261 chunk->alength -= totsize; 262 chunk->offset += totsize; 263 chunk->cookie.dmac_laddress += totsize; 264 chunk->cookie.dmac_size -= totsize; 265 } 266 267 /* 268 * Allocate an area of memory and a DMA handle for accessing it 269 */ 270 int 271 nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p, 272 uint_t dma_flags, dma_area_t *dma_p) 273 { 274 int err; 275 caddr_t va; 276 277 NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)", 278 (void *)ngep, memsize, attr_p, dma_flags, dma_p)); 279 /* 280 * Allocate handle 281 */ 282 err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr, 283 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl); 284 if (err != DDI_SUCCESS) 285 goto fail; 286 287 /* 288 * Allocate memory 289 */ 290 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p, 291 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING), 292 DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl); 293 if (err != DDI_SUCCESS) 294 goto fail; 295 296 /* 297 * Bind the two together 298 */ 299 dma_p->mem_va = va; 300 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 301 va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL, 302 &dma_p->cookie, &dma_p->ncookies); 303 304 if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) 305 goto fail; 306 307 dma_p->nslots = ~0U; 308 dma_p->size = ~0U; 309 dma_p->offset = 0; 310 311 return (DDI_SUCCESS); 312 313 fail: 314 nge_free_dma_mem(dma_p); 315 NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!")); 316 317 return (DDI_FAILURE); 318 } 319 320 /* 321 * Free one allocated area of DMAable memory 322 */ 323 void 
324 nge_free_dma_mem(dma_area_t *dma_p) 325 { 326 if (dma_p->dma_hdl != NULL) { 327 if (dma_p->ncookies) { 328 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 329 dma_p->ncookies = 0; 330 } 331 } 332 if (dma_p->acc_hdl != NULL) { 333 ddi_dma_mem_free(&dma_p->acc_hdl); 334 dma_p->acc_hdl = NULL; 335 } 336 if (dma_p->dma_hdl != NULL) { 337 ddi_dma_free_handle(&dma_p->dma_hdl); 338 dma_p->dma_hdl = NULL; 339 } 340 } 341 342 #define ALLOC_TX_BUF 0x1 343 #define ALLOC_TX_DESC 0x2 344 #define ALLOC_RX_DESC 0x4 345 346 int 347 nge_alloc_bufs(nge_t *ngep) 348 { 349 int err; 350 int split; 351 int progress; 352 size_t txbuffsize; 353 size_t rxdescsize; 354 size_t txdescsize; 355 356 txbuffsize = ngep->tx_desc * ngep->buf_size; 357 rxdescsize = ngep->rx_desc; 358 txdescsize = ngep->tx_desc; 359 rxdescsize *= ngep->desc_attr.rxd_size; 360 txdescsize *= ngep->desc_attr.txd_size; 361 progress = 0; 362 363 NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep)); 364 /* 365 * Allocate memory & handles for TX buffers 366 */ 367 ASSERT((txbuffsize % ngep->nge_split) == 0); 368 for (split = 0; split < ngep->nge_split; ++split) { 369 err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split, 370 &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE, 371 &ngep->send->buf[split]); 372 if (err != DDI_SUCCESS) 373 goto fail; 374 } 375 376 progress |= ALLOC_TX_BUF; 377 378 /* 379 * Allocate memory & handles for receive return rings and 380 * buffer (producer) descriptor rings 381 */ 382 err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr, 383 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc); 384 if (err != DDI_SUCCESS) 385 goto fail; 386 progress |= ALLOC_RX_DESC; 387 388 /* 389 * Allocate memory & handles for TX descriptor rings, 390 */ 391 err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr, 392 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc); 393 if (err != DDI_SUCCESS) 394 goto fail; 395 return (DDI_SUCCESS); 396 397 fail: 398 if (progress & ALLOC_RX_DESC) 399 
nge_free_dma_mem(&ngep->recv->desc); 400 if (progress & ALLOC_TX_BUF) { 401 for (split = 0; split < ngep->nge_split; ++split) 402 nge_free_dma_mem(&ngep->send->buf[split]); 403 } 404 405 return (DDI_FAILURE); 406 } 407 408 /* 409 * This routine frees the transmit and receive buffers and descriptors. 410 * Make sure the chip is stopped before calling it! 411 */ 412 void 413 nge_free_bufs(nge_t *ngep) 414 { 415 int split; 416 417 NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep)); 418 419 nge_free_dma_mem(&ngep->recv->desc); 420 nge_free_dma_mem(&ngep->send->desc); 421 422 for (split = 0; split < ngep->nge_split; ++split) 423 nge_free_dma_mem(&ngep->send->buf[split]); 424 } 425 426 /* 427 * Clean up initialisation done above before the memory is freed 428 */ 429 static void 430 nge_fini_send_ring(nge_t *ngep) 431 { 432 uint32_t slot; 433 size_t dmah_num; 434 send_ring_t *srp; 435 sw_tx_sbd_t *ssbdp; 436 437 srp = ngep->send; 438 ssbdp = srp->sw_sbds; 439 440 NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep)); 441 442 dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]); 443 444 for (slot = 0; slot < dmah_num; ++slot) { 445 if (srp->dmahndl[slot].hndl) { 446 (void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl); 447 ddi_dma_free_handle(&srp->dmahndl[slot].hndl); 448 srp->dmahndl[slot].hndl = NULL; 449 srp->dmahndl[slot].next = NULL; 450 } 451 } 452 453 srp->dmah_free.head = NULL; 454 srp->dmah_free.tail = NULL; 455 456 kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp)); 457 458 } 459 460 /* 461 * Initialise the specified Send Ring, using the information in the 462 * <dma_area> descriptors that it contains to set up all the other 463 * fields. This routine should be called only once for each ring. 
 */
static int
nge_init_send_ring(nge_t *ngep)
{
	size_t dmah_num;
	uint32_t nslots;
	uint32_t err;
	uint32_t slot;
	uint32_t split;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	dma_area_t desc;	/* working copy: consumed by nge_slice_chunk */
	dma_area_t pbuf;	/* working copy of the current buffer split */

	srp = ngep->send;
	srp->desc.nslots = ngep->tx_desc;
	nslots = srp->desc.nslots;

	NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep));
	/*
	 * Other one-off initialisation of per-ring data
	 */
	srp->ngep = ngep;

	/*
	 * Allocate the array of s/w Send Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
	srp->sw_sbds = ssbdp;

	/*
	 * Now initialise each array element once and for all: carve one
	 * h/w descriptor and one payload buffer off the preallocated
	 * chunks for every slot.  Slicing is done on local copies so the
	 * ring's own dma_area_t records stay intact; the ASSERTs verify
	 * that the chunks are consumed exactly.
	 */
	desc = srp->desc;
	for (split = 0; split < ngep->nge_split; ++split) {
		pbuf = srp->buf[split];
		for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) {
			nge_slice_chunk(&ssbdp->desc, &desc, 1,
			    ngep->desc_attr.txd_size);
			nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
			    ngep->buf_size);
		}
		ASSERT(pbuf.alength == 0);
	}
	ASSERT(desc.alength == 0);

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	/* preallocate dma handles for tx buffer */
	for (slot = 0; slot < dmah_num; ++slot) {

		err = ddi_dma_alloc_handle(ngep->devinfo,
		    ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &srp->dmahndl[slot].hndl);

		if (err != DDI_SUCCESS) {
			/*
			 * Partial failure: nge_fini_send_ring() frees the
			 * handles allocated so far and the ssbdp array.
			 */
			nge_fini_send_ring(ngep);
			nge_error(ngep,
			    "nge_init_send_ring: alloc dma handle fails");
			return (DDI_FAILURE);
		}
		/*
		 * Chain each element to its successor; the last link
		 * (one past the array) is overwritten with NULL below.
		 */
		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
	}

	/* Publish the whole array as the free-handle list */
	srp->dmah_free.head = srp->dmahndl;
	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
	srp->dmah_free.tail->next = NULL;

	return (DDI_SUCCESS);
}

/*
 * Initialize the tx recycle pointer and tx sending pointer of tx ring
 * and set the type of tx's data descriptor by default.
538 */ 539 static void 540 nge_reinit_send_ring(nge_t *ngep) 541 { 542 size_t dmah_num; 543 uint32_t slot; 544 send_ring_t *srp; 545 sw_tx_sbd_t *ssbdp; 546 547 srp = ngep->send; 548 549 /* 550 * Reinitialise control variables ... 551 */ 552 553 srp->tx_hwmark = NGE_DESC_MIN; 554 srp->tx_lwmark = NGE_DESC_MIN; 555 556 srp->tx_next = 0; 557 srp->tx_free = srp->desc.nslots; 558 srp->tc_next = 0; 559 560 dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]); 561 562 for (slot = 0; slot - dmah_num != 0; ++slot) 563 srp->dmahndl[slot].next = srp->dmahndl + slot + 1; 564 565 srp->dmah_free.head = srp->dmahndl; 566 srp->dmah_free.tail = srp->dmahndl + dmah_num - 1; 567 srp->dmah_free.tail->next = NULL; 568 569 /* 570 * Zero and sync all the h/w Send Buffer Descriptors 571 */ 572 for (slot = 0; slot < srp->desc.nslots; ++slot) { 573 ssbdp = &srp->sw_sbds[slot]; 574 ssbdp->flags = HOST_OWN; 575 } 576 577 DMA_ZERO(srp->desc); 578 DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV); 579 } 580 581 /* 582 * Initialize the slot number of rx's ring 583 */ 584 static void 585 nge_init_recv_ring(nge_t *ngep) 586 { 587 recv_ring_t *rrp; 588 589 rrp = ngep->recv; 590 rrp->desc.nslots = ngep->rx_desc; 591 rrp->ngep = ngep; 592 } 593 594 /* 595 * Intialize the rx recycle pointer and rx sending pointer of rx ring 596 */ 597 static void 598 nge_reinit_recv_ring(nge_t *ngep) 599 { 600 recv_ring_t *rrp; 601 602 rrp = ngep->recv; 603 604 /* 605 * Reinitialise control variables ... 
606 */ 607 rrp->prod_index = 0; 608 /* 609 * Zero and sync all the h/w Send Buffer Descriptors 610 */ 611 DMA_ZERO(rrp->desc); 612 DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV); 613 } 614 615 /* 616 * Clean up initialisation done above before the memory is freed 617 */ 618 static void 619 nge_fini_buff_ring(nge_t *ngep) 620 { 621 uint32_t i; 622 buff_ring_t *brp; 623 dma_area_t *bufp; 624 sw_rx_sbd_t *bsbdp; 625 626 brp = ngep->buff; 627 bsbdp = brp->sw_rbds; 628 629 NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep)); 630 631 mutex_enter(brp->recycle_lock); 632 brp->buf_sign++; 633 mutex_exit(brp->recycle_lock); 634 for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) { 635 if (bsbdp->bufp) { 636 if (bsbdp->bufp->mp) 637 freemsg(bsbdp->bufp->mp); 638 nge_free_dma_mem(bsbdp->bufp); 639 kmem_free(bsbdp->bufp, sizeof (dma_area_t)); 640 bsbdp->bufp = NULL; 641 } 642 } 643 while (brp->free_list != NULL) { 644 bufp = brp->free_list; 645 brp->free_list = bufp->next; 646 bufp->next = NULL; 647 if (bufp->mp) 648 freemsg(bufp->mp); 649 nge_free_dma_mem(bufp); 650 kmem_free(bufp, sizeof (dma_area_t)); 651 } 652 while (brp->recycle_list != NULL) { 653 bufp = brp->recycle_list; 654 brp->recycle_list = bufp->next; 655 bufp->next = NULL; 656 if (bufp->mp) 657 freemsg(bufp->mp); 658 nge_free_dma_mem(bufp); 659 kmem_free(bufp, sizeof (dma_area_t)); 660 } 661 662 663 kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp))); 664 brp->sw_rbds = NULL; 665 } 666 667 /* 668 * Intialize the Rx's data ring and free ring 669 */ 670 static int 671 nge_init_buff_ring(nge_t *ngep) 672 { 673 uint32_t err; 674 uint32_t slot; 675 uint32_t nslots_buff; 676 uint32_t nslots_recv; 677 buff_ring_t *brp; 678 recv_ring_t *rrp; 679 dma_area_t desc; 680 dma_area_t *bufp; 681 sw_rx_sbd_t *bsbdp; 682 683 rrp = ngep->recv; 684 brp = ngep->buff; 685 brp->nslots = ngep->rx_buf; 686 brp->rx_bcopy = B_FALSE; 687 nslots_recv = rrp->desc.nslots; 688 nslots_buff = brp->nslots; 689 brp->ngep = ngep; 690 691 
NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep)); 692 693 /* 694 * Allocate the array of s/w Recv Buffer Descriptors 695 */ 696 bsbdp = kmem_zalloc(nslots_recv *sizeof (*bsbdp), KM_SLEEP); 697 brp->sw_rbds = bsbdp; 698 brp->free_list = NULL; 699 brp->recycle_list = NULL; 700 for (slot = 0; slot < nslots_buff; ++slot) { 701 bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP); 702 err = nge_alloc_dma_mem(ngep, (ngep->buf_size 703 + NGE_HEADROOM), 704 &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp); 705 if (err != DDI_SUCCESS) { 706 kmem_free(bufp, sizeof (dma_area_t)); 707 return (DDI_FAILURE); 708 } 709 710 bufp->alength -= NGE_HEADROOM; 711 bufp->offset += NGE_HEADROOM; 712 bufp->private = (caddr_t)ngep; 713 bufp->rx_recycle.free_func = nge_recv_recycle; 714 bufp->rx_recycle.free_arg = (caddr_t)bufp; 715 bufp->signature = brp->buf_sign; 716 bufp->rx_delivered = B_FALSE; 717 bufp->mp = desballoc(DMA_VPTR(*bufp), 718 ngep->buf_size + NGE_HEADROOM, 719 0, &bufp->rx_recycle); 720 721 if (bufp->mp == NULL) { 722 return (DDI_FAILURE); 723 } 724 bufp->next = brp->free_list; 725 brp->free_list = bufp; 726 } 727 728 /* 729 * Now initialise each array element once and for all 730 */ 731 desc = rrp->desc; 732 for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) { 733 nge_slice_chunk(&bsbdp->desc, &desc, 1, 734 ngep->desc_attr.rxd_size); 735 bufp = brp->free_list; 736 brp->free_list = bufp->next; 737 bsbdp->bufp = bufp; 738 bsbdp->flags = CONTROLER_OWN; 739 bufp->next = NULL; 740 } 741 742 ASSERT(desc.alength == 0); 743 return (DDI_SUCCESS); 744 } 745 746 /* 747 * Fill the host address of data in rx' descriptor 748 * and initialize free pointers of rx free ring 749 */ 750 static int 751 nge_reinit_buff_ring(nge_t *ngep) 752 { 753 uint32_t slot; 754 uint32_t nslots_recv; 755 buff_ring_t *brp; 756 recv_ring_t *rrp; 757 sw_rx_sbd_t *bsbdp; 758 void *hw_bd_p; 759 760 brp = ngep->buff; 761 rrp = ngep->recv; 762 bsbdp = brp->sw_rbds; 763 nslots_recv = rrp->desc.nslots; 764 
for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) { 765 hw_bd_p = DMA_VPTR(bsbdp->desc); 766 /* 767 * There is a scenario: When the traffic of small tcp 768 * packet is heavy, suspending the tcp traffic will 769 * cause the preallocated buffers for rx not to be 770 * released in time by tcp taffic and cause rx's buffer 771 * pointers not to be refilled in time. 772 * 773 * At this point, if we reinitialize the driver, the bufp 774 * pointer for rx's traffic will be NULL. 775 * So the result of the reinitializion fails. 776 */ 777 if (bsbdp->bufp == NULL) 778 return (DDI_FAILURE); 779 780 ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie, 781 bsbdp->bufp->alength); 782 } 783 return (DDI_SUCCESS); 784 } 785 786 static void 787 nge_init_ring_param_lock(nge_t *ngep) 788 { 789 buff_ring_t *brp; 790 send_ring_t *srp; 791 792 srp = ngep->send; 793 brp = ngep->buff; 794 795 /* Init the locks for send ring */ 796 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 797 DDI_INTR_PRI(ngep->intr_pri)); 798 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 799 DDI_INTR_PRI(ngep->intr_pri)); 800 mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER, 801 DDI_INTR_PRI(ngep->intr_pri)); 802 803 /* Init parameters of buffer ring */ 804 brp->free_list = NULL; 805 brp->recycle_list = NULL; 806 brp->rx_hold = 0; 807 brp->buf_sign = 0; 808 809 /* Init recycle list lock */ 810 mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER, 811 DDI_INTR_PRI(ngep->intr_pri)); 812 } 813 814 int 815 nge_init_rings(nge_t *ngep) 816 { 817 uint32_t err; 818 819 err = nge_init_send_ring(ngep); 820 if (err != DDI_SUCCESS) { 821 return (err); 822 } 823 nge_init_recv_ring(ngep); 824 825 err = nge_init_buff_ring(ngep); 826 if (err != DDI_SUCCESS) { 827 nge_fini_send_ring(ngep); 828 return (DDI_FAILURE); 829 } 830 831 return (err); 832 } 833 834 static int 835 nge_reinit_ring(nge_t *ngep) 836 { 837 int err; 838 839 nge_reinit_recv_ring(ngep); 840 nge_reinit_send_ring(ngep); 841 err = nge_reinit_buff_ring(ngep); 842 return 
(err); 843 } 844 845 846 void 847 nge_fini_rings(nge_t *ngep) 848 { 849 /* 850 * For receive ring, nothing need to be finished. 851 * So only finish buffer ring and send ring here. 852 */ 853 nge_fini_buff_ring(ngep); 854 nge_fini_send_ring(ngep); 855 } 856 857 /* 858 * Loopback ioctl code 859 */ 860 861 static lb_property_t loopmodes[] = { 862 { normal, "normal", NGE_LOOP_NONE }, 863 { external, "100Mbps", NGE_LOOP_EXTERNAL_100 }, 864 { external, "10Mbps", NGE_LOOP_EXTERNAL_10 }, 865 { internal, "PHY", NGE_LOOP_INTERNAL_PHY }, 866 }; 867 868 enum ioc_reply 869 nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp) 870 { 871 int cmd; 872 uint32_t *lbmp; 873 lb_info_sz_t *lbsp; 874 lb_property_t *lbpp; 875 876 /* 877 * Validate format of ioctl 878 */ 879 if (mp->b_cont == NULL) 880 return (IOC_INVAL); 881 882 cmd = iocp->ioc_cmd; 883 884 switch (cmd) { 885 default: 886 return (IOC_INVAL); 887 888 case LB_GET_INFO_SIZE: 889 if (iocp->ioc_count != sizeof (lb_info_sz_t)) 890 return (IOC_INVAL); 891 lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr; 892 *lbsp = sizeof (loopmodes); 893 return (IOC_REPLY); 894 895 case LB_GET_INFO: 896 if (iocp->ioc_count != sizeof (loopmodes)) 897 return (IOC_INVAL); 898 lbpp = (lb_property_t *)mp->b_cont->b_rptr; 899 bcopy(loopmodes, lbpp, sizeof (loopmodes)); 900 return (IOC_REPLY); 901 902 case LB_GET_MODE: 903 if (iocp->ioc_count != sizeof (uint32_t)) 904 return (IOC_INVAL); 905 lbmp = (uint32_t *)mp->b_cont->b_rptr; 906 *lbmp = ngep->param_loop_mode; 907 return (IOC_REPLY); 908 909 case LB_SET_MODE: 910 if (iocp->ioc_count != sizeof (uint32_t)) 911 return (IOC_INVAL); 912 lbmp = (uint32_t *)mp->b_cont->b_rptr; 913 return (nge_set_loop_mode(ngep, *lbmp)); 914 } 915 } 916 917 #undef NGE_DBG 918 #define NGE_DBG NGE_DBG_NEMO 919 920 921 static void 922 nge_check_desc_prop(nge_t *ngep) 923 { 924 if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD) 925 ngep->desc_mode = DESC_HOT; 926 927 if (ngep->desc_mode == DESC_OFFLOAD) { 
928 929 ngep->desc_attr = nge_sum_desc; 930 931 } else if (ngep->desc_mode == DESC_HOT) { 932 933 ngep->desc_attr = nge_hot_desc; 934 } 935 } 936 937 /* 938 * nge_get_props -- get the parameters to tune the driver 939 */ 940 static void 941 nge_get_props(nge_t *ngep) 942 { 943 chip_info_t *infop; 944 dev_info_t *devinfo; 945 nge_dev_spec_param_t *dev_param_p; 946 947 devinfo = ngep->devinfo; 948 infop = (chip_info_t *)&ngep->chipinfo; 949 dev_param_p = &ngep->dev_spec_param; 950 951 infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 952 DDI_PROP_DONTPASS, clsize_propname, 32); 953 954 infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 955 DDI_PROP_DONTPASS, latency_propname, 64); 956 ngep->intr_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 957 DDI_PROP_DONTPASS, intr_moderation, NGE_SET); 958 ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 959 DDI_PROP_DONTPASS, rx_data_hw, 0x20); 960 ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 961 DDI_PROP_DONTPASS, rx_prd_lw, 0x4); 962 ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 963 DDI_PROP_DONTPASS, rx_prd_hw, 0xc); 964 965 ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 966 DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC); 967 ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 968 DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP); 969 ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 970 DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type); 971 ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 972 DDI_PROP_DONTPASS, low_memory_mode, 0); 973 974 if (dev_param_p->jumbo) { 975 ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 976 DDI_PROP_DONTPASS, default_mtu, ETHERMTU); 977 } else 978 ngep->default_mtu = ETHERMTU; 979 980 if (ngep->default_mtu > ETHERMTU && 981 ngep->default_mtu <= NGE_MTU_2500) { 982 ngep->buf_size = NGE_JB2500_BUFSZ; 983 ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC; 984 ngep->rx_desc = 
NGE_RECV_JB2500_SLOTS_DESC; 985 ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2; 986 ngep->nge_split = NGE_SPLIT_256; 987 } else if (ngep->default_mtu > NGE_MTU_2500 && 988 ngep->default_mtu <= NGE_MTU_4500) { 989 ngep->buf_size = NGE_JB4500_BUFSZ; 990 ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC; 991 ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC; 992 ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2; 993 ngep->nge_split = NGE_SPLIT_256; 994 } else if (ngep->default_mtu > NGE_MTU_4500 && 995 ngep->default_mtu <= NGE_MAX_MTU) { 996 ngep->buf_size = NGE_JB9000_BUFSZ; 997 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC; 998 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC; 999 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2; 1000 ngep->nge_split = NGE_SPLIT_256; 1001 } else if (ngep->default_mtu > NGE_MAX_MTU) { 1002 ngep->default_mtu = NGE_MAX_MTU; 1003 ngep->buf_size = NGE_JB9000_BUFSZ; 1004 ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC; 1005 ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC; 1006 ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2; 1007 ngep->nge_split = NGE_SPLIT_256; 1008 } else if (ngep->lowmem_mode != 0) { 1009 ngep->default_mtu = ETHERMTU; 1010 ngep->buf_size = NGE_STD_BUFSZ; 1011 ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC; 1012 ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC; 1013 ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2; 1014 ngep->nge_split = NGE_SPLIT_32; 1015 } else { 1016 ngep->default_mtu = ETHERMTU; 1017 ngep->buf_size = NGE_STD_BUFSZ; 1018 ngep->tx_desc = dev_param_p->tx_desc_num; 1019 ngep->rx_desc = dev_param_p->rx_desc_num; 1020 ngep->rx_buf = dev_param_p->rx_desc_num * 2; 1021 ngep->nge_split = dev_param_p->nge_split; 1022 } 1023 1024 nge_check_desc_prop(ngep); 1025 } 1026 1027 1028 static int 1029 nge_reset(nge_t *ngep) 1030 { 1031 int err; 1032 nge_mul_addr1 maddr1; 1033 nge_sw_statistics_t *sw_stp; 1034 sw_stp = &ngep->statistics.sw_statistics; 1035 send_ring_t *srp = ngep->send; 1036 1037 ASSERT(mutex_owned(ngep->genlock)); 1038 mutex_enter(srp->tc_lock); 1039 
mutex_enter(srp->tx_lock); 1040 1041 nge_tx_recycle_all(ngep); 1042 err = nge_reinit_ring(ngep); 1043 if (err == DDI_FAILURE) { 1044 mutex_exit(srp->tx_lock); 1045 mutex_exit(srp->tc_lock); 1046 return (err); 1047 } 1048 err = nge_chip_reset(ngep); 1049 /* 1050 * Clear the Multicast mac address table 1051 */ 1052 nge_reg_put32(ngep, NGE_MUL_ADDR0, 0); 1053 maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1); 1054 maddr1.addr_bits.addr = 0; 1055 nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val); 1056 1057 mutex_exit(srp->tx_lock); 1058 mutex_exit(srp->tc_lock); 1059 if (err == DDI_FAILURE) 1060 return (err); 1061 ngep->watchdog = 0; 1062 ngep->resched_needed = B_FALSE; 1063 ngep->promisc = B_FALSE; 1064 ngep->param_loop_mode = NGE_LOOP_NONE; 1065 ngep->factotum_flag = 0; 1066 ngep->resched_needed = 0; 1067 ngep->nge_mac_state = NGE_MAC_RESET; 1068 ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL; 1069 ngep->max_sdu += VTAG_SIZE; 1070 ngep->rx_def = 0x16; 1071 1072 /* Clear the software statistics */ 1073 sw_stp->recv_count = 0; 1074 sw_stp->xmit_count = 0; 1075 sw_stp->rbytes = 0; 1076 sw_stp->obytes = 0; 1077 1078 return (DDI_SUCCESS); 1079 } 1080 1081 static void 1082 nge_m_stop(void *arg) 1083 { 1084 nge_t *ngep = arg; /* private device info */ 1085 1086 NGE_TRACE(("nge_m_stop($%p)", arg)); 1087 1088 /* 1089 * Just stop processing, then record new MAC state 1090 */ 1091 mutex_enter(ngep->genlock); 1092 /* If suspended, the adapter is already stopped, just return. 
*/ 1093 if (ngep->suspended) { 1094 ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED); 1095 mutex_exit(ngep->genlock); 1096 return; 1097 } 1098 rw_enter(ngep->rwlock, RW_WRITER); 1099 1100 (void) nge_chip_stop(ngep, B_FALSE); 1101 ngep->nge_mac_state = NGE_MAC_STOPPED; 1102 1103 /* Recycle all the TX BD */ 1104 nge_tx_recycle_all(ngep); 1105 nge_fini_rings(ngep); 1106 nge_free_bufs(ngep); 1107 1108 NGE_DEBUG(("nge_m_stop($%p) done", arg)); 1109 1110 rw_exit(ngep->rwlock); 1111 mutex_exit(ngep->genlock); 1112 } 1113 1114 static int 1115 nge_m_start(void *arg) 1116 { 1117 int err; 1118 nge_t *ngep = arg; 1119 1120 NGE_TRACE(("nge_m_start($%p)", arg)); 1121 1122 /* 1123 * Start processing and record new MAC state 1124 */ 1125 mutex_enter(ngep->genlock); 1126 /* 1127 * If suspended, don't start, as the resume processing 1128 * will recall this function with the suspended flag off. 1129 */ 1130 if (ngep->suspended) { 1131 mutex_exit(ngep->genlock); 1132 return (EIO); 1133 } 1134 rw_enter(ngep->rwlock, RW_WRITER); 1135 err = nge_alloc_bufs(ngep); 1136 if (err != DDI_SUCCESS) { 1137 nge_problem(ngep, "nge_m_start: DMA buffer allocation failed"); 1138 goto finish; 1139 } 1140 err = nge_init_rings(ngep); 1141 if (err != DDI_SUCCESS) { 1142 nge_free_bufs(ngep); 1143 nge_problem(ngep, "nge_init_rings() failed,err=%x", err); 1144 goto finish; 1145 } 1146 err = nge_restart(ngep); 1147 1148 NGE_DEBUG(("nge_m_start($%p) done", arg)); 1149 finish: 1150 rw_exit(ngep->rwlock); 1151 mutex_exit(ngep->genlock); 1152 1153 return (err == DDI_SUCCESS ? 0 : EIO); 1154 } 1155 1156 static int 1157 nge_m_unicst(void *arg, const uint8_t *macaddr) 1158 { 1159 nge_t *ngep = arg; 1160 1161 NGE_TRACE(("nge_m_unicst($%p)", arg)); 1162 /* 1163 * Remember the new current address in the driver state 1164 * Sync the chip's idea of the address too ... 
1165 */ 1166 mutex_enter(ngep->genlock); 1167 1168 ethaddr_copy(macaddr, ngep->cur_uni_addr.addr); 1169 ngep->cur_uni_addr.set = 1; 1170 1171 /* 1172 * If we are suspended, we want to quit now, and not update 1173 * the chip. Doing so might put it in a bad state, but the 1174 * resume will get the unicast address installed. 1175 */ 1176 if (ngep->suspended) { 1177 mutex_exit(ngep->genlock); 1178 return (DDI_SUCCESS); 1179 } 1180 nge_chip_sync(ngep); 1181 1182 NGE_DEBUG(("nge_m_unicst($%p) done", arg)); 1183 mutex_exit(ngep->genlock); 1184 1185 return (0); 1186 } 1187 1188 static int 1189 nge_m_promisc(void *arg, boolean_t on) 1190 { 1191 nge_t *ngep = arg; 1192 1193 NGE_TRACE(("nge_m_promisc($%p)", arg)); 1194 1195 /* 1196 * Store specified mode and pass to chip layer to update h/w 1197 */ 1198 mutex_enter(ngep->genlock); 1199 /* 1200 * If suspended, there is no need to do anything, even 1201 * recording the promiscuious mode is not neccessary, as 1202 * it won't be properly set on resume. Just return failing. 
1203 */ 1204 if (ngep->suspended) { 1205 mutex_exit(ngep->genlock); 1206 return (DDI_FAILURE); 1207 } 1208 if (ngep->promisc == on) { 1209 mutex_exit(ngep->genlock); 1210 NGE_DEBUG(("nge_m_promisc($%p) done", arg)); 1211 return (0); 1212 } 1213 ngep->promisc = on; 1214 nge_chip_sync(ngep); 1215 NGE_DEBUG(("nge_m_promisc($%p) done", arg)); 1216 mutex_exit(ngep->genlock); 1217 1218 return (0); 1219 } 1220 1221 static void nge_mulparam(nge_t *ngep) 1222 { 1223 uint8_t number; 1224 ether_addr_t pand; 1225 ether_addr_t por; 1226 mul_item *plist; 1227 1228 for (number = 0; number < ETHERADDRL; number++) { 1229 pand[number] = 0x00; 1230 por[number] = 0x00; 1231 } 1232 for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) { 1233 for (number = 0; number < ETHERADDRL; number++) { 1234 pand[number] &= plist->mul_addr[number]; 1235 por[number] |= plist->mul_addr[number]; 1236 } 1237 } 1238 for (number = 0; number < ETHERADDRL; number++) { 1239 ngep->cur_mul_addr.addr[number] 1240 = pand[number] & por[number]; 1241 ngep->cur_mul_mask.addr[number] 1242 = pand [number] | (~por[number]); 1243 } 1244 } 1245 static int 1246 nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1247 { 1248 boolean_t update; 1249 boolean_t b_eq; 1250 nge_t *ngep = arg; 1251 mul_item *plist; 1252 mul_item *plist_prev; 1253 mul_item *pitem; 1254 1255 NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg, 1256 (add) ? 
"add" : "remove", ether_sprintf((void *)mca))); 1257 1258 update = B_FALSE; 1259 plist = plist_prev = NULL; 1260 mutex_enter(ngep->genlock); 1261 if (add) { 1262 if (ngep->pcur_mulist != NULL) { 1263 for (plist = ngep->pcur_mulist; plist != NULL; 1264 plist = plist->next) { 1265 b_eq = ether_eq(plist->mul_addr, mca); 1266 if (b_eq) { 1267 plist->ref_cnt++; 1268 break; 1269 } 1270 plist_prev = plist; 1271 } 1272 } 1273 1274 if (plist == NULL) { 1275 pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP); 1276 ether_copy(mca, pitem->mul_addr); 1277 pitem ->ref_cnt++; 1278 pitem ->next = NULL; 1279 if (plist_prev == NULL) 1280 ngep->pcur_mulist = pitem; 1281 else 1282 plist_prev->next = pitem; 1283 update = B_TRUE; 1284 } 1285 } else { 1286 if (ngep->pcur_mulist != NULL) { 1287 for (plist = ngep->pcur_mulist; plist != NULL; 1288 plist = plist->next) { 1289 b_eq = ether_eq(plist->mul_addr, mca); 1290 if (b_eq) { 1291 update = B_TRUE; 1292 break; 1293 } 1294 plist_prev = plist; 1295 } 1296 1297 if (update) { 1298 if ((plist_prev == NULL) && 1299 (plist->next == NULL)) 1300 ngep->pcur_mulist = NULL; 1301 else if ((plist_prev == NULL) && 1302 (plist->next != NULL)) 1303 ngep->pcur_mulist = plist->next; 1304 else 1305 plist_prev->next = plist->next; 1306 kmem_free(plist, sizeof (mul_item)); 1307 } 1308 } 1309 } 1310 1311 if (update && !ngep->suspended) { 1312 nge_mulparam(ngep); 1313 nge_chip_sync(ngep); 1314 } 1315 NGE_DEBUG(("nge_m_multicst($%p) done", arg)); 1316 mutex_exit(ngep->genlock); 1317 1318 return (0); 1319 } 1320 1321 static void 1322 nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 1323 { 1324 int err; 1325 int cmd; 1326 nge_t *ngep = arg; 1327 struct iocblk *iocp; 1328 enum ioc_reply status; 1329 boolean_t need_privilege; 1330 1331 /* 1332 * If suspended, we might actually be able to do some of 1333 * these ioctls, but it is harder to make sure they occur 1334 * without actually putting the hardware in an undesireable 1335 * state. So just NAK it. 
 */
	mutex_enter(ngep->genlock);
	if (ngep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(ngep->genlock);
		return;
	}
	mutex_exit(ngep->genlock);

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x", cmd));
	switch (cmd) {
	default:
		NGE_LDB(NGE_DBG_BADIOC,
		    ("nge_m_ioctl: unknown cmd 0x%x", cmd));

		miocnak(wq, mp, 0, EINVAL);
		return;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		break;

	/* Loopback ioctls: only LB_SET_MODE requires privilege */
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;

	/* ND ioctls: only ND_SET requires privilege */
	case ND_GET:
		need_privilege = B_FALSE;
		break;
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d",
			    cmd, err));
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(ngep->genlock);

	switch (cmd) {
	default:
		/* Unknown commands were already NAKed above */
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		status = nge_chip_ioctl(ngep, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = nge_loop_ioctl(ngep, mp, iocp);
		break;

	case ND_GET:
	case ND_SET:
		status = nge_nd_ioctl(ngep, wq, mp, iocp);
		break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status));

	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

	default:
		break;
	}

	mutex_exit(ngep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {

	default:
	case IOC_INVAL:
		/* NAK with the handler's specific error, if it set one */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/* Nothing more to do for this message */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/* The handler built the reply; just send it upstream */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

/*
 * nge_m_getcapab() -- mc_getcapab(9E) entry point: report which of
 * the queried capabilities (h/w TX checksum, polling) we support.
 */
/* ARGSUSED */
static boolean_t
nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nge_t *ngep = arg;
	nge_dev_spec_param_t *dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;

		/* Only claim the capability if this chip provides it */
		if (dev_param_p->tx_hw_checksum) {
			*hcksum_txflags = dev_param_p->tx_hw_checksum;
		} else
			return (B_FALSE);
		break;
	}
	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE, stating that we support polling is sufficient.
		 */
		break;
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_INIT	/* debug flag for this code	*/

/*
 * nge_restart() -- reset and restart the chip, recording the
 * resulting MAC state; returns DDI_SUCCESS or DDI_FAILURE.
 */
int
nge_restart(nge_t *ngep)
{
	int err = 0;
	err = nge_reset(ngep);
	if (!err)
		err = nge_chip_start(ngep);

	if (err) {
		ngep->nge_mac_state = NGE_MAC_STOPPED;
		return (DDI_FAILURE);
	} else {
		ngep->nge_mac_state = NGE_MAC_STARTED;
		return (DDI_SUCCESS);
	}
}

/*
 * nge_wake_factotum() -- trigger the factotum softint; factotum_flag
 * ensures at most one trigger is outstanding at a time.
 */
void
nge_wake_factotum(nge_t *ngep)
{
	mutex_enter(ngep->softlock);
	if (ngep->factotum_flag == 0) {
		ngep->factotum_flag = 1;
		(void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL);
	}
	mutex_exit(ngep->softlock);
}

/*
 * High-level cyclic handler
 *
 * This routine schedules a (low-level) softint callback to the
 * factotum.
 */

static void
nge_chip_cyclic(void *arg)
{
	nge_t *ngep;

	ngep = (nge_t *)arg;

	/* Only poke the factotum for chip states that need attention */
	switch (ngep->nge_chip_state) {
	default:
		return;

	case NGE_CHIP_RUNNING:
		break;

	case NGE_CHIP_FAULT:
	case NGE_CHIP_ERROR:
		break;
	}

	nge_wake_factotum(ngep);
}

/*
 * nge_unattach() -- undo everything nge_attach() did, guided by the
 * ngep->progress flags, so this is safe to call both from a partial
 * attach failure path and from a full detach.
 */
static void
nge_unattach(nge_t *ngep)
{
	send_ring_t *srp;
	buff_ring_t *brp;

	srp = ngep->send;
	brp = ngep->buff;
	NGE_TRACE(("nge_unattach($%p)", (void *)ngep));

	/*
	 * Flag that no more activity may be initiated
	 */
	ngep->progress &= ~PROGRESS_READY;
	ngep->nge_mac_state = NGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all NGE data structures
	 */
	if (ngep->periodic_id != NULL) {
		ddi_periodic_delete(ngep->periodic_id);
		ngep->periodic_id = NULL;
	}

	if (ngep->progress & PROGRESS_KSTATS)
		nge_fini_kstats(ngep);

	if (ngep->progress & PROGRESS_NDD)
		nge_nd_cleanup(ngep);

	if (ngep->progress & PROGRESS_HWINT) {
		mutex_enter(ngep->genlock);
		nge_restore_mac_addr(ngep);
		(void) nge_chip_stop(ngep, B_FALSE);
		mutex_exit(ngep->genlock);
	}

	if (ngep->progress & PROGRESS_SWINT)
		nge_rem_intrs(ngep);

	if (ngep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(ngep->factotum_hdl);

	if (ngep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(ngep->resched_hdl);

	/* The locks only exist once interrupt registration completed */
	if (ngep->progress & PROGRESS_INTR) {
		mutex_destroy(srp->tx_lock);
		mutex_destroy(srp->tc_lock);
		mutex_destroy(&srp->dmah_lock);
		mutex_destroy(brp->recycle_lock);

		mutex_destroy(ngep->genlock);
		mutex_destroy(ngep->softlock);
		rw_destroy(ngep->rwlock);
	}

	if (ngep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&ngep->io_handle);
	if (ngep->progress & PROGRESS_CFG)
		pci_config_teardown(&ngep->cfg_handle);

	ddi_remove_minor_node(ngep->devinfo, NULL);

	kmem_free(ngep, sizeof (*ngep));
}

/*
 * nge_resume() -- DDI_RESUME handling: re-read the config space and,
 * if the port was plumbed at suspend time, rebuild the rings and
 * restart the chip.
 */
static int
nge_resume(dev_info_t *devinfo)
{
	nge_t *ngep;
	chip_info_t *infop;
	int err;

	ASSERT(devinfo != NULL);

	ngep = ddi_get_driver_private(devinfo);
	err = 0;

	/*
	 * If there are state inconsistancies, this is bad. Returning
	 * DDI_FAILURE here will eventually cause the machine to panic,
	 * so it is best done here so that there is a possibility of
	 * debugging the problem.
	 */
	if (ngep == NULL)
		cmn_err(CE_PANIC,
		    "nge: ngep returned from ddi_get_driver_private was NULL");
	infop = (chip_info_t *)&ngep->chipinfo;

	if (ngep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "nge: passed devinfo not the same as saved devinfo");

	mutex_enter(ngep->genlock);
	rw_enter(ngep->rwlock, RW_WRITER);

	/*
	 * Fetch the config space. Even though we have most of it cached,
	 * some values *might* change across a suspend/resume.
	 */
	nge_chip_cfg_init(ngep, infop, B_FALSE);

	/*
	 * Only in one case, this conditional branch can be executed: the port
	 * hasn't been plumbed.
	 */
	if (ngep->suspended == B_FALSE) {
		rw_exit(ngep->rwlock);
		mutex_exit(ngep->genlock);
		return (DDI_SUCCESS);
	}

	nge_tx_recycle_all(ngep);
	err = nge_reinit_ring(ngep);
	if (!err) {
		err = nge_chip_reset(ngep);
		if (!err)
			err = nge_chip_start(ngep);
	}

	if (err) {
		/*
		 * We note the failure, but return success, as the
		 * system is still usable without this controller.
		 */
		cmn_err(CE_WARN, "nge: resume: failed to restart controller");
	} else {
		ngep->nge_mac_state = NGE_MAC_STARTED;
	}
	ngep->suspended = B_FALSE;

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);

	return (DDI_SUCCESS);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	int err;
	int i;
	int instance;
	caddr_t regs;
	nge_t *ngep;
	chip_info_t *infop;
	mac_register_t *macp;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (nge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP);
	instance = ddi_get_instance(devinfo);
	ddi_set_driver_private(devinfo, ngep);
	ngep->devinfo = devinfo;

	(void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d",
	    NGE_DRIVER_NAME, instance);
	err = pci_config_setup(devinfo, &ngep->cfg_handle);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: pci_config_setup() failed");
		goto attach_fail;
	}
	infop = (chip_info_t *)&ngep->chipinfo;
	nge_chip_cfg_init(ngep, infop, B_FALSE);
	nge_init_dev_spec_param(ngep);
	nge_get_props(ngep);
	ngep->progress |= PROGRESS_CFG;

	/* Map the operating registers */
	err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER,
	    &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	ngep->io_regs = regs;
	ngep->progress |= PROGRESS_REGS;

	err = nge_register_intrs_and_init_locks(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach:"
		    " register intrs and init locks failed");
		goto attach_fail;
	}

	nge_init_ring_param_lock(ngep);
	ngep->progress |= PROGRESS_INTR;

	mutex_enter(ngep->genlock);

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	nge_phys_init(ngep);
	err = nge_chip_reset(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: nge_chip_reset() failed");
		mutex_exit(ngep->genlock);
		goto attach_fail;
	}
	nge_chip_sync(ngep);

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(ngep->htable,
		    ngep->intr_actual_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < ngep->intr_actual_cnt; i++) {
			(void) ddi_intr_enable(ngep->htable[i]);
		}
	}

	ngep->link_state = LINK_STATE_UNKNOWN;
	ngep->progress |= PROGRESS_HWINT;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (nge_nd_init(ngep)) {
		nge_problem(ngep, "nge_attach: nge_nd_init() failed");
		mutex_exit(ngep->genlock);
		goto attach_fail;
	}
	ngep->progress |= PROGRESS_NDD;

	/*
	 * Create & initialise named kstats
	 */
	nge_init_kstats(ngep, instance);
	ngep->progress |= PROGRESS_KSTATS;

	mutex_exit(ngep->genlock);

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = ngep;
	macp->m_dip = devinfo;
	macp->m_src_addr = infop->vendor_addr.addr;
	macp->m_callbacks = &nge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ngep->default_mtu;
	macp->m_margin = VTAG_SIZE;
	/*
	 * Finally, we're ready to register ourselves with the mac
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &ngep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodical handler.
	 * nge_chip_cyclic() is invoked in kernel context.
	 */
	ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
	    NGE_CYCLIC_PERIOD, DDI_IPL_0);

	ngep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	/* nge_unattach() consults ngep->progress to undo what was done */
	nge_unattach(ngep);
	return (DDI_FAILURE);
}

/*
 * nge_suspend() -- DDI_SUSPEND handling: stop the chip if the port
 * is plumbed and remember that we were suspended.
 */
static int
nge_suspend(nge_t *ngep)
{
	mutex_enter(ngep->genlock);
	rw_enter(ngep->rwlock, RW_WRITER);

	/* if the port hasn't been plumbed, just return */
	if (ngep->nge_mac_state != NGE_MAC_STARTED) {
		rw_exit(ngep->rwlock);
		mutex_exit(ngep->genlock);
		return (DDI_SUCCESS);
	}
	ngep->suspended = B_TRUE;
	(void) nge_chip_stop(ngep, B_FALSE);
	ngep->nge_mac_state = NGE_MAC_STOPPED;

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);
	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	int i;
	nge_t *ngep;
	mul_item *p, *nextp;
	buff_ring_t *brp;

	NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd));

	ngep = ddi_get_driver_private(devinfo);
	brp = ngep->buff;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		/*
		 * Stop the NIC
		 * Note: This driver doesn't currently support WOL, but
		 *	should it in the future, it is important to
		 *	make sure the PHY remains powered so that the
		 *	wakeup packet can actually be received.
		 */
		return (nge_suspend(ngep));

	case DDI_DETACH:
		break;
	}

	/* Try to wait all the buffer post to upper layer be released */
	for (i = 0; i < 1000; i++) {
		if (brp->rx_hold == 0)
			break;
		drv_usecwait(1000);
	}

	/* If there is any posted buffer, reject to detach */
	if (brp->rx_hold != 0)
		return (DDI_FAILURE);

	/* Recycle the multicast table */
	for (p = ngep->pcur_mulist; p != NULL; p = nextp) {
		nextp = p->next;
		kmem_free(p, sizeof (mul_item));
	}
	ngep->pcur_mulist = NULL;

	/*
	 * Unregister from the GLD subsystem. This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(ngep->mh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	nge_unattach(ngep);
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach,
    nodev, NULL, D_MP, NULL);


static struct modldrv nge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	nge_ident,		/* short description */
	&nge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&nge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * _init(9E) -- module load: hook into the mac framework, install the
 * module, and create the global log mutex on success.
 * NOTE(review): mod_install() returns errno-style codes; comparing
 * against DDI_SUCCESS works only because DDI_SUCCESS is 0.
 */
int
_init(void)
{
	int status;

	mac_init_ops(&nge_dev_ops, "nge");
	status = mod_install(&modlinkage);
	if (status != DDI_SUCCESS)
		mac_fini_ops(&nge_dev_ops);
	else
		mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL);

	return (status);
}

/*
 * _fini(9E) -- module unload: undo _init() when mod_remove() allows.
 */
int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&nge_dev_ops);
		mutex_destroy(nge_log_mutex);
	}

	return (status);
}

/*
 * ============ Init MSI/Fixed/SoftInterrupt routines ==============
 */

/*
 * Register interrupts and initialize each mutex and condition variables
 */

static int
nge_register_intrs_and_init_locks(nge_t *ngep)
{
	int err;
	int intr_types;
	uint_t soft_prip;
	nge_msi_mask msi_mask;
	nge_msi_map0_vec map0_vec;
	nge_msi_map1_vec map1_vec;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations. In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.
So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.  Its only purpose is to call gld_sched()
	 * to retry the pending transmits (we're not allowed to hold
	 * driver-defined mutexes across gld_sched()).
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_reschedule softintr failed");

		return (DDI_FAILURE);
	}
	ngep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_chip_factotum softintr failed!");

		return (DDI_FAILURE);
	}
	/* Remember the factotum's priority: softlock is initialized with it */
	if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip)
	    != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: get softintr priority failed\n");

		return (DDI_FAILURE);
	}
	ngep->soft_pri = soft_prip;

	ngep->progress |= PROGRESS_FACTOTUM;
	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types)
	    != DDI_SUCCESS) {
		nge_error(ngep, "ddi_intr_get_supported_types failed\n");

		return (DDI_FAILURE);
	}

	NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x",
	    intr_types));

	if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) {

		/* MSI Configurations for mcp55 chipset */
		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {


			/* Enable the 8 vectors */
			msi_mask.msi_mask_val =
			    nge_reg_get32(ngep, NGE_MSI_MASK);
			msi_mask.msi_msk_bits.vec0 = NGE_SET;
			msi_mask.msi_msk_bits.vec1 = NGE_SET;
			msi_mask.msi_msk_bits.vec2 = NGE_SET;
			msi_mask.msi_msk_bits.vec3 = NGE_SET;
			msi_mask.msi_msk_bits.vec4 = NGE_SET;
			msi_mask.msi_msk_bits.vec5 = NGE_SET;
			msi_mask.msi_msk_bits.vec6 = NGE_SET;
			msi_mask.msi_msk_bits.vec7 = NGE_SET;
			nge_reg_put32(ngep, NGE_MSI_MASK,
			    msi_mask.msi_mask_val);

			/*
			 * Remapping the MSI MAP0 and MAP1. MCP55
			 * is default mapping all the interrupt to 0 vector.
			 * Software needs to remapping this.
			 * This mapping is same as CK804.
			 */
			map0_vec.msi_map0_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP0);
			map1_vec.msi_map1_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP1);
			map0_vec.vecs_bits.reint_vec = 0;
			map0_vec.vecs_bits.rcint_vec = 0;
			map0_vec.vecs_bits.miss_vec = 3;
			map0_vec.vecs_bits.teint_vec = 5;
			map0_vec.vecs_bits.tcint_vec = 5;
			map0_vec.vecs_bits.stint_vec = 2;
			map0_vec.vecs_bits.mint_vec = 6;
			map0_vec.vecs_bits.rfint_vec = 0;
			map1_vec.vecs_bits.tfint_vec = 5;
			map1_vec.vecs_bits.feint_vec = 6;
			map1_vec.vecs_bits.resv8_11 = 3;
			map1_vec.vecs_bits.resv12_15 = 1;
			map1_vec.vecs_bits.resv16_19 = 0;
			map1_vec.vecs_bits.resv20_23 = 7;
			map1_vec.vecs_bits.resv24_31 = 0xff;
			nge_reg_put32(ngep, NGE_MSI_MAP0,
			    map0_vec.msi_map0_val);
			nge_reg_put32(ngep, NGE_MSI_MAP1,
			    map1_vec.msi_map1_val);
		}
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			NGE_DEBUG(("MSI registration failed, "
			    "trying FIXED interrupt type\n"));
		} else {
			nge_log(ngep, "Using MSI interrupt type\n");

			ngep->intr_type = DDI_INTR_TYPE_MSI;
			ngep->progress |= PROGRESS_SWINT;
		}
	}

	/* Fall back to FIXED interrupts if MSI was unavailable or failed */
	if (!(ngep->progress & PROGRESS_SWINT) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			nge_error(ngep, "FIXED interrupt "
			    "registration failed\n");

			return (DDI_FAILURE);
		}

		nge_log(ngep, "Using FIXED interrupt type\n");

		ngep->intr_type = DDI_INTR_TYPE_FIXED;
		ngep->progress |= PROGRESS_SWINT;
	}


	if (!(ngep->progress & PROGRESS_SWINT)) {
		nge_error(ngep, "No interrupts registered\n");

		return (DDI_FAILURE);
	}
	/* Now the interrupt priorities are known, create the locks */
	mutex_init(ngep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(ngep->softlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->soft_pri));
	rw_init(ngep->rwlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));

	return (DDI_SUCCESS);
}

/*
 * nge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
2186 */ 2187 static int 2188 nge_add_intrs(nge_t *ngep, int intr_type) 2189 { 2190 dev_info_t *dip = ngep->devinfo; 2191 int avail, actual, intr_size, count = 0; 2192 int i, flag, ret; 2193 2194 NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type)); 2195 2196 /* Get number of interrupts */ 2197 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 2198 if ((ret != DDI_SUCCESS) || (count == 0)) { 2199 nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, " 2200 "count: %d", ret, count); 2201 2202 return (DDI_FAILURE); 2203 } 2204 2205 /* Get number of available interrupts */ 2206 ret = ddi_intr_get_navail(dip, intr_type, &avail); 2207 if ((ret != DDI_SUCCESS) || (avail == 0)) { 2208 nge_error(ngep, "ddi_intr_get_navail() failure, " 2209 "ret: %d, avail: %d\n", ret, avail); 2210 2211 return (DDI_FAILURE); 2212 } 2213 2214 if (avail < count) { 2215 NGE_DEBUG(("nitrs() returned %d, navail returned %d\n", 2216 count, avail)); 2217 } 2218 flag = DDI_INTR_ALLOC_NORMAL; 2219 2220 /* Allocate an array of interrupt handles */ 2221 intr_size = count * sizeof (ddi_intr_handle_t); 2222 ngep->htable = kmem_alloc(intr_size, KM_SLEEP); 2223 2224 /* Call ddi_intr_alloc() */ 2225 ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0, 2226 count, &actual, flag); 2227 2228 if ((ret != DDI_SUCCESS) || (actual == 0)) { 2229 nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret); 2230 2231 kmem_free(ngep->htable, intr_size); 2232 return (DDI_FAILURE); 2233 } 2234 2235 if (actual < count) { 2236 NGE_DEBUG(("Requested: %d, Received: %d\n", 2237 count, actual)); 2238 } 2239 2240 ngep->intr_actual_cnt = actual; 2241 ngep->intr_req_cnt = count; 2242 2243 /* 2244 * Get priority for first msi, assume remaining are all the same 2245 */ 2246 if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) != 2247 DDI_SUCCESS) { 2248 nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret); 2249 2250 /* Free already allocated intr */ 2251 for (i = 0; i < actual; i++) { 2252 (void) 
ddi_intr_free(ngep->htable[i]); 2253 } 2254 2255 kmem_free(ngep->htable, intr_size); 2256 2257 return (DDI_FAILURE); 2258 } 2259 /* Test for high level mutex */ 2260 if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) { 2261 nge_error(ngep, "nge_add_intrs:" 2262 "Hi level interrupt not supported"); 2263 2264 for (i = 0; i < actual; i++) 2265 (void) ddi_intr_free(ngep->htable[i]); 2266 2267 kmem_free(ngep->htable, intr_size); 2268 2269 return (DDI_FAILURE); 2270 } 2271 2272 2273 /* Call ddi_intr_add_handler() */ 2274 for (i = 0; i < actual; i++) { 2275 if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr, 2276 (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 2277 nge_error(ngep, "ddi_intr_add_handler() " 2278 "failed %d\n", ret); 2279 2280 /* Free already allocated intr */ 2281 for (i = 0; i < actual; i++) { 2282 (void) ddi_intr_free(ngep->htable[i]); 2283 } 2284 2285 kmem_free(ngep->htable, intr_size); 2286 2287 return (DDI_FAILURE); 2288 } 2289 } 2290 2291 if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap)) 2292 != DDI_SUCCESS) { 2293 nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret); 2294 2295 for (i = 0; i < actual; i++) { 2296 (void) ddi_intr_remove_handler(ngep->htable[i]); 2297 (void) ddi_intr_free(ngep->htable[i]); 2298 } 2299 2300 kmem_free(ngep->htable, intr_size); 2301 2302 return (DDI_FAILURE); 2303 } 2304 2305 return (DDI_SUCCESS); 2306 } 2307 2308 /* 2309 * nge_rem_intrs: 2310 * 2311 * Unregister FIXED or MSI interrupts 2312 */ 2313 static void 2314 nge_rem_intrs(nge_t *ngep) 2315 { 2316 int i; 2317 2318 NGE_DEBUG(("nge_rem_intrs\n")); 2319 2320 /* Disable all interrupts */ 2321 if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) { 2322 /* Call ddi_intr_block_disable() */ 2323 (void) ddi_intr_block_disable(ngep->htable, 2324 ngep->intr_actual_cnt); 2325 } else { 2326 for (i = 0; i < ngep->intr_actual_cnt; i++) { 2327 (void) ddi_intr_disable(ngep->htable[i]); 2328 } 2329 } 2330 2331 /* Call ddi_intr_remove_handler() */ 2332 for (i 
= 0; i < ngep->intr_actual_cnt; i++) { 2333 (void) ddi_intr_remove_handler(ngep->htable[i]); 2334 (void) ddi_intr_free(ngep->htable[i]); 2335 } 2336 2337 kmem_free(ngep->htable, 2338 ngep->intr_req_cnt * sizeof (ddi_intr_handle_t)); 2339 } 2340