/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "rge.h"

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char rge_ident[] = "Realtek 1Gb Ethernet v1.11";

/*
 * Used for buffers allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr version	*/
	(uint32_t)0,		/* dma_attr_addr_lo	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max	*/
	(uint32_t)16,		/* dma_attr_align	*/
	0xFFFFFFFF,		/* dma_attr_burstsizes	*/
	1,			/* dma_attr_minxfer	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg		*/
	1,			/* dma_attr_sgllen	*/
	1,			/* dma_attr_granular	*/
	0,			/* dma_attr_flags	*/
};

/*
 * Used for BDs allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr version	*/
	(uint32_t)0,		/* dma_attr_addr_lo	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max	*/
	(uint32_t)256,		/* dma_attr_align	*/
	0xFFFFFFFF,		/* dma_attr_burstsizes	*/
	1,			/* dma_attr_minxfer	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg		*/
	1,			/* dma_attr_sgllen	*/
	1,			/* dma_attr_granular	*/
	0,			/* dma_attr_flags	*/
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t rge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptors
 */
static ddi_device_acc_attr_t rge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for data
 */
static ddi_device_acc_attr_t rge_buf_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * Property names
 */
static char debug_propname[] = "rge_debug_flags";
static char mtu_propname[] = "default_mtu";
static char msi_propname[] = "msi_enable";

static int		rge_m_start(void *);
static void		rge_m_stop(void *);
static int		rge_m_promisc(void *, boolean_t);
static int		rge_m_multicst(void *, boolean_t, const uint8_t *);
static int		rge_m_unicst(void *, const uint8_t *);
static void		rge_m_resources(void *);
static void		rge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	rge_m_getcapab(void *, mac_capab_t, void *);
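
/*
 * The flag bits below advertise which of the optional MAC-layer
 * callbacks this driver implements; MC_RESOURCES, MC_IOCTL and
 * MC_GETCAPAB correspond to the m_resources, m_ioctl and m_getcapab
 * entries filled into rge_m_callbacks.
 */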
#define	RGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t rge_m_callbacks = {
	RGE_M_CALLBACK_FLAGS,
	rge_m_stat,
	rge_m_start,
	rge_m_stop,
	rge_m_promisc,
	rge_m_multicst,
	rge_m_unicst,
	rge_m_tx,
	rge_m_resources,
	rge_m_ioctl,
	rge_m_getcapab
};

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
rge_alloc_dma_mem(rge_t *rgep, size_t memsize, ddi_dma_attr_t *dma_attr_p,
	ddi_device_acc_attr_t *acc_attr_p, uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t vaddr;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(rgep->devinfo, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
rge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}
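
/*
 * Note that rge_alloc_dma_mem() NULLs out the handle fields on every
 * failure path, and rge_free_dma_mem() checks them before freeing, so
 * a partially set-up dma_area_t can always be handed back safely.
 */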

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
rge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
	uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
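
/*
 * rge_alloc_bufs() -- allocate the DMA areas for this device
 * instance: the h/w statistics block plus the Tx and Rx descriptor
 * rings
 */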
static int
rge_alloc_bufs(rge_t *rgep)
{
	size_t txdescsize;
	size_t rxdescsize;
	int err;

	/*
	 * Allocate memory & handle for packet statistics
	 */
	err = rge_alloc_dma_mem(rgep,
	    RGE_STATS_DUMP_SIZE,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_stats);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);
	rgep->hw_stats = DMA_VPTR(rgep->dma_area_stats);

	/*
	 * Allocate memory & handle for Tx descriptor ring
	 */
	txdescsize = RGE_SEND_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    txdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_txdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handle for Rx descriptor ring
	 */
	rxdescsize = RGE_RECV_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    rxdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_rxdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}

/*
 * rge_free_bufs() -- free descriptors/buffers allocated for this
 * device instance.
 */
static void
rge_free_bufs(rge_t *rgep)
{
	rge_free_dma_mem(&rgep->dma_area_stats);
	rge_free_dma_mem(&rgep->dma_area_txdesc);
	rge_free_dma_mem(&rgep->dma_area_rxdesc);
}

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the rx/tx rings to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */
static void
rge_reinit_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	rge_bd_t *bdp;
	uint32_t slot;

	/*
	 * re-init send ring
	 */
	DMA_ZERO(rgep->tx_desc);
	ssbdp = rgep->sw_sbds;
	bdp = rgep->tx_ring;
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		bdp->host_buf_addr =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress >> 32);
		/* last BD in Tx ring */
		if (slot == (RGE_SEND_SLOTS - 1))
			bdp->flags_len = RGE_BSWAP_32(BD_FLAG_EOR);
		ssbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->tx_next = 0;
	rgep->tc_next = 0;
	rgep->tc_tail = 0;
	rgep->tx_flow = 0;
	rgep->tx_free = RGE_SEND_SLOTS;
}

static void
rge_reinit_recv_ring(rge_t *rgep)
{
	rge_bd_t *bdp;
	sw_rbd_t *srbdp;
	dma_area_t *pbuf;
	uint32_t slot;

	/*
	 * re-init receive ring
	 */
	DMA_ZERO(rgep->rx_desc);
	srbdp = rgep->sw_rbds;
	bdp = rgep->rx_ring;
	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		pbuf = &srbdp->rx_buf->pbuf;
		bdp->host_buf_addr =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress + rgep->head_room);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
		bdp->flags_len = RGE_BSWAP_32(BD_FLAG_HW_OWN |
		    (rgep->rxbuf_size - rgep->head_room));
		/* last BD in Rx ring */
		if (slot == (RGE_RECV_SLOTS - 1))
			bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
		srbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->watchdog = 0;
	rgep->rx_next = 0;
}
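
/*
 * rge_reinit_buf_ring() -- decide whether the free buffer ring can be
 * reused as-is: if any loaned-up buffer is still held above the
 * driver, fall back to bcopy() on the receive path until all of them
 * have been returned.
 */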
static void
rge_reinit_buf_ring(rge_t *rgep)
{

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		return;

	/*
	 * If any of the loaned-up buffers haven't been returned to
	 * the driver, use bcopy() only in the rx process.
	 */
	if (rgep->rx_free != RGE_BUF_SLOTS)
		rgep->rx_bcopy = B_TRUE;
}

static void
rge_reinit_rings(rge_t *rgep)
{
	rge_reinit_send_ring(rgep);
	rge_reinit_recv_ring(rgep);
	rge_reinit_buf_ring(rgep);
}

static void
rge_fini_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	uint32_t slot;

	ssbdp = rgep->sw_sbds;
	for (slot = 0; slot < RGE_SEND_SLOTS; ++slot) {
		rge_free_dma_mem(&ssbdp->pbuf);
		ssbdp++;
	}

	kmem_free(rgep->sw_sbds, RGE_SEND_SLOTS * sizeof (sw_sbd_t));
	rgep->sw_sbds = NULL;
}

static void
rge_fini_recv_ring(rge_t *rgep)
{
	sw_rbd_t *srbdp;
	uint32_t slot;

	srbdp = rgep->sw_rbds;
	for (slot = 0; slot < RGE_RECV_SLOTS; ++srbdp, ++slot) {
		if (srbdp->rx_buf) {
			if (srbdp->rx_buf->mp != NULL) {
				freemsg(srbdp->rx_buf->mp);
				srbdp->rx_buf->mp = NULL;
			}
			rge_free_dma_mem(&srbdp->rx_buf->pbuf);
			kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
			srbdp->rx_buf = NULL;
		}
	}

	kmem_free(rgep->sw_rbds, RGE_RECV_SLOTS * sizeof (sw_rbd_t));
	rgep->sw_rbds = NULL;
}

static void
rge_fini_buf_ring(rge_t *rgep)
{
	sw_rbd_t *srbdp;
	uint32_t slot;

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		return;

	ASSERT(rgep->rx_free == RGE_BUF_SLOTS);

	srbdp = rgep->free_srbds;
	for (slot = 0; slot < RGE_BUF_SLOTS; ++srbdp, ++slot) {
		if (srbdp->rx_buf != NULL) {
			if (srbdp->rx_buf->mp != NULL) {
				freemsg(srbdp->rx_buf->mp);
				srbdp->rx_buf->mp = NULL;
			}
			rge_free_dma_mem(&srbdp->rx_buf->pbuf);
			kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
			srbdp->rx_buf = NULL;
		}
	}

	kmem_free(rgep->free_srbds, RGE_BUF_SLOTS * sizeof (sw_rbd_t));
	rgep->free_srbds = NULL;
}

static void
rge_fini_rings(rge_t *rgep)
{
	rge_fini_send_ring(rgep);
	rge_fini_recv_ring(rgep);
	rge_fini_buf_ring(rgep);
}

static int
rge_init_send_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_sbd_t *ssbdp;
	dma_area_t *pbuf;
	dma_area_t desc;
	int err;

	/*
	 * Allocate the array of s/w Tx Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(RGE_SEND_SLOTS*sizeof (*ssbdp), KM_SLEEP);
	rgep->sw_sbds = ssbdp;

	/*
	 * Init send ring
	 */
	rgep->tx_desc = rgep->dma_area_txdesc;
	DMA_ZERO(rgep->tx_desc);
	rgep->tx_ring = rgep->tx_desc.mem_va;

	desc = rgep->tx_desc;
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		rge_slice_chunk(&ssbdp->desc, &desc, 1, sizeof (rge_bd_t));

		/*
		 * Allocate memory & handle for Tx buffers
		 */
		pbuf = &ssbdp->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->txbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_error(rgep,
			    "rge_init_send_ring: alloc tx buffer failed");
			rge_fini_send_ring(rgep);
			return (DDI_FAILURE);
		}
		ssbdp++;
	}
	ASSERT(desc.alength == 0);

	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}
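
/*
 * rge_init_recv_ring() -- allocate the Rx buffers and, unless the
 * chip forces bcopy receive, wrap each one in a desballoc'd mblk so
 * it can be loaned directly upstream and recycled via rge_rx_recycle()
 */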
static int
rge_init_recv_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_rbd_t *srbdp;
	dma_buf_t *rx_buf;
	dma_area_t *pbuf;
	int err;

	/*
	 * Allocate the array of s/w Rx Buffer Descriptors
	 */
	srbdp = kmem_zalloc(RGE_RECV_SLOTS*sizeof (*srbdp), KM_SLEEP);
	rgep->sw_rbds = srbdp;

	/*
	 * Init receive ring
	 */
	rgep->rx_next = 0;
	rgep->rx_desc = rgep->dma_area_rxdesc;
	DMA_ZERO(rgep->rx_desc);
	rgep->rx_ring = rgep->rx_desc.mem_va;

	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		srbdp->rx_buf = rx_buf =
		    kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

		/*
		 * Allocate memory & handle for Rx buffers
		 */
		pbuf = &rx_buf->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_fini_recv_ring(rgep);
			rge_error(rgep,
			    "rge_init_recv_ring: alloc rx buffer failed");
			return (DDI_FAILURE);
		}

		pbuf->alength -= rgep->head_room;
		pbuf->offset += rgep->head_room;
		if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)) {
			rx_buf->rx_recycle.free_func = rge_rx_recycle;
			rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
			rx_buf->private = (caddr_t)rgep;
			rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
			    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
			if (rx_buf->mp == NULL) {
				rge_fini_recv_ring(rgep);
				rge_problem(rgep,
				    "rge_init_recv_ring: desballoc() failed");
				return (DDI_FAILURE);
			}
		}
		srbdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}

static int
rge_init_buf_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_rbd_t *free_srbdp;
	dma_buf_t *rx_buf;
	dma_area_t *pbuf;
	int err;

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) {
		rgep->rx_bcopy = B_TRUE;
		return (DDI_SUCCESS);
	}

	/*
	 * Allocate the array of s/w free Buffer Descriptors
	 */
	free_srbdp = kmem_zalloc(RGE_BUF_SLOTS*sizeof (*free_srbdp), KM_SLEEP);
	rgep->free_srbds = free_srbdp;

	/*
	 * Init free buffer ring
	 */
	rgep->rc_next = 0;
	rgep->rf_next = 0;
	rgep->rx_bcopy = B_FALSE;
	rgep->rx_free = RGE_BUF_SLOTS;
	for (slot = 0; slot < RGE_BUF_SLOTS; slot++) {
		free_srbdp->rx_buf = rx_buf =
		    kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

		/*
		 * Allocate memory & handle for free Rx buffers
		 */
		pbuf = &rx_buf->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_fini_buf_ring(rgep);
			rge_error(rgep,
			    "rge_init_buf_ring: alloc rx free buffer failed");
			return (DDI_FAILURE);
		}
		pbuf->alength -= rgep->head_room;
		pbuf->offset += rgep->head_room;
		rx_buf->rx_recycle.free_func = rge_rx_recycle;
		rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
		rx_buf->private = (caddr_t)rgep;
		rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
		    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
		if (rx_buf->mp == NULL) {
			rge_fini_buf_ring(rgep);
			rge_problem(rgep,
			    "rge_init_buf_ring: desballoc() failed");
			return (DDI_FAILURE);
		}
		free_srbdp++;
	}
	return (DDI_SUCCESS);
}

static int
rge_init_rings(rge_t *rgep)
{
	int err;

	err = rge_init_send_ring(rgep);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	err = rge_init_recv_ring(rgep);
	if (err != DDI_SUCCESS) {
		rge_fini_send_ring(rgep);
		return (DDI_FAILURE);
	}

	err = rge_init_buf_ring(rgep);
	if (err != DDI_SUCCESS) {
		rge_fini_send_ring(rgep);
		rge_fini_recv_ring(rgep);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the
 * MAC state so they can be called internally without disturbing
 * our record of what NEMO thinks we should be doing ...
 */

/*
 * rge_reset() -- reset h/w & rings to initial state
 */
static void
rge_reset(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	mutex_enter(rgep->rx_lock);
	mutex_enter(rgep->rc_lock);
	rw_enter(rgep->errlock, RW_WRITER);

	(void) rge_chip_reset(rgep);
	rge_reinit_rings(rgep);
	rge_chip_init(rgep);

	/*
	 * Free the world ...
	 */
	rw_exit(rgep->errlock);
	mutex_exit(rgep->rc_lock);
	mutex_exit(rgep->rx_lock);

	rgep->stats.rpackets = 0;
	rgep->stats.rbytes = 0;
	rgep->stats.opackets = 0;
	rgep->stats.obytes = 0;
	rgep->stats.tx_pre_ismax = B_FALSE;
	rgep->stats.tx_cur_ismax = B_FALSE;

	RGE_DEBUG(("rge_reset($%p) done", (void *)rgep));
}

/*
 * rge_stop() -- stop processing, don't reset h/w or rings
 */
static void
rge_stop(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	rge_chip_stop(rgep, B_FALSE);

	RGE_DEBUG(("rge_stop($%p) done", (void *)rgep));
}

/*
 * rge_start() -- start transmitting/receiving
 */
static void
rge_start(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	rge_chip_start(rgep);
	rgep->watchdog = 0;
}

/*
 * rge_restart() -- restart transmitting/receiving after error or suspend
 */
void
rge_restart(rge_t *rgep)
{
	uint32_t i;

	ASSERT(mutex_owned(rgep->genlock));
	/*
	 * Wait for posted buffers to be freed ...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_restart: waiting for rx buf free..."));
		}
	}
	rge_reset(rgep);
	rgep->stats.chip_reset++;
	if (rgep->rge_mac_state == RGE_MAC_STARTED) {
		rge_start(rgep);
		rgep->resched_needed = B_TRUE;
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	}
}

/*
 * ========== Nemo-required management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * rge_m_stop() -- stop transmitting/receiving
 */
static void
rge_m_stop(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/
	uint32_t i;

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		ASSERT(rgep->rge_mac_state == RGE_MAC_STOPPED);
		mutex_exit(rgep->genlock);
		return;
	}
	rge_stop(rgep);
	/*
	 * Wait for posted buffers to be freed ...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_m_stop: waiting for rx buf free..."));
		}
	}
	rgep->rge_mac_state = RGE_MAC_STOPPED;
	RGE_DEBUG(("rge_m_stop($%p) done", arg));
	mutex_exit(rgep->genlock);
}

/*
 * rge_m_start() -- start transmitting/receiving
 */
static int
rge_m_start(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/

	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_FAILURE);
	}
	/*
	 * Clear hw/sw statistics
	 */
	DMA_ZERO(rgep->dma_area_stats);
	bzero(&rgep->stats, sizeof (rge_stats_t));

	/*
	 * Start processing and record new MAC state
	 */
	rge_reset(rgep);
	rge_start(rgep);
	rgep->rge_mac_state = RGE_MAC_STARTED;
	RGE_DEBUG(("rge_m_start($%p) done", arg));

	mutex_exit(rgep->genlock);

	return (0);
}

/*
 * rge_m_unicst() -- set the physical network address
 */
static int
rge_m_unicst(void *arg, const uint8_t *macaddr)
{
	rge_t *rgep = arg;		/* private device info	*/

	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(rgep->genlock);
	bcopy(macaddr, rgep->netaddr, ETHERADDRL);

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rge_chip_sync(rgep, RGE_SET_MAC);
	mutex_exit(rgep->genlock);

	return (0);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 */
static uint32_t
rge_hash_index(const uint8_t *mca)
{
	uint32_t crc = (uint32_t)RGE_HASH_CRC;
	uint32_t const POLY = RGE_HASH_POLY;
	uint32_t msb;
	int bytes;
	uchar_t currentbyte;
	uint32_t index;
	int bit;

	for (bytes = 0; bytes < ETHERADDRL; bytes++) {
		currentbyte = mca[bytes];
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (currentbyte & 1))
				crc ^= POLY;
			currentbyte >>= 1;
		}
	}
	index = crc >> 26;
	/* the index value is between 0 and 63 (0x3f) */

	return (index);
}
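
/*
 * The mcast_refs[] counters below track how many active registrations
 * map onto each of the 64 hash values, so a bit in the multicast hash
 * is only set on the first add and only cleared on the last remove.
 */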

/*
 * rge_m_multicst() -- enable/disable a multicast address
 */
static int
rge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	rge_t *rgep = arg;		/* private device info	*/
	struct ether_addr *addr;
	uint32_t index;
	uint32_t reg;
	uint8_t *hashp;

	mutex_enter(rgep->genlock);
	hashp = rgep->mcast_hash;
	addr = (struct ether_addr *)mca;
	/*
	 * Calculate the Multicast address hash index value
	 *	Normally, the position of MAR0-MAR7 is
	 *	MAR0: offset 0x08, ..., MAR7: offset 0x0F.
	 *
	 *	For pcie chipset, the position of MAR0-MAR7 is
	 *	different from others:
	 *	MAR0: offset 0x0F, ..., MAR7: offset 0x08.
	 */
	index = rge_hash_index(addr->ether_addr_octet);
	if (rgep->chipid.is_pcie)
		reg = (~(index / RGE_MCAST_NUM)) & 0x7;
	else
		reg = index / RGE_MCAST_NUM;

	if (add) {
		if (rgep->mcast_refs[index]++) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[reg] |= 1 << (index % RGE_MCAST_NUM);
	} else {
		if (--rgep->mcast_refs[index]) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[reg] &= ~ (1 << (index % RGE_MCAST_NUM));
	}

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	/*
	 * Set multicast register
	 */
	rge_chip_sync(rgep, RGE_SET_MUL);

	mutex_exit(rgep->genlock);
	return (0);
}

/*
 * rge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
rge_m_promisc(void *arg, boolean_t on)
{
	rge_t *rgep = arg;

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(rgep->genlock);

	if (rgep->promisc == on) {
		mutex_exit(rgep->genlock);
		return (0);
	}
	rgep->promisc = on;

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rge_chip_sync(rgep, RGE_SET_PROMISC);
	RGE_DEBUG(("rge_m_promisc_set($%p) done", arg));
	mutex_exit(rgep->genlock);
	return (0);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	RGE_LOOP_NONE		},
	{ internal,	"PHY",		RGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		RGE_LOOP_INTERNAL_MAC	}
};
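
/*
 * The loopmodes[] table above is returned verbatim by the LB_GET_INFO
 * ioctl; rge_set_loop_mode() below accepts only the three mode values
 * that it lists.
 */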

static enum ioc_reply
rge_set_loop_mode(rge_t *rgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == rgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case RGE_LOOP_NONE:
	case RGE_LOOP_INTERNAL_PHY:
	case RGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	rgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}
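
/*
 * rge_loop_ioctl() -- validate and service the LB_* loopback ioctls
 */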
static enum ioc_reply
rge_loop_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = rgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (rge_set_loop_mode(rgep, *lbmp));
	}
}

/*
 * Specific rge IOCTLs, the MAC layer handles the generic ones.
 */
static void
rge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	rge_t *rgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesirable
	 * state.  So just NAK it.
	 */
	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(rgep->genlock);
		return;
	}
	mutex_exit(rgep->genlock);

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(rgep->genlock);

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		status = rge_chip_ioctl(rgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = rge_loop_ioctl(rgep, wq, mp, iocp);
		break;

	case ND_GET:
	case ND_SET:
		status = rge_nd_ioctl(rgep, wq, mp, iocp);
		break;
	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		rge_phy_update(rgep);
		break;
	}

	mutex_exit(rgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}
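
/*
 * rge_m_resources() -- register the Rx ring as a MAC resource, so
 * that the MAC layer can tune the interrupt blanking parameters
 * (via rge_chip_blank) for it
 */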
static void
rge_m_resources(void *arg)
{
	rge_t *rgep = arg;
	mac_rx_fifo_t mrf;

	mutex_enter(rgep->genlock);

	/*
	 * Register Rx rings as resources and save mac
	 * resource id for future reference
	 */
	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = rge_chip_blank;
	mrf.mrf_arg = (void *)rgep;
	mrf.mrf_normal_blank_time = RGE_RX_INT_TIME;
	mrf.mrf_normal_pkt_count = RGE_RX_INT_PKTS;
	rgep->handle = mac_resource_add(rgep->mh, (mac_resource_t *)&mrf);

	mutex_exit(rgep->genlock);
}

/* ARGSUSED */
static boolean_t
rge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;
		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * ============ Init MSI/Fixed Interrupt routines ==============
 */

/*
 * rge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
rge_add_intrs(rge_t *rgep, int intr_type)
{
	dev_info_t *dip = rgep->devinfo;
	int avail;
	int actual;
	int intr_size;
	int count;
	int i, j;
	int ret;

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		rge_error(rgep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		rge_error(rgep, "ddi_intr_get_navail() failure, "
		    "ret: %d, avail: %d\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	rgep->htable = kmem_alloc(intr_size, KM_SLEEP);
	rgep->intr_rqst = count;

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, rgep->htable, intr_type, 0,
	    count, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS || actual == 0) {
		rge_error(rgep, "ddi_intr_alloc() failed %d\n", ret);
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}
	if (actual < count) {
		rge_log(rgep, "ddi_intr_alloc() Requested: %d, Received: %d\n",
		    count, actual);
	}
	rgep->intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(rgep->htable[0], &rgep->intr_pri)) !=
	    DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_pri() failed %d\n", ret);
		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(rgep->htable[i]);
		}
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Test for high level mutex */
	if (rgep->intr_pri >= ddi_intr_get_hilevel_pri()) {
		rge_error(rgep, "rge_add_intrs: "
		    "Hi level interrupt not supported");
		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(rgep->htable[i]);
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(rgep->htable[i], rge_intr,
		    (caddr_t)rgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			rge_error(rgep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);
			/* Remove already added intr */
			for (j = 0; j < i; j++)
				(void) ddi_intr_remove_handler(rgep->htable[j]);
			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(rgep->htable[i]);
			}
			kmem_free(rgep->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(rgep->htable[0], &rgep->intr_cap))
	    != DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_cap() failed %d\n", ret);
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(rgep->htable[i]);
			(void) ddi_intr_free(rgep->htable[i]);
		}
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * rge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
rge_rem_intrs(rge_t *rgep)
{
	int i;

	/* Disable all interrupts */
	if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(rgep->htable, rgep->intr_cnt);
	} else {
		for (i = 0; i < rgep->intr_cnt; i++) {
			(void) ddi_intr_disable(rgep->htable[i]);
		}
	}

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < rgep->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(rgep->htable[i]);
		(void) ddi_intr_free(rgep->htable[i]);
	}

	kmem_free(rgep->htable, rgep->intr_rqst * sizeof (ddi_intr_handle_t));
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/
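
/*
 * rge_unattach() -- undo whatever rge_attach() managed to set up,
 * guided by the rgep->progress flags recorded as each step completed
 */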
static void
rge_unattach(rge_t *rgep)
{
	/*
	 * Flag that no more activity may be initiated
	 */
	rgep->progress &= ~PROGRESS_READY;
	rgep->rge_mac_state = RGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all RGE data structures
	 */
	if (rgep->periodic_id != NULL) {
		ddi_periodic_delete(rgep->periodic_id);
		rgep->periodic_id = NULL;
	}

	if (rgep->progress & PROGRESS_KSTATS)
		rge_fini_kstats(rgep);

	if (rgep->progress & PROGRESS_PHY)
		(void) rge_phy_reset(rgep);

	if (rgep->progress & PROGRESS_INIT) {
		mutex_enter(rgep->genlock);
		(void) rge_chip_reset(rgep);
		mutex_exit(rgep->genlock);
		rge_fini_rings(rgep);
	}

	if (rgep->progress & PROGRESS_INTR) {
		rge_rem_intrs(rgep);
		mutex_destroy(rgep->rc_lock);
		mutex_destroy(rgep->rx_lock);
		mutex_destroy(rgep->tc_lock);
		mutex_destroy(rgep->tx_lock);
		rw_destroy(rgep->errlock);
		mutex_destroy(rgep->genlock);
	}

	if (rgep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(rgep->factotum_hdl);

	if (rgep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(rgep->resched_hdl);

	if (rgep->progress & PROGRESS_NDD)
		rge_nd_cleanup(rgep);

	rge_free_bufs(rgep);

	if (rgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&rgep->io_handle);

	if (rgep->progress & PROGRESS_CFG)
		pci_config_teardown(&rgep->cfg_handle);

	ddi_remove_minor_node(rgep->devinfo, NULL);
	kmem_free(rgep, sizeof (*rgep));
}
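
/*
 * rge_resume() -- resume operation after suspend, refusing to do so
 * if the chip found at the same devinfo has changed its identity
 */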
static int
rge_resume(dev_info_t *devinfo)
{
	rge_t *rgep;			/* Our private data	*/
	chip_id_t *cidp;
	chip_id_t chipid;

	rgep = ddi_get_driver_private(devinfo);

	/*
	 * If there are state inconsistencies, this is bad.  Returning
	 * DDI_FAILURE here will eventually cause the machine to panic,
	 * so it is best done here so that there is a possibility of
	 * debugging the problem.
	 */
	if (rgep == NULL)
		cmn_err(CE_PANIC,
		    "rge: rgep returned from ddi_get_driver_private was NULL");

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (rgep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "rge: passed devinfo not the same as saved devinfo");

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
	 */
	cidp = &rgep->chipid;
	rge_chip_cfg_init(rgep, &chipid);
	if (chipid.vendor != cidp->vendor)
		return (DDI_FAILURE);
	if (chipid.device != cidp->device)
		return (DDI_FAILURE);
	if (chipid.revision != cidp->revision)
		return (DDI_FAILURE);

	mutex_enter(rgep->genlock);

	/*
	 * This conditional branch is taken in only one case:
	 * the port hasn't been plumbed.
	 */
	if (rgep->suspended == B_FALSE) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}
	rgep->rge_mac_state = RGE_MAC_STARTED;
	/*
	 * All OK, reinitialise h/w & kick off NEMO scheduling
	 */
	rge_restart(rgep);
	rgep->suspended = B_FALSE;

	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}


/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	rge_t *rgep;			/* Our private data	*/
	mac_register_t *macp;
	chip_id_t *cidp;
	int intr_types;
	caddr_t regs;
	int instance;
	int i;
	int err;

	/*
	 * we don't support high level interrupts in the driver
	 */
	if (ddi_intr_hilevel(devinfo, 0) != 0) {
		cmn_err(CE_WARN,
		    "rge_attach -- unsupported high level interrupt");
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devinfo);
	RGE_GTRACE(("rge_attach($%p, %d) instance %d",
	    (void *)devinfo, cmd, instance));
	RGE_BRKPT(NULL, "rge_attach");

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (rge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	rgep = kmem_zalloc(sizeof (*rgep), KM_SLEEP);
	ddi_set_driver_private(devinfo, rgep);
	rgep->devinfo = devinfo;

	/*
	 * Initialize more fields in RGE private data
	 */
	rgep->rge_mac_state = RGE_MAC_ATTACH;
	rgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, rge_debug);
	rgep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, mtu_propname, ETHERMTU);
	rgep->msi_enable = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, msi_propname, B_TRUE);
	(void) snprintf(rgep->ifname, sizeof (rgep->ifname), "%s%d",
	    RGE_DRIVER_NAME, instance);

	/*
	 * Map config space registers
	 * Read chip ID & set up config space command register(s)
	 *
	 * Note: this leaves the chip accessible by Memory Space
	 * accesses, but with interrupts and Bus Mastering off.
	 * This should ensure that nothing untoward will happen
	 * if it has been left active by the (net-)bootloader.
	 * We'll re-enable Bus Mastering once we've reset the chip,
	 * and allow interrupts only when everything else is set up.
	 */
	err = pci_config_setup(devinfo, &rgep->cfg_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "pci_config_setup() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_CFG;
	cidp = &rgep->chipid;
	bzero(cidp, sizeof (*cidp));
	rge_chip_cfg_init(rgep, cidp);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, 1, &regs,
	    0, 0, &rge_reg_accattr, &rgep->io_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	rgep->io_regs = regs;
	rgep->progress |= PROGRESS_REGS;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	rge_chip_ident(rgep);
	err = rge_alloc_bufs(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "DMA buffer allocation failed");
		goto attach_fail;
	}

	/*
	 * Register NDD-tweakable parameters
	 */
	if (rge_nd_init(rgep)) {
		rge_problem(rgep, "rge_nd_init() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_NDD;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_intr_add_softint(devinfo, &rgep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, rge_reschedule, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_intr_add_softint() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(devinfo, &rgep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, rge_chip_factotum, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_intr_add_softint() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_FACTOTUM;

	/*
	 * Get supported interrupt types
	 */
	if (ddi_intr_get_supported_types(devinfo, &intr_types)
	    != DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_supported_types failed\n");
		goto attach_fail;
	}

	/*
	 * Add the h/w interrupt handler and initialise mutexes
	 * The RTL8101E has been observed to suffer an MSI invalidation
	 * issue after suspend/resume, so the FIXED interrupt type is
	 * used for it instead.
	 */
	if (rgep->chipid.mac_ver == MAC_VER_8101E)
		rgep->msi_enable = B_FALSE;
	if ((intr_types & DDI_INTR_TYPE_MSI) && rgep->msi_enable) {
		if (rge_add_intrs(rgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			rge_error(rgep, "MSI registration failed, "
			    "trying FIXED interrupt type\n");
		} else {
			rge_log(rgep, "Using MSI interrupt type\n");
			rgep->intr_type = DDI_INTR_TYPE_MSI;
			rgep->progress |= PROGRESS_INTR;
		}
	}
	if (!(rgep->progress & PROGRESS_INTR) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (rge_add_intrs(rgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			rge_error(rgep, "FIXED interrupt "
			    "registration failed\n");
			goto attach_fail;
		}
		rge_log(rgep, "Using FIXED interrupt type\n");
		rgep->intr_type = DDI_INTR_TYPE_FIXED;
		rgep->progress |= PROGRESS_INTR;
	}
	if (!(rgep->progress & PROGRESS_INTR)) {
		rge_error(rgep, "No interrupts registered\n");
		goto attach_fail;
	}
	mutex_init(rgep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	rw_init(rgep->errlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->rc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));

	/*
	 * Initialize rings
	 */
	err = rge_init_rings(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "rge_init_rings() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_INIT;

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(rgep->htable, rgep->intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < rgep->intr_cnt; i++) {
			(void) ddi_intr_enable(rgep->htable[i]);
		}
	}

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	rgep->param_link_up = LINK_STATE_UNKNOWN;

	/*
	 * Reset chip & rings to initial state; also reset address
	 * filtering, promiscuity, loopback mode.
	 */
	mutex_enter(rgep->genlock);
	(void) rge_chip_reset(rgep);
	rge_chip_sync(rgep, RGE_GET_MAC);
	bzero(rgep->mcast_hash, sizeof (rgep->mcast_hash));
	bzero(rgep->mcast_refs, sizeof (rgep->mcast_refs));
	rgep->promisc = B_FALSE;
	rgep->param_loop_mode = RGE_LOOP_NONE;
	mutex_exit(rgep->genlock);
	rge_phy_init(rgep);
	rgep->progress |= PROGRESS_PHY;

	/*
	 * Create & initialise named kstats
	 */
	rge_init_kstats(rgep, instance);
	rgep->progress |= PROGRESS_KSTATS;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = rgep;
	macp->m_dip = devinfo;
	macp->m_src_addr = rgep->netaddr;
	macp->m_callbacks = &rge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = rgep->default_mtu;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &rgep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodic handler.
	 * rge_chip_cyclic() is invoked in kernel context.
	 */
	rgep->periodic_id = ddi_periodic_add(rge_chip_cyclic, rgep,
	    RGE_CYCLIC_PERIOD, DDI_IPL_0);

	rgep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	rge_unattach(rgep);
	return (DDI_FAILURE);
}

/*
 * rge_suspend() -- suspend transmit/receive for powerdown
 */
static int
rge_suspend(rge_t *rgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(rgep->genlock);
	rw_enter(rgep->errlock, RW_READER);

	if (rgep->rge_mac_state != RGE_MAC_STARTED) {
		rw_exit(rgep->errlock);
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rgep->suspended = B_TRUE;
	rge_stop(rgep);
	rgep->rge_mac_state = RGE_MAC_STOPPED;

	rw_exit(rgep->errlock);
	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
rge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	rge_t *rgep;

	RGE_GTRACE(("rge_detach($%p, %d)", (void *)devinfo, cmd));

	rgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (rge_suspend(rgep));

	case DDI_DETACH:
		break;
	}

	/*
	 * If any posted buffers are still loaned out, the driver must
	 * refuse to detach; the upper layers need to be notified to
	 * release them first.
	 */
	if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) &&
	    rgep->rx_free != RGE_BUF_SLOTS)
		return (DDI_FAILURE);

	/*
	 * Unregister from the MAC layer subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(rgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	rge_unattach(rgep);
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/

DDI_DEFINE_STREAM_OPS(rge_dev_ops, nulldev, nulldev, rge_attach, rge_detach,
    nodev, NULL, D_MP, NULL);

static struct modldrv rge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	rge_ident,		/* short description */
	&rge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&rge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&rge_dev_ops, "rge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(rge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&rge_dev_ops);

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&rge_dev_ops);
		mutex_destroy(rge_log_mutex);
	}
	return (status);
}