/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "igb_sw.h"

static int igb_alloc_tbd_ring(igb_tx_ring_t *);
static void igb_free_tbd_ring(igb_tx_ring_t *);
static int igb_alloc_rbd_ring(igb_rx_ring_t *);
static void igb_free_rbd_ring(igb_rx_ring_t *);
static int igb_alloc_dma_buffer(igb_t *, dma_buffer_t *, size_t);
static void igb_free_dma_buffer(dma_buffer_t *);
static int igb_alloc_tcb_lists(igb_tx_ring_t *);
static void igb_free_tcb_lists(igb_tx_ring_t *);
static int igb_alloc_rcb_lists(igb_rx_ring_t *);
static void igb_free_rcb_lists(igb_rx_ring_t *);

#ifdef __sparc
#define	IGB_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IGB_DMA_ALIGNMENT	0x0000000000001000ull
#endif

/*
 * DMA attributes for tx/rx descriptors
 */
static ddi_dma_attr_t igb_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};

/*
 * DMA attributes for tx/rx buffers
 */
static ddi_dma_attr_t igb_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};

/*
 * DMA attributes for transmit
 */
static ddi_dma_attr_t igb_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};
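/*
 * Note on the three attribute structures above: the descriptor and
 * buffer attributes request a scatter/gather list length of 1, so the
 * ddi_dma_addr_bind_handle() calls in this file must each produce
 * exactly one cookie -- the ASSERT(cookie_num == 1) checks below rely
 * on that.  The transmit attributes instead allow up to MAX_COOKIE
 * segments, since mblk data passed down from the stack is bound
 * dynamically at transmit time and need not be physically contiguous.
 * IGB_DMA_ALIGNMENT is page-sized (8K on sparc), which presumably
 * satisfies the hardware's descriptor ring base alignment requirement.
 */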
/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t igb_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for buffers.
 */
static ddi_device_acc_attr_t igb_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};


/*
 * igb_alloc_dma - Allocate DMA resources for all rx/tx rings
 */
int
igb_alloc_dma(igb_t *igb)
{
	igb_rx_ring_t *rx_ring;
	igb_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < igb->num_rx_rings; i++) {
		/*
		 * Allocate receive descriptor ring and control block lists
		 */
		rx_ring = &igb->rx_rings[i];

		if (igb_alloc_rbd_ring(rx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_rcb_lists(rx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}

	for (i = 0; i < igb->num_tx_rings; i++) {
		/*
		 * Allocate transmit descriptor ring and control block lists
		 */
		tx_ring = &igb->tx_rings[i];

		if (igb_alloc_tbd_ring(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_tcb_lists(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}

	return (IGB_SUCCESS);

alloc_dma_failure:
	igb_free_dma(igb);

	return (IGB_FAILURE);
}


/*
 * igb_free_dma - Free all the DMA resources of all rx/tx rings
 */
void
igb_free_dma(igb_t *igb)
{
	igb_rx_ring_t *rx_ring;
	igb_tx_ring_t *tx_ring;
	int i;

	/*
	 * Free DMA resources of rx rings
	 */
	for (i = 0; i < igb->num_rx_rings; i++) {
		rx_ring = &igb->rx_rings[i];
		igb_free_rbd_ring(rx_ring);
		igb_free_rcb_lists(rx_ring);
	}

	/*
	 * Free DMA resources of tx rings
	 */
	for (i = 0; i < igb->num_tx_rings; i++) {
		tx_ring = &igb->tx_rings[i];
		igb_free_tbd_ring(tx_ring);
		igb_free_tcb_lists(tx_ring);
	}
}
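/*
 * Each of the per-ring allocators below follows the canonical
 * three-step DDI DMA sequence: allocate a handle, allocate suitable
 * memory, then bind the two together.  A minimal sketch of the pattern
 * (hypothetical local names, error handling omitted):
 *
 *	ddi_dma_handle_t hdl;
 *	ddi_acc_handle_t acc;
 *	caddr_t addr;
 *	size_t real_len;
 *	ddi_dma_cookie_t cookie;
 *	uint_t count;
 *
 *	(void) ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
 *	    DDI_DMA_DONTWAIT, NULL, &hdl);
 *	(void) ddi_dma_mem_alloc(hdl, size, &igb_desc_acc_attr,
 *	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
 *	    &addr, &real_len, &acc);
 *	(void) ddi_dma_addr_bind_handle(hdl, NULL, addr, real_len,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
 *	    NULL, &cookie, &count);
 *
 * After a successful bind, cookie.dmac_laddress is the device-visible
 * address of the memory.  Teardown runs in the reverse order:
 * ddi_dma_unbind_handle(), ddi_dma_mem_free(), ddi_dma_free_handle().
 */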
/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = tx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (igb->tx_head_wb_enable) {
		size += sizeof (union e1000_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound
	 * to the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}
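/*
 * Illustrative only (the ring setup code lives elsewhere in the driver,
 * and the register names are assumed from the shared e1000 register
 * definitions): the 64-bit tbd_area.dma_address recorded above is what
 * ultimately gets split into the low/high transmit descriptor base
 * registers, along the lines of:
 *
 *	E1000_WRITE_REG(hw, E1000_TDBAL(index),
 *	    (uint32_t)(tx_ring->tbd_area.dma_address & 0xFFFFFFFF));
 *	E1000_WRITE_REG(hw, E1000_TDBAH(index),
 *	    (uint32_t)(tx_ring->tbd_area.dma_address >> 32));
 */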
/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = rx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_rx_desc) * rx_ring->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_ring->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_ring->rbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_ring->rbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_ring->rbd_area.address,
	    &len, &rx_ring->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_ring->rbd_area.acc_handle = NULL;
		rx_ring->rbd_area.address = NULL;
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_ring->rbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_ring->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_ring->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind rbd dma resource: %x", ret);
		rx_ring->rbd_area.dma_address = NULL;
		if (rx_ring->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
			rx_ring->rbd_area.acc_handle = NULL;
			rx_ring->rbd_area.address = NULL;
		}
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_ring->rbd_area.dma_address = cookie.dmac_laddress;
	rx_ring->rbd_area.size = len;

	rx_ring->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
	    rx_ring->rbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
igb_free_rbd_ring(igb_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_ring->rbd_area.dma_handle);
	}
	if (rx_ring->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
		rx_ring->rbd_area.acc_handle = NULL;
	}
	if (rx_ring->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
		rx_ring->rbd_area.dma_handle = NULL;
	}
	rx_ring->rbd_area.address = NULL;
	rx_ring->rbd_area.dma_address = NULL;
	rx_ring->rbd_area.size = 0;

	rx_ring->rbd_ring = NULL;
}
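/*
 * The descriptor rings above are mapped DDI_DMA_CONSISTENT because both
 * the driver and the hardware read and write them continuously and need
 * a consistent view.  The packet buffers allocated below use
 * DDI_DMA_STREAMING instead, which suits bulk, mostly one-directional
 * data transfer; explicit ddi_dma_sync() calls elsewhere in the driver
 * are then responsible for ordering CPU and device accesses.
 */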
/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer
 */
static int
igb_alloc_dma_buffer(igb_t *igb,
    dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = igb->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		igb_error(igb,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &igb_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not bind dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IGB_SUCCESS);
}

/*
 * igb_free_dma_buffer - Free one allocated area of dma memory and handle
 */
static void
igb_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
		buf->dma_address = NULL;
	} else {
		return;
	}

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
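/*
 * Transmit control block (tcb) bookkeeping, as set up below: work_list
 * tracks tcbs attached to descriptors that are in flight, while
 * free_list holds tcbs available for new transmits.  Each tcb carries
 * both a pre-allocated bcopy buffer (tx_buf) for small packets and a
 * pre-allocated dma handle (tx_dma_handle) for binding large packets
 * in place.
 */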
/*
 * igb_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
igb_alloc_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	igb_t *igb = tx_ring->igb;
	dev_info_t *devinfo = igb->dip;

	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for tx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx free list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of the free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control blocks of the free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &igb_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			igb_error(igb,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size
		 * is less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = igb_alloc_dma_buffer(igb,
		    tx_buf, igb->tx_buf_size);

		if (ret != IGB_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			igb_error(igb, "Allocate tx dma buffer failed");
			goto alloc_tcb_lists_fail;
		}
	}

	return (IGB_SUCCESS);

alloc_tcb_lists_fail:
	igb_free_tcb_lists(tx_ring);

	return (IGB_FAILURE);
}
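/*
 * A hedged sketch of how the transmit path is expected to use these
 * resources (the actual send code lives elsewhere in the driver): a tcb
 * is taken from the free list, then either the packet data is bcopy'd
 * into tcb->tx_buf (when the fragment is smaller than bcopy_thresh) or
 * tcb->tx_dma_handle is bound to the mblk data in place, e.g.:
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t count;
 *
 *	(void) ddi_dma_addr_bind_handle(tcb->tx_dma_handle, NULL,
 *	    (caddr_t)mp->b_rptr, MBLKL(mp),
 *	    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
 *	    NULL, &cookie, &count);
 *
 * Once the hardware reports the descriptors done, the tcb is recycled
 * back onto the free list.
 */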
/*
 * igb_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
igb_free_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, then we don't
			 * need to check the remaining tcbs.
			 */
			break;
		}

		igb_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}

/*
 * igb_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
igb_alloc_rcb_lists(igb_rx_ring_t *rx_ring)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	igb_t *igb = rx_ring->igb;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the work list.
	 */
	rx_ring->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->ring_size, KM_NOSLEEP);

	if (rx_ring->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for rx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_ring->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->free_list_size, KM_NOSLEEP);

	if (rx_ring->free_list == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for rx free list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	rx_ring->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_ring->rcb_area == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for rx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the rx control blocks
	 */
	rcb = rx_ring->rcb_area;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_ring->ring_size) {
			/* Attach the rx control block to the work list */
			rx_ring->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_ring->free_list[i - rx_ring->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = igb_alloc_dma_buffer(igb,
		    rx_buf, igb->rx_buf_size);

		if (ret != IGB_SUCCESS) {
			igb_error(igb, "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}

		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->state = RCB_FREE;
		rcb->rx_ring = (igb_rx_ring_t *)rx_ring;
		rcb->free_rtn.free_func = igb_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address,
		    rx_buf->size,
		    0, &rcb->free_rtn);
	}

	return (IGB_SUCCESS);

alloc_rcb_lists_fail:
	igb_free_rcb_lists(rx_ring);

	return (IGB_FAILURE);
}
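/*
 * Note on the rcb setup above: desballoc() creates an mblk whose data
 * block is the DMA buffer itself, so received packets can be passed up
 * the stack without copying; when the stack eventually frees the mblk,
 * the registered free routine (igb_rx_recycle) returns the rcb to the
 * free list rather than releasing the memory.  The IPHDR_ALIGN_ROOM
 * offset shifts the buffer start so that, after the 14-byte Ethernet
 * header, the IP header lands on a 4-byte boundary (the usual purpose
 * of this idiom; the macro is defined in the driver headers).
 */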
/*
 * igb_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
igb_free_rcb_lists(igb_rx_ring_t *rx_ring)
{
	int i;
	rx_control_block_t *rcb;
	uint32_t rcb_count;

	rcb = rx_ring->rcb_area;
	if (rcb == NULL)
		return;

	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);
		ASSERT(rcb->state == RCB_FREE);

		if (rcb->mp != NULL) {
			freemsg(rcb->mp);
			rcb->mp = NULL;
		}

		igb_free_dma_buffer(&rcb->rx_buf);
	}

	if (rx_ring->rcb_area != NULL) {
		kmem_free(rx_ring->rcb_area,
		    sizeof (rx_control_block_t) * rcb_count);
		rx_ring->rcb_area = NULL;
	}

	if (rx_ring->work_list != NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;
	}

	if (rx_ring->free_list != NULL) {
		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;
	}
}

void
igb_set_fma_flags(int dma_flag)
{
	if (dma_flag) {
		igb_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		igb_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		igb_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		igb_tx_dma_attr.dma_attr_flags = 0;
		igb_buf_dma_attr.dma_attr_flags = 0;
		igb_desc_dma_attr.dma_attr_flags = 0;
	}
}
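/*
 * A minimal sketch of the expected caller (assumed; the FMA setup code
 * lives elsewhere in the driver, and the fm_capabilities field name is
 * hypothetical): during attach, the driver would enable DDI_DMA_FLAGERR
 * only when the capabilities negotiated via ddi_fm_init() include DMA
 * checking, e.g.:
 *
 *	igb_set_fma_flags(igb->fm_capabilities & DDI_FM_DMACHK_CAPABLE);
 *
 * With DDI_DMA_FLAGERR set, DMA errors are reported through the FMA
 * framework rather than treated as fatal by the DDI.
 */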