1 /* 2 * This file is provided under a CDDLv1 license. When using or 3 * redistributing this file, you may do so under this license. 4 * In redistributing this file this license must be included 5 * and no other modification of this header file is permitted. 6 * 7 * CDDL LICENSE SUMMARY 8 * 9 * Copyright(c) 1999 - 2007 Intel Corporation. All rights reserved. 10 * 11 * The contents of this file are subject to the terms of Version 12 * 1.0 of the Common Development and Distribution License (the "License"). 13 * 14 * You should have received a copy of the License with this software. 15 * You can obtain a copy of the License at 16 * http://www.opensolaris.org/os/licensing. 17 * See the License for the specific language governing permissions 18 * and limitations under the License. 19 */ 20 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms of the CDDLv1. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * ********************************************************************** 30 * Module Name: * 31 * e1000galloc.c * 32 * * 33 * Abstract: * 34 * This file contains some routines that take care of init, * 35 * uninit, and memory allocation. * 36 * * 37 * * 38 * This driver runs on the following hardware: * 39 * - Wiseman based PCI gigabit ethernet adapters * 40 * * 41 * Environment: * 42 * Kernel Mode - * 43 * * 44 * ********************************************************************** 45 */ 46 47 #include "e1000g_sw.h" 48 #include "e1000g_debug.h" 49 50 #define TX_SW_PKT_AREA_SZ \ 51 (sizeof (TX_SW_PACKET) * Adapter->NumTxSwPacket) 52 53 static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *); 54 static int e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *); 55 static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *); 56 static void e1000g_free_rx_descriptors(e1000g_rx_ring_t *); 57 static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *); 58 static int e1000g_alloc_rx_packets(e1000g_rx_ring_t *); 59 static void e1000g_free_tx_packets(e1000g_tx_ring_t *); 60 static void e1000g_free_rx_packets(e1000g_rx_ring_t *); 61 static int e1000g_alloc_dma_buffer(struct e1000g *, dma_buffer_t *, size_t); 62 static void e1000g_free_dma_buffer(dma_buffer_t *); 63 #ifdef __sparc 64 static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t); 65 static void e1000g_free_dvma_buffer(dma_buffer_t *); 66 #endif 67 static int e1000g_alloc_descriptors(struct e1000g *Adapter); 68 static int e1000g_alloc_packets(struct e1000g *Adapter); 69 static PRX_SW_PACKET e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *); 70 71 #ifdef __sparc 72 static ddi_dma_lim_t e1000g_dma_limits = { 73 (uint_t)0, /* dlim_addr_lo */ 74 (uint_t)0xffffffff, /* dlim_addr_hi */ 75 (uint_t)0xffffffff, /* dlim_cntr_max */ 76 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ 77 0x1, /* dlim_minxfer */ 78 1024 /* dlim_speed */ 79 }; 80 #endif 81 82 #ifdef __sparc 83 static dma_type_t e1000g_dma_type = USE_DVMA; 84 #else 85 static dma_type_t e1000g_dma_type = USE_DMA; 86 #endif 87 88 extern krwlock_t e1000g_dma_type_lock; 89 90 int 91 e1000g_alloc_dma_resources(struct e1000g *Adapter) 92 { 93 e1000g_tx_ring_t *tx_ring; 94 e1000g_rx_ring_t *rx_ring; 95 96 tx_ring = Adapter->tx_ring; 97 rx_ring = Adapter->rx_ring; 98 99 if (e1000g_alloc_descriptors(Adapter) != DDI_SUCCESS) 100 return (DDI_FAILURE); 101 102 if (e1000g_alloc_packets(Adapter) != DDI_SUCCESS) { 103 e1000g_free_tx_descriptors(tx_ring); 104 e1000g_free_rx_descriptors(rx_ring); 105 return (DDI_FAILURE); 106 } 107 108 return (DDI_SUCCESS); 109 } 110 111 /* 112 * ********************************************************************** 113 * Name: e1000g_alloc_descriptors * 114 * * 115 * Description: * 116 * This routine Allocates Neccesary Buffers for the device * 117 * It allocates memory for * 118 * Transmit Descriptor Area * 119 * Receive Descrpitor Area * 120 * * 121 * NOTE -- The device must have been reset before this routine * 122 * is called. * 123 * * 124 * Author: Hari Seshadri * 125 * Functions Called : * 126 * DDI mem functions called * 127 * ddi_dma_alloc_handle() allocates a new DMA handle. A DMA * 128 * handle is an opaque object used as a reference to subse- * 129 * quently allocated DMA resources. ddi_dma_alloc_handle() * 130 * accepts as parameters the device information referred to by * 131 * dip and the device's DMA attributes described by a * 132 * ddi_dma_attr(9S) structure. A successful call to * 133 * ddi_dma_alloc_handle() fills in the value pointed to by * 134 * handlep. A DMA handle must only be used by the device for * 135 * which it was allocated and is only valid for one I/O tran- * 136 * saction at a time. * 137 * * 138 * ddi_dma_mem_alloc() allocates memory for DMA transfers to or * 139 * from a device. The allocation will obey the alignment, pad- * 140 * ding constraints and device granularity as specified by the * 141 * DMA attributes (see ddi_dma_attr(9S)) passed to * 142 * ddi_dma_alloc_handle(9F) and the more restrictive attributes * 143 * imposed by the system.Flags should be set to DDI_DMA_STREAMING * 144 * if the device is doing sequential, unidirectional, * 145 * block-sized, and block- aligned transfers to or from memory. * 146 * * 147 * * 148 * ddi_dma_addr_bind_handle() allocates DMA resources for a * 149 * memory object such that a device can perform DMA to or from * 150 * the object. DMA resources are allocated considering the * 151 * device's DMA attributes as expressed by ddi_dma_attr(9S) * 152 * (see ddi_dma_alloc_handle(9F)). * 153 * ddi_dma_addr_bind_handle() fills in the first DMA cookie * 154 * pointed to by cookiep with the appropriate address, length, * 155 * and bus type. *ccountp is set to the number of DMA cookies * 156 * representing this DMA object. Subsequent DMA cookies must be * 157 * retrieved by calling ddi_dma_nextcookie(9F) the number of * 158 * times specified by *countp - 1. * 159 * * 160 * Arguments: * 161 * Adapter - A pointer to context sensitive "Adapter" structure. * 162 * * 163 * * 164 * Returns: * 165 * DDI_SUCCESS on success * 166 * DDI_FAILURE on error * 167 * * 168 * Modification log: * 169 * Date Who Description * 170 * -------- --- ----------------------------------------------------- * 171 * 11/11/98 Vinay Cleaned the entire function to prevents panics and * 172 * memory corruption * 173 * 17/11/98 Vinay Optimized it for proper usages of function calls * 174 * 30/04/99 Vinay Resolved some more memory problems related to race * 175 * conditions * 176 * ********************************************************************** 177 */ 178 static int 179 e1000g_alloc_descriptors(struct e1000g *Adapter) 180 { 181 int result; 182 e1000g_tx_ring_t *tx_ring; 183 e1000g_rx_ring_t *rx_ring; 184 185 tx_ring = Adapter->tx_ring; 186 187 result = e1000g_alloc_tx_descriptors(tx_ring); 188 if (result != DDI_SUCCESS) 189 return (DDI_FAILURE); 190 191 rx_ring = Adapter->rx_ring; 192 193 result = e1000g_alloc_rx_descriptors(rx_ring); 194 if (result != DDI_SUCCESS) { 195 e1000g_free_tx_descriptors(tx_ring); 196 return (DDI_FAILURE); 197 } 198 199 return (DDI_SUCCESS); 200 } 201 202 static int 203 e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring) 204 { 205 int mystat; 206 boolean_t alloc_flag; 207 size_t size; 208 size_t len; 209 uintptr_t templong; 210 uint_t cookie_count; 211 dev_info_t *devinfo; 212 ddi_dma_cookie_t cookie; 213 struct e1000g *Adapter; 214 215 Adapter = tx_ring->adapter; 216 217 alloc_flag = B_FALSE; 218 219 devinfo = Adapter->dip; 220 221 /* 222 * Solaris 7 has a problem with allocating physically contiguous memory 223 * that is aligned on a 4K boundary. The transmit and rx descriptors 224 * need to aligned on a 4kbyte boundary. We first try to allocate the 225 * memory with DMA attributes set to 4K alignment and also no scatter/ 226 * gather mechanism specified. In most cases, this does not allocate 227 * memory aligned at a 4Kbyte boundary. We then try asking for memory 228 * aligned on 4K boundary with scatter/gather set to 2. This works when 229 * the amount of memory is less than 4k i.e a page size. If neither of 230 * these options work or if the number of descriptors is greater than 231 * 4K, ie more than 256 descriptors, we allocate 4k extra memory and 232 * and then align the memory at a 4k boundary. 233 */ 234 size = sizeof (struct e1000_tx_desc) * Adapter->NumTxDescriptors; 235 236 /* 237 * Memory allocation for the transmit buffer descriptors. 238 */ 239 /* 240 * DMA attributes set to asking for 4k alignment and no 241 * scatter/gather specified. 242 * This typically does not succeed for Solaris 7, but 243 * might work for Solaris 2.6 244 */ 245 tbd_dma_attr.dma_attr_sgllen = 1; 246 247 /* 248 * Allocate a new DMA handle for the transmit descriptor 249 * memory area. 250 */ 251 mystat = ddi_dma_alloc_handle(devinfo, &tbd_dma_attr, 252 DDI_DMA_DONTWAIT, 0, 253 &tx_ring->tbd_dma_handle); 254 255 if (mystat != DDI_SUCCESS) { 256 e1000g_log(Adapter, CE_WARN, 257 "Could not allocate tbd dma handle: %d", mystat); 258 tx_ring->tbd_dma_handle = NULL; 259 return (DDI_FAILURE); 260 } 261 262 /* 263 * Allocate memory to DMA data to and from the transmit 264 * descriptors. 265 */ 266 mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle, 267 size, 268 &accattr, DDI_DMA_CONSISTENT, 269 DDI_DMA_DONTWAIT, 0, 270 (caddr_t *)&tx_ring->tbd_area, 271 &len, &tx_ring->tbd_acc_handle); 272 273 if ((mystat != DDI_SUCCESS) || 274 ((uintptr_t)tx_ring->tbd_area & (E1000_MDALIGN - 1))) { 275 if (mystat == DDI_SUCCESS) { 276 ddi_dma_mem_free(&tx_ring->tbd_acc_handle); 277 tx_ring->tbd_acc_handle = NULL; 278 tx_ring->tbd_area = NULL; 279 } 280 if (tx_ring->tbd_dma_handle != NULL) { 281 ddi_dma_free_handle(&tx_ring->tbd_dma_handle); 282 tx_ring->tbd_dma_handle = NULL; 283 } 284 alloc_flag = B_FALSE; 285 } else 286 alloc_flag = B_TRUE; 287 288 /* 289 * Initialize the entire transmit buffer descriptor area to zero 290 */ 291 if (alloc_flag) 292 bzero(tx_ring->tbd_area, len); 293 294 /* 295 * If the previous DMA attributes setting could not give us contiguous 296 * memory or the number of descriptors is greater than the page size, 297 * we allocate 4K extra memory and then align it at a 4k boundary. 298 */ 299 if (!alloc_flag) { 300 size = size + ROUNDOFF; 301 302 /* 303 * DMA attributes set to no scatter/gather and 16 bit alignment 304 */ 305 tbd_dma_attr.dma_attr_align = 1; 306 tbd_dma_attr.dma_attr_sgllen = 1; 307 308 /* 309 * Allocate a new DMA handle for the transmit descriptor memory 310 * area. 311 */ 312 mystat = ddi_dma_alloc_handle(devinfo, &tbd_dma_attr, 313 DDI_DMA_DONTWAIT, 0, 314 &tx_ring->tbd_dma_handle); 315 316 if (mystat != DDI_SUCCESS) { 317 e1000g_log(Adapter, CE_WARN, 318 "Could not re-allocate tbd dma handle: %d", mystat); 319 tx_ring->tbd_dma_handle = NULL; 320 return (DDI_FAILURE); 321 } 322 323 /* 324 * Allocate memory to DMA data to and from the transmit 325 * descriptors. 326 */ 327 mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle, 328 size, 329 &accattr, DDI_DMA_CONSISTENT, 330 DDI_DMA_DONTWAIT, 0, 331 (caddr_t *)&tx_ring->tbd_area, 332 &len, &tx_ring->tbd_acc_handle); 333 334 if (mystat != DDI_SUCCESS) { 335 e1000g_log(Adapter, CE_WARN, 336 "Could not allocate tbd dma memory: %d", mystat); 337 tx_ring->tbd_acc_handle = NULL; 338 tx_ring->tbd_area = NULL; 339 if (tx_ring->tbd_dma_handle != NULL) { 340 ddi_dma_free_handle(&tx_ring->tbd_dma_handle); 341 tx_ring->tbd_dma_handle = NULL; 342 } 343 return (DDI_FAILURE); 344 } else 345 alloc_flag = B_TRUE; 346 347 /* 348 * Initialize the entire transmit buffer descriptor area to zero 349 */ 350 bzero(tx_ring->tbd_area, len); 351 /* 352 * Memory has been allocated with the ddi_dma_mem_alloc call, 353 * but has not been aligned. We now align it on a 4k boundary. 354 */ 355 templong = P2NPHASE((uintptr_t)tx_ring->tbd_area, ROUNDOFF); 356 len = size - templong; 357 templong += (uintptr_t)tx_ring->tbd_area; 358 tx_ring->tbd_area = (struct e1000_tx_desc *)templong; 359 } /* alignment workaround */ 360 361 /* 362 * Transmit buffer descriptor memory allocation succeeded 363 */ 364 ASSERT(alloc_flag); 365 366 /* 367 * Allocates DMA resources for the memory that was allocated by 368 * the ddi_dma_mem_alloc call. The DMA resources then get bound to the 369 * the memory address 370 */ 371 mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle, 372 (struct as *)NULL, (caddr_t)tx_ring->tbd_area, 373 len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 374 DDI_DMA_SLEEP, 0, &cookie, &cookie_count); 375 376 if (mystat != DDI_SUCCESS) { 377 e1000g_log(Adapter, CE_WARN, 378 "Could not bind tbd dma resource: %d", mystat); 379 if (tx_ring->tbd_acc_handle != NULL) { 380 ddi_dma_mem_free(&tx_ring->tbd_acc_handle); 381 tx_ring->tbd_acc_handle = NULL; 382 tx_ring->tbd_area = NULL; 383 } 384 if (tx_ring->tbd_dma_handle != NULL) { 385 ddi_dma_free_handle(&tx_ring->tbd_dma_handle); 386 tx_ring->tbd_dma_handle = NULL; 387 } 388 return (DDI_FAILURE); 389 } 390 391 ASSERT(cookie_count == 1); /* 1 cookie */ 392 393 if (cookie_count != 1) { 394 e1000g_log(Adapter, CE_WARN, 395 "Could not bind tbd dma resource in a single frag. " 396 "Count - %d Len - %d", cookie_count, len); 397 e1000g_free_tx_descriptors(tx_ring); 398 return (DDI_FAILURE); 399 } 400 401 /* 402 * The FirstTxDescriptor is initialized to the physical address that 403 * is obtained from the ddi_dma_addr_bind_handle call 404 */ 405 tx_ring->tbd_dma_addr = cookie.dmac_laddress; 406 tx_ring->tbd_first = tx_ring->tbd_area; 407 tx_ring->tbd_last = tx_ring->tbd_first + 408 (Adapter->NumTxDescriptors - 1); 409 410 return (DDI_SUCCESS); 411 } 412 413 static int 414 e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *rx_ring) 415 { 416 int mystat; 417 boolean_t alloc_flag; 418 size_t size; 419 size_t len; 420 uintptr_t templong; 421 uint_t cookie_count; 422 dev_info_t *devinfo; 423 ddi_dma_cookie_t cookie; 424 struct e1000g *Adapter; 425 426 Adapter = rx_ring->adapter; 427 428 alloc_flag = B_FALSE; 429 430 devinfo = Adapter->dip; 431 432 /* 433 * Memory allocation for the receive buffer descriptors. 434 */ 435 size = (sizeof (struct e1000_rx_desc)) * Adapter->NumRxDescriptors; 436 437 /* 438 * Asking for aligned memory with DMA attributes set for 4k alignment 439 */ 440 tbd_dma_attr.dma_attr_sgllen = 1; 441 tbd_dma_attr.dma_attr_align = E1000_MDALIGN; 442 443 /* 444 * Allocate a new DMA handle for the receive descriptor 445 * memory area. re-use the tbd_dma_attr since rbd has 446 * same attributes. 447 */ 448 mystat = ddi_dma_alloc_handle(devinfo, &tbd_dma_attr, 449 DDI_DMA_DONTWAIT, 0, 450 &rx_ring->rbd_dma_handle); 451 452 if (mystat != DDI_SUCCESS) { 453 e1000g_log(Adapter, CE_WARN, 454 "Could not allocate rbd dma handle: %d", mystat); 455 rx_ring->rbd_dma_handle = NULL; 456 return (DDI_FAILURE); 457 } 458 /* 459 * Allocate memory to DMA data to and from the receive 460 * descriptors. 461 */ 462 mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle, 463 size, 464 &accattr, DDI_DMA_CONSISTENT, 465 DDI_DMA_DONTWAIT, 0, 466 (caddr_t *)&rx_ring->rbd_area, 467 &len, &rx_ring->rbd_acc_handle); 468 469 /* 470 * Check if memory allocation succeeded and also if the 471 * allocated memory is aligned correctly. 472 */ 473 if ((mystat != DDI_SUCCESS) || 474 ((uintptr_t)rx_ring->rbd_area & (E1000_MDALIGN - 1))) { 475 if (mystat == DDI_SUCCESS) { 476 ddi_dma_mem_free(&rx_ring->rbd_acc_handle); 477 rx_ring->rbd_acc_handle = NULL; 478 rx_ring->rbd_area = NULL; 479 } 480 if (rx_ring->rbd_dma_handle != NULL) { 481 ddi_dma_free_handle(&rx_ring->rbd_dma_handle); 482 rx_ring->rbd_dma_handle = NULL; 483 } 484 alloc_flag = B_FALSE; 485 } else 486 alloc_flag = B_TRUE; 487 488 /* 489 * Initialize the allocated receive descriptor memory to zero. 490 */ 491 if (alloc_flag) 492 bzero((caddr_t)rx_ring->rbd_area, len); 493 494 /* 495 * If memory allocation did not succeed or if number of descriptors is 496 * greater than a page size ( more than 256 descriptors ), do the 497 * alignment yourself 498 */ 499 if (!alloc_flag) { 500 tbd_dma_attr.dma_attr_align = 1; 501 tbd_dma_attr.dma_attr_sgllen = 1; 502 size = size + ROUNDOFF; 503 /* 504 * Allocate a new DMA handle for the receive descriptor memory 505 * area. re-use the tbd_dma_attr since rbd has same attributes. 506 */ 507 mystat = ddi_dma_alloc_handle(devinfo, &tbd_dma_attr, 508 DDI_DMA_DONTWAIT, 0, 509 &rx_ring->rbd_dma_handle); 510 511 if (mystat != DDI_SUCCESS) { 512 e1000g_log(Adapter, CE_WARN, 513 "Could not re-allocate rbd dma handle: %d", mystat); 514 rx_ring->rbd_dma_handle = NULL; 515 return (DDI_FAILURE); 516 } 517 /* 518 * Allocate memory to DMA data to and from the receive 519 * descriptors. 520 */ 521 mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle, 522 size, 523 &accattr, DDI_DMA_CONSISTENT, 524 DDI_DMA_DONTWAIT, 0, 525 (caddr_t *)&rx_ring->rbd_area, 526 &len, &rx_ring->rbd_acc_handle); 527 528 if (mystat != DDI_SUCCESS) { 529 e1000g_log(Adapter, CE_WARN, 530 "Could not allocate rbd dma memory: %d", mystat); 531 rx_ring->rbd_acc_handle = NULL; 532 rx_ring->rbd_area = NULL; 533 if (rx_ring->rbd_dma_handle != NULL) { 534 ddi_dma_free_handle(&rx_ring->rbd_dma_handle); 535 rx_ring->rbd_dma_handle = NULL; 536 } 537 return (DDI_FAILURE); 538 } else 539 alloc_flag = B_TRUE; 540 541 /* 542 * Initialize the allocated receive descriptor memory to zero. 543 */ 544 bzero((caddr_t)rx_ring->rbd_area, len); 545 templong = P2NPHASE((uintptr_t)rx_ring->rbd_area, ROUNDOFF); 546 len = size - templong; 547 templong += (uintptr_t)rx_ring->rbd_area; 548 rx_ring->rbd_area = (struct e1000_rx_desc *)templong; 549 } /* alignment workaround */ 550 551 /* 552 * The memory allocation of the receive descriptors succeeded 553 */ 554 ASSERT(alloc_flag); 555 556 /* 557 * Allocates DMA resources for the memory that was allocated by 558 * the ddi_dma_mem_alloc call. 559 */ 560 mystat = ddi_dma_addr_bind_handle(rx_ring->rbd_dma_handle, 561 (struct as *)NULL, (caddr_t)rx_ring->rbd_area, 562 len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 563 DDI_DMA_SLEEP, 0, &cookie, &cookie_count); 564 565 if (mystat != DDI_SUCCESS) { 566 e1000g_log(Adapter, CE_WARN, 567 "Could not bind rbd dma resource: %d", mystat); 568 if (rx_ring->rbd_acc_handle != NULL) { 569 ddi_dma_mem_free(&rx_ring->rbd_acc_handle); 570 rx_ring->rbd_acc_handle = NULL; 571 rx_ring->rbd_area = NULL; 572 } 573 if (rx_ring->rbd_dma_handle != NULL) { 574 ddi_dma_free_handle(&rx_ring->rbd_dma_handle); 575 rx_ring->rbd_dma_handle = NULL; 576 } 577 return (DDI_FAILURE); 578 } 579 580 ASSERT(cookie_count == 1); 581 if (cookie_count != 1) { 582 e1000g_log(Adapter, CE_WARN, 583 "Could not bind rbd dma resource in a single frag. " 584 "Count - %d Len - %d", cookie_count, len); 585 e1000g_free_rx_descriptors(rx_ring); 586 return (DDI_FAILURE); 587 } 588 /* 589 * Initialize the FirstRxDescriptor to the cookie address obtained 590 * from the ddi_dma_addr_bind_handle call. 591 */ 592 rx_ring->rbd_dma_addr = cookie.dmac_laddress; 593 rx_ring->rbd_first = rx_ring->rbd_area; 594 rx_ring->rbd_last = rx_ring->rbd_first + 595 (Adapter->NumRxDescriptors - 1); 596 597 return (DDI_SUCCESS); 598 } 599 600 static void 601 e1000g_free_rx_descriptors(e1000g_rx_ring_t *rx_ring) 602 { 603 if (rx_ring->rbd_dma_handle != NULL) { 604 ddi_dma_unbind_handle(rx_ring->rbd_dma_handle); 605 } 606 if (rx_ring->rbd_acc_handle != NULL) { 607 ddi_dma_mem_free(&rx_ring->rbd_acc_handle); 608 rx_ring->rbd_acc_handle = NULL; 609 rx_ring->rbd_area = NULL; 610 } 611 if (rx_ring->rbd_dma_handle != NULL) { 612 ddi_dma_free_handle(&rx_ring->rbd_dma_handle); 613 rx_ring->rbd_dma_handle = NULL; 614 } 615 rx_ring->rbd_dma_addr = NULL; 616 rx_ring->rbd_first = NULL; 617 rx_ring->rbd_last = NULL; 618 } 619 620 static void 621 e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring) 622 { 623 if (tx_ring->tbd_dma_handle != NULL) { 624 ddi_dma_unbind_handle(tx_ring->tbd_dma_handle); 625 } 626 if (tx_ring->tbd_acc_handle != NULL) { 627 ddi_dma_mem_free(&tx_ring->tbd_acc_handle); 628 tx_ring->tbd_acc_handle = NULL; 629 tx_ring->tbd_area = NULL; 630 } 631 if (tx_ring->tbd_dma_handle != NULL) { 632 ddi_dma_free_handle(&tx_ring->tbd_dma_handle); 633 tx_ring->tbd_dma_handle = NULL; 634 } 635 tx_ring->tbd_dma_addr = NULL; 636 tx_ring->tbd_first = NULL; 637 tx_ring->tbd_last = NULL; 638 } 639 640 641 /* 642 * ********************************************************************** 643 * Name: e1000g_alloc_packets * 644 * * 645 * Description: This routine Allocates Neccesary Buffers for the device * 646 * It allocates memory for * 647 * * 648 * Transmit packet Structure * 649 * Handle for Transmit buffers * 650 * Receive packet structure * 651 * Buffer for Receive packet * 652 * * 653 * * 654 * For ddi memory alloc routine see e1000g_Txalloc description * 655 * NOTE -- The device must have been reset before this routine * 656 * is called. * 657 * * 658 * Author: Hari Seshadri * 659 * Functions Called : * 660 * * 661 * * 662 * * 663 * Arguments: * 664 * Adapter - A pointer to our context sensitive "Adapter" * 665 * structure. * 666 * * 667 * * 668 * Returns: * 669 * DDI_SUCCESS on sucess * 670 * DDI_FAILURE on error * 671 * * 672 * * 673 * * 674 * Modification log: * 675 * Date Who Description * 676 * -------- --- ----------------------------------------------------- * 677 * 30/04/99 VA Cleaned code for memory corruptions, invalid DMA * 678 * attributes and prevent panics * 679 * ********************************************************************** 680 */ 681 static int 682 e1000g_alloc_packets(struct e1000g *Adapter) 683 { 684 int result; 685 e1000g_tx_ring_t *tx_ring; 686 e1000g_rx_ring_t *rx_ring; 687 688 tx_ring = Adapter->tx_ring; 689 rx_ring = Adapter->rx_ring; 690 691 again: 692 rw_enter(&e1000g_dma_type_lock, RW_READER); 693 694 result = e1000g_alloc_tx_packets(tx_ring); 695 if (result != DDI_SUCCESS) { 696 if (e1000g_dma_type == USE_DVMA) { 697 rw_exit(&e1000g_dma_type_lock); 698 699 rw_enter(&e1000g_dma_type_lock, RW_WRITER); 700 e1000g_dma_type = USE_DMA; 701 rw_exit(&e1000g_dma_type_lock); 702 703 e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL, 704 "No enough dvma resource for Tx packets, " 705 "trying to allocate dma buffers...\n"); 706 goto again; 707 } 708 rw_exit(&e1000g_dma_type_lock); 709 710 e1000g_DEBUGLOG_0(Adapter, e1000g_INFO_LEVEL, 711 "Failed to allocate dma buffers for Tx packets\n"); 712 return (DDI_FAILURE); 713 } 714 715 result = e1000g_alloc_rx_packets(rx_ring); 716 if (result != DDI_SUCCESS) { 717 e1000g_free_tx_packets(tx_ring); 718 if (e1000g_dma_type == USE_DVMA) { 719 rw_exit(&e1000g_dma_type_lock); 720 721 rw_enter(&e1000g_dma_type_lock, RW_WRITER); 722 e1000g_dma_type = USE_DMA; 723 rw_exit(&e1000g_dma_type_lock); 724 725 e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL, 726 "No enough dvma resource for Rx packets, " 727 "trying to allocate dma buffers...\n"); 728 goto again; 729 } 730 rw_exit(&e1000g_dma_type_lock); 731 732 e1000g_DEBUGLOG_0(Adapter, e1000g_INFO_LEVEL, 733 "Failed to allocate dma buffers for Rx packets\n"); 734 return (DDI_FAILURE); 735 } 736 737 rw_exit(&e1000g_dma_type_lock); 738 739 return (DDI_SUCCESS); 740 } 741 742 #ifdef __sparc 743 static int 744 e1000g_alloc_dvma_buffer(struct e1000g *Adapter, 745 dma_buffer_t *buf, size_t size) 746 { 747 int mystat; 748 dev_info_t *devinfo; 749 ddi_dma_cookie_t cookie; 750 751 devinfo = Adapter->dip; 752 753 mystat = dvma_reserve(devinfo, 754 &e1000g_dma_limits, 755 Adapter->dvma_page_num, 756 &buf->dma_handle); 757 758 if (mystat != DDI_SUCCESS) { 759 buf->dma_handle = NULL; 760 e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL, 761 "Could not allocate dvma buffer handle: %d\n", mystat); 762 return (DDI_FAILURE); 763 } 764 765 buf->address = kmem_alloc(size, KM_NOSLEEP); 766 767 if (buf->address == NULL) { 768 if (buf->dma_handle != NULL) { 769 dvma_release(buf->dma_handle); 770 buf->dma_handle = NULL; 771 } 772 e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL, 773 "Could not allocate dvma buffer memory\n"); 774 return (DDI_FAILURE); 775 } 776 777 dvma_kaddr_load(buf->dma_handle, 778 buf->address, size, 0, &cookie); 779 780 buf->dma_address = cookie.dmac_laddress; 781 buf->size = size; 782 buf->len = 0; 783 784 return (DDI_SUCCESS); 785 } 786 787 static void 788 e1000g_free_dvma_buffer(dma_buffer_t *buf) 789 { 790 if (buf->dma_handle != NULL) { 791 dvma_unload(buf->dma_handle, 0, -1); 792 } else { 793 return; 794 } 795 796 buf->dma_address = NULL; 797 798 if (buf->address != NULL) { 799 kmem_free(buf->address, buf->size); 800 buf->address = NULL; 801 } 802 803 if (buf->dma_handle != NULL) { 804 dvma_release(buf->dma_handle); 805 buf->dma_handle = NULL; 806 } 807 808 buf->size = 0; 809 buf->len = 0; 810 } 811 #endif 812 813 static int 814 e1000g_alloc_dma_buffer(struct e1000g *Adapter, 815 dma_buffer_t *buf, size_t size) 816 { 817 int mystat; 818 dev_info_t *devinfo; 819 ddi_dma_cookie_t cookie; 820 size_t len; 821 uint_t count; 822 823 devinfo = Adapter->dip; 824 825 mystat = ddi_dma_alloc_handle(devinfo, 826 &buf_dma_attr, 827 DDI_DMA_DONTWAIT, 0, 828 &buf->dma_handle); 829 830 if (mystat != DDI_SUCCESS) { 831 buf->dma_handle = NULL; 832 e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL, 833 "Could not allocate dma buffer handle: %d\n", mystat); 834 return (DDI_FAILURE); 835 } 836 837 mystat = ddi_dma_mem_alloc(buf->dma_handle, 838 size, &accattr2, DDI_DMA_STREAMING, 839 DDI_DMA_DONTWAIT, 0, 840 &buf->address, 841 &len, &buf->acc_handle); 842 843 if (mystat != DDI_SUCCESS) { 844 buf->acc_handle = NULL; 845 buf->address = NULL; 846 if (buf->dma_handle != NULL) { 847 ddi_dma_free_handle(&buf->dma_handle); 848 buf->dma_handle = NULL; 849 } 850 e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL, 851 "Could not allocate dma buffer memory: %d\n", mystat); 852 return (DDI_FAILURE); 853 } 854 855 mystat = ddi_dma_addr_bind_handle(buf->dma_handle, 856 (struct as *)NULL, 857 buf->address, 858 len, DDI_DMA_READ | DDI_DMA_STREAMING, 859 DDI_DMA_SLEEP, 0, &cookie, &count); 860 861 if (mystat != DDI_SUCCESS) { 862 if (buf->acc_handle != NULL) { 863 ddi_dma_mem_free(&buf->acc_handle); 864 buf->acc_handle = NULL; 865 buf->address = NULL; 866 } 867 if (buf->dma_handle != NULL) { 868 ddi_dma_free_handle(&buf->dma_handle); 869 buf->dma_handle = NULL; 870 } 871 e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL, 872 "Could not bind buffer dma handle: %d\n", mystat); 873 return (DDI_FAILURE); 874 } 875 876 ASSERT(count == 1); 877 if (count != 1) { 878 if (buf->dma_handle != NULL) { 879 ddi_dma_unbind_handle(buf->dma_handle); 880 } 881 if (buf->acc_handle != NULL) { 882 ddi_dma_mem_free(&buf->acc_handle); 883 buf->acc_handle = NULL; 884 buf->address = NULL; 885 } 886 if (buf->dma_handle != NULL) { 887 ddi_dma_free_handle(&buf->dma_handle); 888 buf->dma_handle = NULL; 889 } 890 e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL, 891 "Could not bind buffer as a single frag. " 892 "Count = %d\n", count); 893 return (DDI_FAILURE); 894 } 895 896 buf->dma_address = cookie.dmac_laddress; 897 buf->size = len; 898 buf->len = 0; 899 900 return (DDI_SUCCESS); 901 } 902 903 static void 904 e1000g_free_dma_buffer(dma_buffer_t *buf) 905 { 906 if (buf->dma_handle != NULL) { 907 ddi_dma_unbind_handle(buf->dma_handle); 908 } else { 909 return; 910 } 911 912 buf->dma_address = NULL; 913 914 if (buf->acc_handle != NULL) { 915 ddi_dma_mem_free(&buf->acc_handle); 916 buf->acc_handle = NULL; 917 buf->address = NULL; 918 } 919 920 if (buf->dma_handle != NULL) { 921 ddi_dma_free_handle(&buf->dma_handle); 922 buf->dma_handle = NULL; 923 } 924 925 buf->size = 0; 926 buf->len = 0; 927 } 928 929 static int 930 e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring) 931 { 932 int j; 933 PTX_SW_PACKET packet; 934 int mystat; 935 dma_buffer_t *tx_buf; 936 struct e1000g *Adapter = tx_ring->adapter; 937 dev_info_t *devinfo = Adapter->dip; 938 939 /* 940 * Memory allocation for the Transmit software structure, the transmit 941 * software packet. This structure stores all the relevant information 942 * for transmitting a single packet. 943 */ 944 tx_ring->packet_area = 945 kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP); 946 947 if (tx_ring->packet_area == NULL) 948 return (DDI_FAILURE); 949 950 for (j = 0, packet = tx_ring->packet_area; 951 j < Adapter->NumTxSwPacket; j++, packet++) { 952 953 ASSERT(packet != NULL); 954 955 /* 956 * Pre-allocate dma handles for transmit. These dma handles 957 * will be dynamically bound to the data buffers passed down 958 * from the upper layers at the time of transmitting. The 959 * dynamic binding only applies for the packets that are larger 960 * than the tx_bcopy_thresh. 961 */ 962 switch (e1000g_dma_type) { 963 #ifdef __sparc 964 case USE_DVMA: 965 mystat = dvma_reserve(devinfo, 966 &e1000g_dma_limits, 967 Adapter->dvma_page_num, 968 &packet->tx_dma_handle); 969 break; 970 #endif 971 case USE_DMA: 972 mystat = ddi_dma_alloc_handle(devinfo, 973 &tx_dma_attr, 974 DDI_DMA_DONTWAIT, 0, 975 &packet->tx_dma_handle); 976 break; 977 default: 978 ASSERT(B_FALSE); 979 break; 980 } 981 if (mystat != DDI_SUCCESS) { 982 packet->tx_dma_handle = NULL; 983 e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL, 984 "Could not allocate tx dma handle: %d\n", mystat); 985 goto tx_pkt_fail; 986 } 987 988 /* 989 * Pre-allocate transmit buffers for small packets that the 990 * size is less than tx_bcopy_thresh. The data of those small 991 * packets will be bcopy() to the transmit buffers instead of 992 * using dynamical DMA binding. For small packets, bcopy will 993 * bring better performance than DMA binding. 994 */ 995 tx_buf = packet->tx_buf; 996 997 switch (e1000g_dma_type) { 998 #ifdef __sparc 999 case USE_DVMA: 1000 mystat = e1000g_alloc_dvma_buffer(Adapter, 1001 tx_buf, Adapter->TxBufferSize); 1002 break; 1003 #endif 1004 case USE_DMA: 1005 mystat = e1000g_alloc_dma_buffer(Adapter, 1006 tx_buf, Adapter->TxBufferSize); 1007 break; 1008 default: 1009 ASSERT(B_FALSE); 1010 break; 1011 } 1012 if (mystat != DDI_SUCCESS) { 1013 ASSERT(packet->tx_dma_handle != NULL); 1014 switch (e1000g_dma_type) { 1015 #ifdef __sparc 1016 case USE_DVMA: 1017 dvma_release(packet->tx_dma_handle); 1018 break; 1019 #endif 1020 case USE_DMA: 1021 ddi_dma_free_handle(&packet->tx_dma_handle); 1022 break; 1023 default: 1024 ASSERT(B_FALSE); 1025 break; 1026 } 1027 packet->tx_dma_handle = NULL; 1028 e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL, 1029 "Allocate Tx buffer fail\n"); 1030 goto tx_pkt_fail; 1031 } 1032 1033 packet->dma_type = e1000g_dma_type; 1034 } /* for */ 1035 1036 return (DDI_SUCCESS); 1037 1038 tx_pkt_fail: 1039 e1000g_free_tx_packets(tx_ring); 1040 1041 return (DDI_FAILURE); 1042 } 1043 1044 static int 1045 e1000g_alloc_rx_packets(e1000g_rx_ring_t *rx_ring) 1046 { 1047 int i; 1048 PRX_SW_PACKET packet; 1049 struct e1000g *Adapter; 1050 uint32_t packet_num; 1051 1052 Adapter = rx_ring->adapter; 1053 1054 /* 1055 * Allocate memory for the RX_SW_PACKET structures. Each one of these 1056 * structures will contain a virtual and physical address to an actual 1057 * receive buffer in host memory. Since we use one RX_SW_PACKET per 1058 * received packet, the maximum number of RX_SW_PACKETs that we'll 1059 * need is equal to the number of receive descriptors that we've 1060 * allocated. 1061 * 1062 * Pre allocation for recv packet buffer. The Recv intr constructs 1063 * a new mp using this buffer 1064 * 1065 * On Wiseman these Receive buffers must be aligned with 256 byte 1066 * boundary 1067 * Vinay, Apr19,2000 1068 */ 1069 packet_num = Adapter->NumRxDescriptors + Adapter->NumRxFreeList; 1070 rx_ring->packet_area = NULL; 1071 1072 for (i = 0; i < packet_num; i++) { 1073 packet = e1000g_alloc_rx_sw_packet(rx_ring); 1074 if (packet == NULL) 1075 goto rx_pkt_fail; 1076 1077 packet->next = rx_ring->packet_area; 1078 rx_ring->packet_area = packet; 1079 } 1080 1081 return (DDI_SUCCESS); 1082 1083 rx_pkt_fail: 1084 e1000g_free_rx_packets(rx_ring); 1085 1086 return (DDI_FAILURE); 1087 } 1088 1089 static PRX_SW_PACKET 1090 e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *rx_ring) 1091 { 1092 int mystat; 1093 PRX_SW_PACKET packet; 1094 dma_buffer_t *rx_buf; 1095 struct e1000g *Adapter; 1096 1097 Adapter = rx_ring->adapter; 1098 1099 packet = kmem_zalloc(sizeof (RX_SW_PACKET), KM_NOSLEEP); 1100 if (packet == NULL) { 1101 e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL, 1102 "Cound not allocate memory for Rx SwPacket\n"); 1103 return (NULL); 1104 } 1105 1106 rx_buf = packet->rx_buf; 1107 1108 /* 1109 * Make sure that receive buffers are 256 byte aligned 1110 */ 1111 buf_dma_attr.dma_attr_align = Adapter->RcvBufferAlignment; 1112 1113 switch (e1000g_dma_type) { 1114 #ifdef __sparc 1115 case USE_DVMA: 1116 mystat = e1000g_alloc_dvma_buffer(Adapter, 1117 rx_buf, Adapter->RxBufferSize); 1118 break; 1119 #endif 1120 case USE_DMA: 1121 mystat = e1000g_alloc_dma_buffer(Adapter, 1122 rx_buf, Adapter->RxBufferSize); 1123 break; 1124 default: 1125 ASSERT(B_FALSE); 1126 break; 1127 } 1128 1129 if (mystat != DDI_SUCCESS) { 1130 if (packet != NULL) 1131 kmem_free(packet, sizeof (RX_SW_PACKET)); 1132 1133 e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL, 1134 "Failed to allocate Rx buffer\n"); 1135 return (NULL); 1136 } 1137 1138 rx_buf->size -= E1000G_IPALIGNROOM; 1139 rx_buf->address += E1000G_IPALIGNROOM; 1140 rx_buf->dma_address += E1000G_IPALIGNROOM; 1141 1142 packet->rx_ring = (caddr_t)rx_ring; 1143 packet->free_rtn.free_func = e1000g_rxfree_func; 1144 packet->free_rtn.free_arg = (char *)packet; 1145 /* 1146 * esballoc is changed to desballoc which 1147 * is undocumented call but as per sun, 1148 * we can use it. It gives better efficiency. 1149 */ 1150 packet->mp = desballoc((unsigned char *) 1151 rx_buf->address - E1000G_IPALIGNROOM, 1152 rx_buf->size + E1000G_IPALIGNROOM, 1153 BPRI_MED, &packet->free_rtn); 1154 1155 if (packet->mp != NULL) { 1156 packet->mp->b_rptr += E1000G_IPALIGNROOM; 1157 packet->mp->b_wptr += E1000G_IPALIGNROOM; 1158 } 1159 1160 packet->dma_type = e1000g_dma_type; 1161 1162 return (packet); 1163 } 1164 1165 void 1166 e1000g_free_rx_sw_packet(PRX_SW_PACKET packet) 1167 { 1168 dma_buffer_t *rx_buf; 1169 1170 if (packet->mp != NULL) { 1171 freemsg(packet->mp); 1172 packet->mp = NULL; 1173 } 1174 1175 rx_buf = packet->rx_buf; 1176 ASSERT(rx_buf->dma_handle != NULL); 1177 1178 rx_buf->size += E1000G_IPALIGNROOM; 1179 rx_buf->address -= E1000G_IPALIGNROOM; 1180 1181 switch (packet->dma_type) { 1182 #ifdef __sparc 1183 case USE_DVMA: 1184 e1000g_free_dvma_buffer(rx_buf); 1185 break; 1186 #endif 1187 case USE_DMA: 1188 e1000g_free_dma_buffer(rx_buf); 1189 break; 1190 default: 1191 ASSERT(B_FALSE); 1192 break; 1193 } 1194 1195 packet->dma_type = USE_NONE; 1196 1197 kmem_free(packet, sizeof (RX_SW_PACKET)); 1198 } 1199 1200 static void 1201 e1000g_free_rx_packets(e1000g_rx_ring_t *rx_ring) 1202 { 1203 PRX_SW_PACKET packet, next_packet; 1204 1205 rw_enter(&e1000g_rx_detach_lock, RW_WRITER); 1206 for (packet = rx_ring->packet_area; packet != NULL; 1207 packet = packet->next) { 1208 if (packet->flag & E1000G_RX_SW_SENDUP) { 1209 e1000g_mblks_pending++; 1210 packet->flag |= E1000G_RX_SW_DETACHED; 1211 } 1212 } 1213 rw_exit(&e1000g_rx_detach_lock); 1214 1215 packet = rx_ring->packet_area; 1216 rx_ring->packet_area = NULL; 1217 1218 for (; packet != NULL; packet = next_packet) { 1219 next_packet = packet->next; 1220 1221 if (packet->flag & E1000G_RX_SW_DETACHED) 1222 continue; 1223 1224 ASSERT((packet->flag & E1000G_RX_SW_SENDUP) == 0); 1225 e1000g_free_rx_sw_packet(packet); 1226 } 1227 } 1228 1229 static void 1230 e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring) 1231 { 1232 int j; 1233 struct e1000g *Adapter; 1234 PTX_SW_PACKET packet; 1235 dma_buffer_t *tx_buf; 1236 1237 Adapter = tx_ring->adapter; 1238 1239 for (j = 0, packet = tx_ring->packet_area; 1240 j < Adapter->NumTxSwPacket; j++, packet++) { 1241 1242 if (packet == NULL) 1243 break; 1244 1245 /* Free the Tx DMA handle for dynamical binding */ 1246 if (packet->tx_dma_handle != NULL) { 1247 switch (packet->dma_type) { 1248 #ifdef __sparc 1249 case USE_DVMA: 1250 dvma_release(packet->tx_dma_handle); 1251 break; 1252 #endif 1253 case USE_DMA: 1254 ddi_dma_free_handle(&packet->tx_dma_handle); 1255 break; 1256 default: 1257 ASSERT(B_FALSE); 1258 break; 1259 } 1260 packet->tx_dma_handle = NULL; 1261 } else { 1262 /* 1263 * If the dma handle is NULL, then we don't 1264 * need to check the packets left. For they 1265 * have not been initialized or have been freed. 1266 */ 1267 break; 1268 } 1269 1270 tx_buf = packet->tx_buf; 1271 1272 switch (packet->dma_type) { 1273 #ifdef __sparc 1274 case USE_DVMA: 1275 e1000g_free_dvma_buffer(tx_buf); 1276 break; 1277 #endif 1278 case USE_DMA: 1279 e1000g_free_dma_buffer(tx_buf); 1280 break; 1281 default: 1282 ASSERT(B_FALSE); 1283 break; 1284 } 1285 1286 packet->dma_type = USE_NONE; 1287 } 1288 if (tx_ring->packet_area != NULL) { 1289 kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ); 1290 tx_ring->packet_area = NULL; 1291 } 1292 } 1293 1294 /* 1295 * ********************************************************************** 1296 * Name: e1000g_release_dma_resources * 1297 * * 1298 * Description: * 1299 * This function release any pending buffers. that has been * 1300 * previously allocated * 1301 * * 1302 * Parameter Passed: * 1303 * * 1304 * Return Value: * 1305 * * 1306 * Functions called: * 1307 * * 1308 * * 1309 * ********************************************************************** 1310 */ 1311 void 1312 e1000g_release_dma_resources(register struct e1000g *Adapter) 1313 { 1314 e1000g_tx_ring_t *tx_ring; 1315 e1000g_rx_ring_t *rx_ring; 1316 1317 tx_ring = Adapter->tx_ring; 1318 rx_ring = Adapter->rx_ring; 1319 1320 /* 1321 * Release all the handles, memory and DMA resources that are 1322 * allocated for the transmit buffer descriptors. 1323 */ 1324 e1000g_free_tx_descriptors(tx_ring); 1325 1326 /* 1327 * Release all the handles, memory and DMA resources that are 1328 * allocated for the receive buffer descriptors. 1329 */ 1330 e1000g_free_rx_descriptors(rx_ring); 1331 1332 /* 1333 * Free Tx packet resources 1334 */ 1335 e1000g_free_tx_packets(tx_ring); 1336 1337 /* 1338 * TX resources done, now free RX resources 1339 */ 1340 e1000g_free_rx_packets(rx_ring); 1341 } 1342