/*
 * This file is provided under a CDDLv1 license.  When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2007 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms of the CDDLv1.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * **********************************************************************
 * Module Name:								*
 *   e1000g_alloc.c							*
 *									*
 * Abstract:								*
 *   This file contains some routines that take care of			*
 *   memory allocation for descriptors and buffers.			*
 *									*
 * **********************************************************************
 */

#include "e1000g_sw.h"
#include "e1000g_debug.h"

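/*
 * Total size, in bytes, of the tx_sw_packet array: one entry for each
 * buffer on the transmit free list.
 */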
#define	TX_SW_PKT_AREA_SZ \
	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)

static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *);
static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
static void e1000g_free_rx_descriptors(e1000g_rx_ring_t *);
static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_packets(e1000g_rx_ring_t *);
static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
static void e1000g_free_rx_packets(e1000g_rx_ring_t *);
static int e1000g_alloc_dma_buffer(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
static void e1000g_free_dma_buffer(dma_buffer_t *);
#ifdef __sparc
static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
static void e1000g_free_dvma_buffer(dma_buffer_t *);
#endif
static int e1000g_alloc_descriptors(struct e1000g *Adapter);
static void e1000g_free_descriptors(struct e1000g *Adapter);
static int e1000g_alloc_packets(struct e1000g *Adapter);
static void e1000g_free_packets(struct e1000g *Adapter);
static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *,
    ddi_dma_attr_t *p_dma_attr);

/* DMA access attributes for descriptors <Little Endian> */
static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/* DMA access attributes for DMA buffers */
#ifdef __sparc
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC,
};
#else
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
#endif

/* DMA attributes for tx mblk buffers */
static ddi_dma_attr_t e1000g_tx_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	16,			/* maximum number of segments */
	1,			/* granularity */
	0,			/* flags (reserved) */
};

/* DMA attributes for pre-allocated rx/tx buffers */
static ddi_dma_attr_t e1000g_buf_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	0,			/* flags (reserved) */
};

/* DMA attributes for rx/tx descriptors */
static ddi_dma_attr_t e1000g_desc_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	E1000_MDALIGN,		/* alignment in bytes (4K) */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	0,			/* flags (reserved) */
};

#ifdef __sparc
static ddi_dma_lim_t e1000g_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};
#endif

#ifdef __sparc
static dma_type_t e1000g_dma_type = USE_DVMA;
#else
static dma_type_t e1000g_dma_type = USE_DMA;
#endif

extern krwlock_t e1000g_dma_type_lock;

int
e1000g_alloc_dma_resources(struct e1000g *Adapter)
{
	int result;

	result = DDI_FAILURE;

	while ((result != DDI_SUCCESS) &&
	    (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
	    (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
	    (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST) &&
	    (Adapter->rx_freelist_num >= MIN_NUM_RX_FREELIST)) {

		result = e1000g_alloc_descriptors(Adapter);

		if (result == DDI_SUCCESS) {
			result = e1000g_alloc_packets(Adapter);

			if (result != DDI_SUCCESS)
				e1000g_free_descriptors(Adapter);
		}

		/*
		 * If the allocation fails due to resource shortage,
		 * we'll reduce the numbers of descriptors/buffers by
		 * half, and try the allocation again.
		 */
		if (result != DDI_SUCCESS) {
			/*
			 * We must ensure the number of descriptors
			 * is always a multiple of 8.
			 */
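			/*
			 * (n >> 4) << 3 halves n and rounds the result
			 * down to a multiple of 8; e.g. 1000 becomes 496.
			 */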
			Adapter->tx_desc_num =
			    (Adapter->tx_desc_num >> 4) << 3;
			Adapter->rx_desc_num =
			    (Adapter->rx_desc_num >> 4) << 3;

			Adapter->tx_freelist_num >>= 1;
			Adapter->rx_freelist_num >>= 1;
		}
	}

	return (result);
}

/*
 * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
 *
 * This routine allocates the necessary DMA buffers for the
 *	Transmit Descriptor Area
 *	Receive Descriptor Area
 */
static int
e1000g_alloc_descriptors(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;

	result = e1000g_alloc_tx_descriptors(tx_ring);
	if (result != DDI_SUCCESS)
		return (DDI_FAILURE);

	rx_ring = Adapter->rx_ring;

	result = e1000g_alloc_rx_descriptors(rx_ring);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
e1000g_free_descriptors(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

	e1000g_free_tx_descriptors(tx_ring);
	e1000g_free_rx_descriptors(rx_ring);
}

static int
e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Solaris 7 has a problem with allocating physically contiguous memory
	 * that is aligned on a 4K boundary. The transmit and rx descriptors
	 * need to be aligned on a 4K boundary. We first try to allocate the
	 * memory with DMA attributes set to 4K alignment and also no scatter/
	 * gather mechanism specified. In most cases, this does not allocate
	 * memory aligned at a 4K boundary. We then try asking for memory
	 * aligned on a 4K boundary with scatter/gather set to 2. This works
	 * when the amount of memory is less than 4K, i.e. a page size. If
	 * neither of these options work, or if the descriptor area is larger
	 * than 4K, i.e. more than 256 descriptors, we allocate 4K of extra
	 * memory and then align the memory on a 4K boundary ourselves.
	 */
	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;

	/*
	 * Memory allocation for the transmit buffer descriptors.
	 */
	dma_attr.dma_attr_sgllen = 1;

	/*
	 * Allocate a new DMA handle for the transmit descriptor
	 * memory area.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &tx_ring->tbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate tbd dma handle: %d", mystat);
		tx_ring->tbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&tx_ring->tbd_area,
	    &len, &tx_ring->tbd_acc_handle);

	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)tx_ring->tbd_area & (E1000_MDALIGN - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	if (alloc_flag)
		bzero(tx_ring->tbd_area, len);

	/*
	 * If the previous DMA attributes setting could not give us contiguous
	 * memory or the number of descriptors is greater than the page size,
	 * we allocate 4K extra memory and then align it at a 4K boundary.
	 */
	if (!alloc_flag) {
		size = size + ROUNDOFF;

		/*
		 * DMA attributes set to no scatter/gather and byte alignment
		 */
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;

		/*
		 * Allocate a new DMA handle for the transmit descriptor memory
		 * area.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &tx_ring->tbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate tbd dma handle: %d", mystat);
			tx_ring->tbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		/*
		 * Allocate memory to DMA data to and from the transmit
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&tx_ring->tbd_area,
		    &len, &tx_ring->tbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tbd dma memory: %d", mystat);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
			if (tx_ring->tbd_dma_handle != NULL) {
				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
				tx_ring->tbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the entire transmit buffer descriptor area to zero
		 */
		bzero(tx_ring->tbd_area, len);
		/*
		 * Memory has been allocated with the ddi_dma_mem_alloc call,
		 * but has not been aligned. We now align it on a 4K boundary.
		 */
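		/*
		 * P2NPHASE(addr, ROUNDOFF) returns the number of bytes by
		 * which addr must be advanced to reach the next ROUNDOFF
		 * (4K) boundary, so the descriptor pointer below ends up
		 * 4K-aligned and the usable length shrinks accordingly.
		 */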
		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area, ROUNDOFF);
		len = size - templong;
		templong += (uintptr_t)tx_ring->tbd_area;
		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * Transmit buffer descriptor memory allocation succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources are then bound
	 * to the memory address.
	 */
	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource: %d", mystat);
		if (tx_ring->tbd_acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);	/* 1 cookie */

	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
	tx_ring->tbd_first = tx_ring->tbd_area;
	tx_ring->tbd_last = tx_ring->tbd_first +
	    (Adapter->tx_desc_num - 1);

	return (DDI_SUCCESS);
}

static int
e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *rx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Memory allocation for the receive buffer descriptors.
	 */
	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;

	/*
	 * Asking for aligned memory with DMA attributes set for 4K alignment
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = E1000_MDALIGN;

	/*
	 * Allocate a new DMA handle for the receive descriptors
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &rx_ring->rbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate rbd dma handle: %d", mystat);
		rx_ring->rbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}
	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&rx_ring->rbd_area,
	    &len, &rx_ring->rbd_acc_handle);

	/*
	 * Check if memory allocation succeeded and also if the
	 * allocated memory is aligned correctly.
	 */
	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)rx_ring->rbd_area & (E1000_MDALIGN - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
		}
		if (rx_ring->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
			rx_ring->rbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the allocated receive descriptor memory to zero.
	 */
	if (alloc_flag)
		bzero((caddr_t)rx_ring->rbd_area, len);

	/*
	 * If the aligned memory allocation did not succeed,
	 * do the alignment ourselves.
	 */
	if (!alloc_flag) {
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;
		size = size + ROUNDOFF;
		/*
		 * Allocate a new DMA handle for the receive descriptor.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &rx_ring->rbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate rbd dma handle: %d", mystat);
			rx_ring->rbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}
		/*
		 * Allocate memory to DMA data to and from the receive
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&rx_ring->rbd_area,
		    &len, &rx_ring->rbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate rbd dma memory: %d", mystat);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
			if (rx_ring->rbd_dma_handle != NULL) {
				ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
				rx_ring->rbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the allocated receive descriptor memory to zero.
		 */
		bzero((caddr_t)rx_ring->rbd_area, len);
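		/* Align on a 4K boundary, as in e1000g_alloc_tx_descriptors(). */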
		templong = P2NPHASE((uintptr_t)rx_ring->rbd_area, ROUNDOFF);
		len = size - templong;
		templong += (uintptr_t)rx_ring->rbd_area;
		rx_ring->rbd_area = (struct e1000_rx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * The memory allocation of the receive descriptors succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	mystat = ddi_dma_addr_bind_handle(rx_ring->rbd_dma_handle,
	    (struct as *)NULL, (caddr_t)rx_ring->rbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource: %d", mystat);
		if (rx_ring->rbd_acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
		}
		if (rx_ring->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
			rx_ring->rbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);
	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_rx_descriptors(rx_ring);
		return (DDI_FAILURE);
	}

	rx_ring->rbd_dma_addr = cookie.dmac_laddress;
	rx_ring->rbd_first = rx_ring->rbd_area;
	rx_ring->rbd_last = rx_ring->rbd_first +
	    (Adapter->rx_desc_num - 1);

	return (DDI_SUCCESS);
}

static void
e1000g_free_rx_descriptors(e1000g_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_dma_handle != NULL) {
		ddi_dma_unbind_handle(rx_ring->rbd_dma_handle);
	}
	if (rx_ring->rbd_acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
		rx_ring->rbd_acc_handle = NULL;
		rx_ring->rbd_area = NULL;
	}
	if (rx_ring->rbd_dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
		rx_ring->rbd_dma_handle = NULL;
	}
	rx_ring->rbd_dma_addr = NULL;
	rx_ring->rbd_first = NULL;
	rx_ring->rbd_last = NULL;
}

static void
e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
	}
	if (tx_ring->tbd_acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
		tx_ring->tbd_acc_handle = NULL;
		tx_ring->tbd_area = NULL;
	}
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
		tx_ring->tbd_dma_handle = NULL;
	}
	tx_ring->tbd_dma_addr = NULL;
	tx_ring->tbd_first = NULL;
	tx_ring->tbd_last = NULL;
}

/*
 * e1000g_alloc_packets - allocate DMA buffers for rx/tx
 *
 * This routine allocates the necessary buffers for the
 *	 Transmit sw packet structure
 *	 DMA handle for Transmit
 *	 DMA buffer for Transmit
 *	 Receive sw packet structure
 *	 DMA buffer for Receive
 */
static int
e1000g_alloc_packets(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

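	/*
	 * On sparc, buffer allocation first tries DVMA. If DVMA resources
	 * are exhausted, the global e1000g_dma_type is switched to DMA
	 * (under the writer lock) and the whole allocation is retried from
	 * here; the switch affects all buffers allocated from then on.
	 */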
again:
	rw_enter(&e1000g_dma_type_lock, RW_READER);

	result = e1000g_alloc_tx_packets(tx_ring);
	if (result != DDI_SUCCESS) {
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Tx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Tx packets\n");
		return (DDI_FAILURE);
	}

	result = e1000g_alloc_rx_packets(rx_ring);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_packets(tx_ring);
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Rx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Rx packets\n");
		return (DDI_FAILURE);
	}

	rw_exit(&e1000g_dma_type_lock);

	return (DDI_SUCCESS);
}

static void
e1000g_free_packets(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

	e1000g_free_tx_packets(tx_ring);
	e1000g_free_rx_packets(rx_ring);
}

#ifdef __sparc
static int
e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = dvma_reserve(devinfo,
	    &e1000g_dma_limits,
	    Adapter->dvma_page_num,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	buf->address = kmem_alloc(size, KM_NOSLEEP);

	if (buf->address == NULL) {
		if (buf->dma_handle != NULL) {
			dvma_release(buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer memory\n");
		return (DDI_FAILURE);
	}

	dvma_kaddr_load(buf->dma_handle,
	    buf->address, size, 0, &cookie);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = size;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dvma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		dvma_unload(buf->dma_handle, 0, -1);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->address != NULL) {
		kmem_free(buf->address, buf->size);
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		dvma_release(buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
#endif

static int
e1000g_alloc_dma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->address,
	    &len, &buf->acc_handle);

	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		ddi_dma_unbind_handle(buf->dma_handle);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

static int
e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	p_tx_sw_packet_t packet;
	int mystat;
	dma_buffer_t *tx_buf;
	struct e1000g *Adapter;
	dev_info_t *devinfo;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;
	dma_attr = e1000g_buf_dma_attr;

	/*
	 * Memory allocation for the transmit software packet structures.
	 * Each of these structures stores all the relevant information
	 * for transmitting a single packet.
	 */
	tx_ring->packet_area =
	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);

	if (tx_ring->packet_area == NULL)
		return (DDI_FAILURE);

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		ASSERT(packet != NULL);

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting. The
		 * dynamic binding only applies to packets that are larger
		 * than tx_bcopy_thresh.
		 */
		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = dvma_reserve(devinfo,
			    &e1000g_dma_limits,
			    Adapter->dvma_page_num,
			    &packet->tx_dma_handle);
			break;
#endif
		case USE_DMA:
			mystat = ddi_dma_alloc_handle(devinfo,
			    &e1000g_tx_dma_attr,
			    DDI_DMA_DONTWAIT, 0,
			    &packet->tx_dma_handle);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tx dma handle: %d\n", mystat);
			goto tx_pkt_fail;
		}

		/*
		 * Pre-allocate transmit buffers for small packets whose
		 * size is less than tx_bcopy_thresh. The data of such small
		 * packets will be bcopy()'d into the transmit buffers instead
		 * of using dynamic DMA binding. For small packets, bcopy
		 * brings better performance than DMA binding.
		 */
		tx_buf = packet->tx_buf;

		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = e1000g_alloc_dvma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size);
			break;
#endif
		case USE_DMA:
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size, &dma_attr);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			ASSERT(packet->tx_dma_handle != NULL);
			switch (e1000g_dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Failed to allocate Tx buffer\n");
			goto tx_pkt_fail;
		}

		packet->dma_type = e1000g_dma_type;
	} /* for */

	return (DDI_SUCCESS);

tx_pkt_fail:
	e1000g_free_tx_packets(tx_ring);

	return (DDI_FAILURE);
}

static int
e1000g_alloc_rx_packets(e1000g_rx_ring_t *rx_ring)
{
	int i;
	p_rx_sw_packet_t packet;
	struct e1000g *Adapter;
	uint32_t packet_num;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_ring->adapter;
	dma_attr = e1000g_buf_dma_attr;

#ifndef NO_82542_SUPPORT
	dma_attr.dma_attr_align = Adapter->rx_buf_align;
#endif
	/*
	 * Allocate memory for the rx_sw_packet structures. Each one of these
	 * structures will contain a virtual and physical address to an actual
	 * receive buffer in host memory. Since we use one rx_sw_packet per
	 * received packet, the number of rx_sw_packet structures we allocate
	 * is the number of receive descriptors plus the number of entries
	 * on the receive free list.
	 */
	packet_num = Adapter->rx_desc_num + Adapter->rx_freelist_num;
	rx_ring->packet_area = NULL;

	for (i = 0; i < packet_num; i++) {
		packet = e1000g_alloc_rx_sw_packet(rx_ring, &dma_attr);
		if (packet == NULL)
			goto rx_pkt_fail;

		packet->next = rx_ring->packet_area;
		rx_ring->packet_area = packet;
	}

	return (DDI_SUCCESS);

rx_pkt_fail:
	e1000g_free_rx_packets(rx_ring);

	return (DDI_FAILURE);
}

static p_rx_sw_packet_t
e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *rx_ring, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	p_rx_sw_packet_t packet;
	dma_buffer_t *rx_buf;
	struct e1000g *Adapter;

	Adapter = rx_ring->adapter;

	packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
	if (packet == NULL) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate memory for Rx SwPacket\n");
		return (NULL);
	}

	rx_buf = packet->rx_buf;

	switch (e1000g_dma_type) {
#ifdef __sparc
	case USE_DVMA:
		mystat = e1000g_alloc_dvma_buffer(Adapter,
		    rx_buf, Adapter->rx_buffer_size);
		break;
#endif
	case USE_DMA:
		mystat = e1000g_alloc_dma_buffer(Adapter,
		    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	if (mystat != DDI_SUCCESS) {
		if (packet != NULL)
			kmem_free(packet, sizeof (rx_sw_packet_t));

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate Rx buffer\n");
		return (NULL);
	}

	rx_buf->size -= E1000G_IPALIGNROOM;
	rx_buf->address += E1000G_IPALIGNROOM;
	rx_buf->dma_address += E1000G_IPALIGNROOM;
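	/*
	 * The buffer is offset by E1000G_IPALIGNROOM bytes (typically 2)
	 * so that the IP header, which follows the 14-byte Ethernet
	 * header, starts on a 4-byte boundary.
	 */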

	packet->rx_ring = (caddr_t)rx_ring;
	packet->free_rtn.free_func = e1000g_rxfree_func;
	packet->free_rtn.free_arg = (char *)packet;
	/*
	 * desballoc() is used here instead of esballoc(). Although it is
	 * an undocumented call, Sun has confirmed that we can use it, and
	 * it gives better efficiency.
	 */
	packet->mp = desballoc((unsigned char *)
	    rx_buf->address - E1000G_IPALIGNROOM,
	    rx_buf->size + E1000G_IPALIGNROOM,
	    BPRI_MED, &packet->free_rtn);

	if (packet->mp != NULL) {
		packet->mp->b_rptr += E1000G_IPALIGNROOM;
		packet->mp->b_wptr += E1000G_IPALIGNROOM;
	}

	packet->dma_type = e1000g_dma_type;

	return (packet);
}

void
e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet)
{
	dma_buffer_t *rx_buf;

	if (packet->mp != NULL) {
		freemsg(packet->mp);
		packet->mp = NULL;
	}

	rx_buf = packet->rx_buf;
	ASSERT(rx_buf->dma_handle != NULL);

	rx_buf->size += E1000G_IPALIGNROOM;
	rx_buf->address -= E1000G_IPALIGNROOM;

	switch (packet->dma_type) {
#ifdef __sparc
	case USE_DVMA:
		e1000g_free_dvma_buffer(rx_buf);
		break;
#endif
	case USE_DMA:
		e1000g_free_dma_buffer(rx_buf);
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	packet->dma_type = USE_NONE;

	kmem_free(packet, sizeof (rx_sw_packet_t));
}

static void
e1000g_free_rx_packets(e1000g_rx_ring_t *rx_ring)
{
	p_rx_sw_packet_t packet, next_packet, free_list;

	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);

	free_list = NULL;
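	/*
	 * Packets whose mblk has been sent up to the stack
	 * (E1000G_RX_SW_SENDUP) are still owned by the upper layers;
	 * they are moved onto the pending list and reclaimed later by
	 * the free callback. All other packets are freed immediately.
	 */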
	packet = rx_ring->packet_area;
	for (; packet != NULL; packet = next_packet) {
		next_packet = packet->next;

		if (packet->flag == E1000G_RX_SW_SENDUP) {
			rx_ring->pending_count++;
			e1000g_mblks_pending++;
			packet->flag = E1000G_RX_SW_STOP;
			packet->next = rx_ring->pending_list;
			rx_ring->pending_list = packet;
		} else {
			packet->next = free_list;
			free_list = packet;
		}
	}
	rx_ring->packet_area = NULL;

	rw_exit(&e1000g_rx_detach_lock);

	packet = free_list;
	for (; packet != NULL; packet = next_packet) {
		next_packet = packet->next;

		ASSERT(packet->flag == E1000G_RX_SW_FREE);
		e1000g_free_rx_sw_packet(packet);
	}
}

static void
e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	struct e1000g *Adapter;
	p_tx_sw_packet_t packet;
	dma_buffer_t *tx_buf;

	Adapter = tx_ring->adapter;

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		if (packet == NULL)
			break;

		/* Free the Tx DMA handle for dynamic binding */
		if (packet->tx_dma_handle != NULL) {
			switch (packet->dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, we don't need to check
			 * the remaining packets; they either were never
			 * initialized or have already been freed.
			 */
			break;
		}

		tx_buf = packet->tx_buf;

		switch (packet->dma_type) {
#ifdef __sparc
		case USE_DVMA:
			e1000g_free_dvma_buffer(tx_buf);
			break;
#endif
		case USE_DMA:
			e1000g_free_dma_buffer(tx_buf);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}

		packet->dma_type = USE_NONE;
	}
	if (tx_ring->packet_area != NULL) {
		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
		tx_ring->packet_area = NULL;
	}
}

/*
 * e1000g_release_dma_resources - release allocated DMA resources
 *
 * This function releases any pending buffers that have been
 * previously allocated.
 */
void
e1000g_release_dma_resources(struct e1000g *Adapter)
{
	e1000g_free_descriptors(Adapter);
	e1000g_free_packets(Adapter);
}