/*
 * This file is provided under a CDDLv1 license.  When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2007 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms of the CDDLv1.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * **********************************************************************
 * Module Name:								*
 *   e1000g_alloc.c							*
 *									*
 * Abstract:								*
 *   This file contains the routines that take care of			*
 *   initialization, teardown, and memory allocation.			*
 *									*
 *   This driver runs on the following hardware:			*
 *   - Wiseman based PCI gigabit ethernet adapters			*
 *									*
 * Environment:								*
 *   Kernel Mode -							*
 *									*
 * **********************************************************************
 */

#include "e1000g_sw.h"
#include "e1000g_debug.h"

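/*
 * Total size, in bytes, of the TX_SW_PACKET array allocated for a
 * transmit ring: one TX_SW_PACKET per pre-allocated transmit buffer.
 */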
#define	TX_SW_PKT_AREA_SZ \
	(sizeof (TX_SW_PACKET) * Adapter->NumTxSwPacket)

static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *);
static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
static void e1000g_free_rx_descriptors(e1000g_rx_ring_t *);
static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_packets(e1000g_rx_ring_t *);
static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
static void e1000g_free_rx_packets(e1000g_rx_ring_t *);
static int e1000g_alloc_dma_buffer(struct e1000g *, dma_buffer_t *, size_t);
static void e1000g_free_dma_buffer(dma_buffer_t *);
#ifdef __sparc
static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
static void e1000g_free_dvma_buffer(dma_buffer_t *);
#endif
static int e1000g_alloc_descriptors(struct e1000g *Adapter);
static int e1000g_alloc_packets(struct e1000g *Adapter);
static PRX_SW_PACKET e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *);

#ifdef __sparc
static ddi_dma_lim_t e1000g_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};
#endif

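/*
 * On SPARC, fast DVMA is used for packet buffers by default; all other
 * platforms use physical DMA.  If DVMA resources are exhausted at
 * allocation time, e1000g_alloc_packets() switches this global to
 * USE_DMA and retries.
 */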
#ifdef __sparc
static dma_type_t e1000g_dma_type = USE_DVMA;
#else
static dma_type_t e1000g_dma_type = USE_DMA;
#endif

extern krwlock_t e1000g_dma_type_lock;

int
e1000g_alloc_dma_resources(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

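	/*
	 * The descriptor rings are allocated first; if the subsequent
	 * packet buffer allocation fails, the rings are unwound below
	 * before returning failure.
	 */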
	if (e1000g_alloc_descriptors(Adapter) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (e1000g_alloc_packets(Adapter) != DDI_SUCCESS) {
		e1000g_free_tx_descriptors(tx_ring);
		e1000g_free_rx_descriptors(rx_ring);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * **********************************************************************
 * Name:	e1000g_alloc_descriptors				*
 *									*
 * Description:								*
 *     This routine allocates the necessary buffers for the device.	*
 *     It allocates memory for the					*
 *	 Transmit Descriptor Area					*
 *	 Receive Descriptor Area					*
 *									*
 *     NOTE -- The device must have been reset before this routine	*
 *	       is called.						*
 *									*
 * Author:	Hari Seshadri						*
 * Functions Called:							*
 *     DDI memory functions:						*
 *     ddi_dma_alloc_handle() allocates a new DMA handle. A DMA	*
 *     handle is an opaque object used as a reference to		*
 *     subsequently allocated DMA resources. ddi_dma_alloc_handle()	*
 *     accepts as parameters the device information referred to by	*
 *     dip and the device's DMA attributes described by a		*
 *     ddi_dma_attr(9S) structure. A successful call to		*
 *     ddi_dma_alloc_handle() fills in the value pointed to by		*
 *     handlep. A DMA handle must only be used by the device for	*
 *     which it was allocated and is only valid for one I/O		*
 *     transaction at a time.						*
 *									*
 *     ddi_dma_mem_alloc() allocates memory for DMA transfers to or	*
 *     from a device. The allocation obeys the alignment, padding	*
 *     constraints and device granularity specified by the DMA		*
 *     attributes (see ddi_dma_attr(9S)) passed to			*
 *     ddi_dma_alloc_handle(9F) and the more restrictive attributes	*
 *     imposed by the system. Flags should be set to			*
 *     DDI_DMA_STREAMING if the device is doing sequential,		*
 *     unidirectional, block-sized, and block-aligned transfers to	*
 *     or from memory.							*
 *									*
 *     ddi_dma_addr_bind_handle() allocates DMA resources for a	*
 *     memory object such that a device can perform DMA to or from	*
 *     the object. DMA resources are allocated considering the		*
 *     device's DMA attributes as expressed by ddi_dma_attr(9S)	*
 *     (see ddi_dma_alloc_handle(9F)).					*
 *     ddi_dma_addr_bind_handle() fills in the first DMA cookie	*
 *     pointed to by cookiep with the appropriate address, length,	*
 *     and bus type. *ccountp is set to the number of DMA cookies	*
 *     representing this DMA object. Subsequent DMA cookies must be	*
 *     retrieved by calling ddi_dma_nextcookie(9F) the number of	*
 *     times specified by *ccountp - 1.					*
 *									*
 * Arguments:								*
 *      Adapter - A pointer to the context-sensitive "Adapter"		*
 *      structure.							*
 *									*
 * Returns:								*
 *      DDI_SUCCESS on success						*
 *      DDI_FAILURE on error						*
 *									*
 * Modification log:							*
 * Date      Who  Description						*
 * --------  ---  -----------------------------------------------------	*
 * 11/11/98  Vinay  Cleaned the entire function to prevent panics and	*
 *		   memory corruption					*
 * 17/11/98  Vinay  Optimized it for proper usage of function calls	*
 * 30/04/99  Vinay  Resolved some more memory problems related to race	*
 *		   conditions						*
 * **********************************************************************
 */
static int
e1000g_alloc_descriptors(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;

	result = e1000g_alloc_tx_descriptors(tx_ring);
	if (result != DDI_SUCCESS)
		return (DDI_FAILURE);

	rx_ring = Adapter->rx_ring;

	result = e1000g_alloc_rx_descriptors(rx_ring);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;

	Adapter = tx_ring->adapter;

	alloc_flag = B_FALSE;

	devinfo = Adapter->dip;

	/*
	 * Solaris 7 has a problem with allocating physically contiguous memory
	 * that is aligned on a 4K boundary. The transmit and rx descriptors
	 * need to be aligned on a 4K boundary. We first try to allocate the
	 * memory with DMA attributes set to 4K alignment and also no scatter/
	 * gather mechanism specified. In most cases, this does not allocate
	 * memory aligned on a 4K boundary. We then try asking for memory
	 * aligned on a 4K boundary with scatter/gather set to 2. This works
	 * when the amount of memory is less than 4K, i.e. a page size. If
	 * neither of these options works, or if the number of descriptors is
	 * greater than 4K (i.e. more than 256 descriptors), we allocate 4K
	 * extra memory and then align the memory on a 4K boundary ourselves.
	 */
	size = sizeof (struct e1000_tx_desc) * Adapter->NumTxDescriptors;

	/*
	 * Memory allocation for the transmit buffer descriptors.
	 */
	/*
	 * DMA attributes set to request 4K alignment with no
	 * scatter/gather.
	 * This typically does not succeed on Solaris 7, but
	 * might work on Solaris 2.6.
	 */
	tbd_dma_attr.dma_attr_sgllen = 1;

	/*
	 * Allocate a new DMA handle for the transmit descriptor
	 * memory area.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &tbd_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &tx_ring->tbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		e1000g_log(Adapter, CE_WARN,
		    "Could not allocate tbd dma handle: %d", mystat);
		tx_ring->tbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
	    size,
	    &accattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&tx_ring->tbd_area,
	    &len, &tx_ring->tbd_acc_handle);

	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)tx_ring->tbd_area & (E1000_MDALIGN - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;
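
	/*
	 * alloc_flag records whether the strictly aligned allocation above
	 * succeeded; when it is B_FALSE we fall through to the
	 * over-allocate-and-align workaround below.
	 */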

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	if (alloc_flag)
		bzero(tx_ring->tbd_area, len);

	/*
	 * If the previous DMA attributes setting could not give us contiguous
	 * memory, or the number of descriptors is greater than the page size,
	 * we allocate 4K extra memory and then align it on a 4K boundary.
	 */
	if (!alloc_flag) {
		size = size + ROUNDOFF;

		/*
		 * DMA attributes set to no scatter/gather and byte (i.e.
		 * unrestricted) alignment
		 */
		tbd_dma_attr.dma_attr_align = 1;
		tbd_dma_attr.dma_attr_sgllen = 1;

		/*
		 * Allocate a new DMA handle for the transmit descriptor memory
		 * area.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &tbd_dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &tx_ring->tbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			e1000g_log(Adapter, CE_WARN,
			    "Could not re-allocate tbd dma handle: %d", mystat);
			tx_ring->tbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		/*
		 * Allocate memory to DMA data to and from the transmit
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
		    size,
		    &accattr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&tx_ring->tbd_area,
		    &len, &tx_ring->tbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			e1000g_log(Adapter, CE_WARN,
			    "Could not allocate tbd dma memory: %d", mystat);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
			if (tx_ring->tbd_dma_handle != NULL) {
				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
				tx_ring->tbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the entire transmit buffer descriptor area to
		 * zero.
		 */
		bzero(tx_ring->tbd_area, len);
		/*
		 * Memory has been allocated with the ddi_dma_mem_alloc call,
		 * but has not been aligned. We now align it on a 4K boundary.
		 */
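		/*
		 * P2NPHASE(x, align) returns the number of bytes needed to
		 * advance x to the next align boundary.  For example,
		 * assuming ROUNDOFF is 4K (0x1000): if tbd_area ends in
		 * ...0x1800, P2NPHASE() yields 0x800, so 0x800 bytes are
		 * skipped, the usable length shrinks by the same amount,
		 * and the descriptor area starts at ...0x2000.
		 */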
		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area, ROUNDOFF);
		len = size - templong;
		templong += (uintptr_t)tx_ring->tbd_area;
		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * Transmit buffer descriptor memory allocation succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound
	 * to the memory address.
	 */
	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		e1000g_log(Adapter, CE_WARN,
		    "Could not bind tbd dma resource: %d", mystat);
		if (tx_ring->tbd_acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);	/* 1 cookie */

	if (cookie_count != 1) {
		e1000g_log(Adapter, CE_WARN,
		    "Could not bind tbd dma resource in a single frag. "
		    "Count - %d Len - %lu", cookie_count, (ulong_t)len);
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	/*
	 * tbd_dma_addr is initialized to the physical address that
	 * was obtained from the ddi_dma_addr_bind_handle call
	 */
	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
	tx_ring->tbd_first = tx_ring->tbd_area;
	tx_ring->tbd_last = tx_ring->tbd_first +
	    (Adapter->NumTxDescriptors - 1);

	return (DDI_SUCCESS);
}

static int
e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *rx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;

	Adapter = rx_ring->adapter;

	alloc_flag = B_FALSE;

	devinfo = Adapter->dip;

	/*
	 * Memory allocation for the receive buffer descriptors.
	 */
	size = (sizeof (struct e1000_rx_desc)) * Adapter->NumRxDescriptors;

	/*
	 * Ask for memory aligned on a 4K boundary via the DMA attributes.
	 */
	tbd_dma_attr.dma_attr_sgllen = 1;
	tbd_dma_attr.dma_attr_align = E1000_MDALIGN;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area. Reuse tbd_dma_attr, since the receive
	 * descriptors have the same attributes.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &tbd_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &rx_ring->rbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		e1000g_log(Adapter, CE_WARN,
		    "Could not allocate rbd dma handle: %d", mystat);
		rx_ring->rbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}
	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
	    size,
	    &accattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&rx_ring->rbd_area,
	    &len, &rx_ring->rbd_acc_handle);

	/*
	 * Check if memory allocation succeeded and also if the
	 * allocated memory is aligned correctly.
	 */
	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)rx_ring->rbd_area & (E1000_MDALIGN - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
		}
		if (rx_ring->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
			rx_ring->rbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the allocated receive descriptor memory to zero.
	 */
	if (alloc_flag)
		bzero((caddr_t)rx_ring->rbd_area, len);

	/*
	 * If memory allocation did not succeed, or if the number of
	 * descriptors is greater than a page size (more than 256
	 * descriptors), do the alignment ourselves.
	 */
	if (!alloc_flag) {
		tbd_dma_attr.dma_attr_align = 1;
		tbd_dma_attr.dma_attr_sgllen = 1;
		size = size + ROUNDOFF;
		/*
		 * Allocate a new DMA handle for the receive descriptor memory
		 * area. Reuse tbd_dma_attr, since the receive descriptors
		 * have the same attributes.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &tbd_dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &rx_ring->rbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			e1000g_log(Adapter, CE_WARN,
			    "Could not re-allocate rbd dma handle: %d", mystat);
			rx_ring->rbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}
		/*
		 * Allocate memory to DMA data to and from the receive
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
		    size,
		    &accattr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&rx_ring->rbd_area,
		    &len, &rx_ring->rbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			e1000g_log(Adapter, CE_WARN,
			    "Could not allocate rbd dma memory: %d", mystat);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
			if (rx_ring->rbd_dma_handle != NULL) {
				ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
				rx_ring->rbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the allocated receive descriptor memory to zero.
		 */
		bzero((caddr_t)rx_ring->rbd_area, len);
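		/*
		 * Align the descriptor area on a ROUNDOFF boundary; see the
		 * worked example in e1000g_alloc_tx_descriptors() above.
		 */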
		templong = P2NPHASE((uintptr_t)rx_ring->rbd_area, ROUNDOFF);
		len = size - templong;
		templong += (uintptr_t)rx_ring->rbd_area;
		rx_ring->rbd_area = (struct e1000_rx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * The memory allocation of the receive descriptors succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	mystat = ddi_dma_addr_bind_handle(rx_ring->rbd_dma_handle,
	    (struct as *)NULL, (caddr_t)rx_ring->rbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		e1000g_log(Adapter, CE_WARN,
		    "Could not bind rbd dma resource: %d", mystat);
		if (rx_ring->rbd_acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
		}
		if (rx_ring->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
			rx_ring->rbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);
	if (cookie_count != 1) {
		e1000g_log(Adapter, CE_WARN,
		    "Could not bind rbd dma resource in a single frag. "
		    "Count - %d Len - %lu", cookie_count, (ulong_t)len);
		e1000g_free_rx_descriptors(rx_ring);
		return (DDI_FAILURE);
	}
	/*
	 * rbd_dma_addr is initialized to the cookie address obtained
	 * from the ddi_dma_addr_bind_handle call.
	 */
	rx_ring->rbd_dma_addr = cookie.dmac_laddress;
	rx_ring->rbd_first = rx_ring->rbd_area;
	rx_ring->rbd_last = rx_ring->rbd_first +
	    (Adapter->NumRxDescriptors - 1);

	return (DDI_SUCCESS);
}

static void
e1000g_free_rx_descriptors(e1000g_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_dma_handle != NULL) {
		ddi_dma_unbind_handle(rx_ring->rbd_dma_handle);
	}
	if (rx_ring->rbd_acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
		rx_ring->rbd_acc_handle = NULL;
		rx_ring->rbd_area = NULL;
	}
	if (rx_ring->rbd_dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
		rx_ring->rbd_dma_handle = NULL;
	}
	rx_ring->rbd_dma_addr = 0;
	rx_ring->rbd_first = NULL;
	rx_ring->rbd_last = NULL;
}

static void
e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
	}
	if (tx_ring->tbd_acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
		tx_ring->tbd_acc_handle = NULL;
		tx_ring->tbd_area = NULL;
	}
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
		tx_ring->tbd_dma_handle = NULL;
	}
	tx_ring->tbd_dma_addr = 0;
	tx_ring->tbd_first = NULL;
	tx_ring->tbd_last = NULL;
}


/*
 * **********************************************************************
 * Name:	e1000g_alloc_packets					*
 *									*
 * Description: This routine allocates the necessary buffers for the	*
 *      device. It allocates memory for the				*
 *									*
 *	 Transmit packet structure					*
 *	 Handles for transmit buffers					*
 *	 Receive packet structure					*
 *	 Buffers for receive packets					*
 *									*
 *       For the DDI memory allocation routines, see the		*
 *       e1000g_alloc_descriptors description.				*
 *       NOTE -- The device must have been reset before this routine	*
 *	       is called.						*
 *									*
 * Author:	Hari Seshadri						*
 * Functions Called:							*
 *									*
 * Arguments:								*
 *      Adapter - A pointer to our context-sensitive "Adapter"		*
 *		structure.						*
 *									*
 * Returns:								*
 *      DDI_SUCCESS on success						*
 *	  DDI_FAILURE on error						*
 *									*
 * Modification log:							*
 * Date      Who  Description						*
 * --------  ---  -----------------------------------------------------	*
 * 30/04/99  VA   Cleaned code to prevent memory corruption, invalid	*
 *		DMA attributes and panics				*
 * **********************************************************************
 */
static int
e1000g_alloc_packets(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

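	/*
	 * e1000g_dma_type is driver-global, so it is read under
	 * e1000g_dma_type_lock.  If DVMA resources run out below, the
	 * lock is re-entered as writer, the type is downgraded to
	 * USE_DMA, and the allocation is retried from this label.
	 */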
again:
	rw_enter(&e1000g_dma_type_lock, RW_READER);

	result = e1000g_alloc_tx_packets(tx_ring);
	if (result != DDI_SUCCESS) {
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL,
			    "Not enough dvma resources for Tx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		e1000g_DEBUGLOG_0(Adapter, e1000g_INFO_LEVEL,
		    "Failed to allocate dma buffers for Tx packets\n");
		return (DDI_FAILURE);
	}

	result = e1000g_alloc_rx_packets(rx_ring);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_packets(tx_ring);
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL,
			    "Not enough dvma resources for Rx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		e1000g_DEBUGLOG_0(Adapter, e1000g_INFO_LEVEL,
		    "Failed to allocate dma buffers for Rx packets\n");
		return (DDI_FAILURE);
	}

	rw_exit(&e1000g_dma_type_lock);

	return (DDI_SUCCESS);
}

#ifdef __sparc
static int
e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = dvma_reserve(devinfo,
	    &e1000g_dma_limits,
	    Adapter->dvma_page_num,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL,
		    "Could not allocate dvma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	buf->address = kmem_alloc(size, KM_NOSLEEP);

	if (buf->address == NULL) {
		if (buf->dma_handle != NULL) {
			dvma_release(buf->dma_handle);
			buf->dma_handle = NULL;
		}
		e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL,
		    "Could not allocate dvma buffer memory\n");
		return (DDI_FAILURE);
	}

	dvma_kaddr_load(buf->dma_handle,
	    buf->address, size, 0, &cookie);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = size;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dvma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		dvma_unload(buf->dma_handle, 0, -1);
	} else {
		return;
	}

	buf->dma_address = 0;

	if (buf->address != NULL) {
		kmem_free(buf->address, buf->size);
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		dvma_release(buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
#endif

static int
e1000g_alloc_dma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    &buf_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &accattr2, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->address,
	    &len, &buf->acc_handle);

	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		ddi_dma_unbind_handle(buf->dma_handle);
	} else {
		return;
	}

	buf->dma_address = 0;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

static int
e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	PTX_SW_PACKET packet;
	int mystat;
	dma_buffer_t *tx_buf;
	struct e1000g *Adapter = tx_ring->adapter;
	dev_info_t *devinfo = Adapter->dip;

	/*
	 * Memory allocation for the transmit software packet structures.
	 * Each TX_SW_PACKET stores all the information relevant to
	 * transmitting a single packet.
	 */
	tx_ring->packet_area =
	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);

	if (tx_ring->packet_area == NULL)
		return (DDI_FAILURE);

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->NumTxSwPacket; j++, packet++) {

		ASSERT(packet != NULL);

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting. The
		 * dynamic binding only applies to packets that are larger
		 * than the tx_bcopy_thresh.
		 */
		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = dvma_reserve(devinfo,
			    &e1000g_dma_limits,
			    Adapter->dvma_page_num,
			    &packet->tx_dma_handle);
			break;
#endif
		case USE_DMA:
			mystat = ddi_dma_alloc_handle(devinfo,
			    &tx_dma_attr,
			    DDI_DMA_DONTWAIT, 0,
			    &packet->tx_dma_handle);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			packet->tx_dma_handle = NULL;
			e1000g_DEBUGLOG_1(Adapter, e1000g_CALLTRACE_LEVEL,
			    "Could not allocate tx dma handle: %d\n", mystat);
			goto tx_pkt_fail;
		}

		/*
		 * Pre-allocate transmit buffers for small packets whose
		 * size is less than tx_bcopy_thresh. The data of those
		 * small packets will be bcopy()'d into the transmit
		 * buffers instead of using dynamic DMA binding. For small
		 * packets, bcopy brings better performance than DMA binding.
		 */
		tx_buf = packet->tx_buf;

		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = e1000g_alloc_dvma_buffer(Adapter,
			    tx_buf, Adapter->TxBufferSize);
			break;
#endif
		case USE_DMA:
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    tx_buf, Adapter->TxBufferSize);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			ASSERT(packet->tx_dma_handle != NULL);
			switch (e1000g_dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
			e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL,
			    "Failed to allocate Tx buffer\n");
			goto tx_pkt_fail;
		}

		packet->dma_type = e1000g_dma_type;
	} /* for */

	return (DDI_SUCCESS);

tx_pkt_fail:
	e1000g_free_tx_packets(tx_ring);

	return (DDI_FAILURE);
}

static int
e1000g_alloc_rx_packets(e1000g_rx_ring_t *rx_ring)
{
	int i;
	PRX_SW_PACKET packet;
	struct e1000g *Adapter;
	uint32_t packet_num;

	Adapter = rx_ring->adapter;

	/*
	 * Allocate memory for the RX_SW_PACKET structures. Each one of these
	 * structures will contain a virtual and physical address to an actual
	 * receive buffer in host memory. Since we use one RX_SW_PACKET per
	 * received packet, the maximum number of RX_SW_PACKETs that we'll
	 * need is equal to the number of receive descriptors that we've
	 * allocated.
	 *
	 * The receive buffers are pre-allocated here; the receive interrupt
	 * handler constructs a new mp from each buffer.
	 *
	 * On Wiseman these receive buffers must be aligned on a 256-byte
	 * boundary.
	 * Vinay, Apr19,2000
	 */
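	/*
	 * Beyond the one packet per receive descriptor, NumRxFreeList
	 * extra packets are allocated for the free list, from which
	 * buffers loaned upstream are replaced.
	 */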
	packet_num = Adapter->NumRxDescriptors + Adapter->NumRxFreeList;
	rx_ring->packet_area = NULL;

	for (i = 0; i < packet_num; i++) {
		packet = e1000g_alloc_rx_sw_packet(rx_ring);
		if (packet == NULL)
			goto rx_pkt_fail;

		packet->next = rx_ring->packet_area;
		rx_ring->packet_area = packet;
	}

	return (DDI_SUCCESS);

rx_pkt_fail:
	e1000g_free_rx_packets(rx_ring);

	return (DDI_FAILURE);
}

static PRX_SW_PACKET
e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *rx_ring)
{
	int mystat;
	PRX_SW_PACKET packet;
	dma_buffer_t *rx_buf;
	struct e1000g *Adapter;

	Adapter = rx_ring->adapter;

	packet = kmem_zalloc(sizeof (RX_SW_PACKET), KM_NOSLEEP);
	if (packet == NULL) {
		e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL,
		    "Could not allocate memory for Rx SwPacket\n");
		return (NULL);
	}

	rx_buf = packet->rx_buf;

	/*
	 * Make sure that receive buffers are 256-byte aligned
	 */
	buf_dma_attr.dma_attr_align = Adapter->RcvBufferAlignment;

	switch (e1000g_dma_type) {
#ifdef __sparc
	case USE_DVMA:
		mystat = e1000g_alloc_dvma_buffer(Adapter,
		    rx_buf, Adapter->RxBufferSize);
		break;
#endif
	case USE_DMA:
		mystat = e1000g_alloc_dma_buffer(Adapter,
		    rx_buf, Adapter->RxBufferSize);
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	if (mystat != DDI_SUCCESS) {
		if (packet != NULL)
			kmem_free(packet, sizeof (RX_SW_PACKET));

		e1000g_DEBUGLOG_0(Adapter, e1000g_CALLTRACE_LEVEL,
		    "Failed to allocate Rx buffer\n");
		return (NULL);
	}

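	/*
	 * Reserve E1000G_IPALIGNROOM bytes (assumed to be 2) at the head
	 * of the buffer.  With a 14-byte Ethernet header, this leaves the
	 * IP header of a received frame 4-byte aligned.
	 */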
	rx_buf->size -= E1000G_IPALIGNROOM;
	rx_buf->address += E1000G_IPALIGNROOM;
	rx_buf->dma_address += E1000G_IPALIGNROOM;

	packet->rx_ring = (caddr_t)rx_ring;
	packet->free_rtn.free_func = e1000g_rxfree_func;
	packet->free_rtn.free_arg = (char *)packet;
	/*
	 * esballoc has been replaced with desballoc, which, although an
	 * undocumented call, may be used per Sun. It gives better
	 * efficiency.
	 */
	packet->mp = desballoc((unsigned char *)
	    rx_buf->address - E1000G_IPALIGNROOM,
	    rx_buf->size + E1000G_IPALIGNROOM,
	    BPRI_MED, &packet->free_rtn);

	if (packet->mp != NULL) {
		packet->mp->b_rptr += E1000G_IPALIGNROOM;
		packet->mp->b_wptr += E1000G_IPALIGNROOM;
	}

	packet->dma_type = e1000g_dma_type;

	return (packet);
}

void
e1000g_free_rx_sw_packet(PRX_SW_PACKET packet)
{
	dma_buffer_t *rx_buf;

	if (packet->mp != NULL) {
		freemsg(packet->mp);
		packet->mp = NULL;
	}

	rx_buf = packet->rx_buf;
	ASSERT(rx_buf->dma_handle != NULL);

	rx_buf->size += E1000G_IPALIGNROOM;
	rx_buf->address -= E1000G_IPALIGNROOM;

	switch (packet->dma_type) {
#ifdef __sparc
	case USE_DVMA:
		e1000g_free_dvma_buffer(rx_buf);
		break;
#endif
	case USE_DMA:
		e1000g_free_dma_buffer(rx_buf);
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	packet->dma_type = USE_NONE;

	kmem_free(packet, sizeof (RX_SW_PACKET));
}

static void
e1000g_free_rx_packets(e1000g_rx_ring_t *rx_ring)
{
	PRX_SW_PACKET packet, next_packet, free_list;

	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);

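	/*
	 * Packets that are still loaned upstream (E1000G_RX_SW_SENDUP)
	 * cannot be freed yet; mark them detached so that they are
	 * reclaimed when the mblk's free routine runs.  All other
	 * packets are collected on a local free list.
	 */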
	free_list = NULL;
	packet = rx_ring->packet_area;
	for (; packet != NULL; packet = next_packet) {
		next_packet = packet->next;

		if (packet->flag & E1000G_RX_SW_SENDUP) {
			e1000g_mblks_pending++;
			packet->flag |= E1000G_RX_SW_DETACHED;
			packet->next = NULL;
		} else {
			packet->next = free_list;
			free_list = packet;
		}
	}
	rx_ring->packet_area = NULL;

	rw_exit(&e1000g_rx_detach_lock);

	packet = free_list;
	for (; packet != NULL; packet = next_packet) {
		next_packet = packet->next;

		ASSERT(packet->flag == E1000G_RX_SW_FREE);
		e1000g_free_rx_sw_packet(packet);
	}
}

static void
e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	struct e1000g *Adapter;
	PTX_SW_PACKET packet;
	dma_buffer_t *tx_buf;

	Adapter = tx_ring->adapter;

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->NumTxSwPacket; j++, packet++) {

		if (packet == NULL)
			break;

		/* Free the Tx DMA handle for dynamic binding */
		if (packet->tx_dma_handle != NULL) {
			switch (packet->dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, the remaining packets
			 * do not need to be checked: they have either not
			 * been initialized or have already been freed.
			 */
			break;
		}

		tx_buf = packet->tx_buf;

		switch (packet->dma_type) {
#ifdef __sparc
		case USE_DVMA:
			e1000g_free_dvma_buffer(tx_buf);
			break;
#endif
		case USE_DMA:
			e1000g_free_dma_buffer(tx_buf);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}

		packet->dma_type = USE_NONE;
	}
	if (tx_ring->packet_area != NULL) {
		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
		tx_ring->packet_area = NULL;
	}
}

/*
 * **********************************************************************
 * Name:      e1000g_release_dma_resources				*
 *									*
 * Description:								*
 *     This function releases any pending buffers that have been	*
 *     previously allocated.						*
 *									*
 * Parameter Passed:							*
 *     Adapter - A pointer to the context-sensitive "Adapter"		*
 *     structure.							*
 *									*
 * Return Value:							*
 *     None.								*
 *									*
 * Functions called:							*
 *     e1000g_free_tx_descriptors, e1000g_free_rx_descriptors,		*
 *     e1000g_free_tx_packets, e1000g_free_rx_packets			*
 *									*
 * **********************************************************************
 */
void
e1000g_release_dma_resources(register struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

	/*
	 * Release all the handles, memory and DMA resources that are
	 * allocated for the transmit buffer descriptors.
	 */
	e1000g_free_tx_descriptors(tx_ring);

	/*
	 * Release all the handles, memory and DMA resources that are
	 * allocated for the receive buffer descriptors.
	 */
	e1000g_free_rx_descriptors(rx_ring);

	/*
	 * Free Tx packet resources
	 */
	e1000g_free_tx_packets(tx_ring);

	/*
	 * TX resources done, now free RX resources
	 */
	e1000g_free_rx_packets(rx_ring);
}
1353