/*
 * This file is provided under a CDDLv1 license.  When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2008 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms of the CDDLv1.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * **********************************************************************
 * Module Name:								*
 *   e1000g_alloc.c							*
 *									*
 * Abstract:								*
 *   This file contains some routines that take care of			*
 *   memory allocation for descriptors and buffers.			*
 *									*
 * **********************************************************************
 */

#include "e1000g_sw.h"
#include "e1000g_debug.h"

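/*
 * Total size in bytes of the tx_sw_packet array. Note that the macro
 * expands a local variable named "Adapter", so it may only be used in
 * functions where one is in scope.
 */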
#define	TX_SW_PKT_AREA_SZ \
	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)

static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *);
static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
static void e1000g_free_rx_descriptors(e1000g_rx_ring_t *);
static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_packets(e1000g_rx_ring_t *);
static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
static void e1000g_free_rx_packets(e1000g_rx_ring_t *);
static int e1000g_alloc_dma_buffer(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
static void e1000g_free_dma_buffer(dma_buffer_t *);
#ifdef __sparc
static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
static void e1000g_free_dvma_buffer(dma_buffer_t *);
#endif
static int e1000g_alloc_descriptors(struct e1000g *Adapter);
static void e1000g_free_descriptors(struct e1000g *Adapter);
static int e1000g_alloc_packets(struct e1000g *Adapter);
static void e1000g_free_packets(struct e1000g *Adapter);
static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *,
    ddi_dma_attr_t *p_dma_attr);

/* DMA access attributes for descriptors <Little Endian> */
static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/* DMA access attributes for DMA buffers */
#ifdef __sparc
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC,
};
#else
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
#endif

/* DMA attributes for tx mblk buffers */
static ddi_dma_attr_t e1000g_tx_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	16,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};
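
/*
 * Note: tx mblk buffers may be bound with up to 16 scatter/gather
 * segments (sgllen == 16 above), while the pre-allocated buffers and
 * the descriptor areas below must each fit in a single DMA cookie
 * (sgllen == 1).
 */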

/* DMA attributes for pre-allocated rx/tx buffers */
static ddi_dma_attr_t e1000g_buf_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for rx/tx descriptors */
static ddi_dma_attr_t e1000g_desc_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	E1000_MDALIGN,		/* alignment in bytes 4K! */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

#ifdef __sparc
static ddi_dma_lim_t e1000g_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};
#endif

#ifdef __sparc
static dma_type_t e1000g_dma_type = USE_DVMA;
#else
static dma_type_t e1000g_dma_type = USE_DMA;
#endif

extern krwlock_t e1000g_dma_type_lock;


int
e1000g_alloc_dma_resources(struct e1000g *Adapter)
{
	int result;

	result = DDI_FAILURE;

	while ((result != DDI_SUCCESS) &&
	    (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
	    (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
	    (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST) &&
	    (Adapter->rx_freelist_num >= MIN_NUM_RX_FREELIST)) {

		result = e1000g_alloc_descriptors(Adapter);

		if (result == DDI_SUCCESS) {
			result = e1000g_alloc_packets(Adapter);

			if (result != DDI_SUCCESS)
				e1000g_free_descriptors(Adapter);
		}

		/*
		 * If the allocation fails due to resource shortage,
		 * we'll reduce the number of descriptors/buffers by
		 * half, and try the allocation again.
		 */
		if (result != DDI_SUCCESS) {
			/*
			 * We must ensure the number of descriptors
			 * is always a multiple of 8.
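			 * Shifting right by 4 then left by 3 both halves
			 * the count and rounds it down to a multiple of
			 * 8; e.g. 4096 becomes 2048 and 520 becomes 256.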
			 */
			Adapter->tx_desc_num =
			    (Adapter->tx_desc_num >> 4) << 3;
			Adapter->rx_desc_num =
			    (Adapter->rx_desc_num >> 4) << 3;

			Adapter->tx_freelist_num >>= 1;
			Adapter->rx_freelist_num >>= 1;
		}
	}

	return (result);
}

/*
 * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
 *
 * This routine allocates the necessary DMA buffers for the
 *	Transmit Descriptor Area
 *	Receive Descriptor Area
 */
static int
e1000g_alloc_descriptors(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;

	result = e1000g_alloc_tx_descriptors(tx_ring);
	if (result != DDI_SUCCESS)
		return (DDI_FAILURE);

	rx_ring = Adapter->rx_ring;

	result = e1000g_alloc_rx_descriptors(rx_ring);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
e1000g_free_descriptors(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

	e1000g_free_tx_descriptors(tx_ring);
	e1000g_free_rx_descriptors(rx_ring);
}

static int
e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Solaris 7 has a problem with allocating physically contiguous memory
	 * that is aligned on a 4K boundary. The transmit and rx descriptors
	 * need to be aligned on a 4K boundary. We first try to allocate the
	 * memory with DMA attributes set to 4K alignment and also with no
	 * scatter/gather mechanism specified. In most cases, this does not
	 * allocate memory aligned at a 4K boundary. We then try asking for
	 * memory aligned on a 4K boundary with scatter/gather set to 2. This
	 * works when the amount of memory is less than 4K, i.e. a page size.
	 * If neither of these options works, or if the descriptor area is
	 * larger than 4K (i.e. more than 256 descriptors), we allocate 4K
	 * extra memory and then align the memory at a 4K boundary ourselves.
	 */
	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;

	/*
	 * Memory allocation for the transmit buffer descriptors.
	 */
	dma_attr.dma_attr_sgllen = 1;

	/*
	 * Allocate a new DMA handle for the transmit descriptor
	 * memory area.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &tx_ring->tbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate tbd dma handle: %d", mystat);
		tx_ring->tbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&tx_ring->tbd_area,
	    &len, &tx_ring->tbd_acc_handle);

	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)tx_ring->tbd_area & (E1000_MDALIGN - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	if (alloc_flag)
		bzero(tx_ring->tbd_area, len);

	/*
	 * If the previous DMA attributes setting could not give us contiguous
	 * memory, or the descriptor area is larger than the page size, we
	 * allocate 4K extra memory and then align it at a 4K boundary.
	 */
	if (!alloc_flag) {
		size = size + ROUNDOFF;

		/*
		 * DMA attributes set to no scatter/gather and byte alignment
		 */
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;

		/*
		 * Allocate a new DMA handle for the transmit descriptor memory
		 * area.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &tx_ring->tbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate tbd dma handle: %d", mystat);
			tx_ring->tbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		/*
		 * Allocate memory to DMA data to and from the transmit
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&tx_ring->tbd_area,
		    &len, &tx_ring->tbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tbd dma memory: %d", mystat);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
			if (tx_ring->tbd_dma_handle != NULL) {
				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
				tx_ring->tbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the entire transmit buffer descriptor area to zero
		 */
		bzero(tx_ring->tbd_area, len);
		/*
		 * Memory has been allocated with the ddi_dma_mem_alloc call,
		 * but has not been aligned. We now align it on a 4k boundary.
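		 * P2NPHASE(addr, align) returns the number of bytes by
		 * which addr must be advanced to reach the next align
		 * boundary; e.g. P2NPHASE(0x1010, 0x1000) is 0xff0, so
		 * 0x1010 + 0xff0 lands on the 4K boundary 0x2000.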
		 */
		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area, ROUNDOFF);
		len = size - templong;
		templong += (uintptr_t)tx_ring->tbd_area;
		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * Transmit buffer descriptor memory allocation succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound to
	 * the memory address.
	 */
	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource: %d", mystat);
		if (tx_ring->tbd_acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);	/* 1 cookie */

	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
	tx_ring->tbd_first = tx_ring->tbd_area;
	tx_ring->tbd_last = tx_ring->tbd_first +
	    (Adapter->tx_desc_num - 1);

	return (DDI_SUCCESS);
}

static int
e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *rx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Memory allocation for the receive buffer descriptors.
	 */
	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;

	/*
	 * Asking for aligned memory with DMA attributes set for 4k alignment
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = E1000_MDALIGN;

	/*
	 * Allocate a new DMA handle for the receive descriptors
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &rx_ring->rbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate rbd dma handle: %d", mystat);
		rx_ring->rbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}
	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&rx_ring->rbd_area,
	    &len, &rx_ring->rbd_acc_handle);

	/*
	 * Check if memory allocation succeeded and also if the
	 * allocated memory is aligned correctly.
	 */
	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)rx_ring->rbd_area & (E1000_MDALIGN - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
		}
		if (rx_ring->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
			rx_ring->rbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the allocated receive descriptor memory to zero.
	 */
	if (alloc_flag)
		bzero((caddr_t)rx_ring->rbd_area, len);

	/*
	 * If memory allocation did not succeed, do the alignment ourselves
	 */
	if (!alloc_flag) {
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;
		size = size + ROUNDOFF;
		/*
		 * Allocate a new DMA handle for the receive descriptor.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &rx_ring->rbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate rbd dma handle: %d", mystat);
			rx_ring->rbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}
		/*
		 * Allocate memory to DMA data to and from the receive
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&rx_ring->rbd_area,
		    &len, &rx_ring->rbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate rbd dma memory: %d", mystat);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
			if (rx_ring->rbd_dma_handle != NULL) {
				ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
				rx_ring->rbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the allocated receive descriptor memory to zero.
		 */
		bzero((caddr_t)rx_ring->rbd_area, len);
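		/*
		 * Align rbd_area up to the next 4K boundary, exactly as
		 * done for the tx descriptor area above.
		 */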
		templong = P2NPHASE((uintptr_t)rx_ring->rbd_area, ROUNDOFF);
		len = size - templong;
		templong += (uintptr_t)rx_ring->rbd_area;
		rx_ring->rbd_area = (struct e1000_rx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * The memory allocation of the receive descriptors succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	mystat = ddi_dma_addr_bind_handle(rx_ring->rbd_dma_handle,
	    (struct as *)NULL, (caddr_t)rx_ring->rbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource: %d", mystat);
		if (rx_ring->rbd_acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
		}
		if (rx_ring->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
			rx_ring->rbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);
	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_rx_descriptors(rx_ring);
		return (DDI_FAILURE);
	}

	rx_ring->rbd_dma_addr = cookie.dmac_laddress;
	rx_ring->rbd_first = rx_ring->rbd_area;
	rx_ring->rbd_last = rx_ring->rbd_first +
	    (Adapter->rx_desc_num - 1);

	return (DDI_SUCCESS);
}

static void
e1000g_free_rx_descriptors(e1000g_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_dma_handle != NULL) {
		ddi_dma_unbind_handle(rx_ring->rbd_dma_handle);
	}
	if (rx_ring->rbd_acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
		rx_ring->rbd_acc_handle = NULL;
		rx_ring->rbd_area = NULL;
	}
	if (rx_ring->rbd_dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
		rx_ring->rbd_dma_handle = NULL;
	}
	rx_ring->rbd_dma_addr = NULL;
	rx_ring->rbd_first = NULL;
	rx_ring->rbd_last = NULL;
}

static void
e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
	}
	if (tx_ring->tbd_acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
		tx_ring->tbd_acc_handle = NULL;
		tx_ring->tbd_area = NULL;
	}
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
		tx_ring->tbd_dma_handle = NULL;
	}
	tx_ring->tbd_dma_addr = NULL;
	tx_ring->tbd_first = NULL;
	tx_ring->tbd_last = NULL;
}


/*
 * e1000g_alloc_packets - allocate DMA buffers for rx/tx
 *
 * This routine allocates the necessary buffers for the
 *	 Transmit sw packet structure
 *	 DMA handle for Transmit
 *	 DMA buffer for Transmit
 *	 Receive sw packet structure
 *	 DMA buffer for Receive
 */
static int
e1000g_alloc_packets(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

again:
	rw_enter(&e1000g_dma_type_lock, RW_READER);

	result = e1000g_alloc_tx_packets(tx_ring);
	if (result != DDI_SUCCESS) {
		if (e1000g_dma_type == USE_DVMA) {
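			/*
			 * Fall back from DVMA to DMA for the whole driver:
			 * drop the reader lock and re-acquire it as writer
			 * before changing the global dma type, then retry
			 * the allocation from the top.
			 */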
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Tx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Tx packets\n");
		return (DDI_FAILURE);
	}

	result = e1000g_alloc_rx_packets(rx_ring);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_packets(tx_ring);
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Rx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Rx packets\n");
		return (DDI_FAILURE);
	}

	rw_exit(&e1000g_dma_type_lock);

	return (DDI_SUCCESS);
}

static void
e1000g_free_packets(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

	e1000g_free_tx_packets(tx_ring);
	e1000g_free_rx_packets(rx_ring);
}

#ifdef __sparc
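/*
 * e1000g_alloc_dvma_buffer - allocate a buffer using the sparc fast
 * DVMA interfaces: reserve DVMA space up front, back it with kmem
 * memory, and load the kernel-address mapping into the reserved range.
 */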
static int
e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = dvma_reserve(devinfo,
	    &e1000g_dma_limits,
	    Adapter->dvma_page_num,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	buf->address = kmem_alloc(size, KM_NOSLEEP);

	if (buf->address == NULL) {
		if (buf->dma_handle != NULL) {
			dvma_release(buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer memory\n");
		return (DDI_FAILURE);
	}

	dvma_kaddr_load(buf->dma_handle,
	    buf->address, size, 0, &cookie);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = size;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dvma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		dvma_unload(buf->dma_handle, 0, -1);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->address != NULL) {
		kmem_free(buf->address, buf->size);
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		dvma_release(buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
#endif

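/*
 * e1000g_alloc_dma_buffer - allocate a buffer via the standard DDI
 * three-step sequence: allocate a DMA handle, allocate DMA-able memory
 * for it, then bind the handle to that memory so the device is given a
 * single cookie (DMA address/length pair).
 */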
static int
e1000g_alloc_dma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->address,
	    &len, &buf->acc_handle);

	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		ddi_dma_unbind_handle(buf->dma_handle);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

static int
e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	p_tx_sw_packet_t packet;
	int mystat;
	dma_buffer_t *tx_buf;
	struct e1000g *Adapter;
	dev_info_t *devinfo;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;
	dma_attr = e1000g_buf_dma_attr;

	/*
	 * Memory allocation for the Transmit software structure, the transmit
	 * software packet. This structure stores all the relevant information
	 * for transmitting a single packet.
	 */
	tx_ring->packet_area =
	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);

	if (tx_ring->packet_area == NULL)
		return (DDI_FAILURE);

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		ASSERT(packet != NULL);

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting. The
		 * dynamic binding only applies to packets that are larger
		 * than tx_bcopy_thresh.
		 */
		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = dvma_reserve(devinfo,
			    &e1000g_dma_limits,
			    Adapter->dvma_page_num,
			    &packet->tx_dma_handle);
			break;
#endif
		case USE_DMA:
			mystat = ddi_dma_alloc_handle(devinfo,
			    &e1000g_tx_dma_attr,
			    DDI_DMA_DONTWAIT, 0,
			    &packet->tx_dma_handle);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tx dma handle: %d\n", mystat);
			goto tx_pkt_fail;
		}

		/*
		 * Pre-allocate transmit buffers for small packets whose
		 * size is less than tx_bcopy_thresh. The data of those
		 * small packets will be bcopy()'d into the transmit buffers
		 * instead of using dynamic DMA binding. For small packets,
		 * bcopy gives better performance than DMA binding.
		 */
		tx_buf = packet->tx_buf;

		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = e1000g_alloc_dvma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size);
			break;
#endif
		case USE_DMA:
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size, &dma_attr);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			ASSERT(packet->tx_dma_handle != NULL);
			switch (e1000g_dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Failed to allocate Tx buffer\n");
			goto tx_pkt_fail;
		}

		packet->dma_type = e1000g_dma_type;
	} /* for */

	return (DDI_SUCCESS);

tx_pkt_fail:
	e1000g_free_tx_packets(tx_ring);

	return (DDI_FAILURE);
}

static int
e1000g_alloc_rx_packets(e1000g_rx_ring_t *rx_ring)
{
	int i;
	p_rx_sw_packet_t packet;
	struct e1000g *Adapter;
	uint32_t packet_num;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_ring->adapter;
	dma_attr = e1000g_buf_dma_attr;
	dma_attr.dma_attr_align = Adapter->rx_buf_align;

	/*
	 * Allocate memory for the rx_sw_packet structures. Each one of these
	 * structures will contain a virtual and physical address to an actual
	 * receive buffer in host memory. Since we use one rx_sw_packet per
	 * received packet, the number of rx_sw_packet structures that we'll
	 * need is equal to the number of receive descriptors plus the number
	 * of packets on the receive free list.
	 */
	packet_num = Adapter->rx_desc_num + Adapter->rx_freelist_num;
	rx_ring->packet_area = NULL;

	for (i = 0; i < packet_num; i++) {
		packet = e1000g_alloc_rx_sw_packet(rx_ring, &dma_attr);
		if (packet == NULL)
			goto rx_pkt_fail;

		packet->next = rx_ring->packet_area;
		rx_ring->packet_area = packet;
	}

	return (DDI_SUCCESS);

rx_pkt_fail:
	e1000g_free_rx_packets(rx_ring);

	return (DDI_FAILURE);
}

static p_rx_sw_packet_t
e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *rx_ring, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	p_rx_sw_packet_t packet;
	dma_buffer_t *rx_buf;
	struct e1000g *Adapter;

	Adapter = rx_ring->adapter;

	packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
	if (packet == NULL) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate memory for Rx SwPacket\n");
		return (NULL);
	}

	rx_buf = packet->rx_buf;

	switch (e1000g_dma_type) {
#ifdef __sparc
	case USE_DVMA:
		mystat = e1000g_alloc_dvma_buffer(Adapter,
		    rx_buf, Adapter->rx_buffer_size);
		break;
#endif
	case USE_DMA:
		mystat = e1000g_alloc_dma_buffer(Adapter,
		    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	if (mystat != DDI_SUCCESS) {
		if (packet != NULL)
			kmem_free(packet, sizeof (rx_sw_packet_t));

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate Rx buffer\n");
		return (NULL);
	}

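	/*
	 * Reserve E1000G_IPALIGNROOM bytes of headroom so that the IP
	 * header, which follows the 14-byte Ethernet header, lands on a
	 * 4-byte boundary.
	 */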
	rx_buf->size -= E1000G_IPALIGNROOM;
	rx_buf->address += E1000G_IPALIGNROOM;
	rx_buf->dma_address += E1000G_IPALIGNROOM;

	packet->rx_ring = (caddr_t)rx_ring;
	packet->free_rtn.free_func = e1000g_rxfree_func;
	packet->free_rtn.free_arg = (char *)packet;
	/*
	 * esballoc was changed to desballoc. desballoc is an undocumented
	 * call, but per Sun we may use it; it gives better efficiency.
	 */
	packet->mp = desballoc((unsigned char *)
	    rx_buf->address - E1000G_IPALIGNROOM,
	    rx_buf->size + E1000G_IPALIGNROOM,
	    BPRI_MED, &packet->free_rtn);

	if (packet->mp != NULL) {
		packet->mp->b_rptr += E1000G_IPALIGNROOM;
		packet->mp->b_wptr += E1000G_IPALIGNROOM;
	}

	packet->dma_type = e1000g_dma_type;

	return (packet);
}

void
e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet)
{
	dma_buffer_t *rx_buf;

	if (packet->mp != NULL) {
		freemsg(packet->mp);
		packet->mp = NULL;
	}

	rx_buf = packet->rx_buf;
	ASSERT(rx_buf->dma_handle != NULL);

	rx_buf->size += E1000G_IPALIGNROOM;
	rx_buf->address -= E1000G_IPALIGNROOM;

	switch (packet->dma_type) {
#ifdef __sparc
	case USE_DVMA:
		e1000g_free_dvma_buffer(rx_buf);
		break;
#endif
	case USE_DMA:
		e1000g_free_dma_buffer(rx_buf);
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	packet->dma_type = USE_NONE;

	kmem_free(packet, sizeof (rx_sw_packet_t));
}

static void
e1000g_free_rx_packets(e1000g_rx_ring_t *rx_ring)
{
	p_rx_sw_packet_t packet, next_packet, free_list;

	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);

	free_list = NULL;
	packet = rx_ring->packet_area;
	for (; packet != NULL; packet = next_packet) {
		next_packet = packet->next;

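		/*
		 * Packets still loaned upstream (SENDUP) cannot be freed
		 * here; park them on the pending list so the desballoc
		 * free routine can reclaim them when the mblk is freed.
		 */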
		if (packet->flag == E1000G_RX_SW_SENDUP) {
			rx_ring->pending_count++;
			e1000g_mblks_pending++;
			packet->flag = E1000G_RX_SW_STOP;
			packet->next = rx_ring->pending_list;
			rx_ring->pending_list = packet;
		} else {
			packet->next = free_list;
			free_list = packet;
		}
	}
	rx_ring->packet_area = NULL;

	rw_exit(&e1000g_rx_detach_lock);

	packet = free_list;
	for (; packet != NULL; packet = next_packet) {
		next_packet = packet->next;

		ASSERT(packet->flag == E1000G_RX_SW_FREE);
		e1000g_free_rx_sw_packet(packet);
	}
}

static void
e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	struct e1000g *Adapter;
	p_tx_sw_packet_t packet;
	dma_buffer_t *tx_buf;

	Adapter = tx_ring->adapter;

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		if (packet == NULL)
			break;

		/* Free the Tx DMA handle for dynamic binding */
		if (packet->tx_dma_handle != NULL) {
			switch (packet->dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, then we don't
			 * need to check the remaining packets, because
			 * they have not been initialized or have
			 * already been freed.
			 */
			break;
		}

		tx_buf = packet->tx_buf;

		switch (packet->dma_type) {
#ifdef __sparc
		case USE_DVMA:
			e1000g_free_dvma_buffer(tx_buf);
			break;
#endif
		case USE_DMA:
			e1000g_free_dma_buffer(tx_buf);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}

		packet->dma_type = USE_NONE;
	}
	if (tx_ring->packet_area != NULL) {
		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
		tx_ring->packet_area = NULL;
	}
}

/*
 * e1000g_release_dma_resources - release allocated DMA resources
 *
 * This function releases the descriptors and packet buffers that
 * have been previously allocated.
 */
void
e1000g_release_dma_resources(struct e1000g *Adapter)
{
	e1000g_free_descriptors(Adapter);
	e1000g_free_packets(Adapter);
}

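/*
 * e1000g_set_fma_flags - toggle DDI access (acc_flag) and DMA (dma_flag)
 * error reporting in the shared attribute structures used for all
 * subsequent allocations.
 */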
void
e1000g_set_fma_flags(struct e1000g *Adapter, int acc_flag, int dma_flag)
{
	if (acc_flag) {
		e1000g_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		e1000g_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (dma_flag) {
		e1000g_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		e1000g_tx_dma_attr.dma_attr_flags = 0;
		e1000g_buf_dma_attr.dma_attr_flags = 0;
		e1000g_desc_dma_attr.dma_attr_flags = 0;
	}
}
1348