/*
 * This file is provided under a CDDLv1 license.  When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2008 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms of the CDDLv1.
 */

/*
 * **********************************************************************
 * Module Name:								*
 *   e1000g_alloc.c							*
 *									*
 * Abstract:								*
 *   This file contains some routines that take care of			*
 *   memory allocation for descriptors and buffers.			*
 *									*
 * **********************************************************************
 */

#include "e1000g_sw.h"
#include "e1000g_debug.h"

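/* Size of the per-ring tx_sw_packet_t array, based on the current tx freelist size */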
#define	TX_SW_PKT_AREA_SZ \
	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)

static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *);
static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
static void e1000g_free_rx_descriptors(e1000g_rx_ring_t *);
static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_packets(e1000g_rx_ring_t *);
static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
static void e1000g_free_rx_packets(e1000g_rx_ring_t *);
static int e1000g_alloc_dma_buffer(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
static void e1000g_free_dma_buffer(dma_buffer_t *);
#ifdef __sparc
static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
static void e1000g_free_dvma_buffer(dma_buffer_t *);
#endif
static int e1000g_alloc_descriptors(struct e1000g *Adapter);
static void e1000g_free_descriptors(struct e1000g *Adapter);
static int e1000g_alloc_packets(struct e1000g *Adapter);
static void e1000g_free_packets(struct e1000g *Adapter);
static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *,
    ddi_dma_attr_t *p_dma_attr);

/* DMA access attributes for descriptors <Little Endian> */
static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/* DMA access attributes for DMA buffers */
#ifdef __sparc
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC,
};
#else
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
#endif

/* DMA attributes for tx mblk buffers */
static ddi_dma_attr_t e1000g_tx_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	MAX_COOKIES,		/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for pre-allocated rx/tx buffers */
static ddi_dma_attr_t e1000g_buf_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for rx/tx descriptors */
static ddi_dma_attr_t e1000g_desc_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	E1000_MDALIGN,		/* alignment in bytes (4K) */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

#ifdef __sparc
static ddi_dma_lim_t e1000g_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};
#endif

#ifdef __sparc
static dma_type_t e1000g_dma_type = USE_DVMA;
#else
static dma_type_t e1000g_dma_type = USE_DMA;
#endif

extern krwlock_t e1000g_dma_type_lock;


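/*
 * e1000g_alloc_dma_resources - allocate descriptor and packet DMA resources
 *
 * If an allocation fails due to resource shortage, retry with the
 * numbers of descriptors and buffers reduced by half, down to the
 * allowed minimums.
 */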
int
e1000g_alloc_dma_resources(struct e1000g *Adapter)
{
	int result;

	result = DDI_FAILURE;

	while ((result != DDI_SUCCESS) &&
	    (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
	    (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
	    (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST) &&
	    (Adapter->rx_freelist_num >= MIN_NUM_RX_FREELIST)) {

		result = e1000g_alloc_descriptors(Adapter);

		if (result == DDI_SUCCESS) {
			result = e1000g_alloc_packets(Adapter);

			if (result != DDI_SUCCESS)
				e1000g_free_descriptors(Adapter);
		}

		/*
		 * If the allocation fails due to resource shortage,
		 * we'll reduce the numbers of descriptors/buffers by
		 * half, and try the allocation again.
		 */
		if (result != DDI_SUCCESS) {
			/*
			 * Halve the counts. (n >> 4) << 3 halves n and
			 * rounds it down to a multiple of 8; the number
			 * of descriptors must always stay a multiple of 8.
			 */
			Adapter->tx_desc_num =
			    (Adapter->tx_desc_num >> 4) << 3;
			Adapter->rx_desc_num =
			    (Adapter->rx_desc_num >> 4) << 3;

			Adapter->tx_freelist_num >>= 1;
			Adapter->rx_freelist_num >>= 1;
		}
	}

	return (result);
}

/*
 * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
 *
 * This routine allocates the necessary DMA buffers for the
 *	Transmit Descriptor Area
 *	Receive Descriptor Area
 */
static int
e1000g_alloc_descriptors(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;

	result = e1000g_alloc_tx_descriptors(tx_ring);
	if (result != DDI_SUCCESS)
		return (DDI_FAILURE);

	rx_ring = Adapter->rx_ring;

	result = e1000g_alloc_rx_descriptors(rx_ring);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
e1000g_free_descriptors(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

	e1000g_free_tx_descriptors(tx_ring);
	e1000g_free_rx_descriptors(rx_ring);
}

static int
e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Solaris 7 has a problem with allocating physically contiguous memory
	 * that is aligned on a 4K boundary. The transmit and rx descriptors
	 * need to be aligned on a 4K boundary. We first try to allocate the
	 * memory with DMA attributes set to 4K alignment and also no scatter/
	 * gather mechanism specified. In most cases, this does not allocate
	 * memory aligned at a 4K boundary. We then try asking for memory
	 * aligned on a 4K boundary with scatter/gather set to 2. This works
	 * when the amount of memory is less than 4K, i.e. a page size. If
	 * neither of these options works, or if the descriptor area is larger
	 * than 4K, i.e. more than 256 descriptors, we allocate 4K extra
	 * memory and then align the memory at a 4K boundary ourselves.
	 */
	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;

	/*
	 * Memory allocation for the transmit buffer descriptors.
	 */
	dma_attr.dma_attr_sgllen = 1;

	/*
	 * Allocate a new DMA handle for the transmit descriptor
	 * memory area.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &tx_ring->tbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate tbd dma handle: %d", mystat);
		tx_ring->tbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&tx_ring->tbd_area,
	    &len, &tx_ring->tbd_acc_handle);

	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)tx_ring->tbd_area & (E1000_MDALIGN - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	if (alloc_flag)
		bzero(tx_ring->tbd_area, len);

	/*
	 * If the previous DMA attributes could not give us contiguous
	 * memory, or the descriptor area is larger than the page size,
	 * we allocate 4K extra memory and then align it at a 4K boundary
	 * ourselves.
	 */
	if (!alloc_flag) {
		size = size + ROUNDOFF;

		/*
		 * DMA attributes set to no scatter/gather and byte
		 * (1-byte) alignment
		 */
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;

		/*
		 * Allocate a new DMA handle for the transmit descriptor memory
		 * area.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &tx_ring->tbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate tbd dma handle: %d", mystat);
			tx_ring->tbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		/*
		 * Allocate memory to DMA data to and from the transmit
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&tx_ring->tbd_area,
		    &len, &tx_ring->tbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tbd dma memory: %d", mystat);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
			if (tx_ring->tbd_dma_handle != NULL) {
				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
				tx_ring->tbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the entire transmit buffer descriptor area to zero
		 */
		bzero(tx_ring->tbd_area, len);
		/*
		 * Memory has been allocated with the ddi_dma_mem_alloc call,
		 * but it is not necessarily aligned. We now align it on a 4K
		 * boundary: P2NPHASE() gives the number of bytes from the
		 * current address up to the next ROUNDOFF (4K) boundary.
		 */
		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area, ROUNDOFF);
		len = size - templong;
		templong += (uintptr_t)tx_ring->tbd_area;
		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * Transmit buffer descriptor memory allocation succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound
	 * to the memory address.
	 */
	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource: %d", mystat);
		if (tx_ring->tbd_acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);	/* 1 cookie */

	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
	tx_ring->tbd_first = tx_ring->tbd_area;
	tx_ring->tbd_last = tx_ring->tbd_first +
	    (Adapter->tx_desc_num - 1);

	return (DDI_SUCCESS);
}

static int
e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *rx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Memory allocation for the receive buffer descriptors.
	 */
	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;

	/*
	 * Asking for aligned memory with DMA attributes set for 4K alignment
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = E1000_MDALIGN;

	/*
	 * Allocate a new DMA handle for the receive descriptors
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &rx_ring->rbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate rbd dma handle: %d", mystat);
		rx_ring->rbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}
	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&rx_ring->rbd_area,
	    &len, &rx_ring->rbd_acc_handle);

	/*
	 * Check if memory allocation succeeded and also if the
	 * allocated memory is aligned correctly.
	 */
	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)rx_ring->rbd_area & (E1000_MDALIGN - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
		}
		if (rx_ring->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
			rx_ring->rbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the allocated receive descriptor memory to zero.
	 */
	if (alloc_flag)
		bzero((caddr_t)rx_ring->rbd_area, len);

	/*
	 * If memory allocation did not succeed, do the alignment ourselves
	 */
	if (!alloc_flag) {
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;
		size = size + ROUNDOFF;
		/*
		 * Allocate a new DMA handle for the receive descriptor.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &rx_ring->rbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate rbd dma handle: %d", mystat);
			rx_ring->rbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}
		/*
		 * Allocate memory to DMA data to and from the receive
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&rx_ring->rbd_area,
		    &len, &rx_ring->rbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate rbd dma memory: %d", mystat);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
			if (rx_ring->rbd_dma_handle != NULL) {
				ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
				rx_ring->rbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the allocated receive descriptor memory to zero.
		 */
		bzero((caddr_t)rx_ring->rbd_area, len);
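		/*
		 * Align the base address to the next 4K boundary, exactly
		 * as done for the transmit descriptors above.
		 */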
		templong = P2NPHASE((uintptr_t)rx_ring->rbd_area, ROUNDOFF);
		len = size - templong;
		templong += (uintptr_t)rx_ring->rbd_area;
		rx_ring->rbd_area = (struct e1000_rx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * The memory allocation of the receive descriptors succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	mystat = ddi_dma_addr_bind_handle(rx_ring->rbd_dma_handle,
	    (struct as *)NULL, (caddr_t)rx_ring->rbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource: %d", mystat);
		if (rx_ring->rbd_acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
		}
		if (rx_ring->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
			rx_ring->rbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);
	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_rx_descriptors(rx_ring);
		return (DDI_FAILURE);
	}

	rx_ring->rbd_dma_addr = cookie.dmac_laddress;
	rx_ring->rbd_first = rx_ring->rbd_area;
	rx_ring->rbd_last = rx_ring->rbd_first +
	    (Adapter->rx_desc_num - 1);

	return (DDI_SUCCESS);
}

static void
e1000g_free_rx_descriptors(e1000g_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_ring->rbd_dma_handle);
	}
	if (rx_ring->rbd_acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
		rx_ring->rbd_acc_handle = NULL;
		rx_ring->rbd_area = NULL;
	}
	if (rx_ring->rbd_dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
		rx_ring->rbd_dma_handle = NULL;
	}
	rx_ring->rbd_dma_addr = NULL;
	rx_ring->rbd_first = NULL;
	rx_ring->rbd_last = NULL;
}

static void
e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
	}
	if (tx_ring->tbd_acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
		tx_ring->tbd_acc_handle = NULL;
		tx_ring->tbd_area = NULL;
	}
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
		tx_ring->tbd_dma_handle = NULL;
	}
	tx_ring->tbd_dma_addr = NULL;
	tx_ring->tbd_first = NULL;
	tx_ring->tbd_last = NULL;
}


/*
 * e1000g_alloc_packets - allocate DMA buffers for rx/tx
 *
 * This routine allocates the necessary buffers for the
 *	 Transmit sw packet structure
 *	 DMA handle for Transmit
 *	 DMA buffer for Transmit
 *	 Receive sw packet structure
 *	 DMA buffer for Receive
 */
static int
e1000g_alloc_packets(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

again:
	rw_enter(&e1000g_dma_type_lock, RW_READER);

	result = e1000g_alloc_tx_packets(tx_ring);
	if (result != DDI_SUCCESS) {
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Tx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Tx packets\n");
		return (DDI_FAILURE);
	}

	result = e1000g_alloc_rx_packets(rx_ring);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_packets(tx_ring);
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Rx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Rx packets\n");
		return (DDI_FAILURE);
	}

	rw_exit(&e1000g_dma_type_lock);

	return (DDI_SUCCESS);
}

static void
e1000g_free_packets(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

	e1000g_free_tx_packets(tx_ring);
	e1000g_free_rx_packets(rx_ring);
}

#ifdef __sparc
static int
e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = dvma_reserve(devinfo,
	    &e1000g_dma_limits,
	    Adapter->dvma_page_num,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	buf->address = kmem_alloc(size, KM_NOSLEEP);

	if (buf->address == NULL) {
		if (buf->dma_handle != NULL) {
			dvma_release(buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer memory\n");
		return (DDI_FAILURE);
	}

	dvma_kaddr_load(buf->dma_handle,
	    buf->address, size, 0, &cookie);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = size;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dvma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		dvma_unload(buf->dma_handle, 0, -1);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->address != NULL) {
		kmem_free(buf->address, buf->size);
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		dvma_release(buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
#endif

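/*
 * e1000g_alloc_dma_buffer - allocate, map and bind a DMA buffer that
 * must fit in a single DMA cookie
 */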
static int
e1000g_alloc_dma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->address,
	    &len, &buf->acc_handle);

	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

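/*
 * e1000g_alloc_tx_packets - allocate the tx_sw_packet area along with the
 * per-packet DMA handles and bcopy buffers used for transmit
 */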
static int
e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	p_tx_sw_packet_t packet;
	int mystat;
	dma_buffer_t *tx_buf;
	struct e1000g *Adapter;
	dev_info_t *devinfo;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;
	dma_attr = e1000g_buf_dma_attr;

	/*
	 * Memory allocation for the Transmit software structure, the transmit
	 * software packet. This structure stores all the relevant information
	 * for transmitting a single packet.
	 */
	tx_ring->packet_area =
	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);

	if (tx_ring->packet_area == NULL)
		return (DDI_FAILURE);

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		ASSERT(packet != NULL);

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting. The
		 * dynamic binding only applies for the packets that are larger
		 * than the tx_bcopy_thresh.
		 */
		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = dvma_reserve(devinfo,
			    &e1000g_dma_limits,
			    Adapter->dvma_page_num,
			    &packet->tx_dma_handle);
			break;
#endif
		case USE_DMA:
			mystat = ddi_dma_alloc_handle(devinfo,
			    &e1000g_tx_dma_attr,
			    DDI_DMA_DONTWAIT, 0,
			    &packet->tx_dma_handle);
			break;
		default:
			ASSERT(B_FALSE);
			mystat = DDI_FAILURE;
			break;
		}
		if (mystat != DDI_SUCCESS) {
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tx dma handle: %d\n", mystat);
			goto tx_pkt_fail;
		}

		/*
		 * Pre-allocate transmit buffers for small packets whose
		 * size is less than tx_bcopy_thresh. The data of those
		 * small packets will be bcopy()'d into the transmit
		 * buffers instead of using dynamic DMA binding. For small
		 * packets, bcopy gives better performance than DMA binding.
		 */
		tx_buf = packet->tx_buf;

		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = e1000g_alloc_dvma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size);
			break;
#endif
		case USE_DMA:
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size, &dma_attr);
			break;
		default:
			ASSERT(B_FALSE);
			mystat = DDI_FAILURE;
			break;
		}
		if (mystat != DDI_SUCCESS) {
			ASSERT(packet->tx_dma_handle != NULL);
			switch (e1000g_dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Failed to allocate Tx buffer\n");
			goto tx_pkt_fail;
		}

		packet->dma_type = e1000g_dma_type;
	} /* for */

	return (DDI_SUCCESS);

tx_pkt_fail:
	e1000g_free_tx_packets(tx_ring);

	return (DDI_FAILURE);
}

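/*
 * e1000g_alloc_rx_packets - allocate the rx_sw_packet structures and
 * their receive buffers for the rx ring
 */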
static int
e1000g_alloc_rx_packets(e1000g_rx_ring_t *rx_ring)
{
	int i;
	p_rx_sw_packet_t packet;
	struct e1000g *Adapter;
	uint32_t packet_num;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_ring->adapter;
	dma_attr = e1000g_buf_dma_attr;
	dma_attr.dma_attr_align = Adapter->rx_buf_align;

	/*
	 * Allocate memory for the rx_sw_packet structures. Each one of these
	 * structures will contain a virtual and physical address to an actual
	 * receive buffer in host memory. Since we use one rx_sw_packet per
	 * received packet, the number of rx_sw_packet structures that we need
	 * is the number of receive descriptors plus the size of the receive
	 * free list.
	 */
	packet_num = Adapter->rx_desc_num + Adapter->rx_freelist_num;
	rx_ring->packet_area = NULL;

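	/*
	 * Build the packet area as a singly linked list: each newly
	 * allocated rx_sw_packet is pushed onto the head of the list.
	 */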
	for (i = 0; i < packet_num; i++) {
		packet = e1000g_alloc_rx_sw_packet(rx_ring, &dma_attr);
		if (packet == NULL)
			goto rx_pkt_fail;

		packet->next = rx_ring->packet_area;
		rx_ring->packet_area = packet;
	}

	return (DDI_SUCCESS);

rx_pkt_fail:
	e1000g_free_rx_packets(rx_ring);

	return (DDI_FAILURE);
}

static p_rx_sw_packet_t
e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *rx_ring, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	p_rx_sw_packet_t packet;
	dma_buffer_t *rx_buf;
	struct e1000g *Adapter;

	Adapter = rx_ring->adapter;

	packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
	if (packet == NULL) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate memory for Rx SwPacket\n");
		return (NULL);
	}

	rx_buf = packet->rx_buf;

	switch (e1000g_dma_type) {
#ifdef __sparc
	case USE_DVMA:
		mystat = e1000g_alloc_dvma_buffer(Adapter,
		    rx_buf, Adapter->rx_buffer_size);
		break;
#endif
	case USE_DMA:
		mystat = e1000g_alloc_dma_buffer(Adapter,
		    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		break;
	default:
		ASSERT(B_FALSE);
		mystat = DDI_FAILURE;
		break;
	}

	if (mystat != DDI_SUCCESS) {
		if (packet != NULL)
			kmem_free(packet, sizeof (rx_sw_packet_t));

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate Rx buffer\n");
		return (NULL);
	}

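	/*
	 * Reserve E1000G_IPALIGNROOM bytes at the head of the receive
	 * buffer so that, after the 14-byte Ethernet header, the IP
	 * header starts on a 4-byte aligned address.
	 */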
	rx_buf->size -= E1000G_IPALIGNROOM;
	rx_buf->address += E1000G_IPALIGNROOM;
	rx_buf->dma_address += E1000G_IPALIGNROOM;

	packet->rx_ring = (caddr_t)rx_ring;
	packet->free_rtn.free_func = e1000g_rxfree_func;
	packet->free_rtn.free_arg = (char *)packet;
	/*
	 * desballoc() is used here instead of esballoc(). Although it is
	 * an undocumented call, Sun has confirmed that we may use it, and
	 * it is more efficient.
	 */
	packet->mp = desballoc((unsigned char *)
	    rx_buf->address - E1000G_IPALIGNROOM,
	    rx_buf->size + E1000G_IPALIGNROOM,
	    BPRI_MED, &packet->free_rtn);

	if (packet->mp != NULL) {
		packet->mp->b_rptr += E1000G_IPALIGNROOM;
		packet->mp->b_wptr += E1000G_IPALIGNROOM;
	}

	packet->dma_type = e1000g_dma_type;

	return (packet);
}

void
e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet)
{
	dma_buffer_t *rx_buf;

	if (packet->mp != NULL) {
		freemsg(packet->mp);
		packet->mp = NULL;
	}

	rx_buf = packet->rx_buf;
	ASSERT(rx_buf->dma_handle != NULL);

	rx_buf->size += E1000G_IPALIGNROOM;
	rx_buf->address -= E1000G_IPALIGNROOM;

	switch (packet->dma_type) {
#ifdef __sparc
	case USE_DVMA:
		e1000g_free_dvma_buffer(rx_buf);
		break;
#endif
	case USE_DMA:
		e1000g_free_dma_buffer(rx_buf);
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	packet->dma_type = USE_NONE;

	kmem_free(packet, sizeof (rx_sw_packet_t));
}

static void
e1000g_free_rx_packets(e1000g_rx_ring_t *rx_ring)
{
	p_rx_sw_packet_t packet, next_packet, free_list;

	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);

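	/*
	 * Packets whose mblks were loaned up the stack
	 * (E1000G_RX_SW_SENDUP) are still owned by the upper layer and
	 * cannot be freed here; they are moved to the pending list and
	 * reclaimed later by the desballoc free routine
	 * (e1000g_rxfree_func). All other packets are freed immediately.
	 */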
	free_list = NULL;
	packet = rx_ring->packet_area;
	for (; packet != NULL; packet = next_packet) {
		next_packet = packet->next;

		if (packet->flag == E1000G_RX_SW_SENDUP) {
			rx_ring->pending_count++;
			e1000g_mblks_pending++;
			packet->flag = E1000G_RX_SW_STOP;
			packet->next = rx_ring->pending_list;
			rx_ring->pending_list = packet;
		} else {
			packet->next = free_list;
			free_list = packet;
		}
	}
	rx_ring->packet_area = NULL;

	rw_exit(&e1000g_rx_detach_lock);

	packet = free_list;
	for (; packet != NULL; packet = next_packet) {
		next_packet = packet->next;

		ASSERT(packet->flag == E1000G_RX_SW_FREE);
		e1000g_free_rx_sw_packet(packet);
	}
}

static void
e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	struct e1000g *Adapter;
	p_tx_sw_packet_t packet;
	dma_buffer_t *tx_buf;

	Adapter = tx_ring->adapter;

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		if (packet == NULL)
			break;

		/* Free the Tx DMA handle used for dynamic binding */
		if (packet->tx_dma_handle != NULL) {
			switch (packet->dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, we don't need to
			 * check the remaining packets: they either have
			 * not been initialized or have already been freed.
			 */
			break;
		}

		tx_buf = packet->tx_buf;

		switch (packet->dma_type) {
#ifdef __sparc
		case USE_DVMA:
			e1000g_free_dvma_buffer(tx_buf);
			break;
#endif
		case USE_DMA:
			e1000g_free_dma_buffer(tx_buf);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}

		packet->dma_type = USE_NONE;
	}
	if (tx_ring->packet_area != NULL) {
		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
		tx_ring->packet_area = NULL;
	}
}

/*
 * e1000g_release_dma_resources - release allocated DMA resources
 *
 * This function releases any pending buffers that have been
 * previously allocated
 */
void
e1000g_release_dma_resources(struct e1000g *Adapter)
{
	e1000g_free_descriptors(Adapter);
	e1000g_free_packets(Adapter);
}

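/*
 * e1000g_set_fma_flags - configure the access and DMA attributes used
 * in this file according to which FMA capabilities are enabled
 */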
/* ARGSUSED */
void
e1000g_set_fma_flags(struct e1000g *Adapter, int acc_flag, int dma_flag)
{
	if (acc_flag) {
		e1000g_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		e1000g_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (dma_flag) {
		e1000g_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		e1000g_tx_dma_attr.dma_attr_flags = 0;
		e1000g_buf_dma_attr.dma_attr_flags = 0;
		e1000g_desc_dma_attr.dma_attr_flags = 0;
	}
}
1347