/*
 * This file is provided under a CDDLv1 license.  When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2008 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms of the CDDLv1.
 */

/*
 * **********************************************************************
 * Module Name:								*
 *   e1000g_alloc.c							*
 *									*
 * Abstract:								*
 *   This file contains some routines that take care of		*
 *   memory allocation for descriptors and buffers.			*
 *									*
 * **********************************************************************
 */

#include "e1000g_sw.h"
#include "e1000g_debug.h"

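/*
 * Size, in bytes, of the per-ring tx_sw_packet_t array; one entry is
 * kept for each element of the transmit free list.
 */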
#define	TX_SW_PKT_AREA_SZ \
	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)

static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *);
static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
static void e1000g_free_rx_descriptors(e1000g_rx_ring_t *);
static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_packets(e1000g_rx_ring_t *);
static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
static void e1000g_free_rx_packets(e1000g_rx_ring_t *);
static int e1000g_alloc_dma_buffer(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);

/*
 * e1000g_alloc_dma_buffer_82546 is used for the adapter types that
 * cannot tolerate a receive buffer crossing a 64KB boundary during
 * PCI-X packet reception.
 */
static int e1000g_alloc_dma_buffer_82546(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
static int e1000g_dma_mem_alloc_82546(dma_buffer_t *buf,
    size_t size, size_t *len);
static boolean_t e1000g_cross_64k_bound(void *, uintptr_t);

static void e1000g_free_dma_buffer(dma_buffer_t *);
#ifdef __sparc
static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
static void e1000g_free_dvma_buffer(dma_buffer_t *);
#endif
static int e1000g_alloc_descriptors(struct e1000g *Adapter);
static void e1000g_free_descriptors(struct e1000g *Adapter);
static int e1000g_alloc_packets(struct e1000g *Adapter);
static void e1000g_free_packets(struct e1000g *Adapter);
static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *,
    ddi_dma_attr_t *p_dma_attr);

/* DMA access attributes for descriptors <Little Endian> */
static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/* DMA access attributes for DMA buffers */
#ifdef __sparc
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC,
};
#else
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
#endif

/* DMA attributes for tx mblk buffers */
static ddi_dma_attr_t e1000g_tx_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	MAX_COOKIES,		/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for pre-allocated rx/tx buffers */
static ddi_dma_attr_t e1000g_buf_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for rx/tx descriptors */
static ddi_dma_attr_t e1000g_desc_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	E1000_MDALIGN,		/* default alignment is 4k but can be changed */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};
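
/*
 * Note: the attribute structures above are templates.  The allocation
 * routines below copy them and then override fields such as
 * dma_attr_align and dma_attr_sgllen to suit the adapter type in use.
 */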

#ifdef __sparc
static ddi_dma_lim_t e1000g_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};
#endif

#ifdef __sparc
static dma_type_t e1000g_dma_type = USE_DVMA;
#else
static dma_type_t e1000g_dma_type = USE_DMA;
#endif

extern krwlock_t e1000g_dma_type_lock;

int
e1000g_alloc_dma_resources(struct e1000g *Adapter)
{
	int result;

	result = DDI_FAILURE;

	while ((result != DDI_SUCCESS) &&
	    (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
	    (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
	    (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST) &&
	    (Adapter->rx_freelist_num >= MIN_NUM_RX_FREELIST)) {

		result = e1000g_alloc_descriptors(Adapter);

		if (result == DDI_SUCCESS) {
			result = e1000g_alloc_packets(Adapter);

			if (result != DDI_SUCCESS)
				e1000g_free_descriptors(Adapter);
		}

		/*
		 * If the allocation fails due to resource shortage,
		 * we'll halve the number of descriptors/buffers and
		 * try the allocation again.
		 */
		if (result != DDI_SUCCESS) {
			/*
			 * We must ensure the number of descriptors
			 * is always a multiple of 8.
			 */
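			/*
			 * The shift pair below halves the count while
			 * rounding down to a multiple of 8; e.g.
			 * (2048 >> 4) << 3 = 1024 and
			 * (250 >> 4) << 3 = 120.
			 */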
			Adapter->tx_desc_num =
			    (Adapter->tx_desc_num >> 4) << 3;
			Adapter->rx_desc_num =
			    (Adapter->rx_desc_num >> 4) << 3;

			Adapter->tx_freelist_num >>= 1;
			Adapter->rx_freelist_num >>= 1;
		}
	}

	return (result);
}

/*
 * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
 *
 * This routine allocates the necessary DMA buffers for the
 *	Transmit Descriptor Area
 *	Receive Descriptor Area
 */
static int
e1000g_alloc_descriptors(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	if ((Adapter->shared.mac.type == e1000_82545) ||
	    (Adapter->shared.mac.type == e1000_82546) ||
	    (Adapter->shared.mac.type == e1000_82546_rev_3)) {
		/* Align on a 64k boundary for these adapter types */
		Adapter->desc_align = E1000_MDALIGN_82546;
	} else {
		/* Align on a 4k boundary for all other adapter types */
		Adapter->desc_align = E1000_MDALIGN;
	}

	tx_ring = Adapter->tx_ring;

	result = e1000g_alloc_tx_descriptors(tx_ring);
	if (result != DDI_SUCCESS)
		return (DDI_FAILURE);

	rx_ring = Adapter->rx_ring;

	result = e1000g_alloc_rx_descriptors(rx_ring);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
e1000g_free_descriptors(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

	e1000g_free_tx_descriptors(tx_ring);
	e1000g_free_rx_descriptors(rx_ring);
}

static int
e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Solaris 7 has a problem with allocating physically contiguous memory
	 * that is aligned on a 4K boundary. The transmit and rx descriptors
	 * need to be aligned on a 4KByte boundary. We first try to allocate
	 * the memory with DMA attributes set to 4K alignment and also no
	 * scatter/gather mechanism specified. In most cases, this does not
	 * allocate memory aligned at a 4KByte boundary. We then try asking for
	 * memory aligned on a 4K boundary with scatter/gather set to 2. This
	 * works when the amount of memory is less than 4K, i.e. a page size.
	 * If neither of these options works, or if the number of descriptors
	 * is greater than 4K (i.e. more than 256 descriptors), we allocate 4K
	 * of extra memory and then align the memory at a 4K boundary ourselves.
	 */
	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;

	/*
	 * Memory allocation for the transmit buffer descriptors.
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the transmit descriptor
	 * memory area.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &tx_ring->tbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate tbd dma handle: %d", mystat);
		tx_ring->tbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&tx_ring->tbd_area,
	    &len, &tx_ring->tbd_acc_handle);

	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)tx_ring->tbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	if (alloc_flag)
		bzero(tx_ring->tbd_area, len);

	/*
	 * If the previous DMA attributes setting could not give us contiguous
	 * memory or the number of descriptors is greater than the page size,
	 * we allocate extra memory and then align it at the appropriate
	 * boundary ourselves.
	 */
	if (!alloc_flag) {
		size = size + Adapter->desc_align;

		/*
		 * DMA attributes set to no scatter/gather and byte alignment
		 */
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;

		/*
		 * Allocate a new DMA handle for the transmit descriptor memory
		 * area.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &tx_ring->tbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate tbd dma handle: %d", mystat);
			tx_ring->tbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		/*
		 * Allocate memory to DMA data to and from the transmit
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&tx_ring->tbd_area,
		    &len, &tx_ring->tbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tbd dma memory: %d", mystat);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
			if (tx_ring->tbd_dma_handle != NULL) {
				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
				tx_ring->tbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the entire transmit buffer descriptor area to zero
		 */
		bzero(tx_ring->tbd_area, len);
		/*
		 * Memory has been allocated with the ddi_dma_mem_alloc call,
		 * but has not been aligned.
		 * We now align it on the appropriate boundary.
		 */
		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)tx_ring->tbd_area;
		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
	}	/* alignment workaround */
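
	/*
	 * Alignment example (illustrative addresses): if the raw
	 * tbd_area were 0x1234 with desc_align 0x1000, P2NPHASE()
	 * above would return 0xdcc, advancing tbd_area to the next
	 * 4k boundary at 0x2000 and shrinking len accordingly.
	 */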

	/*
	 * Transmit buffer descriptor memory allocation succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound
	 * to the memory address.
	 */
	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource: %d", mystat);
		if (tx_ring->tbd_acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);	/* 1 cookie */

	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
	tx_ring->tbd_first = tx_ring->tbd_area;
	tx_ring->tbd_last = tx_ring->tbd_first +
	    (Adapter->tx_desc_num - 1);

	return (DDI_SUCCESS);
}

static int
e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *rx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Memory allocation for the receive buffer descriptors.
	 */
	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;

	/*
	 * Ask for aligned memory with the DMA attributes set to
	 * suitable values.
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the receive descriptors
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &rx_ring->rbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate rbd dma handle: %d", mystat);
		rx_ring->rbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}
	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&rx_ring->rbd_area,
	    &len, &rx_ring->rbd_acc_handle);

	/*
	 * Check if memory allocation succeeded and also if the
	 * allocated memory is aligned correctly.
	 */
	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)rx_ring->rbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
		}
		if (rx_ring->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
			rx_ring->rbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the allocated receive descriptor memory to zero.
	 */
	if (alloc_flag)
		bzero((caddr_t)rx_ring->rbd_area, len);

	/*
	 * If memory allocation did not succeed, do the alignment ourselves
	 */
	if (!alloc_flag) {
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;
		size = size + Adapter->desc_align;
		/*
		 * Allocate a new DMA handle for the receive descriptor.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &rx_ring->rbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate rbd dma handle: %d", mystat);
			rx_ring->rbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}
		/*
		 * Allocate memory to DMA data to and from the receive
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&rx_ring->rbd_area,
		    &len, &rx_ring->rbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate rbd dma memory: %d", mystat);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
			if (rx_ring->rbd_dma_handle != NULL) {
				ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
				rx_ring->rbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the allocated receive descriptor memory to zero.
		 */
		bzero((caddr_t)rx_ring->rbd_area, len);
		templong = P2NPHASE((uintptr_t)rx_ring->rbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)rx_ring->rbd_area;
		rx_ring->rbd_area = (struct e1000_rx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * The memory allocation of the receive descriptors succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	mystat = ddi_dma_addr_bind_handle(rx_ring->rbd_dma_handle,
	    (struct as *)NULL, (caddr_t)rx_ring->rbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource: %d", mystat);
		if (rx_ring->rbd_acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
			rx_ring->rbd_acc_handle = NULL;
			rx_ring->rbd_area = NULL;
		}
		if (rx_ring->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
			rx_ring->rbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);
	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_rx_descriptors(rx_ring);
		return (DDI_FAILURE);
	}

	rx_ring->rbd_dma_addr = cookie.dmac_laddress;
	rx_ring->rbd_first = rx_ring->rbd_area;
	rx_ring->rbd_last = rx_ring->rbd_first +
	    (Adapter->rx_desc_num - 1);

	return (DDI_SUCCESS);
}

static void
e1000g_free_rx_descriptors(e1000g_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_ring->rbd_dma_handle);
	}
	if (rx_ring->rbd_acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
		rx_ring->rbd_acc_handle = NULL;
		rx_ring->rbd_area = NULL;
	}
	if (rx_ring->rbd_dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
		rx_ring->rbd_dma_handle = NULL;
	}
	rx_ring->rbd_dma_addr = NULL;
	rx_ring->rbd_first = NULL;
	rx_ring->rbd_last = NULL;
}

static void
e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
	}
	if (tx_ring->tbd_acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
		tx_ring->tbd_acc_handle = NULL;
		tx_ring->tbd_area = NULL;
	}
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
		tx_ring->tbd_dma_handle = NULL;
	}
	tx_ring->tbd_dma_addr = NULL;
	tx_ring->tbd_first = NULL;
	tx_ring->tbd_last = NULL;
}

/*
 * e1000g_alloc_packets - allocate DMA buffers for rx/tx
 *
 * This routine allocates the necessary buffers for
 *	 Transmit sw packet structure
 *	 DMA handle for Transmit
 *	 DMA buffer for Transmit
 *	 Receive sw packet structure
 *	 DMA buffer for Receive
 */
static int
e1000g_alloc_packets(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

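	/*
	 * e1000g_dma_type is driver-global: when a DVMA allocation
	 * fails below, the fallback to USE_DMA is made under the
	 * writer lock and therefore applies to all subsequent
	 * allocations, not just to this adapter.
	 */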
again:
	rw_enter(&e1000g_dma_type_lock, RW_READER);

	result = e1000g_alloc_tx_packets(tx_ring);
	if (result != DDI_SUCCESS) {
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Tx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Tx packets\n");
		return (DDI_FAILURE);
	}

	result = e1000g_alloc_rx_packets(rx_ring);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_packets(tx_ring);
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Rx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Rx packets\n");
		return (DDI_FAILURE);
	}

	rw_exit(&e1000g_dma_type_lock);

	return (DDI_SUCCESS);
}

static void
e1000g_free_packets(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_ring_t *rx_ring;

	tx_ring = Adapter->tx_ring;
	rx_ring = Adapter->rx_ring;

	e1000g_free_tx_packets(tx_ring);
	e1000g_free_rx_packets(rx_ring);
}

#ifdef __sparc
static int
e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = dvma_reserve(devinfo,
	    &e1000g_dma_limits,
	    Adapter->dvma_page_num,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	buf->address = kmem_alloc(size, KM_NOSLEEP);

	if (buf->address == NULL) {
		if (buf->dma_handle != NULL) {
			dvma_release(buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer memory\n");
		return (DDI_FAILURE);
	}

	dvma_kaddr_load(buf->dma_handle,
	    buf->address, size, 0, &cookie);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = size;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dvma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		dvma_unload(buf->dma_handle, 0, -1);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->address != NULL) {
		kmem_free(buf->address, buf->size);
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		dvma_release(buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
#endif

static int
e1000g_alloc_dma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->address,
	    &len, &buf->acc_handle);

	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

/*
 * e1000g_alloc_dma_buffer_82546 - allocate a dma buffer along with all
 * necessary handles.  Same as e1000g_alloc_dma_buffer() except that it
 * ensures the buffer does not cross a 64k boundary.
 */
static int
e1000g_alloc_dma_buffer_82546(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = e1000g_dma_mem_alloc_82546(buf, size, &len);
	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

/*
 * e1000g_dma_mem_alloc_82546 - allocate a dma buffer, making up to
 * ALLOC_RETRY attempts to get a buffer that doesn't cross a 64k boundary.
 */
static int
e1000g_dma_mem_alloc_82546(dma_buffer_t *buf, size_t size, size_t *len)
{
#define	ALLOC_RETRY	10
	int stat;
	int cnt = 0;
	ddi_acc_handle_t hold[ALLOC_RETRY];

	while (cnt < ALLOC_RETRY) {
		hold[cnt] = NULL;

		/* allocate memory */
		stat = ddi_dma_mem_alloc(buf->dma_handle, size,
		    &e1000g_buf_acc_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
		    0, &buf->address, len, &buf->acc_handle);

		if (stat != DDI_SUCCESS) {
			break;
		}

		/*
		 * Check the 64k boundary:
		 * if it is bad, hold the buffer and retry;
		 * if it is good, exit the loop.
		 */
		if (e1000g_cross_64k_bound(buf->address, *len)) {
			hold[cnt] = buf->acc_handle;
			stat = DDI_FAILURE;
		} else {
			break;
		}

		cnt++;
	}

	/* Release any held buffers crossing the 64k boundary */
	for (--cnt; cnt >= 0; cnt--) {
		if (hold[cnt])
			ddi_dma_mem_free(&hold[cnt]);
	}

	return (stat);
}
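
/*
 * Note that buffers failing the boundary test are held (rather than
 * freed immediately) until the loop finishes; freeing one right away
 * could let ddi_dma_mem_alloc() hand back the same crossing region
 * on the next attempt.
 */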

/*
 * e1000g_cross_64k_bound - return B_TRUE if the starting and ending
 * addresses cross a 64k boundary; otherwise return B_FALSE
 */
static boolean_t
e1000g_cross_64k_bound(void *addr, uintptr_t len)
{
	uintptr_t start = (uintptr_t)addr;
	uintptr_t end = start + len - 1;

	return (((start ^ end) >> 16) == 0 ? B_FALSE : B_TRUE);
}
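
/*
 * Example (illustrative values): a buffer at 0xfff8 with len 0x10
 * ends at 0x10007, and (0xfff8 ^ 0x10007) >> 16 == 1, so it crosses;
 * a buffer at 0x10000 with len 0x10000 ends at 0x1ffff within one
 * 64k page, so it does not.
 */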

static void
e1000g_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

static int
e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	p_tx_sw_packet_t packet;
	int mystat;
	dma_buffer_t *tx_buf;
	struct e1000g *Adapter;
	dev_info_t *devinfo;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;
	dma_attr = e1000g_buf_dma_attr;

	/*
	 * Memory allocation for the Transmit software structure, the transmit
	 * software packet. This structure stores all the relevant information
	 * for transmitting a single packet.
	 */
	tx_ring->packet_area =
	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);

	if (tx_ring->packet_area == NULL)
		return (DDI_FAILURE);

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		ASSERT(packet != NULL);

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting. The
		 * dynamic binding only applies to packets that are larger
		 * than the tx_bcopy_thresh.
		 */
		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = dvma_reserve(devinfo,
			    &e1000g_dma_limits,
			    Adapter->dvma_page_num,
			    &packet->tx_dma_handle);
			break;
#endif
		case USE_DMA:
			mystat = ddi_dma_alloc_handle(devinfo,
			    &e1000g_tx_dma_attr,
			    DDI_DMA_DONTWAIT, 0,
			    &packet->tx_dma_handle);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tx dma handle: %d\n", mystat);
			goto tx_pkt_fail;
		}

		/*
		 * Pre-allocate transmit buffers for small packets whose
		 * size is less than tx_bcopy_thresh. The data of those
		 * small packets will be bcopy()'d into the transmit
		 * buffers instead of using dynamic DMA binding. For
		 * small packets, bcopy will bring better performance
		 * than DMA binding.
		 */
		tx_buf = packet->tx_buf;

		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = e1000g_alloc_dvma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size);
			break;
#endif
		case USE_DMA:
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size, &dma_attr);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			ASSERT(packet->tx_dma_handle != NULL);
			switch (e1000g_dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Failed to allocate Tx buffer\n");
			goto tx_pkt_fail;
		}

		packet->dma_type = e1000g_dma_type;
	} /* for */

	return (DDI_SUCCESS);

tx_pkt_fail:
	e1000g_free_tx_packets(tx_ring);

	return (DDI_FAILURE);
}

static int
e1000g_alloc_rx_packets(e1000g_rx_ring_t *rx_ring)
{
	int i;
	p_rx_sw_packet_t packet;
	struct e1000g *Adapter;
	uint32_t packet_num;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_ring->adapter;
	dma_attr = e1000g_buf_dma_attr;
	dma_attr.dma_attr_align = Adapter->rx_buf_align;

	/*
	 * Allocate memory for the rx_sw_packet structures. Each one of these
	 * structures will contain a virtual and physical address to an actual
	 * receive buffer in host memory. Since we use one rx_sw_packet per
	 * received packet, the number of rx_sw_packet that we'll need is
	 * equal to the number of receive descriptors plus the number of
	 * free-list entries that we've allocated.
	 */
	packet_num = Adapter->rx_desc_num + Adapter->rx_freelist_num;
	rx_ring->packet_area = NULL;

	for (i = 0; i < packet_num; i++) {
		packet = e1000g_alloc_rx_sw_packet(rx_ring, &dma_attr);
		if (packet == NULL)
			goto rx_pkt_fail;

		packet->next = rx_ring->packet_area;
		rx_ring->packet_area = packet;
	}

	return (DDI_SUCCESS);

rx_pkt_fail:
	e1000g_free_rx_packets(rx_ring);

	return (DDI_FAILURE);
}

static p_rx_sw_packet_t
e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *rx_ring, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	p_rx_sw_packet_t packet;
	dma_buffer_t *rx_buf;
	struct e1000g *Adapter;

	Adapter = rx_ring->adapter;

	packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
	if (packet == NULL) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate memory for Rx SwPacket\n");
		return (NULL);
	}

	rx_buf = packet->rx_buf;

	switch (e1000g_dma_type) {
#ifdef __sparc
	case USE_DVMA:
		mystat = e1000g_alloc_dvma_buffer(Adapter,
		    rx_buf, Adapter->rx_buffer_size);
		break;
#endif
	case USE_DMA:
		if ((Adapter->shared.mac.type == e1000_82545) ||
		    (Adapter->shared.mac.type == e1000_82546) ||
		    (Adapter->shared.mac.type == e1000_82546_rev_3)) {
			mystat = e1000g_alloc_dma_buffer_82546(Adapter,
			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		} else {
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		}
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	if (mystat != DDI_SUCCESS) {
		if (packet != NULL)
			kmem_free(packet, sizeof (rx_sw_packet_t));

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate Rx buffer\n");
		return (NULL);
	}

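	/*
	 * Reserve E1000G_IPALIGNROOM bytes (typically 2) at the head of
	 * the buffer so that, after the 14-byte Ethernet header, the IP
	 * header starts on a 4-byte boundary; the room is handed back to
	 * the mblk below via desballoc() and the b_rptr/b_wptr offsets.
	 */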
	rx_buf->size -= E1000G_IPALIGNROOM;
	rx_buf->address += E1000G_IPALIGNROOM;
	rx_buf->dma_address += E1000G_IPALIGNROOM;

	packet->rx_ring = (caddr_t)rx_ring;
	packet->free_rtn.free_func = e1000g_rxfree_func;
	packet->free_rtn.free_arg = (char *)packet;
	/*
	 * desballoc() is used here instead of esballoc() because it
	 * gives better efficiency; although the call is undocumented,
	 * Sun has confirmed that it may be used.
	 */
	packet->mp = desballoc((unsigned char *)
	    rx_buf->address - E1000G_IPALIGNROOM,
	    rx_buf->size + E1000G_IPALIGNROOM,
	    BPRI_MED, &packet->free_rtn);

	if (packet->mp != NULL) {
		packet->mp->b_rptr += E1000G_IPALIGNROOM;
		packet->mp->b_wptr += E1000G_IPALIGNROOM;
	}

	packet->dma_type = e1000g_dma_type;

	return (packet);
}

void
e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet)
{
	dma_buffer_t *rx_buf;

	if (packet->mp != NULL) {
		freemsg(packet->mp);
		packet->mp = NULL;
	}

	rx_buf = packet->rx_buf;
	ASSERT(rx_buf->dma_handle != NULL);

	rx_buf->size += E1000G_IPALIGNROOM;
	rx_buf->address -= E1000G_IPALIGNROOM;

	switch (packet->dma_type) {
#ifdef __sparc
	case USE_DVMA:
		e1000g_free_dvma_buffer(rx_buf);
		break;
#endif
	case USE_DMA:
		e1000g_free_dma_buffer(rx_buf);
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	packet->dma_type = USE_NONE;

	kmem_free(packet, sizeof (rx_sw_packet_t));
}

static void
e1000g_free_rx_packets(e1000g_rx_ring_t *rx_ring)
{
	p_rx_sw_packet_t packet, next_packet, free_list;

	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);

	free_list = NULL;
	packet = rx_ring->packet_area;
	for (; packet != NULL; packet = next_packet) {
		next_packet = packet->next;

		if (packet->flag == E1000G_RX_SW_SENDUP) {
			rx_ring->pending_count++;
			e1000g_mblks_pending++;
			packet->flag = E1000G_RX_SW_STOP;
			packet->next = rx_ring->pending_list;
			rx_ring->pending_list = packet;
		} else {
			packet->next = free_list;
			free_list = packet;
		}
	}
	rx_ring->packet_area = NULL;

	rw_exit(&e1000g_rx_detach_lock);

	packet = free_list;
	for (; packet != NULL; packet = next_packet) {
		next_packet = packet->next;

		ASSERT(packet->flag == E1000G_RX_SW_FREE);
		e1000g_free_rx_sw_packet(packet);
	}
}

static void
e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	struct e1000g *Adapter;
	p_tx_sw_packet_t packet;
	dma_buffer_t *tx_buf;

	Adapter = tx_ring->adapter;

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		if (packet == NULL)
			break;

		/* Free the Tx DMA handle for dynamic binding */
		if (packet->tx_dma_handle != NULL) {
			switch (packet->dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, then we don't
			 * need to check the remaining packets, since
			 * they have not been initialized or have
			 * already been freed.
			 */
			break;
		}

		tx_buf = packet->tx_buf;

		switch (packet->dma_type) {
#ifdef __sparc
		case USE_DVMA:
			e1000g_free_dvma_buffer(tx_buf);
			break;
#endif
		case USE_DMA:
			e1000g_free_dma_buffer(tx_buf);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}

		packet->dma_type = USE_NONE;
	}
	if (tx_ring->packet_area != NULL) {
		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
		tx_ring->packet_area = NULL;
	}
}

/*
 * e1000g_release_dma_resources - release allocated DMA resources
 *
 * This function releases any pending buffers that have been
 * previously allocated.
 */
void
e1000g_release_dma_resources(struct e1000g *Adapter)
{
	e1000g_free_descriptors(Adapter);
	e1000g_free_packets(Adapter);
}

/* ARGSUSED */
void
e1000g_set_fma_flags(struct e1000g *Adapter, int acc_flag, int dma_flag)
{
	if (acc_flag) {
		e1000g_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		e1000g_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (dma_flag) {
		e1000g_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		e1000g_tx_dma_attr.dma_attr_flags = 0;
		e1000g_buf_dma_attr.dma_attr_flags = 0;
		e1000g_desc_dma_attr.dma_attr_flags = 0;
	}
}
1534