/*
 * This file is provided under a CDDLv1 license.  When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms of the CDDLv1.
 */

/*
 * **********************************************************************
 * Module Name:								*
 *   e1000g_alloc.c							*
 *									*
 * Abstract:								*
 *   This file contains some routines that take care of			*
 *   memory allocation for descriptors and buffers.			*
 *									*
 * **********************************************************************
 */

#include "e1000g_sw.h"
#include "e1000g_debug.h"

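/*
 * Size of the tx_sw_packet area for one tx ring.  Note that this macro
 * expands the identifier "Adapter", so each use site must have a local
 * struct e1000g pointer of that name in scope.
 */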
#define	TX_SW_PKT_AREA_SZ \
	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)

static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_descriptors(e1000g_rx_data_t *);
static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
static void e1000g_free_rx_descriptors(e1000g_rx_data_t *);
static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_packets(e1000g_rx_data_t *);
static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
static void e1000g_free_rx_packets(e1000g_rx_data_t *);
static int e1000g_alloc_dma_buffer(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);

/*
 * To avoid addressing errors when a receive buffer crosses a 64KB
 * boundary on PCI-X, e1000g_alloc_dma_buffer_82546 is used for the
 * adapter types that require it.
 */
static int e1000g_alloc_dma_buffer_82546(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
static int e1000g_dma_mem_alloc_82546(dma_buffer_t *buf,
    size_t size, size_t *len);
static boolean_t e1000g_cross_64k_bound(void *, uintptr_t);

static void e1000g_free_dma_buffer(dma_buffer_t *);
#ifdef __sparc
static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
static void e1000g_free_dvma_buffer(dma_buffer_t *);
#endif
static int e1000g_alloc_descriptors(struct e1000g *Adapter);
static void e1000g_free_descriptors(struct e1000g *Adapter);
static int e1000g_alloc_packets(struct e1000g *Adapter);
static void e1000g_free_packets(struct e1000g *Adapter);
static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_data_t *,
    ddi_dma_attr_t *p_dma_attr);

/* DMA access attributes for descriptors <Little Endian> */
static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/* DMA access attributes for DMA buffers */
#ifdef __sparc
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC,
};
#else
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
#endif

/* DMA attributes for tx mblk buffers */
static ddi_dma_attr_t e1000g_tx_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	MAX_COOKIES,		/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for pre-allocated rx/tx buffers */
static ddi_dma_attr_t e1000g_buf_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for rx/tx descriptors */
static ddi_dma_attr_t e1000g_desc_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	E1000_MDALIGN,		/* default alignment is 4k but can be changed */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

#ifdef __sparc
static ddi_dma_lim_t e1000g_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};
#endif

#ifdef __sparc
static dma_type_t e1000g_dma_type = USE_DVMA;
#else
static dma_type_t e1000g_dma_type = USE_DMA;
#endif

extern krwlock_t e1000g_dma_type_lock;


int
e1000g_alloc_dma_resources(struct e1000g *Adapter)
{
	int result;

	result = DDI_FAILURE;

	while ((result != DDI_SUCCESS) &&
	    (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
	    (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
	    (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST) &&
	    (Adapter->rx_freelist_num >= MIN_NUM_RX_FREELIST)) {

		result = e1000g_alloc_descriptors(Adapter);

		if (result == DDI_SUCCESS) {
			result = e1000g_alloc_packets(Adapter);

			if (result != DDI_SUCCESS)
				e1000g_free_descriptors(Adapter);
		}

		/*
		 * If the allocation fails due to a resource shortage,
		 * halve the number of descriptors/buffers and try the
		 * allocation again.
		 */
		if (result != DDI_SUCCESS) {
			/*
			 * We must ensure the number of descriptors
			 * is always a multiple of 8.
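			 * Shifting right by 4 and back left by 3 halves
			 * the count while rounding it down to a multiple
			 * of 8.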
			 */
			Adapter->tx_desc_num =
			    (Adapter->tx_desc_num >> 4) << 3;
			Adapter->rx_desc_num =
			    (Adapter->rx_desc_num >> 4) << 3;

			Adapter->tx_freelist_num >>= 1;
			Adapter->rx_freelist_num >>= 1;
		}
	}

	return (result);
}

/*
 * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
 *
 * This routine allocates the necessary DMA buffers for the
 *	Transmit Descriptor Area
 *	Receive Descriptor Area
 */
static int
e1000g_alloc_descriptors(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	if (Adapter->mem_workaround_82546 &&
	    ((Adapter->shared.mac.type == e1000_82545) ||
	    (Adapter->shared.mac.type == e1000_82546) ||
	    (Adapter->shared.mac.type == e1000_82546_rev_3))) {
		/* Align on a 64k boundary for these adapter types */
		Adapter->desc_align = E1000_MDALIGN_82546;
	} else {
		/* Align on a 4k boundary for all other adapter types */
		Adapter->desc_align = E1000_MDALIGN;
	}

	tx_ring = Adapter->tx_ring;

	result = e1000g_alloc_tx_descriptors(tx_ring);
	if (result != DDI_SUCCESS)
		return (DDI_FAILURE);

	rx_data = Adapter->rx_ring->rx_data;

	result = e1000g_alloc_rx_descriptors(rx_data);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
e1000g_free_descriptors(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

	e1000g_free_tx_descriptors(tx_ring);
	e1000g_free_rx_descriptors(rx_data);
}

static int
e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Solaris 7 has a problem allocating physically contiguous memory
	 * that is aligned on a 4K boundary, and the transmit and receive
	 * descriptors must be 4KB-aligned. We first try to allocate the
	 * memory with the DMA attributes set to 4K alignment and no scatter/
	 * gather. In most cases this does not return memory aligned on a
	 * 4KB boundary. We then ask for memory aligned on a 4K boundary with
	 * scatter/gather set to 2, which works when the amount of memory is
	 * less than 4K, i.e. a page. If neither option works, or if the
	 * descriptor area is larger than 4K (i.e. more than 256 descriptors),
	 * we allocate 4K of extra memory and then align it on a 4K boundary
	 * ourselves.
	 */
	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;

	/*
	 * Memory allocation for the transmit buffer descriptors.
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the transmit descriptor
	 * memory area.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &tx_ring->tbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate tbd dma handle: %d", mystat);
		tx_ring->tbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&tx_ring->tbd_area,
	    &len, &tx_ring->tbd_acc_handle);

	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)tx_ring->tbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	if (alloc_flag)
		bzero(tx_ring->tbd_area, len);

	/*
	 * If the previous DMA attributes setting could not give us contiguous
	 * memory or the number of descriptors is greater than the page size,
	 * we allocate extra memory and then align it at appropriate boundary.
	 */
	if (!alloc_flag) {
		size = size + Adapter->desc_align;

		/*
		 * DMA attributes set to no scatter/gather and byte
		 * alignment; the memory is aligned manually below.
		 */
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;

		/*
		 * Allocate a new DMA handle for the transmit descriptor memory
		 * area.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &tx_ring->tbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate tbd dma handle: %d", mystat);
			tx_ring->tbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		/*
		 * Allocate memory to DMA data to and from the transmit
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&tx_ring->tbd_area,
		    &len, &tx_ring->tbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tbd dma memory: %d", mystat);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
			if (tx_ring->tbd_dma_handle != NULL) {
				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
				tx_ring->tbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the entire transmit buffer descriptor area to zero
		 */
		bzero(tx_ring->tbd_area, len);
		/*
		 * Memory has been allocated with the ddi_dma_mem_alloc call,
		 * but has not been aligned.
		 * We now align it on the appropriate boundary.
		 */
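		/*
		 * P2NPHASE(x, align) (sys/sysmacros.h) yields the number
		 * of bytes from x up to the next align boundary, so the
		 * usable length shrinks by that amount and the base
		 * pointer is bumped past it.
		 */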
		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)tx_ring->tbd_area;
		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * Transmit buffer descriptor memory allocation succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call; the resources are then bound to
	 * the memory address.
	 */
	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource: %d", mystat);
		if (tx_ring->tbd_acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);	/* 1 cookie */

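	/*
	 * ASSERT() is compiled out in non-DEBUG builds, so the
	 * multi-cookie case is also handled at runtime below.
	 */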
	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
	tx_ring->tbd_first = tx_ring->tbd_area;
	tx_ring->tbd_last = tx_ring->tbd_first +
	    (Adapter->tx_desc_num - 1);

	return (DDI_SUCCESS);
}

static int
e1000g_alloc_rx_descriptors(e1000g_rx_data_t *rx_data)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_data->rx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Memory allocation for the receive buffer descriptors.
	 */
	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;

	/*
	 * Ask for suitably aligned memory by setting the DMA
	 * attributes accordingly.
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the receive descriptors
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &rx_data->rbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate rbd dma handle: %d", mystat);
		rx_data->rbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}
	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(rx_data->rbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&rx_data->rbd_area,
	    &len, &rx_data->rbd_acc_handle);

	/*
	 * Check if memory allocation succeeded and also if the
	 * allocated memory is aligned correctly.
	 */
	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)rx_data->rbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&rx_data->rbd_acc_handle);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
		}
		if (rx_data->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_dma_handle);
			rx_data->rbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the allocated receive descriptor memory to zero.
	 */
	if (alloc_flag)
		bzero((caddr_t)rx_data->rbd_area, len);

	/*
	 * If the aligned allocation did not succeed, over-allocate
	 * and do the alignment ourselves.
	 */
	if (!alloc_flag) {
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;
		size = size + Adapter->desc_align;
		/*
		 * Allocate a new DMA handle for the receive descriptor.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &rx_data->rbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate rbd dma handle: %d", mystat);
			rx_data->rbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}
		/*
		 * Allocate memory to DMA data to and from the receive
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(rx_data->rbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&rx_data->rbd_area,
		    &len, &rx_data->rbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate rbd dma memory: %d", mystat);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
			if (rx_data->rbd_dma_handle != NULL) {
				ddi_dma_free_handle(&rx_data->rbd_dma_handle);
				rx_data->rbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the allocated receive descriptor memory to zero.
		 */
		bzero((caddr_t)rx_data->rbd_area, len);
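		/*
		 * Advance to the next desc_align boundary by hand, as in
		 * the transmit descriptor path above.
		 */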
		templong = P2NPHASE((uintptr_t)rx_data->rbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)rx_data->rbd_area;
		rx_data->rbd_area = (struct e1000_rx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * The memory allocation of the receive descriptors succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory allocated by the
	 * ddi_dma_mem_alloc call and bind them to it.
	 */
	mystat = ddi_dma_addr_bind_handle(rx_data->rbd_dma_handle,
	    (struct as *)NULL, (caddr_t)rx_data->rbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource: %d", mystat);
		if (rx_data->rbd_acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_acc_handle);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
		}
		if (rx_data->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_dma_handle);
			rx_data->rbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);
	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_rx_descriptors(rx_data);
		return (DDI_FAILURE);
	}

	rx_data->rbd_dma_addr = cookie.dmac_laddress;
	rx_data->rbd_first = rx_data->rbd_area;
	rx_data->rbd_last = rx_data->rbd_first +
	    (Adapter->rx_desc_num - 1);

	return (DDI_SUCCESS);
}

static void
e1000g_free_rx_descriptors(e1000g_rx_data_t *rx_data)
{
	if (rx_data->rbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_data->rbd_dma_handle);
	}
	if (rx_data->rbd_acc_handle != NULL) {
		ddi_dma_mem_free(&rx_data->rbd_acc_handle);
		rx_data->rbd_acc_handle = NULL;
		rx_data->rbd_area = NULL;
	}
	if (rx_data->rbd_dma_handle != NULL) {
		ddi_dma_free_handle(&rx_data->rbd_dma_handle);
		rx_data->rbd_dma_handle = NULL;
	}
	rx_data->rbd_dma_addr = NULL;
	rx_data->rbd_first = NULL;
	rx_data->rbd_last = NULL;
}

static void
e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
	}
	if (tx_ring->tbd_acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
		tx_ring->tbd_acc_handle = NULL;
		tx_ring->tbd_area = NULL;
	}
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
		tx_ring->tbd_dma_handle = NULL;
	}
	tx_ring->tbd_dma_addr = NULL;
	tx_ring->tbd_first = NULL;
	tx_ring->tbd_last = NULL;
}


/*
 * e1000g_alloc_packets - allocate DMA buffers for rx/tx
 *
 * This routine allocates the necessary buffers for
 *	 Transmit sw packet structure
 *	 DMA handle for Transmit
 *	 DMA buffer for Transmit
 *	 Receive sw packet structure
 *	 DMA buffer for Receive
 */
static int
e1000g_alloc_packets(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

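	/*
	 * On SPARC the driver first tries DVMA. If DVMA resources run
	 * out, the global dma_type is downgraded to USE_DMA under the
	 * writer lock and the allocation is retried from the top.
	 */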
again:
	rw_enter(&e1000g_dma_type_lock, RW_READER);

	result = e1000g_alloc_tx_packets(tx_ring);
	if (result != DDI_SUCCESS) {
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resource for Tx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Tx packets\n");
		return (DDI_FAILURE);
	}

	result = e1000g_alloc_rx_packets(rx_data);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_packets(tx_ring);
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resource for Rx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Rx packets\n");
		return (DDI_FAILURE);
	}

	rw_exit(&e1000g_dma_type_lock);

	return (DDI_SUCCESS);
}

static void
e1000g_free_packets(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

	e1000g_free_tx_packets(tx_ring);
	e1000g_free_rx_packets(rx_data);
}

#ifdef __sparc
static int
e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

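	/*
	 * Reserve a fixed range of DVMA space up front; the kmem buffer
	 * allocated below is then mapped into it with dvma_kaddr_load(),
	 * avoiding full DDI DMA binding for every buffer.
	 */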
	mystat = dvma_reserve(devinfo,
	    &e1000g_dma_limits,
	    Adapter->dvma_page_num,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	buf->address = kmem_alloc(size, KM_NOSLEEP);

	if (buf->address == NULL) {
		if (buf->dma_handle != NULL) {
			dvma_release(buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer memory\n");
		return (DDI_FAILURE);
	}

	dvma_kaddr_load(buf->dma_handle,
	    buf->address, size, 0, &cookie);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = size;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dvma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		dvma_unload(buf->dma_handle, 0, -1);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->address != NULL) {
		kmem_free(buf->address, buf->size);
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		dvma_release(buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
#endif

static int
e1000g_alloc_dma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->address,
	    &len, &buf->acc_handle);

	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

/*
 * e1000g_alloc_dma_buffer_82546 - allocate a dma buffer along with all
 * necessary handles.  Same as e1000g_alloc_dma_buffer() except that it
 * ensures the buffer does not cross a 64k boundary.
 */
static int
e1000g_alloc_dma_buffer_82546(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = e1000g_dma_mem_alloc_82546(buf, size, &len);
	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

/*
 * e1000g_dma_mem_alloc_82546 - allocate a dma buffer, making up to
 * ALLOC_RETRY attempts to get a buffer that doesn't cross a 64k boundary.
 */
static int
e1000g_dma_mem_alloc_82546(dma_buffer_t *buf, size_t size, size_t *len)
{
#define	ALLOC_RETRY	10
	int stat;
	int cnt = 0;
	ddi_acc_handle_t hold[ALLOC_RETRY];

	while (cnt < ALLOC_RETRY) {
		hold[cnt] = NULL;

		/* allocate memory */
		stat = ddi_dma_mem_alloc(buf->dma_handle, size,
		    &e1000g_buf_acc_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
		    0, &buf->address, len, &buf->acc_handle);

		if (stat != DDI_SUCCESS) {
			break;
		}

		/*
		 * Check the 64k boundary:
		 * if the buffer crosses it, hold the allocation (so the
		 * next attempt cannot be handed the same memory) and retry;
		 * if it is good, exit the loop.
		 */
		if (e1000g_cross_64k_bound(buf->address, *len)) {
			hold[cnt] = buf->acc_handle;
			stat = DDI_FAILURE;
		} else {
			break;
		}

		cnt++;
	}

	/* Release any held buffers that crossed the 64k boundary */
	for (--cnt; cnt >= 0; cnt--) {
		if (hold[cnt])
			ddi_dma_mem_free(&hold[cnt]);
	}

	return (stat);
}

/*
 * e1000g_cross_64k_bound - return B_TRUE if the buffer spans a 64k
 * boundary; otherwise return B_FALSE.
 */
static boolean_t
e1000g_cross_64k_bound(void *addr, uintptr_t len)
{
	uintptr_t start = (uintptr_t)addr;
	uintptr_t end = start + len - 1;

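	/*
	 * start and end lie in the same 64KB region exactly when their
	 * bits above bit 15 match, i.e. when (start ^ end) >> 16 is zero.
	 */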
	return (((start ^ end) >> 16) == 0 ? B_FALSE : B_TRUE);
}

static void
e1000g_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

static int
e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	p_tx_sw_packet_t packet;
	int mystat;
	dma_buffer_t *tx_buf;
	struct e1000g *Adapter;
	dev_info_t *devinfo;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;
	dma_attr = e1000g_buf_dma_attr;

	/*
	 * Memory allocation for the Transmit software structure, the transmit
	 * software packet. This structure stores all the relevant information
	 * for transmitting a single packet.
	 */
	tx_ring->packet_area =
	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);

	if (tx_ring->packet_area == NULL)
		return (DDI_FAILURE);

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		ASSERT(packet != NULL);

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting. The
		 * dynamic binding applies only to packets larger than
		 * tx_bcopy_thresh.
		 */
		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = dvma_reserve(devinfo,
			    &e1000g_dma_limits,
			    Adapter->dvma_page_num,
			    &packet->tx_dma_handle);
			break;
#endif
		case USE_DMA:
			mystat = ddi_dma_alloc_handle(devinfo,
			    &e1000g_tx_dma_attr,
			    DDI_DMA_DONTWAIT, 0,
			    &packet->tx_dma_handle);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tx dma handle: %d\n", mystat);
			goto tx_pkt_fail;
		}

		/*
		 * Pre-allocate transmit buffers for small packets whose
		 * size is below tx_bcopy_thresh. The data of those small
		 * packets will be bcopied into the transmit buffers instead
		 * of using dynamic DMA binding. For small packets, bcopy
		 * brings better performance than DMA binding.
		 */
		tx_buf = packet->tx_buf;

		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = e1000g_alloc_dvma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size);
			break;
#endif
		case USE_DMA:
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size, &dma_attr);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			ASSERT(packet->tx_dma_handle != NULL);
			switch (e1000g_dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Allocate Tx buffer fail\n");
			goto tx_pkt_fail;
		}

		packet->dma_type = e1000g_dma_type;
	} /* for */

	return (DDI_SUCCESS);

tx_pkt_fail:
	e1000g_free_tx_packets(tx_ring);

	return (DDI_FAILURE);
}

static int
e1000g_alloc_rx_packets(e1000g_rx_data_t *rx_data)
{
	int i;
	p_rx_sw_packet_t packet;
	struct e1000g *Adapter;
	uint32_t packet_num;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_data->rx_ring->adapter;
	dma_attr = e1000g_buf_dma_attr;
	dma_attr.dma_attr_align = Adapter->rx_buf_align;

	/*
	 * Allocate memory for the rx_sw_packet structures. Each one of these
	 * structures will contain a virtual and physical address to an actual
	 * receive buffer in host memory. Since we use one rx_sw_packet per
	 * received packet, the maximum number of rx_sw_packet that we'll
	 * need is equal to the number of receive descriptors plus the freelist
	 * size.
	 */
	packet_num = Adapter->rx_desc_num + Adapter->rx_freelist_num;
	rx_data->packet_area = NULL;

	for (i = 0; i < packet_num; i++) {
		packet = e1000g_alloc_rx_sw_packet(rx_data, &dma_attr);
		if (packet == NULL)
			goto rx_pkt_fail;

		packet->next = rx_data->packet_area;
		rx_data->packet_area = packet;
	}

	return (DDI_SUCCESS);

rx_pkt_fail:
	e1000g_free_rx_packets(rx_data);

	return (DDI_FAILURE);
}

static p_rx_sw_packet_t
e1000g_alloc_rx_sw_packet(e1000g_rx_data_t *rx_data, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	p_rx_sw_packet_t packet;
	dma_buffer_t *rx_buf;
	struct e1000g *Adapter;

	Adapter = rx_data->rx_ring->adapter;

	packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
	if (packet == NULL) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate memory for Rx SwPacket\n");
		return (NULL);
	}

	rx_buf = packet->rx_buf;

	switch (e1000g_dma_type) {
#ifdef __sparc
	case USE_DVMA:
		mystat = e1000g_alloc_dvma_buffer(Adapter,
		    rx_buf, Adapter->rx_buffer_size);
		break;
#endif
	case USE_DMA:
		if (Adapter->mem_workaround_82546 &&
		    ((Adapter->shared.mac.type == e1000_82545) ||
		    (Adapter->shared.mac.type == e1000_82546) ||
		    (Adapter->shared.mac.type == e1000_82546_rev_3))) {
			mystat = e1000g_alloc_dma_buffer_82546(Adapter,
			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		} else {
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		}
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	if (mystat != DDI_SUCCESS) {
		if (packet != NULL)
			kmem_free(packet, sizeof (rx_sw_packet_t));

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate Rx buffer\n");
		return (NULL);
	}

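	/*
	 * Reserve E1000G_IPALIGNROOM bytes at the head of the buffer so
	 * that the IP header of a received Ethernet frame ends up better
	 * aligned for the upper layers.
	 */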
	rx_buf->size -= E1000G_IPALIGNROOM;
	rx_buf->address += E1000G_IPALIGNROOM;
	rx_buf->dma_address += E1000G_IPALIGNROOM;

	packet->rx_data = (caddr_t)rx_data;
	packet->free_rtn.free_func = e1000g_rxfree_func;
	packet->free_rtn.free_arg = (char *)packet;
	/*
	 * desballoc is used here instead of esballoc. Although it is an
	 * undocumented call, Sun has confirmed that we may use it, and it
	 * is more efficient: the free_rtn callback set above runs when
	 * the last reference to the mblk is released, letting the buffer
	 * be recycled.
	 */
	packet->mp = desballoc((unsigned char *)
	    rx_buf->address,
	    rx_buf->size,
	    BPRI_MED, &packet->free_rtn);

	packet->dma_type = e1000g_dma_type;
	packet->ref_cnt = 1;

	return (packet);
}

void
e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet, boolean_t full_release)
{
	dma_buffer_t *rx_buf;

	if (packet->mp != NULL) {
		freemsg(packet->mp);
		packet->mp = NULL;
	}

	rx_buf = packet->rx_buf;

	switch (packet->dma_type) {
#ifdef __sparc
	case USE_DVMA:
		if (rx_buf->address != NULL) {
			rx_buf->size += E1000G_IPALIGNROOM;
			rx_buf->address -= E1000G_IPALIGNROOM;
		}
		e1000g_free_dvma_buffer(rx_buf);
		break;
#endif
	case USE_DMA:
		e1000g_free_dma_buffer(rx_buf);
		break;
	default:
		break;
	}

	packet->dma_type = USE_NONE;

	if (!full_release)
		return;

	kmem_free(packet, sizeof (rx_sw_packet_t));
}

static void
e1000g_free_rx_packets(e1000g_rx_data_t *rx_data)
{
	p_rx_sw_packet_t packet, next_packet;
	uint32_t ref_cnt;

	mutex_enter(&e1000g_rx_detach_lock);

	packet = rx_data->packet_area;
	while (packet != NULL) {
		next_packet = packet->next;

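		/*
		 * Drop our reference. If the mblk is still loaned to the
		 * upper layers (the count stays nonzero), leave the packet
		 * pending; e1000g_rxfree_func() completes the free when
		 * the mblk comes back.
		 */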
		ref_cnt = atomic_dec_32_nv(&packet->ref_cnt);
		if (ref_cnt > 0) {
			atomic_inc_32(&rx_data->pending_count);
			atomic_inc_32(&e1000g_mblks_pending);
		} else {
			e1000g_free_rx_sw_packet(packet, B_FALSE);
		}

		packet = next_packet;
	}

	mutex_exit(&e1000g_rx_detach_lock);
}


static void
e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	struct e1000g *Adapter;
	p_tx_sw_packet_t packet;
	dma_buffer_t *tx_buf;

	Adapter = tx_ring->adapter;

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		if (packet == NULL)
			break;

		/* Free the Tx DMA handle for dynamic binding */
		if (packet->tx_dma_handle != NULL) {
			switch (packet->dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, the remaining packets
			 * need not be checked: they have either never been
			 * initialized or have already been freed.
			 */
			break;
		}

		tx_buf = packet->tx_buf;

		switch (packet->dma_type) {
#ifdef __sparc
		case USE_DVMA:
			e1000g_free_dvma_buffer(tx_buf);
			break;
#endif
		case USE_DMA:
			e1000g_free_dma_buffer(tx_buf);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}

		packet->dma_type = USE_NONE;
	}
	if (tx_ring->packet_area != NULL) {
		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
		tx_ring->packet_area = NULL;
	}
}

/*
 * e1000g_release_dma_resources - release allocated DMA resources
 *
 * This function releases any pending buffers that have been
 * previously allocated.
 */
void
e1000g_release_dma_resources(struct e1000g *Adapter)
{
	e1000g_free_descriptors(Adapter);
	e1000g_free_packets(Adapter);
}

/* ARGSUSED */
void
e1000g_set_fma_flags(struct e1000g *Adapter, int acc_flag, int dma_flag)
{
	if (acc_flag) {
		e1000g_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		e1000g_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (dma_flag) {
		e1000g_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		e1000g_tx_dma_attr.dma_attr_flags = 0;
		e1000g_buf_dma_attr.dma_attr_flags = 0;
		e1000g_desc_dma_attr.dma_attr_flags = 0;
	}
}