/*
 * This file is provided under a CDDLv1 license.  When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * **********************************************************************
 * Module Name:								*
 *   e1000g_alloc.c							*
 *									*
 * Abstract:								*
 *   This file contains some routines that take care of			*
 *   memory allocation for descriptors and buffers.			*
 *									*
 * **********************************************************************
 */

#include "e1000g_sw.h"
#include "e1000g_debug.h"

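/*
 * Note: this macro references a variable named "Adapter", so it may
 * only be used in functions where such a local or parameter is in scope.
 */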
#define	TX_SW_PKT_AREA_SZ \
	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)

static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_descriptors(e1000g_rx_data_t *);
static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
static void e1000g_free_rx_descriptors(e1000g_rx_data_t *);
static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_packets(e1000g_rx_data_t *);
static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
static void e1000g_free_rx_packets(e1000g_rx_data_t *);
static int e1000g_alloc_dma_buffer(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);

/*
 * To avoid address errors when crossing a 64KB boundary during
 * PCI-X packet reception, e1000g_alloc_dma_buffer_82546 is used
 * by the adapter types that require it.
 */
static int e1000g_alloc_dma_buffer_82546(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
static int e1000g_dma_mem_alloc_82546(dma_buffer_t *buf,
    size_t size, size_t *len);
static boolean_t e1000g_cross_64k_bound(void *, uintptr_t);

static void e1000g_free_dma_buffer(dma_buffer_t *);
#ifdef __sparc
static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
static void e1000g_free_dvma_buffer(dma_buffer_t *);
#endif
static int e1000g_alloc_descriptors(struct e1000g *Adapter);
static void e1000g_free_descriptors(struct e1000g *Adapter);
static int e1000g_alloc_packets(struct e1000g *Adapter);
static void e1000g_free_packets(struct e1000g *Adapter);
static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_data_t *,
    ddi_dma_attr_t *p_dma_attr);

/* DMA access attributes for descriptors <Little Endian> */
static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* DMA access attributes for DMA buffers */
#ifdef __sparc
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC,
};
#else
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
#endif

/* DMA attributes for tx mblk buffers */
static ddi_dma_attr_t e1000g_tx_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	MAX_COOKIES,		/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for pre-allocated rx/tx buffers */
static ddi_dma_attr_t e1000g_buf_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for rx/tx descriptors */
static ddi_dma_attr_t e1000g_desc_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	E1000_MDALIGN,		/* default alignment is 4k but can be changed */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

#ifdef __sparc
static ddi_dma_lim_t e1000g_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};
#endif

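/*
 * Fast DVMA is preferred on SPARC; e1000g_alloc_packets() falls back
 * to regular DDI DMA at runtime if DVMA resources run short.
 */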
#ifdef __sparc
static dma_type_t e1000g_dma_type = USE_DVMA;
#else
static dma_type_t e1000g_dma_type = USE_DMA;
#endif

extern krwlock_t e1000g_dma_type_lock;


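/*
 * e1000g_alloc_dma_resources - allocate descriptor and packet buffer
 * DMA resources, halving the descriptor/freelist counts and retrying
 * on failure until the minimums are reached.
 */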
int
e1000g_alloc_dma_resources(struct e1000g *Adapter)
{
	int result;

	result = DDI_FAILURE;

	while ((result != DDI_SUCCESS) &&
	    (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
	    (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
	    (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST) &&
	    (Adapter->rx_freelist_num >= MIN_NUM_RX_FREELIST)) {

		result = e1000g_alloc_descriptors(Adapter);

		if (result == DDI_SUCCESS) {
			result = e1000g_alloc_packets(Adapter);

			if (result != DDI_SUCCESS)
				e1000g_free_descriptors(Adapter);
		}

		/*
		 * If the allocation fails due to resource shortage,
		 * we'll reduce the number of descriptors/buffers by
		 * half, and try the allocation again.
		 */
		if (result != DDI_SUCCESS) {
			/*
			 * We must ensure the number of descriptors
			 * is always a multiple of 8. Shifting right
			 * by 4 then back left by 3 halves the count
			 * while rounding it down to a multiple of 8.
			 */
			Adapter->tx_desc_num =
			    (Adapter->tx_desc_num >> 4) << 3;
			Adapter->rx_desc_num =
			    (Adapter->rx_desc_num >> 4) << 3;

			Adapter->tx_freelist_num >>= 1;
			Adapter->rx_freelist_num >>= 1;
		}
	}

	return (result);
}

/*
 * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
 *
 * This routine allocates the necessary DMA buffers for the
 *	Transmit Descriptor Area
 *	Receive Descriptor Area
 */
static int
e1000g_alloc_descriptors(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	if (Adapter->mem_workaround_82546 &&
	    ((Adapter->shared.mac.type == e1000_82545) ||
	    (Adapter->shared.mac.type == e1000_82546) ||
	    (Adapter->shared.mac.type == e1000_82546_rev_3))) {
		/* Align on a 64k boundary for these adapter types */
		Adapter->desc_align = E1000_MDALIGN_82546;
	} else {
		/* Align on a 4k boundary for all other adapter types */
		Adapter->desc_align = E1000_MDALIGN;
	}

	tx_ring = Adapter->tx_ring;

	result = e1000g_alloc_tx_descriptors(tx_ring);
	if (result != DDI_SUCCESS)
		return (DDI_FAILURE);

	rx_data = Adapter->rx_ring->rx_data;

	result = e1000g_alloc_rx_descriptors(rx_data);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
e1000g_free_descriptors(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

	e1000g_free_tx_descriptors(tx_ring);
	e1000g_free_rx_descriptors(rx_data);
}

static int
e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Solaris 7 has a problem with allocating physically contiguous memory
	 * that is aligned on a 4K boundary. The transmit and rx descriptors
	 * need to be aligned on a 4Kbyte boundary. We first try to allocate
	 * the memory with DMA attributes set to 4K alignment and also no
	 * scatter/gather mechanism specified. In most cases, this does not
	 * allocate memory aligned at a 4Kbyte boundary. We then try asking
	 * for memory aligned on a 4K boundary with scatter/gather set to 2.
	 * This works when the amount of memory is less than 4k, i.e. a page
	 * size. If neither of these options works, or if the number of
	 * descriptors is greater than 4K, i.e. more than 256 descriptors,
	 * we allocate 4k of extra memory and then align it at a 4k boundary.
	 */
	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;

	/*
	 * Memory allocation for the transmit buffer descriptors.
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the transmit descriptor
	 * memory area.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &tx_ring->tbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate tbd dma handle: %d", mystat);
		tx_ring->tbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&tx_ring->tbd_area,
	    &len, &tx_ring->tbd_acc_handle);

	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)tx_ring->tbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	if (alloc_flag)
		bzero(tx_ring->tbd_area, len);

	/*
	 * If the previous DMA attributes setting could not give us contiguous
	 * memory or the number of descriptors is greater than the page size,
	 * we allocate extra memory and then align it at appropriate boundary.
	 */
	if (!alloc_flag) {
		size = size + Adapter->desc_align;

		/*
		 * DMA attributes set to no scatter/gather and byte
		 * alignment.
		 */
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;

		/*
		 * Allocate a new DMA handle for the transmit descriptor memory
		 * area.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &tx_ring->tbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate tbd dma handle: %d", mystat);
			tx_ring->tbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		/*
		 * Allocate memory to DMA data to and from the transmit
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&tx_ring->tbd_area,
		    &len, &tx_ring->tbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tbd dma memory: %d", mystat);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
			if (tx_ring->tbd_dma_handle != NULL) {
				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
				tx_ring->tbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the entire transmit buffer descriptor area to zero
		 */
		bzero(tx_ring->tbd_area, len);
		/*
		 * Memory has been allocated with the ddi_dma_mem_alloc call,
		 * but has not been aligned.
		 * We now align it on the appropriate boundary.
		 */
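		/*
		 * P2NPHASE(x, a) returns the number of bytes from x up to
		 * the next a-byte boundary, i.e. the padding required to
		 * align tbd_area.
		 */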
		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)tx_ring->tbd_area;
		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * Transmit buffer descriptor memory allocation succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound
	 * to the memory address.
	 */
	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource: %d", mystat);
		if (tx_ring->tbd_acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);	/* 1 cookie */

	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
	tx_ring->tbd_first = tx_ring->tbd_area;
	tx_ring->tbd_last = tx_ring->tbd_first +
	    (Adapter->tx_desc_num - 1);

	return (DDI_SUCCESS);
}

static int
e1000g_alloc_rx_descriptors(e1000g_rx_data_t *rx_data)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_data->rx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Memory allocation for the receive buffer descriptors.
	 */
	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;

	/*
	 * Ask for aligned memory with the DMA attributes set to a
	 * suitable value.
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the receive descriptors
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &rx_data->rbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate rbd dma handle: %d", mystat);
		rx_data->rbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}
	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(rx_data->rbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&rx_data->rbd_area,
	    &len, &rx_data->rbd_acc_handle);

	/*
	 * Check if memory allocation succeeded and also if the
	 * allocated memory is aligned correctly.
	 */
	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)rx_data->rbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&rx_data->rbd_acc_handle);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
		}
		if (rx_data->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_dma_handle);
			rx_data->rbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the allocated receive descriptor memory to zero.
	 */
	if (alloc_flag)
		bzero((caddr_t)rx_data->rbd_area, len);

	/*
	 * If memory allocation did not succeed, do the alignment ourselves
	 */
	if (!alloc_flag) {
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;
		size = size + Adapter->desc_align;
		/*
		 * Allocate a new DMA handle for the receive descriptor.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &rx_data->rbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate rbd dma handle: %d", mystat);
			rx_data->rbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}
		/*
		 * Allocate memory to DMA data to and from the receive
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(rx_data->rbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&rx_data->rbd_area,
		    &len, &rx_data->rbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate rbd dma memory: %d", mystat);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
			if (rx_data->rbd_dma_handle != NULL) {
				ddi_dma_free_handle(&rx_data->rbd_dma_handle);
				rx_data->rbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the allocated receive descriptor memory to zero.
		 */
		bzero((caddr_t)rx_data->rbd_area, len);
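		/*
		 * As in the tx case, P2NPHASE() computes the padding
		 * needed to advance rbd_area to the next desc_align
		 * boundary.
		 */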
		templong = P2NPHASE((uintptr_t)rx_data->rbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)rx_data->rbd_area;
		rx_data->rbd_area = (struct e1000_rx_desc *)templong;
	}	/* alignment workaround */

	/*
	 * The memory allocation of the receive descriptors succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	mystat = ddi_dma_addr_bind_handle(rx_data->rbd_dma_handle,
	    (struct as *)NULL, (caddr_t)rx_data->rbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource: %d", mystat);
		if (rx_data->rbd_acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_acc_handle);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
		}
		if (rx_data->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_dma_handle);
			rx_data->rbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);
	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_rx_descriptors(rx_data);
		return (DDI_FAILURE);
	}

	rx_data->rbd_dma_addr = cookie.dmac_laddress;
	rx_data->rbd_first = rx_data->rbd_area;
	rx_data->rbd_last = rx_data->rbd_first +
	    (Adapter->rx_desc_num - 1);

	return (DDI_SUCCESS);
}

static void
e1000g_free_rx_descriptors(e1000g_rx_data_t *rx_data)
{
	if (rx_data->rbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_data->rbd_dma_handle);
	}
	if (rx_data->rbd_acc_handle != NULL) {
		ddi_dma_mem_free(&rx_data->rbd_acc_handle);
		rx_data->rbd_acc_handle = NULL;
		rx_data->rbd_area = NULL;
	}
	if (rx_data->rbd_dma_handle != NULL) {
		ddi_dma_free_handle(&rx_data->rbd_dma_handle);
		rx_data->rbd_dma_handle = NULL;
	}
	rx_data->rbd_dma_addr = NULL;
	rx_data->rbd_first = NULL;
	rx_data->rbd_last = NULL;
}

static void
e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
	}
	if (tx_ring->tbd_acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
		tx_ring->tbd_acc_handle = NULL;
		tx_ring->tbd_area = NULL;
	}
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
		tx_ring->tbd_dma_handle = NULL;
	}
	tx_ring->tbd_dma_addr = NULL;
	tx_ring->tbd_first = NULL;
	tx_ring->tbd_last = NULL;
}


/*
 * e1000g_alloc_packets - allocate DMA buffers for rx/tx
 *
 * This routine allocates the necessary buffers for the
 *	 Transmit sw packet structure
 *	 DMA handle for Transmit
 *	 DMA buffer for Transmit
 *	 Receive sw packet structure
 *	 DMA buffer for Receive
 */
static int
e1000g_alloc_packets(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

again:
	rw_enter(&e1000g_dma_type_lock, RW_READER);

	result = e1000g_alloc_tx_packets(tx_ring);
	if (result != DDI_SUCCESS) {
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Tx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Tx packets\n");
		return (DDI_FAILURE);
	}

	result = e1000g_alloc_rx_packets(rx_data);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_packets(tx_ring);
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Rx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Rx packets\n");
		return (DDI_FAILURE);
	}

	rw_exit(&e1000g_dma_type_lock);

	return (DDI_SUCCESS);
}

static void
e1000g_free_packets(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

	e1000g_free_tx_packets(tx_ring);
	e1000g_free_rx_packets(rx_data);
}

#ifdef __sparc
static int
e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = dvma_reserve(devinfo,
	    &e1000g_dma_limits,
	    Adapter->dvma_page_num,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	buf->address = kmem_alloc(size, KM_NOSLEEP);

	if (buf->address == NULL) {
		if (buf->dma_handle != NULL) {
			dvma_release(buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer memory\n");
		return (DDI_FAILURE);
	}

	dvma_kaddr_load(buf->dma_handle,
	    buf->address, size, 0, &cookie);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = size;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dvma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		dvma_unload(buf->dma_handle, 0, -1);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->address != NULL) {
		kmem_free(buf->address, buf->size);
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		dvma_release(buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
#endif

static int
e1000g_alloc_dma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->address,
	    &len, &buf->acc_handle);

	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

/*
 * e1000g_alloc_dma_buffer_82546 - allocate a dma buffer along with all
 * necessary handles.  Same as e1000g_alloc_dma_buffer() except that it
 * ensures the buffer doesn't cross a 64k boundary.
 */
static int
e1000g_alloc_dma_buffer_82546(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = e1000g_dma_mem_alloc_82546(buf, size, &len);
	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

/*
 * e1000g_dma_mem_alloc_82546 - allocate a dma buffer, making up to
 * ALLOC_RETRY attempts to get a buffer that doesn't cross a 64k boundary.
 */
static int
e1000g_dma_mem_alloc_82546(dma_buffer_t *buf, size_t size, size_t *len)
{
#define	ALLOC_RETRY	10
	int stat;
	int cnt = 0;
	ddi_acc_handle_t hold[ALLOC_RETRY];

	while (cnt < ALLOC_RETRY) {
		hold[cnt] = NULL;

		/* allocate memory */
		stat = ddi_dma_mem_alloc(buf->dma_handle, size,
		    &e1000g_buf_acc_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
		    0, &buf->address, len, &buf->acc_handle);

		if (stat != DDI_SUCCESS) {
			break;
		}

		/*
		 * Check the 64k boundary:
		 * if it is bad, hold the buffer and retry;
		 * if it is good, exit the loop.
		 */
		if (e1000g_cross_64k_bound(buf->address, *len)) {
			hold[cnt] = buf->acc_handle;
			stat = DDI_FAILURE;
		} else {
			break;
		}

		cnt++;
	}

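	/*
	 * If every attempt crossed the boundary, stat still holds
	 * DDI_FAILURE from the last attempt when we fall out of the loop.
	 */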
	/* Release any held buffers that crossed the 64k boundary */
	for (--cnt; cnt >= 0; cnt--) {
		if (hold[cnt])
			ddi_dma_mem_free(&hold[cnt]);
	}

	return (stat);
}

/*
 * e1000g_cross_64k_bound - return true if the starting and ending
 * addresses cross a 64k boundary; otherwise return false.
 */
static boolean_t
e1000g_cross_64k_bound(void *addr, uintptr_t len)
{
	uintptr_t start = (uintptr_t)addr;
	uintptr_t end = start + len - 1;

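	/*
	 * The buffer stays within one 64k region exactly when start and
	 * end agree in all bits above bit 15. For example, start 0xfffe
	 * with len 4 gives end 0x10001; (start ^ end) >> 16 == 1, so that
	 * buffer crosses a boundary.
	 */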
	return (((start ^ end) >> 16) == 0 ? B_FALSE : B_TRUE);
}

static void
e1000g_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

static int
e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	p_tx_sw_packet_t packet;
	int mystat;
	dma_buffer_t *tx_buf;
	struct e1000g *Adapter;
	dev_info_t *devinfo;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;
	dma_attr = e1000g_buf_dma_attr;

	/*
	 * Memory allocation for the Transmit software structure, the transmit
	 * software packet. This structure stores all the relevant information
	 * for transmitting a single packet.
	 */
	tx_ring->packet_area =
	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);

	if (tx_ring->packet_area == NULL)
		return (DDI_FAILURE);

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		ASSERT(packet != NULL);

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting. The
		 * dynamic binding applies only to packets larger than
		 * tx_bcopy_thresh.
		 */
		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = dvma_reserve(devinfo,
			    &e1000g_dma_limits,
			    Adapter->dvma_page_num,
			    &packet->tx_dma_handle);
			break;
#endif
		case USE_DMA:
			mystat = ddi_dma_alloc_handle(devinfo,
			    &e1000g_tx_dma_attr,
			    DDI_DMA_DONTWAIT, 0,
			    &packet->tx_dma_handle);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tx dma handle: %d\n", mystat);
			goto tx_pkt_fail;
		}

		/*
		 * Pre-allocate transmit buffers for small packets whose
		 * size is less than tx_bcopy_thresh. The data of those
		 * small packets will be copied into the transmit buffers
		 * with bcopy() instead of using dynamic DMA binding. For
		 * small packets, bcopy gives better performance than DMA
		 * binding.
		 */
		tx_buf = packet->tx_buf;

		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = e1000g_alloc_dvma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size);
			break;
#endif
		case USE_DMA:
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size, &dma_attr);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			ASSERT(packet->tx_dma_handle != NULL);
			switch (e1000g_dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Failed to allocate Tx buffer\n");
			goto tx_pkt_fail;
		}

		packet->dma_type = e1000g_dma_type;
	} /* for */

	return (DDI_SUCCESS);

tx_pkt_fail:
	e1000g_free_tx_packets(tx_ring);

	return (DDI_FAILURE);
}

static int
e1000g_alloc_rx_packets(e1000g_rx_data_t *rx_data)
{
	int i;
	p_rx_sw_packet_t packet;
	struct e1000g *Adapter;
	uint32_t packet_num;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_data->rx_ring->adapter;
	dma_attr = e1000g_buf_dma_attr;
	dma_attr.dma_attr_align = Adapter->rx_buf_align;

	/*
	 * Allocate memory for the rx_sw_packet structures. Each one of these
	 * structures will contain a virtual and physical address to an actual
	 * receive buffer in host memory. Since we use one rx_sw_packet per
	 * received packet, the maximum number of rx_sw_packet that we'll
	 * need is equal to the number of receive descriptors plus the freelist
	 * size.
	 */
	packet_num = Adapter->rx_desc_num + Adapter->rx_freelist_num;
	rx_data->packet_area = NULL;

	for (i = 0; i < packet_num; i++) {
		packet = e1000g_alloc_rx_sw_packet(rx_data, &dma_attr);
		if (packet == NULL)
			goto rx_pkt_fail;

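		/* Prepend the new packet to the packet_area list */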
		packet->next = rx_data->packet_area;
		rx_data->packet_area = packet;
	}

	return (DDI_SUCCESS);

rx_pkt_fail:
	e1000g_free_rx_packets(rx_data);

	return (DDI_FAILURE);
}

static p_rx_sw_packet_t
e1000g_alloc_rx_sw_packet(e1000g_rx_data_t *rx_data, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	p_rx_sw_packet_t packet;
	dma_buffer_t *rx_buf;
	struct e1000g *Adapter;

	Adapter = rx_data->rx_ring->adapter;

	packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
	if (packet == NULL) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate memory for Rx SwPacket\n");
		return (NULL);
	}

	rx_buf = packet->rx_buf;

	switch (e1000g_dma_type) {
#ifdef __sparc
	case USE_DVMA:
		mystat = e1000g_alloc_dvma_buffer(Adapter,
		    rx_buf, Adapter->rx_buffer_size);
		break;
#endif
	case USE_DMA:
		if (Adapter->mem_workaround_82546 &&
		    ((Adapter->shared.mac.type == e1000_82545) ||
		    (Adapter->shared.mac.type == e1000_82546) ||
		    (Adapter->shared.mac.type == e1000_82546_rev_3))) {
			mystat = e1000g_alloc_dma_buffer_82546(Adapter,
			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		} else {
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		}
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	if (mystat != DDI_SUCCESS) {
		if (packet != NULL)
			kmem_free(packet, sizeof (rx_sw_packet_t));

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate Rx buffer\n");
		return (NULL);
	}

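	/*
	 * Skip E1000G_IPALIGNROOM bytes (conventionally a 2-byte pad) at
	 * the head of the buffer so the IP header is better aligned after
	 * the 14-byte Ethernet header.
	 */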
	rx_buf->size -= E1000G_IPALIGNROOM;
	rx_buf->address += E1000G_IPALIGNROOM;
	rx_buf->dma_address += E1000G_IPALIGNROOM;

	packet->rx_data = (caddr_t)rx_data;
	packet->free_rtn.free_func = e1000g_rxfree_func;
	packet->free_rtn.free_arg = (char *)packet;
	/*
	 * esballoc was changed to desballoc, which is an undocumented
	 * call, but per Sun we can use it; it gives better efficiency.
	 */
	packet->mp = desballoc((unsigned char *)
	    rx_buf->address,
	    rx_buf->size,
	    BPRI_MED, &packet->free_rtn);
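	/* Note: desballoc() can fail and leave packet->mp NULL */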

	packet->dma_type = e1000g_dma_type;
	packet->ref_cnt = 1;

	return (packet);
}

void
e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet, boolean_t full_release)
{
	dma_buffer_t *rx_buf;

	if (packet->mp != NULL) {
		freemsg(packet->mp);
		packet->mp = NULL;
	}

	rx_buf = packet->rx_buf;

	switch (packet->dma_type) {
#ifdef __sparc
	case USE_DVMA:
		if (rx_buf->address != NULL) {
			rx_buf->size += E1000G_IPALIGNROOM;
			rx_buf->address -= E1000G_IPALIGNROOM;
		}
		e1000g_free_dvma_buffer(rx_buf);
		break;
#endif
	case USE_DMA:
		e1000g_free_dma_buffer(rx_buf);
		break;
	default:
		break;
	}

	packet->dma_type = USE_NONE;

	if (!full_release)
		return;

	kmem_free(packet, sizeof (rx_sw_packet_t));
}

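/*
 * e1000g_free_rx_packets - free the rx packet list; packets whose
 * buffers are still loaned upstream (ref_cnt stays above zero after
 * the decrement) are counted as pending and left for the rx free
 * callback to reclaim.
 */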
static void
e1000g_free_rx_packets(e1000g_rx_data_t *rx_data)
{
	p_rx_sw_packet_t packet, next_packet;
	uint32_t ref_cnt;

	mutex_enter(&e1000g_rx_detach_lock);

	packet = rx_data->packet_area;
	while (packet != NULL) {
		next_packet = packet->next;

		ref_cnt = atomic_dec_32_nv(&packet->ref_cnt);
		if (ref_cnt > 0) {
			atomic_inc_32(&rx_data->pending_count);
			atomic_inc_32(&e1000g_mblks_pending);
		} else {
			e1000g_free_rx_sw_packet(packet, B_FALSE);
		}

		packet = next_packet;
	}

	mutex_exit(&e1000g_rx_detach_lock);
}


static void
e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	struct e1000g *Adapter;
	p_tx_sw_packet_t packet;
	dma_buffer_t *tx_buf;

	Adapter = tx_ring->adapter;

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		if (packet == NULL)
			break;

		/* Free the Tx DMA handle used for dynamic binding */
		if (packet->tx_dma_handle != NULL) {
			switch (packet->dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, we don't need to
			 * check the remaining packets: they have either
			 * not been initialized or have already been freed.
			 */
			break;
		}

		tx_buf = packet->tx_buf;

		switch (packet->dma_type) {
#ifdef __sparc
		case USE_DVMA:
			e1000g_free_dvma_buffer(tx_buf);
			break;
#endif
		case USE_DMA:
			e1000g_free_dma_buffer(tx_buf);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}

		packet->dma_type = USE_NONE;
	}
	if (tx_ring->packet_area != NULL) {
		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
		tx_ring->packet_area = NULL;
	}
}

/*
 * e1000g_release_dma_resources - release allocated DMA resources
 *
 * This function releases any pending buffers that have been
 * previously allocated.
 */
void
e1000g_release_dma_resources(struct e1000g *Adapter)
{
	e1000g_free_descriptors(Adapter);
	e1000g_free_packets(Adapter);
}

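/*
 * e1000g_set_fma_flags - toggle DDI_DMA_FLAGERR (FMA DMA error
 * reporting) on all of the driver's DMA attribute templates.
 */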
/* ARGSUSED */
void
e1000g_set_fma_flags(int dma_flag)
{
	if (dma_flag) {
		e1000g_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		e1000g_tx_dma_attr.dma_attr_flags = 0;
		e1000g_buf_dma_attr.dma_attr_flags = 0;
		e1000g_desc_dma_attr.dma_attr_flags = 0;
	}
}