xref: /titanic_41/usr/src/uts/common/io/igb/igb_buf.c (revision 43b9c05035ac59f7f7a8e7827598db5a15f30ed3)
/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms of the CDDL.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "igb_sw.h"

static int igb_alloc_tbd_ring(igb_tx_ring_t *);
static void igb_free_tbd_ring(igb_tx_ring_t *);
static int igb_alloc_rbd_ring(igb_rx_ring_t *);
static void igb_free_rbd_ring(igb_rx_ring_t *);
static int igb_alloc_dma_buffer(igb_t *, dma_buffer_t *, size_t);
static void igb_free_dma_buffer(dma_buffer_t *);
static int igb_alloc_tcb_lists(igb_tx_ring_t *);
static void igb_free_tcb_lists(igb_tx_ring_t *);
static int igb_alloc_rcb_lists(igb_rx_ring_t *);
static void igb_free_rcb_lists(igb_rx_ring_t *);

#ifdef __sparc
#define	IGB_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IGB_DMA_ALIGNMENT	0x0000000000001000ull
#endif
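
/*
 * The descriptor ring and buffer alignment matches the native MMU page
 * size: 8 KB on sparc, 4 KB elsewhere.  This comfortably exceeds the
 * controller's minimum base-address alignment for descriptor rings.
 */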

/*
 * DMA attributes for tx/rx descriptors
 */
static ddi_dma_attr_t igb_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};

/*
 * DMA attributes for tx/rx buffers
 */
static ddi_dma_attr_t igb_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};

/*
 * DMA attributes for transmit
 */
static ddi_dma_attr_t igb_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};
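
/*
 * Note the difference from the two attribute sets above: transmit
 * bindings accept any byte alignment and up to MAX_COOKIE scatter/gather
 * segments, because these handles are bound to mblk data passed down
 * from the upper layers rather than to memory the driver allocated
 * itself.
 */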

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t igb_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for buffers.
 */
static ddi_device_acc_attr_t igb_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
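
/*
 * Descriptors are little-endian hardware structures, so they use
 * DDI_STRUCTURE_LE_ACC and accesses through the handle are byte-swapped
 * as needed on big-endian hosts.  Packet buffers carry raw wire data
 * that must never be swapped, hence DDI_NEVERSWAP_ACC.
 */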


/*
 * igb_alloc_dma - Allocate DMA resources for all rx/tx rings
 */
int
igb_alloc_dma(igb_t *igb)
{
	igb_rx_ring_t *rx_ring;
	igb_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < igb->num_rx_rings; i++) {
		/*
		 * Allocate receive descriptor ring and control block lists
		 */
		rx_ring = &igb->rx_rings[i];

		if (igb_alloc_rbd_ring(rx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_rcb_lists(rx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}

	for (i = 0; i < igb->num_tx_rings; i++) {
		/*
		 * Allocate transmit descriptor ring and control block lists
		 */
		tx_ring = &igb->tx_rings[i];

		if (igb_alloc_tbd_ring(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_tcb_lists(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}

	return (IGB_SUCCESS);

alloc_dma_failure:
	igb_free_dma(igb);

	return (IGB_FAILURE);
}
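
/*
 * Calling igb_free_dma() on a partially built set of rings is safe:
 * every igb_free_* routine below checks its handles and pointers for
 * NULL before releasing them, so rings that were never allocated are
 * simply skipped (assuming the ring structures were zero-filled when
 * they were allocated).
 */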


/*
 * igb_free_dma - Free all the DMA resources of all rx/tx rings
 */
void
igb_free_dma(igb_t *igb)
{
	igb_rx_ring_t *rx_ring;
	igb_tx_ring_t *tx_ring;
	int i;

	/*
	 * Free DMA resources of rx rings
	 */
	for (i = 0; i < igb->num_rx_rings; i++) {
		rx_ring = &igb->rx_rings[i];
		igb_free_rbd_ring(rx_ring);
		igb_free_rcb_lists(rx_ring);
	}

	/*
	 * Free DMA resources of tx rings
	 */
	for (i = 0; i < igb->num_tx_rings; i++) {
		tx_ring = &igb->tx_rings[i];
		igb_free_tbd_ring(tx_ring);
		igb_free_tcb_lists(tx_ring);
	}
}

/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = tx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (igb->tx_head_wb_enable) {
		size += sizeof (union e1000_adv_tx_desc);
	}
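
	/*
	 * DMA memory for a descriptor ring is set up in three steps:
	 *
	 *   1. ddi_dma_alloc_handle()     - allocate a DMA handle
	 *   2. ddi_dma_mem_alloc()        - allocate suitable memory
	 *   3. ddi_dma_addr_bind_handle() - bind the memory to the handle,
	 *	yielding the device-visible cookie address
	 *
	 * On any failure, the steps already taken are undone in reverse
	 * order; igb_free_tbd_ring() performs the same teardown for a
	 * fully built ring.
	 */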

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound
	 * to the memory address
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}

/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = rx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_rx_desc) * rx_ring->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_ring->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_ring->rbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_ring->rbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_ring->rbd_area.address,
	    &len, &rx_ring->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_ring->rbd_area.acc_handle = NULL;
		rx_ring->rbd_area.address = NULL;
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_ring->rbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_ring->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_ring->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind rbd dma resource: %x", ret);
		rx_ring->rbd_area.dma_address = NULL;
		if (rx_ring->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
			rx_ring->rbd_area.acc_handle = NULL;
			rx_ring->rbd_area.address = NULL;
		}
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_ring->rbd_area.dma_address = cookie.dmac_laddress;
	rx_ring->rbd_area.size = len;

	rx_ring->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
	    rx_ring->rbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
igb_free_rbd_ring(igb_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_ring->rbd_area.dma_handle);
	}
	if (rx_ring->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
		rx_ring->rbd_area.acc_handle = NULL;
	}
	if (rx_ring->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
		rx_ring->rbd_area.dma_handle = NULL;
	}
	rx_ring->rbd_area.address = NULL;
	rx_ring->rbd_area.dma_address = NULL;
	rx_ring->rbd_area.size = 0;

	rx_ring->rbd_ring = NULL;
}


/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer
 */
static int
igb_alloc_dma_buffer(igb_t *igb,
    dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = igb->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		igb_error(igb,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

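	/*
	 * Packet buffers are allocated and bound below with
	 * DDI_DMA_STREAMING: each buffer is transferred in one direction
	 * at a time in sequential bursts, unlike the descriptor rings,
	 * which use DDI_DMA_CONSISTENT because both the CPU and the
	 * device update them concurrently.
	 */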
	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &igb_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not bind dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IGB_SUCCESS);
}

/*
 * igb_free_dma_buffer - Free one allocated area of dma memory and handle
 */
static void
igb_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
		buf->dma_address = NULL;
	} else {
		return;
	}

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

/*
 * igb_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
igb_alloc_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	igb_t *igb = tx_ring->igb;
	dev_info_t *devinfo = igb->dip;

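	/*
	 * A tx ring keeps two lists of control blocks: the work list
	 * tracks tcbs attached to descriptors the hardware is still
	 * processing, while the free list holds tcbs ready for new
	 * transmits.  Each tcb carries a pre-allocated DMA handle for
	 * bind-mode sends and a small pre-bound buffer for bcopy-mode
	 * sends.
	 */
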
	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for tx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx free list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control block of free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &igb_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			igb_error(igb,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size
		 * is less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = igb_alloc_dma_buffer(igb,
		    tx_buf, igb->tx_buf_size);

		if (ret != IGB_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			igb_error(igb, "Allocate tx dma buffer failed");
			goto alloc_tcb_lists_fail;
		}
	}

	return (IGB_SUCCESS);

alloc_tcb_lists_fail:
	igb_free_tcb_lists(tx_ring);

	return (IGB_FAILURE);
}

/*
 * igb_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
igb_free_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, the remaining
			 * tcbs were never allocated, so there is
			 * nothing more to free.
			 */
			break;
		}

		igb_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}

/*
 * igb_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
igb_alloc_rcb_lists(igb_rx_ring_t *rx_ring)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	igb_t *igb = rx_ring->igb;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the work list.
	 */
	rx_ring->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->ring_size, KM_NOSLEEP);

	if (rx_ring->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for rx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_ring->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->free_list_size, KM_NOSLEEP);

	if (rx_ring->free_list == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for rx free list");
		return (IGB_FAILURE);
	}

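	/*
	 * The receive side needs one control block per descriptor in the
	 * work list plus free_list_size spares: when a buffer loaned to
	 * the upper stack is still held there, a spare rcb from the free
	 * list takes its place in the ring.
	 */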
	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	rx_ring->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_ring->rcb_area == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for rx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the rx control blocks
	 */
	rcb = rx_ring->rcb_area;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_ring->ring_size) {
			/* Attach the rx control block to the work list */
			rx_ring->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_ring->free_list[i - rx_ring->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = igb_alloc_dma_buffer(igb,
		    rx_buf, igb->rx_buf_size);

		if (ret != IGB_SUCCESS) {
			igb_error(igb, "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}

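		/*
		 * Reserve IPHDR_ALIGN_ROOM bytes at the front of the
		 * buffer and shift its start past them, so that the IP
		 * header, which follows the 14-byte Ethernet header,
		 * lands on a 4-byte boundary once a frame is received.
		 */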
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->state = RCB_FREE;
		rcb->rx_ring = (igb_rx_ring_t *)rx_ring;
		rcb->free_rtn.free_func = igb_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

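		/*
		 * Wrap the buffer in an mblk via desballoc(), registering
		 * igb_rx_recycle() as the free routine: when the last
		 * reference to the message is freed upstream, the buffer
		 * is recycled back to this ring instead of being
		 * deallocated.
		 */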
		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address - IPHDR_ALIGN_ROOM,
		    rx_buf->size + IPHDR_ALIGN_ROOM,
		    0, &rcb->free_rtn);

		if (rcb->mp != NULL) {
			rcb->mp->b_rptr += IPHDR_ALIGN_ROOM;
			rcb->mp->b_wptr += IPHDR_ALIGN_ROOM;
		}
	}

	return (IGB_SUCCESS);

alloc_rcb_lists_fail:
	igb_free_rcb_lists(rx_ring);

	return (IGB_FAILURE);
}

/*
 * igb_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
igb_free_rcb_lists(igb_rx_ring_t *rx_ring)
{
	int i;
	rx_control_block_t *rcb;
	uint32_t rcb_count;

	rcb = rx_ring->rcb_area;
	if (rcb == NULL)
		return;

	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);
		ASSERT(rcb->state == RCB_FREE);

		if (rcb->mp != NULL) {
			freemsg(rcb->mp);
			rcb->mp = NULL;
		}

		igb_free_dma_buffer(&rcb->rx_buf);
	}

	if (rx_ring->rcb_area != NULL) {
		kmem_free(rx_ring->rcb_area,
		    sizeof (rx_control_block_t) * rcb_count);
		rx_ring->rcb_area = NULL;
	}

	if (rx_ring->work_list != NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;
	}

	if (rx_ring->free_list != NULL) {
		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;
	}
}

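/*
 * igb_set_fma_flags - Toggle FMA error reporting in the static DMA and
 * access attributes above.  This must run before any handles are
 * allocated with these attributes; presumably it is called once from
 * the driver's FMA setup code with the negotiated capability flags.
 */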
void
igb_set_fma_flags(int acc_flag, int dma_flag)
{
	if (acc_flag) {
		igb_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		igb_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (dma_flag) {
		igb_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		igb_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		igb_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		igb_tx_dma_attr.dma_attr_flags = 0;
		igb_buf_dma_attr.dma_attr_flags = 0;
		igb_desc_dma_attr.dma_attr_flags = 0;
	}
}
893