/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms of the CDDL.
 */

#include "igb_sw.h"

static int igb_alloc_tbd_ring(igb_tx_ring_t *);
static void igb_free_tbd_ring(igb_tx_ring_t *);
static int igb_alloc_rbd_ring(igb_rx_ring_t *);
static void igb_free_rbd_ring(igb_rx_ring_t *);
static int igb_alloc_dma_buffer(igb_t *, dma_buffer_t *, size_t);
static void igb_free_dma_buffer(dma_buffer_t *);
static int igb_alloc_tcb_lists(igb_tx_ring_t *);
static void igb_free_tcb_lists(igb_tx_ring_t *);
static int igb_alloc_rcb_lists(igb_rx_ring_t *);
static void igb_free_rcb_lists(igb_rx_ring_t *);

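/*
 * DMA alignment for descriptor rings and packet buffers: one base MMU
 * page (8KB on sparc, 4KB elsewhere).
 */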
#ifdef __sparc
#define	IGB_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IGB_DMA_ALIGNMENT	0x0000000000001000ull
#endif

/*
 * DMA attributes for tx/rx descriptors
 */
static ddi_dma_attr_t igb_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};

/*
 * DMA attributes for tx/rx buffers
 */
static ddi_dma_attr_t igb_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};
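
/*
 * Both attribute structures above specify a scatter/gather list length
 * of 1, so every bind of descriptor or pre-allocated buffer memory must
 * resolve to a single physically contiguous DMA cookie; the
 * ASSERT(cookie_num == 1) checks below rely on this.
 */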

/*
 * DMA attributes for transmit
 */
static ddi_dma_attr_t igb_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};
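
/*
 * Unlike the attributes above, the transmit bind attributes allow byte
 * alignment and up to MAX_COOKIE scatter/gather segments, since they
 * are used to bind mblk data passed down from the upper layers in
 * place rather than pre-allocated, naturally aligned memory.
 */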

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t igb_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for buffers.
 */
static ddi_device_acc_attr_t igb_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
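
/*
 * Descriptors are little-endian hardware structures, so their access
 * attributes request little-endian ordering; packet buffers hold raw
 * frame data and are mapped with no byte swapping.
 */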


/*
 * igb_alloc_dma - Allocate DMA resources for all rx/tx rings
 */
int
igb_alloc_dma(igb_t *igb)
{
	igb_rx_ring_t *rx_ring;
	igb_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < igb->num_rx_rings; i++) {
		/*
		 * Allocate receive descriptor ring and control block lists
		 */
		rx_ring = &igb->rx_rings[i];

		if (igb_alloc_rbd_ring(rx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_rcb_lists(rx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}

	for (i = 0; i < igb->num_tx_rings; i++) {
		/*
		 * Allocate transmit descriptor ring and control block lists
		 */
		tx_ring = &igb->tx_rings[i];

		if (igb_alloc_tbd_ring(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_tcb_lists(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}

	return (IGB_SUCCESS);

alloc_dma_failure:
	igb_free_dma(igb);

	return (IGB_FAILURE);
}


/*
 * igb_free_dma - Free all the DMA resources of all rx/tx rings
 */
void
igb_free_dma(igb_t *igb)
{
	igb_rx_ring_t *rx_ring;
	igb_tx_ring_t *tx_ring;
	int i;

	/*
	 * Free DMA resources of rx rings
	 */
	for (i = 0; i < igb->num_rx_rings; i++) {
		rx_ring = &igb->rx_rings[i];
		igb_free_rbd_ring(rx_ring);
		igb_free_rcb_lists(rx_ring);
	}

	/*
	 * Free DMA resources of tx rings
	 */
	for (i = 0; i < igb->num_tx_rings; i++) {
		tx_ring = &igb->tx_rings[i];
		igb_free_tbd_ring(tx_ring);
		igb_free_tcb_lists(tx_ring);
	}
}

/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = tx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (igb->tx_head_wb_enable) {
		size += sizeof (union e1000_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources are then bound
	 * to the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}

/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = rx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_rx_desc) * rx_ring->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_ring->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_ring->rbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_ring->rbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_ring->rbd_area.address,
	    &len, &rx_ring->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_ring->rbd_area.acc_handle = NULL;
		rx_ring->rbd_area.address = NULL;
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_ring->rbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_ring->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_ring->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind rbd dma resource: %x", ret);
		rx_ring->rbd_area.dma_address = NULL;
		if (rx_ring->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
			rx_ring->rbd_area.acc_handle = NULL;
			rx_ring->rbd_area.address = NULL;
		}
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_ring->rbd_area.dma_address = cookie.dmac_laddress;
	rx_ring->rbd_area.size = len;

	rx_ring->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
	    rx_ring->rbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
igb_free_rbd_ring(igb_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_ring->rbd_area.dma_handle);
	}
	if (rx_ring->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
		rx_ring->rbd_area.acc_handle = NULL;
	}
	if (rx_ring->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
		rx_ring->rbd_area.dma_handle = NULL;
	}
	rx_ring->rbd_area.address = NULL;
	rx_ring->rbd_area.dma_address = NULL;
	rx_ring->rbd_area.size = 0;

	rx_ring->rbd_ring = NULL;
}


/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer
 */
static int
igb_alloc_dma_buffer(igb_t *igb,
    dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = igb->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		igb_error(igb,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &igb_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not bind dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

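	/*
	 * Record the single cookie's device address; "size" is the usable
	 * length of the buffer while "len" tracks how much of it currently
	 * holds valid data.
	 */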
	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IGB_SUCCESS);
}

/*
 * igb_free_dma_buffer - Free one allocated area of dma memory and handle
 */
static void
igb_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
		buf->dma_address = NULL;
	} else {
		return;
	}

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

/*
 * igb_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
igb_alloc_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	igb_t *igb = tx_ring->igb;
	dev_info_t *devinfo = igb->dip;

	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for tx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx free list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control block of free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &igb_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			igb_error(igb,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size is
		 * less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = igb_alloc_dma_buffer(igb,
		    tx_buf, igb->tx_buf_size);

		if (ret != IGB_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			igb_error(igb, "Allocate tx dma buffer failed");
			goto alloc_tcb_lists_fail;
		}
	}

	return (IGB_SUCCESS);

alloc_tcb_lists_fail:
	igb_free_tcb_lists(tx_ring);

	return (IGB_FAILURE);
}

/*
 * igb_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
igb_free_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, the remaining tcbs
			 * were never initialized, so there is nothing
			 * more to free.
			 */
			break;
		}

		igb_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}

/*
 * igb_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
igb_alloc_rcb_lists(igb_rx_ring_t *rx_ring)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	igb_t *igb = rx_ring->igb;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the work list.
	 */
	rx_ring->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->ring_size, KM_NOSLEEP);

	if (rx_ring->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for rx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_ring->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->free_list_size, KM_NOSLEEP);

	if (rx_ring->free_list == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for rx free list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	rx_ring->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_ring->rcb_area == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for rx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the rx control blocks
	 */
	rcb = rx_ring->rcb_area;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_ring->ring_size) {
			/* Attach the rx control block to the work list */
			rx_ring->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_ring->free_list[i - rx_ring->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = igb_alloc_dma_buffer(igb,
		    rx_buf, igb->rx_buf_size);

		if (ret != IGB_SUCCESS) {
			igb_error(igb, "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}

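		/*
		 * Shift the usable buffer start by IPHDR_ALIGN_ROOM,
		 * presumably so that the IP header, which follows the
		 * 14-byte Ethernet header of a received frame, lands on
		 * a better-aligned boundary.
		 */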
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->state = RCB_FREE;
		rcb->rx_ring = (igb_rx_ring_t *)rx_ring;
		rcb->free_rtn.free_func = igb_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

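		/*
		 * Wrap the DMA buffer in an mblk via desballoc(9F); when
		 * an upstream consumer frees the message, the registered
		 * free routine (igb_rx_recycle) returns this rcb for
		 * reuse instead of the buffer being deallocated.
		 */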
		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address,
		    rx_buf->size,
		    0, &rcb->free_rtn);
	}

	return (IGB_SUCCESS);

alloc_rcb_lists_fail:
	igb_free_rcb_lists(rx_ring);

	return (IGB_FAILURE);
}

/*
 * igb_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
igb_free_rcb_lists(igb_rx_ring_t *rx_ring)
{
	int i;
	rx_control_block_t *rcb;
	uint32_t rcb_count;

	rcb = rx_ring->rcb_area;
	if (rcb == NULL)
		return;

	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);
		ASSERT(rcb->state == RCB_FREE);

		if (rcb->mp != NULL) {
			freemsg(rcb->mp);
			rcb->mp = NULL;
		}

		igb_free_dma_buffer(&rcb->rx_buf);
	}

	if (rx_ring->rcb_area != NULL) {
		kmem_free(rx_ring->rcb_area,
		    sizeof (rx_control_block_t) * rcb_count);
		rx_ring->rcb_area = NULL;
	}

	if (rx_ring->work_list != NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;
	}

	if (rx_ring->free_list != NULL) {
		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;
	}
}

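/*
 * igb_set_fma_flags - Enable or disable FMA error reporting in the
 * shared access and DMA attributes, according to the FMA capabilities
 * the driver has been configured with.
 */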
void
igb_set_fma_flags(int acc_flag, int dma_flag)
{
	if (acc_flag) {
		igb_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		igb_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (dma_flag) {
		igb_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		igb_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		igb_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		igb_tx_dma_attr.dma_attr_flags = 0;
		igb_buf_dma_attr.dma_attr_flags = 0;
		igb_desc_dma_attr.dma_attr_flags = 0;
	}
}