/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms of the CDDL.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "igb_sw.h"

static int igb_alloc_tbd_ring(igb_tx_ring_t *);
static void igb_free_tbd_ring(igb_tx_ring_t *);
static int igb_alloc_rbd_ring(igb_rx_ring_t *);
static void igb_free_rbd_ring(igb_rx_ring_t *);
static int igb_alloc_dma_buffer(igb_t *, dma_buffer_t *, size_t);
static void igb_free_dma_buffer(dma_buffer_t *);
static int igb_alloc_tcb_lists(igb_tx_ring_t *);
static void igb_free_tcb_lists(igb_tx_ring_t *);
static int igb_alloc_rcb_lists(igb_rx_ring_t *);
static void igb_free_rcb_lists(igb_rx_ring_t *);
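
/*
 * DMA alignment: these values appear to correspond to the base MMU page
 * size (8 KB on sparc, 4 KB elsewhere), so every descriptor ring and
 * packet buffer starts on a page boundary.
 */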
#ifdef __sparc
#define	IGB_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IGB_DMA_ALIGNMENT	0x0000000000001000ull
#endif

/*
 * DMA attributes for tx/rx descriptors
 */
static ddi_dma_attr_t igb_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	0				/* DMA flags */
};
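
/*
 * The descriptor attributes specify a scatter/gather list length of 1:
 * each descriptor ring must occupy a single physically contiguous
 * region, so ddi_dma_addr_bind_handle() is expected to return exactly
 * one cookie (asserted in the allocation routines below).
 */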

/*
 * DMA attributes for tx/rx buffers
 */
static ddi_dma_attr_t igb_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	0				/* DMA flags */
};

/*
 * DMA attributes for transmit
 */
static ddi_dma_attr_t igb_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	0				/* DMA flags */
};
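
/*
 * igb_tx_dma_attr is used for binding mblks passed down from the stack
 * at transmit time: byte alignment and a scatter/gather list of up to
 * MAX_COOKIE cookies allow a fragmented packet to be mapped in place
 * rather than copied.
 */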

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t igb_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for buffers.
 */
static ddi_device_acc_attr_t igb_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
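
/*
 * Descriptors are defined by the hardware as little-endian structures,
 * hence DDI_STRUCTURE_LE_ACC; packet buffers are raw bytes that must
 * never be byte-swapped, hence DDI_NEVERSWAP_ACC.
 */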


/*
 * igb_alloc_dma - Allocate DMA resources for all rx/tx rings
 */
int
igb_alloc_dma(igb_t *igb)
{
	igb_rx_ring_t *rx_ring;
	igb_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < igb->num_rx_rings; i++) {
		/*
		 * Allocate receive descriptor ring and control block lists
		 */
		rx_ring = &igb->rx_rings[i];

		if (igb_alloc_rbd_ring(rx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_rcb_lists(rx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}

	for (i = 0; i < igb->num_tx_rings; i++) {
		/*
		 * Allocate transmit descriptor ring and control block lists
		 */
		tx_ring = &igb->tx_rings[i];

		if (igb_alloc_tbd_ring(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_tcb_lists(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}

	return (IGB_SUCCESS);
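
	/*
	 * On any failure, unwind everything allocated so far. The
	 * igb_free_* routines all tolerate NULL handles and partially
	 * initialized rings, so a single call to igb_free_dma() is safe
	 * regardless of how far allocation progressed.
	 */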
alloc_dma_failure:
	igb_free_dma(igb);

	return (IGB_FAILURE);
}


/*
 * igb_free_dma - Free all the DMA resources of all rx/tx rings
 */
void
igb_free_dma(igb_t *igb)
{
	igb_rx_ring_t *rx_ring;
	igb_tx_ring_t *tx_ring;
	int i;

	/*
	 * Free DMA resources of rx rings
	 */
	for (i = 0; i < igb->num_rx_rings; i++) {
		rx_ring = &igb->rx_rings[i];
		igb_free_rbd_ring(rx_ring);
		igb_free_rcb_lists(rx_ring);
	}

	/*
	 * Free DMA resources of tx rings
	 */
	for (i = 0; i < igb->num_tx_rings; i++) {
		tx_ring = &igb->tx_rings[i];
		igb_free_tbd_ring(tx_ring);
		igb_free_tcb_lists(tx_ring);
	}
}

/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = tx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (igb->tx_head_wb_enable) {
		size += sizeof (union e1000_adv_tx_desc);
	}
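
	/*
	 * The ring is set up with the standard three-step DDI DMA
	 * sequence: ddi_dma_alloc_handle() creates the handle,
	 * ddi_dma_mem_alloc() allocates suitably aligned kernel memory,
	 * and ddi_dma_addr_bind_handle() binds the memory and yields the
	 * DMA cookie holding the device-visible address.
	 */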
	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources are then bound
	 * to the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}

/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = rx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_rx_desc) * rx_ring->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_ring->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_ring->rbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_ring->rbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_ring->rbd_area.address,
	    &len, &rx_ring->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_ring->rbd_area.acc_handle = NULL;
		rx_ring->rbd_area.address = NULL;
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_ring->rbd_area.address, len);

	/*
	 * Bind the memory allocated by the ddi_dma_mem_alloc call to
	 * the DMA handle.
	 */
	ret = ddi_dma_addr_bind_handle(rx_ring->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_ring->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind rbd dma resource: %x", ret);
		rx_ring->rbd_area.dma_address = NULL;
		if (rx_ring->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
			rx_ring->rbd_area.acc_handle = NULL;
			rx_ring->rbd_area.address = NULL;
		}
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_ring->rbd_area.dma_address = cookie.dmac_laddress;
	rx_ring->rbd_area.size = len;

	rx_ring->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
	    rx_ring->rbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
igb_free_rbd_ring(igb_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_ring->rbd_area.dma_handle);
	}
	if (rx_ring->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
		rx_ring->rbd_area.acc_handle = NULL;
	}
	if (rx_ring->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
		rx_ring->rbd_area.dma_handle = NULL;
	}
	rx_ring->rbd_area.address = NULL;
	rx_ring->rbd_area.dma_address = NULL;
	rx_ring->rbd_area.size = 0;

	rx_ring->rbd_ring = NULL;
}


/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer
 */
static int
igb_alloc_dma_buffer(igb_t *igb,
    dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = igb->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;
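
	/*
	 * Unlike the descriptor rings, which use DDI_DMA_CONSISTENT,
	 * packet buffers are allocated and bound DDI_DMA_STREAMING:
	 * each buffer is written or read once per packet, so streaming
	 * access is the better fit.
	 */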
	ret = ddi_dma_alloc_handle(devinfo,
	    &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		igb_error(igb,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &igb_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not bind dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IGB_SUCCESS);
}

/*
 * igb_free_dma_buffer - Free one allocated area of dma memory and handle
 */
static void
igb_free_dma_buffer(dma_buffer_t *buf)
{
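	/*
	 * The DMA handle is the first resource allocated in
	 * igb_alloc_dma_buffer(), so if it is NULL nothing else can
	 * have been allocated and there is nothing to free.
	 */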
	if (buf->dma_handle == NULL)
		return;

	(void) ddi_dma_unbind_handle(buf->dma_handle);
	buf->dma_address = NULL;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	ddi_dma_free_handle(&buf->dma_handle);
	buf->dma_handle = NULL;

	buf->size = 0;
	buf->len = 0;
}

/*
 * igb_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
igb_alloc_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	igb_t *igb = tx_ring->igb;
	dev_info_t *devinfo = igb->dip;
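
	/*
	 * Two lists track the transmit control blocks: the work list
	 * holds tcbs associated with descriptors currently owned by the
	 * hardware, while the free list holds tcbs available for new
	 * transmit requests.
	 */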
	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for tx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx free list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of the free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control blocks of the free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &igb_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			igb_error(igb,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size
		 * is less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = igb_alloc_dma_buffer(igb,
		    tx_buf, igb->tx_buf_size);

		if (ret != IGB_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			igb_error(igb, "Allocate tx dma buffer failed");
			goto alloc_tcb_lists_fail;
		}
	}

	return (IGB_SUCCESS);

alloc_tcb_lists_fail:
	igb_free_tcb_lists(tx_ring);

	return (IGB_FAILURE);
}

/*
 * igb_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
igb_free_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * Handles are allocated in order, so a NULL dma
			 * handle means this tcb and all the ones after
			 * it were never set up; there is nothing more
			 * to free.
			 */
			break;
		}

		igb_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}

/*
 * igb_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
igb_alloc_rcb_lists(igb_rx_ring_t *rx_ring)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	igb_t *igb = rx_ring->igb;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the work list.
	 */
	rx_ring->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->ring_size, KM_NOSLEEP);

	if (rx_ring->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for rx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_ring->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->free_list_size, KM_NOSLEEP);

	if (rx_ring->free_list == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for rx free list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the rx control blocks for the work list
	 * and the free list.
	 */
	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	rx_ring->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_ring->rcb_area == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for rx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the rx control blocks
	 */
	rcb = rx_ring->rcb_area;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_ring->ring_size) {
			/* Attach the rx control block to the work list */
			rx_ring->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_ring->free_list[i - rx_ring->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = igb_alloc_dma_buffer(igb,
		    rx_buf, igb->rx_buf_size);

		if (ret != IGB_SUCCESS) {
			igb_error(igb, "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}
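
		/*
		 * Reserve IPHDR_ALIGN_ROOM bytes (presumably 2, so that
		 * the IP header following the 14-byte Ethernet header
		 * lands on a 4-byte boundary) at the front of the buffer
		 * by shifting the usable addresses forward.
		 */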
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->state = RCB_FREE;
		rcb->rx_ring = (igb_rx_ring_t *)rx_ring;
		rcb->free_rtn.free_func = igb_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;
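
		/*
		 * Wrap the receive buffer in an mblk with desballoc(9F)
		 * so it can be passed up the stack without a copy; when
		 * the mblk is eventually freed, the registered free
		 * routine igb_rx_recycle() returns the rcb to the ring.
		 * desballoc() is given the original, unshifted address
		 * and size, and b_rptr/b_wptr are then advanced past the
		 * alignment room.
		 */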
		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address - IPHDR_ALIGN_ROOM,
		    rx_buf->size + IPHDR_ALIGN_ROOM,
		    0, &rcb->free_rtn);

		if (rcb->mp != NULL) {
			rcb->mp->b_rptr += IPHDR_ALIGN_ROOM;
			rcb->mp->b_wptr += IPHDR_ALIGN_ROOM;
		}
	}

	return (IGB_SUCCESS);

alloc_rcb_lists_fail:
	igb_free_rcb_lists(rx_ring);

	return (IGB_FAILURE);
}

/*
 * igb_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
igb_free_rcb_lists(igb_rx_ring_t *rx_ring)
{
	int i;
	rx_control_block_t *rcb;
	uint32_t rcb_count;

	rcb = rx_ring->rcb_area;
	if (rcb == NULL)
		return;

	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);
		ASSERT(rcb->state == RCB_FREE);

		if (rcb->mp != NULL) {
			freemsg(rcb->mp);
			rcb->mp = NULL;
		}

		igb_free_dma_buffer(&rcb->rx_buf);
	}

	if (rx_ring->rcb_area != NULL) {
		kmem_free(rx_ring->rcb_area,
		    sizeof (rx_control_block_t) * rcb_count);
		rx_ring->rcb_area = NULL;
	}

	if (rx_ring->work_list != NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;
	}

	if (rx_ring->free_list != NULL) {
		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;
	}
}