/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *      http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms of the CDDL.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "ixgbe_sw.h"

static int ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *);
static void ixgbe_free_tbd_ring(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rbd_ring(ixgbe_rx_ring_t *);
static void ixgbe_free_rbd_ring(ixgbe_rx_ring_t *);
static int ixgbe_alloc_dma_buffer(ixgbe_t *, dma_buffer_t *, size_t);
static void ixgbe_free_dma_buffer(dma_buffer_t *);
static int ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *);
static void ixgbe_free_tcb_lists(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rcb_lists(ixgbe_rx_ring_t *);
static void ixgbe_free_rcb_lists(ixgbe_rx_ring_t *);

#ifdef __sparc
#define	IXGBE_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IXGBE_DMA_ALIGNMENT	0x0000000000001000ull
#endif
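
/*
 * IXGBE_DMA_ALIGNMENT is 8 KB on SPARC and 4 KB elsewhere, i.e. page
 * alignment on either platform. The hardware itself presumably needs a
 * much smaller alignment for its descriptor ring bases; page alignment
 * is a comfortable superset of that requirement.
 */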

/*
 * DMA attributes for tx/rx descriptors.
 */
static ddi_dma_attr_t ixgbe_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IXGBE_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};
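
/*
 * A scatter/gather list length of 1 means each descriptor ring must be
 * bound as a single, physically contiguous DMA cookie; the
 * ASSERT(cookie_num == 1) checks after each bind below rely on this.
 */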

/*
 * DMA attributes for tx/rx buffers.
 */
static ddi_dma_attr_t ixgbe_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IXGBE_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};

/*
 * DMA attributes for transmit.
 */
static ddi_dma_attr_t ixgbe_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};
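
/*
 * Unlike the descriptor and buffer attributes, the transmit attributes
 * allow up to MAX_COOKIE (from the driver's headers) DMA cookies, so a
 * packet bound for transmit may be split across multiple scatter/gather
 * segments handed to the hardware.
 */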

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t ixgbe_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for buffers.
 */
static ddi_device_acc_attr_t ixgbe_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
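
/*
 * Descriptors are little-endian hardware structures, hence
 * DDI_STRUCTURE_LE_ACC above; packet buffers are raw byte streams, so
 * no byte swapping is wanted (DDI_NEVERSWAP_ACC). Note that only the
 * descriptor access attributes carry DDI_FLAGERR_ACC; see
 * ixgbe_set_fma_flags() at the bottom of this file.
 */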

/*
 * ixgbe_alloc_dma - Allocate DMA resources for all rx/tx rings.
 */
int
ixgbe_alloc_dma(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		/*
		 * Allocate receive descriptor ring and control block lists
		 */
		rx_ring = &ixgbe->rx_rings[i];

		if (ixgbe_alloc_rbd_ring(rx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;

		if (ixgbe_alloc_rcb_lists(rx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		/*
		 * Allocate transmit descriptor ring and control block lists
		 */
		tx_ring = &ixgbe->tx_rings[i];

		if (ixgbe_alloc_tbd_ring(tx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;

		if (ixgbe_alloc_tcb_lists(tx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;
	}

	return (IXGBE_SUCCESS);

alloc_dma_failure:
	ixgbe_free_dma(ixgbe);

	return (IXGBE_FAILURE);
}
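
/*
 * On any failure ixgbe_alloc_dma() frees everything via
 * ixgbe_free_dma(); this is safe because every free routine below
 * checks its handles and pointers for NULL, so rings that were never
 * allocated are simply skipped. A caller in the attach path might use
 * it like this (a sketch, not taken from this file):
 *
 *	if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS)
 *		goto attach_fail;
 */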

/*
 * ixgbe_free_dma - Free all the DMA resources of all rx/tx rings.
 */
void
ixgbe_free_dma(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	/*
	 * Free DMA resources of rx rings
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_free_rbd_ring(rx_ring);
		ixgbe_free_rcb_lists(rx_ring);
	}

	/*
	 * Free DMA resources of tx rings
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_free_tbd_ring(tx_ring);
		ixgbe_free_tcb_lists(tx_ring);
	}
}

/*
 * ixgbe_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (ixgbe->tx_head_wb_enable) {
		size += sizeof (union ixgbe_adv_tx_desc);
	}
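
	/*
	 * With head write-back enabled, the hardware periodically DMAs
	 * the current tx head index into this extra descriptor slot, so
	 * the driver can read the recycle point from host memory
	 * instead of polling a device register.
	 */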

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources are then bound
	 * to the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union ixgbe_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
ixgbe_free_tbd_ring(ixgbe_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}

/*
 * ixgbe_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
ixgbe_alloc_rbd_ring(ixgbe_rx_ring_t *rx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = rx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_rx_desc) * rx_ring->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_ring->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_ring->rbd_area.dma_handle = NULL;
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_ring->rbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_ring->rbd_area.address,
	    &len, &rx_ring->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_ring->rbd_area.acc_handle = NULL;
		rx_ring->rbd_area.address = NULL;
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_ring->rbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_ring->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_ring->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind rbd dma resource: %x", ret);
		rx_ring->rbd_area.dma_address = NULL;
		if (rx_ring->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
			rx_ring->rbd_area.acc_handle = NULL;
			rx_ring->rbd_area.address = NULL;
		}
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_ring->rbd_area.dma_address = cookie.dmac_laddress;
	rx_ring->rbd_area.size = len;

	rx_ring->rbd_ring = (union ixgbe_adv_rx_desc *)(uintptr_t)
	    rx_ring->rbd_area.address;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
ixgbe_free_rbd_ring(ixgbe_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_ring->rbd_area.dma_handle);
	}
	if (rx_ring->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
		rx_ring->rbd_area.acc_handle = NULL;
	}
	if (rx_ring->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
		rx_ring->rbd_area.dma_handle = NULL;
	}
	rx_ring->rbd_area.address = NULL;
	rx_ring->rbd_area.dma_address = NULL;
	rx_ring->rbd_area.size = 0;

	rx_ring->rbd_ring = NULL;
}

/*
 * ixgbe_alloc_dma_buffer - Allocate DMA resources for a DMA buffer.
 */
static int
ixgbe_alloc_dma_buffer(ixgbe_t *ixgbe, dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = ixgbe->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &ixgbe_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &ixgbe_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not bind dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

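	/*
	 * dma_address is the device-visible address taken from the
	 * single cookie; size is the usable buffer length as returned
	 * (and possibly rounded up) by ddi_dma_mem_alloc(), while len
	 * tracks how much of the buffer currently holds valid data.
	 */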
	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_dma_buffer - Free one allocated area of dma memory and handle.
 */
static void
ixgbe_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle == NULL)
		return;

	(void) ddi_dma_unbind_handle(buf->dma_handle);
	buf->dma_address = NULL;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	ddi_dma_free_handle(&buf->dma_handle);
	buf->dma_handle = NULL;

	buf->size = 0;
	buf->len = 0;
}

/*
 * ixgbe_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	dev_info_t *devinfo = ixgbe->dip;

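	/*
	 * The work list tracks the tcbs bound to in-flight descriptors
	 * (one slot per descriptor), while the free list holds the tcbs
	 * available for new transmits.
	 */
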
	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx work list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx free list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of the free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx control blocks");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control blocks of the free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &ixgbe_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size
		 * is less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    tx_buf, ixgbe->tx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe, "Allocate tx dma buffer failed");
			goto alloc_tcb_lists_fail;
		}
	}

	return (IXGBE_SUCCESS);

alloc_tcb_lists_fail:
	ixgbe_free_tcb_lists(tx_ring);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
ixgbe_free_tcb_lists(ixgbe_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, this tcb and all
			 * the ones after it were never fully set up,
			 * so there is nothing more to free.
			 */
			break;
		}

		ixgbe_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}

/*
 * ixgbe_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
ixgbe_alloc_rcb_lists(ixgbe_rx_ring_t *rx_ring)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the work list.
	 */
	rx_ring->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->ring_size, KM_NOSLEEP);

	if (rx_ring->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx work list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_ring->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->free_list_size, KM_NOSLEEP);

	if (rx_ring->free_list == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx free list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	rx_ring->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_ring->rcb_area == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx control blocks");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate dma memory for the rx control blocks
	 */
	rcb = rx_ring->rcb_area;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_ring->ring_size) {
			/* Attach the rx control block to the work list */
			rx_ring->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_ring->free_list[i - rx_ring->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    rx_buf, ixgbe->rx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}

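		/*
		 * Reserve IPHDR_ALIGN_ROOM bytes at the head of each rx
		 * buffer so that the IP header ends up suitably aligned
		 * after the 14-byte Ethernet header.
		 */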
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->state = RCB_FREE;
		rcb->rx_ring = (ixgbe_rx_ring_t *)rx_ring;
		rcb->free_rtn.free_func = ixgbe_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

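		/*
		 * Wrap the DMA buffer in an mblk so received frames can
		 * be passed up without a copy; when the upper layer
		 * eventually frees the mblk, free_rtn invokes
		 * ixgbe_rx_recycle() to reclaim this rcb. A desballoc()
		 * failure is not treated as fatal here: rcb->mp is left
		 * NULL, presumably to be dealt with by the receive path.
		 */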
		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address - IPHDR_ALIGN_ROOM,
		    rx_buf->size + IPHDR_ALIGN_ROOM,
		    0, &rcb->free_rtn);

		if (rcb->mp != NULL) {
			rcb->mp->b_rptr += IPHDR_ALIGN_ROOM;
			rcb->mp->b_wptr += IPHDR_ALIGN_ROOM;
		}
	}

	return (IXGBE_SUCCESS);

alloc_rcb_lists_fail:
	ixgbe_free_rcb_lists(rx_ring);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
ixgbe_free_rcb_lists(ixgbe_rx_ring_t *rx_ring)
{
	int i;
	rx_control_block_t *rcb;
	uint32_t rcb_count;

	rcb = rx_ring->rcb_area;
	if (rcb == NULL)
		return;

	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);
		ASSERT(rcb->state == RCB_FREE);

		if (rcb->mp != NULL) {
			freemsg(rcb->mp);
			rcb->mp = NULL;
		}

		ixgbe_free_dma_buffer(&rcb->rx_buf);
	}

	if (rx_ring->rcb_area != NULL) {
		kmem_free(rx_ring->rcb_area,
		    sizeof (rx_control_block_t) * rcb_count);
		rx_ring->rcb_area = NULL;
	}

	if (rx_ring->work_list != NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;
	}

	if (rx_ring->free_list != NULL) {
		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;
	}
}

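/*
 * This is presumably invoked at attach time, after the driver's FMA
 * capabilities have been determined and before any DMA handles are
 * allocated with the attributes above.
 */
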
/*
 * ixgbe_set_fma_flags - Set the attributes for FMA support.
 */
void
ixgbe_set_fma_flags(int acc_flag, int dma_flag)
{
	if (acc_flag) {
		ixgbe_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		ixgbe_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (dma_flag) {
		ixgbe_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		ixgbe_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		ixgbe_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		ixgbe_tx_dma_attr.dma_attr_flags = 0;
		ixgbe_buf_dma_attr.dma_attr_flags = 0;
		ixgbe_desc_dma_attr.dma_attr_flags = 0;
	}
}