xref: /titanic_50/usr/src/uts/common/io/ixgbe/ixgbe_buf.c (revision 13237b7e1e5bd293e466307b2e06f8e0e2321a0a)
/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *      http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#include "ixgbe_sw.h"

static int ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *);
static void ixgbe_free_tbd_ring(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rbd_ring(ixgbe_rx_ring_t *);
static void ixgbe_free_rbd_ring(ixgbe_rx_ring_t *);
static int ixgbe_alloc_dma_buffer(ixgbe_t *, dma_buffer_t *, size_t);
static void ixgbe_free_dma_buffer(dma_buffer_t *);
static int ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *);
static void ixgbe_free_tcb_lists(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rcb_lists(ixgbe_rx_ring_t *);
static void ixgbe_free_rcb_lists(ixgbe_rx_ring_t *);

#ifdef __sparc
#define	IXGBE_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IXGBE_DMA_ALIGNMENT	0x0000000000001000ull
#endif
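
/*
 * The alignment values above equal the base page sizes of the two
 * platforms (8 KB on sparc, 4 KB elsewhere), so each descriptor ring
 * and pre-allocated buffer starts on a page boundary, which comfortably
 * satisfies the controller's descriptor alignment requirements.
 */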

/*
 * DMA attributes for tx/rx descriptors.
 */
static ddi_dma_attr_t ixgbe_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IXGBE_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};

/*
 * DMA attributes for tx/rx buffers.
 */
static ddi_dma_attr_t ixgbe_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IXGBE_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};

/*
 * DMA attributes for transmit.
 */
static ddi_dma_attr_t ixgbe_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};
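
/*
 * Note the difference between the attribute sets above: descriptor
 * rings and pre-allocated packet buffers require page alignment and a
 * scatter/gather length of 1, so each allocation comes back as a single
 * physically contiguous cookie (the ASSERT(cookie_num == 1) checks
 * below rely on this). The transmit attributes allow byte alignment and
 * up to MAX_COOKIE cookies because they are used to bind arbitrary mblk
 * data passed down from the stack at transmit time.
 */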

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t ixgbe_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for buffers.
 */
static ddi_device_acc_attr_t ixgbe_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
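
/*
 * The hardware defines its descriptors as little-endian structures, so
 * DDI_STRUCTURE_LE_ACC lets the DDI framework byte-swap descriptor
 * accesses on big-endian (sparc) hosts. Packet buffers carry raw byte
 * streams that must never be swapped, hence DDI_NEVERSWAP_ACC.
 */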

/*
 * ixgbe_alloc_dma - Allocate DMA resources for all rx/tx rings.
 */
int
ixgbe_alloc_dma(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		/*
		 * Allocate receive descriptor ring and control block lists
		 */
		rx_ring = &ixgbe->rx_rings[i];

		if (ixgbe_alloc_rbd_ring(rx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;

		if (ixgbe_alloc_rcb_lists(rx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		/*
		 * Allocate transmit descriptor ring and control block lists
		 */
		tx_ring = &ixgbe->tx_rings[i];

		if (ixgbe_alloc_tbd_ring(tx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;

		if (ixgbe_alloc_tcb_lists(tx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;
	}

	return (IXGBE_SUCCESS);

alloc_dma_failure:
	ixgbe_free_dma(ixgbe);

	return (IXGBE_FAILURE);
}

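/*
 * Note that the failure path above simply calls ixgbe_free_dma() on the
 * whole instance; the individual free routines below check their
 * handles and pointers for NULL, so freeing rings that were never fully
 * allocated is safe.
 */
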
/*
 * ixgbe_free_dma - Free all the DMA resources of all rx/tx rings.
 */
void
ixgbe_free_dma(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	/*
	 * Free DMA resources of rx rings
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_free_rbd_ring(rx_ring);
		ixgbe_free_rcb_lists(rx_ring);
	}

	/*
	 * Free DMA resources of tx rings
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_free_tbd_ring(tx_ring);
		ixgbe_free_tcb_lists(tx_ring);
	}
}

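/*
 * The allocators below all follow the same three-step DDI pattern:
 * ddi_dma_alloc_handle() creates a DMA handle, ddi_dma_mem_alloc()
 * allocates DMA-able memory for it, and ddi_dma_addr_bind_handle() maps
 * the memory and returns its device address as a cookie. Teardown runs
 * in reverse: ddi_dma_unbind_handle(), ddi_dma_mem_free(), and
 * ddi_dma_free_handle().
 */
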
/*
 * ixgbe_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (ixgbe->tx_head_wb_enable) {
		size += sizeof (union ixgbe_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources are then bound
	 * to the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union ixgbe_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
ixgbe_free_tbd_ring(ixgbe_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}

/*
 * ixgbe_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
ixgbe_alloc_rbd_ring(ixgbe_rx_ring_t *rx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = rx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_rx_desc) * rx_ring->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_ring->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_ring->rbd_area.dma_handle = NULL;
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_ring->rbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_ring->rbd_area.address,
	    &len, &rx_ring->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_ring->rbd_area.acc_handle = NULL;
		rx_ring->rbd_area.address = NULL;
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_ring->rbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_ring->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_ring->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind rbd dma resource: %x", ret);
		rx_ring->rbd_area.dma_address = NULL;
		if (rx_ring->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
			rx_ring->rbd_area.acc_handle = NULL;
			rx_ring->rbd_area.address = NULL;
		}
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_ring->rbd_area.dma_address = cookie.dmac_laddress;
	rx_ring->rbd_area.size = len;

	rx_ring->rbd_ring = (union ixgbe_adv_rx_desc *)(uintptr_t)
	    rx_ring->rbd_area.address;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
ixgbe_free_rbd_ring(ixgbe_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_ring->rbd_area.dma_handle);
	}
	if (rx_ring->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
		rx_ring->rbd_area.acc_handle = NULL;
	}
	if (rx_ring->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
		rx_ring->rbd_area.dma_handle = NULL;
	}
	rx_ring->rbd_area.address = NULL;
	rx_ring->rbd_area.dma_address = NULL;
	rx_ring->rbd_area.size = 0;

	rx_ring->rbd_ring = NULL;
}

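/*
 * Unlike the descriptor rings, which are mapped DDI_DMA_CONSISTENT,
 * packet buffers are allocated and bound DDI_DMA_STREAMING: data flows
 * in one direction at a time, so the DDI may choose whatever mapping
 * performs best for sequential device access.
 */
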
/*
 * ixgbe_alloc_dma_buffer - Allocate DMA resources for a DMA buffer.
 */
static int
ixgbe_alloc_dma_buffer(ixgbe_t *ixgbe, dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = ixgbe->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &ixgbe_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &ixgbe_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not bind dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_dma_buffer - Free one allocated area of dma memory and handle.
 */
static void
ixgbe_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle == NULL)
		return;

	(void) ddi_dma_unbind_handle(buf->dma_handle);
	buf->dma_address = NULL;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	ddi_dma_free_handle(&buf->dma_handle);
	buf->dma_handle = NULL;

	buf->size = 0;
	buf->len = 0;
}

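/*
 * Each tx ring keeps two arrays of control block pointers: the work
 * list tracks the blocks whose descriptors are currently on the ring,
 * while the free list holds the blocks available for new transmits.
 * The control blocks themselves live in the single tcb_area allocation.
 */
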
/*
 * ixgbe_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	dev_info_t *devinfo = ixgbe->dip;

	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx work list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx free list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of the free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx control blocks");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control blocks of the free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &ixgbe_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size
		 * is less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    tx_buf, ixgbe->tx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe, "Allocate tx dma buffer failed");
			goto alloc_tcb_lists_fail;
		}

		tcb->last_index = MAX_TX_RING_SIZE;
	}

	return (IXGBE_SUCCESS);

alloc_tcb_lists_fail:
	ixgbe_free_tcb_lists(tx_ring);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
ixgbe_free_tcb_lists(ixgbe_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, then we don't
			 * have to check the remaining control blocks;
			 * they were never allocated.
			 */
			break;
		}

		ixgbe_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}

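/*
 * An rx ring needs more control blocks than descriptors: rcb_count
 * below is ring_size plus free_list_size, so that when a filled buffer
 * is loaned up the stack, a spare control block from the free list can
 * take its place on the ring until ixgbe_rx_recycle() returns it.
 */
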
/*
 * ixgbe_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
ixgbe_alloc_rcb_lists(ixgbe_rx_ring_t *rx_ring)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the work list.
	 */
	rx_ring->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->ring_size, KM_NOSLEEP);

	if (rx_ring->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx work list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_ring->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->free_list_size, KM_NOSLEEP);

	if (rx_ring->free_list == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx free list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	rx_ring->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_ring->rcb_area == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx control blocks");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate dma memory for the rx control blocks
	 */
	rcb = rx_ring->rcb_area;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_ring->ring_size) {
			/* Attach the rx control block to the work list */
			rx_ring->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_ring->free_list[i - rx_ring->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    rx_buf, ixgbe->rx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}

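		/*
		 * Shift the buffer start by IPHDR_ALIGN_ROOM so the IP
		 * header lands on a natural alignment boundary behind
		 * the 14-byte Ethernet header, then wrap the buffer in
		 * an mblk with desballoc() so received frames can be
		 * passed up the stack without copying; ixgbe_rx_recycle()
		 * reclaims the buffer when the stack frees the mblk.
		 */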
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->state = RCB_FREE;
		rcb->rx_ring = (ixgbe_rx_ring_t *)rx_ring;
		rcb->free_rtn.free_func = ixgbe_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address,
		    rx_buf->size,
		    0, &rcb->free_rtn);
	}

	return (IXGBE_SUCCESS);

alloc_rcb_lists_fail:
	ixgbe_free_rcb_lists(rx_ring);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
ixgbe_free_rcb_lists(ixgbe_rx_ring_t *rx_ring)
{
	int i;
	rx_control_block_t *rcb;
	uint32_t rcb_count;

	rcb = rx_ring->rcb_area;
	if (rcb == NULL)
		return;

	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);
		ASSERT(rcb->state == RCB_FREE);

		if (rcb->mp != NULL) {
			freemsg(rcb->mp);
			rcb->mp = NULL;
		}

		ixgbe_free_dma_buffer(&rcb->rx_buf);
	}

	if (rx_ring->rcb_area != NULL) {
		kmem_free(rx_ring->rcb_area,
		    sizeof (rx_control_block_t) * rcb_count);
		rx_ring->rcb_area = NULL;
	}

	if (rx_ring->work_list != NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;
	}

	if (rx_ring->free_list != NULL) {
		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;
	}
}

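/*
 * The attribute structures modified below are the file-scope statics
 * used by every allocation in this file, so this routine must run
 * before any DMA resources are allocated for the flags to take effect.
 */
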
/*
 * ixgbe_set_fma_flags - Set the attributes for FMA support.
 */
void
ixgbe_set_fma_flags(int acc_flag, int dma_flag)
{
	if (acc_flag) {
		ixgbe_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		ixgbe_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (dma_flag) {
		ixgbe_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		ixgbe_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		ixgbe_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		ixgbe_tx_dma_attr.dma_attr_flags = 0;
		ixgbe_buf_dma_attr.dma_attr_flags = 0;
		ixgbe_desc_dma_attr.dma_attr_flags = 0;
	}
}