xref: /titanic_52/usr/src/uts/common/io/ixgbe/ixgbe_buf.c (revision 571909175b4f9a1ef15ec4afead6d6d463dbe760)
/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *      http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "ixgbe_sw.h"

static int ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *);
static void ixgbe_free_tbd_ring(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rbd_ring(ixgbe_rx_data_t *);
static void ixgbe_free_rbd_ring(ixgbe_rx_data_t *);
static int ixgbe_alloc_dma_buffer(ixgbe_t *, dma_buffer_t *, size_t);
static int ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *);
static void ixgbe_free_tcb_lists(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rcb_lists(ixgbe_rx_data_t *);
static void ixgbe_free_rcb_lists(ixgbe_rx_data_t *);

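/*
 * Ring and buffer alignment. These values match the base page size of
 * the platform (8 KB on sparc, 4 KB elsewhere), which is well beyond
 * the minimum alignment the hardware requires.
 */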
#ifdef __sparc
#define	IXGBE_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IXGBE_DMA_ALIGNMENT	0x0000000000001000ull
#endif

/*
 * DMA attributes for tx/rx descriptors.
 */
static ddi_dma_attr_t ixgbe_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IXGBE_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};

/*
 * DMA attributes for tx/rx buffers.
 */
static ddi_dma_attr_t ixgbe_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IXGBE_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};

/*
 * DMA attributes for transmit.
 */
static ddi_dma_attr_t ixgbe_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t ixgbe_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for buffers.
 */
static ddi_device_acc_attr_t ixgbe_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
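
/*
 * All of the allocation routines below follow the same three-step DDI
 * DMA pattern, unwound in reverse order on failure and at free time:
 *
 *	1. ddi_dma_alloc_handle()	- allocate a DMA handle
 *	2. ddi_dma_mem_alloc()		- allocate DMA-able memory
 *	3. ddi_dma_addr_bind_handle()	- bind the memory to the handle
 *
 * A minimal sketch of the pattern (error handling omitted here; the
 * real routines below unwind each step on failure):
 *
 *	ddi_dma_handle_t hdl;
 *	ddi_acc_handle_t acc;
 *	ddi_dma_cookie_t cookie;
 *	caddr_t addr;
 *	size_t real_len;
 *	uint_t ncookies;
 *
 *	(void) ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
 *	    DDI_DMA_DONTWAIT, NULL, &hdl);
 *	(void) ddi_dma_mem_alloc(hdl, size, &ixgbe_desc_acc_attr,
 *	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, &addr,
 *	    &real_len, &acc);
 *	(void) ddi_dma_addr_bind_handle(hdl, NULL, addr, real_len,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
 *	    &cookie, &ncookies);
 */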

/*
 * ixgbe_alloc_dma - Allocate DMA resources for all rx/tx rings.
 */
int
ixgbe_alloc_dma(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t	*rx_ring;
	ixgbe_rx_data_t *rx_data;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		/*
		 * Allocate receive descriptor ring and control block lists
		 */
		rx_ring = &ixgbe->rx_rings[i];
		rx_data = rx_ring->rx_data;

		if (ixgbe_alloc_rbd_ring(rx_data) != IXGBE_SUCCESS)
			goto alloc_dma_failure;

		if (ixgbe_alloc_rcb_lists(rx_data) != IXGBE_SUCCESS)
			goto alloc_dma_failure;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		/*
		 * Allocate transmit descriptor ring and control block lists
		 */
		tx_ring = &ixgbe->tx_rings[i];

		if (ixgbe_alloc_tbd_ring(tx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;

		if (ixgbe_alloc_tcb_lists(tx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;
	}

	return (IXGBE_SUCCESS);

alloc_dma_failure:
	ixgbe_free_dma(ixgbe);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_dma - Free all the DMA resources of all rx/tx rings.
 */
void
ixgbe_free_dma(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	/*
	 * Free DMA resources of rx rings
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		rx_data = rx_ring->rx_data;

		ixgbe_free_rbd_ring(rx_data);
		ixgbe_free_rcb_lists(rx_data);
	}

	/*
	 * Free DMA resources of tx rings
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_free_tbd_ring(tx_ring);
		ixgbe_free_tcb_lists(tx_ring);
	}
}

int
ixgbe_alloc_rx_ring_data(ixgbe_rx_ring_t *rx_ring)
{
	ixgbe_rx_data_t	*rx_data;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	uint32_t rcb_count;

	/*
	 * Allocate memory for software receive rings
	 */
	rx_data = kmem_zalloc(sizeof (ixgbe_rx_data_t), KM_NOSLEEP);

	if (rx_data == NULL) {
		ixgbe_error(ixgbe, "Could not allocate software receive rings");
		return (IXGBE_FAILURE);
	}

	rx_data->rx_ring = rx_ring;
	mutex_init(&rx_data->recycle_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

	rx_data->ring_size = ixgbe->rx_ring_size;
	rx_data->free_list_size = ixgbe->rx_ring_size;

	rx_data->rcb_head = 0;
	rx_data->rcb_tail = 0;
	rx_data->rcb_free = rx_data->free_list_size;

	/*
	 * Allocate memory for the work list.
	 */
	rx_data->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->ring_size, KM_NOSLEEP);

	if (rx_data->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx work list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_data->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->free_list_size, KM_NOSLEEP);

	if (rx_data->free_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx free list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rx_data->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_data->rcb_area == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx control blocks");
		goto alloc_rx_data_failure;
	}

	rx_ring->rx_data = rx_data;
	return (IXGBE_SUCCESS);

alloc_rx_data_failure:
	ixgbe_free_rx_ring_data(rx_data);
	return (IXGBE_FAILURE);
}

void
ixgbe_free_rx_ring_data(ixgbe_rx_data_t *rx_data)
{
	uint32_t rcb_count;

	if (rx_data == NULL)
		return;

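	/* Any buffers loaned up to the stack must have been recycled. */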
	ASSERT(rx_data->rcb_pending == 0);

	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	if (rx_data->rcb_area != NULL) {
		kmem_free(rx_data->rcb_area,
		    sizeof (rx_control_block_t) * rcb_count);
		rx_data->rcb_area = NULL;
	}

	if (rx_data->work_list != NULL) {
		kmem_free(rx_data->work_list,
		    sizeof (rx_control_block_t *) * rx_data->ring_size);
		rx_data->work_list = NULL;
	}

	if (rx_data->free_list != NULL) {
		kmem_free(rx_data->free_list,
		    sizeof (rx_control_block_t *) * rx_data->free_list_size);
		rx_data->free_list = NULL;
	}

	mutex_destroy(&rx_data->recycle_lock);
	kmem_free(rx_data, sizeof (ixgbe_rx_data_t));
}

/*
 * ixgbe_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
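	 * (the hardware DMA-writes the index of the last completed
	 * descriptor into this slot, so transmit recycling can avoid
	 * reading the TDH register).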
	 */
	if (ixgbe->tx_head_wb_enable) {
		size += sizeof (union ixgbe_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound
	 * to the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union ixgbe_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
ixgbe_free_tbd_ring(ixgbe_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}

/*
 * ixgbe_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
ixgbe_alloc_rbd_ring(ixgbe_rx_data_t *rx_data)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = rx_data->rx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_rx_desc) * rx_data->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_data->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_data->rbd_area.dma_handle = NULL;
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_data->rbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_data->rbd_area.address,
	    &len, &rx_data->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_data->rbd_area.acc_handle = NULL;
		rx_data->rbd_area.address = NULL;
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_data->rbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_data->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_data->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind rbd dma resource: %x", ret);
		rx_data->rbd_area.dma_address = NULL;
		if (rx_data->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
			rx_data->rbd_area.acc_handle = NULL;
			rx_data->rbd_area.address = NULL;
		}
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_data->rbd_area.dma_address = cookie.dmac_laddress;
	rx_data->rbd_area.size = len;

	rx_data->rbd_ring = (union ixgbe_adv_rx_desc *)(uintptr_t)
	    rx_data->rbd_area.address;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
ixgbe_free_rbd_ring(ixgbe_rx_data_t *rx_data)
{
	if (rx_data->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_data->rbd_area.dma_handle);
	}
	if (rx_data->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
		rx_data->rbd_area.acc_handle = NULL;
	}
	if (rx_data->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
		rx_data->rbd_area.dma_handle = NULL;
	}
	rx_data->rbd_area.address = NULL;
	rx_data->rbd_area.dma_address = NULL;
	rx_data->rbd_area.size = 0;

	rx_data->rbd_ring = NULL;
}

/*
 * ixgbe_alloc_dma_buffer - Allocate DMA resources for a DMA buffer.
 */
static int
ixgbe_alloc_dma_buffer(ixgbe_t *ixgbe, dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = ixgbe->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &ixgbe_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &ixgbe_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not bind dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
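	/*
	 * len is the actual length allocated, which ddi_dma_mem_alloc()
	 * may have rounded up from the requested size.
	 */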
	buf->size = len;
	buf->len = 0;

	return (IXGBE_SUCCESS);
}
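
/*
 * Example usage (a sketch, not an actual call site in this driver):
 *
 *	dma_buffer_t buf;
 *
 *	if (ixgbe_alloc_dma_buffer(ixgbe, &buf, 2048) == IXGBE_SUCCESS) {
 *		... buf.address, buf.dma_address and buf.size are valid ...
 *		ixgbe_free_dma_buffer(&buf);
 *	}
 */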

/*
 * ixgbe_free_dma_buffer - Free one allocated area of dma memory and handle.
 */
void
ixgbe_free_dma_buffer(dma_buffer_t *buf)
{
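	/* A NULL DMA handle means the buffer was never allocated or bound. */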
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
		buf->dma_address = NULL;
	} else {
		return;
	}

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

/*
 * ixgbe_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	dev_info_t *devinfo = ixgbe->dip;

	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx work list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx free list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of the free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx control blocks");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control blocks of the free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &ixgbe_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size is
		 * less than bcopy_thresh; such packets are bcopy'd into
		 * this buffer at transmit time instead of being DMA-bound.
		 */
		tx_buf = &tcb->tx_buf;

		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    tx_buf, ixgbe->tx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe, "Could not allocate tx dma buffer");
			goto alloc_tcb_lists_fail;
		}

		tcb->last_index = MAX_TX_RING_SIZE;
	}

	return (IXGBE_SUCCESS);

alloc_tcb_lists_fail:
	ixgbe_free_tcb_lists(tx_ring);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
ixgbe_free_tcb_lists(ixgbe_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, the remaining
			 * tcbs were never initialized and need not
			 * be checked.
			 */
			break;
		}

		ixgbe_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}

/*
 * ixgbe_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
ixgbe_alloc_rcb_lists(ixgbe_rx_data_t *rx_data)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	ixgbe_t *ixgbe = rx_data->rx_ring->ixgbe;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Set up the rx control blocks (pre-allocated in rcb_area) for
	 * the work list and free list, and allocate a DMA buffer for each.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rcb = rx_data->rcb_area;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_data->ring_size) {
			/* Attach the rx control block to the work list */
			rx_data->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_data->free_list[i - rx_data->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    rx_buf, ixgbe->rx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Could not allocate rx dma buffer");
			goto alloc_rcb_lists_fail;
		}

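		/*
		 * Reserve IPHDR_ALIGN_ROOM bytes at the front of each
		 * buffer so that the IP header, which follows the
		 * 14-byte Ethernet header, lands better aligned.
		 */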
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->ref_cnt = 1;
		rcb->rx_data = (ixgbe_rx_data_t *)rx_data;
		rcb->free_rtn.free_func = ixgbe_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;
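
		/*
		 * Wrap the buffer in an mblk via desballoc(9F); when the
		 * stack eventually frees the mblk, ixgbe_rx_recycle()
		 * gets the rcb back. desballoc() can fail and leave
		 * rcb->mp NULL; the receive path is expected to check
		 * for that before loaning the buffer upstream.
		 */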
		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address,
		    rx_buf->size,
		    0, &rcb->free_rtn);
	}

	return (IXGBE_SUCCESS);

alloc_rcb_lists_fail:
	ixgbe_free_rcb_lists(rx_data);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
ixgbe_free_rcb_lists(ixgbe_rx_data_t *rx_data)
{
	ixgbe_t *ixgbe;
	rx_control_block_t *rcb;
	uint32_t rcb_count;
	uint32_t ref_cnt;
	int i;

	ixgbe = rx_data->rx_ring->ixgbe;

	mutex_enter(&ixgbe->rx_pending_lock);

	rcb = rx_data->rcb_area;
	rcb_count = rx_data->ring_size + rx_data->free_list_size;

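	/*
	 * Buffers still held by the upper stack (their ref_cnt does not
	 * drop to zero here) cannot be freed yet; count them as pending
	 * so ixgbe_rx_recycle() frees them when the mblks come back.
	 */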
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		ref_cnt = atomic_dec_32_nv(&rcb->ref_cnt);
		if (ref_cnt == 0) {
			if (rcb->mp != NULL) {
				freemsg(rcb->mp);
				rcb->mp = NULL;
			}
			ixgbe_free_dma_buffer(&rcb->rx_buf);
		} else {
			atomic_inc_32(&rx_data->rcb_pending);
			atomic_inc_32(&ixgbe->rcb_pending);
		}
	}

	mutex_exit(&ixgbe->rx_pending_lock);
}

/*
 * ixgbe_set_fma_flags - Set the attributes for FMA support.
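 *
 * The attribute structures are consulted when handles are allocated,
 * so this is expected to run before the allocation routines above.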
 */
void
ixgbe_set_fma_flags(int acc_flag, int dma_flag)
{
	if (acc_flag) {
		ixgbe_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		ixgbe_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (dma_flag) {
		ixgbe_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		ixgbe_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		ixgbe_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		ixgbe_tx_dma_attr.dma_attr_flags = 0;
		ixgbe_buf_dma_attr.dma_attr_flags = 0;
		ixgbe_desc_dma_attr.dma_attr_flags = 0;
	}
}
943