/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *      http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "ixgbe_sw.h"

static int ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *);
static void ixgbe_free_tbd_ring(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rbd_ring(ixgbe_rx_data_t *);
static void ixgbe_free_rbd_ring(ixgbe_rx_data_t *);
static int ixgbe_alloc_dma_buffer(ixgbe_t *, dma_buffer_t *, size_t);
static int ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *);
static void ixgbe_free_tcb_lists(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rcb_lists(ixgbe_rx_data_t *);
static void ixgbe_free_rcb_lists(ixgbe_rx_data_t *);

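/*
 * Alignment for descriptor ring and buffer allocations.  Page-size
 * alignment (8KB on sparc, 4KB elsewhere) is a conservative choice
 * that comfortably satisfies the hardware's descriptor base-address
 * alignment requirement.
 */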
#ifdef __sparc
#define	IXGBE_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IXGBE_DMA_ALIGNMENT	0x0000000000001000ull
#endif

/*
 * DMA attributes for tx/rx descriptors.
 */
static ddi_dma_attr_t ixgbe_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IXGBE_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};

/*
 * DMA attributes for tx/rx buffers.
 */
static ddi_dma_attr_t ixgbe_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IXGBE_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};
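
/*
 * Note that the descriptor and buffer attributes above specify a
 * scatter/gather list length of 1: each binding must be satisfied by
 * a single DMA cookie, which is what the ASSERT(cookie_num == 1)
 * checks below rely on.
 */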

/*
 * DMA attributes for transmit.
 */
static ddi_dma_attr_t ixgbe_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};
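
/*
 * The transmit attributes differ from the two above: mblk payloads
 * bound at transmit time are not under the driver's control, so no
 * alignment is required and a binding may scatter across up to
 * MAX_COOKIE cookies.
 */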

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t ixgbe_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for buffers.
 */
static ddi_device_acc_attr_t ixgbe_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
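
/*
 * Descriptors are little-endian structures consumed by the hardware,
 * hence DDI_STRUCTURE_LE_ACC; packet buffers carry raw bytes and must
 * never be byte-swapped, hence DDI_NEVERSWAP_ACC.
 */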

/*
 * ixgbe_alloc_dma - Allocate DMA resources for all rx/tx rings.
 */
int
ixgbe_alloc_dma(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t	*rx_ring;
	ixgbe_rx_data_t *rx_data;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		/*
		 * Allocate receive descriptor ring and control block lists
		 */
		rx_ring = &ixgbe->rx_rings[i];
		rx_data = rx_ring->rx_data;

		if (ixgbe_alloc_rbd_ring(rx_data) != IXGBE_SUCCESS)
			goto alloc_dma_failure;

		if (ixgbe_alloc_rcb_lists(rx_data) != IXGBE_SUCCESS)
			goto alloc_dma_failure;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		/*
		 * Allocate transmit descriptor ring and control block lists
		 */
		tx_ring = &ixgbe->tx_rings[i];

		if (ixgbe_alloc_tbd_ring(tx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;

		if (ixgbe_alloc_tcb_lists(tx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;
	}

	return (IXGBE_SUCCESS);

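	/*
	 * Unwind on failure: the free routines below tolerate rings
	 * whose resources were never (or only partially) allocated,
	 * so freeing everything is safe here.
	 */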
alloc_dma_failure:
	ixgbe_free_dma(ixgbe);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_dma - Free all the DMA resources of all rx/tx rings.
 */
void
ixgbe_free_dma(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	/*
	 * Free DMA resources of rx rings
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		rx_data = rx_ring->rx_data;

		ixgbe_free_rbd_ring(rx_data);
		ixgbe_free_rcb_lists(rx_data);
	}

	/*
	 * Free DMA resources of tx rings
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_free_tbd_ring(tx_ring);
		ixgbe_free_tcb_lists(tx_ring);
	}
}

int
ixgbe_alloc_rx_ring_data(ixgbe_rx_ring_t *rx_ring)
{
	ixgbe_rx_data_t	*rx_data;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	uint32_t rcb_count;

	/*
	 * Allocate memory for software receive rings
	 */
	rx_data = kmem_zalloc(sizeof (ixgbe_rx_data_t), KM_NOSLEEP);

	if (rx_data == NULL) {
		ixgbe_error(ixgbe, "Allocate software receive rings failed");
		return (IXGBE_FAILURE);
	}

	rx_data->rx_ring = rx_ring;
	mutex_init(&rx_data->recycle_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

	rx_data->ring_size = ixgbe->rx_ring_size;
	rx_data->free_list_size = ixgbe->rx_ring_size;

	rx_data->rcb_head = 0;
	rx_data->rcb_tail = 0;
	rx_data->rcb_free = rx_data->free_list_size;

	/*
	 * Allocate memory for the work list.
	 */
	rx_data->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->ring_size, KM_NOSLEEP);

	if (rx_data->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx work list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_data->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->free_list_size, KM_NOSLEEP);

	if (rx_data->free_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx free list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rx_data->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_data->rcb_area == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx control blocks");
		goto alloc_rx_data_failure;
	}

	rx_ring->rx_data = rx_data;
	return (IXGBE_SUCCESS);

alloc_rx_data_failure:
	ixgbe_free_rx_ring_data(rx_data);
	return (IXGBE_FAILURE);
}

void
ixgbe_free_rx_ring_data(ixgbe_rx_data_t *rx_data)
{
	uint32_t rcb_count;

	if (rx_data == NULL)
		return;

	ASSERT(rx_data->rcb_pending == 0);

	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	if (rx_data->rcb_area != NULL) {
		kmem_free(rx_data->rcb_area,
		    sizeof (rx_control_block_t) * rcb_count);
		rx_data->rcb_area = NULL;
	}

	if (rx_data->work_list != NULL) {
		kmem_free(rx_data->work_list,
		    sizeof (rx_control_block_t *) * rx_data->ring_size);
		rx_data->work_list = NULL;
	}

	if (rx_data->free_list != NULL) {
		kmem_free(rx_data->free_list,
		    sizeof (rx_control_block_t *) * rx_data->free_list_size);
		rx_data->free_list = NULL;
	}

	mutex_destroy(&rx_data->recycle_lock);
	kmem_free(rx_data, sizeof (ixgbe_rx_data_t));
}

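/*
 * The descriptor ring and buffer allocators below all follow the same
 * three-step DDI DMA sequence: ddi_dma_alloc_handle() to create a
 * handle, ddi_dma_mem_alloc() to allocate suitable memory, and
 * ddi_dma_addr_bind_handle() to bind the memory and obtain the
 * device-visible DMA address.  A failure at any step unwinds the
 * earlier ones.
 */
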
/*
 * ixgbe_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (ixgbe->tx_head_wb_enable) {
		size += sizeof (union ixgbe_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call and bind them to the memory
	 * address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union ixgbe_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
ixgbe_free_tbd_ring(ixgbe_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}

/*
 * ixgbe_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
ixgbe_alloc_rbd_ring(ixgbe_rx_data_t *rx_data)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = rx_data->rx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_rx_desc) * rx_data->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_data->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_data->rbd_area.dma_handle = NULL;
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_data->rbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_data->rbd_area.address,
	    &len, &rx_data->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_data->rbd_area.acc_handle = NULL;
		rx_data->rbd_area.address = NULL;
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_data->rbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_data->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_data->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind rbd dma resource: %x", ret);
		rx_data->rbd_area.dma_address = NULL;
		if (rx_data->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
			rx_data->rbd_area.acc_handle = NULL;
			rx_data->rbd_area.address = NULL;
		}
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_data->rbd_area.dma_address = cookie.dmac_laddress;
	rx_data->rbd_area.size = len;

	rx_data->rbd_ring = (union ixgbe_adv_rx_desc *)(uintptr_t)
	    rx_data->rbd_area.address;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
ixgbe_free_rbd_ring(ixgbe_rx_data_t *rx_data)
{
	if (rx_data->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_data->rbd_area.dma_handle);
	}
	if (rx_data->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
		rx_data->rbd_area.acc_handle = NULL;
	}
	if (rx_data->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
		rx_data->rbd_area.dma_handle = NULL;
	}
	rx_data->rbd_area.address = NULL;
	rx_data->rbd_area.dma_address = NULL;
	rx_data->rbd_area.size = 0;

	rx_data->rbd_ring = NULL;
}

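/*
 * Unlike the descriptor rings above, which are mapped
 * DDI_DMA_CONSISTENT, the packet buffers below use DDI_DMA_STREAMING,
 * the appropriate hint for data that flows one way per use between
 * the host and the device.
 */
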
/*
 * ixgbe_alloc_dma_buffer - Allocate DMA resources for a DMA buffer.
 */
static int
ixgbe_alloc_dma_buffer(ixgbe_t *ixgbe, dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = ixgbe->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &ixgbe_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &ixgbe_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not bind dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_dma_buffer - Free one allocated area of dma memory and handle.
 */
void
ixgbe_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
		buf->dma_address = NULL;
	} else {
		return;
	}

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

/*
 * ixgbe_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	dev_info_t *devinfo = ixgbe->dip;

	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx work list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx free list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of the free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx control blocks");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control blocks of the free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &ixgbe_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size
		 * is less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    tx_buf, ixgbe->tx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe, "Allocate tx dma buffer failed");
			goto alloc_tcb_lists_fail;
		}

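		/*
		 * MAX_TX_RING_SIZE can never be a valid descriptor
		 * index, so it serves as a "not currently tied to a
		 * ring slot" sentinel for last_index.
		 */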
		tcb->last_index = MAX_TX_RING_SIZE;
	}

	return (IXGBE_SUCCESS);

alloc_tcb_lists_fail:
	ixgbe_free_tcb_lists(tx_ring);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
ixgbe_free_tcb_lists(ixgbe_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, this tcb and all
			 * the ones after it were never initialized, so
			 * there is nothing more to free.
			 */
			break;
		}

		ixgbe_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}

/*
 * ixgbe_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
ixgbe_alloc_rcb_lists(ixgbe_rx_data_t *rx_data)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	ixgbe_t *ixgbe = rx_data->rx_ring->ixgbe;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rcb = rx_data->rcb_area;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_data->ring_size) {
			/* Attach the rx control block to the work list */
			rx_data->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_data->free_list[i - rx_data->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    rx_buf, ixgbe->rx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}

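		/*
		 * Reserve IPHDR_ALIGN_ROOM bytes at the front of the
		 * buffer so that the IP header following the Ethernet
		 * header lands on an aligned boundary.
		 */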
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->ref_cnt = 1;
		rcb->rx_data = (ixgbe_rx_data_t *)rx_data;
		rcb->free_rtn.free_func = ixgbe_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;
		rcb->lro_prev = -1;
		rcb->lro_next = -1;
		rcb->lro_pkt = B_FALSE;
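		/*
		 * Wrap the buffer in an mblk; ixgbe_rx_recycle() runs
		 * when the upper layer frees it.  A desballoc() failure
		 * is not treated as fatal here; rcb->mp is simply left
		 * NULL.
		 */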
		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address,
		    rx_buf->size,
		    0, &rcb->free_rtn);
	}

	return (IXGBE_SUCCESS);

alloc_rcb_lists_fail:
	ixgbe_free_rcb_lists(rx_data);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
ixgbe_free_rcb_lists(ixgbe_rx_data_t *rx_data)
{
	ixgbe_t *ixgbe;
	rx_control_block_t *rcb;
	uint32_t rcb_count;
	uint32_t ref_cnt;
	int i;

	ixgbe = rx_data->rx_ring->ixgbe;

	mutex_enter(&ixgbe->rx_pending_lock);

	rcb = rx_data->rcb_area;
	rcb_count = rx_data->ring_size + rx_data->free_list_size;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

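		/*
		 * Drop our reference.  If the mblk is still loaned up
		 * the stack, ref_cnt stays non-zero; the buffer will be
		 * freed later by ixgbe_rx_recycle() and is counted as
		 * pending until then.
		 */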
		ref_cnt = atomic_dec_32_nv(&rcb->ref_cnt);
		if (ref_cnt == 0) {
			if (rcb->mp != NULL) {
				freemsg(rcb->mp);
				rcb->mp = NULL;
			}
			ixgbe_free_dma_buffer(&rcb->rx_buf);
		} else {
			atomic_inc_32(&rx_data->rcb_pending);
			atomic_inc_32(&ixgbe->rcb_pending);
		}
	}

	mutex_exit(&ixgbe->rx_pending_lock);
}

/*
 * ixgbe_set_fma_flags - Set the attribute for fma support.
 */
void
ixgbe_set_fma_flags(int dma_flag)
{
	if (dma_flag) {
		ixgbe_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		ixgbe_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		ixgbe_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		ixgbe_tx_dma_attr.dma_attr_flags = 0;
		ixgbe_buf_dma_attr.dma_attr_flags = 0;
		ixgbe_desc_dma_attr.dma_attr_flags = 0;
	}
}