/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "igb_sw.h"

static int igb_alloc_tbd_ring(igb_tx_ring_t *);
static void igb_free_tbd_ring(igb_tx_ring_t *);
static int igb_alloc_rbd_ring(igb_rx_data_t *);
static void igb_free_rbd_ring(igb_rx_data_t *);
static int igb_alloc_dma_buffer(igb_t *, dma_buffer_t *, size_t);
static int igb_alloc_tcb_lists(igb_tx_ring_t *);
static void igb_free_tcb_lists(igb_tx_ring_t *);
static int igb_alloc_rcb_lists(igb_rx_data_t *);
static void igb_free_rcb_lists(igb_rx_data_t *);

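/*
 * Descriptor rings and packet buffers are DMA-aligned to 8 KB (0x2000)
 * on SPARC and to 4 KB (0x1000) elsewhere.
 */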
#ifdef __sparc
#define	IGB_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IGB_DMA_ALIGNMENT	0x0000000000001000ull
#endif

/*
 * DMA attributes for tx/rx descriptors
 */
static ddi_dma_attr_t igb_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};

/*
 * DMA attributes for tx/rx buffers
 */
static ddi_dma_attr_t igb_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};

/*
 * DMA attributes for transmit data buffers, which are bound dynamically
 * at transmit time: byte alignment and a multi-cookie scatter/gather
 * list accommodate arbitrary mblk fragments.
 */
static ddi_dma_attr_t igb_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};

/*
 * DMA access attributes for descriptors: the hardware expects the
 * descriptor rings in little-endian layout.
 */
static ddi_device_acc_attr_t igb_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for buffers: packet data is raw bytes and is
 * never byte-swapped.
 */
static ddi_device_acc_attr_t igb_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};


/*
 * igb_alloc_dma - Allocate DMA resources for all rx/tx rings
 */
int
igb_alloc_dma(igb_t *igb)
{
	igb_rx_ring_t *rx_ring;
	igb_rx_data_t *rx_data;
	igb_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < igb->num_rx_rings; i++) {
		/*
		 * Allocate receive descriptor ring and control block lists
		 */
		rx_ring = &igb->rx_rings[i];
		rx_data = rx_ring->rx_data;

		if (igb_alloc_rbd_ring(rx_data) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_rcb_lists(rx_data) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}

	for (i = 0; i < igb->num_tx_rings; i++) {
		/*
		 * Allocate transmit descriptor ring and control block lists
		 */
		tx_ring = &igb->tx_rings[i];

		if (igb_alloc_tbd_ring(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_tcb_lists(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}

	return (IGB_SUCCESS);

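/*
 * Unwinding a partially-constructed set of rings is safe: the free
 * routines below check each handle and pointer for NULL before
 * releasing it.
 */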
alloc_dma_failure:
	igb_free_dma(igb);

	return (IGB_FAILURE);
}


/*
 * igb_free_dma - Free all the DMA resources of all rx/tx rings
 */
void
igb_free_dma(igb_t *igb)
{
	igb_rx_ring_t *rx_ring;
	igb_rx_data_t *rx_data;
	igb_tx_ring_t *tx_ring;
	int i;

	/*
	 * Free DMA resources of rx rings
	 */
	for (i = 0; i < igb->num_rx_rings; i++) {
		rx_ring = &igb->rx_rings[i];
		rx_data = rx_ring->rx_data;

		igb_free_rbd_ring(rx_data);
		igb_free_rcb_lists(rx_data);
	}

	/*
	 * Free DMA resources of tx rings
	 */
	for (i = 0; i < igb->num_tx_rings; i++) {
		tx_ring = &igb->tx_rings[i];
		igb_free_tbd_ring(tx_ring);
		igb_free_tcb_lists(tx_ring);
	}
}

/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = tx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (igb->tx_head_wb_enable) {
		size += sizeof (union e1000_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound
	 * to the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

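	/*
	 * igb_desc_dma_attr limits the scatter/gather list to one entry,
	 * so the descriptor area must bind to exactly one DMA cookie.
	 */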
	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}

int
igb_alloc_rx_ring_data(igb_rx_ring_t *rx_ring)
{
	igb_rx_data_t *rx_data;
	igb_t *igb = rx_ring->igb;
	uint32_t rcb_count;

	/*
	 * Allocate memory for software receive rings
	 */
	rx_data = kmem_zalloc(sizeof (igb_rx_data_t), KM_NOSLEEP);

	if (rx_data == NULL) {
		igb_error(igb, "Could not allocate software receive rings");
		return (IGB_FAILURE);
	}

	rx_data->rx_ring = rx_ring;
	mutex_init(&rx_data->recycle_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));

	rx_data->ring_size = igb->rx_ring_size;
	rx_data->free_list_size = igb->rx_ring_size;

	rx_data->rcb_head = 0;
	rx_data->rcb_tail = 0;
	rx_data->rcb_free = rx_data->free_list_size;
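
	/*
	 * The work list tracks the rx control blocks attached to the
	 * descriptor ring; the free list supplies replacements while
	 * loaned-up buffers are still held by the upper layer.
	 */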

	/*
	 * Allocate memory for the work list.
	 */
	rx_data->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->ring_size, KM_NOSLEEP);

	if (rx_data->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for rx work list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_data->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->free_list_size, KM_NOSLEEP);

	if (rx_data->free_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for rx free list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rx_data->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_data->rcb_area == NULL) {
		igb_error(igb,
		    "Could not allocate memory for rx control blocks");
		goto alloc_rx_data_failure;
	}

	rx_ring->rx_data = rx_data;
	return (IGB_SUCCESS);

alloc_rx_data_failure:
	igb_free_rx_ring_data(rx_data);
	return (IGB_FAILURE);
}

void
igb_free_rx_ring_data(igb_rx_data_t *rx_data)
{
	uint32_t rcb_count;

	if (rx_data == NULL)
		return;

	ASSERT(rx_data->rcb_pending == 0);

	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	if (rx_data->rcb_area != NULL) {
		kmem_free(rx_data->rcb_area,
		    sizeof (rx_control_block_t) * rcb_count);
		rx_data->rcb_area = NULL;
	}

	if (rx_data->work_list != NULL) {
		kmem_free(rx_data->work_list,
		    sizeof (rx_control_block_t *) * rx_data->ring_size);
		rx_data->work_list = NULL;
	}

	if (rx_data->free_list != NULL) {
		kmem_free(rx_data->free_list,
		    sizeof (rx_control_block_t *) * rx_data->free_list_size);
		rx_data->free_list = NULL;
	}

	mutex_destroy(&rx_data->recycle_lock);
	kmem_free(rx_data, sizeof (igb_rx_data_t));
}

/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
igb_alloc_rbd_ring(igb_rx_data_t *rx_data)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = rx_data->rx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_rx_desc) * rx_data->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_data->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_data->rbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_data->rbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_data->rbd_area.address,
	    &len, &rx_data->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_data->rbd_area.acc_handle = NULL;
		rx_data->rbd_area.address = NULL;
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_data->rbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_data->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_data->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind rbd dma resource: %x", ret);
		rx_data->rbd_area.dma_address = NULL;
		if (rx_data->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
			rx_data->rbd_area.acc_handle = NULL;
			rx_data->rbd_area.address = NULL;
		}
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_data->rbd_area.dma_address = cookie.dmac_laddress;
	rx_data->rbd_area.size = len;

	rx_data->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
	    rx_data->rbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
igb_free_rbd_ring(igb_rx_data_t *rx_data)
{
	if (rx_data->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_data->rbd_area.dma_handle);
	}
	if (rx_data->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
		rx_data->rbd_area.acc_handle = NULL;
	}
	if (rx_data->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
		rx_data->rbd_area.dma_handle = NULL;
	}
	rx_data->rbd_area.address = NULL;
	rx_data->rbd_area.dma_address = NULL;
	rx_data->rbd_area.size = 0;

	rx_data->rbd_ring = NULL;
}


/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer
 */
static int
igb_alloc_dma_buffer(igb_t *igb,
    dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = igb->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		igb_error(igb,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

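	/*
	 * Packet buffers are allocated DDI_DMA_STREAMING, unlike the
	 * descriptor rings above, which use DDI_DMA_CONSISTENT; streaming
	 * suits the one-shot, unidirectional transfers of packet data.
	 */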
	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &igb_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not bind dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IGB_SUCCESS);
}

/*
 * igb_free_dma_buffer - Free one allocated area of dma memory and handle
 */
void
igb_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
		buf->dma_address = NULL;
	} else {
		return;
	}

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

/*
 * igb_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
igb_alloc_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	igb_t *igb = tx_ring->igb;
	dev_info_t *devinfo = igb->dip;

	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for tx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx free list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control block of free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &igb_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			igb_error(igb,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size is
		 * less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = igb_alloc_dma_buffer(igb,
		    tx_buf, igb->tx_buf_size);

		if (ret != IGB_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			igb_error(igb, "Could not allocate tx dma buffer");
			goto alloc_tcb_lists_fail;
		}
	}

	return (IGB_SUCCESS);

alloc_tcb_lists_fail:
	igb_free_tcb_lists(tx_ring);

	return (IGB_FAILURE);
}

/*
 * igb_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
igb_free_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, the remaining tcbs
			 * were never initialized, so there is nothing
			 * further to free.
			 */
			break;
		}

		igb_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}

/*
 * igb_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
igb_alloc_rcb_lists(igb_rx_data_t *rx_data)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	igb_t *igb = rx_data->rx_ring->igb;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rcb = rx_data->rcb_area;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_data->ring_size) {
			/* Attach the rx control block to the work list */
			rx_data->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_data->free_list[i - rx_data->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = igb_alloc_dma_buffer(igb,
		    rx_buf, igb->rx_buf_size);

		if (ret != IGB_SUCCESS) {
			igb_error(igb, "Could not allocate rx dma buffer");
			goto alloc_rcb_lists_fail;
		}

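		/*
		 * Skip IPHDR_ALIGN_ROOM bytes at the front of the buffer
		 * so that, past the Ethernet header, the IP header starts
		 * on an aligned boundary.
		 */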
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->ref_cnt = 1;
		rcb->rx_data = (igb_rx_data_t *)rx_data;
		rcb->free_rtn.free_func = igb_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

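		/*
		 * Wrap the buffer in an mblk via desballoc(); when the
		 * upper layer frees the message, igb_rx_recycle() is
		 * called to return this rcb for reuse.
		 */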
		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address,
		    rx_buf->size,
		    0, &rcb->free_rtn);
	}

	return (IGB_SUCCESS);

alloc_rcb_lists_fail:
	igb_free_rcb_lists(rx_data);

	return (IGB_FAILURE);
}

/*
 * igb_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
igb_free_rcb_lists(igb_rx_data_t *rx_data)
{
	igb_t *igb;
	rx_control_block_t *rcb;
	uint32_t rcb_count;
	uint32_t ref_cnt;
	int i;

	igb = rx_data->rx_ring->igb;

	mutex_enter(&igb->rx_pending_lock);

	rcb = rx_data->rcb_area;
	rcb_count = rx_data->ring_size + rx_data->free_list_size;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

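		/*
		 * Drop the driver's reference. If the count is still
		 * non-zero, the buffer is held by the upper layer; count
		 * it as pending so it can be freed later on recycle.
		 */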
		ref_cnt = atomic_dec_32_nv(&rcb->ref_cnt);
		if (ref_cnt == 0) {
			if (rcb->mp != NULL) {
				freemsg(rcb->mp);
				rcb->mp = NULL;
			}
			igb_free_dma_buffer(&rcb->rx_buf);
		} else {
			atomic_inc_32(&rx_data->rcb_pending);
			atomic_inc_32(&igb->rcb_pending);
		}
	}

	mutex_exit(&igb->rx_pending_lock);
}

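/*
 * igb_set_fma_flags - Enable or disable DDI_DMA_FLAGERR in the DMA
 * attribute templates, according to whether FMA DMA error reporting
 * is in use.
 */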
void
igb_set_fma_flags(int dma_flag)
{
	if (dma_flag) {
		igb_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		igb_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		igb_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		igb_tx_dma_attr.dma_attr_flags = 0;
		igb_buf_dma_attr.dma_attr_flags = 0;
		igb_desc_dma_attr.dma_attr_flags = 0;
	}
}