/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include "igb_sw.h"

static int igb_alloc_tbd_ring(igb_tx_ring_t *);
static void igb_free_tbd_ring(igb_tx_ring_t *);
static int igb_alloc_rbd_ring(igb_rx_data_t *);
static void igb_free_rbd_ring(igb_rx_data_t *);
static int igb_alloc_dma_buffer(igb_t *, dma_buffer_t *, size_t);
static int igb_alloc_tcb_lists(igb_tx_ring_t *);
static void igb_free_tcb_lists(igb_tx_ring_t *);
static int igb_alloc_rcb_lists(igb_rx_data_t *);
static void igb_free_rcb_lists(igb_rx_data_t *);

#ifdef __sparc
#define IGB_DMA_ALIGNMENT       0x0000000000002000ull
#else
#define IGB_DMA_ALIGNMENT       0x0000000000001000ull
#endif
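
/*
 * Note: 4 KB alignment (8 KB on sparc) comfortably exceeds the 128-byte
 * alignment the hardware requires for descriptor ring base addresses.
 */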

/*
 * DMA attributes for tx/rx descriptors
 */
static ddi_dma_attr_t igb_desc_dma_attr = {
        DMA_ATTR_V0,                    /* version number */
        0x0000000000000000ull,          /* low address */
        0xFFFFFFFFFFFFFFFFull,          /* high address */
        0x00000000FFFFFFFFull,          /* dma counter max */
        IGB_DMA_ALIGNMENT,              /* alignment */
        0x00000FFF,                     /* burst sizes */
        0x00000001,                     /* minimum transfer size */
        0x00000000FFFFFFFFull,          /* maximum transfer size */
        0xFFFFFFFFFFFFFFFFull,          /* maximum segment size */
        1,                              /* scatter/gather list length */
        0x00000001,                     /* granularity */
        DDI_DMA_FLAGERR,                /* DMA flags */
};

/*
 * DMA attributes for tx/rx buffers
 */
static ddi_dma_attr_t igb_buf_dma_attr = {
        DMA_ATTR_V0,                    /* version number */
        0x0000000000000000ull,          /* low address */
        0xFFFFFFFFFFFFFFFFull,          /* high address */
        0x00000000FFFFFFFFull,          /* dma counter max */
        IGB_DMA_ALIGNMENT,              /* alignment */
        0x00000FFF,                     /* burst sizes */
        0x00000001,                     /* minimum transfer size */
        0x00000000FFFFFFFFull,          /* maximum transfer size */
        0xFFFFFFFFFFFFFFFFull,          /* maximum segment size */
        1,                              /* scatter/gather list length */
        0x00000001,                     /* granularity */
        DDI_DMA_FLAGERR,                /* DMA flags */
};
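
/*
 * Both attribute sets above use a scatter/gather list length of 1, so
 * each binding must resolve to a single physically contiguous cookie;
 * the bind calls below assert cookie_num == 1 accordingly.
 */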

/*
 * DMA attributes for transmit
 */
static ddi_dma_attr_t igb_tx_dma_attr = {
        DMA_ATTR_V0,                    /* version number */
        0x0000000000000000ull,          /* low address */
        0xFFFFFFFFFFFFFFFFull,          /* high address */
        0x00000000FFFFFFFFull,          /* dma counter max */
        1,                              /* alignment */
        0x00000FFF,                     /* burst sizes */
        0x00000001,                     /* minimum transfer size */
        0x00000000FFFFFFFFull,          /* maximum transfer size */
        0xFFFFFFFFFFFFFFFFull,          /* maximum segment size */
        MAX_COOKIE,                     /* scatter/gather list length */
        0x00000001,                     /* granularity */
        DDI_DMA_FLAGERR,                /* DMA flags */
};
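
/*
 * The transmit attributes allow up to MAX_COOKIE cookies per bind,
 * since mblk data handed down from the stack is bound dynamically at
 * transmit time and need not be physically contiguous.
 */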

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t igb_desc_acc_attr = {
        DDI_DEVICE_ATTR_V0,
        DDI_STRUCTURE_LE_ACC,
        DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for buffers.
 */
static ddi_device_acc_attr_t igb_buf_acc_attr = {
        DDI_DEVICE_ATTR_V0,
        DDI_NEVERSWAP_ACC,
        DDI_STRICTORDER_ACC
};


/*
 * igb_alloc_dma - Allocate DMA resources for all rx/tx rings
 */
int
igb_alloc_dma(igb_t *igb)
{
        igb_rx_ring_t *rx_ring;
        igb_rx_data_t *rx_data;
        igb_tx_ring_t *tx_ring;
        int i;

        for (i = 0; i < igb->num_rx_rings; i++) {
                /*
                 * Allocate receive descriptor ring and control block lists
                 */
                rx_ring = &igb->rx_rings[i];
                rx_data = rx_ring->rx_data;

                if (igb_alloc_rbd_ring(rx_data) != IGB_SUCCESS)
                        goto alloc_dma_failure;

                if (igb_alloc_rcb_lists(rx_data) != IGB_SUCCESS)
                        goto alloc_dma_failure;
        }

        for (i = 0; i < igb->num_tx_rings; i++) {
                /*
                 * Allocate transmit descriptor ring and control block lists
                 */
                tx_ring = &igb->tx_rings[i];

                if (igb_alloc_tbd_ring(tx_ring) != IGB_SUCCESS)
                        goto alloc_dma_failure;

                if (igb_alloc_tcb_lists(tx_ring) != IGB_SUCCESS)
                        goto alloc_dma_failure;
        }

        return (IGB_SUCCESS);

alloc_dma_failure:
        igb_free_dma(igb);

        return (IGB_FAILURE);
}


/*
 * igb_free_dma - Free all the DMA resources of all rx/tx rings
 */
void
igb_free_dma(igb_t *igb)
{
        igb_rx_ring_t *rx_ring;
        igb_rx_data_t *rx_data;
        igb_tx_ring_t *tx_ring;
        int i;

        /*
         * Free DMA resources of rx rings
         */
        for (i = 0; i < igb->num_rx_rings; i++) {
                rx_ring = &igb->rx_rings[i];
                rx_data = rx_ring->rx_data;

                igb_free_rbd_ring(rx_data);
                igb_free_rcb_lists(rx_data);
        }

        /*
         * Free DMA resources of tx rings
         */
        for (i = 0; i < igb->num_tx_rings; i++) {
                tx_ring = &igb->tx_rings[i];
                igb_free_tbd_ring(tx_ring);
                igb_free_tcb_lists(tx_ring);
        }
}

/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
        int ret;
        size_t size;
        size_t len;
        uint_t cookie_num;
        dev_info_t *devinfo;
        ddi_dma_cookie_t cookie;
        igb_t *igb = tx_ring->igb;

        devinfo = igb->dip;
        size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

        /*
         * If tx head write-back is enabled, an extra tbd is allocated
         * to save the head write-back value
         */
        if (igb->tx_head_wb_enable) {
                size += sizeof (union e1000_adv_tx_desc);
        }

        /*
         * Allocate a DMA handle for the transmit descriptor
         * memory area.
         */
        ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
            DDI_DMA_DONTWAIT, NULL,
            &tx_ring->tbd_area.dma_handle);

        if (ret != DDI_SUCCESS) {
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate tbd dma handle: %x", ret);
                tx_ring->tbd_area.dma_handle = NULL;

                return (IGB_FAILURE);
        }

        /*
         * Allocate memory to DMA data to and from the transmit
         * descriptors.
         */
        ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
            size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
            DDI_DMA_DONTWAIT, NULL,
            (caddr_t *)&tx_ring->tbd_area.address,
            &len, &tx_ring->tbd_area.acc_handle);

        if (ret != DDI_SUCCESS) {
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate tbd dma memory: %x", ret);
                tx_ring->tbd_area.acc_handle = NULL;
                tx_ring->tbd_area.address = NULL;
                if (tx_ring->tbd_area.dma_handle != NULL) {
                        ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
                        tx_ring->tbd_area.dma_handle = NULL;
                }
                return (IGB_FAILURE);
        }

        /*
         * Initialize the entire transmit buffer descriptor area to zero
         */
        bzero(tx_ring->tbd_area.address, len);

        /*
         * Allocate DMA resources for the memory that was allocated by
         * the ddi_dma_mem_alloc call. The DMA resources then get bound
         * to the memory address
         */
        ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
            NULL, (caddr_t)tx_ring->tbd_area.address,
            len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
            DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

        if (ret != DDI_DMA_MAPPED) {
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not bind tbd dma resource: %x", ret);
                tx_ring->tbd_area.dma_address = 0;
                if (tx_ring->tbd_area.acc_handle != NULL) {
                        ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
                        tx_ring->tbd_area.acc_handle = NULL;
                        tx_ring->tbd_area.address = NULL;
                }
                if (tx_ring->tbd_area.dma_handle != NULL) {
                        ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
                        tx_ring->tbd_area.dma_handle = NULL;
                }
                return (IGB_FAILURE);
        }

        ASSERT(cookie_num == 1);

        tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
        tx_ring->tbd_area.size = len;

        tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
            tx_ring->tbd_area.address;

        return (IGB_SUCCESS);
}

/*
 * igb_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
{
        if (tx_ring->tbd_area.dma_handle != NULL) {
                (void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
        }
        if (tx_ring->tbd_area.acc_handle != NULL) {
                ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
                tx_ring->tbd_area.acc_handle = NULL;
        }
        if (tx_ring->tbd_area.dma_handle != NULL) {
                ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
                tx_ring->tbd_area.dma_handle = NULL;
        }
        tx_ring->tbd_area.address = NULL;
        tx_ring->tbd_area.dma_address = 0;
        tx_ring->tbd_area.size = 0;

        tx_ring->tbd_ring = NULL;
}

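/*
 * igb_alloc_rx_ring_data - Allocate the software data structures
 * (work list, free list and rx control blocks) of one rx ring.
 */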
int
igb_alloc_rx_ring_data(igb_rx_ring_t *rx_ring)
{
        igb_rx_data_t *rx_data;
        igb_t *igb = rx_ring->igb;
        uint32_t rcb_count;

        /*
         * Allocate memory for software receive rings
         */
        rx_data = kmem_zalloc(sizeof (igb_rx_data_t), KM_NOSLEEP);

        if (rx_data == NULL) {
                igb_log(igb, IGB_LOG_ERROR,
                    "Allocate software receive rings failed");
                return (IGB_FAILURE);
        }

        rx_data->rx_ring = rx_ring;
        mutex_init(&rx_data->recycle_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));

        rx_data->ring_size = igb->rx_ring_size;
        rx_data->free_list_size = igb->rx_ring_size;

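        /*
         * rcb_head and rcb_tail index the free list as a circular queue
         * (next rcb to take, and next slot to return one to); rcb_free
         * counts the rcbs currently available on the free list.
         */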
        rx_data->rcb_head = 0;
        rx_data->rcb_tail = 0;
        rx_data->rcb_free = rx_data->free_list_size;

        /*
         * Allocate memory for the work list.
         */
        rx_data->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
            rx_data->ring_size, KM_NOSLEEP);

        if (rx_data->work_list == NULL) {
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate memory for rx work list");
                goto alloc_rx_data_failure;
        }

        /*
         * Allocate memory for the free list.
         */
        rx_data->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
            rx_data->free_list_size, KM_NOSLEEP);

        if (rx_data->free_list == NULL) {
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate memory for rx free list");
                goto alloc_rx_data_failure;
        }

        /*
         * Allocate memory for the rx control blocks for work list and
         * free list.
         */
        rcb_count = rx_data->ring_size + rx_data->free_list_size;
        rx_data->rcb_area =
            kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
            KM_NOSLEEP);

        if (rx_data->rcb_area == NULL) {
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate memory for rx control blocks");
                goto alloc_rx_data_failure;
        }

        rx_ring->rx_data = rx_data;
        return (IGB_SUCCESS);

alloc_rx_data_failure:
        igb_free_rx_ring_data(rx_data);
        return (IGB_FAILURE);
}

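/*
 * igb_free_rx_ring_data - Free the software data structures of one rx ring.
 */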
void
igb_free_rx_ring_data(igb_rx_data_t *rx_data)
{
        uint32_t rcb_count;

        if (rx_data == NULL)
                return;

        ASSERT(rx_data->rcb_pending == 0);

        rcb_count = rx_data->ring_size + rx_data->free_list_size;
        if (rx_data->rcb_area != NULL) {
                kmem_free(rx_data->rcb_area,
                    sizeof (rx_control_block_t) * rcb_count);
                rx_data->rcb_area = NULL;
        }

        if (rx_data->work_list != NULL) {
                kmem_free(rx_data->work_list,
                    sizeof (rx_control_block_t *) * rx_data->ring_size);
                rx_data->work_list = NULL;
        }

        if (rx_data->free_list != NULL) {
                kmem_free(rx_data->free_list,
                    sizeof (rx_control_block_t *) * rx_data->free_list_size);
                rx_data->free_list = NULL;
        }

        mutex_destroy(&rx_data->recycle_lock);
        kmem_free(rx_data, sizeof (igb_rx_data_t));
}

/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
igb_alloc_rbd_ring(igb_rx_data_t *rx_data)
{
        int ret;
        size_t size;
        size_t len;
        uint_t cookie_num;
        dev_info_t *devinfo;
        ddi_dma_cookie_t cookie;
        igb_t *igb = rx_data->rx_ring->igb;

        devinfo = igb->dip;
        size = sizeof (union e1000_adv_rx_desc) * rx_data->ring_size;

        /*
         * Allocate a new DMA handle for the receive descriptor
         * memory area.
         */
        ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
            DDI_DMA_DONTWAIT, NULL,
            &rx_data->rbd_area.dma_handle);

        if (ret != DDI_SUCCESS) {
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate rbd dma handle: %x", ret);
                rx_data->rbd_area.dma_handle = NULL;
                return (IGB_FAILURE);
        }

        /*
         * Allocate memory to DMA data to and from the receive
         * descriptors.
         */
        ret = ddi_dma_mem_alloc(rx_data->rbd_area.dma_handle,
            size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
            DDI_DMA_DONTWAIT, NULL,
            (caddr_t *)&rx_data->rbd_area.address,
            &len, &rx_data->rbd_area.acc_handle);

        if (ret != DDI_SUCCESS) {
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate rbd dma memory: %x", ret);
                rx_data->rbd_area.acc_handle = NULL;
                rx_data->rbd_area.address = NULL;
                if (rx_data->rbd_area.dma_handle != NULL) {
                        ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
                        rx_data->rbd_area.dma_handle = NULL;
                }
                return (IGB_FAILURE);
        }

        /*
         * Initialize the entire receive buffer descriptor area to zero
         */
        bzero(rx_data->rbd_area.address, len);

        /*
         * Allocate DMA resources for the memory that was allocated by
         * the ddi_dma_mem_alloc call.
         */
        ret = ddi_dma_addr_bind_handle(rx_data->rbd_area.dma_handle,
            NULL, (caddr_t)rx_data->rbd_area.address,
            len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
            DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

        if (ret != DDI_DMA_MAPPED) {
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not bind rbd dma resource: %x", ret);
                rx_data->rbd_area.dma_address = 0;
                if (rx_data->rbd_area.acc_handle != NULL) {
                        ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
                        rx_data->rbd_area.acc_handle = NULL;
                        rx_data->rbd_area.address = NULL;
                }
                if (rx_data->rbd_area.dma_handle != NULL) {
                        ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
                        rx_data->rbd_area.dma_handle = NULL;
                }
                return (IGB_FAILURE);
        }

        ASSERT(cookie_num == 1);

        rx_data->rbd_area.dma_address = cookie.dmac_laddress;
        rx_data->rbd_area.size = len;

        rx_data->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
            rx_data->rbd_area.address;

        return (IGB_SUCCESS);
}

/*
 * igb_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
igb_free_rbd_ring(igb_rx_data_t *rx_data)
{
        if (rx_data->rbd_area.dma_handle != NULL) {
                (void) ddi_dma_unbind_handle(rx_data->rbd_area.dma_handle);
        }
        if (rx_data->rbd_area.acc_handle != NULL) {
                ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
                rx_data->rbd_area.acc_handle = NULL;
        }
        if (rx_data->rbd_area.dma_handle != NULL) {
                ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
                rx_data->rbd_area.dma_handle = NULL;
        }
        rx_data->rbd_area.address = NULL;
        rx_data->rbd_area.dma_address = 0;
        rx_data->rbd_area.size = 0;

        rx_data->rbd_ring = NULL;
}


/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer
 */
static int
igb_alloc_dma_buffer(igb_t *igb,
    dma_buffer_t *buf, size_t size)
{
        int ret;
        dev_info_t *devinfo = igb->dip;
        ddi_dma_cookie_t cookie;
        size_t len;
        uint_t cookie_num;

        ret = ddi_dma_alloc_handle(devinfo,
            &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
            NULL, &buf->dma_handle);

        if (ret != DDI_SUCCESS) {
                buf->dma_handle = NULL;
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate dma buffer handle: %x", ret);
                return (IGB_FAILURE);
        }

        ret = ddi_dma_mem_alloc(buf->dma_handle,
            size, &igb_buf_acc_attr, DDI_DMA_STREAMING,
            DDI_DMA_DONTWAIT, NULL, &buf->address,
            &len, &buf->acc_handle);

        if (ret != DDI_SUCCESS) {
                buf->acc_handle = NULL;
                buf->address = NULL;
                if (buf->dma_handle != NULL) {
                        ddi_dma_free_handle(&buf->dma_handle);
                        buf->dma_handle = NULL;
                }
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate dma buffer memory: %x", ret);
                return (IGB_FAILURE);
        }

        ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
            buf->address,
            len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
            DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

        if (ret != DDI_DMA_MAPPED) {
                buf->dma_address = 0;
                if (buf->acc_handle != NULL) {
                        ddi_dma_mem_free(&buf->acc_handle);
                        buf->acc_handle = NULL;
                        buf->address = NULL;
                }
                if (buf->dma_handle != NULL) {
                        ddi_dma_free_handle(&buf->dma_handle);
                        buf->dma_handle = NULL;
                }
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not bind dma buffer handle: %x", ret);
                return (IGB_FAILURE);
        }

        ASSERT(cookie_num == 1);

        buf->dma_address = cookie.dmac_laddress;
        buf->size = len;
        buf->len = 0;

        return (IGB_SUCCESS);
}

/*
 * igb_free_dma_buffer - Free one allocated area of dma memory and handle
 */
void
igb_free_dma_buffer(dma_buffer_t *buf)
{
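        /*
         * A NULL dma_handle means no DMA resources were ever allocated
         * for this buffer, so there is nothing to free.
         */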
        if (buf->dma_handle != NULL) {
                (void) ddi_dma_unbind_handle(buf->dma_handle);
                buf->dma_address = 0;
        } else {
                return;
        }

        if (buf->acc_handle != NULL) {
                ddi_dma_mem_free(&buf->acc_handle);
                buf->acc_handle = NULL;
                buf->address = NULL;
        }

        if (buf->dma_handle != NULL) {
                ddi_dma_free_handle(&buf->dma_handle);
                buf->dma_handle = NULL;
        }

        buf->size = 0;
        buf->len = 0;
}

/*
 * igb_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
igb_alloc_tcb_lists(igb_tx_ring_t *tx_ring)
{
        int i;
        int ret;
        tx_control_block_t *tcb;
        dma_buffer_t *tx_buf;
        igb_t *igb = tx_ring->igb;
        dev_info_t *devinfo = igb->dip;

        /*
         * Allocate memory for the work list.
         */
        tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
            tx_ring->ring_size, KM_NOSLEEP);

        if (tx_ring->work_list == NULL) {
                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate memory for tx work list");
                return (IGB_FAILURE);
        }

        /*
         * Allocate memory for the free list.
         */
        tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
            tx_ring->free_list_size, KM_NOSLEEP);

        if (tx_ring->free_list == NULL) {
                kmem_free(tx_ring->work_list,
                    sizeof (tx_control_block_t *) * tx_ring->ring_size);
                tx_ring->work_list = NULL;

                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate memory for tx free list");
                return (IGB_FAILURE);
        }

        /*
         * Allocate memory for the tx control blocks of the free list.
         */
        tx_ring->tcb_area =
            kmem_zalloc(sizeof (tx_control_block_t) *
            tx_ring->free_list_size, KM_NOSLEEP);

        if (tx_ring->tcb_area == NULL) {
                kmem_free(tx_ring->work_list,
                    sizeof (tx_control_block_t *) * tx_ring->ring_size);
                tx_ring->work_list = NULL;

                kmem_free(tx_ring->free_list,
                    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
                tx_ring->free_list = NULL;

                igb_log(igb, IGB_LOG_ERROR,
                    "Could not allocate memory for tx control blocks");
                return (IGB_FAILURE);
        }

        /*
         * Allocate dma memory for the tx control blocks of the free list.
         */
        tcb = tx_ring->tcb_area;
        for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
                ASSERT(tcb != NULL);

                tx_ring->free_list[i] = tcb;

                /*
                 * Pre-allocate dma handles for transmit. These dma handles
                 * will be dynamically bound to the data buffers passed down
                 * from the upper layers at the time of transmitting.
                 */
                ret = ddi_dma_alloc_handle(devinfo,
                    &igb_tx_dma_attr,
                    DDI_DMA_DONTWAIT, NULL,
                    &tcb->tx_dma_handle);
                if (ret != DDI_SUCCESS) {
                        tcb->tx_dma_handle = NULL;
                        igb_log(igb, IGB_LOG_ERROR,
                            "Could not allocate tx dma handle: %x", ret);
                        goto alloc_tcb_lists_fail;
                }

                /*
                 * Pre-allocate transmit buffers for packets whose size
                 * is less than bcopy_thresh.
                 */
                tx_buf = &tcb->tx_buf;

                ret = igb_alloc_dma_buffer(igb,
                    tx_buf, igb->tx_buf_size);

                if (ret != IGB_SUCCESS) {
                        ASSERT(tcb->tx_dma_handle != NULL);
                        ddi_dma_free_handle(&tcb->tx_dma_handle);
                        tcb->tx_dma_handle = NULL;
                        igb_log(igb, IGB_LOG_ERROR,
                            "Allocate tx dma buffer failed");
                        goto alloc_tcb_lists_fail;
                }
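                /*
                 * MAX_TX_RING_SIZE is never a valid descriptor index, so
                 * it serves as the "no descriptor bound yet" sentinel for
                 * this tcb.
                 */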
                tcb->last_index = MAX_TX_RING_SIZE;
        }

        return (IGB_SUCCESS);

alloc_tcb_lists_fail:
        igb_free_tcb_lists(tx_ring);

        return (IGB_FAILURE);
}

/*
 * igb_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
igb_free_tcb_lists(igb_tx_ring_t *tx_ring)
{
        int i;
        tx_control_block_t *tcb;

        tcb = tx_ring->tcb_area;
        if (tcb == NULL)
                return;

        for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
                ASSERT(tcb != NULL);

                /* Free the tx dma handle for dynamic binding */
                if (tcb->tx_dma_handle != NULL) {
                        ddi_dma_free_handle(&tcb->tx_dma_handle);
                        tcb->tx_dma_handle = NULL;
                } else {
                        /*
                         * If the dma handle is NULL, the remaining tcbs
                         * were never initialized and need not be checked.
                         */
                        break;
                }

                igb_free_dma_buffer(&tcb->tx_buf);
        }

        if (tx_ring->tcb_area != NULL) {
                kmem_free(tx_ring->tcb_area,
                    sizeof (tx_control_block_t) * tx_ring->free_list_size);
                tx_ring->tcb_area = NULL;
        }

        if (tx_ring->work_list != NULL) {
                kmem_free(tx_ring->work_list,
                    sizeof (tx_control_block_t *) * tx_ring->ring_size);
                tx_ring->work_list = NULL;
        }

        if (tx_ring->free_list != NULL) {
                kmem_free(tx_ring->free_list,
                    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
                tx_ring->free_list = NULL;
        }
}

/*
 * igb_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
igb_alloc_rcb_lists(igb_rx_data_t *rx_data)
{
        int i;
        int ret;
        rx_control_block_t *rcb;
        igb_t *igb = rx_data->rx_ring->igb;
        dma_buffer_t *rx_buf;
        uint32_t rcb_count;

        /*
         * Set up the rx control blocks of the work list and the
         * free list; the rcb memory itself was already allocated in
         * igb_alloc_rx_ring_data().
         */
        rcb_count = rx_data->ring_size + rx_data->free_list_size;
        rcb = rx_data->rcb_area;

        for (i = 0; i < rcb_count; i++, rcb++) {
                ASSERT(rcb != NULL);

                if (i < rx_data->ring_size) {
                        /* Attach the rx control block to the work list */
                        rx_data->work_list[i] = rcb;
                } else {
                        /* Attach the rx control block to the free list */
                        rx_data->free_list[i - rx_data->ring_size] = rcb;
                }

                rx_buf = &rcb->rx_buf;
                ret = igb_alloc_dma_buffer(igb,
                    rx_buf, igb->rx_buf_size);

                if (ret != IGB_SUCCESS) {
                        igb_log(igb, IGB_LOG_ERROR,
                            "Allocate rx dma buffer failed");
                        goto alloc_rcb_lists_fail;
                }

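                /*
                 * Offset the buffer by IPHDR_ALIGN_ROOM bytes so that,
                 * after the 14-byte Ethernet header, the IP header will
                 * start on a 4-byte boundary.
                 */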
                rx_buf->size -= IPHDR_ALIGN_ROOM;
                rx_buf->address += IPHDR_ALIGN_ROOM;
                rx_buf->dma_address += IPHDR_ALIGN_ROOM;

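                /*
                 * ref_cnt starts at 1 for the reference held by the
                 * driver itself; it is incremented whenever the buffer
                 * is loaned upstream, and igb_rx_recycle() drops that
                 * reference when the stack frees the loaned mblk.
                 */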
                rcb->ref_cnt = 1;
                rcb->rx_data = (igb_rx_data_t *)rx_data;
                rcb->free_rtn.free_func = igb_rx_recycle;
                rcb->free_rtn.free_arg = (char *)rcb;

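                /*
                 * A desballoc() failure is not treated as fatal here; it
                 * simply leaves rcb->mp NULL for this control block.
                 */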
                rcb->mp = desballoc((unsigned char *)
                    rx_buf->address,
                    rx_buf->size,
                    0, &rcb->free_rtn);
        }

        return (IGB_SUCCESS);

alloc_rcb_lists_fail:
        igb_free_rcb_lists(rx_data);

        return (IGB_FAILURE);
}

/*
 * igb_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
igb_free_rcb_lists(igb_rx_data_t *rx_data)
{
        igb_t *igb;
        rx_control_block_t *rcb;
        uint32_t rcb_count;
        uint32_t ref_cnt;
        int i;

        igb = rx_data->rx_ring->igb;

        mutex_enter(&igb->rx_pending_lock);

        rcb = rx_data->rcb_area;
        rcb_count = rx_data->ring_size + rx_data->free_list_size;

        for (i = 0; i < rcb_count; i++, rcb++) {
                ASSERT(rcb != NULL);

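                /*
                 * Drop the driver's reference.  If the buffer is still
                 * loaned out to the stack, it cannot be freed yet; count
                 * it as pending so that igb_rx_recycle() releases it when
                 * the upstream mblk is finally freed.
                 */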
                ref_cnt = atomic_dec_32_nv(&rcb->ref_cnt);
                if (ref_cnt == 0) {
                        if (rcb->mp != NULL) {
                                freemsg(rcb->mp);
                                rcb->mp = NULL;
                        }
                        igb_free_dma_buffer(&rcb->rx_buf);
                } else {
                        atomic_inc_32(&rx_data->rcb_pending);
                        atomic_inc_32(&igb->rcb_pending);
                }
        }

        mutex_exit(&igb->rx_pending_lock);
}

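/*
 * igb_set_fma_flags - Set or clear DDI_DMA_FLAGERR in the DMA attributes,
 * depending on whether FMA DMA error handling is enabled.
 */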
void
igb_set_fma_flags(int dma_flag)
{
        if (dma_flag) {
                igb_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
                igb_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
                igb_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
        } else {
                igb_tx_dma_attr.dma_attr_flags = 0;
                igb_buf_dma_attr.dma_attr_flags = 0;
                igb_desc_dma_attr.dma_attr_flags = 0;
        }
}