/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include "igb_sw.h"

static int igb_alloc_tbd_ring(igb_tx_ring_t *);
static void igb_free_tbd_ring(igb_tx_ring_t *);
static int igb_alloc_rbd_ring(igb_rx_data_t *);
static void igb_free_rbd_ring(igb_rx_data_t *);
static int igb_alloc_dma_buffer(igb_t *, dma_buffer_t *, size_t);
static int igb_alloc_tcb_lists(igb_tx_ring_t *);
static void igb_free_tcb_lists(igb_tx_ring_t *);
static int igb_alloc_rcb_lists(igb_rx_data_t *);
static void igb_free_rcb_lists(igb_rx_data_t *);

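/*
 * Alignment used for the descriptor rings and packet buffers below:
 * 8KB on SPARC, 4KB elsewhere (presumably chosen to match the
 * platform's IOMMU/page granularity).
 */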
#ifdef __sparc
#define	IGB_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IGB_DMA_ALIGNMENT	0x0000000000001000ull
#endif

/*
 * DMA attributes for tx/rx descriptors
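 *
 * Note that the scatter/gather list length is 1, so each descriptor
 * ring binds to a single physically contiguous cookie; the
 * ASSERT(cookie_num == 1) checks below depend on this.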
 */
static ddi_dma_attr_t igb_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};

/*
 * DMA attributes for tx/rx buffers
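 *
 * As with the descriptor attributes, the scatter/gather list length
 * of 1 keeps each packet buffer in one contiguous cookie, so a buffer
 * can be handed to the hardware as a single DMA address.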
 */
static ddi_dma_attr_t igb_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};

/*
 * DMA attributes for transmit
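 *
 * These attributes are used when binding mblks passed down from the
 * stack at transmit time, so no particular alignment is required and
 * up to MAX_COOKIE scatter/gather segments are accepted.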
 */
static ddi_dma_attr_t igb_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR,		/* DMA flags */
};

/*
 * DMA access attributes for descriptors.
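 *
 * The hardware consumes descriptors in little-endian format, so
 * DDI_STRUCTURE_LE_ACC lets the DDI framework byte-swap as needed on
 * big-endian (SPARC) hosts.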
 */
static ddi_device_acc_attr_t igb_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for buffers.
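 *
 * Packet data is an opaque byte stream, so DDI_NEVERSWAP_ACC is used
 * and the bytes are never swapped regardless of host endianness.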
 */
static ddi_device_acc_attr_t igb_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};


/*
 * igb_alloc_dma - Allocate DMA resources for all rx/tx rings
 */
int
igb_alloc_dma(igb_t *igb)
{
	igb_rx_ring_t *rx_ring;
	igb_rx_data_t *rx_data;
	igb_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < igb->num_rx_rings; i++) {
		/*
		 * Allocate receive descriptor ring and control block lists
		 */
		rx_ring = &igb->rx_rings[i];
		rx_data = rx_ring->rx_data;

		if (igb_alloc_rbd_ring(rx_data) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_rcb_lists(rx_data) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}

	for (i = 0; i < igb->num_tx_rings; i++) {
		/*
		 * Allocate transmit descriptor ring and control block lists
		 */
		tx_ring = &igb->tx_rings[i];

		if (igb_alloc_tbd_ring(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;

		if (igb_alloc_tcb_lists(tx_ring) != IGB_SUCCESS)
			goto alloc_dma_failure;
	}
158
159 return (IGB_SUCCESS);
160
161 alloc_dma_failure:
162 igb_free_dma(igb);
163
164 return (IGB_FAILURE);
165 }
166
167
168 /*
169 * igb_free_dma - Free all the DMA resources of all rx/tx rings
170 */
171 void
igb_free_dma(igb_t * igb)172 igb_free_dma(igb_t *igb)
173 {
174 igb_rx_ring_t *rx_ring;
175 igb_rx_data_t *rx_data;
176 igb_tx_ring_t *tx_ring;
177 int i;
178
179 /*
180 * Free DMA resources of rx rings
181 */
182 for (i = 0; i < igb->num_rx_rings; i++) {
183 rx_ring = &igb->rx_rings[i];
184 rx_data = rx_ring->rx_data;
185
186 igb_free_rbd_ring(rx_data);
187 igb_free_rcb_lists(rx_data);
188 }
189
190 /*
191 * Free DMA resources of tx rings
192 */
193 for (i = 0; i < igb->num_tx_rings; i++) {
194 tx_ring = &igb->tx_rings[i];
195 igb_free_tbd_ring(tx_ring);
196 igb_free_tcb_lists(tx_ring);
197 }
198 }
199

/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = tx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (igb->tx_head_wb_enable) {
		size += sizeof (union e1000_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound
	 * to the memory address
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

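	/* igb_desc_dma_attr's sgllen of 1 guarantees a single cookie */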
	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}

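/*
 * igb_alloc_rx_ring_data - Allocate the software receive ring data,
 * including the work list, free list and rx control blocks.
 */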
int
igb_alloc_rx_ring_data(igb_rx_ring_t *rx_ring)
{
	igb_rx_data_t *rx_data;
	igb_t *igb = rx_ring->igb;
	uint32_t rcb_count;

	/*
	 * Allocate memory for software receive rings
	 */
	rx_data = kmem_zalloc(sizeof (igb_rx_data_t), KM_NOSLEEP);

	if (rx_data == NULL) {
		igb_error(igb, "Could not allocate software receive rings");
		return (IGB_FAILURE);
	}

	rx_data->rx_ring = rx_ring;
	mutex_init(&rx_data->recycle_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));

	rx_data->ring_size = igb->rx_ring_size;
	rx_data->free_list_size = igb->rx_ring_size;

	rx_data->rcb_head = 0;
	rx_data->rcb_tail = 0;
	rx_data->rcb_free = rx_data->free_list_size;

	/*
	 * Allocate memory for the work list.
	 */
	rx_data->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->ring_size, KM_NOSLEEP);

	if (rx_data->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for rx work list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_data->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->free_list_size, KM_NOSLEEP);

	if (rx_data->free_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for rx free list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rx_data->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_data->rcb_area == NULL) {
		igb_error(igb,
		    "Could not allocate memory for rx control blocks");
		goto alloc_rx_data_failure;
	}

	rx_ring->rx_data = rx_data;
	return (IGB_SUCCESS);

alloc_rx_data_failure:
	igb_free_rx_ring_data(rx_data);
	return (IGB_FAILURE);
}

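/*
 * igb_free_rx_ring_data - Free the software receive ring data allocated
 * by igb_alloc_rx_ring_data().
 */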
void
igb_free_rx_ring_data(igb_rx_data_t *rx_data)
{
	uint32_t rcb_count;

	if (rx_data == NULL)
		return;

	ASSERT(rx_data->rcb_pending == 0);

	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	if (rx_data->rcb_area != NULL) {
		kmem_free(rx_data->rcb_area,
		    sizeof (rx_control_block_t) * rcb_count);
		rx_data->rcb_area = NULL;
	}

	if (rx_data->work_list != NULL) {
		kmem_free(rx_data->work_list,
		    sizeof (rx_control_block_t *) * rx_data->ring_size);
		rx_data->work_list = NULL;
	}

	if (rx_data->free_list != NULL) {
		kmem_free(rx_data->free_list,
		    sizeof (rx_control_block_t *) * rx_data->free_list_size);
		rx_data->free_list = NULL;
	}

	mutex_destroy(&rx_data->recycle_lock);
	kmem_free(rx_data, sizeof (igb_rx_data_t));
}

/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
igb_alloc_rbd_ring(igb_rx_data_t *rx_data)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = rx_data->rx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_rx_desc) * rx_data->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_data->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_data->rbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_data->rbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_data->rbd_area.address,
	    &len, &rx_data->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_data->rbd_area.acc_handle = NULL;
		rx_data->rbd_area.address = NULL;
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_data->rbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_data->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_data->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind rbd dma resource: %x", ret);
		rx_data->rbd_area.dma_address = NULL;
		if (rx_data->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
			rx_data->rbd_area.acc_handle = NULL;
			rx_data->rbd_area.address = NULL;
		}
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_data->rbd_area.dma_address = cookie.dmac_laddress;
	rx_data->rbd_area.size = len;

	rx_data->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
	    rx_data->rbd_area.address;

	return (IGB_SUCCESS);
}

/*
 * igb_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
igb_free_rbd_ring(igb_rx_data_t *rx_data)
{
	if (rx_data->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_data->rbd_area.dma_handle);
	}
	if (rx_data->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
		rx_data->rbd_area.acc_handle = NULL;
	}
	if (rx_data->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
		rx_data->rbd_area.dma_handle = NULL;
	}
	rx_data->rbd_area.address = NULL;
	rx_data->rbd_area.dma_address = NULL;
	rx_data->rbd_area.size = 0;

	rx_data->rbd_ring = NULL;
}


/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer
 */
static int
igb_alloc_dma_buffer(igb_t *igb, dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = igb->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		igb_error(igb,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &igb_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not bind dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IGB_SUCCESS);
}

/*
 * igb_free_dma_buffer - Free one allocated area of dma memory and handle
 */
void
igb_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
		buf->dma_address = NULL;
	} else {
		return;
	}

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

/*
 * igb_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
igb_alloc_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	igb_t *igb = tx_ring->igb;
	dev_info_t *devinfo = igb->dip;

	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for tx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx free list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control block of free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &igb_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			igb_error(igb,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size is
		 * less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = igb_alloc_dma_buffer(igb,
		    tx_buf, igb->tx_buf_size);

		if (ret != IGB_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			igb_error(igb, "Could not allocate tx dma buffer");
			goto alloc_tcb_lists_fail;
		}
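		/*
		 * MAX_TX_RING_SIZE is out of range for any real descriptor
		 * index, so it presumably marks last_index as "not set".
		 */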
		tcb->last_index = MAX_TX_RING_SIZE;
	}

	return (IGB_SUCCESS);

alloc_tcb_lists_fail:
	igb_free_tcb_lists(tx_ring);

	return (IGB_FAILURE);
}

/*
 * igb_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
igb_free_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, this tcb and all the
			 * following ones were never initialized, so we can
			 * stop checking here.
			 */
			break;
		}

		igb_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}

/*
 * igb_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
igb_alloc_rcb_lists(igb_rx_data_t *rx_data)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	igb_t *igb = rx_data->rx_ring->igb;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Initialize the rx control blocks for the work list and
	 * free list. The rcb memory itself was allocated in
	 * igb_alloc_rx_ring_data().
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rcb = rx_data->rcb_area;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_data->ring_size) {
			/* Attach the rx control block to the work list */
			rx_data->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_data->free_list[i - rx_data->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = igb_alloc_dma_buffer(igb,
		    rx_buf, igb->rx_buf_size);

		if (ret != IGB_SUCCESS) {
			igb_error(igb, "Could not allocate rx dma buffer");
			goto alloc_rcb_lists_fail;
		}

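		/*
		 * Shift the buffer by IPHDR_ALIGN_ROOM so that the IP
		 * header following the 14-byte Ethernet header lands on
		 * an aligned boundary.
		 */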
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->ref_cnt = 1;
		rcb->rx_data = (igb_rx_data_t *)rx_data;
		rcb->free_rtn.free_func = igb_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

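		/*
		 * desballoc() can fail and leave rcb->mp NULL; the receive
		 * path is expected to check for that before using the mblk.
		 */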
		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address,
		    rx_buf->size,
		    0, &rcb->free_rtn);
	}

	return (IGB_SUCCESS);

alloc_rcb_lists_fail:
	igb_free_rcb_lists(rx_data);

	return (IGB_FAILURE);
}

/*
 * igb_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
igb_free_rcb_lists(igb_rx_data_t *rx_data)
{
	igb_t *igb;
	rx_control_block_t *rcb;
	uint32_t rcb_count;
	uint32_t ref_cnt;
	int i;

	igb = rx_data->rx_ring->igb;

	mutex_enter(&igb->rx_pending_lock);

	rcb = rx_data->rcb_area;
	rcb_count = rx_data->ring_size + rx_data->free_list_size;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		ref_cnt = atomic_dec_32_nv(&rcb->ref_cnt);
		if (ref_cnt == 0) {
			if (rcb->mp != NULL) {
				freemsg(rcb->mp);
				rcb->mp = NULL;
			}
			igb_free_dma_buffer(&rcb->rx_buf);
		} else {
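			/*
			 * The mblk is still loaned upstream; the final free
			 * happens when igb_rx_recycle() drops the last
			 * reference.
			 */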
			atomic_inc_32(&rx_data->rcb_pending);
			atomic_inc_32(&igb->rcb_pending);
		}
	}

	mutex_exit(&igb->rx_pending_lock);
}

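/*
 * igb_set_fma_flags - Set or clear DDI_DMA_FLAGERR in the DMA attributes,
 * depending on whether FMA DMA error reporting is enabled.
 */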
void
igb_set_fma_flags(int dma_flag)
{
	if (dma_flag) {
		igb_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		igb_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		igb_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		igb_tx_dma_attr.dma_attr_flags = 0;
		igb_buf_dma_attr.dma_attr_flags = 0;
		igb_desc_dma_attr.dma_attr_flags = 0;
	}
}