/*
 * This file is provided under a CDDLv1 license. When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 * http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * **********************************************************************
 * Module Name:								*
 *   e1000g_alloc.c							*
 *									*
 * Abstract:								*
 *   This file contains some routines that take care of		*
 *   memory allocation for descriptors and buffers.			*
 *									*
 * **********************************************************************
 */

#include "e1000g_sw.h"
#include "e1000g_debug.h"

#define	TX_SW_PKT_AREA_SZ \
	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)

static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_descriptors(e1000g_rx_data_t *);
static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
static void e1000g_free_rx_descriptors(e1000g_rx_data_t *);
static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_packets(e1000g_rx_data_t *);
static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
static void e1000g_free_rx_packets(e1000g_rx_data_t *, boolean_t);
static int e1000g_alloc_dma_buffer(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
/*
 * To avoid address errors when a receive buffer crosses a 64KB
 * boundary during PCI-X packet receiving, e1000g_alloc_dma_buffer_82546
 * is used by the adapter types that require it.
 */
static int e1000g_alloc_dma_buffer_82546(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
static int e1000g_dma_mem_alloc_82546(dma_buffer_t *buf,
    size_t size, size_t *len);
static boolean_t e1000g_cross_64k_bound(void *, uintptr_t);
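
/*
 * For illustration: a buffer starting at address 0xffc0 with length
 * 0x80 spans 0xffc0 through 0x1003f, crossing the 64KB boundary at
 * 0x10000, so e1000g_cross_64k_bound() reports B_TRUE for it.
 */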

static void e1000g_free_dma_buffer(dma_buffer_t *);
#ifdef __sparc
static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
static void e1000g_free_dvma_buffer(dma_buffer_t *);
#endif
static int e1000g_alloc_descriptors(struct e1000g *Adapter);
static void e1000g_free_descriptors(struct e1000g *Adapter);
static int e1000g_alloc_packets(struct e1000g *Adapter);
static void e1000g_free_packets(struct e1000g *Adapter);
static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_data_t *,
    ddi_dma_attr_t *p_dma_attr);

/* DMA access attributes for descriptors <Little Endian> */
static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* DMA access attributes for DMA buffers */
#ifdef __sparc
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC,
};
#else
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
#endif

/* DMA attributes for tx mblk buffers */
static ddi_dma_attr_t e1000g_tx_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	MAX_COOKIES,		/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};
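
/*
 * Note that e1000g_tx_dma_attr above allows up to MAX_COOKIES
 * segments, since it is used to dynamically bind mblk chains passed
 * down from the stack, while the pre-allocated buffer and descriptor
 * attributes below require a single contiguous segment
 * (dma_attr_sgllen == 1).
 */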

/* DMA attributes for pre-allocated rx/tx buffers */
static ddi_dma_attr_t e1000g_buf_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for rx/tx descriptors */
static ddi_dma_attr_t e1000g_desc_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	E1000_MDALIGN,		/* default alignment is 4k but can be changed */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

#ifdef __sparc
static ddi_dma_lim_t e1000g_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};
#endif

#ifdef __sparc
static dma_type_t e1000g_dma_type = USE_DVMA;
#else
static dma_type_t e1000g_dma_type = USE_DMA;
#endif

extern krwlock_t e1000g_dma_type_lock;

int
e1000g_alloc_dma_resources(struct e1000g *Adapter)
{
	int result;

	result = DDI_FAILURE;

	while ((result != DDI_SUCCESS) &&
	    (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
	    (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
	    (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST)) {

		result = e1000g_alloc_descriptors(Adapter);

		if (result == DDI_SUCCESS) {
			result = e1000g_alloc_packets(Adapter);

			if (result != DDI_SUCCESS)
				e1000g_free_descriptors(Adapter);
		}

		/*
		 * If the allocation fails due to resource shortage,
		 * we'll reduce the numbers of descriptors/buffers by
		 * half, and try the allocation again.
		 */
		if (result != DDI_SUCCESS) {
			/*
			 * We must ensure the number of descriptors
			 * is always a multiple of 8.
			 */
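			/*
			 * (n >> 4) << 3 equals (n / 16) * 8: it halves
			 * n while rounding down to a multiple of 8,
			 * e.g. 4096 -> 2048 and 100 -> 48.
			 */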
			Adapter->tx_desc_num =
			    (Adapter->tx_desc_num >> 4) << 3;
			Adapter->rx_desc_num =
			    (Adapter->rx_desc_num >> 4) << 3;

			Adapter->tx_freelist_num >>= 1;
		}
	}

	return (result);
}

/*
 * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
 *
 * This routine allocates the necessary DMA buffers for the
 *	Transmit Descriptor Area
 *	Receive Descriptor Area
 */
static int
e1000g_alloc_descriptors(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	if (Adapter->mem_workaround_82546 &&
	    ((Adapter->shared.mac.type == e1000_82545) ||
	    (Adapter->shared.mac.type == e1000_82546) ||
	    (Adapter->shared.mac.type == e1000_82546_rev_3))) {
		/* Align on a 64k boundary for these adapter types */
		Adapter->desc_align = E1000_MDALIGN_82546;
	} else {
		/* Align on a 4k boundary for all other adapter types */
		Adapter->desc_align = E1000_MDALIGN;
	}

	tx_ring = Adapter->tx_ring;

	result = e1000g_alloc_tx_descriptors(tx_ring);
	if (result != DDI_SUCCESS)
		return (DDI_FAILURE);

	rx_data = Adapter->rx_ring->rx_data;

	result = e1000g_alloc_rx_descriptors(rx_data);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
e1000g_free_descriptors(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

	e1000g_free_tx_descriptors(tx_ring);
	e1000g_free_rx_descriptors(rx_data);
}

static int
e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Solaris 7 has a problem with allocating physically contiguous
	 * memory that is aligned on a 4K boundary. The transmit and receive
	 * descriptors need to be aligned on a 4KB boundary. We first try to
	 * allocate the memory with DMA attributes set to 4K alignment and
	 * with no scatter/gather mechanism specified. In most cases, this
	 * does not allocate memory aligned at a 4KB boundary. We then try
	 * asking for memory aligned on a 4K boundary with scatter/gather set
	 * to 2. This works when the amount of memory is less than 4K, i.e.
	 * a page size. If neither of these options works, or if the
	 * descriptor area is larger than 4K (i.e. more than 256
	 * descriptors), we allocate 4K of extra memory and then align the
	 * result on a 4K boundary ourselves.
	 */
	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;

	/*
	 * Memory allocation for the transmit buffer descriptors.
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the transmit descriptor
	 * memory area.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &tx_ring->tbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate tbd dma handle: %d", mystat);
		tx_ring->tbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&tx_ring->tbd_area,
	    &len, &tx_ring->tbd_acc_handle);

	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)tx_ring->tbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	if (alloc_flag)
		bzero(tx_ring->tbd_area, len);

	/*
	 * If the previous DMA attribute settings could not give us
	 * contiguous memory, or the number of descriptors is greater than
	 * the page size, we allocate extra memory and then align it at the
	 * appropriate boundary ourselves.
	 */
	if (!alloc_flag) {
		size = size + Adapter->desc_align;

		/*
		 * DMA attributes set to no scatter/gather and byte
		 * alignment
		 */
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;

		/*
		 * Allocate a new DMA handle for the transmit descriptor memory
		 * area.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &tx_ring->tbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate tbd dma handle: %d", mystat);
			tx_ring->tbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		/*
		 * Allocate memory to DMA data to and from the transmit
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&tx_ring->tbd_area,
		    &len, &tx_ring->tbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tbd dma memory: %d", mystat);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
			if (tx_ring->tbd_dma_handle != NULL) {
				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
				tx_ring->tbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the entire transmit buffer descriptor area to zero
		 */
		bzero(tx_ring->tbd_area, len);
		/*
		 * Memory has been allocated with the ddi_dma_mem_alloc call,
		 * but has not been aligned.
		 * We now align it on the appropriate boundary.
		 */
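		/*
		 * P2NPHASE(x, align) is the byte distance from x up to
		 * the next align boundary; for example,
		 * P2NPHASE(0x1234, 0x1000) == 0xdcc, and
		 * 0x1234 + 0xdcc == 0x2000.
		 */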
		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)tx_ring->tbd_area;
		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
	} /* alignment workaround */

	/*
	 * Transmit buffer descriptor memory allocation succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound
	 * to the memory address.
	 */
	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource: %d", mystat);
		if (tx_ring->tbd_acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);	/* 1 cookie */

	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
	tx_ring->tbd_first = tx_ring->tbd_area;
	tx_ring->tbd_last = tx_ring->tbd_first +
	    (Adapter->tx_desc_num - 1);

	return (DDI_SUCCESS);
}

static int
e1000g_alloc_rx_descriptors(e1000g_rx_data_t *rx_data)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_data->rx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Memory allocation for the receive buffer descriptors.
	 */
	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;

	/*
	 * Ask for suitably aligned memory, with the DMA attributes set
	 * accordingly.
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the receive descriptors
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &rx_data->rbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate rbd dma handle: %d", mystat);
		rx_data->rbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}
	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(rx_data->rbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&rx_data->rbd_area,
	    &len, &rx_data->rbd_acc_handle);

	/*
	 * Check if memory allocation succeeded and also if the
	 * allocated memory is aligned correctly.
	 */
	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)rx_data->rbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&rx_data->rbd_acc_handle);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
		}
		if (rx_data->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_dma_handle);
			rx_data->rbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the allocated receive descriptor memory to zero.
	 */
	if (alloc_flag)
		bzero((caddr_t)rx_data->rbd_area, len);

	/*
	 * If the aligned allocation did not succeed, allocate extra
	 * memory and do the alignment ourselves.
	 */
	if (!alloc_flag) {
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;
		size = size + Adapter->desc_align;
		/*
		 * Allocate a new DMA handle for the receive descriptor.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &rx_data->rbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate rbd dma handle: %d", mystat);
			rx_data->rbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}
		/*
		 * Allocate memory to DMA data to and from the receive
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(rx_data->rbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&rx_data->rbd_area,
		    &len, &rx_data->rbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate rbd dma memory: %d", mystat);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
			if (rx_data->rbd_dma_handle != NULL) {
				ddi_dma_free_handle(&rx_data->rbd_dma_handle);
				rx_data->rbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the allocated receive descriptor memory to zero.
		 */
		bzero((caddr_t)rx_data->rbd_area, len);
		templong = P2NPHASE((uintptr_t)rx_data->rbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)rx_data->rbd_area;
		rx_data->rbd_area = (struct e1000_rx_desc *)templong;
	} /* alignment workaround */

	/*
	 * The memory allocation of the receive descriptors succeeded
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	mystat = ddi_dma_addr_bind_handle(rx_data->rbd_dma_handle,
	    (struct as *)NULL, (caddr_t)rx_data->rbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource: %d", mystat);
		if (rx_data->rbd_acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_acc_handle);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
		}
		if (rx_data->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_dma_handle);
			rx_data->rbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);
	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_rx_descriptors(rx_data);
		return (DDI_FAILURE);
	}

	rx_data->rbd_dma_addr = cookie.dmac_laddress;
	rx_data->rbd_first = rx_data->rbd_area;
	rx_data->rbd_last = rx_data->rbd_first +
	    (Adapter->rx_desc_num - 1);

	return (DDI_SUCCESS);
}

static void
e1000g_free_rx_descriptors(e1000g_rx_data_t *rx_data)
{
	if (rx_data->rbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_data->rbd_dma_handle);
	}
	if (rx_data->rbd_acc_handle != NULL) {
		ddi_dma_mem_free(&rx_data->rbd_acc_handle);
		rx_data->rbd_acc_handle = NULL;
		rx_data->rbd_area = NULL;
	}
	if (rx_data->rbd_dma_handle != NULL) {
		ddi_dma_free_handle(&rx_data->rbd_dma_handle);
		rx_data->rbd_dma_handle = NULL;
	}
	rx_data->rbd_dma_addr = NULL;
	rx_data->rbd_first = NULL;
	rx_data->rbd_last = NULL;
}

static void
e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
	}
	if (tx_ring->tbd_acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
		tx_ring->tbd_acc_handle = NULL;
		tx_ring->tbd_area = NULL;
	}
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
		tx_ring->tbd_dma_handle = NULL;
	}
	tx_ring->tbd_dma_addr = NULL;
	tx_ring->tbd_first = NULL;
	tx_ring->tbd_last = NULL;
}


/*
 * e1000g_alloc_packets - allocate DMA buffers for rx/tx
 *
 * This routine allocates the necessary buffers for the
 *	Transmit sw packet structure
 *	DMA handle for Transmit
 *	DMA buffer for Transmit
 *	Receive sw packet structure
 *	DMA buffer for Receive
 */
static int
e1000g_alloc_packets(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

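	/*
	 * On sparc the first pass may use DVMA; if DVMA resources are
	 * exhausted, e1000g_dma_type is switched to USE_DMA under the
	 * writer lock below and the whole allocation is retried.
	 */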
again:
	rw_enter(&e1000g_dma_type_lock, RW_READER);

	result = e1000g_alloc_tx_packets(tx_ring);
	if (result != DDI_SUCCESS) {
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Tx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Tx packets\n");
		return (DDI_FAILURE);
	}

	result = e1000g_alloc_rx_packets(rx_data);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_packets(tx_ring);
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Rx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Rx packets\n");
		return (DDI_FAILURE);
	}

	rw_exit(&e1000g_dma_type_lock);

	return (DDI_SUCCESS);
}

static void
e1000g_free_packets(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

	e1000g_free_tx_packets(tx_ring);
	e1000g_free_rx_packets(rx_data, B_FALSE);
}

#ifdef __sparc
static int
e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = dvma_reserve(devinfo,
	    &e1000g_dma_limits,
	    Adapter->dvma_page_num,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	buf->address = kmem_alloc(size, KM_NOSLEEP);

	if (buf->address == NULL) {
		if (buf->dma_handle != NULL) {
			dvma_release(buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dvma buffer memory\n");
		return (DDI_FAILURE);
	}

	dvma_kaddr_load(buf->dma_handle,
	    buf->address, size, 0, &cookie);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = size;
	buf->len = 0;

	return (DDI_SUCCESS);
}

static void
e1000g_free_dvma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		dvma_unload(buf->dma_handle, 0, -1);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->address != NULL) {
		kmem_free(buf->address, buf->size);
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		dvma_release(buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
#endif

static int
e1000g_alloc_dma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->address,
	    &len, &buf->acc_handle);

	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

/*
 * e1000g_alloc_dma_buffer_82546 - allocate a dma buffer along with all
 * necessary handles. Same as e1000g_alloc_dma_buffer() except that it
 * ensures the buffer does not cross a 64k boundary.
 */
static int
e1000g_alloc_dma_buffer_82546(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = e1000g_dma_mem_alloc_82546(buf, size, &len);
	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    (struct as *)NULL,
	    buf->address,
	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

/*
 * e1000g_dma_mem_alloc_82546 - allocate a dma buffer, making up to
 * ALLOC_RETRY attempts to get a buffer that doesn't cross a 64k boundary.
 */
static int
e1000g_dma_mem_alloc_82546(dma_buffer_t *buf, size_t size, size_t *len)
{
#define	ALLOC_RETRY	10
	int stat;
	int cnt = 0;
	ddi_acc_handle_t hold[ALLOC_RETRY];

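	/*
	 * Buffers that land across a 64KB boundary are held in hold[]
	 * rather than freed at once, so that a retry cannot simply be
	 * handed the same region again; all held buffers are released
	 * after the loop.
	 */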
	while (cnt < ALLOC_RETRY) {
		hold[cnt] = NULL;

		/* allocate memory */
		stat = ddi_dma_mem_alloc(buf->dma_handle, size,
		    &e1000g_buf_acc_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
		    0, &buf->address, len, &buf->acc_handle);

		if (stat != DDI_SUCCESS) {
			break;
		}

		/*
		 * Check the 64k boundary:
		 * if it is bad, hold the buffer and retry;
		 * if it is good, exit the loop.
		 */
		if (e1000g_cross_64k_bound(buf->address, *len)) {
			hold[cnt] = buf->acc_handle;
			stat = DDI_FAILURE;
		} else {
			break;
		}

		cnt++;
	}

	/* Release any held buffers crossing the 64k boundary */
	for (--cnt; cnt >= 0; cnt--) {
		if (hold[cnt])
			ddi_dma_mem_free(&hold[cnt]);
	}

	return (stat);
}

/*
 * e1000g_cross_64k_bound - return B_TRUE if the buffer defined by the
 * starting address and length crosses a 64k boundary; otherwise return
 * B_FALSE
 */
static boolean_t
e1000g_cross_64k_bound(void *addr, uintptr_t len)
{
	uintptr_t start = (uintptr_t)addr;
	uintptr_t end = start + len - 1;

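	/*
	 * start and end lie in the same 64KB region exactly when all
	 * address bits above bit 15 match; XOR-ing them cancels the
	 * common high bits, so a nonzero result after shifting right
	 * by 16 means the buffer crosses a boundary.
	 */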
	return (((start ^ end) >> 16) == 0 ? B_FALSE : B_TRUE);
}

static void
e1000g_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
	} else {
		return;
	}

	buf->dma_address = NULL;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

static int
e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	p_tx_sw_packet_t packet;
	int mystat;
	dma_buffer_t *tx_buf;
	struct e1000g *Adapter;
	dev_info_t *devinfo;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;
	dma_attr = e1000g_buf_dma_attr;

	/*
	 * Memory allocation for the Transmit software structure, the transmit
	 * software packet. This structure stores all the relevant information
	 * for transmitting a single packet.
	 */
	tx_ring->packet_area =
	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);

	if (tx_ring->packet_area == NULL)
		return (DDI_FAILURE);

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		ASSERT(packet != NULL);

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting. The
		 * dynamic binding only applies to packets that are larger
		 * than tx_bcopy_thresh.
		 */
		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = dvma_reserve(devinfo,
			    &e1000g_dma_limits,
			    Adapter->dvma_page_num,
			    &packet->tx_dma_handle);
			break;
#endif
		case USE_DMA:
			mystat = ddi_dma_alloc_handle(devinfo,
			    &e1000g_tx_dma_attr,
			    DDI_DMA_DONTWAIT, 0,
			    &packet->tx_dma_handle);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tx dma handle: %d\n", mystat);
			goto tx_pkt_fail;
		}

		/*
		 * Pre-allocate transmit buffers for small packets whose
		 * size is less than tx_bcopy_thresh. The data of those
		 * small packets will be bcopy()'d into the transmit
		 * buffers instead of using dynamic DMA binding. For small
		 * packets, bcopy offers better performance than DMA binding.
		 */
		tx_buf = packet->tx_buf;

		switch (e1000g_dma_type) {
#ifdef __sparc
		case USE_DVMA:
			mystat = e1000g_alloc_dvma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size);
			break;
#endif
		case USE_DMA:
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size, &dma_attr);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			ASSERT(packet->tx_dma_handle != NULL);
			switch (e1000g_dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Allocate Tx buffer fail\n");
			goto tx_pkt_fail;
		}

		packet->dma_type = e1000g_dma_type;
	} /* for */

	return (DDI_SUCCESS);

tx_pkt_fail:
	e1000g_free_tx_packets(tx_ring);

	return (DDI_FAILURE);
}


int
e1000g_increase_rx_packets(e1000g_rx_data_t *rx_data)
{
	int i;
	p_rx_sw_packet_t packet;
	p_rx_sw_packet_t cur, next;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_data->rx_ring->adapter;
	dma_attr = e1000g_buf_dma_attr;
	dma_attr.dma_attr_align = Adapter->rx_buf_align;
	cur = NULL;
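	/*
	 * Build the new packets on a local list first; if allocation
	 * stops early, only the packets actually obtained are counted
	 * below and linked into the free list and packet_area.
	 */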

	for (i = 0; i < RX_FREELIST_INCREASE_SIZE; i++) {
		packet = e1000g_alloc_rx_sw_packet(rx_data, &dma_attr);
		if (packet == NULL)
			break;
		packet->next = cur;
		cur = packet;
	}
	Adapter->rx_freelist_num += i;
	rx_data->avail_freepkt += i;

	while (cur != NULL) {
		QUEUE_PUSH_TAIL(&rx_data->free_list, &cur->Link);
		next = cur->next;
		cur->next = rx_data->packet_area;
		rx_data->packet_area = cur;

		cur = next;
	}

	return (DDI_SUCCESS);
}


static int
e1000g_alloc_rx_packets(e1000g_rx_data_t *rx_data)
{
	int i;
	p_rx_sw_packet_t packet;
	struct e1000g *Adapter;
	uint32_t packet_num;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_data->rx_ring->adapter;
	dma_attr = e1000g_buf_dma_attr;
	dma_attr.dma_attr_align = Adapter->rx_buf_align;

	/*
	 * Allocate memory for the rx_sw_packet structures. Each one of these
	 * structures will contain a virtual and physical address to an actual
	 * receive buffer in host memory. Since we use one rx_sw_packet per
	 * received packet, the maximum number of rx_sw_packet that we'll
	 * need is equal to the number of receive descriptors plus the freelist
	 * size.
	 */
	packet_num = Adapter->rx_desc_num + RX_FREELIST_INCREASE_SIZE;
	rx_data->packet_area = NULL;

	for (i = 0; i < packet_num; i++) {
		packet = e1000g_alloc_rx_sw_packet(rx_data, &dma_attr);
		if (packet == NULL)
			goto rx_pkt_fail;

		packet->next = rx_data->packet_area;
		rx_data->packet_area = packet;
	}

	Adapter->rx_freelist_num = RX_FREELIST_INCREASE_SIZE;
	return (DDI_SUCCESS);

rx_pkt_fail:
	e1000g_free_rx_packets(rx_data, B_TRUE);
	return (DDI_FAILURE);
}


static p_rx_sw_packet_t
e1000g_alloc_rx_sw_packet(e1000g_rx_data_t *rx_data, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	p_rx_sw_packet_t packet;
	dma_buffer_t *rx_buf;
	struct e1000g *Adapter;

	Adapter = rx_data->rx_ring->adapter;

	packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
	if (packet == NULL) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate memory for Rx SwPacket\n");
		return (NULL);
	}

	rx_buf = packet->rx_buf;

	switch (e1000g_dma_type) {
#ifdef __sparc
	case USE_DVMA:
		mystat = e1000g_alloc_dvma_buffer(Adapter,
		    rx_buf, Adapter->rx_buffer_size);
		break;
#endif
	case USE_DMA:
		if (Adapter->mem_workaround_82546 &&
		    ((Adapter->shared.mac.type == e1000_82545) ||
		    (Adapter->shared.mac.type == e1000_82546) ||
		    (Adapter->shared.mac.type == e1000_82546_rev_3))) {
			mystat = e1000g_alloc_dma_buffer_82546(Adapter,
			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		} else {
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		}
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	if (mystat != DDI_SUCCESS) {
		if (packet != NULL)
			kmem_free(packet, sizeof (rx_sw_packet_t));

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate Rx buffer\n");
		return (NULL);
	}

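	/*
	 * Shift the buffer by E1000G_IPALIGNROOM bytes, presumably so
	 * that the IP header following the Ethernet header can start
	 * on a naturally aligned boundary.
	 */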
	rx_buf->size -= E1000G_IPALIGNROOM;
	rx_buf->address += E1000G_IPALIGNROOM;
	rx_buf->dma_address += E1000G_IPALIGNROOM;

	packet->rx_data = (caddr_t)rx_data;
	packet->free_rtn.free_func = e1000g_rxfree_func;
	packet->free_rtn.free_arg = (char *)packet;
	/*
	 * We use desballoc() instead of esballoc(); although it is an
	 * undocumented call, Sun has confirmed we can use it here, and
	 * it is more efficient.
	 */
	packet->mp = desballoc((unsigned char *)
	    rx_buf->address,
	    rx_buf->size,
	    BPRI_MED, &packet->free_rtn);

	packet->dma_type = e1000g_dma_type;
	packet->ref_cnt = 1;

	return (packet);
}

void
e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet, boolean_t full_release)
{
	dma_buffer_t *rx_buf;

	if (packet->mp != NULL) {
		freemsg(packet->mp);
		packet->mp = NULL;
	}

	rx_buf = packet->rx_buf;

	switch (packet->dma_type) {
#ifdef __sparc
	case USE_DVMA:
		if (rx_buf->address != NULL) {
			rx_buf->size += E1000G_IPALIGNROOM;
			rx_buf->address -= E1000G_IPALIGNROOM;
		}
		e1000g_free_dvma_buffer(rx_buf);
		break;
#endif
	case USE_DMA:
		e1000g_free_dma_buffer(rx_buf);
		break;
	default:
		break;
	}

	packet->dma_type = USE_NONE;

	if (!full_release)
		return;

	kmem_free(packet, sizeof (rx_sw_packet_t));
}

static void
e1000g_free_rx_packets(e1000g_rx_data_t *rx_data, boolean_t full_release)
{
	p_rx_sw_packet_t packet, next_packet;
	uint32_t ref_cnt;

	mutex_enter(&e1000g_rx_detach_lock);

	packet = rx_data->packet_area;
	while (packet != NULL) {
		next_packet = packet->next;

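		/*
		 * A packet whose mblk is still loaned out to the upper
		 * layers keeps a nonzero reference count after this
		 * decrement; it is counted as pending here and freed
		 * later by the free routine (e1000g_rxfree_func).
		 */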
		ref_cnt = atomic_dec_32_nv(&packet->ref_cnt);
		if (ref_cnt > 0) {
			atomic_inc_32(&rx_data->pending_count);
			atomic_inc_32(&e1000g_mblks_pending);
		} else {
			e1000g_free_rx_sw_packet(packet, full_release);
		}

		packet = next_packet;
	}

	if (full_release)
		rx_data->packet_area = NULL;

	mutex_exit(&e1000g_rx_detach_lock);
}


static void
e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	struct e1000g *Adapter;
	p_tx_sw_packet_t packet;
	dma_buffer_t *tx_buf;

	Adapter = tx_ring->adapter;

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		if (packet == NULL)
			break;

		/* Free the Tx DMA handle for dynamic binding */
		if (packet->tx_dma_handle != NULL) {
			switch (packet->dma_type) {
#ifdef __sparc
			case USE_DVMA:
				dvma_release(packet->tx_dma_handle);
				break;
#endif
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, we don't need to
			 * check the remaining packets, because they have
			 * not been initialized or have already been freed.
			 */
			break;
		}

		tx_buf = packet->tx_buf;

		switch (packet->dma_type) {
#ifdef __sparc
		case USE_DVMA:
			e1000g_free_dvma_buffer(tx_buf);
			break;
#endif
		case USE_DMA:
			e1000g_free_dma_buffer(tx_buf);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}

		packet->dma_type = USE_NONE;
	}
	if (tx_ring->packet_area != NULL) {
		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
		tx_ring->packet_area = NULL;
	}
}

/*
 * e1000g_release_dma_resources - release allocated DMA resources
 *
 * This function releases any pending buffers that have been
 * previously allocated.
 */
void
e1000g_release_dma_resources(struct e1000g *Adapter)
{
	e1000g_free_descriptors(Adapter);
	e1000g_free_packets(Adapter);
}

/* ARGSUSED */
void
e1000g_set_fma_flags(int dma_flag)
{
	if (dma_flag) {
		e1000g_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		e1000g_tx_dma_attr.dma_attr_flags = 0;
		e1000g_buf_dma_attr.dma_attr_flags = 0;
		e1000g_desc_dma_attr.dma_attr_flags = 0;
	}
}