/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2001-2006 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * + Redistributions of source code must retain the above copyright notice,
 * + this list of conditions and the following disclaimer.
 *
 * + Redistributions in binary form must reproduce the above copyright
 * + notice, this list of conditions and the following disclaimer in the
 * + documentation and/or other materials provided with the distribution.
 *
 * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
 * + contributors may be used to endorse or promote products derived from
 * + this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
 * Compliance with Applicable Laws. Notice is hereby given that
 * the software may be subject to restrictions on use, release,
 * transfer, importation, exportation and/or re-exportation under
 * the laws and regulations of the United States or other
 * countries ("Applicable Laws"), which include but are not
 * limited to U.S. export control laws such as the Export
 * Administration Regulations and national security controls as
 * defined thereunder, as well as State Department controls under
 * the U.S. Munitions List. Permission to use and/or
 * redistribute the software is conditioned upon compliance with
 * all Applicable Laws, including U.S. export control laws
 * regarding specifically designated persons, countries and
 * nationals of countries subject to national security controls.
 */

/* include files */
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/vlan.h>
#include "amd8111s_main.h"

/* Global macro definitions */
#define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
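/*
 * ROUNDUP (above) rounds x up to the next multiple of a; a must be a
 * power of two, e.g. ROUNDUP(10, 8) == 16.
 */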
#define	INTERFACE_NAME "amd8111s"
#define	AMD8111S_SPLIT	128
#define	AMD8111S_SEND_MAX	64

static char ident[] = "AMD8111 10/100M Ethernet";

/*
 * Driver Entry Points
 */
static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * GLD entry point prototypes
 */
static int amd8111s_m_unicst(void *, const uint8_t *);
static int amd8111s_m_promisc(void *, boolean_t);
static int amd8111s_m_stat(void *, uint_t, uint64_t *);
static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
static int amd8111s_m_start(void *);
static void amd8111s_m_stop(void *);
static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
static uint_t amd8111s_intr(caddr_t);

static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);

static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
static int amd8111s_odlInit(struct LayerPointers *);
static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
static void amd8111s_free_descriptors(struct LayerPointers *);
static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
    struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);


static void amd8111s_log(struct LayerPointers *adapter, int level,
    char *fmt, ...);

static struct cb_ops amd8111s_cb_ops = {
	nulldev,
	nulldev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nochpoll,
	ddi_prop_op,
	NULL,
	D_NEW | D_MP,
	CB_REV,		/* cb_rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static struct dev_ops amd8111s_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	amd8111s_attach,	/* devo_attach */
	amd8111s_detach,	/* devo_detach */
	nodev,			/* devo_reset */
	&amd8111s_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nodev,			/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

struct modldrv amd8111s_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,			/* short description */
	&amd8111s_dev_ops	/* driver specific ops */
};

struct modlinkage amd8111s_modlinkage = {
	MODREV_1, (void *)&amd8111s_modldrv, NULL
};

/*
 * Global Variables
 */
struct LayerPointers *amd8111sadapter;

static ddi_dma_attr_t pcn_buff_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint64_t)1,		/* dma_attr_align */
	(uint_t)0x7F,		/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* dma_attr_granular */
	(uint_t)0		/* dma_attr_flags */
};

static ddi_dma_attr_t pcn_desc_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0x7FFFFFFF,	/* dma_attr_count_max */
	(uint64_t)0x10,		/* dma_attr_align */
	(uint_t)0xFFFFFFFFU,	/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* dma_attr_granular */
	(uint_t)0		/* dma_attr_flags */
};
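
/*
 * Note that both attribute sets above restrict DMA to the low 4GB
 * (dma_attr_addr_hi == 0xFFFFFFFF), since the controller's descriptors
 * carry 32-bit buffer addresses; the descriptor attributes additionally
 * require 16-byte alignment (dma_attr_align == 0x10).
 */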

/* PIO access attributes for registers */
static ddi_device_acc_attr_t pcn_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};


static mac_callbacks_t amd8111s_m_callbacks = {
	MC_IOCTL,
	amd8111s_m_stat,
	amd8111s_m_start,
	amd8111s_m_stop,
	amd8111s_m_promisc,
	amd8111s_m_multicst,
	amd8111s_m_unicst,
	amd8111s_m_tx,
	NULL,
	amd8111s_m_ioctl
};
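
/*
 * The MC_IOCTL flag in the mc_callbacks structure above tells the MAC
 * layer that the optional mc_ioctl entry point (amd8111s_m_ioctl) is
 * implemented; the other entry points listed are mandatory.
 */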

/*
 * Standard Driver Load Entry Point
 * Called when the driver is loaded.
 */
int
_init()
{
	int status;
	mac_init_ops(&amd8111s_dev_ops, "amd8111s");

	status = mod_install(&amd8111s_modlinkage);
	if (status != DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Standard Driver Entry Point for Query.
 * Called at any time to report driver information.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&amd8111s_modlinkage, modinfop));
}

/*
 * Standard Driver Entry Point for Unload.
 * Called when the driver is unloaded.
 */
int
_fini()
{
	int status;

	status = mod_remove(&amd8111s_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Loopback Support
 */
static lb_property_t loopmodes[] = {
	{ normal,	"normal",	AMD8111S_LB_NONE },
	{ external,	"100Mbps",	AMD8111S_LB_EXTERNAL_100 },
	{ external,	"10Mbps",	AMD8111S_LB_EXTERNAL_10 },
	{ internal,	"MAC",		AMD8111S_LB_INTERNAL_MAC }
};

static void
amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode)
{

	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == adapter->pOdl->loopback_mode)
		return;

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return;

	case AMD8111S_LB_NONE:
		mdlStopChip(adapter);
		if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) {
			cmn_err(CE_NOTE, "LB_NONE restored from Internal LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    INLOOP);
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
			    FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
		} else {
			cmn_err(CE_NOTE, "LB_NONE restored from External LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    EXLOOP);
		}

		amd8111s_reset(adapter);
		adapter->pOdl->LinkStatus = LINK_STATE_DOWN;
		adapter->pOdl->rx_fcs_stripped = B_FALSE;
		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_100:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 100;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_10:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 10;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_INTERNAL_MAC:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		/* Disable Port Manager */
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    EN_PMGR);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | INLOOP);

		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);

		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	adapter->pOdl->loopback_mode = mode;
}

static enum ioc_reply
amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
    mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		amd8111s_log(adapter, CE_NOTE,
		    "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_GET_INFO_SIZE size");
			return (IOC_INVAL);
		}
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		break;

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_INFO size");
			return (IOC_INVAL);
		}
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		break;

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = adapter->pOdl->loopback_mode;
		break;

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_SET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		amd8111s_set_loop_mode(adapter, *lbmp);
		break;
	}
	return (IOC_REPLY);
}

static void
amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	struct LayerPointers *adapter;
	enum ioc_reply status;

	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	adapter = arg;

	ASSERT(adapter);
	if (adapter == NULL) {
		miocnak(q, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = amd8111s_loopback_ioctl(adapter, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}

/*
 * Copy one packet from DMA memory to an mblk, then advance the DMA
 * descriptor pointer.
 */
static boolean_t
amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
{
	int length = 0;
	mblk_t *mp;
	struct rx_desc *descriptor;
	struct odl *pOdl = pLayerPointers->pOdl;
	struct amd8111s_statistics *statistics = &pOdl->statistics;
	struct nonphysical *pNonphysical = pLayerPointers->pMil
	    ->pNonphysical;

	mutex_enter(&pOdl->mdlRcvLock);
	descriptor = pNonphysical->RxBufDescQRead->descriptor;
	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
	    (pNonphysical->RxBufDescQRead->descriptor -
	    pNonphysical->RxBufDescQStart->descriptor) *
	    sizeof (struct rx_desc),
	    sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
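	/*
	 * Rx_OWN == 0 means the controller has finished with this
	 * descriptor and returned it to the host; Rx_OWN is set back
	 * to 1 below to hand the descriptor to the hardware again.
	 */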
	if ((descriptor->Rx_OWN) == 0) {
		/*
		 * If the frame is received with errors, then set MCNT
		 * of that pkt in ReceiveArray to 0. This packet would
		 * be discarded later and not indicated to OS.
		 */
		if (descriptor->Rx_ERR) {
			statistics->rx_desc_err ++;
			descriptor->Rx_ERR = 0;
			if (descriptor->Rx_FRAM == 1) {
				statistics->rx_desc_err_FRAM ++;
				descriptor->Rx_FRAM = 0;
			}
			if (descriptor->Rx_OFLO == 1) {
				statistics->rx_desc_err_OFLO ++;
				descriptor->Rx_OFLO = 0;
				pOdl->rx_overflow_counter ++;
				if ((pOdl->rx_overflow_counter > 5) &&
				    (pOdl->pause_interval == 0)) {
					statistics->rx_double_overflow ++;
					mdlSendPause(pLayerPointers);
					pOdl->rx_overflow_counter = 0;
					pOdl->pause_interval = 25;
				}
			}
			if (descriptor->Rx_CRC == 1) {
				statistics->rx_desc_err_CRC ++;
				descriptor->Rx_CRC = 0;
			}
			if (descriptor->Rx_BUFF == 1) {
				statistics->rx_desc_err_BUFF ++;
				descriptor->Rx_BUFF = 0;
			}
			goto Next_Descriptor;
		}

		/* Length of incoming packet */
		if (pOdl->rx_fcs_stripped) {
			length = descriptor->Rx_MCNT - 4;
		} else {
			length = descriptor->Rx_MCNT;
		}
		if (length < 62) {
			statistics->rx_error_zerosize ++;
		}

		if ((mp = allocb(length, BPRI_MED)) == NULL) {
			statistics->rx_allocfail ++;
			goto failed;
		}
		/* Copy from virtual address of incoming packet */
		bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
		    mp->b_rptr, length);
		mp->b_wptr = mp->b_rptr + length;
		statistics->rx_ok_packets ++;
		if (*last_mp == NULL) {
			*last_mp = mp;
		} else {
			(*last_mp)->b_next = mp;
			*last_mp = mp;
		}

Next_Descriptor:
		descriptor->Rx_MCNT = 0;
		descriptor->Rx_SOP = 0;
		descriptor->Rx_EOP = 0;
		descriptor->Rx_PAM = 0;
		descriptor->Rx_BAM = 0;
		descriptor->TT = 0;
		descriptor->Rx_OWN = 1;
		pNonphysical->RxBufDescQRead->descriptor++;
		pNonphysical->RxBufDescQRead->USpaceMap++;
		if (pNonphysical->RxBufDescQRead->descriptor >
		    pNonphysical->RxBufDescQEnd->descriptor) {
			pNonphysical->RxBufDescQRead->descriptor =
			    pNonphysical->RxBufDescQStart->descriptor;
			pNonphysical->RxBufDescQRead->USpaceMap =
			    pNonphysical->RxBufDescQStart->USpaceMap;
		}
		mutex_exit(&pOdl->mdlRcvLock);

		return (B_TRUE);
	}

failed:
	mutex_exit(&pOdl->mdlRcvLock);
	return (B_FALSE);
}

/*
 * Get the received packets from NIC card and send them to GLD.
 */
static void
amd8111s_receive(struct LayerPointers *pLayerPointers)
{
	int numOfPkts = 0;
	struct odl *pOdl;
	mblk_t *ret_mp = NULL, *last_mp = NULL;

	pOdl = pLayerPointers->pOdl;

	rw_enter(&pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		rw_exit(&pOdl->chip_lock);
		return;
	}

	if (pOdl->pause_interval > 0)
		pOdl->pause_interval --;

	while (numOfPkts < RX_RING_SIZE) {

		if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
			break;
		}
		if (ret_mp == NULL)
			ret_mp = last_mp;
		numOfPkts++;
	}

	if (ret_mp) {
		mac_rx(pOdl->mh, NULL, ret_mp);
	}

	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	mdlReceive(pLayerPointers);

	rw_exit(&pOdl->chip_lock);

}

/*
 * Print message in release-version driver.
 */
static void
amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
{
	auto char name[32];
	auto char buf[256];
	va_list ap;

	if (adapter != NULL) {
		(void) sprintf(name, "amd8111s%d",
		    ddi_get_instance(adapter->pOdl->devinfo));
	} else {
		(void) sprintf(name, "amd8111s");
	}
	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);
	cmn_err(level, "%s: %s", name, buf);
}

/*
 * Allocate and initialize all resources.
 * Called by amd8111s_attach().
 */
static int
amd8111s_odlInit(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_req_array[MEM_REQ_MAX];
	unsigned long mem_set_array[MEM_REQ_MAX];
	unsigned long *pmem_req_array;
	unsigned long *pmem_set_array;
	int i, size;

	for (i = 0; i < MEM_REQ_MAX; i++) {
		mem_req_array[i] = 0;
		mem_set_array[i] = 0;
	}

	milRequestResources(mem_req_array);

	pmem_req_array = mem_req_array;
	pmem_set_array = mem_set_array;
	while (*pmem_req_array) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			*pmem_set_array = VIRTUAL;
			pmem_req_array++;
			pmem_set_array++;
			*(pmem_set_array) = *(pmem_req_array);
			pmem_set_array++;
			*(pmem_set_array) = (unsigned long) kmem_zalloc(
			    *(pmem_req_array), KM_NOSLEEP);
			if (*pmem_set_array == 0)
				goto odl_init_failure;
			break;
		}
		pmem_req_array++;
		pmem_set_array++;
	}

	/*
	 * Initialize memory on lower layers
	 */
	milSetResources(pLayerPointers, mem_set_array);

	/* Allocate Rx/Tx descriptors */
	if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
		*pmem_set_array = 0;
		goto odl_init_failure;
	}

	/*
	 * Allocate Rx buffer for each Rx descriptor. Then call mil layer
	 * routine to fill physical address of Rx buffer into Rx descriptor.
	 */
	if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
		amd8111s_free_descriptors(pLayerPointers);
		*pmem_set_array = 0;
		goto odl_init_failure;
	}
	milInitGlbds(pLayerPointers);

	return (0);

odl_init_failure:
	/*
	 * Free All memory allocated so far
	 */
	pmem_req_array = mem_set_array;
	while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			pmem_req_array++;	/* Size */
			size = *(pmem_req_array);
			pmem_req_array++;	/* Virtual Address */
			if (*pmem_req_array == 0)
				return (1);
			kmem_free((int *)*pmem_req_array, size);
			break;
		}
		pmem_req_array++;
	}
	return (1);
}

/*
 * Allocate and initialize Tx/Rx descriptors
 */
static boolean_t
amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;
	struct mil *pMil = pLayerPointers->pMil;
	dev_info_t *devinfo = pOdl->devinfo;
	uint_t length, count, i;
	size_t real_length;

	/*
	 * Allocate Rx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Rx desc failed");
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
	    &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Rx desc failed");
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Rx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Rx desc failed");
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}
	ASSERT(count == 1);

	/* Initialize Rx descriptor-related variables */
	pMil->Rx_desc = (struct rx_desc *)
	    ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->Rx_desc_pa = (unsigned int)
	    ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & ~ALIGNMENT);
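	/*
	 * Note the "+ ALIGNMENT ... & ~ALIGNMENT" idiom above rounds the
	 * address up only if ALIGNMENT (defined in amd8111s_main.h) is a
	 * mask of the form 2^n - 1, not the alignment value itself.
	 */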

	pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;


	/*
	 * Allocate Tx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Tx desc failed");
		goto allocate_desc_fail;
	}

	length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
	    &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Tx desc failed");
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}

	if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Tx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Tx desc failed");
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}
	ASSERT(count == 1);
	/* Set the DMA area to all zeros */
	bzero((caddr_t)pMil->Tx_desc_original, length);

	/* Initialize Tx descriptor-related variables */
	pMil->Tx_desc = (struct tx_desc *)
	    ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE - 1]);

	/* Physical Addr of Tx_desc_original & Tx_desc */
	pLayerPointers->pMil->Tx_desc_pa =
	    ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
	    ~ALIGNMENT);

	/* Setting the reserved bits in the tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
		pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
		pMil->pNonphysical->TxDescQWrite++;
	}
	pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;

	pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;

	return (B_TRUE);

allocate_desc_fail:
	pOdl->tx_desc_dma_handle = NULL;
	(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
	ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
	ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
	pOdl->rx_desc_dma_handle = NULL;
	return (B_FALSE);
}

/*
 * Free Tx/Rx descriptors
 */
static void
amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Free Rx descriptors */
	if (pOdl->rx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
	}

	/* Free Tx descriptors */
	if (pOdl->tx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		pOdl->tx_desc_dma_handle = NULL;
	}
}

/*
 * Allocate Tx/Rx Ring buffer
 */
static boolean_t
amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
    struct amd8111s_dma_ringbuf *pRing, uint32_t ring_size, uint32_t msg_size)
{
	uint32_t idx, msg_idx = 0, msg_acc;
	dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
	size_t real_length;
	uint_t count = 0;

	ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
	pRing->dma_buf_sz = msg_size;
	pRing->ring_size = ring_size;
	pRing->trunk_num = AMD8111S_SPLIT;
	pRing->buf_sz = msg_size * ring_size;
	if (ring_size < pRing->trunk_num)
		pRing->trunk_num = ring_size;
	ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);

	pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
	ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);
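	/*
	 * The ring buffer is carved into trunk_num equal trunks, each
	 * with its own DMA handle and binding; msg_size-sized message
	 * buffers are then laid out contiguously within each trunk in
	 * the loop below.
	 */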

	pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
	    ring_size, KM_NOSLEEP);
	pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
	    pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
	    pRing->dma_cookie == NULL) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "kmem_zalloc failed");
		goto failed;
	}

	for (idx = 0; idx < pRing->trunk_num; ++idx) {
		if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
		    DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_alloc_handle failed");
			goto failed;
		} else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
		    pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    (caddr_t *)&(pRing->trunk_addr[idx]),
		    (size_t *)(&real_length), &pRing->acc_hdl[idx])
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc failed");
			goto failed;
		} else if (real_length != pRing->trunk_sz) {
			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc returned wrong length");
			goto failed;
		} else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
		    NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_addr_bind_handle failed");
			goto failed;
		} else {
			for (msg_acc = 0;
			    msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
			    ++ msg_acc) {
				pRing->msg_buf[msg_idx].offset =
				    msg_acc * pRing->dma_buf_sz;
				pRing->msg_buf[msg_idx].vir_addr =
				    pRing->trunk_addr[idx] +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].phy_addr =
				    pRing->dma_cookie[idx].dmac_laddress +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].p_hdl =
				    pRing->dma_hdl[idx];
				msg_idx ++;
			}
		}
	}

	pRing->free = pRing->msg_buf;
	pRing->next = pRing->msg_buf;
	pRing->curr = pRing->msg_buf;

	return (B_TRUE);
failed:
	amd8111s_free_dma_ringbuf(pRing);
	return (B_FALSE);
}

/*
 * Free Tx/Rx ring buffer
 */
static void
amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
{
	int idx;

	if (pRing->dma_cookie != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx ++) {
			if (pRing->dma_cookie[idx].dmac_laddress == 0) {
				break;
			}
			(void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_cookie,
		    sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
	}

	if (pRing->acc_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx ++) {
			if (pRing->acc_hdl[idx] == NULL)
				break;
			ddi_dma_mem_free(&pRing->acc_hdl[idx]);
		}
		kmem_free(pRing->acc_hdl,
		    sizeof (ddi_acc_handle_t) * pRing->trunk_num);
	}

	if (pRing->dma_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx ++) {
			if (pRing->dma_hdl[idx] == 0) {
				break;
			}
			ddi_dma_free_handle(&pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_hdl,
		    sizeof (ddi_dma_handle_t) * pRing->trunk_num);
	}

	if (pRing->msg_buf != NULL) {
		kmem_free(pRing->msg_buf,
		    sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
	}

	if (pRing->trunk_addr != NULL) {
		kmem_free(pRing->trunk_addr,
		    sizeof (caddr_t) * pRing->trunk_num);
	}

	bzero(pRing, sizeof (*pRing));
}


/*
 * Allocate all Tx buffers.
 * Allocate a Rx buffer for each Rx descriptor. Then
 * call mil routine to fill physical address of Rx
 * buffer into Rx descriptors.
 */
static boolean_t
amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/*
	 * Allocate Rx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
	    RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for rx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Allocate Tx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
	    TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for tx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Initialize the mil Queues
	 */
	milInitGlbds(pLayerPointers);

	milInitRxQ(pLayerPointers);

	return (B_TRUE);

allocate_buf_fail:

	amd8111s_log(pLayerPointers, CE_WARN,
	    "amd8111s_allocate_buffers failed");
	return (B_FALSE);
}

/*
 * Free all Rx/Tx buffers
 */

static void
amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
{
	/* Free Tx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);

	/* Free Rx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
}

/*
 * Try to recycle all the descriptors and Tx buffers
 * which are already freed by hardware.
 */
static int
amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count = 0;

	pNonphysical = pLayerPointers->pMil->pNonphysical;
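	/*
	 * A descriptor with Tx_OWN == 0 between the read and write
	 * pointers has been consumed by the controller, so its buffer
	 * slot can be returned to the free list.
	 */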
	while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
	    pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
		pLayerPointers->pOdl->tx_buf.free =
		    NEXT(pLayerPointers->pOdl->tx_buf, free);
		pNonphysical->TxDescQRead++;
		if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
		}
		count ++;
	}

	if (pLayerPointers->pMil->tx_reschedule)
		ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);

	return (count);
}

/*
 * Get packets in the Tx buffer, then copy them to the send buffer.
 * Trigger hardware to send out packets.
 */
static void
amd8111s_send_serial(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count;

	pNonphysical = pLayerPointers->pMil->pNonphysical;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);

	for (count = 0; count < AMD8111S_SEND_MAX; count ++) {
		if (pLayerPointers->pOdl->tx_buf.curr ==
		    pLayerPointers->pOdl->tx_buf.next) {
			break;
		}
		/* If the descriptor ring is full, try to recycle it */
		if (((pNonphysical->TxDescQWrite + 1 >
		    pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart :
		    (pNonphysical->TxDescQWrite + 1)) ==
		    pNonphysical->TxDescQRead)
			if (amd8111s_recycle_tx(pLayerPointers) == 0) {
				pLayerPointers->pOdl
				    ->statistics.tx_no_descriptor ++;
				break;
			}

		/* Fill packet length */
		pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
		    ->pOdl->tx_buf.curr->msg_size;

		/* Fill physical buffer address */
		pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
		    pLayerPointers->pOdl->tx_buf.curr->phy_addr;

		pNonphysical->TxDescQWrite->Tx_SOP = 1;
		pNonphysical->TxDescQWrite->Tx_EOP = 1;
		pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
		pNonphysical->TxDescQWrite->Tx_LTINT = 1;
		pNonphysical->TxDescQWrite->Tx_USPACE = 0;
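		/*
		 * Tx_OWN is filled in last, presumably so the controller
		 * never sees a partially built descriptor.
		 */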
		pNonphysical->TxDescQWrite->Tx_OWN = 1;

		pNonphysical->TxDescQWrite++;
		if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
		}

		pLayerPointers->pOdl->tx_buf.curr =
		    NEXT(pLayerPointers->pOdl->tx_buf, curr);

	}

	pLayerPointers->pOdl->statistics.tx_ok_packets += count;

	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	/* Call mdlTransmit to send the pkt out on the network */
	mdlTransmit(pLayerPointers);

}

/*
 * Soft interrupt handler: try to send out the packets in the Tx buffer.
 * If a reschedule is pending and buffer space is available again, call
 * mac_tx_update() to re-enable transmission.
 */
static uint_t
amd8111s_send_drain(caddr_t arg)
{
	struct LayerPointers *pLayerPointers = (void *)arg;

	amd8111s_send_serial(pLayerPointers);

	if (pLayerPointers->pMil->tx_reschedule &&
	    NEXT(pLayerPointers->pOdl->tx_buf, next) !=
	    pLayerPointers->pOdl->tx_buf.free) {
		mac_tx_update(pLayerPointers->pOdl->mh);
		pLayerPointers->pMil->tx_reschedule = B_FALSE;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Get a Tx buffer
 */
static struct amd8111s_msgbuf *
amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_msgbuf *tmp, *next;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
	next = NEXT(pLayerPointers->pOdl->tx_buf, next);
	if (next == pLayerPointers->pOdl->tx_buf.free) {
		tmp = NULL;
	} else {
		tmp = pLayerPointers->pOdl->tx_buf.next;
		pLayerPointers->pOdl->tx_buf.next = next;
	}
	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	return (tmp);
}

static boolean_t
amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
{
	struct odl *pOdl;
	size_t frag_len;
	mblk_t *tmp;
	struct amd8111s_msgbuf *txBuf;
	uint8_t *pMsg;

	pOdl = pLayerPointers->pOdl;

	/* alloc send buffer */
	txBuf = amd8111s_getTxbuf(pLayerPointers);
	if (txBuf == NULL) {
		pOdl->statistics.tx_no_buffer ++;
		pLayerPointers->pMil->tx_reschedule = B_TRUE;
		amd8111s_send_serial(pLayerPointers);
		return (B_FALSE);
	}

	/* copy packet to send buffer */
	txBuf->msg_size = 0;
	pMsg = (uint8_t *)txBuf->vir_addr;
	for (tmp = mp; tmp; tmp = tmp->b_cont) {
		frag_len = MBLKL(tmp);
		bcopy(tmp->b_rptr, pMsg, frag_len);
		txBuf->msg_size += frag_len;
		pMsg += frag_len;
	}
	freemsg(mp);

	amd8111s_send_serial(pLayerPointers);

	return (B_TRUE);
}

/*
 * (GLD Entry Point) Send the message block to lower layer
 */
static mblk_t *
amd8111s_m_tx(void *arg, mblk_t *mp)
{
	struct LayerPointers *pLayerPointers = arg;
	mblk_t *next;

	rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		pLayerPointers->pOdl->statistics.tx_afterunplumb ++;
		freemsgchain(mp);
		mp = NULL;
	}

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if (!amd8111s_send(pLayerPointers, mp)) {
			/* Send fail */
			mp->b_next = next;
			break;
		}
		mp = next;
	}

	rw_exit(&pLayerPointers->pOdl->chip_lock);
	return (mp);
}

/*
 * (GLD Entry Point) Interrupt Service Routine
 */
static uint_t
amd8111s_intr(caddr_t arg)
{
	unsigned int intrCauses;
	struct LayerPointers *pLayerPointers = (void *)arg;

	/* Read the interrupt status from mdl */
	intrCauses = mdlReadInterrupt(pLayerPointers);

	if (intrCauses == 0) {
		pLayerPointers->pOdl->statistics.intr_OTHER ++;
		return (DDI_INTR_UNCLAIMED);
	}

	if (intrCauses & LCINT) {
		if (mdlReadLink(pLayerPointers) == LINK_UP) {
			mdlGetActiveMediaInfo(pLayerPointers);
			/* Link status changed */
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_UP) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_UP;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_UP);
			}
		} else {
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_DOWN) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_DOWN;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_DOWN);
			}
		}
	}
	/*
	 * RINT0: Receive Interrupt is set by the controller after the last
	 * descriptor of a receive frame for this ring has been updated by
	 * writing a 0 to the OWNership bit.
	 */
	if (intrCauses & RINT0) {
		pLayerPointers->pOdl->statistics.intr_RINT0 ++;
		amd8111s_receive(pLayerPointers);
	}

	/*
	 * TINT0: Transmit Interrupt is set by the controller after the OWN bit
	 * in the last descriptor of a transmit frame in this particular ring
	 * has been cleared to indicate the frame has been copied to the
	 * transmit FIFO.
	 */
	if (intrCauses & TINT0) {
		pLayerPointers->pOdl->statistics.intr_TINT0 ++;
		/*
		 * Descriptors have been released by the hardware;
		 * drain any packets still waiting in the Tx buffer.
		 */
		amd8111s_send_serial(pLayerPointers);
	}

	if (intrCauses & STINT) {
		pLayerPointers->pOdl->statistics.intr_STINT ++;
	}


	return (DDI_INTR_CLAIMED);
}

/*
 * Re-initialize data structures.
 */
static void
amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
{
	/* Reset all Tx/Rx queues and descriptors */
	milResetTxQ(pLayerPointers);
	milInitRxQ(pLayerPointers);
}

/*
 * Wait until all pending Tx packets have been transmitted.
 */
static void
amd8111s_tx_drain(struct LayerPointers *adapter)
{
	struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
	int i, desc_count = 0;
	for (i = 0; i < 30; i++) {
		while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
			/* This packet has been transmitted */
			pTx_desc ++;
			desc_count ++;
		}
		if (desc_count == TX_RING_SIZE) {
			break;
		}
		/* Wait 1 ms */
		drv_usecwait(1000);
	}
	adapter->pOdl->statistics.tx_draintime = i;
}

/*
 * (GLD Entry Point) Start the card; called at 'ifconfig plumb'.
 */
static int
amd8111s_m_start(void *arg)
{
	struct LayerPointers *pLayerPointers = arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	pLayerPointers->run = B_TRUE;
	rw_exit(&pOdl->chip_lock);
	return (0);
}

/*
 * (GLD Entry Point) Stop the card; called at 'ifconfig unplumb'.
 */
static void
amd8111s_m_stop(void *arg)
{
	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Ensure all pending Tx packets are sent */
	amd8111s_tx_drain(pLayerPointers);
	/*
	 * Stop the controller and disable the controller interrupt
	 */
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	mdlStopChip(pLayerPointers);
	pLayerPointers->run = B_FALSE;
	rw_exit(&pOdl->chip_lock);
}

/*
 * Clean up all allocated resources.
 */
static void
amd8111s_free_resource(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_free_array[100];
	unsigned long *pmem_free_array, size;

	/* Free Rx/Tx descriptors */
	amd8111s_free_descriptors(pLayerPointers);

	/* Free memory on lower layers */
	milFreeResources(pLayerPointers, mem_free_array);
	pmem_free_array = mem_free_array;
	while (*pmem_free_array) {
		switch (*pmem_free_array) {
		case VIRTUAL:
			size = *(++pmem_free_array);
			pmem_free_array++;
			kmem_free((void *)*(pmem_free_array), size);
			break;
		}
		pmem_free_array++;
	}

	amd8111s_free_buffers(pLayerPointers);
}

/*
 * (GLD Entry Point) Add/delete multicast addresses
 */
static int
amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
{
	struct LayerPointers *pLayerPointers = arg;

	if (add) {
		/* Add a multicast entry */
		mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
	} else {
		/* Delete a multicast entry */
		mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
	}

	return (0);
}

#ifdef AMD8111S_DEBUG
/*
 * The size of MIB registers is only 32 bits. Dump them before one
 * of them overflows.
 */
static void
amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_statistics *adapterStat;

	adapterStat = &pLayerPointers->pOdl->statistics;

	adapterStat->mib_dump_counter ++;

	/*
	 * Rx Counters
	 */
	adapterStat->rx_mib_unicst_packets +=
	    mdlReadMib(pLayerPointers, RcvUniCastPkts);
	adapterStat->rx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
	adapterStat->rx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
	adapterStat->rx_mib_macctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvMACCtrl);
	adapterStat->rx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvFlowCtrl);

	adapterStat->rx_mib_bytes +=
	    mdlReadMib(pLayerPointers, RcvOctets);
	adapterStat->rx_mib_good_bytes +=
	    mdlReadMib(pLayerPointers, RcvGoodOctets);

	adapterStat->rx_mib_undersize_packets +=
	    mdlReadMib(pLayerPointers, RcvUndersizePkts);
	adapterStat->rx_mib_oversize_packets +=
	    mdlReadMib(pLayerPointers, RcvOversizePkts);

	adapterStat->rx_mib_drop_packets +=
	    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
	adapterStat->rx_mib_align_err_packets +=
	    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
	adapterStat->rx_mib_fcs_err_packets +=
	    mdlReadMib(pLayerPointers, RcvFCSErrors);
	adapterStat->rx_mib_symbol_err_packets +=
	    mdlReadMib(pLayerPointers, RcvSymbolErrors);
	adapterStat->rx_mib_miss_packets +=
	    mdlReadMib(pLayerPointers, RcvMissPkts);

	/*
	 * Tx Counters
	 */
	adapterStat->tx_mib_packets +=
	    mdlReadMib(pLayerPointers, XmtPackets);
	adapterStat->tx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
	adapterStat->tx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
	adapterStat->tx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, XmtFlowCtrl);

	adapterStat->tx_mib_bytes +=
	    mdlReadMib(pLayerPointers, XmtOctets);

	adapterStat->tx_mib_defer_trans_packets +=
	    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
	adapterStat->tx_mib_collision_packets +=
	    mdlReadMib(pLayerPointers, XmtCollisions);
	adapterStat->tx_mib_one_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtOneCollision);
	adapterStat->tx_mib_multi_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtMultipleCollision);
	adapterStat->tx_mib_late_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtLateCollision);
	adapterStat->tx_mib_ex_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtExcessiveCollision);


	/* Clear all MIB registers */
	WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
	    + MIB_ADDR, MIB_CLEAR);
}
#endif

/*
 * (GLD Entry Point) Set/unset promiscuous mode
 */
static int
amd8111s_m_promisc(void *arg, boolean_t on)
{
	struct LayerPointers *pLayerPointers = arg;

	if (on) {
		mdlSetPromiscuous(pLayerPointers);
	} else {
		mdlDisablePromiscuous(pLayerPointers);
	}

	return (0);
}

/*
 * (GLD Entry Point) Change the MAC address of the card
 */
static int
amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct LayerPointers *pLayerPointers = arg;

	mdlDisableInterrupt(pLayerPointers);
	mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
	mdlEnableInterrupt(pLayerPointers);

	return (0);
}

/*
 * Reset the card
 */
void
amd8111s_reset(struct LayerPointers *pLayerPointers)
{
	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board after it has been successfully probed.
 * It will:
 *	a. create a minor device node for the instance.
 *	b. allocate & initialize four layers (call odlInit).
 *	c. get the MAC address.
 *	d. initialize pLayerPointers as the gld private pointer.
 *	e. register with GLD.
 * If any step fails, it cleans up and returns DDI_FAILURE;
 * otherwise it returns DDI_SUCCESS.
 */
static int
amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	struct LayerPointers *pLayerPointers;
	struct odl *pOdl;
	ddi_acc_handle_t *pci_handle;
	ddi_device_acc_attr_t dev_attr;
	caddr_t addrp = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	pLayerPointers = (struct LayerPointers *)
	    kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
	amd8111sadapter = pLayerPointers;

	/* Get device instance number */
	pLayerPointers->instance = ddi_get_instance(devinfo);
	ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);

	pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
	pLayerPointers->pOdl = pOdl;

	pOdl->devinfo = devinfo;

	/*
	 * Here, we only allocate memory for struct odl and initialize it.
	 * All other memory allocation & initialization will be done in
	 * odlInit later on this routine.
	 */
	if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
	    != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: get iblock cookies failed");
		goto attach_failure;
	}

	rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);

	/* Setup PCI space */
	if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
	pci_handle = &pOdl->pci_handle;

	pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
	pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);

	/*
	 * Allocate and initialize all resource and map device registers.
	 * If failed, it returns a non-zero value.
	 */
	if (amd8111s_odlInit(pLayerPointers) != 0) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (ddi_regs_map_setup(devinfo, 1, &addrp, 0, 4096, &dev_attr,
	    &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: ddi_regs_map_setup failed");
		goto attach_failure;
	}
	pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;

	/* Initialize HW */
	mdlOpen(pLayerPointers);
	mdlGetActiveMediaInfo(pLayerPointers);
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;

	/*
	 * Setup the interrupt
	 */
	if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;

	/*
	 * Setup soft intr
	 */
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
	    NULL, NULL, amd8111s_send_drain,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;

	/*
	 * Initialize the mac structure
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_failure;

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = pLayerPointers;
	macp->m_dip = devinfo;
	/* Get MAC address */
	mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
	macp->m_src_addr = pOdl->MacAddress;
	macp->m_callbacks = &amd8111s_m_callbacks;
	macp->m_min_sdu = 0;
	/* 1518 - 14 (ether header) - 4 (CRC) */
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're ready to start.
	 */
	if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
		mac_free(macp);
		goto attach_failure;
	}
	mac_free(macp);

	pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;

	return (DDI_SUCCESS);

attach_failure:
	(void) amd8111s_unattach(devinfo, pLayerPointers);
	return (DDI_FAILURE);

}

/*
 * detach(9E) -- Detach a device from the system
 *
 * It is called for each device instance when the system is preparing to
 * unload a dynamically unloadable driver.
 * It will:
 *	a. check whether any driver buffers are held by the OS.
 *	b. clean up all allocated memory if it is not in use by the OS.
 *	c. unregister with GLD.
 *	d. return DDI_SUCCESS on successful free & unregister;
 *	   else DDI_FAILURE.
 */
static int
amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct LayerPointers *pLayerPointers;

	switch (cmd) {
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the driver private (struct LayerPointers *) structure
	 */
	if ((pLayerPointers = (struct LayerPointers *)ddi_get_driver_private
	    (devinfo)) == NULL) {
		return (DDI_FAILURE);
	}

	return (amd8111s_unattach(devinfo, pLayerPointers));
}

static int
amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
		/* Unregister driver from the GLD interface */
		if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
		ddi_remove_intr(devinfo, 0, pOdl->iblock);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
		ddi_remove_softintr(pOdl->drain_id);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
		/* Stop HW */
		mdlStopChip(pLayerPointers);
		ddi_regs_map_free(&(pOdl->MemBasehandle));
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
		/* Free All memory allocated */
		amd8111s_free_resource(pLayerPointers);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
		pci_config_teardown(&pOdl->pci_handle);
		mutex_destroy(&pOdl->mdlSendLock);
		mutex_destroy(&pOdl->mdlRcvLock);
		rw_destroy(&pOdl->chip_lock);
	}

	kmem_free(pOdl, sizeof (struct odl));
	kmem_free(pLayerPointers, sizeof (struct LayerPointers));

	return (DDI_SUCCESS);
}

/*
 * (GLD Entry Point) GLD calls this entry point periodically to
 * collect driver statistics.
 */
static int
amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct LayerPointers *pLayerPointers = arg;
	struct amd8111s_statistics *adapterStat;

	adapterStat = &pLayerPointers->pOdl->statistics;
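	/*
	 * Most counters below are the sum of the value the driver has
	 * accumulated in software and the live hardware MIB counter
	 * returned by mdlReadMib().
	 */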

	switch (stat) {

	/*
	 * Current Status
	 */
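	/* Speed is tracked in Mbit/s; MAC_STAT_IFSPEED is in bit/s. */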
	case MAC_STAT_IFSPEED:
		*val = pLayerPointers->pMdl->Speed * 1000000;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		if (pLayerPointers->pMdl->FullDuplex) {
			*val = LINK_DUPLEX_FULL;
		} else {
			*val = LINK_DUPLEX_HALF;
		}
		break;

	/*
	 * Capabilities
	 */
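	/*
	 * The AMD8111 is a 10/100 Mbit/s device, so all gigabit
	 * capabilities are reported as unsupported.
	 */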
	case ETHER_STAT_CAP_1000FDX:
		*val = 0;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = 0;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = 0;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = 0;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = 1;
		break;

	/*
	 * Rx Counters
	 */
	case MAC_STAT_IPACKETS:
		*val = adapterStat->rx_mib_unicst_packets +
		    adapterStat->rx_mib_multicst_packets +
		    adapterStat->rx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, RcvUniCastPkts) +
		    mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
		break;

	case MAC_STAT_RBYTES:
		*val = adapterStat->rx_mib_bytes +
		    mdlReadMib(pLayerPointers, RcvOctets);
		break;

	case MAC_STAT_MULTIRCV:
		*val = adapterStat->rx_mib_multicst_packets +
		    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = adapterStat->rx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
		break;

	case MAC_STAT_NORCVBUF:
		*val = adapterStat->rx_allocfail +
		    adapterStat->rx_mib_drop_packets +
		    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
		break;

	case MAC_STAT_IERRORS:
		*val = adapterStat->rx_mib_align_err_packets +
		    adapterStat->rx_mib_fcs_err_packets +
		    adapterStat->rx_mib_symbol_err_packets +
		    mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
		    mdlReadMib(pLayerPointers, RcvFCSErrors) +
		    mdlReadMib(pLayerPointers, RcvSymbolErrors);
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = adapterStat->rx_mib_align_err_packets +
		    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = adapterStat->rx_mib_fcs_err_packets +
		    mdlReadMib(pLayerPointers, RcvFCSErrors);
		break;

	/*
	 * Tx Counters
	 */
	case MAC_STAT_OPACKETS:
		*val = adapterStat->tx_mib_packets +
		    mdlReadMib(pLayerPointers, XmtPackets);
		break;

	case MAC_STAT_OBYTES:
		*val = adapterStat->tx_mib_bytes +
		    mdlReadMib(pLayerPointers, XmtOctets);
		break;

	case MAC_STAT_MULTIXMT:
		*val = adapterStat->tx_mib_multicst_packets +
		    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = adapterStat->tx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
		break;

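	/* Tracked in software only; no hardware MIB counter is added here. */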
	case MAC_STAT_NOXMTBUF:
		*val = adapterStat->tx_no_descriptor;
		break;

	case MAC_STAT_OERRORS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
		break;

	case MAC_STAT_COLLISIONS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtCollisions);
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = adapterStat->tx_mib_one_coll_packets +
		    mdlReadMib(pLayerPointers, XmtOneCollision);
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = adapterStat->tx_mib_multi_coll_packets +
		    mdlReadMib(pLayerPointers, XmtMultipleCollision);
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = adapterStat->tx_mib_late_coll_packets +
		    mdlReadMib(pLayerPointers, XmtLateCollision);
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = adapterStat->tx_mib_defer_trans_packets +
		    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
		break;

	default:
		return (ENOTSUP);
	}
	return (0);
}

/*
 * Memory-mapped register access routines used by the MDL to read and
 * set card registers.
 */
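/*
 * These helpers go through the access handle obtained from
 * ddi_regs_map_setup(), so every access honors the device access
 * attributes. The 'x' argument is a kernel virtual address within the
 * mapped register space (register base plus register offset), cast to
 * a pointer of the appropriate width.
 */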
unsigned char
READ_REG8(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
}

int
READ_REG16(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
	    (uint16_t *)(x)));
}

long
READ_REG32(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
	    (uint32_t *)(x)));
}

void
WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
}

void
WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
}

void
WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
}

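/*
 * Write a 64-bit value one byte at a time through WRITE_REG8(); used
 * for wide registers such as the 64-bit logical address (multicast)
 * filter.
 */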
void
WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
{
	int i;

	for (i = 0; i < 8; i++) {
		WRITE_REG8(pLayerPointers, (x + i), y[i]);
	}
}
