/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2001-2006 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * + Redistributions of source code must retain the above copyright notice,
 * + this list of conditions and the following disclaimer.
 *
 * + Redistributions in binary form must reproduce the above copyright
 * + notice, this list of conditions and the following disclaimer in the
 * + documentation and/or other materials provided with the distribution.
 *
 * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
 * + contributors may be used to endorse or promote products derived from
 * + this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
 * Compliance with Applicable Laws. Notice is hereby given that
 * the software may be subject to restrictions on use, release,
 * transfer, importation, exportation and/or re-exportation under
 * the laws and regulations of the United States or other
 * countries ("Applicable Laws"), which include but are not
 * limited to U.S. export control laws such as the Export
 * Administration Regulations and national security controls as
 * defined thereunder, as well as State Department controls under
 * the U.S. Munitions List. Permission to use and/or
 * redistribute the software is conditioned upon compliance with
 * all Applicable Laws, including U.S. export control laws
 * regarding specifically designated persons, countries and
 * nationals of countries subject to national security controls.
 */

/* include files */
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/vlan.h>
#include "amd8111s_main.h"

/* Global macro definitions */
#define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define	INTERFACE_NAME	"amd8111s"
#define	AMD8111S_SPLIT	128
#define	AMD8111S_SEND_MAX	64
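
/*
 * ROUNDUP() rounds x up to the next multiple of a, e.g.
 * ROUNDUP(1518, 16) == 1520; "a" must be a power of two, since the
 * macro works by masking off low-order bits.  AMD8111S_SPLIT is the
 * number of chunks ("trunks") each DMA ring buffer is split into in
 * amd8111s_alloc_dma_ringbuf(), and AMD8111S_SEND_MAX bounds how many
 * packets one amd8111s_send_serial() pass will queue.
 */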

static char ident[] = "AMD8111 10/100M Ethernet";
66
67 /*
68 * Driver Entry Points
69 */
70 static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
71 static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);
72
73 /*
74 * GLD Entry points prototype
75 */
76 static int amd8111s_m_unicst(void *, const uint8_t *);
77 static int amd8111s_m_promisc(void *, boolean_t);
78 static int amd8111s_m_stat(void *, uint_t, uint64_t *);
79 static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
80 static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
81 static int amd8111s_m_start(void *);
82 static void amd8111s_m_stop(void *);
83 static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
84 static uint_t amd8111s_intr(caddr_t);
85
86 static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);
87
88 static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
89 static int amd8111s_odlInit(struct LayerPointers *);
90 static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
91 static void amd8111s_free_descriptors(struct LayerPointers *);
92 static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
93 struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
94 static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);
95
96
97 static void amd8111s_log(struct LayerPointers *adapter, int level,
98 char *fmt, ...);

static struct cb_ops amd8111s_cb_ops = {
	nulldev,
	nulldev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nodev,
	nochpoll,
	ddi_prop_op,
	NULL,
	D_NEW | D_MP,
	CB_REV,		/* cb_rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};
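
/*
 * All the character-device entry points above are stubs (nulldev/nodev):
 * a GLDv3 NIC driver is reached through the MAC layer rather than through
 * open/read/write on its own device node, so only the property
 * (ddi_prop_op) and poll (nochpoll) slots carry real handlers.
 */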

static struct dev_ops amd8111s_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	amd8111s_attach,	/* devo_attach */
	amd8111s_detach,	/* devo_detach */
	nodev,			/* devo_reset */
	&amd8111s_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nodev,			/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

struct modldrv amd8111s_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,			/* short description */
	&amd8111s_dev_ops	/* driver specific ops */
};

struct modlinkage amd8111s_modlinkage = {
	MODREV_1, (void *)&amd8111s_modldrv, NULL
};

/*
 * Global Variables
 */
struct LayerPointers *amd8111sadapter;

static ddi_dma_attr_t pcn_buff_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint64_t)1,		/* dma_attr_align */
	(uint_t)0x7F,		/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};

static ddi_dma_attr_t pcn_desc_dma_attr_t = {
	DMA_ATTR_V0,		/* dma_attr_version */
	(uint64_t)0,		/* dma_attr_addr_lo */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint64_t)0x7FFFFFFF,	/* dma_attr_count_max */
	(uint64_t)0x10,		/* dma_attr_align */
	(uint_t)0xFFFFFFFFU,	/* dma_attr_burstsizes */
	(uint32_t)1,		/* dma_attr_minxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* granularity */
	(uint_t)0		/* dma_attr_flags */
};
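
/*
 * Both attribute sets limit DMA to the low 4GB (dma_attr_addr_hi ==
 * 0xFFFFFFFF), matching the 32-bit buffer addresses this controller's
 * descriptors hold (note the (unsigned int) casts when they are filled
 * in below).  Descriptors need 16-byte alignment (dma_attr_align ==
 * 0x10); data buffers may live anywhere (alignment 1), a property
 * amd8111s_alloc_dma_ringbuf() relies on and ASSERTs.
 */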

/* PIO access attributes for registers */
static ddi_device_acc_attr_t pcn_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

static mac_callbacks_t amd8111s_m_callbacks = {
	MC_IOCTL,
	amd8111s_m_stat,
	amd8111s_m_start,
	amd8111s_m_stop,
	amd8111s_m_promisc,
	amd8111s_m_multicst,
	amd8111s_m_unicst,
	amd8111s_m_tx,
	NULL,
	amd8111s_m_ioctl
};
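
/*
 * The MC_IOCTL flag advertises that m_ioctl is the only optional
 * callback supplied; the NULL entry is an unused optional slot
 * (presumably mc_resources in this vintage of the mac_callbacks_t
 * layout).
 */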


/*
 * Standard Driver Load Entry Point.
 * Called when the driver is loaded.
 */
int
_init()
{
	int status;
	mac_init_ops(&amd8111s_dev_ops, "amd8111s");

	status = mod_install(&amd8111s_modlinkage);
	if (status != DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}

/*
 * Standard Driver Entry Point for Query.
 * Can be called at any time to get driver info.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&amd8111s_modlinkage, modinfop));
}

/*
 * Standard Driver Entry Point for Unload.
 * Called when the driver is unloaded.
 */
int
_fini()
{
	int status;

	status = mod_remove(&amd8111s_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&amd8111s_dev_ops);
	}

	return (status);
}
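
/*
 * mod_remove() fails (returning EBUSY) while any instance of the
 * driver is still attached, so mac_fini_ops() only runs once the
 * module can really be unloaded; this mirrors the
 * mac_init_ops()/mod_install() pairing in _init().
 */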

/*
 * Loopback Support
 */
static lb_property_t loopmodes[] = {
	{ normal,	"normal",	AMD8111S_LB_NONE		},
	{ external,	"100Mbps",	AMD8111S_LB_EXTERNAL_100	},
	{ external,	"10Mbps",	AMD8111S_LB_EXTERNAL_10		},
	{ internal,	"MAC",		AMD8111S_LB_INTERNAL_MAC	}
};
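
/*
 * This table is what the LB_GET_INFO ioctl below copies out to user
 * space; LB_GET_INFO_SIZE reports sizeof (loopmodes) so the caller can
 * size its buffer before fetching the list.
 */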

static void
amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode)
{

	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == adapter->pOdl->loopback_mode)
		return;

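	/*
	 * The CMD2/CMD3 writes below appear to follow the controller's
	 * "VAL bit" convention: a command bit is set only when written
	 * together with its VAL0/VAL1 companion and cleared when written
	 * without it, which is why the LB_NONE case writes INLOOP or
	 * EXLOOP with no VAL0 to switch loopback off.
	 */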
	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return;

	case AMD8111S_LB_NONE:
		mdlStopChip(adapter);
		if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) {
			cmn_err(CE_NOTE, "LB_NONE restored from Internal LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    INLOOP);
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
			    FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
		} else {
			cmn_err(CE_NOTE, "LB_NONE restored from External LB");
			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
			    EXLOOP);
		}

		amd8111s_reset(adapter);
		adapter->pOdl->LinkStatus = LINK_STATE_DOWN;
		adapter->pOdl->rx_fcs_stripped = B_FALSE;
		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_100:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 100;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_EXTERNAL_10:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | EXLOOP);
		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->Speed = 10;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;

	case AMD8111S_LB_INTERNAL_MAC:
		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
		mdlStopChip(adapter);
		amd8111s_reset(adapter);
		SetIntrCoalesc(adapter, B_FALSE);
		/* Disable Port Manager */
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    EN_PMGR);
		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
		    VAL0 | INLOOP);

		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
		    VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);

		adapter->pOdl->LinkStatus = LINK_STATE_UP;
		adapter->pMdl->FullDuplex = B_TRUE;
		/* Tell GLD the state of the physical link. */
		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);

		adapter->pOdl->rx_fcs_stripped = B_TRUE;

		mdlStartChip(adapter);
		break;
	}

	/*
	 * All OK; record the new mode so a repeated request
	 * for the same mode is a no-op ...
	 */
	adapter->pOdl->loopback_mode = mode;
}

static enum ioc_reply
amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
    mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		amd8111s_log(adapter, CE_NOTE,
		    "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "wrong LB_GET_INFO_SIZE size");
			return (IOC_INVAL);
		}
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		break;

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_INFO size");
			return (IOC_INVAL);
		}
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		break;

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_GET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = adapter->pOdl->loopback_mode;
		break;

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			amd8111s_log(adapter, CE_NOTE,
			    "Wrong LB_SET_MODE size");
			return (IOC_INVAL);
		}
		lbmp = (void *)mp->b_cont->b_rptr;
		amd8111s_set_loop_mode(adapter, *lbmp);
		break;
	}
	return (IOC_REPLY);
}

static void
amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	struct LayerPointers *adapter;
	enum ioc_reply status;

	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	adapter = arg;

	ASSERT(adapter);
	if (adapter == NULL) {
		miocnak(q, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = amd8111s_loopback_ioctl(adapter, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}

/*
 * Copy one packet from DMA memory to an mblk and advance the DMA
 * descriptor read pointer.
 */
static boolean_t
amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
{
	int length = 0;
	mblk_t *mp;
	struct rx_desc *descriptor;
	struct odl *pOdl = pLayerPointers->pOdl;
	struct amd8111s_statistics *statistics = &pOdl->statistics;
	struct nonphysical *pNonphysical = pLayerPointers->pMil
	    ->pNonphysical;

	mutex_enter(&pOdl->mdlRcvLock);
	descriptor = pNonphysical->RxBufDescQRead->descriptor;
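	/*
	 * Sync just this descriptor from the device before inspecting its
	 * OWN bit; the offset handed to ddi_dma_sync() is the descriptor's
	 * byte offset from the start of the ring.
	 */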
	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
	    (pNonphysical->RxBufDescQRead->descriptor -
	    pNonphysical->RxBufDescQStart->descriptor) *
	    sizeof (struct rx_desc),
	    sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
	if ((descriptor->Rx_OWN) == 0) {
		/*
		 * If the frame is received with errors, then set MCNT
		 * of that pkt in ReceiveArray to 0. This packet would
		 * be discarded later and not indicated to OS.
		 */
		if (descriptor->Rx_ERR) {
			statistics->rx_desc_err++;
			descriptor->Rx_ERR = 0;
			if (descriptor->Rx_FRAM == 1) {
				statistics->rx_desc_err_FRAM++;
				descriptor->Rx_FRAM = 0;
			}
			if (descriptor->Rx_OFLO == 1) {
				statistics->rx_desc_err_OFLO++;
				descriptor->Rx_OFLO = 0;
				pOdl->rx_overflow_counter++;
				if ((pOdl->rx_overflow_counter > 5) &&
				    (pOdl->pause_interval == 0)) {
					statistics->rx_double_overflow++;
					mdlSendPause(pLayerPointers);
					pOdl->rx_overflow_counter = 0;
					pOdl->pause_interval = 25;
				}
			}
			if (descriptor->Rx_CRC == 1) {
				statistics->rx_desc_err_CRC++;
				descriptor->Rx_CRC = 0;
			}
			if (descriptor->Rx_BUFF == 1) {
				statistics->rx_desc_err_BUFF++;
				descriptor->Rx_BUFF = 0;
			}
			goto Next_Descriptor;
		}

		/* Length of incoming packet */
		if (pOdl->rx_fcs_stripped) {
			length = descriptor->Rx_MCNT - 4;
		} else {
			length = descriptor->Rx_MCNT;
		}
		if (length < 62) {
			statistics->rx_error_zerosize++;
		}

		if ((mp = allocb(length, BPRI_MED)) == NULL) {
			statistics->rx_allocfail++;
			goto failed;
		}
		/* Copy from virtual address of incoming packet */
		bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
		    mp->b_rptr, length);
		mp->b_wptr = mp->b_rptr + length;
		statistics->rx_ok_packets++;
		if (*last_mp == NULL) {
			*last_mp = mp;
		} else {
			(*last_mp)->b_next = mp;
			*last_mp = mp;
		}

Next_Descriptor:
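		/*
		 * Reset the status fields and hand the descriptor back to
		 * the controller (OWN = 1), then advance the software read
		 * pointer, wrapping at the end of the ring.
		 */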
		descriptor->Rx_MCNT = 0;
		descriptor->Rx_SOP = 0;
		descriptor->Rx_EOP = 0;
		descriptor->Rx_PAM = 0;
		descriptor->Rx_BAM = 0;
		descriptor->TT = 0;
		descriptor->Rx_OWN = 1;
		pNonphysical->RxBufDescQRead->descriptor++;
		pNonphysical->RxBufDescQRead->USpaceMap++;
		if (pNonphysical->RxBufDescQRead->descriptor >
		    pNonphysical->RxBufDescQEnd->descriptor) {
			pNonphysical->RxBufDescQRead->descriptor =
			    pNonphysical->RxBufDescQStart->descriptor;
			pNonphysical->RxBufDescQRead->USpaceMap =
			    pNonphysical->RxBufDescQStart->USpaceMap;
		}
		mutex_exit(&pOdl->mdlRcvLock);

		return (B_TRUE);
	}

failed:
	mutex_exit(&pOdl->mdlRcvLock);
	return (B_FALSE);
}

/*
 * Get the received packets from the NIC and pass them up to GLD.
 */
static void
amd8111s_receive(struct LayerPointers *pLayerPointers)
{
	int numOfPkts = 0;
	struct odl *pOdl;
	mblk_t *ret_mp = NULL, *last_mp = NULL;

	pOdl = pLayerPointers->pOdl;

	rw_enter(&pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		rw_exit(&pOdl->chip_lock);
		return;
	}

	if (pOdl->pause_interval > 0)
		pOdl->pause_interval--;

	while (numOfPkts < RX_RING_SIZE) {

		if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
			break;
		}
		if (ret_mp == NULL)
			ret_mp = last_mp;
		numOfPkts++;
	}

	if (ret_mp) {
		mac_rx(pOdl->mh, NULL, ret_mp);
	}

	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	mdlReceive(pLayerPointers);

	rw_exit(&pOdl->chip_lock);
}

/*
 * Print a message in the release-version driver.
 */
static void
amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
{
	char name[32];
	char buf[256];
	va_list ap;

	if (adapter != NULL) {
		(void) sprintf(name, "amd8111s%d",
		    ddi_get_instance(adapter->pOdl->devinfo));
	} else {
		(void) sprintf(name, "amd8111s");
	}
	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);
	cmn_err(level, "%s: %s", name, buf);
}

/*
 * Allocate and initialize all resources.
 * Called by amd8111s_attach().
 */
static int
amd8111s_odlInit(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_req_array[MEM_REQ_MAX];
	unsigned long mem_set_array[MEM_REQ_MAX];
	unsigned long *pmem_req_array;
	unsigned long *pmem_set_array;
	int i, size;

	for (i = 0; i < MEM_REQ_MAX; i++) {
		mem_req_array[i] = 0;
		mem_set_array[i] = 0;
	}

	milRequestResources(mem_req_array);

	pmem_req_array = mem_req_array;
	pmem_set_array = mem_set_array;
	while (*pmem_req_array) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			*pmem_set_array = VIRTUAL;
			pmem_req_array++;
			pmem_set_array++;
			*(pmem_set_array) = *(pmem_req_array);
			pmem_set_array++;
			*(pmem_set_array) = (unsigned long) kmem_zalloc(
			    *(pmem_req_array), KM_NOSLEEP);
			if (*pmem_set_array == NULL)
				goto odl_init_failure;
			break;
		}
		pmem_req_array++;
		pmem_set_array++;
	}

	/*
	 * Initialize memory on lower layers
	 */
	milSetResources(pLayerPointers, mem_set_array);

	/* Allocate Rx/Tx descriptors */
	if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}

	/*
	 * Allocate an Rx buffer for each Rx descriptor. Then call the mil
	 * layer routine to fill the physical address of each Rx buffer
	 * into its Rx descriptor.
	 */
	if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
		amd8111s_free_descriptors(pLayerPointers);
		*pmem_set_array = NULL;
		goto odl_init_failure;
	}
	milInitGlbds(pLayerPointers);

	return (0);

odl_init_failure:
	/*
	 * Free all memory allocated so far
	 */
	pmem_req_array = mem_set_array;
	while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
		switch (*pmem_req_array) {
		case VIRTUAL:
			pmem_req_array++;	/* Size */
			size = *(pmem_req_array);
			pmem_req_array++;	/* Virtual Address */
			if (*pmem_req_array == NULL)
				return (1);
			kmem_free((void *)*pmem_req_array, size);
			break;
		}
		pmem_req_array++;
	}
	return (1);
}

/*
 * Allocate and initialize Tx/Rx descriptors
 */
static boolean_t
amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;
	struct mil *pMil = pLayerPointers->pMil;
	dev_info_t *devinfo = pOdl->devinfo;
	uint_t length, count, i;
	size_t real_length;

	/*
	 * Allocate Rx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Rx desc failed");
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
	    &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Rx desc failed");
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}

	if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Rx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->rx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Rx desc failed");
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
		return (B_FALSE);
	}
	ASSERT(count == 1);

	/* Initialize Rx descriptors related variables */
	pMil->Rx_desc = (struct rx_desc *)
	    ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->Rx_desc_pa = (unsigned int)
	    ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & ~ALIGNMENT);
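
	/*
	 * "(addr + ALIGNMENT) & ~ALIGNMENT" is the usual align-up idiom,
	 * with ALIGNMENT acting as a mask (a power of two minus one); the
	 * extra ALIGNMENT bytes included in "length" above guarantee the
	 * rounded-up address still lies within the allocation.
	 */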

	pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;

	/*
	 * Allocate Tx descriptors
	 */
	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_alloc_handle for Tx desc failed");
		goto allocate_desc_fail;
	}

	length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
	if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
	    &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Tx desc failed");
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}

	if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
	    NULL, (caddr_t)pMil->Tx_desc_original, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &pOdl->tx_desc_dma_cookie,
	    &count) != DDI_SUCCESS) {

		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_addr_bind_handle for Tx desc failed");
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		goto allocate_desc_fail;
	}
	ASSERT(count == 1);
	/* Set the DMA area to all zeros */
	bzero((caddr_t)pMil->Tx_desc_original, length);

	/* Initialize Tx descriptors related variables */
	pMil->Tx_desc = (struct tx_desc *)
	    ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
	pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE - 1]);

	/* Physical Addr of Tx_desc_original & Tx_desc */
	pLayerPointers->pMil->Tx_desc_pa =
	    ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
	    ~ALIGNMENT);

	/* Setting the reserved bits in the tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
		pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
		pMil->pNonphysical->TxDescQWrite++;
	}
	pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;

	pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;

	return (B_TRUE);

allocate_desc_fail:
	pOdl->tx_desc_dma_handle = NULL;
	(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
	ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
	ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
	pOdl->rx_desc_dma_handle = NULL;
	return (B_FALSE);
}

/*
 * Free Tx/Rx descriptors
 */
static void
amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Free Rx descriptors */
	if (pOdl->rx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
		pOdl->rx_desc_dma_handle = NULL;
	}

	/* Free Tx descriptors */
	if (pOdl->tx_desc_dma_handle) {
		(void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
		pOdl->tx_desc_dma_handle = NULL;
	}
}

/*
 * Allocate Tx/Rx Ring buffer
 */
static boolean_t
amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
    struct amd8111s_dma_ringbuf *pRing,
    uint32_t ring_size, uint32_t msg_size)
{
	uint32_t idx, msg_idx = 0, msg_acc;
	dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
	size_t real_length;
	uint_t count = 0;

	ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
	pRing->dma_buf_sz = msg_size;
	pRing->ring_size = ring_size;
	pRing->trunk_num = AMD8111S_SPLIT;
	pRing->buf_sz = msg_size * ring_size;
	if (ring_size < pRing->trunk_num)
		pRing->trunk_num = ring_size;
	ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);

	pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
	ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);
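
	/*
	 * The ring's backing store (msg_size * ring_size bytes) is carved
	 * into trunk_num equal "trunks", each a separate DMA allocation;
	 * the ASSERTs above guarantee every trunk holds a whole number of
	 * message buffers.  Splitting presumably keeps each
	 * ddi_dma_mem_alloc() request small rather than demanding one
	 * large physically contiguous region.
	 */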

	pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
	    ring_size, KM_NOSLEEP);
	pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
	    pRing->trunk_num, KM_NOSLEEP);
	if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
	    pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
	    pRing->dma_cookie == NULL) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "kmem_zalloc failed");
		goto failed;
	}

	for (idx = 0; idx < pRing->trunk_num; ++idx) {
		if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
		    DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_alloc_handle failed");
			goto failed;
		} else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
		    pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    (caddr_t *)&(pRing->trunk_addr[idx]),
		    (size_t *)(&real_length), &pRing->acc_hdl[idx])
		    != DDI_SUCCESS) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc failed");
			goto failed;
		} else if (real_length != pRing->trunk_sz) {
			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc returned a short buffer");
			goto failed;
		} else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
		    NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {

			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_addr_bind_handle failed");
			goto failed;
		} else {
			for (msg_acc = 0;
			    msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
			    ++msg_acc) {
				pRing->msg_buf[msg_idx].offset =
				    msg_acc * pRing->dma_buf_sz;
				pRing->msg_buf[msg_idx].vir_addr =
				    pRing->trunk_addr[idx] +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].phy_addr =
				    pRing->dma_cookie[idx].dmac_laddress +
				    pRing->msg_buf[msg_idx].offset;
				pRing->msg_buf[msg_idx].p_hdl =
				    pRing->dma_hdl[idx];
				msg_idx++;
			}
		}
	}

	pRing->free = pRing->msg_buf;
	pRing->next = pRing->msg_buf;
	pRing->curr = pRing->msg_buf;
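
	/*
	 * free, next and curr all start at the first message buffer and
	 * then chase each other around the ring: amd8111s_getTxbuf()
	 * hands out buffers at "next", amd8111s_send_serial() copies
	 * buffers into descriptors starting at "curr", and
	 * amd8111s_recycle_tx() advances "free" as the hardware releases
	 * descriptors.
	 */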

	return (B_TRUE);
failed:
	amd8111s_free_dma_ringbuf(pRing);
	return (B_FALSE);
}

/*
 * Free Tx/Rx ring buffer
 */
static void
amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
{
	int idx;

	if (pRing->dma_cookie != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->dma_cookie[idx].dmac_laddress == 0) {
				break;
			}
			(void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_cookie,
		    sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
	}

	if (pRing->acc_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->acc_hdl[idx] == NULL)
				break;
			ddi_dma_mem_free(&pRing->acc_hdl[idx]);
		}
		kmem_free(pRing->acc_hdl,
		    sizeof (ddi_acc_handle_t) * pRing->trunk_num);
	}

	if (pRing->dma_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->dma_hdl[idx] == NULL) {
				break;
			}
			ddi_dma_free_handle(&pRing->dma_hdl[idx]);
		}
		kmem_free(pRing->dma_hdl,
		    sizeof (ddi_dma_handle_t) * pRing->trunk_num);
	}

	if (pRing->msg_buf != NULL) {
		kmem_free(pRing->msg_buf,
		    sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
	}

	if (pRing->trunk_addr != NULL) {
		kmem_free(pRing->trunk_addr,
		    sizeof (caddr_t) * pRing->trunk_num);
	}

	bzero(pRing, sizeof (*pRing));
}


/*
 * Allocate all Tx buffers.
 * Allocate an Rx buffer for each Rx descriptor, then call the mil
 * routine to fill the physical address of each Rx buffer into the
 * Rx descriptors.
 */
static boolean_t
amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	/*
	 * Allocate Rx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
	    RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for rx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Allocate Tx buffers
	 */
	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
	    TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for tx failed");
		goto allocate_buf_fail;
	}

	/*
	 * Initialize the mil queues
	 */
	milInitGlbds(pLayerPointers);

	milInitRxQ(pLayerPointers);

	return (B_TRUE);

allocate_buf_fail:

	amd8111s_log(pLayerPointers, CE_WARN,
	    "amd8111s_allocate_buffers failed");
	return (B_FALSE);
}

/*
 * Free all Rx/Tx buffers
 */
static void
amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
{
	/* Free Tx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);

	/* Free Rx buffers */
	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
}

/*
 * Try to recycle all the descriptors and Tx buffers
 * which have already been released by the hardware.
 */
static int
amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count = 0;

	pNonphysical = pLayerPointers->pMil->pNonphysical;
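
	/*
	 * A descriptor can be reclaimed once the controller has cleared
	 * its OWN bit; stop at TxDescQWrite so the read pointer never
	 * overtakes the producer.  Each reclaimed descriptor also gives
	 * one Tx buffer back to the ring ("free").
	 */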
	while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
	    pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
		pLayerPointers->pOdl->tx_buf.free =
		    NEXT(pLayerPointers->pOdl->tx_buf, free);
		pNonphysical->TxDescQRead++;
		if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
		}
		count++;
	}

	if (pLayerPointers->pMil->tx_reschedule)
		ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);

	return (count);
}

/*
 * Copy the packets in the Tx ring buffer into Tx descriptors and
 * trigger the hardware to send them out.
 */
static void
amd8111s_send_serial(struct LayerPointers *pLayerPointers)
{
	struct nonphysical *pNonphysical;
	uint32_t count;

	pNonphysical = pLayerPointers->pMil->pNonphysical;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);

	for (count = 0; count < AMD8111S_SEND_MAX; count++) {
		if (pLayerPointers->pOdl->tx_buf.curr ==
		    pLayerPointers->pOdl->tx_buf.next) {
			break;
		}
		/*
		 * The descriptor ring is full when advancing the write
		 * pointer would land on the read pointer; try to recycle
		 * completed descriptors before giving up.
		 */
		if (((pNonphysical->TxDescQWrite + 1 >
		    pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart :
		    (pNonphysical->TxDescQWrite + 1)) ==
		    pNonphysical->TxDescQRead)
			if (amd8111s_recycle_tx(pLayerPointers) == 0) {
				pLayerPointers->pOdl
				    ->statistics.tx_no_descriptor++;
				break;
			}

		/* Fill packet length */
		pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
		    ->pOdl->tx_buf.curr->msg_size;

		/* Fill physical buffer address */
		pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
		    pLayerPointers->pOdl->tx_buf.curr->phy_addr;

		pNonphysical->TxDescQWrite->Tx_SOP = 1;
		pNonphysical->TxDescQWrite->Tx_EOP = 1;
		pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
		pNonphysical->TxDescQWrite->Tx_LTINT = 1;
		pNonphysical->TxDescQWrite->Tx_USPACE = 0;
		pNonphysical->TxDescQWrite->Tx_OWN = 1;

		pNonphysical->TxDescQWrite++;
		if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
			pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
		}

		pLayerPointers->pOdl->tx_buf.curr =
		    NEXT(pLayerPointers->pOdl->tx_buf, curr);
	}

	pLayerPointers->pOdl->statistics.tx_ok_packets += count;

	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	/* Call mdlTransmit to send the pkts out on the network */
	mdlTransmit(pLayerPointers);
}

/*
 * Soft interrupt handler: try to send out the packets in the Tx
 * buffer.  If a reschedule is pending, call mac_tx_update() to
 * re-enable the transmit path.
 */
static uint_t
amd8111s_send_drain(caddr_t arg)
{
	struct LayerPointers *pLayerPointers = (void *)arg;

	amd8111s_send_serial(pLayerPointers);

	if (pLayerPointers->pMil->tx_reschedule &&
	    NEXT(pLayerPointers->pOdl->tx_buf, next) !=
	    pLayerPointers->pOdl->tx_buf.free) {
		mac_tx_update(pLayerPointers->pOdl->mh);
		pLayerPointers->pMil->tx_reschedule = B_FALSE;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Get a Tx buffer
 */
static struct amd8111s_msgbuf *
amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_msgbuf *tmp, *next;

	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
	next = NEXT(pLayerPointers->pOdl->tx_buf, next);
	if (next == pLayerPointers->pOdl->tx_buf.free) {
		tmp = NULL;
	} else {
		tmp = pLayerPointers->pOdl->tx_buf.next;
		pLayerPointers->pOdl->tx_buf.next = next;
	}
	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);

	return (tmp);
}

static boolean_t
amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
{
	struct odl *pOdl;
	size_t frag_len;
	mblk_t *tmp;
	struct amd8111s_msgbuf *txBuf;
	uint8_t *pMsg;

	pOdl = pLayerPointers->pOdl;

	/* alloc send buffer */
	txBuf = amd8111s_getTxbuf(pLayerPointers);
	if (txBuf == NULL) {
		pOdl->statistics.tx_no_buffer++;
		pLayerPointers->pMil->tx_reschedule = B_TRUE;
		amd8111s_send_serial(pLayerPointers);
		return (B_FALSE);
	}

	/* copy packet to send buffer */
	txBuf->msg_size = 0;
	pMsg = (uint8_t *)txBuf->vir_addr;
	for (tmp = mp; tmp; tmp = tmp->b_cont) {
		frag_len = MBLKL(tmp);
		bcopy(tmp->b_rptr, pMsg, frag_len);
		txBuf->msg_size += frag_len;
		pMsg += frag_len;
	}
	freemsg(mp);

	amd8111s_send_serial(pLayerPointers);

	return (B_TRUE);
}

/*
 * (GLD Entry Point) Send the message block to the lower layer
 */
static mblk_t *
amd8111s_m_tx(void *arg, mblk_t *mp)
{
	struct LayerPointers *pLayerPointers = arg;
	mblk_t *next;

	rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
	if (!pLayerPointers->run) {
		pLayerPointers->pOdl->statistics.tx_afterunplumb++;
		freemsgchain(mp);
		mp = NULL;
	}

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if (!amd8111s_send(pLayerPointers, mp)) {
			/* Send failed */
			mp->b_next = next;
			break;
		}
		mp = next;
	}

	rw_exit(&pLayerPointers->pOdl->chip_lock);
	return (mp);
}

/*
 * (GLD Entry Point) Interrupt Service Routine
 */
static uint_t
amd8111s_intr(caddr_t arg)
{
	unsigned int intrCauses;
	struct LayerPointers *pLayerPointers = (void *)arg;

	/* Read the interrupt status from mdl */
	intrCauses = mdlReadInterrupt(pLayerPointers);

	if (intrCauses == 0) {
		pLayerPointers->pOdl->statistics.intr_OTHER++;
		return (DDI_INTR_UNCLAIMED);
	}

	if (intrCauses & LCINT) {
		if (mdlReadLink(pLayerPointers) == LINK_UP) {
			mdlGetActiveMediaInfo(pLayerPointers);
			/* Link status changed */
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_UP) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_UP;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_UP);
			}
		} else {
			if (pLayerPointers->pOdl->LinkStatus !=
			    LINK_STATE_DOWN) {
				pLayerPointers->pOdl->LinkStatus =
				    LINK_STATE_DOWN;
				mac_link_update(pLayerPointers->pOdl->mh,
				    LINK_STATE_DOWN);
			}
		}
	}
	/*
	 * RINT0: Receive Interrupt is set by the controller after the last
	 * descriptor of a receive frame for this ring has been updated by
	 * writing a 0 to the OWNership bit.
	 */
	if (intrCauses & RINT0) {
		pLayerPointers->pOdl->statistics.intr_RINT0++;
		amd8111s_receive(pLayerPointers);
	}

	/*
	 * TINT0: Transmit Interrupt is set by the controller after the OWN bit
	 * in the last descriptor of a transmit frame in this particular ring
	 * has been cleared to indicate the frame has been copied to the
	 * transmit FIFO.
	 */
	if (intrCauses & TINT0) {
		pLayerPointers->pOdl->statistics.intr_TINT0++;
		/*
		 * If the descriptor ring has free entries but the Tx
		 * buffer still holds packets, drain the Tx buffer now.
		 */
		amd8111s_send_serial(pLayerPointers);
	}

	if (intrCauses & STINT) {
		pLayerPointers->pOdl->statistics.intr_STINT++;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Re-initialize the driver's data structures.
 */
static void
amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
{
	/* Reset all Tx/Rx queues and descriptors */
	milResetTxQ(pLayerPointers);
	milInitRxQ(pLayerPointers);
}

/*
 * Send all pending tx packets
 */
static void
amd8111s_tx_drain(struct LayerPointers *adapter)
{
	struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
	int i, desc_count = 0;
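
	/*
	 * Poll up to 30 times, 1 ms apart, waiting for the controller to
	 * hand back (Tx_OWN == 0) all TX_RING_SIZE descriptors;
	 * tx_draintime records how many milliseconds the drain took.
	 */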
	for (i = 0; i < 30; i++) {
		while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
			/* This packet has been transmitted */
			pTx_desc++;
			desc_count++;
		}
		if (desc_count == TX_RING_SIZE) {
			break;
		}
		/* Wait 1 ms */
		drv_usecwait(1000);
	}
	adapter->pOdl->statistics.tx_draintime = i;
}

/*
 * (GLD Entry Point) Start the card; called when the interface
 * is plumbed.
 */
static int
amd8111s_m_start(void *arg)
{
	struct LayerPointers *pLayerPointers = arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	pLayerPointers->run = B_TRUE;
	rw_exit(&pOdl->chip_lock);
	return (0);
}

/*
 * (GLD Entry Point) Stop the card; called when the interface
 * is unplumbed.
 */
static void
amd8111s_m_stop(void *arg)
{
	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
	struct odl *pOdl = pLayerPointers->pOdl;

	/* Wait for all pending tx packets to be sent */
	amd8111s_tx_drain(pLayerPointers);
	/*
	 * Stop the controller and disable the controller interrupt
	 */
	rw_enter(&pOdl->chip_lock, RW_WRITER);
	mdlStopChip(pLayerPointers);
	pLayerPointers->run = B_FALSE;
	rw_exit(&pOdl->chip_lock);
}

/*
 * Clean up all allocated resources.
 */
static void
amd8111s_free_resource(struct LayerPointers *pLayerPointers)
{
	unsigned long mem_free_array[100];
	unsigned long *pmem_free_array, size;

	/* Free Rx/Tx descriptors */
	amd8111s_free_descriptors(pLayerPointers);

	/* Free memory on lower layers */
	milFreeResources(pLayerPointers, mem_free_array);
	pmem_free_array = mem_free_array;
	while (*pmem_free_array) {
		switch (*pmem_free_array) {
		case VIRTUAL:
			size = *(++pmem_free_array);
			pmem_free_array++;
			kmem_free((void *)*(pmem_free_array), size);
			break;
		}
		pmem_free_array++;
	}

	amd8111s_free_buffers(pLayerPointers);
}

/*
 * (GLD Entry Point) Add/delete multicast addresses
 */
static int
amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
{
	struct LayerPointers *pLayerPointers = arg;

	if (add) {
		/* Add a multicast entry */
		mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
	} else {
		/* Delete a multicast entry */
		mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
	}

	return (0);
}

#ifdef AMD8111S_DEBUG
/*
 * The MIB registers are only 32 bits wide. Dump them into the
 * software counters before one of them overflows.
 */
static void
amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
{
	struct amd8111s_statistics *adapterStat;

	adapterStat = &pLayerPointers->pOdl->statistics;

	adapterStat->mib_dump_counter++;

	/*
	 * Rx Counters
	 */
	adapterStat->rx_mib_unicst_packets +=
	    mdlReadMib(pLayerPointers, RcvUniCastPkts);
	adapterStat->rx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
	adapterStat->rx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
	adapterStat->rx_mib_macctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvMACCtrl);
	adapterStat->rx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, RcvFlowCtrl);

	adapterStat->rx_mib_bytes +=
	    mdlReadMib(pLayerPointers, RcvOctets);
	adapterStat->rx_mib_good_bytes +=
	    mdlReadMib(pLayerPointers, RcvGoodOctets);

	adapterStat->rx_mib_undersize_packets +=
	    mdlReadMib(pLayerPointers, RcvUndersizePkts);
	adapterStat->rx_mib_oversize_packets +=
	    mdlReadMib(pLayerPointers, RcvOversizePkts);

	adapterStat->rx_mib_drop_packets +=
	    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
	adapterStat->rx_mib_align_err_packets +=
	    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
	adapterStat->rx_mib_fcs_err_packets +=
	    mdlReadMib(pLayerPointers, RcvFCSErrors);
	adapterStat->rx_mib_symbol_err_packets +=
	    mdlReadMib(pLayerPointers, RcvSymbolErrors);
	adapterStat->rx_mib_miss_packets +=
	    mdlReadMib(pLayerPointers, RcvMissPkts);

	/*
	 * Tx Counters
	 */
	adapterStat->tx_mib_packets +=
	    mdlReadMib(pLayerPointers, XmtPackets);
	adapterStat->tx_mib_multicst_packets +=
	    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
	adapterStat->tx_mib_broadcst_packets +=
	    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
	adapterStat->tx_mib_flowctrl_packets +=
	    mdlReadMib(pLayerPointers, XmtFlowCtrl);

	adapterStat->tx_mib_bytes +=
	    mdlReadMib(pLayerPointers, XmtOctets);

	adapterStat->tx_mib_defer_trans_packets +=
	    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
	adapterStat->tx_mib_collision_packets +=
	    mdlReadMib(pLayerPointers, XmtCollisions);
	adapterStat->tx_mib_one_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtOneCollision);
	adapterStat->tx_mib_multi_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtMultipleCollision);
	adapterStat->tx_mib_late_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtLateCollision);
	adapterStat->tx_mib_ex_coll_packets +=
	    mdlReadMib(pLayerPointers, XmtExcessiveCollision);

	/* Clear all MIB registers */
	WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
	    + MIB_ADDR, MIB_CLEAR);
}
#endif

/*
 * (GLD Entry Point) Set/unset promiscuous mode
 */
static int
amd8111s_m_promisc(void *arg, boolean_t on)
{
	struct LayerPointers *pLayerPointers = arg;

	if (on) {
		mdlSetPromiscuous(pLayerPointers);
	} else {
		mdlDisablePromiscuous(pLayerPointers);
	}

	return (0);
}

/*
 * (GLD Entry Point) Change the MAC address of the card
 */
static int
amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct LayerPointers *pLayerPointers = arg;

	mdlDisableInterrupt(pLayerPointers);
	mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
	mdlEnableInterrupt(pLayerPointers);

	return (0);
}

/*
 * Reset the card
 */
void
amd8111s_reset(struct LayerPointers *pLayerPointers)
{
	amd8111s_sw_reset(pLayerPointers);
	mdlHWReset(pLayerPointers);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board after it has been successfully probed.
 * It will:
 *	a. create a minor device node for the instance,
 *	b. allocate and initialize the four layers (via odlInit),
 *	c. get the MAC address,
 *	d. store pLayerPointers as the GLD private pointer, and
 *	e. register with GLD.
 * If any step fails it cleans up and returns DDI_FAILURE;
 * otherwise it returns DDI_SUCCESS.
 */
static int
amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	struct LayerPointers *pLayerPointers;
	struct odl *pOdl;
	ddi_acc_handle_t *pci_handle;
	ddi_device_acc_attr_t dev_attr;
	caddr_t addrp = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	pLayerPointers = (struct LayerPointers *)
	    kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
	amd8111sadapter = pLayerPointers;

	/* Get device instance number */
	pLayerPointers->instance = ddi_get_instance(devinfo);
	ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);

	pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
	pLayerPointers->pOdl = pOdl;

	pOdl->devinfo = devinfo;

	/*
	 * Here we only allocate memory for struct odl and initialize it.
	 * All other memory allocation and initialization is done in
	 * odlInit, later in this routine.
	 */
	if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
	    != DDI_SUCCESS) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: get iblock cookies failed");
		goto attach_failure;
	}

	rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);
	mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
	    MUTEX_DRIVER, (void *)pOdl->iblock);

	/* Setup PCI space */
	if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
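
	/*
	 * attach_progress accumulates an AMD8111S_ATTACH_* flag as each
	 * setup step succeeds, so amd8111s_unattach() can tear down
	 * exactly the steps that completed; the same routine serves both
	 * this function's failure path and detach(9E).
	 */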
	pci_handle = &pOdl->pci_handle;

	pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
	pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);

	/*
	 * Allocate and initialize all resources and map device registers.
	 * If this fails, a non-zero value is returned.
	 */
	if (amd8111s_odlInit(pLayerPointers) != 0) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (ddi_regs_map_setup(devinfo, 1, &addrp, 0, 4096, &dev_attr,
	    &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
		amd8111s_log(pLayerPointers, CE_NOTE,
		    "attach: ddi_regs_map_setup failed");
		goto attach_failure;
	}
	pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;

	/* Initialize HW */
	mdlOpen(pLayerPointers);
	mdlGetActiveMediaInfo(pLayerPointers);
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;

	/*
	 * Setup the interrupt
	 */
	if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;

	/*
	 * Setup the soft interrupt
	 */
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
	    NULL, NULL, amd8111s_send_drain,
	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
		goto attach_failure;
	}
	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;

	/*
	 * Initialize the mac structure
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_failure;

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = pLayerPointers;
	macp->m_dip = devinfo;
	/* Get MAC address */
	mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
	macp->m_src_addr = pOdl->MacAddress;
	macp->m_callbacks = &amd8111s_m_callbacks;
	macp->m_min_sdu = 0;
	/* 1518 - 14 (ether header) - 4 (CRC) */
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're ready to start.
	 */
	if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
		mac_free(macp);
		goto attach_failure;
	}
	mac_free(macp);

	pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;

	return (DDI_SUCCESS);

attach_failure:
	(void) amd8111s_unattach(devinfo, pLayerPointers);
	return (DDI_FAILURE);
}

/*
 * detach(9E) -- Detach a device from the system
 *
 * Called for each device instance when the system is preparing to
 * unload a dynamically unloadable driver.
 * It will:
 *	a. check whether any driver buffers are still held by the OS,
 *	b. clean up all allocated memory if none is in use,
 *	c. unregister with GLD, and
 *	d. return DDI_SUCCESS on a successful free and unregister,
 *	   DDI_FAILURE otherwise.
 */
static int
amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct LayerPointers *pLayerPointers;

	switch (cmd) {
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the driver private (struct LayerPointers *) structure
	 */
	if ((pLayerPointers = (struct LayerPointers *)ddi_get_driver_private
	    (devinfo)) == NULL) {
		return (DDI_FAILURE);
	}

	return (amd8111s_unattach(devinfo, pLayerPointers));
}

static int
amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
		/* Unregister driver from the GLD interface */
		if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
		ddi_remove_intr(devinfo, 0, pOdl->iblock);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
		ddi_remove_softintr(pOdl->drain_id);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
		/* Stop HW */
		mdlStopChip(pLayerPointers);
		ddi_regs_map_free(&(pOdl->MemBasehandle));
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
		/* Free all memory allocated */
		amd8111s_free_resource(pLayerPointers);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
		pci_config_teardown(&pOdl->pci_handle);
		mutex_destroy(&pOdl->mdlSendLock);
		mutex_destroy(&pOdl->mdlRcvLock);
		rw_destroy(&pOdl->chip_lock);
	}

	kmem_free(pOdl, sizeof (struct odl));
	kmem_free(pLayerPointers, sizeof (struct LayerPointers));

	return (DDI_SUCCESS);
}

/*
 * (GLD Entry Point) GLD calls this entry point periodically to
 * get driver statistics.
 */
static int
amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct LayerPointers *pLayerPointers = arg;
	struct amd8111s_statistics *adapterStat;

	adapterStat = &pLayerPointers->pOdl->statistics;

	switch (stat) {

	/*
	 * Current Status
	 */
	case MAC_STAT_IFSPEED:
		*val = pLayerPointers->pMdl->Speed * 1000000;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		if (pLayerPointers->pMdl->FullDuplex) {
			*val = LINK_DUPLEX_FULL;
		} else {
			*val = LINK_DUPLEX_HALF;
		}
		break;

	/*
	 * Capabilities
	 */
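	/*
	 * The AMD8111 is a 10/100 part, so the gigabit capabilities
	 * below are reported as unsupported.
	 */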
	case ETHER_STAT_CAP_1000FDX:
		*val = 0;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = 0;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = 0;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = 0;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = 1;
		break;

	/*
	 * Rx Counters
	 */
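	/*
	 * Each counter below is the driver's accumulated soft count plus
	 * the chip's current hardware MIB count, read via mdlReadMib().
	 */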
	case MAC_STAT_IPACKETS:
		*val = adapterStat->rx_mib_unicst_packets +
		    adapterStat->rx_mib_multicst_packets +
		    adapterStat->rx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, RcvUniCastPkts) +
		    mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
		break;

	case MAC_STAT_RBYTES:
		*val = adapterStat->rx_mib_bytes +
		    mdlReadMib(pLayerPointers, RcvOctets);
		break;

	case MAC_STAT_MULTIRCV:
		*val = adapterStat->rx_mib_multicst_packets +
		    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = adapterStat->rx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
		break;

	case MAC_STAT_NORCVBUF:
		*val = adapterStat->rx_allocfail +
		    adapterStat->rx_mib_drop_packets +
		    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
		break;

	case MAC_STAT_IERRORS:
		*val = adapterStat->rx_mib_align_err_packets +
		    adapterStat->rx_mib_fcs_err_packets +
		    adapterStat->rx_mib_symbol_err_packets +
		    mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
		    mdlReadMib(pLayerPointers, RcvFCSErrors) +
		    mdlReadMib(pLayerPointers, RcvSymbolErrors);
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = adapterStat->rx_mib_align_err_packets +
		    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = adapterStat->rx_mib_fcs_err_packets +
		    mdlReadMib(pLayerPointers, RcvFCSErrors);
		break;

	/*
	 * Tx Counters
	 */
	case MAC_STAT_OPACKETS:
		*val = adapterStat->tx_mib_packets +
		    mdlReadMib(pLayerPointers, XmtPackets);
		break;

	case MAC_STAT_OBYTES:
		*val = adapterStat->tx_mib_bytes +
		    mdlReadMib(pLayerPointers, XmtOctets);
		break;

	case MAC_STAT_MULTIXMT:
		*val = adapterStat->tx_mib_multicst_packets +
		    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = adapterStat->tx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
		break;

	case MAC_STAT_NOXMTBUF:
		*val = adapterStat->tx_no_descriptor;
		break;

	case MAC_STAT_OERRORS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
		break;

	case MAC_STAT_COLLISIONS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtCollisions);
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = adapterStat->tx_mib_one_coll_packets +
		    mdlReadMib(pLayerPointers, XmtOneCollision);
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = adapterStat->tx_mib_multi_coll_packets +
		    mdlReadMib(pLayerPointers, XmtMultipleCollision);
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = adapterStat->tx_mib_late_coll_packets +
		    mdlReadMib(pLayerPointers, XmtLateCollision);
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = adapterStat->tx_mib_defer_trans_packets +
		    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
		break;

	default:
		return (ENOTSUP);
	}
	return (0);
}

/*
 * Memory-mapped register access routines used by the MDL to read and
 * write card registers.
 */
unsigned char
READ_REG8(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
}

int
READ_REG16(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
	    (uint16_t *)(x)));
}

long
READ_REG32(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
	    (uint32_t *)(x)));
}

void
WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
}

void
WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
}

void
WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
}

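/*
 * Write a 64-bit register as eight consecutive single-byte accesses.
 */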
void
WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
{
	int i;
	for (i = 0; i < 8; i++) {
		WRITE_REG8(pLayerPointers, (x + i), y[i]);
	}
}

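/*
 * Illustrative use of the accessors above (a sketch; the offset and bit
 * names here are hypothetical, not taken from this driver). The MDL
 * passes the mapped register base plus a register offset as `x`:
 *
 *	long reg = base + REG_CMD0;	(base = mapped register base)
 *	long cmd = READ_REG32(pLayerPointers, reg);
 *	WRITE_REG32(pLayerPointers, reg, cmd | CMD_RUN);
 */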