Lines Matching refs:dp

108 #define	GET_TXBUF(dp, sn)	\  argument
109 &(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
114 #define MAXPKTBUF(dp) \ argument
115 ((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
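
The two macros above drive most of the ring arithmetic in the rest of the listing: GET_TXBUF() turns an ever-increasing sequence number into a slot of the tx_buf array, and MAXPKTBUF() gives the largest frame a bounce buffer must hold. Below is a minimal user-space model of that arithmetic. It assumes SLOT(sn, size) reduces a sequence number modulo the ring size (SLOT itself is not shown in this listing) and uses the conventional Ethernet constants (14-byte header, 4-byte VLAN tag, 4-byte FCS) for the sizes that are not visible here.

    #include <stdio.h>

    /* assumed definition: the listing uses SLOT() but does not show it */
    #define SLOT(sn, size)  ((sn) % (size))

    /* hypothetical stand-ins for the driver structures */
    struct txbuf { int id; };
    struct model_dev {
            unsigned mtu;
            unsigned tx_slots_base;         /* rebased in gem_init_tx_ring() */
            unsigned tx_buf_size;           /* gc.gc_tx_buf_size */
            struct txbuf tx_buf[8];
    };

    #define ETHER_HDR       14              /* sizeof (struct ether_header) */
    #define VTAG_SIZE       4
    #define ETHERFCSL       4
    #define MAXPKTBUF(dp)   ((dp)->mtu + ETHER_HDR + VTAG_SIZE + ETHERFCSL)
    #define GET_TXBUF(dp, sn) \
            (&(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->tx_buf_size)])

    int
    main(void)
    {
            struct model_dev dev = {
                    .mtu = 1500, .tx_slots_base = 5, .tx_buf_size = 8 };

            for (unsigned sn = 0; sn < 4; sn++)
                    printf("seq %u -> slot %u\n",
                        sn, (unsigned)(GET_TXBUF(&dev, sn) - dev.tx_buf));
            printf("MAXPKTBUF = %u bytes\n", MAXPKTBUF(&dev));
            return (0);
    }
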
131 static void gem_nd_setup(struct gem_dev *dp);
132 static void gem_nd_cleanup(struct gem_dev *dp);
140 static void gem_mii_link_watcher(struct gem_dev *dp);
141 static int gem_mac_init(struct gem_dev *dp);
142 static int gem_mac_start(struct gem_dev *dp);
143 static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
144 static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
190 gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val) in gem_prop_get_int() argument
194 (void) sprintf(propname, prop_template, dp->name); in gem_prop_get_int()
196 return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip, in gem_prop_get_int()
218 gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp, in gem_dump_packet() argument
352 cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg); in gem_dump_packet()
363 gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how) in gem_rx_desc_dma_sync() argument
367 int rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift; in gem_rx_desc_dma_sync()
375 n = dp->gc.gc_rx_ring_size - head; in gem_rx_desc_dma_sync()
377 (void) ddi_dma_sync(dp->desc_dma_handle, in gem_rx_desc_dma_sync()
384 (void) ddi_dma_sync(dp->desc_dma_handle, in gem_rx_desc_dma_sync()
391 gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how) in gem_tx_desc_dma_sync() argument
395 int tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift; in gem_tx_desc_dma_sync()
403 n = dp->gc.gc_tx_ring_size - head; in gem_tx_desc_dma_sync()
405 (void) ddi_dma_sync(dp->desc_dma_handle, in gem_tx_desc_dma_sync()
406 (off_t)(dp->tx_ring_dma - dp->rx_ring_dma), in gem_tx_desc_dma_sync()
412 (void) ddi_dma_sync(dp->desc_dma_handle, in gem_tx_desc_dma_sync()
414 + (dp->tx_ring_dma - dp->rx_ring_dma)), in gem_tx_desc_dma_sync()
420 gem_rx_start_default(struct gem_dev *dp, int head, int nslot) in gem_rx_start_default() argument
422 gem_rx_desc_dma_sync(dp, in gem_rx_start_default()
423 SLOT(head, dp->gc.gc_rx_ring_size), nslot, in gem_rx_start_default()
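
gem_rx_desc_dma_sync() and gem_tx_desc_dma_sync() split a sync request into at most two ddi_dma_sync() calls when the slot range wraps past the end of the ring, and the tx variant offsets everything by (tx_ring_dma - rx_ring_dma) because both rings live in the single descriptor allocation made by gem_alloc_memory(). A hedged model of just that split arithmetic; the DDI call is replaced by a callback so the sketch runs in user space.

    #include <stdio.h>

    /* callback standing in for ddi_dma_sync(handle, off, len, how) */
    typedef void (*sync_fn)(size_t off, size_t len);

    /*
     * Sync `nslot` descriptors starting at slot `head`, wrapping at
     * `ring_size`.  `unit_shift` mirrors gc_rx/tx_desc_unit_shift and
     * `base_off` is 0 for rx or (tx_ring_dma - rx_ring_dma) for tx.
     */
    static void
    desc_sync_model(int head, int nslot, int ring_size,
        int unit_shift, size_t base_off, sync_fn sync)
    {
            if (nslot <= 0)
                    return;

            int n = ring_size - head;       /* slots before the wrap point */
            if (n < nslot) {
                    /* tail part: slots [head, ring_size) */
                    sync(base_off + ((size_t)head << unit_shift),
                        (size_t)n << unit_shift);
                    /* head part: slots [0, nslot - n) */
                    sync(base_off, (size_t)(nslot - n) << unit_shift);
            } else {
                    sync(base_off + ((size_t)head << unit_shift),
                        (size_t)nslot << unit_shift);
            }
    }

    static void
    show(size_t off, size_t len)
    {
            printf("sync off=%zu len=%zu\n", off, len);
    }

    int
    main(void)
    {
            /* 64 descriptors of 16 bytes; sync 10 slots starting at slot 60 */
            desc_sync_model(60, 10, 64, 4, 0, show);
            return (0);
    }
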
433 gem_dump_txbuf(struct gem_dev *dp, int level, const char *title) in gem_dump_txbuf() argument
441 dp->name, title, in gem_dump_txbuf()
442 dp->tx_active_head, in gem_dump_txbuf()
443 SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size), in gem_dump_txbuf()
444 dp->tx_active_tail, in gem_dump_txbuf()
445 SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size), in gem_dump_txbuf()
446 dp->tx_active_tail - dp->tx_active_head, in gem_dump_txbuf()
447 dp->tx_softq_head, in gem_dump_txbuf()
448 SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size), in gem_dump_txbuf()
449 dp->tx_softq_tail, in gem_dump_txbuf()
450 SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size), in gem_dump_txbuf()
451 dp->tx_softq_tail - dp->tx_softq_head, in gem_dump_txbuf()
452 dp->tx_free_head, in gem_dump_txbuf()
453 SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size), in gem_dump_txbuf()
454 dp->tx_free_tail, in gem_dump_txbuf()
455 SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size), in gem_dump_txbuf()
456 dp->tx_free_tail - dp->tx_free_head, in gem_dump_txbuf()
457 dp->tx_desc_head, in gem_dump_txbuf()
458 SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size), in gem_dump_txbuf()
459 dp->tx_desc_tail, in gem_dump_txbuf()
460 SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size), in gem_dump_txbuf()
461 dp->tx_desc_tail - dp->tx_desc_head, in gem_dump_txbuf()
462 dp->tx_desc_intr, in gem_dump_txbuf()
463 SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size), in gem_dump_txbuf()
464 dp->tx_desc_intr - dp->tx_desc_head); in gem_dump_txbuf()
470 struct gem_dev *dp; in gem_free_rxbuf() local
472 dp = rbp->rxb_devp; in gem_free_rxbuf()
473 ASSERT(mutex_owned(&dp->intrlock)); in gem_free_rxbuf()
474 rbp->rxb_next = dp->rx_buf_freelist; in gem_free_rxbuf()
475 dp->rx_buf_freelist = rbp; in gem_free_rxbuf()
476 dp->rx_buf_freecnt++; in gem_free_rxbuf()
484 gem_get_rxbuf(struct gem_dev *dp, int cansleep) in gem_get_rxbuf() argument
491 ASSERT(mutex_owned(&dp->intrlock)); in gem_get_rxbuf()
494 dp->rx_buf_freecnt)); in gem_get_rxbuf()
498 rbp = dp->rx_buf_freelist; in gem_get_rxbuf()
501 ASSERT(dp->rx_buf_freecnt > 0); in gem_get_rxbuf()
503 dp->rx_buf_freelist = rbp->rxb_next; in gem_get_rxbuf()
504 dp->rx_buf_freecnt--; in gem_get_rxbuf()
522 rbp->rxb_devp = dp; in gem_get_rxbuf()
525 if ((err = ddi_dma_alloc_handle(dp->dip, in gem_get_rxbuf()
526 &dp->gc.gc_dma_attr_rxbuf, in gem_get_rxbuf()
532 dp->name, __func__, err); in gem_get_rxbuf()
540 ROUNDUP(dp->rx_buf_len, IOC_LINESIZE), in gem_get_rxbuf()
541 &dp->gc.gc_buf_attr, in gem_get_rxbuf()
546 (dp->gc.gc_rx_header_len > 0) in gem_get_rxbuf()
555 dp->name, __func__, err); in gem_get_rxbuf()
564 NULL, rbp->rxb_buf, dp->rx_buf_len, in gem_get_rxbuf()
565 ((dp->gc.gc_rx_header_len > 0) in gem_get_rxbuf()
576 dp->name, __func__, err)); in gem_get_rxbuf()
595 dp->rx_buf_allocated++; in gem_get_rxbuf()
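
gem_free_rxbuf() and gem_get_rxbuf() manage receive buffers through a singly-linked free list protected by intrlock, falling back to a fresh allocation (DMA handle, memory, binding) only when the list is empty. The list handling reduces to the push/pop sketched below; the DDI allocation path is replaced by a plain calloc() and the lock is assumed to be held by the caller, as the ASSERT(mutex_owned(&dp->intrlock)) lines above indicate.

    #include <stdlib.h>

    struct rxbuf {
            struct rxbuf    *rxb_next;
            void            *rxb_devp;      /* back pointer to the device */
    };

    struct rx_dev {
            struct rxbuf    *rx_buf_freelist;
            int             rx_buf_freecnt;
            int             rx_buf_allocated;
    };

    /* caller holds the interrupt lock, mirroring gem_free_rxbuf() */
    static void
    free_rxbuf(struct rx_dev *dp, struct rxbuf *rbp)
    {
            rbp->rxb_next = dp->rx_buf_freelist;
            dp->rx_buf_freelist = rbp;
            dp->rx_buf_freecnt++;
    }

    /* mirrors the fast path of gem_get_rxbuf(); DMA setup omitted */
    static struct rxbuf *
    get_rxbuf(struct rx_dev *dp)
    {
            struct rxbuf *rbp = dp->rx_buf_freelist;

            if (rbp != NULL) {
                    dp->rx_buf_freelist = rbp->rxb_next;
                    dp->rx_buf_freecnt--;
                    return (rbp);
            }
            /* slow path: allocate a new buffer and its DMA resources */
            rbp = calloc(1, sizeof (*rbp));
            if (rbp != NULL) {
                    rbp->rxb_devp = dp;
                    dp->rx_buf_allocated++;
            }
            return (rbp);
    }

    int
    main(void)
    {
            struct rx_dev dev = { 0 };
            struct rxbuf *b = get_rxbuf(&dev);  /* freelist empty: allocates */

            free_rxbuf(&dev, b);                /* back onto the free list */
            return (dev.rx_buf_freecnt == 1 ? 0 : 1);
    }
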
606 gem_alloc_memory(struct gem_dev *dp) in gem_alloc_memory() argument
622 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_alloc_memory()
624 dp->desc_dma_handle = NULL; in gem_alloc_memory()
625 req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size; in gem_alloc_memory()
631 if ((err = ddi_dma_alloc_handle(dp->dip, in gem_alloc_memory()
632 &dp->gc.gc_dma_attr_desc, in gem_alloc_memory()
634 &dp->desc_dma_handle)) != DDI_SUCCESS) { in gem_alloc_memory()
637 dp->name, __func__, err); in gem_alloc_memory()
641 if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle, in gem_alloc_memory()
642 req_size, &dp->gc.gc_desc_attr, in gem_alloc_memory()
645 &dp->desc_acc_handle)) != DDI_SUCCESS) { in gem_alloc_memory()
649 dp->name, __func__, err, (int)req_size); in gem_alloc_memory()
650 ddi_dma_free_handle(&dp->desc_dma_handle); in gem_alloc_memory()
654 if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle, in gem_alloc_memory()
662 dp->name, __func__, err); in gem_alloc_memory()
663 ddi_dma_mem_free(&dp->desc_acc_handle); in gem_alloc_memory()
664 ddi_dma_free_handle(&dp->desc_dma_handle); in gem_alloc_memory()
670 dp->rx_ring = ring; in gem_alloc_memory()
671 dp->rx_ring_dma = ring_cookie.dmac_laddress; in gem_alloc_memory()
674 dp->tx_ring = dp->rx_ring + dp->rx_desc_size; in gem_alloc_memory()
675 dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size; in gem_alloc_memory()
678 dp->io_area = dp->tx_ring + dp->tx_desc_size; in gem_alloc_memory()
679 dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size; in gem_alloc_memory()
685 ASSERT(dp->gc.gc_tx_buf_size > 0); in gem_alloc_memory()
688 dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf; in gem_alloc_memory()
694 tx_buf_len = MAXPKTBUF(dp); in gem_alloc_memory()
699 for (i = 0, tbp = dp->tx_buf; in gem_alloc_memory()
700 i < dp->gc.gc_tx_buf_size; i++, tbp++) { in gem_alloc_memory()
703 if ((err = ddi_dma_alloc_handle(dp->dip, in gem_alloc_memory()
711 dp->name, __func__, err, i); in gem_alloc_memory()
717 &dp->gc.gc_buf_attr, in gem_alloc_memory()
724 dp->name, __func__, err, tx_buf_len); in gem_alloc_memory()
737 dp->name, __func__, err); in gem_alloc_memory()
750 if (dp->gc.gc_tx_buf_size > 0) { in gem_alloc_memory()
752 (void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh); in gem_alloc_memory()
753 ddi_dma_mem_free(&dp->tx_buf[i].txb_bah); in gem_alloc_memory()
754 ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh); in gem_alloc_memory()
758 if (dp->desc_dma_handle) { in gem_alloc_memory()
759 (void) ddi_dma_unbind_handle(dp->desc_dma_handle); in gem_alloc_memory()
760 ddi_dma_mem_free(&dp->desc_acc_handle); in gem_alloc_memory()
761 ddi_dma_free_handle(&dp->desc_dma_handle); in gem_alloc_memory()
762 dp->desc_dma_handle = NULL; in gem_alloc_memory()
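
gem_alloc_memory() makes one DMA allocation of rx_desc_size + tx_desc_size + gc_io_area_size bytes and then carves it up, so the tx ring and the I/O area sit at fixed offsets from the rx ring in both the kernel and device address spaces. A small model of that carving; the ddi_dma_* calls are replaced by a plain malloc() and the field names follow the listing.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct layout {
            uint8_t  *rx_ring, *tx_ring, *io_area;          /* kernel addrs */
            uint64_t rx_ring_dma, tx_ring_dma, io_area_dma; /* device addrs */
    };

    static int
    carve_desc_area(struct layout *lp, size_t rx_desc_size,
        size_t tx_desc_size, size_t io_area_size, uint64_t dma_base)
    {
            size_t req_size = rx_desc_size + tx_desc_size + io_area_size;
            uint8_t *ring = malloc(req_size); /* ddi_dma_mem_alloc() stand-in */

            if (ring == NULL)
                    return (-1);

            lp->rx_ring     = ring;
            lp->rx_ring_dma = dma_base;       /* dmac_laddress of the binding */

            lp->tx_ring     = lp->rx_ring + rx_desc_size;
            lp->tx_ring_dma = lp->rx_ring_dma + rx_desc_size;

            lp->io_area     = lp->tx_ring + tx_desc_size;
            lp->io_area_dma = lp->tx_ring_dma + tx_desc_size;
            return (0);
    }

    int
    main(void)
    {
            struct layout l;

            if (carve_desc_area(&l, 1024, 2048, 256, 0x10000000ULL) != 0)
                    return (1);
            printf("tx ring at +%td, io area at +%td\n",
                l.tx_ring - l.rx_ring, l.io_area - l.rx_ring);
            free(l.rx_ring);
            return (0);
    }
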
769 gem_free_memory(struct gem_dev *dp) in gem_free_memory() argument
775 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_free_memory()
778 if (dp->desc_dma_handle) { in gem_free_memory()
779 (void) ddi_dma_unbind_handle(dp->desc_dma_handle); in gem_free_memory()
780 ddi_dma_mem_free(&dp->desc_acc_handle); in gem_free_memory()
781 ddi_dma_free_handle(&dp->desc_dma_handle); in gem_free_memory()
782 dp->desc_dma_handle = NULL; in gem_free_memory()
786 for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) { in gem_free_memory()
794 while ((rbp = dp->rx_buf_freelist) != NULL) { in gem_free_memory()
796 ASSERT(dp->rx_buf_freecnt > 0); in gem_free_memory()
798 dp->rx_buf_freelist = rbp->rxb_next; in gem_free_memory()
799 dp->rx_buf_freecnt--; in gem_free_memory()
829 gem_init_rx_ring(struct gem_dev *dp) in gem_init_rx_ring() argument
832 int rx_ring_size = dp->gc.gc_rx_ring_size; in gem_init_rx_ring()
835 dp->name, __func__, in gem_init_rx_ring()
836 rx_ring_size, dp->gc.gc_rx_buf_max)); in gem_init_rx_ring()
840 (*dp->gc.gc_rx_desc_init)(dp, i); in gem_init_rx_ring()
842 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV); in gem_init_rx_ring()
844 dp->rx_active_head = (seqnum_t)0; in gem_init_rx_ring()
845 dp->rx_active_tail = (seqnum_t)0; in gem_init_rx_ring()
847 ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL); in gem_init_rx_ring()
848 ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL); in gem_init_rx_ring()
855 gem_prepare_rx_buf(struct gem_dev *dp) in gem_prepare_rx_buf() argument
861 ASSERT(mutex_owned(&dp->intrlock)); in gem_prepare_rx_buf()
865 nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max); in gem_prepare_rx_buf()
867 if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) { in gem_prepare_rx_buf()
870 gem_append_rxbuf(dp, rbp); in gem_prepare_rx_buf()
873 gem_rx_desc_dma_sync(dp, in gem_prepare_rx_buf()
874 0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV); in gem_prepare_rx_buf()
881 gem_clean_rx_buf(struct gem_dev *dp) in gem_clean_rx_buf() argument
885 int rx_ring_size = dp->gc.gc_rx_ring_size; in gem_clean_rx_buf()
889 ASSERT(mutex_owned(&dp->intrlock)); in gem_clean_rx_buf()
892 dp->name, __func__, dp->rx_buf_freecnt)); in gem_clean_rx_buf()
897 (*dp->gc.gc_rx_desc_clean)(dp, i); in gem_clean_rx_buf()
899 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV); in gem_clean_rx_buf()
907 while ((rbp = dp->rx_buf_head) != NULL) { in gem_clean_rx_buf()
912 dp->rx_buf_head = rbp->rxb_next; in gem_clean_rx_buf()
917 dp->rx_buf_tail = (struct rxbuf *)NULL; in gem_clean_rx_buf()
921 dp->name, __func__, total, dp->rx_buf_freecnt)); in gem_clean_rx_buf()
928 gem_init_tx_ring(struct gem_dev *dp) in gem_init_tx_ring() argument
931 int tx_buf_size = dp->gc.gc_tx_buf_size; in gem_init_tx_ring()
932 int tx_ring_size = dp->gc.gc_tx_ring_size; in gem_init_tx_ring()
935 dp->name, __func__, in gem_init_tx_ring()
936 dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size)); in gem_init_tx_ring()
938 ASSERT(!dp->mac_active); in gem_init_tx_ring()
941 dp->tx_slots_base = in gem_init_tx_ring()
942 SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size); in gem_init_tx_ring()
943 dp->tx_softq_tail -= dp->tx_softq_head; in gem_init_tx_ring()
944 dp->tx_softq_head = (seqnum_t)0; in gem_init_tx_ring()
946 dp->tx_active_head = dp->tx_softq_head; in gem_init_tx_ring()
947 dp->tx_active_tail = dp->tx_softq_head; in gem_init_tx_ring()
949 dp->tx_free_head = dp->tx_softq_tail; in gem_init_tx_ring()
950 dp->tx_free_tail = dp->gc.gc_tx_buf_limit; in gem_init_tx_ring()
952 dp->tx_desc_head = (seqnum_t)0; in gem_init_tx_ring()
953 dp->tx_desc_tail = (seqnum_t)0; in gem_init_tx_ring()
954 dp->tx_desc_intr = (seqnum_t)0; in gem_init_tx_ring()
957 (*dp->gc.gc_tx_desc_init)(dp, i); in gem_init_tx_ring()
959 gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV); in gem_init_tx_ring()
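
gem_init_tx_ring() does not discard packets still sitting on the software queue: it folds the old tx_softq_head into tx_slots_base so that sequence number 0 again maps to the first queued buffer, then rebuilds the active/softq/free windows around it. A sketch of that rebasing, again assuming SLOT() is a modulo reduction.

    #include <assert.h>

    #define SLOT(sn, size)  ((sn) % (size))

    struct txstate {
            unsigned tx_slots_base;
            unsigned tx_softq_head, tx_softq_tail;
            unsigned tx_active_head, tx_active_tail;
            unsigned tx_free_head, tx_free_tail;
            unsigned tx_desc_head, tx_desc_tail, tx_desc_intr;
    };

    /* mirrors the sequence-number bookkeeping in gem_init_tx_ring() */
    static void
    init_tx_ring_model(struct txstate *tp, unsigned tx_buf_size,
        unsigned tx_buf_limit)
    {
            /* keep queued-but-unsent buffers: rebase so the softq starts at 0 */
            tp->tx_slots_base =
                SLOT(tp->tx_slots_base + tp->tx_softq_head, tx_buf_size);
            tp->tx_softq_tail -= tp->tx_softq_head;
            tp->tx_softq_head = 0;

            tp->tx_active_head = tp->tx_softq_head;
            tp->tx_active_tail = tp->tx_softq_head;

            tp->tx_free_head = tp->tx_softq_tail;
            tp->tx_free_tail = tx_buf_limit;    /* gc_tx_buf_limit */

            tp->tx_desc_head = tp->tx_desc_tail = tp->tx_desc_intr = 0;
    }

    int
    main(void)
    {
            struct txstate ts = {
                    .tx_slots_base = 3, .tx_softq_head = 10, .tx_softq_tail = 12,
            };

            init_tx_ring_model(&ts, 16, 16);
            assert(ts.tx_slots_base == 13 && ts.tx_softq_tail == 2);
            return (0);
    }
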
979 gem_clean_tx_buf(struct gem_dev *dp) in gem_clean_tx_buf() argument
986 int tx_ring_size = dp->gc.gc_tx_ring_size; in gem_clean_tx_buf()
991 ASSERT(!dp->mac_active); in gem_clean_tx_buf()
992 ASSERT(dp->tx_busy == 0); in gem_clean_tx_buf()
993 ASSERT(dp->tx_softq_tail == dp->tx_free_head); in gem_clean_tx_buf()
999 (*dp->gc.gc_tx_desc_clean)(dp, i); in gem_clean_tx_buf()
1001 gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV); in gem_clean_tx_buf()
1004 head = dp->tx_active_head; in gem_clean_tx_buf()
1005 tail = dp->tx_softq_tail; in gem_clean_tx_buf()
1007 ASSERT(dp->tx_free_head - head >= 0); in gem_clean_tx_buf()
1008 tbp = GET_TXBUF(dp, head); in gem_clean_tx_buf()
1012 dp->stats.errxmt++; in gem_clean_tx_buf()
1019 while (sn != head + dp->gc.gc_tx_buf_size) { in gem_clean_tx_buf()
1023 dp->name, __func__, in gem_clean_tx_buf()
1024 sn, SLOT(sn, dp->gc.gc_tx_buf_size), in gem_clean_tx_buf()
1033 gem_dump_txbuf(dp, CE_WARN, in gem_clean_tx_buf()
1038 dp->tx_free_tail += tail - head; in gem_clean_tx_buf()
1039 ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit); in gem_clean_tx_buf()
1042 dp->tx_active_head = dp->tx_free_head; in gem_clean_tx_buf()
1043 dp->tx_active_tail = dp->tx_free_head; in gem_clean_tx_buf()
1044 dp->tx_softq_head = dp->tx_free_head; in gem_clean_tx_buf()
1045 dp->tx_softq_tail = dp->tx_free_head; in gem_clean_tx_buf()
1052 gem_reclaim_txbuf(struct gem_dev *dp) in gem_reclaim_txbuf() argument
1061 int tx_ring_size = dp->gc.gc_tx_ring_size; in gem_reclaim_txbuf()
1062 uint_t (*tx_desc_stat)(struct gem_dev *dp, in gem_reclaim_txbuf()
1063 int slot, int ndesc) = dp->gc.gc_tx_desc_stat; in gem_reclaim_txbuf()
1072 mutex_enter(&dp->xmitlock); in gem_reclaim_txbuf()
1074 head = dp->tx_active_head; in gem_reclaim_txbuf()
1075 tail = dp->tx_active_tail; in gem_reclaim_txbuf()
1081 dp->name, __func__, in gem_reclaim_txbuf()
1082 head, SLOT(head, dp->gc.gc_tx_buf_size), in gem_reclaim_txbuf()
1083 tail, SLOT(tail, dp->gc.gc_tx_buf_size)); in gem_reclaim_txbuf()
1087 if (dp->tx_reclaim_busy == 0) { in gem_reclaim_txbuf()
1089 ASSERT(dp->tx_free_tail - dp->tx_active_head in gem_reclaim_txbuf()
1090 == dp->gc.gc_tx_buf_limit); in gem_reclaim_txbuf()
1094 dp->tx_reclaim_busy++; in gem_reclaim_txbuf()
1097 gem_tx_desc_dma_sync(dp, in gem_reclaim_txbuf()
1098 SLOT(dp->tx_desc_head, tx_ring_size), in gem_reclaim_txbuf()
1099 dp->tx_desc_tail - dp->tx_desc_head, in gem_reclaim_txbuf()
1102 tbp = GET_TXBUF(dp, head); in gem_reclaim_txbuf()
1103 desc_head = dp->tx_desc_head; in gem_reclaim_txbuf()
1105 dp->tx_active_head = (++sn), tbp = tbp->txb_next) { in gem_reclaim_txbuf()
1115 txstat = (*tx_desc_stat)(dp, in gem_reclaim_txbuf()
1123 if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) { in gem_reclaim_txbuf()
1124 dp->tx_blocked = now; in gem_reclaim_txbuf()
1132 dp->name, sn, SLOT(sn, tx_ring_size)); in gem_reclaim_txbuf()
1137 dp->name, (now - tbp->txb_stime)*10); in gem_reclaim_txbuf()
1144 if (dp->tx_desc_head != desc_head) { in gem_reclaim_txbuf()
1146 dp->tx_desc_head = desc_head; in gem_reclaim_txbuf()
1149 if (desc_head - dp->tx_desc_intr > 0) { in gem_reclaim_txbuf()
1150 dp->tx_desc_intr = desc_head; in gem_reclaim_txbuf()
1153 mutex_exit(&dp->xmitlock); in gem_reclaim_txbuf()
1156 tbp = GET_TXBUF(dp, head); in gem_reclaim_txbuf()
1162 head, SLOT(head, dp->gc.gc_tx_buf_size), in gem_reclaim_txbuf()
1163 tail, SLOT(tail, dp->gc.gc_tx_buf_size)); in gem_reclaim_txbuf()
1171 mutex_enter(&dp->xmitlock); in gem_reclaim_txbuf()
1172 if (--dp->tx_reclaim_busy == 0) { in gem_reclaim_txbuf()
1176 sn = dp->tx_free_tail; in gem_reclaim_txbuf()
1177 tbp = GET_TXBUF(dp, new_tail); in gem_reclaim_txbuf()
1178 while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) { in gem_reclaim_txbuf()
1187 ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn); in gem_reclaim_txbuf()
1189 dp->tx_free_tail = in gem_reclaim_txbuf()
1190 dp->tx_active_head + dp->gc.gc_tx_buf_limit; in gem_reclaim_txbuf()
1192 if (!dp->mac_active) { in gem_reclaim_txbuf()
1194 cv_broadcast(&dp->tx_drain_cv); in gem_reclaim_txbuf()
1199 dp->name, __func__, in gem_reclaim_txbuf()
1200 dp->tx_free_head, dp->tx_free_tail, in gem_reclaim_txbuf()
1201 dp->tx_free_tail - dp->tx_free_head, tail - head); in gem_reclaim_txbuf()
1203 mutex_exit(&dp->xmitlock); in gem_reclaim_txbuf()
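
gem_reclaim_txbuf() walks the active window from tx_active_head toward tx_active_tail, asks the chip-specific gc_tx_desc_stat callback whether each buffer's descriptors have completed, and stops at the first one still owned by the hardware; afterwards it advances tx_free_tail so senders always see exactly gc_tx_buf_limit slots between the free tail and the active head. A condensed model of the walk; locking, the descriptor DMA sync, and the bounce-buffer release are left out.

    #include <stdio.h>

    #define TXSTAT_DONE     1       /* stand-in for the GEM "done" flag */

    struct reclaim_dev {
            unsigned tx_active_head, tx_active_tail;
            unsigned tx_free_tail;
            unsigned tx_buf_limit;          /* gc_tx_buf_limit */
            unsigned tx_buf_size;
            int done[32];                   /* hypothetical chip state */
    };

    /* stands in for (*dp->gc.gc_tx_desc_stat)() */
    static int
    tx_desc_stat(struct reclaim_dev *dp, unsigned sn)
    {
            return (dp->done[sn % dp->tx_buf_size] ? TXSTAT_DONE : 0);
    }

    static void
    reclaim_txbuf_model(struct reclaim_dev *dp)
    {
            for (unsigned sn = dp->tx_active_head;
                sn != dp->tx_active_tail; sn++) {
                    if (!(tx_desc_stat(dp, sn) & TXSTAT_DONE))
                            break;          /* still owned by the hardware */
                    dp->tx_active_head = sn + 1;
            }
            /* keep the free window exactly tx_buf_limit slots wide */
            dp->tx_free_tail = dp->tx_active_head + dp->tx_buf_limit;
    }

    int
    main(void)
    {
            struct reclaim_dev d = {
                    .tx_active_head = 4, .tx_active_tail = 8,
                    .tx_buf_limit = 16, .tx_buf_size = 32,
                    .done = { [4] = 1, [5] = 1 },   /* slots 6,7 not done */
            };

            reclaim_txbuf_model(&d);
            printf("active_head=%u free_tail=%u\n",
                d.tx_active_head, d.tx_free_tail);
            return (0);
    }
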
1214 gem_tx_load_descs_oo(struct gem_dev *dp, in gem_tx_load_descs_oo() argument
1219 int tx_ring_size = dp->gc.gc_tx_ring_size; in gem_tx_load_descs_oo()
1221 (struct gem_dev *dp, int slot, in gem_tx_load_descs_oo()
1223 int frags, uint64_t flag) = dp->gc.gc_tx_desc_write; in gem_tx_load_descs_oo()
1227 tbp = GET_TXBUF(dp, sn); in gem_tx_load_descs_oo()
1230 if (dp->tx_cnt < 100) { in gem_tx_load_descs_oo()
1231 dp->tx_cnt++; in gem_tx_load_descs_oo()
1237 tbp->txb_ndescs = (*tx_desc_write)(dp, in gem_tx_load_descs_oo()
1252 gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp) in gem_setup_txbuf_copy() argument
1285 (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) { in gem_setup_txbuf_copy()
1303 dp->name, __func__, in gem_setup_txbuf_copy()
1313 if (dp->gc.gc_tx_max_frags >= 3 && in gem_setup_txbuf_copy()
1333 gem_tx_start_unit(struct gem_dev *dp) in gem_tx_start_unit() argument
1341 ASSERT(mutex_owned(&dp->xmitlock)); in gem_tx_start_unit()
1342 ASSERT(dp->tx_softq_head == dp->tx_active_tail); in gem_tx_start_unit()
1344 head = dp->tx_softq_head; in gem_tx_start_unit()
1345 tail = dp->tx_softq_tail; in gem_tx_start_unit()
1349 dp->name, __func__, head, tail, tail - head, in gem_tx_start_unit()
1350 dp->tx_desc_head, dp->tx_desc_tail, in gem_tx_start_unit()
1351 dp->tx_desc_tail - dp->tx_desc_head)); in gem_tx_start_unit()
1355 dp->tx_desc_tail = tail; in gem_tx_start_unit()
1357 tbp_head = GET_TXBUF(dp, head); in gem_tx_start_unit()
1358 tbp_tail = GET_TXBUF(dp, tail - 1); in gem_tx_start_unit()
1360 ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail); in gem_tx_start_unit()
1362 dp->gc.gc_tx_start(dp, in gem_tx_start_unit()
1363 SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size), in gem_tx_start_unit()
1367 dp->tx_softq_head = dp->tx_active_tail = tail; in gem_tx_start_unit()
1380 gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp) in gem_txbuf_options() argument
1409 if (dp->misc_flag & GEM_VLAN_HARD) { in gem_txbuf_options()
1426 gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags) in gem_send_common() argument
1454 mutex_enter(&dp->xmitlock); in gem_send_common()
1455 if (dp->mac_suspended) { in gem_send_common()
1456 mutex_exit(&dp->xmitlock); in gem_send_common()
1466 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) { in gem_send_common()
1469 mutex_exit(&dp->xmitlock); in gem_send_common()
1474 head = dp->tx_free_head; in gem_send_common()
1475 avail = dp->tx_free_tail - head; in gem_send_common()
1479 dp->name, __func__, in gem_send_common()
1480 dp->tx_free_head, dp->tx_free_tail, avail, nmblk)); in gem_send_common()
1482 avail = min(avail, dp->tx_max_packets); in gem_send_common()
1488 dp->tx_max_packets = max(dp->tx_max_packets - 1, 1); in gem_send_common()
1494 dp->tx_free_head = head + nmblk; in gem_send_common()
1495 load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0; in gem_send_common()
1499 tbp = GET_TXBUF(dp, head + avail - 1); in gem_send_common()
1501 dp->tx_desc_intr = head + avail; in gem_send_common()
1503 mutex_exit(&dp->xmitlock); in gem_send_common()
1505 tbp = GET_TXBUF(dp, head); in gem_send_common()
1531 txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf); in gem_send_common()
1534 len_total += gem_setup_txbuf_copy(dp, mp, tbp); in gem_send_common()
1537 (void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags); in gem_send_common()
1540 mutex_enter(&dp->xmitlock); in gem_send_common()
1542 if ((--dp->tx_busy) == 0) { in gem_send_common()
1544 dp->tx_softq_tail = dp->tx_free_head; in gem_send_common()
1546 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) { in gem_send_common()
1553 cv_broadcast(&dp->tx_drain_cv); in gem_send_common()
1555 ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0); in gem_send_common()
1556 gem_tx_start_unit(dp); in gem_send_common()
1559 dp->stats.obytes += len_total; in gem_send_common()
1560 dp->stats.opackets += nmblk; in gem_send_common()
1561 dp->stats.obcast += bcast; in gem_send_common()
1562 dp->stats.omcast += mcast; in gem_send_common()
1564 mutex_exit(&dp->xmitlock); in gem_send_common()
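
gem_send_common() reserves transmit slots while holding xmitlock (advancing tx_free_head and bumping tx_busy), drops the lock to copy the packets into the reserved bounce buffers, then retakes it; only the last sender out (tx_busy back to 0) publishes tx_softq_tail and calls gem_tx_start_unit(). A minimal sketch of just the reservation arithmetic, with the copy, descriptor writes, and flow-control resched path elided.

    #include <stdio.h>

    struct send_dev {
            unsigned tx_free_head, tx_free_tail;  /* free window (seq numbers) */
            unsigned tx_softq_tail;
            unsigned tx_max_packets;              /* adaptive burst limit */
            unsigned tx_busy;
    };

    /*
     * Reserve up to `nmblk` slots and return the number granted.
     * In the driver this runs under dp->xmitlock.
     */
    static unsigned
    reserve_tx_slots(struct send_dev *dp, unsigned nmblk)
    {
            unsigned head  = dp->tx_free_head;
            unsigned avail = dp->tx_free_tail - head;

            if (avail > dp->tx_max_packets)
                    avail = dp->tx_max_packets;
            if (avail < nmblk) {
                    /* flow controlled: shrink the burst limit (min 1) */
                    if (dp->tx_max_packets > 1)
                            dp->tx_max_packets--;
                    nmblk = avail;
            }
            dp->tx_free_head = head + nmblk;
            dp->tx_busy++;
            return (nmblk);
    }

    /* last sender out publishes the software queue tail */
    static void
    finish_send(struct send_dev *dp)
    {
            if (--dp->tx_busy == 0)
                    dp->tx_softq_tail = dp->tx_free_head;
    }

    int
    main(void)
    {
            struct send_dev d = { .tx_free_tail = 16, .tx_max_packets = 16 };
            unsigned got = reserve_tx_slots(&d, 3);

            finish_send(&d);
            printf("granted %u, softq_tail=%u\n", got, d.tx_softq_tail);
            return (0);
    }
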
1575 gem_restart_nic(struct gem_dev *dp, uint_t flags) in gem_restart_nic() argument
1577 ASSERT(mutex_owned(&dp->intrlock)); in gem_restart_nic()
1579 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_restart_nic()
1582 gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic"); in gem_restart_nic()
1586 if (dp->mac_suspended) { in gem_restart_nic()
1595 if (dp->mac_active) { in gem_restart_nic()
1598 dp->rxmode &= ~RXMODE_ENABLE; in gem_restart_nic()
1599 (void) (*dp->gc.gc_set_rx_filter)(dp); in gem_restart_nic()
1601 (void) gem_mac_stop(dp, flags); in gem_restart_nic()
1605 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) { in gem_restart_nic()
1607 dp->name, __func__); in gem_restart_nic()
1611 if (gem_mac_init(dp) != GEM_SUCCESS) { in gem_restart_nic()
1616 if (dp->mii_state == MII_STATE_LINKUP) { in gem_restart_nic()
1617 if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) { in gem_restart_nic()
1623 dp->rxmode |= RXMODE_ENABLE; in gem_restart_nic()
1624 if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) { in gem_restart_nic()
1635 if (dp->mii_state == MII_STATE_LINKUP) { in gem_restart_nic()
1637 ASSERT(!dp->mac_active); in gem_restart_nic()
1638 (void) gem_mac_start(dp); in gem_restart_nic()
1647 gem_tx_timeout(struct gem_dev *dp) in gem_tx_timeout() argument
1653 mutex_enter(&dp->intrlock); in gem_tx_timeout()
1658 mutex_enter(&dp->xmitlock); in gem_tx_timeout()
1659 if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) { in gem_tx_timeout()
1660 mutex_exit(&dp->xmitlock); in gem_tx_timeout()
1663 mutex_exit(&dp->xmitlock); in gem_tx_timeout()
1666 if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) { in gem_tx_timeout()
1668 (void) gem_restart_nic(dp, 0); in gem_tx_timeout()
1670 dp->tx_blocked = (clock_t)0; in gem_tx_timeout()
1675 mutex_enter(&dp->xmitlock); in gem_tx_timeout()
1677 if (dp->tx_active_head == dp->tx_active_tail) { in gem_tx_timeout()
1679 if (dp->tx_blocked && in gem_tx_timeout()
1680 now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) { in gem_tx_timeout()
1681 gem_dump_txbuf(dp, CE_WARN, in gem_tx_timeout()
1684 dp->tx_blocked = (clock_t)0; in gem_tx_timeout()
1686 mutex_exit(&dp->xmitlock); in gem_tx_timeout()
1690 tbp = GET_TXBUF(dp, dp->tx_active_head); in gem_tx_timeout()
1691 if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) { in gem_tx_timeout()
1692 mutex_exit(&dp->xmitlock); in gem_tx_timeout()
1695 mutex_exit(&dp->xmitlock); in gem_tx_timeout()
1697 gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout"); in gem_tx_timeout()
1700 (void) gem_restart_nic(dp, GEM_RESTART_NOWAIT); in gem_tx_timeout()
1702 dp->tx_blocked = (clock_t)0; in gem_tx_timeout()
1705 mutex_exit(&dp->intrlock); in gem_tx_timeout()
1709 mac_tx_update(dp->mh); in gem_tx_timeout()
1714 dp->name, BOOLEAN(dp->tx_blocked), in gem_tx_timeout()
1715 dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr)); in gem_tx_timeout()
1716 dp->timeout_id = in gem_tx_timeout()
1718 (void *)dp, dp->gc.gc_tx_timeout_interval); in gem_tx_timeout()
1728 gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head) in gem_append_rxbuf() argument
1732 int rx_ring_size = dp->gc.gc_rx_ring_size; in gem_append_rxbuf()
1735 ASSERT(mutex_owned(&dp->intrlock)); in gem_append_rxbuf()
1738 dp->name, __func__, dp->rx_active_head, dp->rx_active_tail)); in gem_append_rxbuf()
1743 if (dp->rx_buf_head == NULL) { in gem_append_rxbuf()
1744 dp->rx_buf_head = rbp_head; in gem_append_rxbuf()
1745 ASSERT(dp->rx_buf_tail == NULL); in gem_append_rxbuf()
1747 dp->rx_buf_tail->rxb_next = rbp_head; in gem_append_rxbuf()
1750 tail = dp->rx_active_tail; in gem_append_rxbuf()
1753 dp->rx_buf_tail = rbp; in gem_append_rxbuf()
1755 dp->gc.gc_rx_desc_write(dp, in gem_append_rxbuf()
1760 dp->rx_active_tail = tail = tail + 1; in gem_append_rxbuf()
1766 gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len) in gem_get_packet_default() argument
1768 int rx_header_len = dp->gc.gc_rx_header_len; in gem_get_packet_default()
1799 gem_receive(struct gem_dev *dp) in gem_receive() argument
1810 int rx_ring_size = dp->gc.gc_rx_ring_size; in gem_receive()
1812 uint64_t (*rx_desc_stat)(struct gem_dev *dp, in gem_receive()
1815 int ethermax = dp->mtu + sizeof (struct ether_header); in gem_receive()
1816 int rx_header_len = dp->gc.gc_rx_header_len; in gem_receive()
1818 ASSERT(mutex_owned(&dp->intrlock)); in gem_receive()
1821 dp->name, dp->rx_buf_head)); in gem_receive()
1823 rx_desc_stat = dp->gc.gc_rx_desc_stat; in gem_receive()
1826 for (active_head = dp->rx_active_head; in gem_receive()
1827 (rbp = dp->rx_buf_head) != NULL; active_head++) { in gem_receive()
1830 cnt = max(dp->poll_pkt_delay*2, 10); in gem_receive()
1832 dp->rx_active_tail - active_head); in gem_receive()
1833 gem_rx_desc_dma_sync(dp, in gem_receive()
1844 if (((rxstat = (*rx_desc_stat)(dp, in gem_receive()
1853 dp->rx_buf_head = rbp->rxb_next; in gem_receive()
1863 dp->name, __func__, rxstat, len)); in gem_receive()
1868 if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) { in gem_receive()
1870 dp->stats.norcvbuf++; in gem_receive()
1878 ethermax = dp->mtu + sizeof (struct ether_header); in gem_receive()
1885 dp->stats.errrcv++; in gem_receive()
1886 dp->stats.runt++; in gem_receive()
1892 dp->stats.errrcv++; in gem_receive()
1893 dp->stats.frame_too_long++; in gem_receive()
1902 gem_dump_packet(dp, (char *)__func__, mp, B_TRUE); in gem_receive()
1913 dp->stats.rbcast++; in gem_receive()
1915 dp->stats.rmcast++; in gem_receive()
1927 if ((cnt = active_head - dp->rx_active_head) > 0) { in gem_receive()
1928 dp->stats.rbytes += len_total; in gem_receive()
1929 dp->stats.rpackets += cnt; in gem_receive()
1931 dp->rx_active_head = active_head; in gem_receive()
1937 if (dp->rx_buf_head == NULL) { in gem_receive()
1938 dp->rx_buf_tail = NULL; in gem_receive()
1942 dp->name, __func__, cnt, rx_head)); in gem_receive()
1951 head = dp->rx_active_tail; in gem_receive()
1952 gem_append_rxbuf(dp, newbufs); in gem_receive()
1955 dp->gc.gc_rx_start(dp, in gem_receive()
1956 SLOT(head, rx_ring_size), dp->rx_active_tail - head); in gem_receive()
1963 mutex_exit(&dp->intrlock); in gem_receive()
1964 mac_rx(dp->mh, NULL, rx_head); in gem_receive()
1965 mutex_enter(&dp->intrlock); in gem_receive()
1975 gem_tx_done(struct gem_dev *dp) in gem_tx_done() argument
1979 if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) { in gem_tx_done()
1980 (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF); in gem_tx_done()
1982 dp->name, dp->tx_active_head, dp->tx_active_tail)); in gem_tx_done()
1987 mutex_enter(&dp->xmitlock); in gem_tx_done()
1990 ASSERT(dp->tx_softq_head == dp->tx_softq_tail); in gem_tx_done()
1995 ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0); in gem_tx_done()
1996 if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) { in gem_tx_done()
2003 dp->tx_blocked = (clock_t)0; in gem_tx_done()
2004 dp->tx_max_packets = in gem_tx_done()
2005 min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit); in gem_tx_done()
2008 mutex_exit(&dp->xmitlock); in gem_tx_done()
2011 dp->name, __func__, BOOLEAN(dp->tx_blocked))); in gem_tx_done()
2017 gem_intr(struct gem_dev *dp) in gem_intr() argument
2021 mutex_enter(&dp->intrlock); in gem_intr()
2022 if (dp->mac_suspended) { in gem_intr()
2023 mutex_exit(&dp->intrlock); in gem_intr()
2026 dp->intr_busy = B_TRUE; in gem_intr()
2028 ret = (*dp->gc.gc_interrupt)(dp); in gem_intr()
2031 dp->intr_busy = B_FALSE; in gem_intr()
2032 mutex_exit(&dp->intrlock); in gem_intr()
2036 if (!dp->mac_active) { in gem_intr()
2037 cv_broadcast(&dp->tx_drain_cv); in gem_intr()
2041 dp->stats.intr++; in gem_intr()
2042 dp->intr_busy = B_FALSE; in gem_intr()
2044 mutex_exit(&dp->intrlock); in gem_intr()
2047 DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name)); in gem_intr()
2048 mac_tx_update(dp->mh); in gem_intr()
2055 gem_intr_watcher(struct gem_dev *dp) in gem_intr_watcher() argument
2057 (void) gem_intr(dp); in gem_intr_watcher()
2060 dp->intr_watcher_id = in gem_intr_watcher()
2061 timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1); in gem_intr_watcher()
2070 gem_choose_forcedmode(struct gem_dev *dp) in gem_choose_forcedmode() argument
2073 if (dp->anadv_1000fdx || dp->anadv_1000hdx) { in gem_choose_forcedmode()
2074 dp->speed = GEM_SPD_1000; in gem_choose_forcedmode()
2075 dp->full_duplex = dp->anadv_1000fdx; in gem_choose_forcedmode()
2076 } else if (dp->anadv_100fdx || dp->anadv_100t4) { in gem_choose_forcedmode()
2077 dp->speed = GEM_SPD_100; in gem_choose_forcedmode()
2078 dp->full_duplex = B_TRUE; in gem_choose_forcedmode()
2079 } else if (dp->anadv_100hdx) { in gem_choose_forcedmode()
2080 dp->speed = GEM_SPD_100; in gem_choose_forcedmode()
2081 dp->full_duplex = B_FALSE; in gem_choose_forcedmode()
2083 dp->speed = GEM_SPD_10; in gem_choose_forcedmode()
2084 dp->full_duplex = dp->anadv_10fdx; in gem_choose_forcedmode()
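
gem_choose_forcedmode() picks the highest speed still enabled among the anadv_* knobs when autonegotiation is not used, preferring full duplex wherever a full-duplex capability is advertised. The same cascade in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    enum { SPD_10, SPD_100, SPD_1000 };

    struct adv {
            bool an_1000fdx, an_1000hdx;
            bool an_100fdx, an_100hdx, an_100t4;
            bool an_10fdx, an_10hdx;
    };

    static void
    choose_forcedmode(const struct adv *a, int *speed, bool *full_duplex)
    {
            if (a->an_1000fdx || a->an_1000hdx) {
                    *speed = SPD_1000;
                    *full_duplex = a->an_1000fdx;
            } else if (a->an_100fdx || a->an_100t4) {
                    *speed = SPD_100;
                    *full_duplex = true;
            } else if (a->an_100hdx) {
                    *speed = SPD_100;
                    *full_duplex = false;
            } else {
                    *speed = SPD_10;
                    *full_duplex = a->an_10fdx;
            }
    }

    int
    main(void)
    {
            struct adv a = { .an_100hdx = true, .an_10fdx = true };
            int spd;
            bool fdx;

            choose_forcedmode(&a, &spd, &fdx);
            printf("speed=%d full_duplex=%d\n", spd, fdx);
            return (0);
    }
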
2089 gem_mii_read(struct gem_dev *dp, uint_t reg) in gem_mii_read() argument
2091 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) { in gem_mii_read()
2092 (*dp->gc.gc_mii_sync)(dp); in gem_mii_read()
2094 return ((*dp->gc.gc_mii_read)(dp, reg)); in gem_mii_read()
2098 gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val) in gem_mii_write() argument
2100 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) { in gem_mii_write()
2101 (*dp->gc.gc_mii_sync)(dp); in gem_mii_write()
2103 (*dp->gc.gc_mii_write)(dp, reg, val); in gem_mii_write()
2111 gem_mii_config_default(struct gem_dev *dp) in gem_mii_config_default() argument
2122 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_mii_config_default()
2127 mii_stat = dp->mii_status; in gem_mii_config_default()
2130 dp->name, __func__, mii_stat, MII_STATUS_BITS)); in gem_mii_config_default()
2135 dp->name, mii_stat, MII_STATUS_BITS); in gem_mii_config_default()
2140 val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL; in gem_mii_config_default()
2144 dp->name, __func__, in gem_mii_config_default()
2145 dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx, in gem_mii_config_default()
2146 dp->anadv_10fdx, dp->anadv_10hdx)); in gem_mii_config_default()
2148 if (dp->anadv_100t4) { in gem_mii_config_default()
2151 if (dp->anadv_100fdx) { in gem_mii_config_default()
2154 if (dp->anadv_100hdx) { in gem_mii_config_default()
2157 if (dp->anadv_10fdx) { in gem_mii_config_default()
2160 if (dp->anadv_10hdx) { in gem_mii_config_default()
2165 val |= fc_cap_encode[dp->anadv_flow_control]; in gem_mii_config_default()
2169 dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode, in gem_mii_config_default()
2170 dp->anadv_flow_control)); in gem_mii_config_default()
2172 gem_mii_write(dp, MII_AN_ADVERT, val); in gem_mii_config_default()
2178 if (!dp->anadv_autoneg) { in gem_mii_config_default()
2183 if (dp->anadv_1000fdx) { in gem_mii_config_default()
2186 if (dp->anadv_1000hdx) { in gem_mii_config_default()
2192 dp->name, __func__, val, MII_1000TC_BITS)); in gem_mii_config_default()
2194 gem_mii_write(dp, MII_1000TC, val); in gem_mii_config_default()
2200 #define GEM_LINKUP(dp) mac_link_update((dp)->mh, LINK_STATE_UP) argument
2201 #define GEM_LINKDOWN(dp) mac_link_update((dp)->mh, LINK_STATE_DOWN) argument
2235 gem_mii_link_check(struct gem_dev *dp) in gem_mii_link_check() argument
2252 old_mii_state = dp->mii_state; in gem_mii_link_check()
2255 dp->name, __func__, now, dp->mii_state)); in gem_mii_link_check()
2257 diff = now - dp->mii_last_check; in gem_mii_link_check()
2258 dp->mii_last_check = now; in gem_mii_link_check()
2264 if (dp->linkup_delay > 0) { in gem_mii_link_check()
2265 if (dp->linkup_delay > diff) { in gem_mii_link_check()
2266 dp->linkup_delay -= diff; in gem_mii_link_check()
2269 dp->linkup_delay = -1; in gem_mii_link_check()
2274 switch (dp->mii_state) { in gem_mii_link_check()
2277 (*dp->gc.gc_mii_sync)(dp); in gem_mii_link_check()
2281 dp->mii_timer -= diff; in gem_mii_link_check()
2282 if (dp->mii_timer > 0) { in gem_mii_link_check()
2284 dp->mii_interval = WATCH_INTERVAL_FAST; in gem_mii_link_check()
2290 if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) { in gem_mii_link_check()
2292 (*dp->gc.gc_mii_sync)(dp); in gem_mii_link_check()
2294 val = gem_mii_read(dp, MII_CONTROL); in gem_mii_link_check()
2299 dp->name, ddi_get_lbolt(), in gem_mii_link_check()
2305 gem_mii_write(dp, MII_CONTROL, 0); in gem_mii_link_check()
2308 if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) { in gem_mii_link_check()
2314 gem_choose_forcedmode(dp); in gem_mii_link_check()
2316 dp->mii_lpable = 0; in gem_mii_link_check()
2317 dp->mii_advert = 0; in gem_mii_link_check()
2318 dp->mii_exp = 0; in gem_mii_link_check()
2319 dp->mii_ctl1000 = 0; in gem_mii_link_check()
2320 dp->mii_stat1000 = 0; in gem_mii_link_check()
2321 dp->flow_control = FLOW_CONTROL_NONE; in gem_mii_link_check()
2323 if (!dp->anadv_autoneg) { in gem_mii_link_check()
2325 dp->mii_state = MII_STATE_MEDIA_SETUP; in gem_mii_link_check()
2326 dp->mii_timer = 0; in gem_mii_link_check()
2327 dp->mii_interval = 0; in gem_mii_link_check()
2338 dp->mii_timer -= diff; in gem_mii_link_check()
2339 if (dp->mii_timer - in gem_mii_link_check()
2340 (dp->gc.gc_mii_an_timeout in gem_mii_link_check()
2341 - dp->gc.gc_mii_an_wait) > 0) { in gem_mii_link_check()
2346 dp->mii_interval = WATCH_INTERVAL_FAST; in gem_mii_link_check()
2351 status = gem_mii_read(dp, MII_STATUS); in gem_mii_link_check()
2354 dp->name, __func__, dp->mii_state, in gem_mii_link_check()
2364 dp->name); in gem_mii_link_check()
2369 if (dp->mii_timer <= 0) { in gem_mii_link_check()
2374 if (!dp->mii_supress_msg) { in gem_mii_link_check()
2377 dp->name); in gem_mii_link_check()
2378 dp->mii_supress_msg = B_TRUE; in gem_mii_link_check()
2385 dp->mii_interval = dp->gc.gc_mii_an_watch_interval; in gem_mii_link_check()
2393 dp->mii_supress_msg = B_FALSE; in gem_mii_link_check()
2394 dp->mii_state = MII_STATE_AN_DONE; in gem_mii_link_check()
2397 dp->name, status, MII_STATUS_BITS)); in gem_mii_link_check()
2399 if (dp->gc.gc_mii_an_delay > 0) { in gem_mii_link_check()
2400 dp->mii_timer = dp->gc.gc_mii_an_delay; in gem_mii_link_check()
2401 dp->mii_interval = drv_usectohz(20*1000); in gem_mii_link_check()
2405 dp->mii_timer = 0; in gem_mii_link_check()
2413 dp->mii_timer -= diff; in gem_mii_link_check()
2414 if (dp->mii_timer > 0) { in gem_mii_link_check()
2416 dp->mii_interval = WATCH_INTERVAL_FAST; in gem_mii_link_check()
2428 if (dp->gc.gc_mii_an_delay > 0) { in gem_mii_link_check()
2433 status = gem_mii_read(dp, MII_STATUS); in gem_mii_link_check()
2435 advert = gem_mii_read(dp, MII_AN_ADVERT); in gem_mii_link_check()
2436 lpable = gem_mii_read(dp, MII_AN_LPABLE); in gem_mii_link_check()
2437 exp = gem_mii_read(dp, MII_AN_EXPANSION); in gem_mii_link_check()
2444 if (dp->mii_status & MII_STATUS_XSTATUS) { in gem_mii_link_check()
2445 ctl1000 = gem_mii_read(dp, MII_1000TC); in gem_mii_link_check()
2446 stat1000 = gem_mii_read(dp, MII_1000TS); in gem_mii_link_check()
2448 dp->mii_lpable = lpable; in gem_mii_link_check()
2449 dp->mii_advert = advert; in gem_mii_link_check()
2450 dp->mii_exp = exp; in gem_mii_link_check()
2451 dp->mii_ctl1000 = ctl1000; in gem_mii_link_check()
2452 dp->mii_stat1000 = stat1000; in gem_mii_link_check()
2456 dp->name, in gem_mii_link_check()
2461 if (dp->mii_status & MII_STATUS_XSTATUS) { in gem_mii_link_check()
2475 dp->name); in gem_mii_link_check()
2499 cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name); in gem_mii_link_check()
2509 dp->speed = GEM_SPD_1000; in gem_mii_link_check()
2510 dp->full_duplex = B_TRUE; in gem_mii_link_check()
2514 dp->speed = GEM_SPD_1000; in gem_mii_link_check()
2515 dp->full_duplex = B_FALSE; in gem_mii_link_check()
2518 dp->speed = GEM_SPD_100; in gem_mii_link_check()
2519 dp->full_duplex = B_TRUE; in gem_mii_link_check()
2522 dp->speed = GEM_SPD_100; in gem_mii_link_check()
2523 dp->full_duplex = B_TRUE; in gem_mii_link_check()
2526 dp->speed = GEM_SPD_100; in gem_mii_link_check()
2527 dp->full_duplex = B_FALSE; in gem_mii_link_check()
2530 dp->speed = GEM_SPD_10; in gem_mii_link_check()
2531 dp->full_duplex = B_TRUE; in gem_mii_link_check()
2534 dp->speed = GEM_SPD_10; in gem_mii_link_check()
2535 dp->full_duplex = B_FALSE; in gem_mii_link_check()
2543 val = gem_mii_read(dp, MII_CONTROL); in gem_mii_link_check()
2546 dp->speed = (val & MII_CONTROL_100MB) ? in gem_mii_link_check()
2548 dp->full_duplex = dp->speed != GEM_SPD_10; in gem_mii_link_check()
2556 dp->name, in gem_mii_link_check()
2560 gem_speed_value[dp->speed], in gem_mii_link_check()
2561 dp->full_duplex ? "full" : "half"); in gem_mii_link_check()
2564 if (dp->full_duplex) { in gem_mii_link_check()
2565 dp->flow_control = in gem_mii_link_check()
2569 dp->flow_control = FLOW_CONTROL_NONE; in gem_mii_link_check()
2571 dp->mii_state = MII_STATE_MEDIA_SETUP; in gem_mii_link_check()
2575 dp->mii_state = MII_STATE_LINKDOWN; in gem_mii_link_check()
2576 dp->mii_timer = dp->gc.gc_mii_linkdown_timeout; in gem_mii_link_check()
2577 DPRINTF(2, (CE_CONT, "!%s: setup media mode done", dp->name)); in gem_mii_link_check()
2578 dp->mii_supress_msg = B_FALSE; in gem_mii_link_check()
2581 dp->mii_interval = WATCH_INTERVAL_FAST; in gem_mii_link_check()
2583 if ((!dp->anadv_autoneg) || in gem_mii_link_check()
2584 dp->gc.gc_mii_an_oneshot || fix_phy) { in gem_mii_link_check()
2589 val = gem_mii_read(dp, MII_CONTROL); in gem_mii_link_check()
2593 if (dp->full_duplex) { in gem_mii_link_check()
2597 switch (dp->speed) { in gem_mii_link_check()
2608 dp->name, dp->speed); in gem_mii_link_check()
2615 if (dp->mii_status & MII_STATUS_XSTATUS) { in gem_mii_link_check()
2616 gem_mii_write(dp, in gem_mii_link_check()
2619 gem_mii_write(dp, MII_CONTROL, val); in gem_mii_link_check()
2622 if (dp->nic_state >= NIC_STATE_INITIALIZED) { in gem_mii_link_check()
2624 (*dp->gc.gc_set_media)(dp); in gem_mii_link_check()
2627 if ((void *)dp->gc.gc_mii_tune_phy) { in gem_mii_link_check()
2630 (*dp->gc.gc_mii_tune_phy)(dp); in gem_mii_link_check()
2636 status = gem_mii_read(dp, MII_STATUS); in gem_mii_link_check()
2641 dp->mii_state = MII_STATE_LINKUP; in gem_mii_link_check()
2642 dp->mii_supress_msg = B_FALSE; in gem_mii_link_check()
2646 dp->name, status, MII_STATUS_BITS)); in gem_mii_link_check()
2654 dp->name, in gem_mii_link_check()
2655 gem_speed_value[dp->speed], in gem_mii_link_check()
2656 dp->full_duplex ? "full" : "half", in gem_mii_link_check()
2657 gem_fc_type[dp->flow_control]); in gem_mii_link_check()
2659 dp->mii_interval = dp->gc.gc_mii_link_watch_interval; in gem_mii_link_check()
2662 if (dp->gc.gc_mii_hw_link_detection && in gem_mii_link_check()
2663 dp->nic_state == NIC_STATE_ONLINE) { in gem_mii_link_check()
2664 dp->mii_interval = 0; in gem_mii_link_check()
2667 if (dp->nic_state == NIC_STATE_ONLINE) { in gem_mii_link_check()
2668 if (!dp->mac_active) { in gem_mii_link_check()
2669 (void) gem_mac_start(dp); in gem_mii_link_check()
2676 dp->mii_supress_msg = B_TRUE; in gem_mii_link_check()
2677 if (dp->anadv_autoneg) { in gem_mii_link_check()
2678 dp->mii_timer -= diff; in gem_mii_link_check()
2679 if (dp->mii_timer <= 0) { in gem_mii_link_check()
2685 dp->gc.gc_mii_linkdown_timeout_action; in gem_mii_link_check()
2693 status = gem_mii_read(dp, MII_STATUS); in gem_mii_link_check()
2700 dp->name, status, MII_STATUS_BITS); in gem_mii_link_check()
2702 if (dp->nic_state == NIC_STATE_ONLINE && in gem_mii_link_check()
2703 dp->mac_active && in gem_mii_link_check()
2704 dp->gc.gc_mii_stop_mac_on_linkdown) { in gem_mii_link_check()
2705 (void) gem_mac_stop(dp, 0); in gem_mii_link_check()
2707 if (dp->tx_blocked) { in gem_mii_link_check()
2713 if (dp->anadv_autoneg) { in gem_mii_link_check()
2715 linkdown_action = dp->gc.gc_mii_linkdown_action; in gem_mii_link_check()
2719 dp->mii_state = MII_STATE_LINKDOWN; in gem_mii_link_check()
2720 dp->mii_timer = dp->gc.gc_mii_linkdown_timeout; in gem_mii_link_check()
2722 if ((void *)dp->gc.gc_mii_tune_phy) { in gem_mii_link_check()
2724 (*dp->gc.gc_mii_tune_phy)(dp); in gem_mii_link_check()
2726 dp->mii_interval = dp->gc.gc_mii_link_watch_interval; in gem_mii_link_check()
2731 if (dp->gc.gc_mii_hw_link_detection && in gem_mii_link_check()
2732 dp->nic_state == NIC_STATE_ONLINE) { in gem_mii_link_check()
2733 dp->mii_interval = 0; in gem_mii_link_check()
2738 dp->mii_interval = dp->gc.gc_mii_link_watch_interval; in gem_mii_link_check()
2746 if (!dp->mii_supress_msg) { in gem_mii_link_check()
2747 cmn_err(CE_CONT, "!%s: resetting PHY", dp->name); in gem_mii_link_check()
2749 dp->mii_supress_msg = B_TRUE; in gem_mii_link_check()
2753 dp->mii_supress_msg = B_TRUE; in gem_mii_link_check()
2754 if (dp->gc.gc_mii_an_oneshot) { in gem_mii_link_check()
2758 dp->mii_state = MII_STATE_AUTONEGOTIATING; in gem_mii_link_check()
2759 dp->mii_timer = dp->gc.gc_mii_an_timeout; in gem_mii_link_check()
2760 dp->mii_interval = dp->gc.gc_mii_an_watch_interval; in gem_mii_link_check()
2764 if (!dp->mii_supress_msg) { in gem_mii_link_check()
2766 dp->name); in gem_mii_link_check()
2768 dp->mii_supress_msg = B_TRUE; in gem_mii_link_check()
2773 dp->name, dp->gc.gc_mii_linkdown_action); in gem_mii_link_check()
2774 dp->mii_supress_msg = B_TRUE; in gem_mii_link_check()
2779 if (!dp->mii_supress_msg) { in gem_mii_link_check()
2780 cmn_err(CE_CONT, "!%s: resetting PHY", dp->name); in gem_mii_link_check()
2782 dp->mii_state = MII_STATE_RESETTING; in gem_mii_link_check()
2783 dp->mii_timer = dp->gc.gc_mii_reset_timeout; in gem_mii_link_check()
2784 if (!dp->gc.gc_mii_dont_reset) { in gem_mii_link_check()
2785 gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET); in gem_mii_link_check()
2787 dp->mii_interval = WATCH_INTERVAL_FAST; in gem_mii_link_check()
2791 if (!dp->mii_supress_msg) { in gem_mii_link_check()
2792 cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name); in gem_mii_link_check()
2794 dp->mii_state = MII_STATE_AUTONEGOTIATING; in gem_mii_link_check()
2795 dp->mii_timer = dp->gc.gc_mii_an_timeout; in gem_mii_link_check()
2798 val = gem_mii_read(dp, MII_CONTROL) & in gem_mii_link_check()
2801 gem_mii_write(dp, MII_CONTROL, in gem_mii_link_check()
2804 dp->mii_interval = dp->gc.gc_mii_an_watch_interval; in gem_mii_link_check()
2807 if (dp->link_watcher_id == 0 && dp->mii_interval) { in gem_mii_link_check()
2809 dp->link_watcher_id = in gem_mii_link_check()
2811 (void *)dp, dp->mii_interval); in gem_mii_link_check()
2814 if (old_mii_state != dp->mii_state) { in gem_mii_link_check()
2816 if (dp->mii_state == MII_STATE_LINKUP) { in gem_mii_link_check()
2817 dp->linkup_delay = 0; in gem_mii_link_check()
2818 GEM_LINKUP(dp); in gem_mii_link_check()
2819 } else if (dp->linkup_delay <= 0) { in gem_mii_link_check()
2820 GEM_LINKDOWN(dp); in gem_mii_link_check()
2822 } else if (dp->linkup_delay < 0) { in gem_mii_link_check()
2824 dp->linkup_delay = 0; in gem_mii_link_check()
2825 GEM_LINKDOWN(dp); in gem_mii_link_check()
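
gem_mii_link_check() is a timer-driven state machine: each pass subtracts the elapsed time from mii_timer, possibly reads the PHY, moves mii_state through RESETTING, AUTONEGOTIATING, AN_DONE, MEDIA_SETUP, LINKDOWN, and LINKUP, and leaves mii_interval set to the delay before the next check; gem_mii_link_watcher() then rearms the timeout and reports transitions through GEM_LINKUP()/GEM_LINKDOWN(). A heavily condensed skeleton of that control flow, with all PHY register handling replaced by stubs and the timer constants made up.

    #include <stdbool.h>
    #include <stdio.h>

    enum mii_state {
            MII_RESETTING, MII_AUTONEGOTIATING, MII_AN_DONE,
            MII_MEDIA_SETUP, MII_LINKDOWN, MII_LINKUP,
    };

    struct link_dev {
            enum mii_state mii_state;
            int  mii_timer;         /* ticks left in the current state */
            int  mii_interval;      /* delay until the next check */
            bool phy_link_up;       /* stub for MII_STATUS_LINKUP */
            bool an_complete;       /* stub for MII_STATUS_ANDONE */
    };

    /* one pass of the watcher; `diff` is the time since the last pass */
    static void
    mii_link_check_model(struct link_dev *dp, int diff)
    {
            dp->mii_timer -= diff;

            switch (dp->mii_state) {
            case MII_RESETTING:
                    if (dp->mii_timer > 0)
                            break;          /* PHY reset still settling */
                    /* write advertisement registers, start autonegotiation */
                    dp->mii_state = MII_AUTONEGOTIATING;
                    dp->mii_timer = 50;     /* gc_mii_an_timeout stand-in */
                    break;
            case MII_AUTONEGOTIATING:
                    if (dp->an_complete)
                            dp->mii_state = MII_AN_DONE;
                    else if (dp->mii_timer <= 0)
                            dp->mii_state = MII_MEDIA_SETUP; /* force a mode */
                    break;
            case MII_AN_DONE:
                    /* read link partner ability; resolve speed/duplex/fc */
                    dp->mii_state = MII_MEDIA_SETUP;
                    break;
            case MII_MEDIA_SETUP:
                    /* program MII_CONTROL and the MAC, then watch the link */
                    dp->mii_state = MII_LINKDOWN;
                    break;
            case MII_LINKDOWN:
                    if (dp->phy_link_up)
                            dp->mii_state = MII_LINKUP;  /* GEM_LINKUP() */
                    break;
            case MII_LINKUP:
                    if (!dp->phy_link_up)
                            dp->mii_state = MII_LINKDOWN; /* GEM_LINKDOWN() */
                    break;
            }
            dp->mii_interval = 10;          /* rearm the watcher timeout */
    }

    int
    main(void)
    {
            struct link_dev d = { .mii_state = MII_RESETTING, .mii_timer = 1,
                .an_complete = true, .phy_link_up = true };

            for (int i = 0; i < 6; i++) {
                    mii_link_check_model(&d, 2);
                    printf("pass %d -> state %d\n", i, d.mii_state);
            }
            return (0);
    }
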
2832 gem_mii_link_watcher(struct gem_dev *dp) in gem_mii_link_watcher() argument
2836 mutex_enter(&dp->intrlock); in gem_mii_link_watcher()
2838 dp->link_watcher_id = 0; in gem_mii_link_watcher()
2839 tx_sched = gem_mii_link_check(dp); in gem_mii_link_watcher()
2841 if (dp->link_watcher_id == 0) { in gem_mii_link_watcher()
2842 cmn_err(CE_CONT, "%s: link watcher stopped", dp->name); in gem_mii_link_watcher()
2845 mutex_exit(&dp->intrlock); in gem_mii_link_watcher()
2849 mac_tx_update(dp->mh); in gem_mii_link_watcher()
2854 gem_mii_probe_default(struct gem_dev *dp) in gem_mii_probe_default() argument
2861 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_mii_probe_default()
2867 dp->mii_status = 0; in gem_mii_probe_default()
2870 if (dp->mii_phy_addr) { in gem_mii_probe_default()
2871 status = gem_mii_read(dp, MII_STATUS); in gem_mii_probe_default()
2873 gem_mii_write(dp, MII_CONTROL, 0); in gem_mii_probe_default()
2877 if (dp->mii_phy_addr < 0) { in gem_mii_probe_default()
2880 dp->name); in gem_mii_probe_default()
2886 dp->name, dp->mii_phy_addr); in gem_mii_probe_default()
2890 for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) { in gem_mii_probe_default()
2891 dp->mii_phy_addr = phy; in gem_mii_probe_default()
2892 status = gem_mii_read(dp, MII_STATUS); in gem_mii_probe_default()
2895 gem_mii_write(dp, MII_CONTROL, 0); in gem_mii_probe_default()
2900 for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) { in gem_mii_probe_default()
2901 dp->mii_phy_addr = phy; in gem_mii_probe_default()
2902 gem_mii_write(dp, MII_CONTROL, 0); in gem_mii_probe_default()
2903 status = gem_mii_read(dp, MII_STATUS); in gem_mii_probe_default()
2910 cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name); in gem_mii_probe_default()
2911 dp->mii_phy_addr = -1; in gem_mii_probe_default()
2916 dp->mii_status = status; in gem_mii_probe_default()
2917 dp->mii_phy_id = (gem_mii_read(dp, MII_PHYIDH) << 16) | in gem_mii_probe_default()
2918 gem_mii_read(dp, MII_PHYIDL); in gem_mii_probe_default()
2920 if (dp->mii_phy_addr < 0) { in gem_mii_probe_default()
2922 dp->name, dp->mii_phy_id); in gem_mii_probe_default()
2925 dp->name, dp->mii_phy_id, dp->mii_phy_addr); in gem_mii_probe_default()
2929 dp->name, in gem_mii_probe_default()
2930 gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS, in gem_mii_probe_default()
2932 gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS, in gem_mii_probe_default()
2933 gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS); in gem_mii_probe_default()
2935 dp->mii_xstatus = 0; in gem_mii_probe_default()
2937 dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS); in gem_mii_probe_default()
2940 dp->name, dp->mii_xstatus, MII_XSTATUS_BITS); in gem_mii_probe_default()
2944 adv_org = gem_mii_read(dp, MII_AN_ADVERT); in gem_mii_probe_default()
2946 gem_mii_write(dp, MII_AN_ADVERT, in gem_mii_probe_default()
2949 adv = gem_mii_read(dp, MII_AN_ADVERT); in gem_mii_probe_default()
2952 dp->gc.gc_flow_control &= ~1; in gem_mii_probe_default()
2956 dp->gc.gc_flow_control &= ~2; in gem_mii_probe_default()
2959 gem_mii_write(dp, MII_AN_ADVERT, adv_org); in gem_mii_probe_default()
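
gem_mii_probe_default() first tries the configured PHY address, then scans addresses gc_mii_addr_min through 31 looking for a responsive status register, and finally reads PHYIDH/PHYIDL to record the PHY identifier. A sketch of the scan over a hypothetical mii_read() callback; the real driver also retries the scan after first clearing MII_CONTROL, which is omitted here, and the "responsive" test below (not all-ones, not zero) is an assumption based on common MII behaviour.

    #include <stdint.h>
    #include <stdio.h>

    #define MII_STATUS      1       /* standard MII register numbers */
    #define MII_PHYIDH      2
    #define MII_PHYIDL      3

    /* hypothetical register-read callback standing in for gc_mii_read */
    typedef uint16_t (*mii_read_fn)(int phy_addr, int reg);

    static int
    probe_phy_model(mii_read_fn mii_read, int addr_min, uint32_t *phy_id)
    {
            for (int phy = addr_min; phy < 32; phy++) {
                    uint16_t status = mii_read(phy, MII_STATUS);

                    if (status != 0xffffu && status != 0) {
                            *phy_id =
                                ((uint32_t)mii_read(phy, MII_PHYIDH) << 16) |
                                mii_read(phy, MII_PHYIDL);
                            return (phy);
                    }
            }
            return (-1);            /* no MII PHY found */
    }

    /* fake bus for the demo: a single PHY at address 1 */
    static uint16_t
    fake_mii_read(int phy_addr, int reg)
    {
            if (phy_addr != 1)
                    return (0xffffu);       /* empty addresses float high */
            return (reg == MII_STATUS ? 0x7809 : 0x0141);
    }

    int
    main(void)
    {
            uint32_t id;
            int phy = probe_phy_model(fake_mii_read, 0, &id);

            printf("phy=%d id=0x%08x\n", phy, (unsigned)id);
            return (0);
    }
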
2965 gem_mii_start(struct gem_dev *dp) in gem_mii_start() argument
2967 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_mii_start()
2970 dp->mii_state = MII_STATE_UNKNOWN; in gem_mii_start()
2971 dp->mii_last_check = ddi_get_lbolt(); in gem_mii_start()
2972 dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout; in gem_mii_start()
2973 (void) gem_mii_link_watcher(dp); in gem_mii_start()
2977 gem_mii_stop(struct gem_dev *dp) in gem_mii_stop() argument
2979 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_mii_stop()
2982 mutex_enter(&dp->intrlock); in gem_mii_stop()
2983 if (dp->link_watcher_id) { in gem_mii_stop()
2984 while (untimeout(dp->link_watcher_id) == -1) in gem_mii_stop()
2986 dp->link_watcher_id = 0; in gem_mii_stop()
2988 mutex_exit(&dp->intrlock); in gem_mii_stop()
2992 gem_get_mac_addr_conf(struct gem_dev *dp) in gem_get_mac_addr_conf() argument
3005 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_get_mac_addr_conf()
3010 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip, in gem_get_mac_addr_conf()
3057 dp->dev_addr.ether_addr_octet[i] = mac[i]; in gem_get_mac_addr_conf()
3065 dp->name, valstr); in gem_get_mac_addr_conf()
3079 gem_mac_set_rx_filter(struct gem_dev *dp) in gem_mac_set_rx_filter() argument
3081 return ((*dp->gc.gc_set_rx_filter)(dp)); in gem_mac_set_rx_filter()
3088 gem_mac_init(struct gem_dev *dp) in gem_mac_init() argument
3090 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_mac_init()
3092 if (dp->mac_suspended) { in gem_mac_init()
3096 dp->mac_active = B_FALSE; in gem_mac_init()
3098 gem_init_rx_ring(dp); in gem_mac_init()
3099 gem_init_tx_ring(dp); in gem_mac_init()
3102 dp->tx_blocked = (clock_t)0; in gem_mac_init()
3103 dp->tx_busy = 0; in gem_mac_init()
3104 dp->tx_reclaim_busy = 0; in gem_mac_init()
3105 dp->tx_max_packets = dp->gc.gc_tx_buf_limit; in gem_mac_init()
3107 if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) { in gem_mac_init()
3111 gem_prepare_rx_buf(dp); in gem_mac_init()
3119 gem_mac_start(struct gem_dev *dp) in gem_mac_start() argument
3121 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_mac_start()
3123 ASSERT(mutex_owned(&dp->intrlock)); in gem_mac_start()
3124 ASSERT(dp->nic_state == NIC_STATE_ONLINE); in gem_mac_start()
3125 ASSERT(dp->mii_state == MII_STATE_LINKUP); in gem_mac_start()
3128 mutex_enter(&dp->xmitlock); in gem_mac_start()
3129 if (dp->mac_suspended) { in gem_mac_start()
3130 mutex_exit(&dp->xmitlock); in gem_mac_start()
3133 dp->mac_active = B_TRUE; in gem_mac_start()
3134 mutex_exit(&dp->xmitlock); in gem_mac_start()
3137 (*dp->gc.gc_rx_start)(dp, in gem_mac_start()
3138 SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size), in gem_mac_start()
3139 dp->rx_active_tail - dp->rx_active_head); in gem_mac_start()
3141 if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) { in gem_mac_start()
3143 dp->name, __func__); in gem_mac_start()
3147 mutex_enter(&dp->xmitlock); in gem_mac_start()
3150 ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0); in gem_mac_start()
3151 if (dp->tx_softq_tail - dp->tx_softq_head > 0) { in gem_mac_start()
3152 gem_tx_load_descs_oo(dp, in gem_mac_start()
3153 dp->tx_softq_head, dp->tx_softq_tail, in gem_mac_start()
3156 gem_tx_start_unit(dp); in gem_mac_start()
3159 mutex_exit(&dp->xmitlock); in gem_mac_start()
3165 gem_mac_stop(struct gem_dev *dp, uint_t flags) in gem_mac_stop() argument
3175 dp->name, __func__, dp->rx_buf_freecnt)); in gem_mac_stop()
3177 ASSERT(mutex_owned(&dp->intrlock)); in gem_mac_stop()
3178 ASSERT(!mutex_owned(&dp->xmitlock)); in gem_mac_stop()
3183 mutex_enter(&dp->xmitlock); in gem_mac_stop()
3184 if (dp->mac_suspended) { in gem_mac_stop()
3185 mutex_exit(&dp->xmitlock); in gem_mac_stop()
3188 dp->mac_active = B_FALSE; in gem_mac_stop()
3190 while (dp->tx_busy > 0) { in gem_mac_stop()
3191 cv_wait(&dp->tx_drain_cv, &dp->xmitlock); in gem_mac_stop()
3193 mutex_exit(&dp->xmitlock); in gem_mac_stop()
3200 2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) * in gem_mac_stop()
3201 (dp->tx_active_tail - dp->tx_active_head); in gem_mac_stop()
3204 dp->name, __func__, wait_time)); in gem_mac_stop()
3209 while (dp->tx_active_tail != dp->tx_active_head) { in gem_mac_stop()
3213 dp->name, __func__); in gem_mac_stop()
3216 (void) gem_reclaim_txbuf(dp); in gem_mac_stop()
3222 dp->name, __func__, i, in gem_mac_stop()
3229 if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) { in gem_mac_stop()
3231 dp->name, __func__); in gem_mac_stop()
3232 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) { in gem_mac_stop()
3234 dp->name, __func__); in gem_mac_stop()
3242 (void) gem_receive(dp); in gem_mac_stop()
3244 gem_clean_rx_buf(dp); in gem_mac_stop()
3249 (*dp->gc.gc_get_stats)(dp); in gem_mac_stop()
3254 ASSERT(dp->tx_active_tail == dp->tx_softq_head); in gem_mac_stop()
3255 ASSERT(dp->tx_softq_tail == dp->tx_free_head); in gem_mac_stop()
3258 dp->tx_active_tail = dp->tx_active_head; in gem_mac_stop()
3259 dp->tx_softq_head = dp->tx_active_head; in gem_mac_stop()
3261 gem_clean_tx_buf(dp); in gem_mac_stop()
3268 gem_add_multicast(struct gem_dev *dp, const uint8_t *ep) in gem_add_multicast() argument
3273 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_add_multicast()
3275 mutex_enter(&dp->intrlock); in gem_add_multicast()
3276 if (dp->mac_suspended) { in gem_add_multicast()
3277 mutex_exit(&dp->intrlock); in gem_add_multicast()
3281 if (dp->mc_count_req++ < GEM_MAXMC) { in gem_add_multicast()
3283 cnt = dp->mc_count; in gem_add_multicast()
3284 bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet, in gem_add_multicast()
3286 if (dp->gc.gc_multicast_hash) { in gem_add_multicast()
3287 dp->mc_list[cnt].hash = in gem_add_multicast()
3288 (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep); in gem_add_multicast()
3290 dp->mc_count = cnt + 1; in gem_add_multicast()
3293 if (dp->mc_count_req != dp->mc_count) { in gem_add_multicast()
3295 dp->rxmode |= RXMODE_MULTI_OVF; in gem_add_multicast()
3297 dp->rxmode &= ~RXMODE_MULTI_OVF; in gem_add_multicast()
3301 err = gem_mac_set_rx_filter(dp); in gem_add_multicast()
3303 mutex_exit(&dp->intrlock); in gem_add_multicast()
3309 gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep) in gem_remove_multicast() argument
3316 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_remove_multicast()
3318 mutex_enter(&dp->intrlock); in gem_remove_multicast()
3319 if (dp->mac_suspended) { in gem_remove_multicast()
3320 mutex_exit(&dp->intrlock); in gem_remove_multicast()
3324 dp->mc_count_req--; in gem_remove_multicast()
3325 cnt = dp->mc_count; in gem_remove_multicast()
3327 if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) { in gem_remove_multicast()
3331 len = (cnt - (i + 1)) * sizeof (*dp->mc_list); in gem_remove_multicast()
3333 bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len); in gem_remove_multicast()
3335 dp->mc_count--; in gem_remove_multicast()
3339 if (dp->mc_count_req != dp->mc_count) { in gem_remove_multicast()
3341 dp->rxmode |= RXMODE_MULTI_OVF; in gem_remove_multicast()
3343 dp->rxmode &= ~RXMODE_MULTI_OVF; in gem_remove_multicast()
3346 err = gem_mac_set_rx_filter(dp); in gem_remove_multicast()
3348 mutex_exit(&dp->intrlock); in gem_remove_multicast()
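
gem_add_multicast() and gem_remove_multicast() keep up to GEM_MAXMC addresses in mc_list[], track how many the stack actually asked for in mc_count_req, and set RXMODE_MULTI_OVF whenever the two counts diverge so the rx filter can fall back to an all-multicast mode. A compact model of that bookkeeping; the hash callback and the rx-filter reprogramming are omitted, the per-entry struct is reduced to the address, and GEM_MAXMC is assumed small for the demo.

    #include <stdio.h>
    #include <string.h>

    #define ETHERADDRL       6
    #define MODEL_MAXMC      4      /* stands in for GEM_MAXMC */
    #define RXMODE_MULTI_OVF 0x1

    struct mc_dev {
            unsigned char mc_list[MODEL_MAXMC][ETHERADDRL];
            int      mc_count;      /* entries actually stored */
            int      mc_count_req;  /* entries the stack asked for */
            unsigned rxmode;
    };

    static void
    update_ovf(struct mc_dev *dp)
    {
            if (dp->mc_count_req != dp->mc_count)
                    dp->rxmode |= RXMODE_MULTI_OVF;     /* list overflowed */
            else
                    dp->rxmode &= ~RXMODE_MULTI_OVF;
    }

    static void
    add_multicast(struct mc_dev *dp, const unsigned char *ep)
    {
            if (dp->mc_count_req++ < MODEL_MAXMC) {
                    memcpy(dp->mc_list[dp->mc_count], ep, ETHERADDRL);
                    dp->mc_count++;
            }
            update_ovf(dp);
            /* the driver reprograms the rx filter here */
    }

    static void
    remove_multicast(struct mc_dev *dp, const unsigned char *ep)
    {
            dp->mc_count_req--;
            for (int i = 0; i < dp->mc_count; i++) {
                    if (memcmp(ep, dp->mc_list[i], ETHERADDRL) != 0)
                            continue;
                    /* close the gap left by the removed entry */
                    memmove(dp->mc_list[i], dp->mc_list[i + 1],
                        (size_t)(dp->mc_count - (i + 1)) * ETHERADDRL);
                    dp->mc_count--;
                    break;
            }
            update_ovf(dp);
    }

    int
    main(void)
    {
            struct mc_dev d = { 0 };
            unsigned char mac[ETHERADDRL] = { 0x01, 0x00, 0x5e, 0, 0, 1 };

            add_multicast(&d, mac);
            remove_multicast(&d, mac);
            printf("count=%d ovf=%u\n",
                d.mc_count, d.rxmode & RXMODE_MULTI_OVF);
            return (0);
    }
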
3419 struct gem_dev *dp; member
3426 struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp; in gem_param_get() local
3431 dp->name, __func__, item)); in gem_param_get()
3435 val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG); in gem_param_get()
3440 val = BOOLEAN(dp->gc.gc_flow_control & 1); in gem_param_get()
3444 val = BOOLEAN(dp->gc.gc_flow_control & 2); in gem_param_get()
3448 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) || in gem_param_get()
3449 (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD); in gem_param_get()
3453 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) || in gem_param_get()
3454 (dp->mii_xstatus & MII_XSTATUS_1000BASEX); in gem_param_get()
3458 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4); in gem_param_get()
3462 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD); in gem_param_get()
3466 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX); in gem_param_get()
3470 val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD); in gem_param_get()
3474 val = BOOLEAN(dp->mii_status & MII_STATUS_10); in gem_param_get()
3478 val = dp->anadv_autoneg; in gem_param_get()
3482 val = BOOLEAN(dp->anadv_flow_control & 1); in gem_param_get()
3486 val = BOOLEAN(dp->anadv_flow_control & 2); in gem_param_get()
3490 val = dp->anadv_1000fdx; in gem_param_get()
3494 val = dp->anadv_1000hdx; in gem_param_get()
3498 val = dp->anadv_100t4; in gem_param_get()
3502 val = dp->anadv_100fdx; in gem_param_get()
3506 val = dp->anadv_100hdx; in gem_param_get()
3510 val = dp->anadv_10fdx; in gem_param_get()
3514 val = dp->anadv_10hdx; in gem_param_get()
3518 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN); in gem_param_get()
3522 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE); in gem_param_get()
3526 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE); in gem_param_get()
3530 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL); in gem_param_get()
3534 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF); in gem_param_get()
3538 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4); in gem_param_get()
3542 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD); in gem_param_get()
3546 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX); in gem_param_get()
3550 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD); in gem_param_get()
3554 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T); in gem_param_get()
3558 val = (dp->mii_state == MII_STATE_LINKUP); in gem_param_get()
3562 val = gem_speed_value[dp->speed]; in gem_param_get()
3567 if (dp->mii_state == MII_STATE_LINKUP) { in gem_param_get()
3568 val = dp->full_duplex ? 2 : 1; in gem_param_get()
3573 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN); in gem_param_get()
3577 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) || in gem_param_get()
3578 (dp->flow_control == FLOW_CONTROL_RX_PAUSE); in gem_param_get()
3582 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) || in gem_param_get()
3583 (dp->flow_control == FLOW_CONTROL_TX_PAUSE); in gem_param_get()
3593 dp->name, item); in gem_param_get()
3605 struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp; in gem_param_set() local
3610 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_param_set()
3623 if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) { in gem_param_set()
3626 dp->anadv_autoneg = (int)val; in gem_param_set()
3634 dp->anadv_flow_control |= 1; in gem_param_set()
3636 dp->anadv_flow_control &= ~1; in gem_param_set()
3645 dp->anadv_flow_control |= 2; in gem_param_set()
3647 dp->anadv_flow_control &= ~2; in gem_param_set()
3655 if (val && (dp->mii_xstatus & in gem_param_set()
3660 dp->anadv_1000fdx = (int)val; in gem_param_set()
3667 if (val && (dp->mii_xstatus & in gem_param_set()
3671 dp->anadv_1000hdx = (int)val; in gem_param_set()
3678 if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) { in gem_param_set()
3681 dp->anadv_100t4 = (int)val; in gem_param_set()
3688 if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) { in gem_param_set()
3691 dp->anadv_100fdx = (int)val; in gem_param_set()
3698 if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) { in gem_param_set()
3701 dp->anadv_100hdx = (int)val; in gem_param_set()
3708 if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) { in gem_param_set()
3711 dp->anadv_10fdx = (int)val; in gem_param_set()
3718 if (val && (dp->mii_status & MII_STATUS_10) == 0) { in gem_param_set()
3721 dp->anadv_10hdx = (int)val; in gem_param_set()
3726 gem_choose_forcedmode(dp); in gem_param_set()
3728 dp->mii_state = MII_STATE_UNKNOWN; in gem_param_set()
3729 if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) { in gem_param_set()
3731 (void) gem_mii_link_check(dp); in gem_param_set()
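Similarly, the gem_param_set() references show a guard on the set path: a mode may only be advertised if the PHY's status word reports the matching capability, and a successful change marks the MII state unknown so the link watcher renegotiates. A hedged, stand-alone model of that check (field names and masks are again hypothetical):

#include <errno.h>
#include <stdint.h>

#define	DEMO_STATUS_10_FD	(1u << 12)	/* illustrative capability bit */

struct demo_dev {
	uint16_t	mii_status;		/* PHY capability word */
	int		anadv_10fdx;		/* advertise 10Mbps full duplex? */
	int		mii_state_unknown;	/* force the watcher to renegotiate */
};

/*
 * Accept the new advertisement only if the hardware supports it;
 * reject an attempt to enable an unsupported mode with EINVAL.
 */
static int
demo_set_adv_10fdx(struct demo_dev *dp, long val)
{
	if (val && (dp->mii_status & DEMO_STATUS_10_FD) == 0)
		return (EINVAL);

	dp->anadv_10fdx = (int)val;
	dp->mii_state_unknown = 1;	/* renegotiate with the new settings */
	return (0);
}

int
main(void)
{
	struct demo_dev dev = { .mii_status = DEMO_STATUS_10_FD };

	return (demo_set_adv_10fdx(&dev, 1));	/* returns 0: capability present */
}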
3740 gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item) in gem_nd_load() argument
3747 arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item]; in gem_nd_load()
3748 arg->dp = dp; in gem_nd_load()
3752 dp->name, __func__, name, item)); in gem_nd_load()
3753 (void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg); in gem_nd_load()
3757 gem_nd_setup(struct gem_dev *dp) in gem_nd_setup() argument
3760 dp->name, __func__, dp->mii_status, MII_STATUS_BITS)); in gem_nd_setup()
3762 ASSERT(dp->nd_arg_p == NULL); in gem_nd_setup()
3764 dp->nd_arg_p = in gem_nd_setup()
3769 gem_nd_load(dp, "autoneg_cap", in gem_nd_setup()
3771 gem_nd_load(dp, "pause_cap", in gem_nd_setup()
3773 gem_nd_load(dp, "asym_pause_cap", in gem_nd_setup()
3775 gem_nd_load(dp, "1000fdx_cap", in gem_nd_setup()
3777 gem_nd_load(dp, "1000hdx_cap", in gem_nd_setup()
3779 gem_nd_load(dp, "100T4_cap", in gem_nd_setup()
3781 gem_nd_load(dp, "100fdx_cap", in gem_nd_setup()
3783 gem_nd_load(dp, "100hdx_cap", in gem_nd_setup()
3785 gem_nd_load(dp, "10fdx_cap", in gem_nd_setup()
3787 gem_nd_load(dp, "10hdx_cap", in gem_nd_setup()
3791 gem_nd_load(dp, "adv_autoneg_cap", gem_param_get, in gem_nd_setup()
3792 SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG), in gem_nd_setup()
3794 gem_nd_load(dp, "adv_pause_cap", gem_param_get, in gem_nd_setup()
3795 SETFUNC(dp->gc.gc_flow_control & 1), in gem_nd_setup()
3797 gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get, in gem_nd_setup()
3798 SETFUNC(dp->gc.gc_flow_control & 2), in gem_nd_setup()
3800 gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get, in gem_nd_setup()
3801 SETFUNC(dp->mii_xstatus & in gem_nd_setup()
3804 gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get, in gem_nd_setup()
3805 SETFUNC(dp->mii_xstatus & in gem_nd_setup()
3808 gem_nd_load(dp, "adv_100T4_cap", gem_param_get, in gem_nd_setup()
3809 SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) && in gem_nd_setup()
3810 !dp->mii_advert_ro), in gem_nd_setup()
3812 gem_nd_load(dp, "adv_100fdx_cap", gem_param_get, in gem_nd_setup()
3813 SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) && in gem_nd_setup()
3814 !dp->mii_advert_ro), in gem_nd_setup()
3816 gem_nd_load(dp, "adv_100hdx_cap", gem_param_get, in gem_nd_setup()
3817 SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) && in gem_nd_setup()
3818 !dp->mii_advert_ro), in gem_nd_setup()
3820 gem_nd_load(dp, "adv_10fdx_cap", gem_param_get, in gem_nd_setup()
3821 SETFUNC((dp->mii_status & MII_STATUS_10_FD) && in gem_nd_setup()
3822 !dp->mii_advert_ro), in gem_nd_setup()
3824 gem_nd_load(dp, "adv_10hdx_cap", gem_param_get, in gem_nd_setup()
3825 SETFUNC((dp->mii_status & MII_STATUS_10) && in gem_nd_setup()
3826 !dp->mii_advert_ro), in gem_nd_setup()
3830 gem_nd_load(dp, "lp_autoneg_cap", in gem_nd_setup()
3832 gem_nd_load(dp, "lp_pause_cap", in gem_nd_setup()
3834 gem_nd_load(dp, "lp_asym_pause_cap", in gem_nd_setup()
3836 gem_nd_load(dp, "lp_1000fdx_cap", in gem_nd_setup()
3838 gem_nd_load(dp, "lp_1000hdx_cap", in gem_nd_setup()
3840 gem_nd_load(dp, "lp_100T4_cap", in gem_nd_setup()
3842 gem_nd_load(dp, "lp_100fdx_cap", in gem_nd_setup()
3844 gem_nd_load(dp, "lp_100hdx_cap", in gem_nd_setup()
3846 gem_nd_load(dp, "lp_10fdx_cap", in gem_nd_setup()
3848 gem_nd_load(dp, "lp_10hdx_cap", in gem_nd_setup()
3852 gem_nd_load(dp, "link_status", in gem_nd_setup()
3854 gem_nd_load(dp, "link_speed", in gem_nd_setup()
3856 gem_nd_load(dp, "link_duplex", in gem_nd_setup()
3858 gem_nd_load(dp, "link_autoneg", in gem_nd_setup()
3860 gem_nd_load(dp, "link_rx_pause", in gem_nd_setup()
3862 gem_nd_load(dp, "link_tx_pause", in gem_nd_setup()
3865 gem_nd_load(dp, "resume_test", in gem_nd_setup()
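The gem_nd_setup() references register one name/getter/setter tuple per parameter through the Solaris ndd framework (nd_load/nd_getset). As a rough stand-in for that idea only, the sketch below keeps the tuples in a plain table and dispatches a get by name; it deliberately does not reproduce the real nd_* API.

#include <stdio.h>
#include <string.h>

struct demo_dev { int anadv_autoneg; };

typedef int (*demo_get_t)(const struct demo_dev *);

static int
demo_get_adv_autoneg(const struct demo_dev *dp)
{
	return (dp->anadv_autoneg);
}

/* name -> getter registry, analogous to registering "adv_autoneg_cap" etc. */
static const struct demo_param {
	const char	*name;
	demo_get_t	get;
} demo_params[] = {
	{ "adv_autoneg_cap", demo_get_adv_autoneg },
};

static int
demo_nd_get(const struct demo_dev *dp, const char *name, int *valp)
{
	size_t i;

	for (i = 0; i < sizeof (demo_params) / sizeof (demo_params[0]); i++) {
		if (strcmp(demo_params[i].name, name) == 0) {
			*valp = demo_params[i].get(dp);
			return (0);
		}
	}
	return (-1);	/* unknown parameter */
}

int
main(void)
{
	struct demo_dev dev = { .anadv_autoneg = 1 };
	int val;

	if (demo_nd_get(&dev, "adv_autoneg_cap", &val) == 0)
		printf("adv_autoneg_cap=%d\n", val);
	return (0);
}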
3873 gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp) in gem_nd_ioctl() argument
3877 ASSERT(mutex_owned(&dp->intrlock)); in gem_nd_ioctl()
3879 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_nd_ioctl()
3883 ok = nd_getset(wq, dp->nd_data_p, mp); in gem_nd_ioctl()
3885 "%s: get %s", dp->name, ok ? "OK" : "FAIL")); in gem_nd_ioctl()
3889 ok = nd_getset(wq, dp->nd_data_p, mp); in gem_nd_ioctl()
3892 dp->name, ok ? "OK" : "FAIL", iocp->ioc_error)); in gem_nd_ioctl()
3905 cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd); in gem_nd_ioctl()
3911 gem_nd_cleanup(struct gem_dev *dp) in gem_nd_cleanup() argument
3913 ASSERT(dp->nd_data_p != NULL); in gem_nd_cleanup()
3914 ASSERT(dp->nd_arg_p != NULL); in gem_nd_cleanup()
3916 nd_free(&dp->nd_data_p); in gem_nd_cleanup()
3918 kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT); in gem_nd_cleanup()
3919 dp->nd_arg_p = NULL; in gem_nd_cleanup()
3923 gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp) in gem_mac_ioctl() argument
3929 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_mac_ioctl()
3938 DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd)); in gem_mac_ioctl()
3940 mutex_enter(&dp->intrlock); in gem_mac_ioctl()
3941 mutex_enter(&dp->xmitlock); in gem_mac_ioctl()
3951 status = gem_nd_ioctl(dp, wq, mp, iocp); in gem_mac_ioctl()
3955 mutex_exit(&dp->xmitlock); in gem_mac_ioctl()
3956 mutex_exit(&dp->intrlock); in gem_mac_ioctl()
3960 gem_suspend(dp->dip); in gem_mac_ioctl()
3961 gem_resume(dp->dip); in gem_mac_ioctl()
4014 gem_mac_xcvr_inuse(struct gem_dev *dp) in gem_mac_xcvr_inuse() argument
4018 if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) { in gem_mac_xcvr_inuse()
4019 if (dp->mii_status & MII_STATUS_100_BASE_T4) { in gem_mac_xcvr_inuse()
4021 } else if (dp->mii_status & in gem_mac_xcvr_inuse()
4025 } else if (dp->mii_status & in gem_mac_xcvr_inuse()
4029 } else if (dp->mii_status & in gem_mac_xcvr_inuse()
4033 } else if (dp->mii_xstatus & in gem_mac_xcvr_inuse()
4036 } else if (dp->mii_xstatus & in gem_mac_xcvr_inuse()
4079 struct gem_dev *dp = arg; in gem_m_start() local
4081 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_m_start()
4083 mutex_enter(&dp->intrlock); in gem_m_start()
4084 if (dp->mac_suspended) { in gem_m_start()
4088 if (gem_mac_init(dp) != GEM_SUCCESS) { in gem_m_start()
4092 dp->nic_state = NIC_STATE_INITIALIZED; in gem_m_start()
4095 dp->mc_count = 0; in gem_m_start()
4096 dp->mc_count_req = 0; in gem_m_start()
4099 if (dp->mii_state == MII_STATE_LINKUP) { in gem_m_start()
4100 (dp->gc.gc_set_media)(dp); in gem_m_start()
4104 bcopy(dp->dev_addr.ether_addr_octet, in gem_m_start()
4105 dp->cur_addr.ether_addr_octet, ETHERADDRL); in gem_m_start()
4106 dp->rxmode |= RXMODE_ENABLE; in gem_m_start()
4108 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) { in gem_m_start()
4113 dp->nic_state = NIC_STATE_ONLINE; in gem_m_start()
4114 if (dp->mii_state == MII_STATE_LINKUP) { in gem_m_start()
4115 if (gem_mac_start(dp) != GEM_SUCCESS) { in gem_m_start()
4121 dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout, in gem_m_start()
4122 (void *)dp, dp->gc.gc_tx_timeout_interval); in gem_m_start()
4123 mutex_exit(&dp->intrlock); in gem_m_start()
4127 dp->nic_state = NIC_STATE_STOPPED; in gem_m_start()
4128 mutex_exit(&dp->intrlock); in gem_m_start()
4135 struct gem_dev *dp = arg; in gem_m_stop() local
4137 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_m_stop()
4140 mutex_enter(&dp->intrlock); in gem_m_stop()
4141 if (dp->mac_suspended) { in gem_m_stop()
4142 mutex_exit(&dp->intrlock); in gem_m_stop()
4145 dp->rxmode &= ~RXMODE_ENABLE; in gem_m_stop()
4146 (void) gem_mac_set_rx_filter(dp); in gem_m_stop()
4147 mutex_exit(&dp->intrlock); in gem_m_stop()
4150 if (dp->timeout_id) { in gem_m_stop()
4151 while (untimeout(dp->timeout_id) == -1) in gem_m_stop()
4153 dp->timeout_id = 0; in gem_m_stop()
4157 mutex_enter(&dp->intrlock); in gem_m_stop()
4158 if (dp->mac_suspended) { in gem_m_stop()
4159 mutex_exit(&dp->intrlock); in gem_m_stop()
4162 dp->nic_state = NIC_STATE_STOPPED; in gem_m_stop()
4165 mutex_enter(&dp->xmitlock); in gem_m_stop()
4166 dp->mac_active = B_FALSE; in gem_m_stop()
4167 mutex_exit(&dp->xmitlock); in gem_m_stop()
4170 while (dp->intr_busy) { in gem_m_stop()
4171 cv_wait(&dp->tx_drain_cv, &dp->intrlock); in gem_m_stop()
4173 (void) gem_mac_stop(dp, 0); in gem_m_stop()
4174 mutex_exit(&dp->intrlock); in gem_m_stop()
4182 struct gem_dev *dp = arg; in gem_m_multicst() local
4184 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_m_multicst()
4187 ret = gem_add_multicast(dp, ep); in gem_m_multicst()
4189 ret = gem_remove_multicast(dp, ep); in gem_m_multicst()
4204 struct gem_dev *dp = arg; in gem_m_setpromisc() local
4206 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_m_setpromisc()
4208 mutex_enter(&dp->intrlock); in gem_m_setpromisc()
4209 if (dp->mac_suspended) { in gem_m_setpromisc()
4210 mutex_exit(&dp->intrlock); in gem_m_setpromisc()
4214 dp->rxmode |= RXMODE_PROMISC; in gem_m_setpromisc()
4216 dp->rxmode &= ~RXMODE_PROMISC; in gem_m_setpromisc()
4219 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) { in gem_m_setpromisc()
4222 mutex_exit(&dp->intrlock); in gem_m_setpromisc()
4230 struct gem_dev *dp = arg; in gem_m_getstat() local
4231 struct gem_stats *gstp = &dp->stats; in gem_m_getstat()
4234 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_m_getstat()
4236 if (mutex_owned(&dp->intrlock)) { in gem_m_getstat()
4237 if (dp->mac_suspended) { in gem_m_getstat()
4241 mutex_enter(&dp->intrlock); in gem_m_getstat()
4242 if (dp->mac_suspended) { in gem_m_getstat()
4243 mutex_exit(&dp->intrlock); in gem_m_getstat()
4246 mutex_exit(&dp->intrlock); in gem_m_getstat()
4249 if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) { in gem_m_getstat()
4255 val = gem_speed_value[dp->speed] * 1000000ull; in gem_m_getstat()
4367 val = dp->mii_phy_addr; in gem_m_getstat()
4371 val = dp->mii_phy_id; in gem_m_getstat()
4375 val = gem_mac_xcvr_inuse(dp); in gem_m_getstat()
4379 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) || in gem_m_getstat()
4380 (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD); in gem_m_getstat()
4384 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) || in gem_m_getstat()
4385 (dp->mii_xstatus & MII_XSTATUS_1000BASEX); in gem_m_getstat()
4389 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD); in gem_m_getstat()
4393 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX); in gem_m_getstat()
4397 val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD); in gem_m_getstat()
4401 val = BOOLEAN(dp->mii_status & MII_STATUS_10); in gem_m_getstat()
4405 val = BOOLEAN(dp->gc.gc_flow_control & 2); in gem_m_getstat()
4409 val = BOOLEAN(dp->gc.gc_flow_control & 1); in gem_m_getstat()
4413 val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG); in gem_m_getstat()
4417 val = dp->anadv_1000fdx; in gem_m_getstat()
4421 val = dp->anadv_1000hdx; in gem_m_getstat()
4425 val = dp->anadv_100fdx; in gem_m_getstat()
4429 val = dp->anadv_100hdx; in gem_m_getstat()
4433 val = dp->anadv_10fdx; in gem_m_getstat()
4437 val = dp->anadv_10hdx; in gem_m_getstat()
4441 val = BOOLEAN(dp->anadv_flow_control & 2); in gem_m_getstat()
4445 val = BOOLEAN(dp->anadv_flow_control & 1); in gem_m_getstat()
4449 val = dp->anadv_autoneg; in gem_m_getstat()
4453 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL); in gem_m_getstat()
4457 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF); in gem_m_getstat()
4461 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD); in gem_m_getstat()
4465 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX); in gem_m_getstat()
4469 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD); in gem_m_getstat()
4473 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T); in gem_m_getstat()
4477 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE); in gem_m_getstat()
4481 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE); in gem_m_getstat()
4485 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN); in gem_m_getstat()
4489 val = BOOLEAN(dp->flow_control & 2); in gem_m_getstat()
4493 val = BOOLEAN(dp->flow_control & 1); in gem_m_getstat()
4497 val = dp->anadv_autoneg && in gem_m_getstat()
4498 BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN); in gem_m_getstat()
4502 val = (dp->mii_state == MII_STATE_LINKUP) ? in gem_m_getstat()
4503 (dp->full_duplex ? 2 : 1) : 0; in gem_m_getstat()
4510 val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT); in gem_m_getstat()
4518 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4); in gem_m_getstat()
4522 val = dp->anadv_100t4; in gem_m_getstat()
4526 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4); in gem_m_getstat()
4547 struct gem_dev *dp = arg; in gem_m_unicst() local
4549 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_m_unicst()
4551 mutex_enter(&dp->intrlock); in gem_m_unicst()
4552 if (dp->mac_suspended) { in gem_m_unicst()
4553 mutex_exit(&dp->intrlock); in gem_m_unicst()
4556 bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL); in gem_m_unicst()
4557 dp->rxmode |= RXMODE_ENABLE; in gem_m_unicst()
4559 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) { in gem_m_unicst()
4562 mutex_exit(&dp->intrlock); in gem_m_unicst()
4574 struct gem_dev *dp = arg; in gem_m_tx() local
4577 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_m_tx()
4579 ASSERT(dp->nic_state == NIC_STATE_ONLINE); in gem_m_tx()
4580 if (dp->mii_state != MII_STATE_LINKUP) { in gem_m_tx()
4591 return (gem_send_common(dp, mp, flags)); in gem_m_tx()
4611 gem_gld3_init(struct gem_dev *dp, mac_register_t *macp) in gem_gld3_init() argument
4614 macp->m_driver = dp; in gem_gld3_init()
4615 macp->m_dip = dp->dip; in gem_gld3_init()
4616 macp->m_src_addr = dp->dev_addr.ether_addr_octet; in gem_gld3_init()
4619 macp->m_max_sdu = dp->mtu; in gem_gld3_init()
4621 if (dp->misc_flag & GEM_VLAN) { in gem_gld3_init()
4632 gem_read_conf(struct gem_dev *dp) in gem_read_conf() argument
4636 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_read_conf()
4641 dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0; in gem_read_conf()
4642 dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0; in gem_read_conf()
4643 dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0; in gem_read_conf()
4644 dp->anadv_100t4 = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0; in gem_read_conf()
4645 dp->anadv_100fdx = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0; in gem_read_conf()
4646 dp->anadv_100hdx = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0; in gem_read_conf()
4647 dp->anadv_10fdx = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0; in gem_read_conf()
4648 dp->anadv_10hdx = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0; in gem_read_conf()
4650 if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip, in gem_read_conf()
4652 dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0; in gem_read_conf()
4653 dp->anadv_autoneg = B_FALSE; in gem_read_conf()
4654 if (dp->full_duplex) { in gem_read_conf()
4655 dp->anadv_1000hdx = B_FALSE; in gem_read_conf()
4656 dp->anadv_100hdx = B_FALSE; in gem_read_conf()
4657 dp->anadv_10hdx = B_FALSE; in gem_read_conf()
4659 dp->anadv_1000fdx = B_FALSE; in gem_read_conf()
4660 dp->anadv_100fdx = B_FALSE; in gem_read_conf()
4661 dp->anadv_10fdx = B_FALSE; in gem_read_conf()
4665 if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) { in gem_read_conf()
4666 dp->anadv_autoneg = B_FALSE; in gem_read_conf()
4669 dp->speed = GEM_SPD_1000; in gem_read_conf()
4670 dp->anadv_100t4 = B_FALSE; in gem_read_conf()
4671 dp->anadv_100fdx = B_FALSE; in gem_read_conf()
4672 dp->anadv_100hdx = B_FALSE; in gem_read_conf()
4673 dp->anadv_10fdx = B_FALSE; in gem_read_conf()
4674 dp->anadv_10hdx = B_FALSE; in gem_read_conf()
4677 dp->speed = GEM_SPD_100; in gem_read_conf()
4678 dp->anadv_1000fdx = B_FALSE; in gem_read_conf()
4679 dp->anadv_1000hdx = B_FALSE; in gem_read_conf()
4680 dp->anadv_10fdx = B_FALSE; in gem_read_conf()
4681 dp->anadv_10hdx = B_FALSE; in gem_read_conf()
4684 dp->speed = GEM_SPD_10; in gem_read_conf()
4685 dp->anadv_1000fdx = B_FALSE; in gem_read_conf()
4686 dp->anadv_1000hdx = B_FALSE; in gem_read_conf()
4687 dp->anadv_100t4 = B_FALSE; in gem_read_conf()
4688 dp->anadv_100fdx = B_FALSE; in gem_read_conf()
4689 dp->anadv_100hdx = B_FALSE; in gem_read_conf()
4694 dp->name, "speed", val); in gem_read_conf()
4695 dp->anadv_autoneg = B_TRUE; in gem_read_conf()
4700 val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control); in gem_read_conf()
4704 dp->name, "flow-control", val); in gem_read_conf()
4706 val = min(val, dp->gc.gc_flow_control); in gem_read_conf()
4708 dp->anadv_flow_control = val; in gem_read_conf()
4710 if (gem_prop_get_int(dp, "nointr", 0)) { in gem_read_conf()
4711 dp->misc_flag |= GEM_NOINTR; in gem_read_conf()
4712 cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name); in gem_read_conf()
4715 dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu); in gem_read_conf()
4716 dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr); in gem_read_conf()
4717 dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr); in gem_read_conf()
4718 dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma); in gem_read_conf()
4719 dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma); in gem_read_conf()
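The gem_read_conf() references show the precedence applied to driver.conf properties: an explicit full-duplex or speed property disables autonegotiation and clears the advertisement flags that contradict it, while an out-of-range speed value falls back to autonegotiation with a warning. A compact model of the speed switch (structure and names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct demo_conf {
	bool autoneg;
	bool adv_1000fdx, adv_1000hdx;
	bool adv_100fdx, adv_100hdx;
	bool adv_10fdx, adv_10hdx;
};

/* force one speed: turn autoneg off and drop advertisements for other speeds */
static void
demo_apply_speed(struct demo_conf *cp, int speed)
{
	switch (speed) {
	case 1000:
		cp->autoneg = false;
		cp->adv_100fdx = cp->adv_100hdx = false;
		cp->adv_10fdx = cp->adv_10hdx = false;
		break;
	case 100:
		cp->autoneg = false;
		cp->adv_1000fdx = cp->adv_1000hdx = false;
		cp->adv_10fdx = cp->adv_10hdx = false;
		break;
	case 10:
		cp->autoneg = false;
		cp->adv_1000fdx = cp->adv_1000hdx = false;
		cp->adv_100fdx = cp->adv_100hdx = false;
		break;
	default:
		/* unsupported value: warn and keep autonegotiation */
		fprintf(stderr, "speed %d not supported, using autoneg\n", speed);
		cp->autoneg = true;
		break;
	}
}

int
main(void)
{
	struct demo_conf conf = {
		.autoneg = true,
		.adv_1000fdx = true, .adv_1000hdx = true,
		.adv_100fdx = true, .adv_100hdx = true,
		.adv_10fdx = true, .adv_10hdx = true,
	};

	demo_apply_speed(&conf, 100);
	printf("autoneg=%d adv_100fdx=%d adv_1000fdx=%d\n",
	    conf.autoneg, conf.adv_100fdx, conf.adv_1000fdx);
	return (0);
}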
4738 struct gem_dev *dp; in gem_do_attach() local
4760 dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP); in gem_do_attach()
4765 kmem_free(dp, GEM_LOCAL_DATA_SIZE(gc)); in gem_do_attach()
4771 dp->private = lp; in gem_do_attach()
4772 dp->priv_size = lmsize; in gem_do_attach()
4773 dp->mc_list = (struct mcast_addr *)&dp[1]; in gem_do_attach()
4775 dp->dip = dip; in gem_do_attach()
4776 (void) sprintf(dp->name, gc->gc_name, nports * unit + port); in gem_do_attach()
4784 dp->name); in gem_do_attach()
4787 dp->iblock_cookie = c; in gem_do_attach()
4792 mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c); in gem_do_attach()
4793 mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c); in gem_do_attach()
4794 cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL); in gem_do_attach()
4799 dp->base_addr = base; in gem_do_attach()
4800 dp->regs_handle = *regs_handlep; in gem_do_attach()
4801 dp->gc = *gc; in gem_do_attach()
4802 gc = &dp->gc; in gem_do_attach()
4838 dp->rx_desc_size = in gem_do_attach()
4843 dp->tx_desc_size = in gem_do_attach()
4848 dp->mtu = ETHERMTU; in gem_do_attach()
4849 dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC]; in gem_do_attach()
4851 for (i = 0; i < dp->gc.gc_tx_buf_size; i++) { in gem_do_attach()
4852 dp->tx_buf[i].txb_next = in gem_do_attach()
4853 &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)]; in gem_do_attach()
4856 dp->rxmode = 0; in gem_do_attach()
4857 dp->speed = GEM_SPD_10; /* default is 10Mbps */ in gem_do_attach()
4858 dp->full_duplex = B_FALSE; /* default is half */ in gem_do_attach()
4859 dp->flow_control = FLOW_CONTROL_NONE; in gem_do_attach()
4860 dp->poll_pkt_delay = 8; /* typical coalesce for rx packets */ in gem_do_attach()
4863 dp->txthr = ETHERMAX; /* tx fifo threshold */ in gem_do_attach()
4864 dp->txmaxdma = 16*4; /* tx max dma burst size */ in gem_do_attach()
4865 dp->rxthr = 128; /* rx fifo threshold */ in gem_do_attach()
4866 dp->rxmaxdma = 16*4; /* rx max dma burst size */ in gem_do_attach()
4871 gem_read_conf(dp); in gem_do_attach()
4874 dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len; in gem_do_attach()
4879 mutex_enter(&dp->intrlock); in gem_do_attach()
4880 dp->nic_state = NIC_STATE_STOPPED; in gem_do_attach()
4881 ret = (*dp->gc.gc_reset_chip)(dp); in gem_do_attach()
4882 mutex_exit(&dp->intrlock); in gem_do_attach()
4890 mutex_enter(&dp->intrlock); in gem_do_attach()
4891 ret = (*dp->gc.gc_attach_chip)(dp); in gem_do_attach()
4892 mutex_exit(&dp->intrlock); in gem_do_attach()
4898 dp->gc.gc_tx_copy_thresh = dp->mtu; in gem_do_attach()
4901 if (gem_alloc_memory(dp)) { in gem_do_attach()
4907 dp->name, (long)dp->base_addr, in gem_do_attach()
4908 dp->dev_addr.ether_addr_octet[0], in gem_do_attach()
4909 dp->dev_addr.ether_addr_octet[1], in gem_do_attach()
4910 dp->dev_addr.ether_addr_octet[2], in gem_do_attach()
4911 dp->dev_addr.ether_addr_octet[3], in gem_do_attach()
4912 dp->dev_addr.ether_addr_octet[4], in gem_do_attach()
4913 dp->dev_addr.ether_addr_octet[5])); in gem_do_attach()
4916 dp->cur_addr = dp->dev_addr; in gem_do_attach()
4918 gem_gld3_init(dp, macp); in gem_do_attach()
4921 dp->mii_lpable = 0; in gem_do_attach()
4922 dp->mii_advert = 0; in gem_do_attach()
4923 dp->mii_exp = 0; in gem_do_attach()
4924 dp->mii_ctl1000 = 0; in gem_do_attach()
4925 dp->mii_stat1000 = 0; in gem_do_attach()
4926 if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) { in gem_do_attach()
4931 dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG); in gem_do_attach()
4932 dp->anadv_1000fdx &= in gem_do_attach()
4933 BOOLEAN(dp->mii_xstatus & in gem_do_attach()
4935 dp->anadv_1000hdx &= in gem_do_attach()
4936 BOOLEAN(dp->mii_xstatus & in gem_do_attach()
4938 dp->anadv_100t4 &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4); in gem_do_attach()
4939 dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD); in gem_do_attach()
4940 dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX); in gem_do_attach()
4941 dp->anadv_10fdx &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD); in gem_do_attach()
4942 dp->anadv_10hdx &= BOOLEAN(dp->mii_status & MII_STATUS_10); in gem_do_attach()
4944 gem_choose_forcedmode(dp); in gem_do_attach()
4947 if (dp->gc.gc_mii_init) { in gem_do_attach()
4948 if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) { in gem_do_attach()
4956 gem_nd_setup(dp); in gem_do_attach()
4961 if (ret = mac_register(macp, &dp->mh)) { in gem_do_attach()
4963 dp->name, ret); in gem_do_attach()
4969 if (dp->misc_flag & GEM_SOFTINTR) { in gem_do_attach()
4971 DDI_SOFTINT_LOW, &dp->soft_id, in gem_do_attach()
4974 (caddr_t)dp) != DDI_SUCCESS) { in gem_do_attach()
4976 dp->name); in gem_do_attach()
4979 } else if ((dp->misc_flag & GEM_NOINTR) == 0) { in gem_do_attach()
4982 (caddr_t)dp) != DDI_SUCCESS) { in gem_do_attach()
4983 cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name); in gem_do_attach()
4991 dp->intr_watcher_id = in gem_do_attach()
4993 (void *)dp, drv_usectohz(3*1000000)); in gem_do_attach()
4997 dp->next = (struct gem_dev *)ddi_get_driver_private(dip); in gem_do_attach()
4998 dp->port = port; in gem_do_attach()
4999 ddi_set_driver_private(dip, (caddr_t)dp); in gem_do_attach()
5002 gem_mii_start(dp); in gem_do_attach()
5005 return (dp); in gem_do_attach()
5008 (void) mac_unregister(dp->mh); in gem_do_attach()
5011 gem_nd_cleanup(dp); in gem_do_attach()
5014 gem_free_memory(dp); in gem_do_attach()
5016 ddi_regs_map_free(&dp->regs_handle); in gem_do_attach()
5018 mutex_destroy(&dp->xmitlock); in gem_do_attach()
5019 mutex_destroy(&dp->intrlock); in gem_do_attach()
5020 cv_destroy(&dp->tx_drain_cv); in gem_do_attach()
5025 kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc)); in gem_do_attach()
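The tail of the gem_do_attach() references follows the usual staged-initialization pattern: each acquired resource has a matching error label, and a failure unwinds only what was set up so far (mac_unregister, nd cleanup, allocated memory, register mapping, locks) before freeing the softstate. A generic, self-contained illustration of that structure using hypothetical resource names:

#include <stdio.h>
#include <stdlib.h>

/* hypothetical resources standing in for regs mapping, descriptor memory, locks */
struct demo_attach {
	void	*regs;
	void	*descs;
	int	locks_ready;
};

static int
demo_attach(struct demo_attach *ap)
{
	ap->regs = malloc(64);		/* e.g. register mapping */
	if (ap->regs == NULL)
		goto err;

	ap->locks_ready = 1;		/* e.g. mutex_init()/cv_init() */

	ap->descs = malloc(256);	/* e.g. descriptor/buffer allocation */
	if (ap->descs == NULL)
		goto err_destroy_locks;

	return (0);

	/* unwind in reverse order of acquisition */
err_destroy_locks:
	ap->locks_ready = 0;		/* e.g. mutex_destroy()/cv_destroy() */
	free(ap->regs);
	ap->regs = NULL;
err:
	return (-1);
}

int
main(void)
{
	struct demo_attach a = { 0 };

	if (demo_attach(&a) != 0) {
		fprintf(stderr, "attach failed\n");
		return (1);
	}
	free(a.descs);
	free(a.regs);
	return (0);
}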
5033 struct gem_dev *dp; in gem_do_detach() local
5039 dp = GEM_GET_DEV(dip); in gem_do_detach()
5040 if (dp == NULL) { in gem_do_detach()
5044 rh = dp->regs_handle; in gem_do_detach()
5045 private = dp->private; in gem_do_detach()
5046 priv_size = dp->priv_size; in gem_do_detach()
5048 while (dp) { in gem_do_detach()
5050 if (mac_unregister(dp->mh) != 0) { in gem_do_detach()
5055 if (dp->rx_buf_allocated != dp->rx_buf_freecnt) { in gem_do_detach()
5059 dp->name, __func__, in gem_do_detach()
5060 dp->rx_buf_allocated, dp->rx_buf_freecnt); in gem_do_detach()
5065 gem_mii_stop(dp); in gem_do_detach()
5068 if (dp->misc_flag & GEM_SOFTINTR) { in gem_do_detach()
5069 ddi_remove_softintr(dp->soft_id); in gem_do_detach()
5070 } else if ((dp->misc_flag & GEM_NOINTR) == 0) { in gem_do_detach()
5071 ddi_remove_intr(dip, 0, dp->iblock_cookie); in gem_do_detach()
5074 if (dp->intr_watcher_id) { in gem_do_detach()
5075 while (untimeout(dp->intr_watcher_id) == -1) in gem_do_detach()
5077 dp->intr_watcher_id = 0; in gem_do_detach()
5082 gem_nd_cleanup(dp); in gem_do_detach()
5084 gem_free_memory(dp); in gem_do_detach()
5087 mutex_destroy(&dp->xmitlock); in gem_do_detach()
5088 mutex_destroy(&dp->intrlock); in gem_do_detach()
5089 cv_destroy(&dp->tx_drain_cv); in gem_do_detach()
5092 tmp = dp->next; in gem_do_detach()
5093 kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc)); in gem_do_detach()
5094 dp = tmp; in gem_do_detach()
5112 struct gem_dev *dp; in gem_suspend() local
5117 dp = GEM_GET_DEV(dip); in gem_suspend()
5118 ASSERT(dp); in gem_suspend()
5120 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_suspend()
5122 for (; dp; dp = dp->next) { in gem_suspend()
5125 gem_mii_stop(dp); in gem_suspend()
5128 if (dp->misc_flag & GEM_NOINTR) { in gem_suspend()
5129 if (dp->intr_watcher_id) { in gem_suspend()
5130 while (untimeout(dp->intr_watcher_id) == -1) in gem_suspend()
5133 dp->intr_watcher_id = 0; in gem_suspend()
5137 if (dp->timeout_id) { in gem_suspend()
5138 while (untimeout(dp->timeout_id) == -1) in gem_suspend()
5140 dp->timeout_id = 0; in gem_suspend()
5144 mutex_enter(&dp->intrlock); in gem_suspend()
5145 (void) gem_mac_stop(dp, 0); in gem_suspend()
5146 ASSERT(!dp->mac_active); in gem_suspend()
5149 dp->mac_suspended = B_TRUE; in gem_suspend()
5150 mutex_exit(&dp->intrlock); in gem_suspend()
5161 struct gem_dev *dp; in gem_resume() local
5166 dp = GEM_GET_DEV(dip); in gem_resume()
5167 ASSERT(dp); in gem_resume()
5169 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__)); in gem_resume()
5171 for (; dp; dp = dp->next) { in gem_resume()
5178 ASSERT(!dp->mac_active); in gem_resume()
5181 mutex_enter(&dp->intrlock); in gem_resume()
5183 dp->mac_suspended = B_FALSE; in gem_resume()
5184 dp->nic_state = NIC_STATE_STOPPED; in gem_resume()
5186 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) { in gem_resume()
5188 dp->name, __func__); in gem_resume()
5189 mutex_exit(&dp->intrlock); in gem_resume()
5192 mutex_exit(&dp->intrlock); in gem_resume()
5195 if (dp->gc.gc_mii_init) { in gem_resume()
5196 (void) (*dp->gc.gc_mii_init)(dp); in gem_resume()
5199 if (dp->misc_flag & GEM_NOINTR) { in gem_resume()
5204 dp->intr_watcher_id = in gem_resume()
5206 (void *)dp, drv_usectohz(3*1000000)); in gem_resume()
5210 gem_mii_start(dp); in gem_resume()
5213 mutex_enter(&dp->intrlock); in gem_resume()
5215 if (gem_mac_init(dp) != GEM_SUCCESS) { in gem_resume()
5216 mutex_exit(&dp->intrlock); in gem_resume()
5219 dp->nic_state = NIC_STATE_INITIALIZED; in gem_resume()
5222 if (dp->mii_state == MII_STATE_LINKUP) { in gem_resume()
5223 if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) { in gem_resume()
5224 mutex_exit(&dp->intrlock); in gem_resume()
5230 dp->rxmode |= RXMODE_ENABLE; in gem_resume()
5231 if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) { in gem_resume()
5232 mutex_exit(&dp->intrlock); in gem_resume()
5235 dp->nic_state = NIC_STATE_ONLINE; in gem_resume()
5238 dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout, in gem_resume()
5239 (void *)dp, in gem_resume()
5240 dp->gc.gc_tx_timeout_interval); in gem_resume()
5243 if (dp->mii_state == MII_STATE_LINKUP) { in gem_resume()
5244 if (gem_mac_start(dp) != GEM_SUCCESS) { in gem_resume()
5245 mutex_exit(&dp->intrlock); in gem_resume()
5249 mutex_exit(&dp->intrlock); in gem_resume()
5255 if (dp->intr_watcher_id) { in gem_resume()
5256 while (untimeout(dp->intr_watcher_id) == -1) in gem_resume()
5258 dp->intr_watcher_id = 0; in gem_resume()
5260 mutex_enter(&dp->intrlock); in gem_resume()
5261 (*dp->gc.gc_reset_chip)(dp); in gem_resume()
5262 dp->nic_state = NIC_STATE_STOPPED; in gem_resume()
5263 mutex_exit(&dp->intrlock); in gem_resume()
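Finally, the gem_resume() references show the ordering on resume: reset the chip, reinitialize the PHY, reinitialize the MAC, reprogram media and the rx filter, then go online and restart timers; a failure anywhere along the way puts the chip back into reset and leaves the device stopped. A skeletal, user-space model of that sequence with stubbed callbacks (all names hypothetical):

#include <stdio.h>

enum demo_state { DEMO_STOPPED, DEMO_INITIALIZED, DEMO_ONLINE };

struct demo_dev {
	enum demo_state	state;
	int (*reset_chip)(struct demo_dev *);
	int (*mac_init)(struct demo_dev *);
	int (*set_rx_filter)(struct demo_dev *);
};

static int demo_ok(struct demo_dev *dp) { (void)dp; return (0); }

static int
demo_resume(struct demo_dev *dp)
{
	dp->state = DEMO_STOPPED;

	if (dp->reset_chip(dp) != 0)
		goto err;
	if (dp->mac_init(dp) != 0)
		goto err;
	dp->state = DEMO_INITIALIZED;

	if (dp->set_rx_filter(dp) != 0)
		goto err;
	dp->state = DEMO_ONLINE;
	return (0);

err:
	/* on failure, put the chip back into reset and stay stopped */
	(void) dp->reset_chip(dp);
	dp->state = DEMO_STOPPED;
	return (-1);
}

int
main(void)
{
	struct demo_dev dev = {
		.state = DEMO_STOPPED,
		.reset_chip = demo_ok,
		.mac_init = demo_ok,
		.set_rx_filter = demo_ok,
	};

	printf("resume %s\n", demo_resume(&dev) == 0 ? "ok" : "failed");
	return (0);
}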