Lines Matching refs:sc
404 vioif_link_state(struct vioif_softc *sc) in vioif_link_state() argument
406 if (sc->sc_virtio.sc_features & VIRTIO_NET_F_STATUS) { in vioif_link_state()
407 if (virtio_read_device_config_2(&sc->sc_virtio, in vioif_link_state()
461 struct vioif_softc *sc = buf->rb_sc; in vioif_rx_free() local
463 kmem_cache_free(sc->sc_rxbuf_cache, buf); in vioif_rx_free()
464 atomic_dec_ulong(&sc->sc_rxloan); in vioif_rx_free()
471 struct vioif_softc *sc = user_arg; in vioif_rx_construct() local
475 if (ddi_dma_alloc_handle(sc->sc_dev, &vioif_mapped_buf_dma_attr, in vioif_rx_construct()
477 dev_err(sc->sc_dev, CE_WARN, in vioif_rx_construct()
486 dev_err(sc->sc_dev, CE_WARN, in vioif_rx_construct()
496 dev_err(sc->sc_dev, CE_WARN, "Can't bind rx buffer"); in vioif_rx_construct()
503 buf->rb_sc = sc; in vioif_rx_construct()
532 vioif_free_mems(struct vioif_softc *sc) in vioif_free_mems() argument
536 for (i = 0; i < sc->sc_tx_vq->vq_num; i++) { in vioif_free_mems()
537 struct vioif_tx_buf *buf = &sc->sc_txbufs[i]; in vioif_free_mems()
561 kmem_free(sc->sc_txbufs, sizeof (struct vioif_tx_buf) * in vioif_free_mems()
562 sc->sc_tx_vq->vq_num); in vioif_free_mems()
564 for (i = 0; i < sc->sc_rx_vq->vq_num; i++) { in vioif_free_mems()
565 struct vioif_rx_buf *buf = sc->sc_rxbufs[i]; in vioif_free_mems()
568 kmem_cache_free(sc->sc_rxbuf_cache, buf); in vioif_free_mems()
570 kmem_free(sc->sc_rxbufs, sizeof (struct vioif_rx_buf *) * in vioif_free_mems()
571 sc->sc_rx_vq->vq_num); in vioif_free_mems()
575 vioif_alloc_mems(struct vioif_softc *sc) in vioif_alloc_mems() argument
581 txqsize = sc->sc_tx_vq->vq_num; in vioif_alloc_mems()
582 rxqsize = sc->sc_rx_vq->vq_num; in vioif_alloc_mems()
584 sc->sc_txbufs = kmem_zalloc(sizeof (struct vioif_tx_buf) * txqsize, in vioif_alloc_mems()
586 if (sc->sc_txbufs == NULL) { in vioif_alloc_mems()
587 dev_err(sc->sc_dev, CE_WARN, in vioif_alloc_mems()
597 sc->sc_rxbufs = kmem_zalloc(sizeof (struct vioif_rx_buf *) * rxqsize, in vioif_alloc_mems()
599 if (sc->sc_rxbufs == NULL) { in vioif_alloc_mems()
600 dev_err(sc->sc_dev, CE_WARN, in vioif_alloc_mems()
606 struct vioif_tx_buf *buf = &sc->sc_txbufs[i]; in vioif_alloc_mems()
610 if (ddi_dma_alloc_handle(sc->sc_dev, in vioif_alloc_mems()
614 dev_err(sc->sc_dev, CE_WARN, in vioif_alloc_mems()
624 dev_err(sc->sc_dev, CE_WARN, in vioif_alloc_mems()
635 dev_err(sc->sc_dev, CE_WARN, in vioif_alloc_mems()
662 struct vioif_tx_buf *buf = &sc->sc_txbufs[i]; in vioif_alloc_mems()
682 kmem_free(sc->sc_rxbufs, sizeof (struct vioif_rx_buf *) * rxqsize); in vioif_alloc_mems()
685 kmem_free(sc->sc_txbufs, sizeof (struct vioif_tx_buf) * txqsize); in vioif_alloc_mems()
713 vioif_add_rx(struct vioif_softc *sc, int kmflag) in vioif_add_rx() argument
718 ve = vq_alloc_entry(sc->sc_rx_vq); in vioif_add_rx()
726 sc->sc_norecvbuf++; in vioif_add_rx()
729 buf = sc->sc_rxbufs[ve->qe_index]; in vioif_add_rx()
733 buf = kmem_cache_alloc(sc->sc_rxbuf_cache, kmflag); in vioif_add_rx()
734 sc->sc_rxbufs[ve->qe_index] = buf; in vioif_add_rx()
739 dev_err(sc->sc_dev, CE_WARN, "Can't allocate rx buffer"); in vioif_add_rx()
740 sc->sc_norecvbuf++; in vioif_add_rx()
783 vq_free_entry(sc->sc_rx_vq, ve); in vioif_add_rx()
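The vioif_add_rx() references above show the receive refill pattern: take a free descriptor from the rx virtqueue, then reuse the buffer already parked at that descriptor's index in sc_rxbufs, or pull a fresh one from sc_rxbuf_cache if the previous buffer was loaned upstream. A minimal sketch assembled from the lines listed here; the return values and the omitted descriptor setup are assumptions, not the driver's exact code:

static int
vioif_add_rx(struct vioif_softc *sc, int kmflag)
{
	struct vq_entry *ve;
	struct vioif_rx_buf *buf;

	ve = vq_alloc_entry(sc->sc_rx_vq);
	if (ve == NULL) {
		/* No free descriptors left in the rx ring. */
		sc->sc_norecvbuf++;
		return (-1);
	}

	buf = sc->sc_rxbufs[ve->qe_index];
	if (buf == NULL) {
		/* The previous buffer was loaned upstream; refill the slot. */
		buf = kmem_cache_alloc(sc->sc_rxbuf_cache, kmflag);
		sc->sc_rxbufs[ve->qe_index] = buf;
	}

	if (buf == NULL) {
		dev_err(sc->sc_dev, CE_WARN, "Can't allocate rx buffer");
		sc->sc_norecvbuf++;
		vq_free_entry(sc->sc_rx_vq, ve);
		return (-1);
	}

	/* ... fill in the virtio-net header and body descriptors, push ve ... */
	return (0);
}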
789 vioif_populate_rx(struct vioif_softc *sc, int kmflag) in vioif_populate_rx() argument
795 ret = vioif_add_rx(sc, kmflag); in vioif_populate_rx()
806 virtio_sync_vq(sc->sc_rx_vq); in vioif_populate_rx()
812 vioif_process_rx(struct vioif_softc *sc) in vioif_process_rx() argument
820 while ((ve = virtio_pull_chain(sc->sc_rx_vq, &len))) { in vioif_process_rx()
822 buf = sc->sc_rxbufs[ve->qe_index]; in vioif_process_rx()
826 dev_err(sc->sc_dev, CE_WARN, "RX: Chain too small: %u", in vioif_process_rx()
828 sc->sc_ierrors++; in vioif_process_rx()
839 if (len < sc->sc_rxcopy_thresh) { in vioif_process_rx()
842 sc->sc_norecvbuf++; in vioif_process_rx()
843 sc->sc_ierrors++; in vioif_process_rx()
859 sc->sc_norecvbuf++; in vioif_process_rx()
860 sc->sc_ierrors++; in vioif_process_rx()
867 atomic_inc_ulong(&sc->sc_rxloan); in vioif_process_rx()
872 sc->sc_rxbufs[ve->qe_index] = NULL; in vioif_process_rx()
881 sc->sc_multircv++; in vioif_process_rx()
883 sc->sc_brdcstrcv++; in vioif_process_rx()
886 sc->sc_rbytes += len; in vioif_process_rx()
887 sc->sc_ipackets++; in vioif_process_rx()
890 mac_rx(sc->sc_mac_handle, NULL, mp); in vioif_process_rx()
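The vioif_process_rx() lines show the receive fast/slow split: a frame shorter than sc_rxcopy_thresh is copied into a fresh mblk so the DMA buffer stays on the ring, while a larger frame is loaned upstream, which is why sc_rxloan is incremented and the sc_rxbufs slot is cleared (vioif_rx_free() undoes both when the loaned mblk is freed). A hedged sketch of just that decision, taken from inside the pull-chain loop; rx_data and rb_frtn are hypothetical stand-ins for the buffer's mapped data address and its frtn_t, not the driver's real field names:

	if (len < sc->sc_rxcopy_thresh) {
		/* Copy path: small frame, keep the rx buffer for reuse. */
		mp = allocb(len, 0);
		if (mp == NULL) {
			sc->sc_norecvbuf++;
			sc->sc_ierrors++;
			break;
		}
		bcopy(rx_data, mp->b_rptr, len);
		mp->b_wptr = mp->b_rptr + len;
	} else {
		/* Loan path: hand the DMA buffer itself to the stack. */
		mp = desballoc(rx_data, len, 0, &buf->rb_frtn);
		if (mp == NULL) {
			sc->sc_norecvbuf++;
			sc->sc_ierrors++;
			break;
		}
		mp->b_wptr = mp->b_rptr + len;

		atomic_inc_ulong(&sc->sc_rxloan);
		/* The buffer is upstream now; force a refill for this slot. */
		sc->sc_rxbufs[ve->qe_index] = NULL;
	}

	sc->sc_rbytes += len;
	sc->sc_ipackets++;
	mac_rx(sc->sc_mac_handle, NULL, mp);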
898 vioif_reclaim_used_tx(struct vioif_softc *sc) in vioif_reclaim_used_tx() argument
906 while ((ve = virtio_pull_chain(sc->sc_tx_vq, &len))) { in vioif_reclaim_used_tx()
910 buf = &sc->sc_txbufs[ve->qe_index]; in vioif_reclaim_used_tx()
928 if (sc->sc_tx_stopped && i) { in vioif_reclaim_used_tx()
929 sc->sc_tx_stopped = 0; in vioif_reclaim_used_tx()
930 mac_tx_update(sc->sc_mac_handle); in vioif_reclaim_used_tx()
937 vioif_tx_inline(struct vioif_softc *sc, struct vq_entry *ve, mblk_t *mp, in vioif_tx_inline() argument
941 buf = &sc->sc_txbufs[ve->qe_index]; in vioif_tx_inline()
955 vioif_tx_lazy_handle_alloc(struct vioif_softc *sc, struct vioif_tx_buf *buf, in vioif_tx_lazy_handle_alloc() argument
961 ret = ddi_dma_alloc_handle(sc->sc_dev, in vioif_tx_lazy_handle_alloc()
965 dev_err(sc->sc_dev, CE_WARN, in vioif_tx_lazy_handle_alloc()
974 vioif_tx_external(struct vioif_softc *sc, struct vq_entry *ve, mblk_t *mp, in vioif_tx_external() argument
984 buf = &sc->sc_txbufs[ve->qe_index]; in vioif_tx_external()
1007 ret = vioif_tx_lazy_handle_alloc(sc, buf, i); in vioif_tx_external()
1009 sc->sc_notxbuf++; in vioif_tx_external()
1010 sc->sc_oerrors++; in vioif_tx_external()
1020 sc->sc_oerrors++; in vioif_tx_external()
1021 dev_err(sc->sc_dev, CE_NOTE, in vioif_tx_external()
1028 dev_err(sc->sc_dev, CE_NOTE, in vioif_tx_external()
1031 sc->sc_notxbuf++; in vioif_tx_external()
1032 sc->sc_oerrors++; in vioif_tx_external()
1064 vioif_send(struct vioif_softc *sc, mblk_t *mp) in vioif_send() argument
1082 if (sc->sc_tx_tso4) { in vioif_send()
1087 ve = vq_alloc_entry(sc->sc_tx_vq); in vioif_send()
1090 sc->sc_notxbuf++; in vioif_send()
1094 buf = &sc->sc_txbufs[ve->qe_index]; in vioif_send()
1111 ASSERT(sc->sc_tx_csum); in vioif_send()
1141 sc->sc_multixmt++; in vioif_send()
1143 sc->sc_brdcstxmt++; in vioif_send()
1150 if (msg_size < sc->sc_txcopy_thresh) { in vioif_send()
1151 vioif_tx_inline(sc, ve, mp, msg_size); in vioif_send()
1154 ret = vioif_tx_external(sc, ve, mp, msg_size); in vioif_send()
1161 sc->sc_opackets++; in vioif_send()
1162 sc->sc_obytes += msg_size; in vioif_send()
1168 vq_free_entry(sc->sc_tx_vq, ve); in vioif_send()
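vioif_send() applies the same threshold idea on transmit: grab a tx descriptor, build the virtio-net header (including the checksum/LSO flags guarded by sc_tx_csum and sc_tx_tso4 above), then either copy the packet inline into the preallocated tx buffer or DMA-bind the mblk externally, freeing the descriptor again if that fails. A condensed sketch of the control flow; the boolean return conventions, the msgdsize() use, and the error-path details are assumptions, and the header setup is elided:

static boolean_t
vioif_send(struct vioif_softc *sc, mblk_t *mp)
{
	struct vq_entry *ve;
	size_t msg_size = msgdsize(mp);

	ve = vq_alloc_entry(sc->sc_tx_vq);
	if (ve == NULL) {
		/* Ring is full: the caller stops the MAC tx queue. */
		sc->sc_notxbuf++;
		return (B_FALSE);
	}

	/* ... build struct virtio_net_hdr, set checksum and LSO flags ... */

	if (msg_size < sc->sc_txcopy_thresh) {
		/* Small packet: copy it into the preallocated tx buffer. */
		vioif_tx_inline(sc, ve, mp, msg_size);
	} else {
		/* Large packet: bind the mblk fragments for DMA. */
		if (vioif_tx_external(sc, ve, mp, msg_size) != 0) {
			vq_free_entry(sc->sc_tx_vq, ve);
			return (B_FALSE);
		}
	}

	/* ... push the descriptor chain to the device ... */
	sc->sc_opackets++;
	sc->sc_obytes += msg_size;
	return (B_TRUE);
}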
1182 struct vioif_softc *sc = arg; in vioif_tx() local
1189 if (!vioif_send(sc, mp)) { in vioif_tx()
1190 sc->sc_tx_stopped = 1; in vioif_tx()
1203 struct vioif_softc *sc = arg; in vioif_start() local
1205 mac_link_update(sc->sc_mac_handle, in vioif_start()
1206 vioif_link_state(sc)); in vioif_start()
1208 virtio_start_vq_intr(sc->sc_rx_vq); in vioif_start()
1216 struct vioif_softc *sc = arg; in vioif_stop() local
1218 virtio_stop_vq_intr(sc->sc_rx_vq); in vioif_stop()
1225 struct vioif_softc *sc = arg; in vioif_stat() local
1229 *val = sc->sc_ierrors; in vioif_stat()
1232 *val = sc->sc_oerrors; in vioif_stat()
1235 *val = sc->sc_multircv; in vioif_stat()
1238 *val = sc->sc_brdcstrcv; in vioif_stat()
1241 *val = sc->sc_multixmt; in vioif_stat()
1244 *val = sc->sc_brdcstxmt; in vioif_stat()
1247 *val = sc->sc_ipackets; in vioif_stat()
1250 *val = sc->sc_rbytes; in vioif_stat()
1253 *val = sc->sc_opackets; in vioif_stat()
1256 *val = sc->sc_obytes; in vioif_stat()
1259 *val = sc->sc_norecvbuf; in vioif_stat()
1262 *val = sc->sc_notxbuf; in vioif_stat()
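These vioif_stat() lines are the mc_getstat() entry point: every GLDv3 statistic is answered directly from a counter that the rx/tx paths above maintain in the softc. A sketch of the mapping, assuming the conventional MAC_STAT_* case labels for the counters shown (only a representative subset is spelled out):

static int
vioif_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct vioif_softc *sc = arg;

	switch (stat) {
	case MAC_STAT_IERRORS:
		*val = sc->sc_ierrors;
		break;
	case MAC_STAT_OERRORS:
		*val = sc->sc_oerrors;
		break;
	case MAC_STAT_IPACKETS:
		*val = sc->sc_ipackets;
		break;
	case MAC_STAT_RBYTES:
		*val = sc->sc_rbytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = sc->sc_opackets;
		break;
	case MAC_STAT_OBYTES:
		*val = sc->sc_obytes;
		break;
	case MAC_STAT_NORCVBUF:
		*val = sc->sc_norecvbuf;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_notxbuf;
		break;
	/* MULTIRCV, BRDCSTRCV, MULTIXMT, BRDCSTXMT follow the same pattern. */
	default:
		return (ENOTSUP);
	}

	return (0);
}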
1281 vioif_set_prop_private(struct vioif_softc *sc, const char *pr_name, in vioif_set_prop_private() argument
1297 sc->sc_txcopy_thresh = result; in vioif_set_prop_private()
1308 sc->sc_rxcopy_thresh = result; in vioif_set_prop_private()
1317 struct vioif_softc *sc = arg; in vioif_setprop() local
1329 err = mac_maxsdu_update(sc->sc_mac_handle, *new_mtu); in vioif_setprop()
1335 err = vioif_set_prop_private(sc, pr_name, in vioif_setprop()
1348 vioif_get_prop_private(struct vioif_softc *sc, const char *pr_name, in vioif_get_prop_private() argument
1356 value = sc->sc_txcopy_thresh; in vioif_get_prop_private()
1362 value = sc->sc_rxcopy_thresh; in vioif_get_prop_private()
1377 struct vioif_softc *sc = arg; in vioif_getprop() local
1382 err = vioif_get_prop_private(sc, pr_name, in vioif_getprop()
1395 struct vioif_softc *sc = arg; in vioif_propinfo() local
1408 value = sc->sc_txcopy_thresh; in vioif_propinfo()
1411 value = sc->sc_rxcopy_thresh; in vioif_propinfo()
1426 struct vioif_softc *sc = arg; in vioif_getcapab() local
1430 if (sc->sc_tx_csum) { in vioif_getcapab()
1438 if (sc->sc_tx_tso4) { in vioif_getcapab()
1473 vioif_show_features(struct vioif_softc *sc, const char *prefix, in vioif_show_features() argument
1488 dev_err(sc->sc_dev, CE_NOTE, "!%s Vioif (%b)", buf, features, in vioif_show_features()
1497 vioif_dev_features(struct vioif_softc *sc) in vioif_dev_features() argument
1501 host_features = virtio_negotiate_features(&sc->sc_virtio, in vioif_dev_features()
1510 vioif_show_features(sc, "Host features: ", host_features); in vioif_dev_features()
1511 vioif_show_features(sc, "Negotiated features: ", in vioif_dev_features()
1512 sc->sc_virtio.sc_features); in vioif_dev_features()
1514 if (!(sc->sc_virtio.sc_features & VIRTIO_F_RING_INDIRECT_DESC)) { in vioif_dev_features()
1515 dev_err(sc->sc_dev, CE_NOTE, in vioif_dev_features()
1524 vioif_has_feature(struct vioif_softc *sc, uint32_t feature) in vioif_has_feature() argument
1526 return (virtio_has_feature(&sc->sc_virtio, feature)); in vioif_has_feature()
1530 vioif_set_mac(struct vioif_softc *sc) in vioif_set_mac() argument
1535 virtio_write_device_config_1(&sc->sc_virtio, in vioif_set_mac()
1536 VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]); in vioif_set_mac()
1542 vioif_get_mac(struct vioif_softc *sc) in vioif_get_mac() argument
1545 if (sc->sc_virtio.sc_features & VIRTIO_NET_F_MAC) { in vioif_get_mac()
1547 sc->sc_mac[i] = virtio_read_device_config_1( in vioif_get_mac()
1548 &sc->sc_virtio, in vioif_get_mac()
1551 dev_err(sc->sc_dev, CE_NOTE, "Got MAC address from host: %s", in vioif_get_mac()
1552 ether_sprintf((struct ether_addr *)sc->sc_mac)); in vioif_get_mac()
1555 (void) random_get_pseudo_bytes(sc->sc_mac, ETHERADDRL); in vioif_get_mac()
1557 sc->sc_mac[0] &= ~1; in vioif_get_mac()
1559 sc->sc_mac[0] |= 2; in vioif_get_mac()
1561 vioif_set_mac(sc); in vioif_get_mac()
1563 dev_err(sc->sc_dev, CE_NOTE, in vioif_get_mac()
1565 ether_sprintf((struct ether_addr *)sc->sc_mac)); in vioif_get_mac()
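vioif_get_mac() reads the station address from virtio config space when the host offered VIRTIO_NET_F_MAC; otherwise it generates a random address, clears the multicast/group bit, sets the locally administered bit, and writes the result back through vioif_set_mac() so the host sees the same address. A compact sketch of the fallback branch (log messages omitted):

	/* Host did not supply a MAC: make up a locally administered one. */
	(void) random_get_pseudo_bytes(sc->sc_mac, ETHERADDRL);
	sc->sc_mac[0] &= ~1;	/* not a group/multicast address */
	sc->sc_mac[0] |= 2;	/* locally administered */
	vioif_set_mac(sc);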
1577 struct vioif_softc *sc = container_of(vsc, in vioif_rx_handler() local
1580 (void) vioif_process_rx(sc); in vioif_rx_handler()
1582 (void) vioif_populate_rx(sc, KM_NOSLEEP); in vioif_rx_handler()
1592 struct vioif_softc *sc = container_of(vsc, in vioif_tx_handler() local
1595 vioif_reclaim_used_tx(sc); in vioif_tx_handler()
1600 vioif_register_ints(struct vioif_softc *sc) in vioif_register_ints() argument
1610 ret = virtio_register_ints(&sc->sc_virtio, NULL, vioif_vq_h); in vioif_register_ints()
1617 vioif_check_features(struct vioif_softc *sc) in vioif_check_features() argument
1619 if (vioif_has_feature(sc, VIRTIO_NET_F_CSUM)) { in vioif_check_features()
1621 sc->sc_tx_csum = 1; in vioif_check_features()
1622 sc->sc_rx_csum = 1; in vioif_check_features()
1624 if (!vioif_has_feature(sc, VIRTIO_NET_F_GUEST_CSUM)) { in vioif_check_features()
1625 sc->sc_rx_csum = 0; in vioif_check_features()
1629 if (vioif_has_feature(sc, VIRTIO_NET_F_HOST_TSO4)) { in vioif_check_features()
1631 sc->sc_tx_tso4 = 1; in vioif_check_features()
1639 if (!vioif_has_feature(sc, VIRTIO_NET_F_HOST_ECN)) { in vioif_check_features()
1640 dev_err(sc->sc_dev, CE_NOTE, in vioif_check_features()
1643 sc->sc_tx_tso4 = 0; in vioif_check_features()
1655 struct vioif_softc *sc; in vioif_attach() local
1675 sc = kmem_zalloc(sizeof (struct vioif_softc), KM_SLEEP); in vioif_attach()
1676 ddi_set_driver_private(devinfo, sc); in vioif_attach()
1678 vsc = &sc->sc_virtio; in vioif_attach()
1681 sc->sc_dev = devinfo; in vioif_attach()
1687 sc->sc_intrstat = kstat_create("vioif", instance, "intr", "controller", in vioif_attach()
1689 if (sc->sc_intrstat == NULL) { in vioif_attach()
1693 kstat_install(sc->sc_intrstat); in vioif_attach()
1697 (caddr_t *)&sc->sc_virtio.sc_io_addr, in vioif_attach()
1698 0, 0, &vioif_attr, &sc->sc_virtio.sc_ioh); in vioif_attach()
1704 virtio_device_reset(&sc->sc_virtio); in vioif_attach()
1705 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_ACK); in vioif_attach()
1706 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER); in vioif_attach()
1708 ret = vioif_dev_features(sc); in vioif_attach()
1712 vsc->sc_nvqs = vioif_has_feature(sc, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2; in vioif_attach()
1715 sc->sc_rxbuf_cache = kmem_cache_create(cache_name, in vioif_attach()
1717 vioif_rx_destruct, NULL, sc, NULL, KM_SLEEP); in vioif_attach()
1718 if (sc->sc_rxbuf_cache == NULL) { in vioif_attach()
1719 dev_err(sc->sc_dev, CE_WARN, "Can't allocate the buffer cache"); in vioif_attach()
1723 ret = vioif_register_ints(sc); in vioif_attach()
1725 dev_err(sc->sc_dev, CE_WARN, in vioif_attach()
1734 vioif_get_mac(sc); in vioif_attach()
1736 sc->sc_rx_vq = virtio_alloc_vq(&sc->sc_virtio, 0, in vioif_attach()
1738 if (!sc->sc_rx_vq) in vioif_attach()
1740 virtio_stop_vq_intr(sc->sc_rx_vq); in vioif_attach()
1742 sc->sc_tx_vq = virtio_alloc_vq(&sc->sc_virtio, 1, in vioif_attach()
1744 if (!sc->sc_tx_vq) in vioif_attach()
1746 virtio_stop_vq_intr(sc->sc_tx_vq); in vioif_attach()
1748 if (vioif_has_feature(sc, VIRTIO_NET_F_CTRL_VQ)) { in vioif_attach()
1749 sc->sc_ctrl_vq = virtio_alloc_vq(&sc->sc_virtio, 2, in vioif_attach()
1751 if (!sc->sc_ctrl_vq) { in vioif_attach()
1754 virtio_stop_vq_intr(sc->sc_ctrl_vq); in vioif_attach()
1757 virtio_set_status(&sc->sc_virtio, in vioif_attach()
1760 sc->sc_rxloan = 0; in vioif_attach()
1763 sc->sc_rxcopy_thresh = 300; in vioif_attach()
1764 sc->sc_txcopy_thresh = 300; in vioif_attach()
1765 sc->sc_mtu = ETHERMTU; in vioif_attach()
1767 vioif_check_features(sc); in vioif_attach()
1769 if (vioif_alloc_mems(sc)) in vioif_attach()
1778 macp->m_driver = sc; in vioif_attach()
1780 macp->m_src_addr = sc->sc_mac; in vioif_attach()
1783 macp->m_max_sdu = sc->sc_mtu; in vioif_attach()
1787 sc->sc_macp = macp; in vioif_attach()
1790 (void) vioif_populate_rx(sc, KM_SLEEP); in vioif_attach()
1792 ret = mac_register(macp, &sc->sc_mac_handle); in vioif_attach()
1799 ret = virtio_enable_ints(&sc->sc_virtio); in vioif_attach()
1805 mac_link_update(sc->sc_mac_handle, LINK_STATE_UP); in vioif_attach()
1809 (void) mac_unregister(sc->sc_mac_handle); in vioif_attach()
1813 vioif_free_mems(sc); in vioif_attach()
1815 virtio_release_ints(&sc->sc_virtio); in vioif_attach()
1816 if (sc->sc_ctrl_vq) in vioif_attach()
1817 virtio_free_vq(sc->sc_ctrl_vq); in vioif_attach()
1819 virtio_free_vq(sc->sc_tx_vq); in vioif_attach()
1821 virtio_free_vq(sc->sc_rx_vq); in vioif_attach()
1824 kmem_cache_destroy(sc->sc_rxbuf_cache); in vioif_attach()
1827 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_FAILED); in vioif_attach()
1828 ddi_regs_map_free(&sc->sc_virtio.sc_ioh); in vioif_attach()
1831 kstat_delete(sc->sc_intrstat); in vioif_attach()
1832 kmem_free(sc, sizeof (struct vioif_softc)); in vioif_attach()
1840 struct vioif_softc *sc; in vioif_detach() local
1842 if ((sc = ddi_get_driver_private(devinfo)) == NULL) in vioif_detach()
1857 if (sc->sc_rxloan) { in vioif_detach()
1863 virtio_stop_vq_intr(sc->sc_rx_vq); in vioif_detach()
1864 virtio_stop_vq_intr(sc->sc_tx_vq); in vioif_detach()
1866 virtio_release_ints(&sc->sc_virtio); in vioif_detach()
1868 if (mac_unregister(sc->sc_mac_handle)) { in vioif_detach()
1872 mac_free(sc->sc_macp); in vioif_detach()
1874 vioif_free_mems(sc); in vioif_detach()
1875 virtio_free_vq(sc->sc_rx_vq); in vioif_detach()
1876 virtio_free_vq(sc->sc_tx_vq); in vioif_detach()
1878 virtio_device_reset(&sc->sc_virtio); in vioif_detach()
1880 ddi_regs_map_free(&sc->sc_virtio.sc_ioh); in vioif_detach()
1882 kmem_cache_destroy(sc->sc_rxbuf_cache); in vioif_detach()
1883 kstat_delete(sc->sc_intrstat); in vioif_detach()
1884 kmem_free(sc, sizeof (struct vioif_softc)); in vioif_detach()
1892 struct vioif_softc *sc; in vioif_quiesce() local
1894 if ((sc = ddi_get_driver_private(devinfo)) == NULL) in vioif_quiesce()
1897 virtio_stop_vq_intr(sc->sc_rx_vq); in vioif_quiesce()
1898 virtio_stop_vq_intr(sc->sc_tx_vq); in vioif_quiesce()
1899 virtio_device_reset(&sc->sc_virtio); in vioif_quiesce()
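vioif_quiesce(), like vioif_detach() above it, recovers the softc with ddi_get_driver_private(); because quiesce(9E) runs with the system effectively single-threaded for fast reboot, it must not take locks or allocate memory, so it only silences the queues and resets the device. A sketch of the whole entry point as the references suggest; DDI_SUCCESS/DDI_FAILURE are the conventional return values:

static int
vioif_quiesce(dev_info_t *devinfo)
{
	struct vioif_softc *sc;

	if ((sc = ddi_get_driver_private(devinfo)) == NULL)
		return (DDI_FAILURE);

	/* No locks, no allocations here: just stop the device. */
	virtio_stop_vq_intr(sc->sc_rx_vq);
	virtio_stop_vq_intr(sc->sc_tx_vq);
	virtio_device_reset(&sc->sc_virtio);

	return (DDI_SUCCESS);
}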