Lines matching refs:xnbp: every line in xnb.c (the illumos Xen network backend, "netback", driver) that references the per-instance soft state pointer xnbp (an xnb_t *). Each entry gives the source line number, the matching line, and the enclosing function.

159 xnb_t *xnbp; in xnb_ks_aux_update() local
165 xnbp = ksp->ks_private; in xnb_ks_aux_update()
172 (knp++)->value.ui64 = xnbp->xnb_stat_rx_cksum_deferred; in xnb_ks_aux_update()
173 (knp++)->value.ui64 = xnbp->xnb_stat_tx_cksum_no_need; in xnb_ks_aux_update()
174 (knp++)->value.ui64 = xnbp->xnb_stat_rx_rsp_notok; in xnb_ks_aux_update()
175 (knp++)->value.ui64 = xnbp->xnb_stat_tx_notify_deferred; in xnb_ks_aux_update()
176 (knp++)->value.ui64 = xnbp->xnb_stat_tx_notify_sent; in xnb_ks_aux_update()
177 (knp++)->value.ui64 = xnbp->xnb_stat_rx_notify_deferred; in xnb_ks_aux_update()
178 (knp++)->value.ui64 = xnbp->xnb_stat_rx_notify_sent; in xnb_ks_aux_update()
179 (knp++)->value.ui64 = xnbp->xnb_stat_tx_too_early; in xnb_ks_aux_update()
180 (knp++)->value.ui64 = xnbp->xnb_stat_rx_too_early; in xnb_ks_aux_update()
181 (knp++)->value.ui64 = xnbp->xnb_stat_rx_allocb_failed; in xnb_ks_aux_update()
182 (knp++)->value.ui64 = xnbp->xnb_stat_tx_allocb_failed; in xnb_ks_aux_update()
183 (knp++)->value.ui64 = xnbp->xnb_stat_rx_foreign_page; in xnb_ks_aux_update()
184 (knp++)->value.ui64 = xnbp->xnb_stat_mac_full; in xnb_ks_aux_update()
185 (knp++)->value.ui64 = xnbp->xnb_stat_spurious_intr; in xnb_ks_aux_update()
186 (knp++)->value.ui64 = xnbp->xnb_stat_allocation_success; in xnb_ks_aux_update()
187 (knp++)->value.ui64 = xnbp->xnb_stat_allocation_failure; in xnb_ks_aux_update()
188 (knp++)->value.ui64 = xnbp->xnb_stat_small_allocation_success; in xnb_ks_aux_update()
189 (knp++)->value.ui64 = xnbp->xnb_stat_small_allocation_failure; in xnb_ks_aux_update()
190 (knp++)->value.ui64 = xnbp->xnb_stat_other_allocation_failure; in xnb_ks_aux_update()
191 (knp++)->value.ui64 = xnbp->xnb_stat_rx_pagebndry_crossed; in xnb_ks_aux_update()
192 (knp++)->value.ui64 = xnbp->xnb_stat_rx_cpoparea_grown; in xnb_ks_aux_update()
193 (knp++)->value.ui64 = xnbp->xnb_stat_csum_hardware; in xnb_ks_aux_update()
194 (knp++)->value.ui64 = xnbp->xnb_stat_csum_software; in xnb_ks_aux_update()
195 (knp++)->value.ui64 = xnbp->xnb_stat_tx_overflow_page; in xnb_ks_aux_update()
196 (knp++)->value.ui64 = xnbp->xnb_stat_tx_unexpected_flags; in xnb_ks_aux_update()
202 xnb_ks_init(xnb_t *xnbp) in xnb_ks_init() argument
212 xnbp->xnb_kstat_aux = kstat_create(ddi_driver_name(xnbp->xnb_devinfo), in xnb_ks_init()
213 ddi_get_instance(xnbp->xnb_devinfo), "aux_statistics", "net", in xnb_ks_init()
215 if (xnbp->xnb_kstat_aux == NULL) in xnb_ks_init()
218 xnbp->xnb_kstat_aux->ks_private = xnbp; in xnb_ks_init()
219 xnbp->xnb_kstat_aux->ks_update = xnb_ks_aux_update; in xnb_ks_init()
221 knp = xnbp->xnb_kstat_aux->ks_data; in xnb_ks_init()
230 kstat_install(xnbp->xnb_kstat_aux); in xnb_ks_init()
236 xnb_ks_free(xnb_t *xnbp) in xnb_ks_free() argument
238 kstat_delete(xnbp->xnb_kstat_aux); in xnb_ks_free()
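
The fragments above (lines 159-238) show the stock illumos named-kstat pattern: xnb_ks_init() creates an "aux_statistics" kstat named after the driver and instance, stashes xnbp in ks_private, registers an update callback, and xnb_ks_aux_update() copies each xnb_stat_* counter into consecutive kstat_named_t slots in the same order the names were initialized. A minimal sketch of that pattern follows, with a made-up two-counter driver ("foo") standing in for the full xnb stat list; the calls mirror the listing, not the complete source.

#include <sys/types.h>
#include <sys/kstat.h>
#include <sys/sunddi.h>

/* Reduced stand-in for xnb_t: only what the kstat code touches. */
typedef struct foo {
	dev_info_t	*f_devinfo;
	kstat_t		*f_kstat_aux;
	uint64_t	f_stat_alloc_ok;
	uint64_t	f_stat_alloc_fail;
} foo_t;

static char *aux_statistics[] = { "alloc_ok", "alloc_fail" };

static int
foo_ks_aux_update(kstat_t *ksp, int flag)
{
	foo_t *fp;
	kstat_named_t *knp;

	if (flag != KSTAT_READ)
		return (EACCES);	/* the counters are read-only */

	fp = ksp->ks_private;
	knp = ksp->ks_data;

	/* Order must match the kstat_named_init() calls below. */
	(knp++)->value.ui64 = fp->f_stat_alloc_ok;
	(knp++)->value.ui64 = fp->f_stat_alloc_fail;

	return (0);
}

static boolean_t
foo_ks_init(foo_t *fp)
{
	int i;
	kstat_named_t *knp;

	fp->f_kstat_aux = kstat_create(ddi_driver_name(fp->f_devinfo),
	    ddi_get_instance(fp->f_devinfo), "aux_statistics", "net",
	    KSTAT_TYPE_NAMED, sizeof (aux_statistics) / sizeof (char *), 0);
	if (fp->f_kstat_aux == NULL)
		return (B_FALSE);

	fp->f_kstat_aux->ks_private = fp;
	fp->f_kstat_aux->ks_update = foo_ks_aux_update;

	knp = fp->f_kstat_aux->ks_data;
	for (i = 0; i < sizeof (aux_statistics) / sizeof (char *); i++)
		kstat_named_init(knp++, aux_statistics[i],
		    KSTAT_DATA_UINT64);

	kstat_install(fp->f_kstat_aux);
	return (B_TRUE);
}

static void
foo_ks_fini(foo_t *fp)
{
	kstat_delete(fp->f_kstat_aux);
}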
245 xnb_software_csum(xnb_t *xnbp, mblk_t *mp) in xnb_software_csum() argument
247 _NOTE(ARGUNUSED(xnbp)); in xnb_software_csum()
259 xnb_process_cksum_flags(xnb_t *xnbp, mblk_t *mp, uint32_t capab) in xnb_process_cksum_flags() argument
346 xnbp->xnb_stat_csum_hardware++; in xnb_process_cksum_flags()
378 xnbp->xnb_stat_csum_hardware++; in xnb_process_cksum_flags()
397 xnbp->xnb_stat_csum_software++; in xnb_process_cksum_flags()
399 return (xnb_software_csum(xnbp, mp)); in xnb_process_cksum_flags()
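
Lines 245-399 are the checksum plumbing: xnb_process_cksum_flags() tries to satisfy the peer's checksum request with hardware offload metadata (counted by xnb_stat_csum_hardware) and only falls back to computing it in software (xnb_stat_csum_software, via xnb_software_csum()). A sketch of that dispatch, assuming mac_hcksum_set() and mac_fix_cksum() from the illumos mac layer are available; FOO_CAPAB_FULLCKSUM is a hypothetical capability bit, and the real function also parses the packet to decide which offload form applies.

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/pattr.h>	/* HCK_FULLCKSUM */
#include <sys/mac.h>	/* mac_hcksum_set(), mac_fix_cksum() */

#define	FOO_CAPAB_FULLCKSUM	0x1	/* hypothetical capability bit */

typedef struct foo {
	uint64_t	f_stat_csum_hardware;
	uint64_t	f_stat_csum_software;
} foo_t;

static mblk_t *
foo_software_csum(mblk_t *mp)
{
	/*
	 * Assumption: mac_fix_cksum() computes any requested checksum
	 * in software and strips the offload request from the message.
	 */
	return (mac_fix_cksum(mp));
}

static mblk_t *
foo_process_cksum_flags(foo_t *fp, mblk_t *mp, uint32_t capab)
{
	/*
	 * If the underlying path can complete the checksum for us,
	 * attach the offload request to the message and let the
	 * hardware (or dom0 bridge) do the work.
	 */
	if (capab & FOO_CAPAB_FULLCKSUM) {
		mac_hcksum_set(mp, 0, 0, 0, 0, HCK_FULLCKSUM);
		fp->f_stat_csum_hardware++;
		return (mp);
	}

	/* No usable offload: compute the checksum in software. */
	fp->f_stat_csum_software++;
	return (foo_software_csum(mp));
}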
405 xnb_t *xnbp; in xnb_attach() local
409 xnbp = kmem_zalloc(sizeof (*xnbp), KM_SLEEP); in xnb_attach()
411 xnbp->xnb_flavour = flavour; in xnb_attach()
412 xnbp->xnb_flavour_data = flavour_data; in xnb_attach()
413 xnbp->xnb_devinfo = dip; in xnb_attach()
414 xnbp->xnb_evtchn = INVALID_EVTCHN; in xnb_attach()
415 xnbp->xnb_irq = B_FALSE; in xnb_attach()
416 xnbp->xnb_tx_ring_handle = INVALID_GRANT_HANDLE; in xnb_attach()
417 xnbp->xnb_rx_ring_handle = INVALID_GRANT_HANDLE; in xnb_attach()
418 xnbp->xnb_connected = B_FALSE; in xnb_attach()
419 xnbp->xnb_hotplugged = B_FALSE; in xnb_attach()
420 xnbp->xnb_detachable = B_FALSE; in xnb_attach()
421 xnbp->xnb_peer = xvdi_get_oeid(dip); in xnb_attach()
422 xnbp->xnb_be_status = XNB_STATE_INIT; in xnb_attach()
423 xnbp->xnb_fe_status = XNB_STATE_INIT; in xnb_attach()
425 xnbp->xnb_tx_buf_count = 0; in xnb_attach()
427 xnbp->xnb_rx_hv_copy = B_FALSE; in xnb_attach()
428 xnbp->xnb_multicast_control = B_FALSE; in xnb_attach()
430 xnbp->xnb_rx_va = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP); in xnb_attach()
431 ASSERT(xnbp->xnb_rx_va != NULL); in xnb_attach()
433 if (ddi_get_iblock_cookie(dip, 0, &xnbp->xnb_icookie) in xnb_attach()
438 xnbp->xnb_rx_cpop = NULL; in xnb_attach()
439 xnbp->xnb_rx_cpop_count = 0; in xnb_attach()
441 mutex_init(&xnbp->xnb_tx_lock, NULL, MUTEX_DRIVER, in xnb_attach()
442 xnbp->xnb_icookie); in xnb_attach()
443 mutex_init(&xnbp->xnb_rx_lock, NULL, MUTEX_DRIVER, in xnb_attach()
444 xnbp->xnb_icookie); in xnb_attach()
445 mutex_init(&xnbp->xnb_state_lock, NULL, MUTEX_DRIVER, in xnb_attach()
446 xnbp->xnb_icookie); in xnb_attach()
449 ddi_set_driver_private(dip, xnbp); in xnb_attach()
452 xnbp->xnb_tx_buf_cache = kmem_cache_create(cachename, in xnb_attach()
455 NULL, xnbp, NULL, 0); in xnb_attach()
456 if (xnbp->xnb_tx_buf_cache == NULL) in xnb_attach()
459 if (!xnb_ks_init(xnbp)) in xnb_attach()
506 xnb_ks_free(xnbp); in xnb_attach()
509 kmem_cache_destroy(xnbp->xnb_tx_buf_cache); in xnb_attach()
512 mutex_destroy(&xnbp->xnb_state_lock); in xnb_attach()
513 mutex_destroy(&xnbp->xnb_rx_lock); in xnb_attach()
514 mutex_destroy(&xnbp->xnb_tx_lock); in xnb_attach()
517 vmem_free(heap_arena, xnbp->xnb_rx_va, PAGESIZE); in xnb_attach()
518 kmem_free(xnbp, sizeof (*xnbp)); in xnb_attach()
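
The xnb_attach() fragments (lines 405-518) show the canonical DDI attach shape: zeroed soft state from kmem_zalloc(KM_SLEEP), field defaults, the iblock cookie fetched before any mutex_init() so locks taken from interrupt context are initialized correctly, then resources acquired in order with numbered failure labels that unwind in exact reverse (lines 506-518). xnb_detach() at lines 525-552 is the same teardown run unconditionally. A condensed sketch of the pattern with hypothetical names:

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/ksynch.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

typedef struct foo {
	dev_info_t	*f_devinfo;
	ddi_iblock_cookie_t f_icookie;
	kmutex_t	f_tx_lock;
	kmutex_t	f_rx_lock;
	kmem_cache_t	*f_tx_buf_cache;
} foo_t;

static int
foo_attach(dev_info_t *dip)
{
	foo_t *fp;
	char cachename[32];

	/* KM_SLEEP: attach may block, so this allocation cannot fail. */
	fp = kmem_zalloc(sizeof (*fp), KM_SLEEP);
	fp->f_devinfo = dip;

	/*
	 * The iblock cookie must exist before mutex_init(): any lock
	 * also taken in the interrupt handler is initialized with it.
	 */
	if (ddi_get_iblock_cookie(dip, 0, &fp->f_icookie) != DDI_SUCCESS)
		goto failure;

	mutex_init(&fp->f_tx_lock, NULL, MUTEX_DRIVER, fp->f_icookie);
	mutex_init(&fp->f_rx_lock, NULL, MUTEX_DRIVER, fp->f_icookie);

	ddi_set_driver_private(dip, fp);

	/* Per-instance cache name, as in the xnb source. */
	(void) snprintf(cachename, sizeof (cachename),
	    "foo_tx_buf_cache_%d", ddi_get_instance(dip));
	fp->f_tx_buf_cache = kmem_cache_create(cachename,
	    64, 0, NULL, NULL, NULL, fp, NULL, 0);
	if (fp->f_tx_buf_cache == NULL)
		goto failure_1;

	/* ... kstats, xenstore watches, each with its own label ... */

	return (DDI_SUCCESS);

	/* Unwind in strict reverse order of construction. */
failure_1:
	mutex_destroy(&fp->f_rx_lock);
	mutex_destroy(&fp->f_tx_lock);

failure:
	kmem_free(fp, sizeof (*fp));
	return (DDI_FAILURE);
}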
525 xnb_t *xnbp = ddi_get_driver_private(dip); in xnb_detach() local
527 ASSERT(xnbp != NULL); in xnb_detach()
528 ASSERT(!xnbp->xnb_connected); in xnb_detach()
529 ASSERT(xnbp->xnb_tx_buf_count == 0); in xnb_detach()
535 xnb_ks_free(xnbp); in xnb_detach()
537 kmem_cache_destroy(xnbp->xnb_tx_buf_cache); in xnb_detach()
541 mutex_destroy(&xnbp->xnb_state_lock); in xnb_detach()
542 mutex_destroy(&xnbp->xnb_rx_lock); in xnb_detach()
543 mutex_destroy(&xnbp->xnb_tx_lock); in xnb_detach()
545 if (xnbp->xnb_rx_cpop_count > 0) in xnb_detach()
546 kmem_free(xnbp->xnb_rx_cpop, sizeof (xnbp->xnb_rx_cpop[0]) in xnb_detach()
547 * xnbp->xnb_rx_cpop_count); in xnb_detach()
549 ASSERT(xnbp->xnb_rx_va != NULL); in xnb_detach()
550 vmem_free(heap_arena, xnbp->xnb_rx_va, PAGESIZE); in xnb_detach()
552 kmem_free(xnbp, sizeof (*xnbp)); in xnb_detach()
562 xnb_alloc_page(xnb_t *xnbp) in xnb_alloc_page() argument
573 xnbp->xnb_stat_allocation_failure++; in xnb_alloc_page()
580 if ((xnbp->xnb_stat_small_allocation_failure++ in xnb_alloc_page()
587 xnbp->xnb_stat_small_allocation_success++; in xnb_alloc_page()
593 xnbp->xnb_stat_allocation_success++; in xnb_alloc_page()
613 xnb_free_page(xnb_t *xnbp, mfn_t mfn) in xnb_free_page() argument
615 _NOTE(ARGUNUSED(xnbp)); in xnb_free_page()
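
xnb_alloc_page() (lines 562-593) hands out machine pages destined to be flipped to the peer, and its counters distinguish batched refills (allocation_success/failure) from single-page fallbacks (small_allocation_*), rate-limiting the console warning by testing the failure counter as it increments (line 580); xnb_free_page() (lines 613-615) returns pages without needing the state pointer. A sketch of the two-tier scheme; balloon_grab_pages() and WARN_INTERVAL are stand-in names, not the real illumos balloon API, and the real page cache lives in driver state under the rx lock rather than in statics.

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/hypervisor.h>	/* mfn_t */

#define	BATCH		16	/* assumed batch size */
#define	WARN_INTERVAL	1000	/* assumed warning rate limit */

typedef struct foo {
	uint64_t	f_stat_allocation_success;
	uint64_t	f_stat_allocation_failure;
	uint64_t	f_stat_small_allocation_success;
	uint64_t	f_stat_small_allocation_failure;
} foo_t;

/* Hypothetical page source; returns the number of pages obtained. */
extern long balloon_grab_pages(mfn_t *, uint_t);

static mfn_t
foo_alloc_page(foo_t *fp)
{
	/* Statics for brevity; really per-instance, under the rx lock. */
	static mfn_t mfns[BATCH];
	static int batchleft = 0;

	if (batchleft == 0) {
		/* Prefer a batch refill; one call covers many packets. */
		if (balloon_grab_pages(mfns, BATCH) == BATCH) {
			fp->f_stat_allocation_success++;
			batchleft = BATCH;
		} else {
			fp->f_stat_allocation_failure++;
			/* Batch failed: fall back to a single page. */
			if (balloon_grab_pages(mfns, 1) != 1) {
				/* Rate-limit the warning as we count. */
				if ((fp->f_stat_small_allocation_failure++
				    % WARN_INTERVAL) == 0)
					cmn_err(CE_WARN,
					    "cannot allocate page for peer");
				return (0);	/* caller defers the packet */
			}
			fp->f_stat_small_allocation_success++;
			batchleft = 1;
		}
	}

	return (mfns[--batchleft]);
}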
644 xnb_to_peer(xnb_t *xnbp, mblk_t *mp) in xnb_to_peer() argument
668 mutex_enter(&xnbp->xnb_rx_lock); in xnb_to_peer()
675 if (!(xnbp->xnb_connected && xnbp->xnb_hotplugged)) { in xnb_to_peer()
676 mutex_exit(&xnbp->xnb_rx_lock); in xnb_to_peer()
678 xnbp->xnb_stat_rx_too_early++; in xnb_to_peer()
682 loop = xnbp->xnb_rx_ring.req_cons; in xnb_to_peer()
683 prod = xnbp->xnb_rx_ring.rsp_prod_pvt; in xnb_to_peer()
684 gop = xnbp->xnb_rx_top; in xnb_to_peer()
687 XNB_RING_HAS_UNCONSUMED_REQUESTS(&xnbp->xnb_rx_ring)) { in xnb_to_peer()
698 if ((mfn = xnb_alloc_page(xnbp)) == 0) { in xnb_to_peer()
699 xnbp->xnb_stat_rx_defer++; in xnb_to_peer()
704 rxreq = RING_GET_REQUEST(&xnbp->xnb_rx_ring, loop); in xnb_to_peer()
715 hat_devload(kas.a_hat, xnbp->xnb_rx_va, PAGESIZE, in xnb_to_peer()
720 valoop = xnbp->xnb_rx_va; in xnb_to_peer()
732 hat_unload(kas.a_hat, xnbp->xnb_rx_va, PAGESIZE, in xnb_to_peer()
738 gop->domid = xnbp->xnb_peer; in xnb_to_peer()
742 rxresp = RING_GET_RESPONSE(&xnbp->xnb_rx_ring, prod); in xnb_to_peer()
746 cksum_flags = xnbp->xnb_flavour->xf_cksum_to_peer(xnbp, mp); in xnb_to_peer()
748 xnbp->xnb_stat_rx_cksum_deferred++; in xnb_to_peer()
751 rxresp->id = RING_GET_REQUEST(&xnbp->xnb_rx_ring, prod)->id; in xnb_to_peer()
764 if (loop == xnbp->xnb_rx_ring.req_cons) { in xnb_to_peer()
765 mutex_exit(&xnbp->xnb_rx_lock); in xnb_to_peer()
777 if (HYPERVISOR_grant_table_op(GNTTABOP_transfer, xnbp->xnb_rx_top, in xnb_to_peer()
778 loop - xnbp->xnb_rx_ring.req_cons) != 0) { in xnb_to_peer()
782 loop = xnbp->xnb_rx_ring.req_cons; in xnb_to_peer()
783 prod = xnbp->xnb_rx_ring.rsp_prod_pvt; in xnb_to_peer()
784 gop = xnbp->xnb_rx_top; in xnb_to_peer()
809 xnb_free_page(xnbp, gop->mfn); in xnb_to_peer()
819 RING_GET_RESPONSE(&xnbp->xnb_rx_ring, prod)->status = in xnb_to_peer()
822 xnbp->xnb_stat_ipackets++; in xnb_to_peer()
823 xnbp->xnb_stat_rbytes += len; in xnb_to_peer()
831 xnbp->xnb_rx_ring.req_cons = loop; in xnb_to_peer()
832 xnbp->xnb_rx_ring.rsp_prod_pvt = prod; in xnb_to_peer()
836 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xnbp->xnb_rx_ring, notify); in xnb_to_peer()
838 ec_notify_via_evtchn(xnbp->xnb_evtchn); in xnb_to_peer()
839 xnbp->xnb_stat_rx_notify_sent++; in xnb_to_peer()
841 xnbp->xnb_stat_rx_notify_deferred++; in xnb_to_peer()
845 xnbp->xnb_stat_rx_defer++; in xnb_to_peer()
847 mutex_exit(&xnbp->xnb_rx_lock); in xnb_to_peer()
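
xnb_to_peer() (lines 644-847) is the page-flip receive path: under xnb_rx_lock it refuses work until connected and hotplugged, consumes rx ring requests, copies each packet through a reserved kernel VA into a freshly ballooned page, queues one gnttab_transfer_t per packet, then issues a single GNTTABOP_transfer hypercall and pushes responses, kicking the event channel only if the ring macro says the peer needs it. A condensed sketch of that loop, with length bookkeeping, error fix-up, and the deferral statistics elided:

#include <sys/types.h>
#include <sys/ksynch.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/hypervisor.h>
#include <sys/gnttab.h>
#include <vm/hat.h>
#include <vm/seg_kmem.h>
#include <xen/public/io/netif.h>

typedef struct foo {
	kmutex_t	f_rx_lock;
	boolean_t	f_connected;
	boolean_t	f_hotplugged;
	domid_t		f_peer;
	int		f_evtchn;
	caddr_t		f_rx_va;	/* reserved kernel VA, one page */
	gnttab_transfer_t *f_rx_top;	/* pre-sized op array */
	netif_rx_back_ring_t f_rx_ring;
} foo_t;

extern mfn_t foo_alloc_page(foo_t *);	/* ballooned-page source */
extern void ec_notify_via_evtchn(uint_t);

mblk_t *
foo_to_peer(foo_t *fp, mblk_t *mp)
{
	RING_IDX loop, prod;
	gnttab_transfer_t *gop;
	boolean_t notify;

	mutex_enter(&fp->f_rx_lock);

	/* Nothing may be sent until the peer is fully plumbed. */
	if (!(fp->f_connected && fp->f_hotplugged)) {
		mutex_exit(&fp->f_rx_lock);
		return (mp);		/* caller retries later */
	}

	loop = fp->f_rx_ring.req_cons;
	prod = fp->f_rx_ring.rsp_prod_pvt;
	gop = fp->f_rx_top;

	while ((mp != NULL) &&
	    RING_HAS_UNCONSUMED_REQUESTS(&fp->f_rx_ring)) {
		mfn_t mfn;
		size_t len;
		mblk_t *next = mp->b_next;
		netif_rx_request_t *rxreq;
		netif_rx_response_t *rxresp;

		/* No ballooned page: defer the rest of the chain. */
		if ((mfn = foo_alloc_page(fp)) == 0)
			break;

		rxreq = RING_GET_REQUEST(&fp->f_rx_ring, loop);

		/*
		 * Map the new page at the reserved VA, copy the packet
		 * into it, then tear the mapping down so the page can
		 * be given away (mfn used directly as a pfn for brevity).
		 */
		hat_devload(kas.a_hat, fp->f_rx_va, PAGESIZE, mfn,
		    PROT_READ | PROT_WRITE, HAT_LOAD);
		len = msgdsize(mp);
		/* ... bcopy() each fragment of mp into f_rx_va ... */
		hat_unload(kas.a_hat, fp->f_rx_va, PAGESIZE, HAT_UNLOAD);

		/* Queue the transfer of page ownership to the peer. */
		gop->mfn = mfn;
		gop->domid = fp->f_peer;
		gop->ref = rxreq->gref;
		gop++;

		rxresp = RING_GET_RESPONSE(&fp->f_rx_ring, prod);
		rxresp->id = rxreq->id;
		rxresp->offset = 0;
		rxresp->flags = 0;
		rxresp->status = (int16_t)len;	/* error fix-up elided */

		mp->b_next = NULL;
		freemsg(mp);
		mp = next;
		loop++;
		prod++;
	}

	/* Nothing consumed: no hypercall, no responses to push. */
	if (loop == fp->f_rx_ring.req_cons) {
		mutex_exit(&fp->f_rx_lock);
		return (mp);
	}

	/* One hypercall flips every queued page to the peer. */
	(void) HYPERVISOR_grant_table_op(GNTTABOP_transfer, fp->f_rx_top,
	    loop - fp->f_rx_ring.req_cons);

	fp->f_rx_ring.req_cons = loop;
	fp->f_rx_ring.rsp_prod_pvt = prod;

	/* Only kick the event channel if the peer is actually waiting. */
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fp->f_rx_ring, notify);
	if (notify)
		ec_notify_via_evtchn(fp->f_evtchn);

	mutex_exit(&fp->f_rx_lock);
	return (mp);
}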
861 grow_cpop_area(xnb_t *xnbp) in grow_cpop_area() argument
866 ASSERT(MUTEX_HELD(&xnbp->xnb_rx_lock)); in grow_cpop_area()
868 count = xnbp->xnb_rx_cpop_count + CPOP_DEFCNT; in grow_cpop_area()
871 xnbp->xnb_stat_other_allocation_failure++; in grow_cpop_area()
875 bcopy(xnbp->xnb_rx_cpop, new, in grow_cpop_area()
876 sizeof (xnbp->xnb_rx_cpop[0]) * xnbp->xnb_rx_cpop_count); in grow_cpop_area()
878 kmem_free(xnbp->xnb_rx_cpop, in grow_cpop_area()
879 sizeof (xnbp->xnb_rx_cpop[0]) * xnbp->xnb_rx_cpop_count); in grow_cpop_area()
881 xnbp->xnb_rx_cpop = new; in grow_cpop_area()
882 xnbp->xnb_rx_cpop_count = count; in grow_cpop_area()
884 xnbp->xnb_stat_rx_cpoparea_grown++; in grow_cpop_area()
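
grow_cpop_area() (lines 861-884) grows the pre-allocated gnttab_copy_t array in fixed increments. Since the kernel has no realloc and the caller holds xnb_rx_lock, it allocates with KM_NOSLEEP, bcopy()s the old contents across, and frees the old array. A near-verbatim sketch (the value of CPOP_DEFCNT is assumed):

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/ksynch.h>
#include <sys/systm.h>		/* bcopy() */
#include <sys/gnttab.h>		/* gnttab_copy_t */

#define	CPOP_DEFCNT	8	/* growth increment (assumed value) */

typedef struct foo {
	kmutex_t	f_rx_lock;
	gnttab_copy_t	*f_rx_cpop;
	size_t		f_rx_cpop_count;
	uint64_t	f_stat_other_allocation_failure;
	uint64_t	f_stat_rx_cpoparea_grown;
} foo_t;

static boolean_t
grow_cpop_area(foo_t *fp)
{
	size_t count;
	gnttab_copy_t *new;

	ASSERT(MUTEX_HELD(&fp->f_rx_lock));

	count = fp->f_rx_cpop_count + CPOP_DEFCNT;

	/*
	 * KM_NOSLEEP: the rx lock is held, so blocking for memory is
	 * not an option; the caller just defers the packet instead.
	 */
	if ((new = kmem_alloc(sizeof (new[0]) * count,
	    KM_NOSLEEP)) == NULL) {
		fp->f_stat_other_allocation_failure++;
		return (B_FALSE);
	}

	/* No krealloc in the kernel: copy by hand, free the old array. */
	if (fp->f_rx_cpop != NULL) {
		bcopy(fp->f_rx_cpop, new,
		    sizeof (fp->f_rx_cpop[0]) * fp->f_rx_cpop_count);
		kmem_free(fp->f_rx_cpop,
		    sizeof (fp->f_rx_cpop[0]) * fp->f_rx_cpop_count);
	}

	fp->f_rx_cpop = new;
	fp->f_rx_cpop_count = count;

	fp->f_stat_rx_cpoparea_grown++;

	return (B_TRUE);
}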
939 setup_gop(xnb_t *xnbp, gnttab_copy_t *gp, uchar_t *rptr, in setup_gop() argument
942 ASSERT(xnbp != NULL && gp != NULL); in setup_gop()
954 gp->dest.domid = xnbp->xnb_peer; in setup_gop()
961 xnb_copy_to_peer(xnb_t *xnbp, mblk_t *mp) in xnb_copy_to_peer() argument
973 if (!xnbp->xnb_rx_hv_copy) in xnb_copy_to_peer()
974 return (xnb_to_peer(xnbp, mp)); in xnb_copy_to_peer()
995 mutex_enter(&xnbp->xnb_rx_lock); in xnb_copy_to_peer()
997 if (!(xnbp->xnb_connected && xnbp->xnb_hotplugged)) { in xnb_copy_to_peer()
998 mutex_exit(&xnbp->xnb_rx_lock); in xnb_copy_to_peer()
1000 xnbp->xnb_stat_rx_too_early++; in xnb_copy_to_peer()
1004 loop = xnbp->xnb_rx_ring.req_cons; in xnb_copy_to_peer()
1005 prod = xnbp->xnb_rx_ring.rsp_prod_pvt; in xnb_copy_to_peer()
1008 XNB_RING_HAS_UNCONSUMED_REQUESTS(&xnbp->xnb_rx_ring)) { in xnb_copy_to_peer()
1018 rxreq = RING_GET_REQUEST(&xnbp->xnb_rx_ring, loop); in xnb_copy_to_peer()
1032 gop_cp = xnbp->xnb_rx_cpop; in xnb_copy_to_peer()
1065 xnbp->xnb_stat_rx_foreign_page++; in xnb_copy_to_peer()
1083 if (item_count == xnbp->xnb_rx_cpop_count) { in xnb_copy_to_peer()
1084 if (!grow_cpop_area(xnbp)) in xnb_copy_to_peer()
1086 gop_cp = &xnbp->xnb_rx_cpop[item_count]; in xnb_copy_to_peer()
1099 xnbp->xnb_stat_rx_pagebndry_crossed++; in xnb_copy_to_peer()
1104 setup_gop(xnbp, gop_cp, r_tmp, r_offset, in xnb_copy_to_peer()
1126 if (HYPERVISOR_grant_table_op(GNTTABOP_copy, xnbp->xnb_rx_cpop, in xnb_copy_to_peer()
1133 rxresp = RING_GET_RESPONSE(&xnbp->xnb_rx_ring, prod); in xnb_copy_to_peer()
1142 cksum_flags = xnbp->xnb_flavour->xf_cksum_to_peer(xnbp, mp); in xnb_copy_to_peer()
1144 xnbp->xnb_stat_rx_cksum_deferred++; in xnb_copy_to_peer()
1147 rxresp->id = RING_GET_REQUEST(&xnbp->xnb_rx_ring, prod)->id; in xnb_copy_to_peer()
1155 if (xnbp->xnb_rx_cpop[i].status != 0) { in xnb_copy_to_peer()
1157 (int)xnbp->xnb_rx_cpop[i].status, in xnb_copy_to_peer()
1165 RING_GET_RESPONSE(&xnbp->xnb_rx_ring, prod)->status = in xnb_copy_to_peer()
1167 xnbp->xnb_stat_rx_rsp_notok++; in xnb_copy_to_peer()
1169 xnbp->xnb_stat_ipackets++; in xnb_copy_to_peer()
1170 xnbp->xnb_stat_rbytes += len; in xnb_copy_to_peer()
1182 if (loop == xnbp->xnb_rx_ring.req_cons) { in xnb_copy_to_peer()
1183 mutex_exit(&xnbp->xnb_rx_lock); in xnb_copy_to_peer()
1193 xnbp->xnb_rx_ring.req_cons = loop; in xnb_copy_to_peer()
1194 xnbp->xnb_rx_ring.rsp_prod_pvt = prod; in xnb_copy_to_peer()
1198 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xnbp->xnb_rx_ring, notify); in xnb_copy_to_peer()
1200 ec_notify_via_evtchn(xnbp->xnb_evtchn); in xnb_copy_to_peer()
1201 xnbp->xnb_stat_rx_notify_sent++; in xnb_copy_to_peer()
1203 xnbp->xnb_stat_rx_notify_deferred++; in xnb_copy_to_peer()
1207 xnbp->xnb_stat_rx_defer++; in xnb_copy_to_peer()
1209 mutex_exit(&xnbp->xnb_rx_lock); in xnb_copy_to_peer()
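
xnb_copy_to_peer() (lines 961-1209) is the copy-based receive path used when the peer negotiated request-rx-copy: setup_gop() (lines 939-954) fills one gnttab_copy_t per contiguous run of bytes, splitting at page boundaries (rx_pagebndry_crossed) and growing the op array on demand, then a single GNTTABOP_copy hypercall moves everything and each op's status is checked before the ring response is declared good (rx_rsp_notok otherwise). A sketch of the descriptor fill and the batch submit/verify step; the real setup_gop() also handles foreign source pages via GNTCOPY_source_gref.

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/hypervisor.h>
#include <sys/gnttab.h>

/*
 * Fill one copy descriptor: 'len' bytes from one of our pages
 * (machine frame 'src_mfn') into the peer's granted page 'gref'.
 */
static void
foo_setup_gop(domid_t peer, gnttab_copy_t *gp, mfn_t src_mfn,
    uint16_t src_off, grant_ref_t gref, uint16_t dst_off, uint16_t len)
{
	gp->flags = GNTCOPY_dest_gref;
	gp->source.offset = src_off;
	gp->source.domid = DOMID_SELF;
	gp->source.u.gmfn = src_mfn;
	gp->dest.offset = dst_off;
	gp->dest.domid = peer;
	gp->dest.u.ref = gref;
	gp->len = len;
}

/*
 * Submit the whole batch in one hypercall, then verify op by op: the
 * hypercall succeeding only means the batch was processed, not that
 * every individual copy worked.
 */
static boolean_t
foo_submit_gops(gnttab_copy_t *cpop, int item_count)
{
	int i;
	boolean_t ok = B_TRUE;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, cpop,
	    item_count) != 0)
		return (B_FALSE);

	for (i = 0; i < item_count; i++) {
		if (cpop[i].status != GNTST_okay) {
			cmn_err(CE_WARN, "copy op %d failed (%d)",
			    i, (int)cpop[i].status);
			ok = B_FALSE;	/* response gets NETIF_RSP_ERROR */
		}
	}
	return (ok);
}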
1219 xnb_tx_notify_peer(xnb_t *xnbp, boolean_t force) in xnb_tx_notify_peer() argument
1223 ASSERT(MUTEX_HELD(&xnbp->xnb_tx_lock)); in xnb_tx_notify_peer()
1226 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xnbp->xnb_tx_ring, notify); in xnb_tx_notify_peer()
1228 ec_notify_via_evtchn(xnbp->xnb_evtchn); in xnb_tx_notify_peer()
1229 xnbp->xnb_stat_tx_notify_sent++; in xnb_tx_notify_peer()
1231 xnbp->xnb_stat_tx_notify_deferred++; in xnb_tx_notify_peer()
1236 xnb_tx_mark_complete(xnb_t *xnbp, RING_IDX id, int16_t status) in xnb_tx_mark_complete() argument
1241 ASSERT(MUTEX_HELD(&xnbp->xnb_tx_lock)); in xnb_tx_mark_complete()
1243 i = xnbp->xnb_tx_ring.rsp_prod_pvt; in xnb_tx_mark_complete()
1245 txresp = RING_GET_RESPONSE(&xnbp->xnb_tx_ring, i); in xnb_tx_mark_complete()
1249 xnbp->xnb_tx_ring.rsp_prod_pvt = i + 1; in xnb_tx_mark_complete()
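
Lines 1219-1249 pair response completion with notification: xnb_tx_mark_complete() writes a response at the private producer index and advances it, while xnb_tx_notify_peer() publishes responses and fires the event channel only when RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() reports the peer is actually waiting (tx_notify_sent versus tx_notify_deferred). A sketch, with the stat counters dropped:

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/ksynch.h>
#include <xen/public/io/netif.h>

typedef struct foo {
	kmutex_t	f_tx_lock;
	int		f_evtchn;
	netif_tx_back_ring_t f_tx_ring;
} foo_t;

extern void ec_notify_via_evtchn(uint_t);	/* illumos xpv kick */

static void
foo_tx_mark_complete(foo_t *fp, RING_IDX id, int16_t status)
{
	RING_IDX i;
	netif_tx_response_t *txresp;

	ASSERT(MUTEX_HELD(&fp->f_tx_lock));

	/* Write at the private producer index, then advance it. */
	i = fp->f_tx_ring.rsp_prod_pvt;
	txresp = RING_GET_RESPONSE(&fp->f_tx_ring, i);
	txresp->id = id;
	txresp->status = status;
	fp->f_tx_ring.rsp_prod_pvt = i + 1;
}

static void
foo_tx_notify_peer(foo_t *fp, boolean_t force)
{
	boolean_t notify;

	ASSERT(MUTEX_HELD(&fp->f_tx_lock));

	/*
	 * Make the queued responses visible; 'notify' comes back set
	 * only if the peer's event needs raising (it may be polling).
	 */
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fp->f_tx_ring, notify);
	if (notify || force)
		ec_notify_via_evtchn(fp->f_evtchn);
}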
1260 xnb_t *xnbp = txp->xt_xnbp; in xnb_txbuf_recycle() local
1262 kmem_cache_free(xnbp->xnb_tx_buf_cache, txp); in xnb_txbuf_recycle()
1264 xnbp->xnb_tx_buf_outstanding--; in xnb_txbuf_recycle()
1272 xnb_t *xnbp = arg; in xnb_txbuf_constructor() local
1279 txp->xt_xnbp = xnbp; in xnb_txbuf_constructor()
1282 if (ddi_dma_alloc_handle(xnbp->xnb_devinfo, &buf_dma_attr, in xnb_txbuf_constructor()
1303 atomic_inc_32(&xnbp->xnb_tx_buf_count); in xnb_txbuf_constructor()
1304 xnbp->xnb_tx_buf_outstanding++; in xnb_txbuf_constructor()
1323 xnb_t *xnbp = arg; in xnb_txbuf_destructor() local
1329 atomic_dec_32(&xnbp->xnb_tx_buf_count); in xnb_txbuf_destructor()
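
Lines 1260-1329 show the transmit-buffer lifecycle: buffers are kmem-cache objects whose constructor (handed xnbp through the cache's private argument) installs a desballoc() free routine, so an mblk passed upstream returns its buffer to the cache via xnb_txbuf_recycle() whenever the stack eventually frees it. A trimmed sketch with the DMA setup reduced to comments:

#include <sys/types.h>
#include <sys/note.h>
#include <sys/kmem.h>
#include <sys/stream.h>
#include <sys/atomic.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

typedef struct foo foo_t;

/* Per-buffer state; the real xnb_txbuf_t also carries the DMA
 * handles, buffer VA/MFN and the tx ring id it is servicing. */
typedef struct foo_txbuf {
	frtn_t		xt_free_rtn;	/* for desballoc() */
	foo_t		*xt_fp;
} foo_txbuf_t;

struct foo {
	dev_info_t	*f_devinfo;
	kmem_cache_t	*f_tx_buf_cache;
	uint32_t	f_tx_buf_count;
	uint32_t	f_tx_buf_outstanding;	/* tx lock protects this */
};

/*
 * desballoc() free routine: when the mblk wrapping this buffer is
 * finally freed upstream, the buffer returns to the cache rather
 * than to the allocator.
 */
static void
foo_txbuf_recycle(foo_txbuf_t *txp)
{
	foo_t *fp = txp->xt_fp;

	kmem_cache_free(fp->f_tx_buf_cache, txp);
	fp->f_tx_buf_outstanding--;
}

/* 'arg' is the private pointer given to kmem_cache_create(). */
static int
foo_txbuf_constructor(void *buf, void *arg, int kmflag)
{
	_NOTE(ARGUNUSED(kmflag));
	foo_txbuf_t *txp = buf;
	foo_t *fp = arg;

	txp->xt_free_rtn.free_func = (void (*)())foo_txbuf_recycle;
	txp->xt_free_rtn.free_arg = (caddr_t)txp;
	txp->xt_fp = fp;

	/* ... ddi_dma_alloc_handle() / ddi_dma_mem_alloc() / bind,
	 * unwinding and returning -1 on failure ... */

	atomic_inc_32(&fp->f_tx_buf_count);
	fp->f_tx_buf_outstanding++;
	return (0);
}

static void
foo_txbuf_destructor(void *buf, void *arg)
{
	_NOTE(ARGUNUSED(buf));
	foo_t *fp = arg;

	/* ... unbind and free the DMA resources ... */
	atomic_dec_32(&fp->f_tx_buf_count);
}

A buffer allocated from this cache would typically be wrapped with desballoc(va, len, BPRI_MED, &txp->xt_free_rtn) before being chained upstream, which is what arms the recycle path.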
1336 xnb_from_peer(xnb_t *xnbp) in xnb_from_peer() argument
1346 ASSERT(MUTEX_HELD(&xnbp->xnb_tx_lock)); in xnb_from_peer()
1352 RING_FINAL_CHECK_FOR_REQUESTS(&xnbp->xnb_tx_ring, work_to_do); in xnb_from_peer()
1355 xnb_tx_notify_peer(xnbp, need_notify); in xnb_from_peer()
1360 start = xnbp->xnb_tx_ring.req_cons; in xnb_from_peer()
1361 end = xnbp->xnb_tx_ring.sring->req_prod; in xnb_from_peer()
1375 xnbp->xnb_peer, (end - start)); in xnb_from_peer()
1378 BACK_RING_ATTACH(&xnbp->xnb_tx_ring, in xnb_from_peer()
1379 (netif_tx_sring_t *)xnbp->xnb_tx_ring_addr, PAGESIZE); in xnb_from_peer()
1385 cop = xnbp->xnb_tx_cop; in xnb_from_peer()
1386 txpp = xnbp->xnb_tx_bufp; in xnb_from_peer()
1396 txreq = RING_GET_REQUEST(&xnbp->xnb_tx_ring, loop); in xnb_from_peer()
1408 xnbp->xnb_stat_tx_unexpected_flags++; in xnb_from_peer()
1411 xnb_tx_mark_complete(xnbp, txreq->id, NETIF_RSP_ERROR); in xnb_from_peer()
1422 RING_GET_REQUEST(&xnbp->xnb_tx_ring, loop); in xnb_from_peer()
1426 ASSERT(xnbp->xnb_multicast_control); in xnb_from_peer()
1427 status = xnbp->xnb_flavour->xf_mcast_add(xnbp, in xnb_from_peer()
1431 ASSERT(xnbp->xnb_multicast_control); in xnb_from_peer()
1432 status = xnbp->xnb_flavour->xf_mcast_del(xnbp, in xnb_from_peer()
1442 xnb_tx_mark_complete(xnbp, txreq->id, in xnb_from_peer()
1456 xnbp->xnb_stat_tx_overflow_page++; in xnb_from_peer()
1459 xnb_tx_mark_complete(xnbp, txreq->id, NETIF_RSP_ERROR); in xnb_from_peer()
1465 txp = kmem_cache_alloc(xnbp->xnb_tx_buf_cache, in xnb_from_peer()
1473 kmem_cache_free(xnbp->xnb_tx_buf_cache, txp); in xnb_from_peer()
1481 cop->source.domid = xnbp->xnb_peer; in xnb_from_peer()
1504 xnbp->xnb_tx_ring.req_cons = loop; in xnb_from_peer()
1510 xnbp->xnb_tx_cop, n_data_req) != 0) { in xnb_from_peer()
1514 txpp = xnbp->xnb_tx_bufp; in xnb_from_peer()
1517 kmem_cache_free(xnbp->xnb_tx_buf_cache, *txpp); in xnb_from_peer()
1525 txpp = xnbp->xnb_tx_bufp; in xnb_from_peer()
1526 cop = xnbp->xnb_tx_cop; in xnb_from_peer()
1532 txreq = RING_GET_REQUEST(&xnbp->xnb_tx_ring, txp->xt_idx); in xnb_from_peer()
1540 xnb_tx_mark_complete(xnbp, txp->xt_id, NETIF_RSP_ERROR); in xnb_from_peer()
1557 mp = xnbp->xnb_flavour->xf_cksum_from_peer(xnbp, in xnb_from_peer()
1559 xnbp->xnb_stat_tx_cksum_no_need++; in xnb_from_peer()
1573 xnbp->xnb_stat_opackets++; in xnb_from_peer()
1574 xnbp->xnb_stat_obytes += txreq->size; in xnb_from_peer()
1576 xnb_tx_mark_complete(xnbp, txp->xt_id, NETIF_RSP_OKAY); in xnb_from_peer()
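
xnb_from_peer() (lines 1336-1576) drains the peer's transmit ring under xnb_tx_lock in two passes: first validate each request (flags, multicast control ops, page-overflow checks) and queue a grant copy into a local buffer, then issue one GNTTABOP_copy for the batch and complete each request. The exit condition is the subtle part, sketched below: RING_FINAL_CHECK_FOR_REQUESTS() re-arms the peer's event before the final emptiness check, so a request racing in either shows up in that check or raises a fresh interrupt; it cannot be lost.

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/ksynch.h>
#include <sys/stream.h>
#include <xen/public/io/netif.h>

typedef struct foo {
	kmutex_t	f_tx_lock;
	netif_tx_back_ring_t f_tx_ring;
} foo_t;

extern void foo_tx_notify_peer(foo_t *, boolean_t);

static mblk_t *
foo_from_peer(foo_t *fp)
{
	mblk_t *head = NULL;
	boolean_t need_notify = B_FALSE;

	ASSERT(MUTEX_HELD(&fp->f_tx_lock));

	for (;;) {
		boolean_t work_to_do;
		RING_IDX loop, end;

		/* Re-arm the peer's event, then make the final check. */
		RING_FINAL_CHECK_FOR_REQUESTS(&fp->f_tx_ring, work_to_do);
		if (!work_to_do)
			break;

		end = fp->f_tx_ring.sring->req_prod;

		for (loop = fp->f_tx_ring.req_cons; loop != end; loop++) {
			netif_tx_request_t *txreq =
			    RING_GET_REQUEST(&fp->f_tx_ring, loop);

			/*
			 * Pass 1: validate txreq->flags and size,
			 * allocate a local buffer, and queue a
			 * GNTTABOP_copy from the peer's granted page
			 * (txreq->gref) into it.
			 */
			(void) txreq;
		}
		fp->f_tx_ring.req_cons = loop;

		/*
		 * Pass 2: one GNTTABOP_copy hypercall for the batch,
		 * then wrap each buffer in an mblk, chain it onto
		 * 'head', and complete each request on the ring with
		 * NETIF_RSP_OKAY (or NETIF_RSP_ERROR).
		 */
		need_notify = B_TRUE;
	}

	foo_tx_notify_peer(fp, need_notify);
	return (head);
}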
1591 xnb_t *xnbp = (xnb_t *)arg; in xnb_intr() local
1594 xnbp->xnb_stat_intr++; in xnb_intr()
1596 mutex_enter(&xnbp->xnb_tx_lock); in xnb_intr()
1598 ASSERT(xnbp->xnb_connected); in xnb_intr()
1600 mp = xnb_from_peer(xnbp); in xnb_intr()
1602 mutex_exit(&xnbp->xnb_tx_lock); in xnb_intr()
1604 if (!xnbp->xnb_hotplugged) { in xnb_intr()
1605 xnbp->xnb_stat_tx_too_early++; in xnb_intr()
1609 xnbp->xnb_stat_spurious_intr++; in xnb_intr()
1613 xnbp->xnb_flavour->xf_from_peer(xnbp, mp); in xnb_intr()
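
xnb_intr() (lines 1591-1613) is the event-channel interrupt handler: count the interrupt, drain the ring under xnb_tx_lock, then hand the resulting mblk chain to the flavour (xnbu or xnbo) outside the lock, freeing the chain instead if the hotplug script has not yet run or the ring was empty. A sketch:

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/ksynch.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

typedef struct foo foo_t;

typedef struct foo_flavour {
	void (*xf_from_peer)(foo_t *, mblk_t *);
} foo_flavour_t;

struct foo {
	kmutex_t	f_tx_lock;
	boolean_t	f_connected;
	boolean_t	f_hotplugged;
	foo_flavour_t	*f_flavour;
	uint64_t	f_stat_intr;
	uint64_t	f_stat_tx_too_early;
	uint64_t	f_stat_spurious_intr;
};

extern mblk_t *foo_from_peer(foo_t *);

static uint_t
foo_intr(caddr_t arg)
{
	foo_t *fp = (foo_t *)arg;
	mblk_t *mp;

	fp->f_stat_intr++;

	mutex_enter(&fp->f_tx_lock);
	ASSERT(fp->f_connected);
	mp = foo_from_peer(fp);
	mutex_exit(&fp->f_tx_lock);

	if (!fp->f_hotplugged) {
		fp->f_stat_tx_too_early++;
		goto fail;
	}
	if (mp == NULL) {
		fp->f_stat_spurious_intr++;
		goto fail;
	}

	/* Deliver outside the lock; the flavour owns the chain now. */
	fp->f_flavour->xf_from_peer(fp, mp);

	return (DDI_INTR_CLAIMED);

fail:
	freemsgchain(mp);	/* safe on NULL */
	return (DDI_INTR_CLAIMED);
}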
1626 xnb_read_xs_config(xnb_t *xnbp) in xnb_read_xs_config() argument
1631 xsname = xvdi_get_xsname(xnbp->xnb_devinfo); in xnb_read_xs_config()
1641 if (ether_aton(mac, xnbp->xnb_mac_addr) != ETHERADDRL) { in xnb_read_xs_config()
1655 xnb_read_oe_config(xnb_t *xnbp) in xnb_read_oe_config() argument
1660 oename = xvdi_get_oename(xnbp->xnb_devinfo); in xnb_read_oe_config()
1663 "event-channel", "%u", &xnbp->xnb_fe_evtchn, in xnb_read_oe_config()
1664 "tx-ring-ref", "%lu", &xnbp->xnb_tx_ring_ref, in xnb_read_oe_config()
1665 "rx-ring-ref", "%lu", &xnbp->xnb_rx_ring_ref, in xnb_read_oe_config()
1681 xnbp->xnb_rx_hv_copy = B_TRUE; in xnb_read_oe_config()
1690 xnbp->xnb_multicast_control = B_TRUE; in xnb_read_oe_config()
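
xnb_read_oe_config() (lines 1655-1690) pulls the frontend's ring references and event channel out of the peer's xenstore directory, then probes optional feature flags such as request-rx-copy and multicast control. A sketch assuming the illumos xenbus_gather()/xenbus_scanf() conventions (0 on success) and xvdi_get_oename() from the listing; an absent feature key simply leaves the flag off.

#include <sys/types.h>
#include <sys/sunddi.h>
#include <xen/sys/xenbus_impl.h>	/* xenbus_gather(), XBT_NULL */
#include <xen/sys/xendev.h>		/* xvdi_get_oename() */

typedef struct foo {
	dev_info_t	*f_devinfo;
	uint_t		f_fe_evtchn;
	ulong_t		f_tx_ring_ref;
	ulong_t		f_rx_ring_ref;
	boolean_t	f_rx_hv_copy;
} foo_t;

static boolean_t
foo_read_oe_config(foo_t *fp)
{
	char *oename;
	int feature;

	oename = xvdi_get_oename(fp->f_devinfo);

	/* (key, fmt, valuep) triples, NULL-terminated. */
	if (xenbus_gather(XBT_NULL, oename,
	    "event-channel", "%u", &fp->f_fe_evtchn,
	    "tx-ring-ref", "%lu", &fp->f_tx_ring_ref,
	    "rx-ring-ref", "%lu", &fp->f_rx_ring_ref,
	    NULL) != 0)
		return (B_FALSE);

	/* Optional feature: missing key means "not supported". */
	if (xenbus_scanf(XBT_NULL, oename,
	    "request-rx-copy", "%d", &feature) == 0 && feature != 0)
		fp->f_rx_hv_copy = B_TRUE;

	return (B_TRUE);
}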
1720 xnb_start_connect(xnb_t *xnbp) in xnb_start_connect() argument
1722 dev_info_t *dip = xnbp->xnb_devinfo; in xnb_start_connect()
1730 if (!xnbp->xnb_flavour->xf_start_connect(xnbp)) { in xnb_start_connect()
1740 xnbp->xnb_flavour->xf_peer_disconnected(xnbp); in xnb_start_connect()
1750 xnb_t *xnbp = ddi_get_driver_private(dip); in xnb_connect_rings() local
1756 ASSERT(!xnbp->xnb_connected); in xnb_connect_rings()
1769 xnbp->xnb_tx_ring_addr = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE, in xnb_connect_rings()
1771 ASSERT(xnbp->xnb_tx_ring_addr != NULL); in xnb_connect_rings()
1774 map_op.host_addr = (uint64_t)((long)xnbp->xnb_tx_ring_addr); in xnb_connect_rings()
1776 map_op.ref = xnbp->xnb_tx_ring_ref; in xnb_connect_rings()
1777 map_op.dom = xnbp->xnb_peer; in xnb_connect_rings()
1778 hat_prepare_mapping(kas.a_hat, xnbp->xnb_tx_ring_addr, NULL); in xnb_connect_rings()
1784 xnbp->xnb_tx_ring_handle = map_op.handle; in xnb_connect_rings()
1787 BACK_RING_INIT(&xnbp->xnb_tx_ring, in xnb_connect_rings()
1788 (netif_tx_sring_t *)xnbp->xnb_tx_ring_addr, PAGESIZE); in xnb_connect_rings()
1791 xnbp->xnb_rx_ring_addr = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE, in xnb_connect_rings()
1793 ASSERT(xnbp->xnb_rx_ring_addr != NULL); in xnb_connect_rings()
1796 map_op.host_addr = (uint64_t)((long)xnbp->xnb_rx_ring_addr); in xnb_connect_rings()
1798 map_op.ref = xnbp->xnb_rx_ring_ref; in xnb_connect_rings()
1799 map_op.dom = xnbp->xnb_peer; in xnb_connect_rings()
1800 hat_prepare_mapping(kas.a_hat, xnbp->xnb_rx_ring_addr, NULL); in xnb_connect_rings()
1806 xnbp->xnb_rx_ring_handle = map_op.handle; in xnb_connect_rings()
1809 BACK_RING_INIT(&xnbp->xnb_rx_ring, in xnb_connect_rings()
1810 (netif_rx_sring_t *)xnbp->xnb_rx_ring_addr, PAGESIZE); in xnb_connect_rings()
1813 if (xvdi_bind_evtchn(dip, xnbp->xnb_fe_evtchn) != DDI_SUCCESS) { in xnb_connect_rings()
1815 "cannot bind event channel %d", xnbp->xnb_evtchn); in xnb_connect_rings()
1816 xnbp->xnb_evtchn = INVALID_EVTCHN; in xnb_connect_rings()
1819 xnbp->xnb_evtchn = xvdi_get_evtchn(dip); in xnb_connect_rings()
1827 mutex_enter(&xnbp->xnb_tx_lock); in xnb_connect_rings()
1828 mutex_enter(&xnbp->xnb_rx_lock); in xnb_connect_rings()
1830 xnbp->xnb_connected = B_TRUE; in xnb_connect_rings()
1832 mutex_exit(&xnbp->xnb_rx_lock); in xnb_connect_rings()
1833 mutex_exit(&xnbp->xnb_tx_lock); in xnb_connect_rings()
1836 if (ddi_add_intr(dip, 0, NULL, NULL, xnb_intr, (caddr_t)xnbp) in xnb_connect_rings()
1841 xnbp->xnb_irq = B_TRUE; in xnb_connect_rings()
1846 mutex_enter(&xnbp->xnb_tx_lock); in xnb_connect_rings()
1847 mutex_enter(&xnbp->xnb_rx_lock); in xnb_connect_rings()
1849 xnbp->xnb_connected = B_FALSE; in xnb_connect_rings()
1851 mutex_exit(&xnbp->xnb_rx_lock); in xnb_connect_rings()
1852 mutex_exit(&xnbp->xnb_tx_lock); in xnb_connect_rings()
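
xnb_connect_rings() (lines 1750-1852) maps each shared ring page granted by the peer: reserve a page-aligned kernel VA from heap_arena, fill a gnttab_map_grant_ref_t with that VA, GNTMAP_host_map, the peer's ring ref and domain id, prepare the HAT mapping, perform the grant map, then overlay the Xen back-ring on the mapped page with BACK_RING_INIT(). xnb_disconnect_rings() (lines 1860-1913) undoes each step using the saved grant handle. A sketch for one ring, assuming the illumos xen_map_gref() wrapper with the signature below; failure unwinding is elided.

#include <sys/types.h>
#include <sys/vmem.h>
#include <sys/hypervisor.h>
#include <sys/gnttab.h>
#include <vm/hat.h>
#include <vm/seg_kmem.h>
#include <xen/public/io/netif.h>

typedef struct foo {
	caddr_t		f_tx_ring_addr;
	grant_ref_t	f_tx_ring_ref;
	grant_handle_t	f_tx_ring_handle;
	domid_t		f_peer;
	netif_tx_back_ring_t f_tx_ring;
} foo_t;

/* illumos wrapper used by xnb; signature assumed here. */
extern int xen_map_gref(uint_t, gnttab_map_grant_ref_t *, uint_t,
    boolean_t);

static boolean_t
foo_map_tx_ring(foo_t *fp)
{
	gnttab_map_grant_ref_t map_op;

	/* Page-aligned kernel VA to receive the peer's shared page. */
	fp->f_tx_ring_addr = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
	    0, 0, 0, 0, VM_SLEEP);

	map_op.host_addr = (uint64_t)(uintptr_t)fp->f_tx_ring_addr;
	map_op.flags = GNTMAP_host_map;
	map_op.ref = fp->f_tx_ring_ref;
	map_op.dom = fp->f_peer;
	hat_prepare_mapping(kas.a_hat, fp->f_tx_ring_addr, NULL);
	if (xen_map_gref(GNTTABOP_map_grant_ref, &map_op, 1,
	    B_FALSE) != 0 || map_op.status != GNTST_okay)
		return (B_FALSE);	/* real code unwinds here */

	/* The handle is what the eventual unmap needs. */
	fp->f_tx_ring_handle = map_op.handle;

	/*
	 * The mapped page is the frontend's shared ring; overlay the
	 * backend view and start our private indexes from it.
	 */
	BACK_RING_INIT(&fp->f_tx_ring,
	    (netif_tx_sring_t *)fp->f_tx_ring_addr, PAGESIZE);
	return (B_TRUE);
}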
1860 xnb_t *xnbp = ddi_get_driver_private(dip); in xnb_disconnect_rings() local
1862 if (xnbp->xnb_irq) { in xnb_disconnect_rings()
1864 xnbp->xnb_irq = B_FALSE; in xnb_disconnect_rings()
1867 if (xnbp->xnb_evtchn != INVALID_EVTCHN) { in xnb_disconnect_rings()
1869 xnbp->xnb_evtchn = INVALID_EVTCHN; in xnb_disconnect_rings()
1872 if (xnbp->xnb_rx_ring_handle != INVALID_GRANT_HANDLE) { in xnb_disconnect_rings()
1876 xnbp->xnb_rx_ring_addr; in xnb_disconnect_rings()
1878 unmap_op.handle = xnbp->xnb_rx_ring_handle; in xnb_disconnect_rings()
1885 xnbp->xnb_rx_ring_handle = INVALID_GRANT_HANDLE; in xnb_disconnect_rings()
1888 if (xnbp->xnb_rx_ring_addr != NULL) { in xnb_disconnect_rings()
1889 hat_release_mapping(kas.a_hat, xnbp->xnb_rx_ring_addr); in xnb_disconnect_rings()
1890 vmem_free(heap_arena, xnbp->xnb_rx_ring_addr, PAGESIZE); in xnb_disconnect_rings()
1891 xnbp->xnb_rx_ring_addr = NULL; in xnb_disconnect_rings()
1894 if (xnbp->xnb_tx_ring_handle != INVALID_GRANT_HANDLE) { in xnb_disconnect_rings()
1898 xnbp->xnb_tx_ring_addr; in xnb_disconnect_rings()
1900 unmap_op.handle = xnbp->xnb_tx_ring_handle; in xnb_disconnect_rings()
1907 xnbp->xnb_tx_ring_handle = INVALID_GRANT_HANDLE; in xnb_disconnect_rings()
1910 if (xnbp->xnb_tx_ring_addr != NULL) { in xnb_disconnect_rings()
1911 hat_release_mapping(kas.a_hat, xnbp->xnb_tx_ring_addr); in xnb_disconnect_rings()
1912 vmem_free(heap_arena, xnbp->xnb_tx_ring_addr, PAGESIZE); in xnb_disconnect_rings()
1913 xnbp->xnb_tx_ring_addr = NULL; in xnb_disconnect_rings()
1922 xnb_t *xnbp = ddi_get_driver_private(dip); in xnb_oe_state_change() local
1925 ASSERT(xnbp != NULL); in xnb_oe_state_change()
1930 if (xnbp->xnb_connected) in xnb_oe_state_change()
1933 if (!xnb_read_oe_config(xnbp) || in xnb_oe_state_change()
1934 !xnbp->xnb_flavour->xf_peer_connected(xnbp)) { in xnb_oe_state_change()
1945 mutex_enter(&xnbp->xnb_state_lock); in xnb_oe_state_change()
1946 xnbp->xnb_fe_status = XNB_STATE_READY; in xnb_oe_state_change()
1947 if (xnbp->xnb_be_status == XNB_STATE_READY) in xnb_oe_state_change()
1948 xnb_start_connect(xnbp); in xnb_oe_state_change()
1949 mutex_exit(&xnbp->xnb_state_lock); in xnb_oe_state_change()
1955 xnbp->xnb_detachable = B_TRUE; in xnb_oe_state_change()
1965 xnbp->xnb_flavour->xf_peer_disconnected(xnbp); in xnb_oe_state_change()
1967 mutex_enter(&xnbp->xnb_tx_lock); in xnb_oe_state_change()
1968 mutex_enter(&xnbp->xnb_rx_lock); in xnb_oe_state_change()
1971 xnbp->xnb_connected = B_FALSE; in xnb_oe_state_change()
1973 mutex_exit(&xnbp->xnb_rx_lock); in xnb_oe_state_change()
1974 mutex_exit(&xnbp->xnb_tx_lock); in xnb_oe_state_change()
1985 xnbp->xnb_detachable = B_TRUE; in xnb_oe_state_change()
1999 xnb_t *xnbp = ddi_get_driver_private(dip); in xnb_hp_state_change() local
2002 ASSERT(xnbp != NULL); in xnb_hp_state_change()
2007 if (xnbp->xnb_hotplugged) in xnb_hp_state_change()
2010 if (!xnb_read_xs_config(xnbp)) in xnb_hp_state_change()
2013 if (!xnbp->xnb_flavour->xf_hotplug_connected(xnbp)) in xnb_hp_state_change()
2016 mutex_enter(&xnbp->xnb_tx_lock); in xnb_hp_state_change()
2017 mutex_enter(&xnbp->xnb_rx_lock); in xnb_hp_state_change()
2019 xnbp->xnb_hotplugged = B_TRUE; in xnb_hp_state_change()
2021 mutex_exit(&xnbp->xnb_rx_lock); in xnb_hp_state_change()
2022 mutex_exit(&xnbp->xnb_tx_lock); in xnb_hp_state_change()
2024 mutex_enter(&xnbp->xnb_state_lock); in xnb_hp_state_change()
2025 xnbp->xnb_be_status = XNB_STATE_READY; in xnb_hp_state_change()
2026 if (xnbp->xnb_fe_status == XNB_STATE_READY) in xnb_hp_state_change()
2027 xnb_start_connect(xnbp); in xnb_hp_state_change()
2028 mutex_exit(&xnbp->xnb_state_lock); in xnb_hp_state_change()
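
The two state callbacks (xnb_oe_state_change at lines 1922-1985, xnb_hp_state_change at lines 1999-2028) implement a rendezvous: the frontend reaching Connected and the hotplug script completing each mark their side READY under xnb_state_lock, and whichever arrives second calls xnb_start_connect(). The same discipline explains the paired tx/rx lock acquisitions around every flip of xnb_connected and xnb_hotplugged. A sketch of the rendezvous:

#include <sys/types.h>
#include <sys/ksynch.h>

typedef enum { FOO_STATE_INIT, FOO_STATE_READY } foo_state_t;

typedef struct foo {
	kmutex_t	f_state_lock;
	foo_state_t	f_fe_status;	/* frontend (OtherEnd) side */
	foo_state_t	f_be_status;	/* backend (hotplug) side */
} foo_t;

extern void foo_start_connect(foo_t *);

/* Called when the frontend moves to Connected. */
static void
foo_mark_fe_ready(foo_t *fp)
{
	mutex_enter(&fp->f_state_lock);
	fp->f_fe_status = FOO_STATE_READY;
	if (fp->f_be_status == FOO_STATE_READY)
		foo_start_connect(fp);
	mutex_exit(&fp->f_state_lock);
}

/* Called when the hotplug script has run. */
static void
foo_mark_be_ready(foo_t *fp)
{
	mutex_enter(&fp->f_state_lock);
	fp->f_be_status = FOO_STATE_READY;
	if (fp->f_fe_status == FOO_STATE_READY)
		foo_start_connect(fp);
	mutex_exit(&fp->f_state_lock);
}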