Lines matching refs:kring — cross-reference hits for the kring identifier in the generic (emulated) netmap adapter, netmap_generic.c. Each entry gives the source line number, the matched line, and the enclosing function; the trailing "local" and "argument" tags mark the lines where kring is declared as a local variable or as a function parameter.

225 struct netmap_kring *kring = NULL; in generic_netmap_unregister() local
240 for_each_rx_kring(r, kring, na) { in generic_netmap_unregister()
244 mbq_safe_purge(&kring->rx_queue); in generic_netmap_unregister()
252 for_each_tx_kring(r, kring, na) { in generic_netmap_unregister()
257 mtx_lock_spin(&kring->tx_event_lock); in generic_netmap_unregister()
258 if (kring->tx_event) { in generic_netmap_unregister()
259 SET_MBUF_DESTRUCTOR(kring->tx_event, NULL, NULL); in generic_netmap_unregister()
261 kring->tx_event = NULL; in generic_netmap_unregister()
262 mtx_unlock_spin(&kring->tx_event_lock); in generic_netmap_unregister()
268 for_each_rx_kring(r, kring, na) { in generic_netmap_unregister()
269 mbq_safe_fini(&kring->rx_queue); in generic_netmap_unregister()
272 for_each_tx_kring(r, kring, na) { in generic_netmap_unregister()
273 callout_drain(&kring->tx_event_callout); in generic_netmap_unregister()
275 if (kring->tx_pool == NULL) { in generic_netmap_unregister()
280 if (kring->tx_pool[i]) { in generic_netmap_unregister()
281 m_free(kring->tx_pool[i]); in generic_netmap_unregister()
282 kring->tx_pool[i] = NULL; in generic_netmap_unregister()
285 mtx_destroy(&kring->tx_event_lock); in generic_netmap_unregister()
286 nm_os_free(kring->tx_pool); in generic_netmap_unregister()
287 kring->tx_pool = NULL; in generic_netmap_unregister()

307 struct netmap_kring *kring = NULL; in generic_netmap_register() local
333 for_each_rx_kring(r, kring, na) { in generic_netmap_register()
340 mbq_safe_init(&kring->rx_queue); in generic_netmap_register()
348 for_each_tx_kring(r, kring, na) { in generic_netmap_register()
349 kring->tx_pool = NULL; in generic_netmap_register()
351 for_each_tx_kring(r, kring, na) { in generic_netmap_register()
352 kring->tx_pool = in generic_netmap_register()
354 if (!kring->tx_pool) { in generic_netmap_register()
359 mtx_init(&kring->tx_event_lock, "tx_event_lock", in generic_netmap_register()
361 callout_init_mtx(&kring->tx_event_callout, in generic_netmap_register()
362 &kring->tx_event_lock, in generic_netmap_register()
369 for_each_tx_kring(r, kring, na) { in generic_netmap_register()
372 kring->tx_pool[i] = NULL; in generic_netmap_register()
375 kring->tx_event = NULL; in generic_netmap_register()
414 for_each_tx_kring(r, kring, na) { in generic_netmap_register()
415 mtx_destroy(&kring->tx_event_lock); in generic_netmap_register()
416 if (kring->tx_pool == NULL) { in generic_netmap_register()
419 nm_os_free(kring->tx_pool); in generic_netmap_register()
420 kring->tx_pool = NULL; in generic_netmap_register()
422 for_each_rx_kring(r, kring, na) { in generic_netmap_register()
423 mbq_safe_fini(&kring->rx_queue); in generic_netmap_register()
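The register hits above follow a common allocate-then-unwind shape: every ring's tx_pool pointer is first cleared, the pools are then allocated one ring at a time, and the error path frees only the pools that were actually set up, skipping rings whose tx_pool is still NULL. A minimal userspace sketch of that pattern, using hypothetical names rather than the netmap/FreeBSD kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: one buffer pool per TX ring; if any allocation fails,
 * free the pools that were already set up and report failure.
 * "fake_ring" and "rings_alloc" are hypothetical, not netmap types. */
struct fake_ring {
	void **tx_pool;
};

static int
rings_alloc(struct fake_ring *rings, unsigned n_rings, unsigned n_slots)
{
	unsigned r;

	for (r = 0; r < n_rings; r++)
		rings[r].tx_pool = NULL;        /* makes the unwind below safe */

	for (r = 0; r < n_rings; r++) {
		rings[r].tx_pool = calloc(n_slots, sizeof(void *));
		if (rings[r].tx_pool == NULL)
			goto free_tx_pools;
	}
	return 0;

free_tx_pools:
	for (r = 0; r < n_rings; r++) {
		if (rings[r].tx_pool == NULL)
			continue;                /* never allocated, nothing to undo */
		free(rings[r].tx_pool);
		rings[r].tx_pool = NULL;
	}
	return -1;
}

int
main(void)
{
	struct fake_ring rings[4];

	printf("rings_alloc: %d\n", rings_alloc(rings, 4, 256));
	return 0;
}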

440 struct netmap_kring *kring; in generic_mbuf_dtor() local
461 kring = na->tx_rings[r]; in generic_mbuf_dtor()
462 mtx_lock_spin(&kring->tx_event_lock); in generic_mbuf_dtor()
471 mtx_unlock_spin(&kring->tx_event_lock); in generic_mbuf_dtor()
475 if (kring->tx_event == m) { in generic_mbuf_dtor()
476 kring->tx_event = NULL; in generic_mbuf_dtor()
479 mtx_unlock_spin(&kring->tx_event_lock); in generic_mbuf_dtor()
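Taken together with the generic_set_tx_event() hits further down, the destructor lines show a small handshake: the transmit path parks one in-flight mbuf in kring->tx_event under tx_event_lock, the destructor clears that pointer when the stack finally frees the mbuf, and generic_netmap_tx_clean() reads a NULL tx_event as "the event has been consumed". A userspace analog of the handshake, assuming nothing beyond pthreads (the names below are illustrative, not the netmap API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative analog of the tx_event handshake; a pthread mutex stands
 * in for the spin mutex and the struct/function names are hypothetical. */
struct fake_kring {
	pthread_mutex_t tx_event_lock;
	void *tx_event;                 /* pending marker, NULL == consumed */
};

static void
set_tx_event(struct fake_kring *kr, void *buf)
{
	pthread_mutex_lock(&kr->tx_event_lock);
	kr->tx_event = buf;             /* like generic_set_tx_event() */
	pthread_mutex_unlock(&kr->tx_event_lock);
}

static void
buffer_freed(struct fake_kring *kr, void *buf)
{
	pthread_mutex_lock(&kr->tx_event_lock);
	if (kr->tx_event == buf)        /* like generic_mbuf_dtor() */
		kr->tx_event = NULL;
	pthread_mutex_unlock(&kr->tx_event_lock);
}

static bool
event_consumed(struct fake_kring *kr)
{
	bool done;

	pthread_mutex_lock(&kr->tx_event_lock);
	done = (kr->tx_event == NULL);  /* like generic_netmap_tx_clean() */
	pthread_mutex_unlock(&kr->tx_event_lock);
	return done;
}

int
main(void)
{
	struct fake_kring kr = { PTHREAD_MUTEX_INITIALIZER, NULL };
	int buf;

	set_tx_event(&kr, &buf);
	printf("consumed before free: %d\n", event_consumed(&kr));
	buffer_freed(&kr, &buf);
	printf("consumed after free:  %d\n", event_consumed(&kr));
	return 0;
}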

516 generic_netmap_tx_clean(struct netmap_kring *kring, int txqdisc) in generic_netmap_tx_clean() argument
518 u_int const lim = kring->nkr_num_slots - 1; in generic_netmap_tx_clean()
519 u_int nm_i = nm_next(kring->nr_hwtail, lim); in generic_netmap_tx_clean()
520 u_int hwcur = kring->nr_hwcur; in generic_netmap_tx_clean()
522 struct mbuf **tx_pool = kring->tx_pool; in generic_netmap_tx_clean()
524 nm_prdis("hwcur = %d, hwtail = %d", kring->nr_hwcur, kring->nr_hwtail); in generic_netmap_tx_clean()
551 mtx_lock_spin(&kring->tx_event_lock); in generic_netmap_tx_clean()
552 event_consumed = (kring->tx_event == NULL); in generic_netmap_tx_clean()
553 mtx_unlock_spin(&kring->tx_event_lock); in generic_netmap_tx_clean()
570 kring->nr_hwtail = nm_prev(nm_i, lim); in generic_netmap_tx_clean()
571 nm_prdis("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail); in generic_netmap_tx_clean()
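The indices in generic_netmap_tx_clean() walk the ring circularly: lim is nkr_num_slots - 1, nm_next() steps forward with wrap-around and nm_prev() steps back. The helpers are re-declared below so the sketch is self-contained; they are written to match the usual netmap semantics, not copied from the kernel headers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Ring-index helpers: lim is the last valid slot index (nkr_num_slots - 1). */
static inline uint32_t
nm_next(uint32_t i, uint32_t lim)
{
	return (i == lim) ? 0 : i + 1;
}

static inline uint32_t
nm_prev(uint32_t i, uint32_t lim)
{
	return (i == 0) ? lim : i - 1;
}

int
main(void)
{
	uint32_t num_slots = 4;                 /* hypothetical ring size */
	uint32_t lim = num_slots - 1;
	uint32_t i, n;

	assert(nm_next(lim, lim) == 0);         /* forward from the last slot wraps to 0 */
	assert(nm_prev(0, lim) == lim);         /* back from slot 0 wraps to the last slot */

	/* Walk the ring twice, the way tx_clean advances from nm_next(nr_hwtail, lim). */
	for (i = 0, n = 0; n < 2 * num_slots; n++, i = nm_next(i, lim))
		printf("slot %u\n", i);
	return 0;
}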

604 struct netmap_kring *kring = arg; in generic_tx_callout() local
606 kring->tx_event = NULL; in generic_tx_callout()
607 mtx_unlock_spin(&kring->tx_event_lock); in generic_tx_callout()
608 netmap_generic_irq(kring->na, kring->ring_id, NULL); in generic_tx_callout()

613 generic_set_tx_event(struct netmap_kring *kring, u_int hwcur) in generic_set_tx_event() argument
615 u_int lim = kring->nkr_num_slots - 1; in generic_set_tx_event()
618 u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */ in generic_set_tx_event()
642 m = kring->tx_pool[e]; in generic_set_tx_event()
648 mtx_lock_spin(&kring->tx_event_lock); in generic_set_tx_event()
649 if (kring->tx_event) { in generic_set_tx_event()
651 mtx_unlock_spin(&kring->tx_event_lock); in generic_set_tx_event()
655 SET_MBUF_DESTRUCTOR(m, generic_mbuf_dtor, kring->na); in generic_set_tx_event()
657 kring->tx_event = m; in generic_set_tx_event()
671 callout_reset_sbt_curcpu(&kring->tx_event_callout, SBT_1MS, 0, in generic_set_tx_event()
672 generic_tx_callout, kring, 0); in generic_set_tx_event()
674 mtx_unlock_spin(&kring->tx_event_lock); in generic_set_tx_event()
676 kring->tx_pool[e] = NULL; in generic_set_tx_event()

693 generic_netmap_txsync(struct netmap_kring *kring, int flags) in generic_netmap_txsync() argument
695 struct netmap_adapter *na = kring->na; in generic_netmap_txsync()
698 struct netmap_ring *ring = kring->ring; in generic_netmap_txsync()
700 u_int const lim = kring->nkr_num_slots - 1; in generic_netmap_txsync()
701 u_int const head = kring->rhead; in generic_netmap_txsync()
702 u_int ring_nr = kring->ring_id; in generic_netmap_txsync()
711 nm_i = kring->nr_hwcur; in generic_netmap_txsync()
721 if (gna->txqdisc && nm_kr_txempty(kring)) { in generic_netmap_txsync()
728 kring->nr_hwtail); in generic_netmap_txsync()
747 m = kring->tx_pool[nm_i]; in generic_netmap_txsync()
749 kring->tx_pool[nm_i] = m = in generic_netmap_txsync()
794 generic_set_tx_event(kring, nm_i); in generic_netmap_txsync()
795 if (generic_netmap_tx_clean(kring, gna->txqdisc)) { in generic_netmap_txsync()
828 kring->nr_hwcur = nm_i; in generic_netmap_txsync()
835 if (!gna->txqdisc && (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring))) { in generic_netmap_txsync()
841 generic_set_tx_event(kring, nm_i); in generic_netmap_txsync()
847 generic_netmap_tx_clean(kring, gna->txqdisc); in generic_netmap_txsync()

867 struct netmap_kring *kring; in generic_rx_handler() local
875 kring = na->rx_rings[r]; in generic_rx_handler()
877 if (kring->nr_mode == NKR_NETMAP_OFF) { in generic_rx_handler()
891 } else if (unlikely(mbq_len(&kring->rx_queue) > na->num_rx_desc)) { in generic_rx_handler()
895 mbq_safe_enqueue(&kring->rx_queue, m); in generic_rx_handler()

925 generic_netmap_rxsync(struct netmap_kring *kring, int flags) in generic_netmap_rxsync() argument
927 struct netmap_ring *ring = kring->ring; in generic_netmap_rxsync()
928 struct netmap_adapter *na = kring->na; in generic_netmap_rxsync()
931 u_int const lim = kring->nkr_num_slots - 1; in generic_netmap_rxsync()
932 u_int const head = kring->rhead; in generic_netmap_rxsync()
933 int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; in generic_netmap_rxsync()
944 return netmap_ring_reinit(kring); in generic_netmap_rxsync()
952 nm_i = kring->nr_hwcur; in generic_netmap_rxsync()
961 kring->nr_hwcur = head; in generic_netmap_rxsync()
971 nm_i = kring->nr_hwtail; /* First empty slot in the receive ring. */ in generic_netmap_rxsync()
977 avail = nm_prev(kring->nr_hwcur, lim) - nm_i; in generic_netmap_rxsync()
989 mbq_lock(&kring->rx_queue); in generic_netmap_rxsync()
991 m = mbq_peek(&kring->rx_queue); in generic_netmap_rxsync()
1003 mbq_dequeue(&kring->rx_queue); in generic_netmap_rxsync()
1020 mbq_unlock(&kring->rx_queue); in generic_netmap_rxsync()
1024 nm_i = kring->nr_hwtail; in generic_netmap_rxsync()
1043 return netmap_ring_reinit(kring); in generic_netmap_rxsync()
1059 kring->nr_hwtail = nm_i; in generic_netmap_rxsync()
1062 kring->nr_kflags &= ~NKR_PENDINTR; in generic_netmap_rxsync()
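The rxsync hits show the two halves of the operation: nr_hwcur is first advanced to rhead to release the slots the application has consumed, and mbufs from rx_queue are then copied into empty slots starting at nr_hwtail, at most up to the slot before nr_hwcur. The avail value is the usual circular-buffer free-space count; a standalone sketch of that calculation follows, with the wrap-around handled explicitly since only the subtraction itself appears in the listing:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Free receive slots between hwtail (first empty slot) and the slot just
 * before hwcur, in a ring of num_slots entries.  Sketch of the "avail"
 * computation in generic_netmap_rxsync(); not the kernel code. */
static uint32_t
rx_avail(uint32_t hwcur, uint32_t hwtail, uint32_t num_slots)
{
	uint32_t lim = num_slots - 1;
	uint32_t stop = (hwcur == 0) ? lim : hwcur - 1;   /* nm_prev(hwcur, lim) */
	int32_t avail = (int32_t)stop - (int32_t)hwtail;

	if (avail < 0)
		avail += (int32_t)num_slots;   /* hwtail has wrapped past the stop slot */
	return (uint32_t)avail;
}

int
main(void)
{
	/* Hypothetical ring of 8 slots. */
	assert(rx_avail(0, 0, 8) == 7);   /* nothing pending: all but one slot free */
	assert(rx_avail(3, 3, 8) == 7);
	assert(rx_avail(3, 2, 8) == 0);   /* hwtail == nm_prev(hwcur): no room left */
	printf("avail(hwcur=0, hwtail=0, 8 slots) = %u\n", rx_avail(0, 0, 8));
	return 0;
}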