Lines Matching refs:txr
211 struct hn_tx_ring *txr; member
670 hn_txpkt_sglist(struct hn_tx_ring *txr, struct hn_txdesc *txd) in hn_txpkt_sglist() argument
675 return (hn_nvs_send_rndis_sglist(txr->hn_chan, HN_NVS_RNDIS_MTYPE_DATA, in hn_txpkt_sglist()
676 &txd->send_ctx, txr->hn_gpa, txr->hn_gpa_cnt)); in hn_txpkt_sglist()
680 hn_txpkt_chim(struct hn_tx_ring *txr, struct hn_txdesc *txd) in hn_txpkt_chim() argument
692 return (hn_nvs_send(txr->hn_chan, VMBUS_CHANPKT_FLAG_RC, in hn_txpkt_chim()
1020 struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; in hn_set_txagg() local
1022 mtx_lock(&txr->hn_tx_lock); in hn_set_txagg()
1023 txr->hn_agg_szmax = size; in hn_set_txagg()
1024 txr->hn_agg_pktmax = pkts; in hn_set_txagg()
1025 txr->hn_agg_align = sc->hn_rndis_agg_align; in hn_set_txagg()
1026 mtx_unlock(&txr->hn_tx_lock); in hn_set_txagg()
1031 hn_get_txswq_depth(const struct hn_tx_ring *txr) in hn_get_txswq_depth() argument
1034 KASSERT(txr->hn_txdesc_cnt > 0, ("tx ring is not setup yet")); in hn_get_txswq_depth()
1035 if (hn_tx_swq_depth < txr->hn_txdesc_cnt) in hn_get_txswq_depth()
1036 return txr->hn_txdesc_cnt; in hn_get_txswq_depth()
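
A note on the hn_get_txswq_depth() hits above: the sysctl-tunable software queue depth is clamped so it can never be smaller than the ring's descriptor pool. A standalone sketch of that clamp, with hypothetical names (the real function returns the tunable hn_tx_swq_depth when it is already large enough):

    /*
     * Pick the software TX queue depth: never smaller than the
     * descriptor count, otherwise the tunable wins.
     */
    static int
    get_txswq_depth(int tunable_depth, int txdesc_cnt)
    {
            /* Mirrors the KASSERT above: the ring must be set up first. */
            if (txdesc_cnt <= 0)
                    return (-1);
            if (tunable_depth < txdesc_cnt)
                    return (txdesc_cnt);
            return (tunable_depth);
    }
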
2652 hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd, in hn_txdesc_dmamap_load() argument
2660 error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap, in hn_txdesc_dmamap_load()
2670 txr->hn_tx_collapsed++; in hn_txdesc_dmamap_load()
2672 error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, in hn_txdesc_dmamap_load()
2676 bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap, in hn_txdesc_dmamap_load()
2684 hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd) in hn_txdesc_put() argument
2717 freed = hn_txdesc_put(txr, tmp_txd); in hn_txdesc_put()
2725 hn_chim_free(txr->hn_sc, txd->chim_index); in hn_txdesc_put()
2729 bus_dmamap_sync(txr->hn_tx_data_dtag, in hn_txdesc_put()
2731 bus_dmamap_unload(txr->hn_tx_data_dtag, in hn_txdesc_put()
2743 mtx_lock_spin(&txr->hn_txlist_spin); in hn_txdesc_put()
2744 KASSERT(txr->hn_txdesc_avail >= 0 && in hn_txdesc_put()
2745 txr->hn_txdesc_avail < txr->hn_txdesc_cnt, in hn_txdesc_put()
2746 ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail)); in hn_txdesc_put()
2747 txr->hn_txdesc_avail++; in hn_txdesc_put()
2748 SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); in hn_txdesc_put()
2749 mtx_unlock_spin(&txr->hn_txlist_spin); in hn_txdesc_put()
2752 atomic_add_int(&txr->hn_txdesc_avail, 1); in hn_txdesc_put()
2754 buf_ring_enqueue(txr->hn_txdesc_br, txd); in hn_txdesc_put()
2761 hn_txdesc_get(struct hn_tx_ring *txr) in hn_txdesc_get() argument
2766 mtx_lock_spin(&txr->hn_txlist_spin); in hn_txdesc_get()
2767 txd = SLIST_FIRST(&txr->hn_txlist); in hn_txdesc_get()
2769 KASSERT(txr->hn_txdesc_avail > 0, in hn_txdesc_get()
2770 ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail)); in hn_txdesc_get()
2771 txr->hn_txdesc_avail--; in hn_txdesc_get()
2772 SLIST_REMOVE_HEAD(&txr->hn_txlist, link); in hn_txdesc_get()
2774 mtx_unlock_spin(&txr->hn_txlist_spin); in hn_txdesc_get()
2776 txd = buf_ring_dequeue_sc(txr->hn_txdesc_br); in hn_txdesc_get()
2782 atomic_subtract_int(&txr->hn_txdesc_avail, 1); in hn_txdesc_get()
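
The hn_txdesc_put()/hn_txdesc_get() hits above show two free-list strategies compiled in alternation: a debug build takes hn_txlist_spin and keeps an exact hn_txdesc_avail for the KASSERTs, while the regular build replaces the list with a lockless buf_ring(9) and only maintains the counter advisorily via atomic_add_int()/atomic_subtract_int(). A compilable userspace analogue of the locked variant, with pthread and assert standing in for mtx_lock_spin() and KASSERT (all names here are hypothetical):

    #include <assert.h>
    #include <pthread.h>
    #include <stddef.h>

    struct txdesc {
            struct txdesc   *link;          /* SLIST_ENTRY stand-in */
    };

    struct tx_ring {
            pthread_mutex_t  list_lock;     /* hn_txlist_spin stand-in */
            struct txdesc   *free_head;     /* hn_txlist stand-in */
            int              avail;         /* hn_txdesc_avail */
            int              cnt;           /* hn_txdesc_cnt */
    };

    /* Return a descriptor to the free list, as hn_txdesc_put() does. */
    static void
    txdesc_put(struct tx_ring *txr, struct txdesc *txd)
    {
            pthread_mutex_lock(&txr->list_lock);
            assert(txr->avail >= 0 && txr->avail < txr->cnt);
            txr->avail++;
            txd->link = txr->free_head;     /* SLIST_INSERT_HEAD */
            txr->free_head = txd;
            pthread_mutex_unlock(&txr->list_lock);
    }

    /* Take a descriptor, or NULL when the pool is exhausted. */
    static struct txdesc *
    txdesc_get(struct tx_ring *txr)
    {
            struct txdesc *txd;

            pthread_mutex_lock(&txr->list_lock);
            txd = txr->free_head;
            if (txd != NULL) {
                    assert(txr->avail > 0);
                    txr->avail--;
                    txr->free_head = txd->link; /* SLIST_REMOVE_HEAD */
            }
            pthread_mutex_unlock(&txr->list_lock);
            return (txd);
    }

In the non-debug build the lock and list disappear entirely; the single-consumer buf_ring makes the dequeue safe without a lock because only the owning TX ring ever calls hn_txdesc_get().
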
2824 hn_tx_ring_pending(struct hn_tx_ring *txr) in hn_tx_ring_pending() argument
2829 mtx_lock_spin(&txr->hn_txlist_spin); in hn_tx_ring_pending()
2830 if (txr->hn_txdesc_avail != txr->hn_txdesc_cnt) in hn_tx_ring_pending()
2832 mtx_unlock_spin(&txr->hn_txlist_spin); in hn_tx_ring_pending()
2834 if (!buf_ring_full(txr->hn_txdesc_br)) in hn_tx_ring_pending()
2841 hn_txeof(struct hn_tx_ring *txr) in hn_txeof() argument
2843 txr->hn_has_txeof = 0; in hn_txeof()
2844 txr->hn_txeof(txr); in hn_txeof()
2852 struct hn_tx_ring *txr; in hn_txpkt_done() local
2854 txr = txd->txr; in hn_txpkt_done()
2855 KASSERT(txr->hn_chan == chan, in hn_txpkt_done()
2857 vmbus_chan_id(chan), vmbus_chan_id(txr->hn_chan))); in hn_txpkt_done()
2859 txr->hn_has_txeof = 1; in hn_txpkt_done()
2860 hn_txdesc_put(txr, txd); in hn_txpkt_done()
2862 ++txr->hn_txdone_cnt; in hn_txpkt_done()
2863 if (txr->hn_txdone_cnt >= HN_EARLY_TXEOF_THRESH) { in hn_txpkt_done()
2864 txr->hn_txdone_cnt = 0; in hn_txpkt_done()
2865 if (txr->hn_oactive) in hn_txpkt_done()
2866 hn_txeof(txr); in hn_txpkt_done()
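
hn_txpkt_done() above batches completion work: every finished descriptor is returned to the pool right away, but the txeof handler itself only runs early after a burst of completions, and only if senders are actually blocked on the ring (hn_oactive). A standalone sketch of that throttle; the threshold value and all names are assumptions, and the stubs stand in for the real reap path:

    #define EARLY_TXEOF_THRESH 8    /* assumed; HN_EARLY_TXEOF_THRESH */

    struct tx_ring {
            int     has_txeof;
            int     txdone_cnt;
            int     oactive;        /* senders blocked waiting for space */
    };

    /* Stubs so the sketch stands alone. */
    static void txeof(struct tx_ring *txr) { txr->has_txeof = 0; }
    static void txdesc_put(struct tx_ring *txr, void *txd) { (void)txr; (void)txd; }

    static void
    txpkt_done(struct tx_ring *txr, void *txd)
    {
            txr->has_txeof = 1;
            txdesc_put(txr, txd);
            if (++txr->txdone_cnt >= EARLY_TXEOF_THRESH) {
                    txr->txdone_cnt = 0;
                    /*
                     * Reap early only when the ring is flow-controlled;
                     * otherwise hn_chan_rollup() handles it at batch end.
                     */
                    if (txr->oactive)
                            txeof(txr);
            }
    }
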
2871 hn_chan_rollup(struct hn_rx_ring *rxr, struct hn_tx_ring *txr) in hn_chan_rollup() argument
2886 if (txr == NULL || !txr->hn_has_txeof) in hn_chan_rollup()
2889 txr->hn_txdone_cnt = 0; in hn_chan_rollup()
2890 hn_txeof(txr); in hn_chan_rollup()
2934 hn_flush_txagg(if_t ifp, struct hn_tx_ring *txr) in hn_flush_txagg() argument
2940 txd = txr->hn_agg_txd; in hn_flush_txagg()
2948 pkts = txr->hn_stat_pkts; in hn_flush_txagg()
2956 error = hn_txpkt(ifp, txr, txd); in hn_flush_txagg()
2961 txr->hn_flush_failed++; in hn_flush_txagg()
2966 txr->hn_agg_txd = NULL; in hn_flush_txagg()
2967 txr->hn_agg_szleft = 0; in hn_flush_txagg()
2968 txr->hn_agg_pktleft = 0; in hn_flush_txagg()
2969 txr->hn_agg_prevpkt = NULL; in hn_flush_txagg()
2975 hn_try_txagg(if_t ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd, in hn_try_txagg() argument
2980 if (txr->hn_agg_txd != NULL) { in hn_try_txagg()
2981 if (txr->hn_agg_pktleft >= 1 && txr->hn_agg_szleft > pktsize) { in hn_try_txagg()
2982 struct hn_txdesc *agg_txd = txr->hn_agg_txd; in hn_try_txagg()
2983 struct rndis_packet_msg *pkt = txr->hn_agg_prevpkt; in hn_try_txagg()
2997 pkt->rm_len = roundup2(olen, txr->hn_agg_align); in hn_try_txagg()
3005 txr->hn_agg_prevpkt = chim; in hn_try_txagg()
3007 txr->hn_agg_pktleft--; in hn_try_txagg()
3008 txr->hn_agg_szleft -= pktsize; in hn_try_txagg()
3009 if (txr->hn_agg_szleft <= in hn_try_txagg()
3010 HN_PKTSIZE_MIN(txr->hn_agg_align)) { in hn_try_txagg()
3015 txr->hn_agg_pktleft = 0; in hn_try_txagg()
3020 hn_flush_txagg(ifp, txr); in hn_try_txagg()
3022 KASSERT(txr->hn_agg_txd == NULL, ("lingering aggregating txdesc")); in hn_try_txagg()
3024 txr->hn_tx_chimney_tried++; in hn_try_txagg()
3025 txd->chim_index = hn_chim_alloc(txr->hn_sc); in hn_try_txagg()
3028 txr->hn_tx_chimney++; in hn_try_txagg()
3030 chim = txr->hn_sc->hn_chim + in hn_try_txagg()
3031 (txd->chim_index * txr->hn_sc->hn_chim_szmax); in hn_try_txagg()
3033 if (txr->hn_agg_pktmax > 1 && in hn_try_txagg()
3034 txr->hn_agg_szmax > pktsize + HN_PKTSIZE_MIN(txr->hn_agg_align)) { in hn_try_txagg()
3035 txr->hn_agg_txd = txd; in hn_try_txagg()
3036 txr->hn_agg_pktleft = txr->hn_agg_pktmax - 1; in hn_try_txagg()
3037 txr->hn_agg_szleft = txr->hn_agg_szmax - pktsize; in hn_try_txagg()
3038 txr->hn_agg_prevpkt = chim; in hn_try_txagg()
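
hn_try_txagg() above is the heart of chimney aggregation: while an aggregating descriptor is open, later packets are appended at hn_agg_align boundaries until either the packet budget or the byte budget runs out, and the byte budget is closed preemptively once not even a minimal packet would fit. A compilable sketch of just that accounting (the alignment and minimum-size values are assumptions; RNDIS length fixup, descriptor chaining, and hn_chim_alloc() are omitted):

    #include <stdint.h>

    #define AGG_ALIGN       8                        /* assumed alignment */
    #define ROUNDUP2(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))
    #define PKTSIZE_MIN     ROUNDUP2(128, AGG_ALIGN) /* assumed floor */

    struct tx_ring {
            uint8_t *agg_buf;       /* open chimney buffer, or NULL */
            int      agg_used;      /* bytes consumed so far */
            int      agg_pktleft;   /* packets still allowed */
            int      agg_szleft;    /* bytes still allowed */
    };

    /*
     * Reserve pktsize bytes in the open aggregation; NULL tells the
     * caller to flush (hn_flush_txagg()) and send on its own path.
     */
    static uint8_t *
    agg_reserve(struct tx_ring *txr, int pktsize)
    {
            uint8_t *pos;

            if (txr->agg_buf == NULL ||
                txr->agg_pktleft < 1 || txr->agg_szleft <= pktsize)
                    return (NULL);
            pos = txr->agg_buf + ROUNDUP2(txr->agg_used, AGG_ALIGN);
            txr->agg_used = (int)(pos - txr->agg_buf) + pktsize;
            txr->agg_pktleft--;
            txr->agg_szleft -= pktsize;
            if (txr->agg_szleft <= PKTSIZE_MIN)
                    txr->agg_pktleft = 0;   /* nothing more can fit */
            return (pos);
    }
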
3048 hn_encap(if_t ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd, in hn_encap() argument
3060 pkt_size = HN_PKTSIZE(m_head, txr->hn_agg_align); in hn_encap()
3061 if (pkt_size < txr->hn_chim_size) { in hn_encap()
3062 chim = hn_try_txagg(ifp, txr, txd, pkt_size); in hn_encap()
3066 if (txr->hn_agg_txd != NULL) in hn_encap()
3067 hn_flush_txagg(ifp, txr); in hn_encap()
3082 if (txr->hn_tx_flags & HN_TX_FLAG_HASHVAL) { in hn_encap()
3101 *pi_data = txr->hn_tx_idx; in hn_encap()
3135 } else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) { in hn_encap()
3171 if (txr->hn_agg_txd != NULL) { in hn_encap()
3172 tgt_txd = txr->hn_agg_txd; in hn_encap()
3187 txr->hn_gpa_cnt = 0; in hn_encap()
3188 txr->hn_sendpkt = hn_txpkt_chim; in hn_encap()
3192 KASSERT(txr->hn_agg_txd == NULL, ("aggregating sglist txdesc")); in hn_encap()
3197 error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs); in hn_encap()
3207 freed = hn_txdesc_put(txr, txd); in hn_encap()
3211 txr->hn_txdma_failed++; in hn_encap()
3218 txr->hn_gpa_cnt = nsegs + 1; in hn_encap()
3221 txr->hn_gpa[0].gpa_page = atop(txd->rndis_pkt_paddr); in hn_encap()
3222 txr->hn_gpa[0].gpa_ofs = txd->rndis_pkt_paddr & PAGE_MASK; in hn_encap()
3223 txr->hn_gpa[0].gpa_len = pkt_hlen; in hn_encap()
3230 struct vmbus_gpa *gpa = &txr->hn_gpa[i + 1]; in hn_encap()
3239 txr->hn_sendpkt = hn_txpkt_sglist; in hn_encap()
3247 txr->hn_stat_pkts++; in hn_encap()
3248 txr->hn_stat_size += m_head->m_pkthdr.len; in hn_encap()
3250 txr->hn_stat_mcasts++; in hn_encap()
3261 hn_txpkt(if_t ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd) in hn_txpkt() argument
3274 error = txr->hn_sendpkt(txr, txd); in hn_txpkt()
3284 if_inc_counter(ifp, IFCOUNTER_OPACKETS, txr->hn_stat_pkts); in hn_txpkt()
3290 txr->hn_stat_size); in hn_txpkt()
3291 if (txr->hn_stat_mcasts != 0) { in hn_txpkt()
3293 txr->hn_stat_mcasts); in hn_txpkt()
3296 txr->hn_pkts += txr->hn_stat_pkts; in hn_txpkt()
3297 txr->hn_sends++; in hn_txpkt()
3300 hn_txdesc_put(txr, txd); in hn_txpkt()
3312 txr->hn_has_txeof = 1; in hn_txpkt()
3314 txr->hn_send_failed++; in hn_txpkt()
3332 freed = hn_txdesc_put(txr, txd); in hn_txpkt()
3336 txr->hn_send_failed++; in hn_txpkt()
3340 txr->hn_stat_size = 0; in hn_txpkt()
3341 txr->hn_stat_pkts = 0; in hn_txpkt()
3342 txr->hn_stat_mcasts = 0; in hn_txpkt()
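
Note the stage-then-commit pattern closing hn_txpkt() above: hn_encap() accumulates hn_stat_pkts/hn_stat_size/hn_stat_mcasts for the batch, hn_txpkt() folds them into the interface counters only once the channel has accepted the send, and the staging fields are cleared on every exit so a retried batch is never double-counted. Reduced to a sketch (if_inc_counter() replaced by a plain struct; names hypothetical):

    struct if_counters {
            unsigned long opackets, obytes, omcasts;
    };

    struct tx_ring {
            unsigned long stat_pkts, stat_size, stat_mcasts; /* staging */
    };

    static void
    txpkt_commit_stats(struct if_counters *ifc, struct tx_ring *txr, int error)
    {
            if (error == 0) {       /* send accepted by the channel */
                    ifc->opackets += txr->stat_pkts;
                    ifc->obytes   += txr->stat_size;
                    ifc->omcasts  += txr->stat_mcasts;
            }
            /* Always reset staging, success or failure. */
            txr->stat_pkts = txr->stat_size = txr->stat_mcasts = 0;
    }
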
4309 struct hn_tx_ring *txr; in hn_tx_stat_ulong_sysctl() local
4314 txr = &sc->hn_tx_ring[i]; in hn_tx_stat_ulong_sysctl()
4315 stat += *((u_long *)((uint8_t *)txr + ofs)); in hn_tx_stat_ulong_sysctl()
4324 txr = &sc->hn_tx_ring[i]; in hn_tx_stat_ulong_sysctl()
4325 *((u_long *)((uint8_t *)txr + ofs)) = 0; in hn_tx_stat_ulong_sysctl()
4335 struct hn_tx_ring *txr; in hn_tx_conf_int_sysctl() local
4337 txr = &sc->hn_tx_ring[0]; in hn_tx_conf_int_sysctl()
4338 conf = *((int *)((uint8_t *)txr + ofs)); in hn_tx_conf_int_sysctl()
4346 txr = &sc->hn_tx_ring[i]; in hn_tx_conf_int_sysctl()
4347 *((int *)((uint8_t *)txr + ofs)) = conf; in hn_tx_conf_int_sysctl()
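
Neither sysctl handler above names a field: both walk the TX rings and touch a value at a caller-supplied byte offset (ofs), so one read-and-reset handler serves every per-ring u_long statistic and one setter serves every int tunable. The idiom, reduced to standard C with offsetof():

    #include <stddef.h>

    struct tx_ring {
            unsigned long   no_txdescs;
            unsigned long   send_failed;
            int             direct_tx_size;
    };

    /* Sum one u_long field, located by byte offset, across all rings. */
    static unsigned long
    tx_stat_sum(const struct tx_ring *rings, int nrings, size_t ofs)
    {
            unsigned long stat = 0;

            for (int i = 0; i < nrings; i++)
                    stat += *(const unsigned long *)
                        ((const char *)&rings[i] + ofs);
            return (stat);
    }

    /* e.g. tx_stat_sum(rings, n, offsetof(struct tx_ring, send_failed)) */
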
5169 struct hn_tx_ring *txr = &sc->hn_tx_ring[id]; in hn_tx_ring_create() local
5174 txr->hn_sc = sc; in hn_tx_ring_create()
5175 txr->hn_tx_idx = id; in hn_tx_ring_create()
5178 mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN); in hn_tx_ring_create()
5180 mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF); in hn_tx_ring_create()
5182 txr->hn_txdesc_cnt = HN_TX_DESC_CNT; in hn_tx_ring_create()
5183 txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt, in hn_tx_ring_create()
5186 SLIST_INIT(&txr->hn_txlist); in hn_tx_ring_create()
5188 txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_DEVBUF, in hn_tx_ring_create()
5189 M_WAITOK, &txr->hn_tx_lock); in hn_tx_ring_create()
5193 txr->hn_tx_taskq = VMBUS_GET_EVENT_TASKQ( in hn_tx_ring_create()
5196 txr->hn_tx_taskq = sc->hn_tx_taskqs[id % hn_tx_taskq_cnt]; in hn_tx_ring_create()
5201 txr->hn_txeof = hn_start_txeof; in hn_tx_ring_create()
5202 TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr); in hn_tx_ring_create()
5203 TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr); in hn_tx_ring_create()
5209 txr->hn_txeof = hn_xmit_txeof; in hn_tx_ring_create()
5210 TASK_INIT(&txr->hn_tx_task, 0, hn_xmit_taskfunc, txr); in hn_tx_ring_create()
5211 TASK_INIT(&txr->hn_txeof_task, 0, hn_xmit_txeof_taskfunc, txr); in hn_tx_ring_create()
5213 br_depth = hn_get_txswq_depth(txr); in hn_tx_ring_create()
5214 txr->hn_mbuf_br = buf_ring_alloc(br_depth, M_DEVBUF, in hn_tx_ring_create()
5215 M_WAITOK, &txr->hn_tx_lock); in hn_tx_ring_create()
5218 txr->hn_direct_tx_size = hn_direct_tx_size; in hn_tx_ring_create()
5224 txr->hn_sched_tx = 1; in hn_tx_ring_create()
5241 &txr->hn_tx_rndis_dtag); in hn_tx_ring_create()
5260 &txr->hn_tx_data_dtag); in hn_tx_ring_create()
5266 for (i = 0; i < txr->hn_txdesc_cnt; ++i) { in hn_tx_ring_create()
5267 struct hn_txdesc *txd = &txr->hn_txdesc[i]; in hn_tx_ring_create()
5269 txd->txr = txr; in hn_tx_ring_create()
5276 error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag, in hn_tx_ring_create()
5286 error = bus_dmamap_load(txr->hn_tx_rndis_dtag, in hn_tx_ring_create()
5294 bus_dmamem_free(txr->hn_tx_rndis_dtag, in hn_tx_ring_create()
5300 error = bus_dmamap_create(txr->hn_tx_data_dtag, 0, in hn_tx_ring_create()
5305 bus_dmamap_unload(txr->hn_tx_rndis_dtag, in hn_tx_ring_create()
5307 bus_dmamem_free(txr->hn_tx_rndis_dtag, in hn_tx_ring_create()
5315 SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); in hn_tx_ring_create()
5317 buf_ring_enqueue(txr->hn_txdesc_br, txd); in hn_tx_ring_create()
5320 txr->hn_txdesc_avail = txr->hn_txdesc_cnt; in hn_tx_ring_create()
5335 txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, in hn_tx_ring_create()
5338 if (txr->hn_tx_sysctl_tree != NULL) { in hn_tx_ring_create()
5339 child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree); in hn_tx_ring_create()
5343 CTLFLAG_RD, &txr->hn_txdesc_avail, 0, in hn_tx_ring_create()
5351 CTLFLAG_RD, &txr->hn_oactive, 0, in hn_tx_ring_create()
5355 CTLFLAG_RW | CTLFLAG_STATS, &txr->hn_pkts, in hn_tx_ring_create()
5358 CTLFLAG_RW | CTLFLAG_STATS, &txr->hn_sends, in hn_tx_ring_create()
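
The per-descriptor loop in hn_tx_ring_create() above acquires several resources per descriptor (RNDIS DMA memory, a loaded map for it, a data DMA map) and, on failure, releases only what the current iteration already holds before bailing out; fully built descriptors are left for the regular teardown path below. A malloc-based analogue of that partial-unwind shape (the bus_dma calls are replaced by stand-ins, so this shows only the control flow):

    #include <stdlib.h>

    struct txdesc {
            void    *rndis_pkt;     /* bus_dmamem_alloc() stand-in */
            void    *data_dmap;     /* bus_dmamap_create() stand-in */
    };

    /* Set up one descriptor; on failure undo this iteration only. */
    static int
    txdesc_setup(struct txdesc *txd)
    {
            txd->rndis_pkt = malloc(512);
            if (txd->rndis_pkt == NULL)
                    return (-1);
            txd->data_dmap = malloc(64);
            if (txd->data_dmap == NULL) {
                    free(txd->rndis_pkt);   /* unwind this step */
                    txd->rndis_pkt = NULL;
                    return (-1);
            }
            return (0);     /* caller links txd onto the free list */
    }
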
5369 struct hn_tx_ring *txr = txd->txr; in hn_txdesc_dmamap_destroy() local
5374 bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_pkt_dmap); in hn_txdesc_dmamap_destroy()
5375 bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_pkt, in hn_txdesc_dmamap_destroy()
5377 bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap); in hn_txdesc_dmamap_destroy()
5381 hn_txdesc_gc(struct hn_tx_ring *txr, struct hn_txdesc *txd) in hn_txdesc_gc() argument
5391 freed = hn_txdesc_put(txr, txd); in hn_txdesc_gc()
5397 hn_tx_ring_destroy(struct hn_tx_ring *txr) in hn_tx_ring_destroy() argument
5401 if (txr->hn_txdesc == NULL) in hn_tx_ring_destroy()
5414 for (i = 0; i < txr->hn_txdesc_cnt; ++i) in hn_tx_ring_destroy()
5415 hn_txdesc_gc(txr, &txr->hn_txdesc[i]); in hn_tx_ring_destroy()
5416 for (i = 0; i < txr->hn_txdesc_cnt; ++i) in hn_tx_ring_destroy()
5417 hn_txdesc_dmamap_destroy(&txr->hn_txdesc[i]); in hn_tx_ring_destroy()
5419 if (txr->hn_tx_data_dtag != NULL) in hn_tx_ring_destroy()
5420 bus_dma_tag_destroy(txr->hn_tx_data_dtag); in hn_tx_ring_destroy()
5421 if (txr->hn_tx_rndis_dtag != NULL) in hn_tx_ring_destroy()
5422 bus_dma_tag_destroy(txr->hn_tx_rndis_dtag); in hn_tx_ring_destroy()
5425 buf_ring_free(txr->hn_txdesc_br, M_DEVBUF); in hn_tx_ring_destroy()
5428 free(txr->hn_txdesc, M_DEVBUF); in hn_tx_ring_destroy()
5429 txr->hn_txdesc = NULL; in hn_tx_ring_destroy()
5431 if (txr->hn_mbuf_br != NULL) in hn_tx_ring_destroy()
5432 buf_ring_free(txr->hn_mbuf_br, M_DEVBUF); in hn_tx_ring_destroy()
5435 mtx_destroy(&txr->hn_txlist_spin); in hn_tx_ring_destroy()
5437 mtx_destroy(&txr->hn_tx_lock); in hn_tx_ring_destroy()
5675 struct hn_tx_ring *txr = xtxr; in hn_start_taskfunc() local
5677 mtx_lock(&txr->hn_tx_lock); in hn_start_taskfunc()
5678 hn_start_locked(txr, 0); in hn_start_taskfunc()
5679 mtx_unlock(&txr->hn_tx_lock); in hn_start_taskfunc()
5683 hn_start_locked(struct hn_tx_ring *txr, int len) in hn_start_locked() argument
5685 struct hn_softc *sc = txr->hn_sc; in hn_start_locked()
5691 KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring")); in hn_start_locked()
5692 mtx_assert(&txr->hn_tx_lock, MA_OWNED); in hn_start_locked()
5693 KASSERT(txr->hn_agg_txd == NULL, ("lingering aggregating txdesc")); in hn_start_locked()
5695 if (__predict_false(txr->hn_suspended)) in hn_start_locked()
5739 txd = hn_txdesc_get(txr); in hn_start_locked()
5741 txr->hn_no_txdescs++; in hn_start_locked()
5747 error = hn_encap(ifp, txr, txd, &m_head); in hn_start_locked()
5750 KASSERT(txr->hn_agg_txd == NULL, in hn_start_locked()
5755 if (txr->hn_agg_pktleft == 0) { in hn_start_locked()
5756 if (txr->hn_agg_txd != NULL) { in hn_start_locked()
5759 error = hn_flush_txagg(ifp, txr); in hn_start_locked()
5767 error = hn_txpkt(ifp, txr, txd); in hn_start_locked()
5779 KASSERT(txr->hn_agg_txd != NULL, in hn_start_locked()
5788 if (txr->hn_agg_txd != NULL) in hn_start_locked()
5789 hn_flush_txagg(ifp, txr); in hn_start_locked()
5797 struct hn_tx_ring *txr = &sc->hn_tx_ring[0]; in hn_start() local
5799 if (txr->hn_sched_tx) in hn_start()
5802 if (mtx_trylock(&txr->hn_tx_lock)) { in hn_start()
5805 sched = hn_start_locked(txr, txr->hn_direct_tx_size); in hn_start()
5806 mtx_unlock(&txr->hn_tx_lock); in hn_start()
5811 taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); in hn_start()
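
hn_start() above shows the driver's recurring dispatch idiom (it reappears in hn_start_txeof(), hn_transmit(), and hn_xmit_txeof()): transmit inline when the ring lock is uncontended, and fall back to the per-ring taskqueue when the lock is busy, when hn_sched_tx forces deferral, or when the inline pass asks to continue beyond its direct_tx_size budget. A pthread-based analogue (stubs stand in for hn_start_locked() and taskqueue_enqueue()):

    #include <pthread.h>

    struct tx_ring {
            pthread_mutex_t tx_lock;
            int             direct_tx_size;
    };

    /* Stubs so the sketch stands alone. */
    static int  start_locked(struct tx_ring *txr, int len) { (void)txr; (void)len; return (0); }
    static void sched_tx_task(struct tx_ring *txr) { (void)txr; }

    static void
    start(struct tx_ring *txr, int sched_tx)
    {
            if (!sched_tx && pthread_mutex_trylock(&txr->tx_lock) == 0) {
                    /* Inline path, bounded by direct_tx_size. */
                    int sched = start_locked(txr, txr->direct_tx_size);

                    pthread_mutex_unlock(&txr->tx_lock);
                    if (!sched)
                            return;
            }
            sched_tx_task(txr);     /* deferred path */
    }
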
5817 struct hn_tx_ring *txr = xtxr; in hn_start_txeof_taskfunc() local
5819 mtx_lock(&txr->hn_tx_lock); in hn_start_txeof_taskfunc()
5820 if_setdrvflagbits(txr->hn_sc->hn_ifp, 0, IFF_DRV_OACTIVE); in hn_start_txeof_taskfunc()
5821 hn_start_locked(txr, 0); in hn_start_txeof_taskfunc()
5822 mtx_unlock(&txr->hn_tx_lock); in hn_start_txeof_taskfunc()
5826 hn_start_txeof(struct hn_tx_ring *txr) in hn_start_txeof() argument
5828 struct hn_softc *sc = txr->hn_sc; in hn_start_txeof()
5831 KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring")); in hn_start_txeof()
5833 if (txr->hn_sched_tx) in hn_start_txeof()
5836 if (mtx_trylock(&txr->hn_tx_lock)) { in hn_start_txeof()
5840 sched = hn_start_locked(txr, txr->hn_direct_tx_size); in hn_start_txeof()
5841 mtx_unlock(&txr->hn_tx_lock); in hn_start_txeof()
5843 taskqueue_enqueue(txr->hn_tx_taskq, in hn_start_txeof()
5844 &txr->hn_tx_task); in hn_start_txeof()
5855 taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task); in hn_start_txeof()
5862 hn_xmit(struct hn_tx_ring *txr, int len) in hn_xmit() argument
5864 struct hn_softc *sc = txr->hn_sc; in hn_xmit()
5869 mtx_assert(&txr->hn_tx_lock, MA_OWNED); in hn_xmit()
5874 KASSERT(txr->hn_agg_txd == NULL, ("lingering aggregating txdesc")); in hn_xmit()
5876 if (__predict_false(txr->hn_suspended)) in hn_xmit()
5879 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || txr->hn_oactive) in hn_xmit()
5882 while ((m_head = drbr_peek(ifp, txr->hn_mbuf_br)) != NULL) { in hn_xmit()
5892 drbr_putback(ifp, txr->hn_mbuf_br, m_head); in hn_xmit()
5897 txd = hn_txdesc_get(txr); in hn_xmit()
5899 txr->hn_no_txdescs++; in hn_xmit()
5900 drbr_putback(ifp, txr->hn_mbuf_br, m_head); in hn_xmit()
5901 txr->hn_oactive = 1; in hn_xmit()
5905 error = hn_encap(ifp, txr, txd, &m_head); in hn_xmit()
5908 KASSERT(txr->hn_agg_txd == NULL, in hn_xmit()
5910 drbr_advance(ifp, txr->hn_mbuf_br); in hn_xmit()
5914 if (txr->hn_agg_pktleft == 0) { in hn_xmit()
5915 if (txr->hn_agg_txd != NULL) { in hn_xmit()
5918 error = hn_flush_txagg(ifp, txr); in hn_xmit()
5920 txr->hn_oactive = 1; in hn_xmit()
5925 error = hn_txpkt(ifp, txr, txd); in hn_xmit()
5928 drbr_putback(ifp, txr->hn_mbuf_br, in hn_xmit()
5930 txr->hn_oactive = 1; in hn_xmit()
5937 KASSERT(txr->hn_agg_txd != NULL, in hn_xmit()
5945 drbr_advance(ifp, txr->hn_mbuf_br); in hn_xmit()
5949 if (txr->hn_agg_txd != NULL) in hn_xmit()
5950 hn_flush_txagg(ifp, txr); in hn_xmit()
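
The hn_xmit() hits above follow the drbr(9) discipline: the head mbuf is only peeked, and the loop advances past it solely after the send path accepts it; any refusal (over-length packet, no descriptor, flush or send failure) puts it back at the head, raises hn_oactive, and stops the loop until a completion clears the backpressure. A reduced analogue with an explicit FIFO (send_pkt() is a stub standing in for hn_encap() plus hn_txpkt()):

    struct pktq {
            void            *slot[64];
            unsigned         head, tail;    /* head == tail means empty */
    };

    static void *q_peek(struct pktq *q)
    { return (q->head == q->tail ? NULL : q->slot[q->head % 64]); }
    static void q_advance(struct pktq *q) { q->head++; }

    struct tx_ring { int oactive; };
    static int send_pkt(struct tx_ring *txr, void *pkt)
    { (void)txr; (void)pkt; return (0); }   /* stub */

    static void
    xmit(struct tx_ring *txr, struct pktq *q)
    {
            void *pkt;

            while (!txr->oactive && (pkt = q_peek(q)) != NULL) {
                    if (send_pkt(txr, pkt) != 0) {
                            /*
                             * "putback": we never advanced past pkt; a TX
                             * completion clears oactive and retries.
                             */
                            txr->oactive = 1;
                            break;
                    }
                    q_advance(q);   /* drbr_advance(): consumed */
            }
    }

One detail the sketch hides: drbr_putback() also re-stores the mbuf pointer, since hn_encap() may have replaced the chain during defragmentation.
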
5958 struct hn_tx_ring *txr; in hn_transmit() local
6070 txr = &sc->hn_tx_ring[idx]; in hn_transmit()
6072 error = drbr_enqueue(ifp, txr->hn_mbuf_br, m); in hn_transmit()
6078 if (txr->hn_oactive) in hn_transmit()
6081 if (txr->hn_sched_tx) in hn_transmit()
6084 if (mtx_trylock(&txr->hn_tx_lock)) { in hn_transmit()
6087 sched = hn_xmit(txr, txr->hn_direct_tx_size); in hn_transmit()
6088 mtx_unlock(&txr->hn_tx_lock); in hn_transmit()
6093 taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); in hn_transmit()
6098 hn_tx_ring_qflush(struct hn_tx_ring *txr) in hn_tx_ring_qflush() argument
6102 mtx_lock(&txr->hn_tx_lock); in hn_tx_ring_qflush()
6103 while ((m = buf_ring_dequeue_sc(txr->hn_mbuf_br)) != NULL) in hn_tx_ring_qflush()
6105 mtx_unlock(&txr->hn_tx_lock); in hn_tx_ring_qflush()
6126 hn_xmit_txeof(struct hn_tx_ring *txr) in hn_xmit_txeof() argument
6129 if (txr->hn_sched_tx) in hn_xmit_txeof()
6132 if (mtx_trylock(&txr->hn_tx_lock)) { in hn_xmit_txeof()
6135 txr->hn_oactive = 0; in hn_xmit_txeof()
6136 sched = hn_xmit(txr, txr->hn_direct_tx_size); in hn_xmit_txeof()
6137 mtx_unlock(&txr->hn_tx_lock); in hn_xmit_txeof()
6139 taskqueue_enqueue(txr->hn_tx_taskq, in hn_xmit_txeof()
6140 &txr->hn_tx_task); in hn_xmit_txeof()
6150 txr->hn_oactive = 0; in hn_xmit_txeof()
6151 taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task); in hn_xmit_txeof()
6158 struct hn_tx_ring *txr = xtxr; in hn_xmit_taskfunc() local
6160 mtx_lock(&txr->hn_tx_lock); in hn_xmit_taskfunc()
6161 hn_xmit(txr, 0); in hn_xmit_taskfunc()
6162 mtx_unlock(&txr->hn_tx_lock); in hn_xmit_taskfunc()
6168 struct hn_tx_ring *txr = xtxr; in hn_xmit_txeof_taskfunc() local
6170 mtx_lock(&txr->hn_tx_lock); in hn_xmit_txeof_taskfunc()
6171 txr->hn_oactive = 0; in hn_xmit_txeof_taskfunc()
6172 hn_xmit(txr, 0); in hn_xmit_txeof_taskfunc()
6173 mtx_unlock(&txr->hn_tx_lock); in hn_xmit_txeof_taskfunc()
6181 struct hn_tx_ring *txr = NULL; in hn_chan_attach() local
6204 txr = &sc->hn_tx_ring[idx]; in hn_chan_attach()
6205 KASSERT((txr->hn_tx_flags & HN_TX_FLAG_ATTACHED) == 0, in hn_chan_attach()
6207 txr->hn_tx_flags |= HN_TX_FLAG_ATTACHED; in hn_chan_attach()
6209 txr->hn_chan = chan; in hn_chan_attach()
6260 struct hn_tx_ring *txr = &sc->hn_tx_ring[idx]; in hn_chan_detach() local
6262 KASSERT((txr->hn_tx_flags & HN_TX_FLAG_ATTACHED), in hn_chan_detach()
6264 txr->hn_tx_flags &= ~HN_TX_FLAG_ATTACHED; in hn_chan_detach()
6779 struct hn_tx_ring *txr; in hn_suspend_data() local
6788 txr = &sc->hn_tx_ring[i]; in hn_suspend_data()
6790 mtx_lock(&txr->hn_tx_lock); in hn_suspend_data()
6791 txr->hn_suspended = 1; in hn_suspend_data()
6792 mtx_unlock(&txr->hn_tx_lock); in hn_suspend_data()
6802 while (hn_tx_ring_pending(txr) && in hn_suspend_data()
6825 txr = &sc->hn_tx_ring[i]; in hn_suspend_data()
6827 taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task); in hn_suspend_data()
6828 taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task); in hn_suspend_data()
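
The hn_suspend_data() hits sketch a three-step quiesce: mark each ring suspended under hn_tx_lock so new transmits bail out early, poll hn_tx_ring_pending() until every descriptor has been returned, then taskqueue_drain() both deferred tasks so nothing re-enters the ring afterwards. The shape of that sequence, condensed (the sleep and drain primitives are stand-in stubs):

    #include <pthread.h>

    struct tx_ring {
            pthread_mutex_t tx_lock;
            int             suspended;
    };

    /* Stubs so the sketch stands alone. */
    static int  ring_pending(struct tx_ring *txr) { (void)txr; return (0); }
    static void drain_tasks(struct tx_ring *txr) { (void)txr; }
    static void msleep_ms(int ms) { (void)ms; }

    static void
    suspend_ring(struct tx_ring *txr)
    {
            pthread_mutex_lock(&txr->tx_lock);
            txr->suspended = 1;     /* hn_start_locked()/hn_xmit() now bail */
            pthread_mutex_unlock(&txr->tx_lock);

            while (ring_pending(txr))       /* in-flight sends drain back */
                    msleep_ms(100);
            drain_tasks(txr);       /* no deferred work may re-enter */
    }
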
6888 struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; in hn_resume_tx() local
6890 mtx_lock(&txr->hn_tx_lock); in hn_resume_tx()
6891 txr->hn_suspended = 0; in hn_resume_tx()
6892 mtx_unlock(&txr->hn_tx_lock); in hn_resume_tx()
6931 struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; in hn_resume_data() local
6937 taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task); in hn_resume_data()