Lines Matching full:qs

228 #define TXQ_LOCK_ASSERT(qs) mtx_assert(&(qs)->lock, MA_OWNED) argument
229 #define TXQ_TRYLOCK(qs) mtx_trylock(&(qs)->lock) argument
230 #define TXQ_LOCK(qs) mtx_lock(&(qs)->lock) argument
231 #define TXQ_UNLOCK(qs) mtx_unlock(&(qs)->lock) argument
232 #define TXQ_RING_EMPTY(qs) drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr) argument
233 #define TXQ_RING_NEEDS_ENQUEUE(qs) \ argument
234 drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
235 #define TXQ_RING_FLUSH(qs) drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr) argument
236 #define TXQ_RING_DEQUEUE_COND(qs, func, arg) \ argument
237 drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
238 #define TXQ_RING_DEQUEUE(qs) \ argument
239 drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
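
The macros above wrap the per-qset mutex and the TXQ_ETH buf_ring, so callers pass the whole qset rather than picking out the lock or ring themselves. Below is a minimal userspace model of that shape, a sketch only: a pthread mutex stands in for mtx(9) and a counter for the drbr(9) ring the real macros reach through (qs)->txq[TXQ_ETH].txq_mr.

/*
 * Userspace model of the TXQ_* macros above (sketch, not driver code).
 */
#include <pthread.h>

struct model_qset {
	pthread_mutex_t	lock;		/* models qs->lock */
	unsigned int	ring_count;	/* models buf_ring occupancy */
};

#define	MTXQ_LOCK(qs)		pthread_mutex_lock(&(qs)->lock)
#define	MTXQ_UNLOCK(qs)		pthread_mutex_unlock(&(qs)->lock)
/* mtx_trylock() returns nonzero on success; pthread's returns 0, so invert */
#define	MTXQ_TRYLOCK(qs)	(pthread_mutex_trylock(&(qs)->lock) == 0)
#define	MTXQ_RING_EMPTY(qs)	((qs)->ring_count == 0)
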
246 static void cxgb_start_locked(struct sge_qset *qs);
254 check_pkt_coalesce(struct sge_qset *qs) in check_pkt_coalesce() argument
262 txq = &qs->txq[TXQ_ETH]; in check_pkt_coalesce()
263 sc = qs->port->adapter; in check_pkt_coalesce()
264 fill = &sc->tunq_fill[qs->idx]; in check_pkt_coalesce()
277 TXQ_RING_EMPTY(qs) && (qs->coalescing == 0)) in check_pkt_coalesce()
335 cxgb_dequeue(struct sge_qset *qs) in cxgb_dequeue() argument
341 if (check_pkt_coalesce(qs) == 0) in cxgb_dequeue()
342 return TXQ_RING_DEQUEUE(qs); in cxgb_dequeue()
347 m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci); in cxgb_dequeue()
370 reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue) in reclaim_completed_tx() argument
372 struct sge_txq *q = &qs->txq[queue]; in reclaim_completed_tx()
382 mtx_assert(&qs->lock, MA_OWNED); in reclaim_completed_tx()
384 t3_free_tx_desc(qs, reclaim, queue); in reclaim_completed_tx()
388 if (isset(&qs->txq_stopped, TXQ_ETH)) in reclaim_completed_tx()
389 clrbit(&qs->txq_stopped, TXQ_ETH); in reclaim_completed_tx()
396 cxgb_debugnet_poll_tx(struct sge_qset *qs) in cxgb_debugnet_poll_tx() argument
399 return (reclaim_completed_tx(qs, TX_RECLAIM_MAX, TXQ_ETH)); in cxgb_debugnet_poll_tx()
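
reclaim_completed_tx() above runs with the qset lock held (hence the mtx_assert), hands completed descriptors to t3_free_tx_desc(), and clears the TXQ_ETH bit in txq_stopped so a stalled queue can run again. A simplified model of that reclaim-then-unstall step, assuming C11 atomics in place of the kernel's isset()/clrbit() and counters in place of real descriptor bookkeeping:

#include <stdatomic.h>

#define	MTXQ_ETH	0	/* bit index, as the driver's TXQ_ETH */

struct model_txq {
	unsigned int processed;	/* completions reported by hardware */
	unsigned int cleaned;	/* descriptors already freed */
	unsigned int in_use;	/* descriptors still outstanding */
};

static unsigned int
model_reclaim(struct model_txq *q, atomic_uint *txq_stopped)
{
	unsigned int reclaim = q->processed - q->cleaned;

	/* the caller holds the qset lock, as the mtx_assert() above checks */
	q->cleaned += reclaim;
	q->in_use -= reclaim;

	/* freed space may let a stalled Ethernet queue run again */
	if (atomic_load(txq_stopped) & (1u << MTXQ_ETH))
		atomic_fetch_and(txq_stopped, ~(1u << MTXQ_ETH));
	return (reclaim);
}
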
674 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) in t3_update_qset_coalesce() argument
677 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); in t3_update_qset_coalesce()
678 qs->rspq.polling = 0 /* p->polling */; in t3_update_qset_coalesce()
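
t3_update_qset_coalesce() above converts the requested interval with max(p->coalesce_usecs * 10, 1U); the factor of 10 suggests the holdoff timer counts in tenths of a microsecond with a floor of one tick (an inference from the expression, not a datasheet fact). Worked out:

#include <stdio.h>

static unsigned int
holdoff_ticks(unsigned int coalesce_usecs)
{
	unsigned int t = coalesce_usecs * 10;

	return (t > 1 ? t : 1);		/* max(t, 1U), as above */
}

int
main(void)
{
	printf("5 usecs -> %u ticks\n", holdoff_ticks(5));	/* 50 */
	printf("0 usecs -> %u ticks\n", holdoff_ticks(0));	/* 1 */
	return (0);
}
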
963 struct sge_qset *qs; in sge_timer_cb() local
974 qs = &sc->sge.qs[pi->first_qset + j]; in sge_timer_cb()
975 txq = &qs->txq[0]; in sge_timer_cb()
977 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) || in sge_timer_cb()
978 (qs->fl[1].credits < qs->fl[1].size)); in sge_timer_cb()
1052 struct sge_qset *qs = arg; in sge_txq_reclaim_handler() local
1056 reclaim_completed_tx(qs, 16, i); in sge_txq_reclaim_handler()
1065 struct sge_qset *qs; in sge_timer_reclaim() local
1072 qs = &sc->sge.qs[pi->first_qset + i]; in sge_timer_reclaim()
1074 reclaim_completed_tx(qs, 16, TXQ_OFLD); in sge_timer_reclaim()
1075 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock : in sge_timer_reclaim()
1076 &sc->sge.qs[0].rspq.lock; in sge_timer_reclaim()
1082 if (qs->fl[0].credits < qs->fl[0].size - 16) in sge_timer_reclaim()
1083 __refill_fl(sc, &qs->fl[0]); in sge_timer_reclaim()
1084 if (qs->fl[1].credits < qs->fl[1].size - 16) in sge_timer_reclaim()
1085 __refill_fl(sc, &qs->fl[1]); in sge_timer_reclaim()
1087 if (status & (1 << qs->rspq.cntxt_id)) { in sge_timer_reclaim()
1088 if (qs->rspq.credits) { in sge_timer_reclaim()
1089 refill_rspq(sc, &qs->rspq, 1); in sge_timer_reclaim()
1090 qs->rspq.credits--; in sge_timer_reclaim()
1092 1 << qs->rspq.cntxt_id); in sge_timer_reclaim()
1102 * @qs: the queue set
1108 init_qset_cntxt(struct sge_qset *qs, u_int id) in init_qset_cntxt() argument
1111 qs->rspq.cntxt_id = id; in init_qset_cntxt()
1112 qs->fl[0].cntxt_id = 2 * id; in init_qset_cntxt()
1113 qs->fl[1].cntxt_id = 2 * id + 1; in init_qset_cntxt()
1114 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; in init_qset_cntxt()
1115 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; in init_qset_cntxt()
1116 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; in init_qset_cntxt()
1117 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; in init_qset_cntxt()
1118 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; in init_qset_cntxt()
1121 mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX); in init_qset_cntxt()
1122 mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX); in init_qset_cntxt()
1123 mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX); in init_qset_cntxt()
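
init_qset_cntxt() above shows a fixed id layout: one response-queue context per qset id, two free-list contexts at 2*id and 2*id+1, and egress contexts/tokens offset from per-type firmware bases. The sketch below just prints that mapping; the FW_* values here are illustrative placeholders, not the firmware's real constants.

#include <stdio.h>

enum {				/* placeholder bases (assumption) */
	FW_TUNNEL_SGEEC_START	= 0x100,
	FW_OFLD_SGEEC_START	= 0x200,
	FW_CTRL_SGEEC_START	= 0x300,
};

int
main(void)
{
	for (unsigned int id = 0; id < 3; id++)
		printf("qset %u: rspq=%u fl=[%u,%u] eth=%u ofld=%u ctrl=%u\n",
		    id, id, 2 * id, 2 * id + 1,
		    FW_TUNNEL_SGEEC_START + id, FW_OFLD_SGEEC_START + id,
		    FW_CTRL_SGEEC_START + id);
	return (0);
}
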
1352 t3_encap(struct sge_qset *qs, struct mbuf **m) in t3_encap() argument
1370 pi = qs->port; in t3_encap()
1372 txq = &qs->txq[TXQ_ETH]; in t3_encap()
1380 mtx_assert(&qs->lock, MA_OWNED); in t3_encap()
1590 cxgb_debugnet_encap(struct sge_qset *qs, struct mbuf **m) in cxgb_debugnet_encap() argument
1594 error = t3_encap(qs, m); in cxgb_debugnet_encap()
1596 check_ring_tx_db(qs->port->adapter, &qs->txq[TXQ_ETH], 1); in cxgb_debugnet_encap()
1608 struct sge_qset *qs = arg; in cxgb_tx_watchdog() local
1609 struct sge_txq *txq = &qs->txq[TXQ_ETH]; in cxgb_tx_watchdog()
1611 if (qs->coalescing != 0 && in cxgb_tx_watchdog()
1613 TXQ_RING_EMPTY(qs)) in cxgb_tx_watchdog()
1614 qs->coalescing = 0; in cxgb_tx_watchdog()
1615 else if (qs->coalescing == 0 && in cxgb_tx_watchdog()
1617 qs->coalescing = 1; in cxgb_tx_watchdog()
1618 if (TXQ_TRYLOCK(qs)) { in cxgb_tx_watchdog()
1619 qs->qs_flags |= QS_FLUSHING; in cxgb_tx_watchdog()
1620 cxgb_start_locked(qs); in cxgb_tx_watchdog()
1621 qs->qs_flags &= ~QS_FLUSHING; in cxgb_tx_watchdog()
1622 TXQ_UNLOCK(qs); in cxgb_tx_watchdog()
1624 if (if_getdrvflags(qs->port->ifp) & IFF_DRV_RUNNING) in cxgb_tx_watchdog()
1626 qs, txq->txq_watchdog.c_cpu); in cxgb_tx_watchdog()
1632 struct sge_qset *qs = arg; in cxgb_tx_timeout() local
1633 struct sge_txq *txq = &qs->txq[TXQ_ETH]; in cxgb_tx_timeout()
1635 if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3))) in cxgb_tx_timeout()
1636 qs->coalescing = 1; in cxgb_tx_timeout()
1637 if (TXQ_TRYLOCK(qs)) { in cxgb_tx_timeout()
1638 qs->qs_flags |= QS_TIMEOUT; in cxgb_tx_timeout()
1639 cxgb_start_locked(qs); in cxgb_tx_timeout()
1640 qs->qs_flags &= ~QS_TIMEOUT; in cxgb_tx_timeout()
1641 TXQ_UNLOCK(qs); in cxgb_tx_timeout()
1646 cxgb_start_locked(struct sge_qset *qs) in cxgb_start_locked() argument
1649 struct sge_txq *txq = &qs->txq[TXQ_ETH]; in cxgb_start_locked()
1650 struct port_info *pi = qs->port; in cxgb_start_locked()
1653 if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT)) in cxgb_start_locked()
1654 reclaim_completed_tx(qs, 0, TXQ_ETH); in cxgb_start_locked()
1657 TXQ_RING_FLUSH(qs); in cxgb_start_locked()
1660 TXQ_LOCK_ASSERT(qs); in cxgb_start_locked()
1661 while (!TXQ_RING_EMPTY(qs) && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) && in cxgb_start_locked()
1663 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH); in cxgb_start_locked()
1668 if ((m_head = cxgb_dequeue(qs)) == NULL) in cxgb_start_locked()
1674 if (t3_encap(qs, &m_head) || m_head == NULL) in cxgb_start_locked()
1683 if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 && in cxgb_start_locked()
1686 qs, txq->txq_timer.c_cpu); in cxgb_start_locked()
1692 cxgb_transmit_locked(if_t ifp, struct sge_qset *qs, struct mbuf *m) in cxgb_transmit_locked() argument
1694 struct port_info *pi = qs->port; in cxgb_transmit_locked()
1695 struct sge_txq *txq = &qs->txq[TXQ_ETH]; in cxgb_transmit_locked()
1700 TXQ_LOCK_ASSERT(qs); in cxgb_transmit_locked()
1709 if (check_pkt_coalesce(qs) == 0 && in cxgb_transmit_locked()
1710 !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) { in cxgb_transmit_locked()
1711 if (t3_encap(qs, &m)) { in cxgb_transmit_locked()
1729 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH); in cxgb_transmit_locked()
1730 if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok && in cxgb_transmit_locked()
1731 (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7))) in cxgb_transmit_locked()
1732 cxgb_start_locked(qs); in cxgb_transmit_locked()
1733 else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer)) in cxgb_transmit_locked()
1735 qs, txq->txq_timer.c_cpu); in cxgb_transmit_locked()
1742 struct sge_qset *qs; in cxgb_transmit() local
1756 qs = &pi->adapter->sge.qs[qidx]; in cxgb_transmit()
1758 if (TXQ_TRYLOCK(qs)) { in cxgb_transmit()
1760 error = cxgb_transmit_locked(ifp, qs, m); in cxgb_transmit()
1761 TXQ_UNLOCK(qs); in cxgb_transmit()
1763 error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m); in cxgb_transmit()
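
cxgb_transmit() above tries the qset lock: on success it transmits inline via cxgb_transmit_locked(), otherwise it parks the mbuf on the buf_ring with drbr_enqueue() for the current lock holder (or a later timer) to drain. A single-threaded userspace model of that trylock-or-enqueue shape, assuming pthreads and a plain array ring in place of mtx(9) and the lock-free, multi-producer drbr(9):

#include <pthread.h>
#include <stddef.h>

#define	MRING_SIZE	64

struct model_ring {
	void	*slots[MRING_SIZE];
	size_t	 head, tail;	/* single-threaded stand-in for buf_ring */
};

struct model_qset {
	pthread_mutex_t	  lock;
	struct model_ring ring;
};

static int
model_enqueue(struct model_ring *r, void *m)
{
	if ((r->head + 1) % MRING_SIZE == r->tail)
		return (-1);	/* ring full: reject, as drbr_enqueue() would */
	r->slots[r->head] = m;
	r->head = (r->head + 1) % MRING_SIZE;
	return (0);
}

static void
model_start_locked(struct model_qset *qs)
{
	/* stands in for cxgb_start_locked(): drain the queued backlog */
	while (qs->ring.tail != qs->ring.head)
		qs->ring.tail = (qs->ring.tail + 1) % MRING_SIZE;
}

static int
model_transmit(struct model_qset *qs, void *m)
{
	int error;

	if (pthread_mutex_trylock(&qs->lock) == 0) {
		/* uncontended: queue and drain inline while holding the lock */
		error = model_enqueue(&qs->ring, m);
		if (error == 0)
			model_start_locked(qs);
		pthread_mutex_unlock(&qs->lock);
	} else {
		/* contended: park the packet for the lock holder to drain */
		error = model_enqueue(&qs->ring, m);
	}
	return (error);
}
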
1844 struct sge_qset *qs = txq_to_qset(q, qid); in check_desc_avail() local
1846 setbit(&qs->txq_stopped, qid); in check_desc_avail()
1848 test_and_clear_bit(qid, &qs->txq_stopped)) in check_desc_avail()
1886 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m) in ctrl_xmit() argument
1890 struct sge_txq *q = &qs->txq[TXQ_CTRL]; in ctrl_xmit()
1897 TXQ_LOCK(qs); in ctrl_xmit()
1903 TXQ_UNLOCK(qs); in ctrl_xmit()
1915 TXQ_UNLOCK(qs); in ctrl_xmit()
1927 * @qs: the queue set containing the control queue
1935 struct sge_qset *qs = (struct sge_qset *)data; in restart_ctrlq() local
1936 struct sge_txq *q = &qs->txq[TXQ_CTRL]; in restart_ctrlq()
1937 adapter_t *adap = qs->port->adapter; in restart_ctrlq()
1939 TXQ_LOCK(qs); in restart_ctrlq()
1955 setbit(&qs->txq_stopped, TXQ_CTRL); in restart_ctrlq()
1958 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) in restart_ctrlq()
1962 TXQ_UNLOCK(qs); in restart_ctrlq()
1974 return ctrl_xmit(adap, &adap->sge.qs[0], m); in t3_mgmt_tx()
2066 TXQ_LOCK(&sc->sge.qs[i]); in t3_free_sge_resources()
2067 t3_free_qset(sc, &sc->sge.qs[i]); in t3_free_sge_resources()
2118 t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue) in t3_free_tx_desc() argument
2122 struct sge_txq *q = &qs->txq[queue]; in t3_free_tx_desc()
2132 mtx_assert(&qs->lock, MA_OWNED); in t3_free_tx_desc()
2263 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m) in ofld_xmit() argument
2268 struct sge_txq *q = &qs->txq[TXQ_OFLD]; in ofld_xmit()
2273 TXQ_LOCK(qs); in ofld_xmit()
2274 again: reclaim_completed_tx(qs, 16, TXQ_OFLD); in ofld_xmit()
2278 TXQ_UNLOCK(qs); in ofld_xmit()
2295 TXQ_UNLOCK(qs); in ofld_xmit()
2302 * @qs: the queue set containing the offload queue
2310 struct sge_qset *qs = data; in restart_offloadq() local
2311 struct sge_txq *q = &qs->txq[TXQ_OFLD]; in restart_offloadq()
2312 adapter_t *adap = qs->port->adapter; in restart_offloadq()
2314 TXQ_LOCK(qs); in restart_offloadq()
2322 setbit(&qs->txq_stopped, TXQ_OFLD); in restart_offloadq()
2324 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) in restart_offloadq()
2340 TXQ_UNLOCK(qs); in restart_offloadq()
2342 TXQ_LOCK(qs); in restart_offloadq()
2348 TXQ_UNLOCK(qs); in restart_offloadq()
2366 struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)]; in t3_offload_tx() local
2370 return (ctrl_xmit(sc, qs, m)); in t3_offload_tx()
2372 return (ofld_xmit(sc, qs, m)); in t3_offload_tx()
2377 restart_tx(struct sge_qset *qs) in restart_tx() argument
2379 struct adapter *sc = qs->port->adapter; in restart_tx()
2381 if (isset(&qs->txq_stopped, TXQ_OFLD) && in restart_tx()
2382 should_restart_tx(&qs->txq[TXQ_OFLD]) && in restart_tx()
2383 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) { in restart_tx()
2384 qs->txq[TXQ_OFLD].restarts++; in restart_tx()
2385 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task); in restart_tx()
2388 if (isset(&qs->txq_stopped, TXQ_CTRL) && in restart_tx()
2389 should_restart_tx(&qs->txq[TXQ_CTRL]) && in restart_tx()
2390 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) { in restart_tx()
2391 qs->txq[TXQ_CTRL].restarts++; in restart_tx()
2392 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task); in restart_tx()
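
restart_tx() above gates the resume task on test_and_clear_bit(), so even if several contexts observe a stopped queue, exactly one wins the clear and enqueues the task. A model of that handshake, assuming C11 atomics in place of the kernel's bit helpers and a function pointer in place of taskqueue(9):

#include <stdatomic.h>
#include <stdbool.h>

static bool
model_test_and_clear_bit(unsigned int bit, atomic_uint *word)
{
	/* true only for the one caller that actually clears the bit */
	return ((atomic_fetch_and(word, ~(1u << bit)) & (1u << bit)) != 0);
}

static void
model_restart(atomic_uint *txq_stopped, unsigned int qid,
    bool queue_has_room, void (*resume)(void))
{
	if ((atomic_load(txq_stopped) & (1u << qid)) &&	/* isset() */
	    queue_has_room &&			/* should_restart_tx() */
	    model_test_and_clear_bit(qid, txq_stopped))
		resume();	/* taskqueue_enqueue(sc->tq, qresume_task) */
}
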
2415 struct sge_qset *q = &sc->sge.qs[id]; in t3_sge_alloc_qset()
2660 * @qs: the qset that the SGE free list holding the packet belongs to
2673 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs, in get_packet() argument
2678 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; in get_packet()
2763 * @qs: the queue set corresponding to the response
2771 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags) in handle_rsp_cntrl_info() argument
2777 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); in handle_rsp_cntrl_info()
2781 qs->txq[TXQ_ETH].processed += credits; in handle_rsp_cntrl_info()
2785 qs->txq[TXQ_CTRL].processed += credits; in handle_rsp_cntrl_info()
2789 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); in handle_rsp_cntrl_info()
2793 qs->txq[TXQ_OFLD].processed += credits; in handle_rsp_cntrl_info()
2798 check_ring_db(adapter_t *adap, struct sge_qset *qs, in check_ring_db() argument
2807 * @qs: the queue set to which the response queue belongs
2820 process_responses(adapter_t *adap, struct sge_qset *qs, int budget) in process_responses() argument
2822 struct sge_rspq *rspq = &qs->rspq; in process_responses()
2827 int lro_enabled = qs->lro.enabled; in process_responses()
2829 struct lro_ctrl *lro_ctrl = &qs->lro.ctrl; in process_responses()
2893 eop = get_packet(adap, drop_thresh, qs, mh, r); in process_responses()
2909 handle_rsp_cntrl_info(qs, flags); in process_responses()
2915 adap->cpl_handler[opcode](qs, r, mh->mh_head); in process_responses()
2934 skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif); in process_responses()
2966 __refill_fl_lt(adap, &qs->fl[0], 32); in process_responses()
2967 __refill_fl_lt(adap, &qs->fl[1], 32); in process_responses()
2977 check_ring_db(adap, qs, sleeping); in process_responses()
2980 if (__predict_false(qs->txq_stopped > 1)) in process_responses()
2981 restart_tx(qs); in process_responses()
2983 __refill_fl_lt(adap, &qs->fl[0], 512); in process_responses()
2984 __refill_fl_lt(adap, &qs->fl[1], 512); in process_responses()
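
The process_responses() lines trace a budgeted polling loop: consume up to budget responses, top up the free lists cheaply inside the loop (__refill_fl_lt with threshold 32), then do one deeper refill (threshold 512) on the way out. Its control-flow skeleton, with the per-response work reduced to callbacks and both free lists folded into one refill hook (a sketch, not the driver's loop):

static int
model_process_responses(int budget, int (*next_response)(void),
    void (*refill_fl_lt)(unsigned int thresh))
{
	int budget_left = budget;

	while (budget_left > 0 && next_response()) {
		budget_left--;
		refill_fl_lt(32);	/* cheap in-loop top-up */
	}
	refill_fl_lt(512);		/* deeper refill after the batch */
	return (budget - budget_left);	/* responses consumed */
}
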
3012 cxgb_debugnet_poll_rx(adapter_t *adap, struct sge_qset *qs) in cxgb_debugnet_poll_rx() argument
3015 return (process_responses_gts(adap, &qs->rspq)); in cxgb_debugnet_poll_rx()
3031 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; in t3b_intr()
3048 process_responses_gts(adap, &adap->sge.qs[i].rspq); in t3b_intr()
3062 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; in t3_intr_msi()
3068 if (process_responses_gts(adap, &adap->sge.qs[i].rspq)) in t3_intr_msi()
3081 struct sge_qset *qs = data; in t3_intr_msix() local
3082 adapter_t *adap = qs->port->adapter; in t3_intr_msix()
3083 struct sge_rspq *rspq = &qs->rspq; in t3_intr_msix()
3094 struct sge_qset *qs; in t3_dump_rspq() local
3101 qs = rspq_to_qset(rspq); in t3_dump_rspq()
3117 err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data); in t3_dump_rspq()
3156 struct sge_qset *qs; in t3_dump_txq_eth() local
3164 qs = txq_to_qset(txq, TXQ_ETH); in t3_dump_txq_eth()
3181 err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data); in t3_dump_txq_eth()
3195 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx, in t3_dump_txq_eth()
3223 struct sge_qset *qs; in t3_dump_txq_ctrl() local
3230 qs = txq_to_qset(txq, TXQ_CTRL); in t3_dump_txq_ctrl()
3252 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx, in t3_dump_txq_ctrl()
3282 struct sge_qset *qs; in t3_set_coalesce_usecs() local
3305 qs = &sc->sge.qs[i]; in t3_set_coalesce_usecs()
3309 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock : in t3_set_coalesce_usecs()
3310 &sc->sge.qs[0].rspq.lock; in t3_set_coalesce_usecs()
3313 t3_update_qset_coalesce(qs, qsp); in t3_set_coalesce_usecs()
3314 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | in t3_set_coalesce_usecs()
3315 V_NEWTIMER(qs->rspq.holdoff_tmr)); in t3_set_coalesce_usecs()
3447 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j]; in t3_add_configured_sysctls() local
3453 struct sge_txq *txq = &qs->txq[TXQ_ETH]; in t3_add_configured_sysctls()
3455 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j); in t3_add_configured_sysctls()
3458 qs->namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, in t3_add_configured_sysctls()
3463 CTLFLAG_RD, &qs->fl[0].empty, 0, in t3_add_configured_sysctls()
3466 CTLFLAG_RD, &qs->fl[1].empty, 0, in t3_add_configured_sysctls()
3490 CTLFLAG_RD, &qs->rspq.size, in t3_add_configured_sysctls()
3493 CTLFLAG_RD, &qs->rspq.cidx, in t3_add_configured_sysctls()
3496 CTLFLAG_RD, &qs->rspq.credits, in t3_add_configured_sysctls()
3499 CTLFLAG_RD, &qs->rspq.starved, in t3_add_configured_sysctls()
3502 CTLFLAG_RD, &qs->rspq.phys_addr, in t3_add_configured_sysctls()
3505 CTLFLAG_RW, &qs->rspq.rspq_dump_start, in t3_add_configured_sysctls()
3508 CTLFLAG_RW, &qs->rspq.rspq_dump_count, in t3_add_configured_sysctls()
3512 &qs->rspq, 0, t3_dump_rspq, "A", in t3_add_configured_sysctls()
3516 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops, in t3_add_configured_sysctls()
3519 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len, in t3_add_configured_sysctls()
3523 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_prod, in t3_add_configured_sysctls()
3526 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_cons, in t3_add_configured_sysctls()
3530 CTLFLAG_RD, &qs->txq[TXQ_ETH].processed, in t3_add_configured_sysctls()
3551 CTLFLAG_RD, &qs->txq_stopped, in t3_add_configured_sysctls()
3557 CTLFLAG_RW, &qs->txq[TXQ_ETH].gen, in t3_add_configured_sysctls()
3566 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start, in t3_add_configured_sysctls()
3569 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count, in t3_add_configured_sysctls()
3573 &qs->txq[TXQ_ETH], 0, t3_dump_txq_eth, "A", in t3_add_configured_sysctls()
3577 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start, in t3_add_configured_sysctls()
3580 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count, in t3_add_configured_sysctls()
3584 &qs->txq[TXQ_CTRL], 0, t3_dump_txq_ctrl, "A", in t3_add_configured_sysctls()
3588 CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL); in t3_add_configured_sysctls()
3590 CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL); in t3_add_configured_sysctls()
3592 CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL); in t3_add_configured_sysctls()
3594 CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL); in t3_add_configured_sysctls()
3683 * @qs: the queue set
3692 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, in t3_get_desc() argument
3699 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size) in t3_get_desc()
3701 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc)); in t3_get_desc()
3706 if (!qs->rspq.desc || idx >= qs->rspq.size) in t3_get_desc()
3708 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc)); in t3_get_desc()
3713 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size) in t3_get_desc()
3715 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc)); in t3_get_desc()
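
t3_get_desc() above applies the same guard to every queue class: fail unless the descriptor ring is allocated and the index is in range, then copy exactly one descriptor out. That pattern reduced to a single queue class, with a placeholder descriptor type standing in for the driver's tx_desc/rsp_desc/rx_desc:

#include <string.h>

typedef struct { unsigned char bytes[16]; } desc_t;	/* placeholder */

struct model_q {
	desc_t		*desc;	/* NULL until the ring is allocated */
	unsigned int	 size;	/* number of descriptors */
};

static int
model_get_desc(const struct model_q *q, unsigned int idx, void *data)
{
	if (q->desc == NULL || idx >= q->size)
		return (-1);	/* out of range or unallocated: error */
	memcpy(data, &q->desc[idx], sizeof(desc_t));
	return (sizeof(desc_t));
}
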