Lines matching refs:sidx — every reference to the sidx field, the "status page index" (so named by the sysctl description at 4440): the number of usable descriptors in a queue, with the hardware status page sitting at desc[sidx].

1465 if (__predict_false(++iq->cidx == iq->sidx)) { in service_iq()
1560 if (__predict_false(cidx == fl->sidx)) in move_to_next_rxbuf()
1625 if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) { in service_iq_fl()
1675 if (__predict_false(++iq->cidx == iq->sidx)) { in service_iq_fl()
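These first hits (1465, 1560, 1675) are the ingress-side consumer path: sidx is one past the last usable descriptor, so incrementing cidx into it means the ring has wrapped. A minimal sketch of the idiom, with illustrative names (the driver also flips a generation bit on each wrap so stale entries stay detectable):

struct iq_ring {
	unsigned int cidx;	/* software consumer index */
	unsigned int sidx;	/* status page index == usable entries */
	unsigned int gen;	/* generation bit, toggled on each wrap */
};

static void
iq_advance(struct iq_ring *iq)
{
	if (++iq->cidx == iq->sidx) {	/* ran into the status page */
		iq->cidx = 0;
		iq->gen ^= 1;
	}
}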
2167 available = eq->sidx - 1; in drain_wrq_wr_list()
2169 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in drain_wrq_wr_list()
2177 if (__predict_true(eq->sidx - eq->pidx > n)) { in drain_wrq_wr_list()
2182 int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; in drain_wrq_wr_list()
2189 eq->pidx = n - (eq->sidx - eq->pidx); in drain_wrq_wr_list()
2193 if (available < eq->sidx / 4 && in drain_wrq_wr_list()
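The drain_wrq_wr_list hits show the two egress-ring staples. First, free-space accounting (2167-2169): head == tail is ambiguous on a ring, so one slot is always kept unused and the empty case is special-cased. Second, a work request that runs past desc[sidx - 1] is copied in two pieces (2177-2189); the quarter-ring check at 2193 (and again at 3012) is where the driver starts asking the hardware for credit updates. A sketch, under the assumption that IDXDIFF(head, tail, wrap) is the forward ring distance from tail to head (consistent with every use in this listing; the real macro is defined elsewhere in the driver):

#include <string.h>

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

#define ESIZE 64	/* stand-in for EQ_ESIZE */

/* Free descriptors, minus the one reserved slot (cf. 2167-2169). */
static unsigned int
eq_avail(unsigned int cidx, unsigned int pidx, unsigned int sidx)
{
	return (cidx == pidx ? sidx - 1 : IDXDIFF(cidx, pidx, sidx) - 1);
}

/* Copy an n-descriptor WR that may wrap past the end (cf. 2177-2189). */
static void
eq_copy_wr(char *desc, unsigned int *pidx, unsigned int sidx,
    const char *wr, unsigned int n)
{
	if (sidx - *pidx > n) {		/* fits contiguously */
		memcpy(&desc[*pidx * ESIZE], wr, (size_t)n * ESIZE);
		*pidx += n;
	} else {			/* split around the wrap */
		unsigned int first = sidx - *pidx;

		memcpy(&desc[*pidx * ESIZE], wr, (size_t)first * ESIZE);
		memcpy(desc, wr + (size_t)first * ESIZE,
		    (size_t)(n - first) * ESIZE);
		*pidx = n - first;
	}
}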
2945 available = eq->sidx - 1; in start_wrq_wr()
2947 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in start_wrq_wr()
2956 IDXINCR(eq->pidx, ndesc, eq->sidx); in start_wrq_wr()
2957 if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { in start_wrq_wr()
2984 int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; in commit_wrq_wr()
2996 MPASS(pidx >= 0 && pidx < eq->sidx); in commit_wrq_wr()
3011 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in commit_wrq_wr()
3012 if (available < eq->sidx / 4 && in commit_wrq_wr()
3026 IDXINCR(eq->dbidx, ndesc, eq->sidx); in commit_wrq_wr()
3028 MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); in commit_wrq_wr()
3033 MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); in commit_wrq_wr()
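The start_wrq_wr/commit_wrq_wr pairs show the producer side: pidx is advanced by ndesc modulo sidx at reservation time (2956), 2957 detects a reservation that wraps (in which case the WR is staged elsewhere and copied back in two pieces, 2984), and on commit dbidx chases pidx by the same ndesc (3026). The MPASS assertions at 3028/3033 check that queued commits cover contiguous, back-to-back descriptor ranges. A sketch, assuming IDXINCR(idx, incr, wrap) advances idx by incr modulo wrap (again consistent with every use above):

#define IDXINCR(idx, incr, wrap) do {					\
	(idx) = ((wrap) - (idx) > (incr)) ? (idx) + (incr)		\
	    : (incr) - ((wrap) - (idx));				\
} while (0)

/* Reserve ndesc slots; nonzero return means the range wraps past
 * desc[sidx - 1] and must be built in a staging buffer (cf. 2957). */
static int
wrq_reserve(unsigned int *pidx, unsigned int ndesc, unsigned int sidx)
{
	unsigned int start = *pidx;

	IDXINCR(*pidx, ndesc, sidx);
	return (start + ndesc > sidx);
}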
3055 return (total_available_tx_desc(eq) > eq->sidx / 8); in can_resume_eth_tx()
3098 if ((txp->npkt > 0 || avail < eq->sidx / 2) && in set_txupdate_flags()
3102 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { in set_txupdate_flags()
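These three thresholds are all fractions of sidx, i.e. of the ring size: transmission resumes once more than an eighth of the ring is reclaimable (3055), and a TX update is requested when at least half the ring is consumed or 32+ descriptors have been written since the last request (3098/3102, with equeqidx remembering where that last request was made). The condition at 3098 continues beyond what the listing shows, so the sketch below (reusing the IDXDIFF sketch above) captures only the thresholds themselves:

/* Hysteresis thresholds, as fractions of the ring (illustrative). */
static int
can_resume_tx(unsigned int avail, unsigned int sidx)
{
	return (avail > sidx / 8);			/* cf. 3055 */
}

static int
update_overdue(unsigned int pidx, unsigned int equeqidx,
    unsigned int avail, unsigned int sidx)
{
	return (avail < sidx / 2 ||			/* cf. 3098 */
	    IDXDIFF(pidx, equeqidx, sidx) >= 32);	/* cf. 3102 */
}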
3162 reclaim_tx_descs(txq, eq->sidx); in eth_tx()
3169 avail = eq->sidx - 1; in eth_tx()
3171 avail = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; in eth_tx()
3234 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
3283 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
3330 IDXINCR(eq->pidx, n, eq->sidx); in eth_tx()
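eth_tx's uses of sidx frame the whole transmit loop: reclaim as much as possible up front (3162 passes sidx as the "reclaim everything" bound), compute the budget with the usual one-slot reserve (3169-3171), then bump pidx by each work request's descriptor count (3234/3283/3330). A skeleton with illustrative names, reusing the macros sketched above:

struct eq_sw {
	unsigned int cidx, pidx, sidx;
};

/* Stub WR writer: pretend every work request takes one descriptor. */
static unsigned int
write_one_wr(struct eq_sw *eq)
{
	(void)eq;
	return (1);
}

static void
tx_loop(struct eq_sw *eq, unsigned int nwr)
{
	unsigned int avail, n;

	avail = (eq->cidx == eq->pidx) ? eq->sidx - 1 :
	    IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;	/* cf. 3169-3171 */
	while (nwr-- > 0 && avail > 0) {
		n = write_one_wr(eq);
		if (n > avail)
			break;		/* caller sizes WRs to fit first */
		IDXINCR(eq->pidx, n, eq->sidx);	/* cf. 3234/3283/3330 */
		avail -= n;
	}
}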
3368 iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; in init_iq()
3379 fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; in init_fl()
3410 eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; in init_eq()
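All three init functions derive sidx the same way: the status page lives at the tail of the queue's DMA memory, so the usable entry count is the total qsize minus the status page expressed in entries. The *_eq_alloc paths at 4252/4298/4344/4394 below simply invert this to recover qsize for the firmware command. For example, with 64-byte entries and a 64-byte status page (sizes illustrative; spg_len is a chip parameter), a 1024-entry queue gets sidx = 1023:

static unsigned int
calc_sidx(unsigned int qsize, unsigned int spg_len, unsigned int esize)
{
	/* cf. 3368/3379/3410; inverted at 4252-4394 */
	return (qsize - spg_len / esize);
}

/* calc_sidx(1024, 64, 64) == 1023 */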
3494 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), in alloc_iq_fl()
3585 bzero(fl->desc, fl->sidx * EQ_ESIZE + sc->params.sge.spg_len); in alloc_iq_fl_hwq()
3631 for (i = 0; i < fl->sidx * 8; i++) in alloc_iq_fl_hwq()
3741 fl->sidx * EQ_ESIZE + sc->params.sge.spg_len, in add_fl_sysctls()
4252 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in ctrl_eq_alloc()
4298 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in eth_eq_alloc()
4344 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in ofld_eq_alloc()
4394 qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; in alloc_eq()
4429 eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, in add_eq_sysctls()
4440 eq->sidx, "status page index"); in add_eq_sysctls()
4455 bzero(eq->desc, eq->sidx * EQ_ESIZE + sc->params.sge.spg_len); in alloc_eq_hwq()
4618 rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, in alloc_txq()
4640 txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, in alloc_txq()
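The allocation hits show what gets sized off sidx: the DMA ring is sidx entries plus the trailing status page (3585/3741/4429/4455), and the software descriptor arrays track it one-to-one for EQs (4618/4640) but eight-to-one for freelists (3494/3631/5122), since each freelist hardware index unit covers eight buffers (note the doorbell at 4956 below shifting pidx right by 3). In sketch form:

#include <stddef.h>

/* DMA length of a ring: usable entries plus the status page. */
static size_t
ring_dma_len(unsigned int sidx, size_t esize, size_t spg_len)
{
	return ((size_t)sidx * esize + spg_len);	/* cf. 3741/4429 */
}

/* Freelist software state is per buffer, 8 buffers per hw index. */
static size_t
fl_sdesc_count(unsigned int sidx)
{
	return ((size_t)sidx * 8);		/* cf. 3494/3631/5122 */
}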
4956 n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx); in ring_fl_db()
4965 IDXINCR(fl->dbidx, n, fl->sidx); in ring_fl_db()
4994 max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; in refill_fl()
5065 if (__predict_false(pidx == fl->sidx)) { in refill_fl()
5074 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) in refill_fl()
5122 for (i = 0; i < fl->sidx * 8; i++, sd++) { in free_fl_buffers()
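Freelist producer-side hits: refill stops one hardware descriptor short of the hardware's own consumer index so a full ring cannot be mistaken for an empty one (4994), wraps pidx at sidx (5065), and defers the doorbell until at least four hardware descriptors' worth of new buffers are pending (5074), with ring_fl_db converting the buffer-granular pidx to hardware units (4956) before advancing dbidx (4965). Two small sketches, reusing IDXDIFF from above:

/* Fill bound at 4994: stop one short of the hw consumer index. */
static unsigned int
fl_max_pidx(unsigned int hw_cidx, unsigned int sidx)
{
	return (hw_cidx == 0 ? sidx - 1 : hw_cidx - 1);
}

/* Doorbell pacing at 4956: pidx counts buffers, dbidx counts 8-buffer
 * hardware descriptors. */
static unsigned int
fl_db_pending(unsigned int pidx, unsigned int dbidx, unsigned int sidx)
{
	return (IDXDIFF(pidx >> 3, dbidx, sidx));
}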
5519 if (dst == (void *)&eq->desc[eq->sidx]) { in write_txpkt_vm_wr()
5523 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); in write_txpkt_vm_wr()
5656 if (__predict_false((uintptr_t)dst == (uintptr_t)&eq->desc[eq->sidx])) in write_txpkt_wr()
5660 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); in write_txpkt_wr()
5872 checkwrap = eq->sidx - ndesc < eq->pidx; in write_txpkts_wr()
5893 (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_wr()
5924 (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_wr()
5988 if (i & 1 && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) in write_txpkts_vm_wr()
6048 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); in write_gl_to_txd()
6056 wrap = (__be64 *)(&eq->desc[eq->sidx]); in write_gl_to_txd()
6113 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); in copy_to_txd()
6116 (uintptr_t)&eq->desc[eq->sidx])) { in copy_to_txd()
6120 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); in copy_to_txd()
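The WR-writing hits (5519-6120) all use &eq->desc[eq->sidx], the address of the status page, as the end-of-ring sentinel for a raw write pointer: callers that land exactly on it reset to desc[0] (5519/5656), the bulk writers precompute whether a WR can wrap at all (5523/5660/5872: checkwrap is true only when the reservation straddles the end), and copy_to_txd splits a copy into two pieces when it would cross the boundary (6116-6120). A sketch of the split copy, with the exact-end reset (handled by the callers in the driver) folded in:

#include <string.h>

static void
copy_to_ring(char *desc, size_t ring_len /* sidx * ESIZE */, char **to,
    const void *src, size_t len)
{
	char *end = desc + ring_len;	/* == &desc[sidx], the status page */

	if (*to + len <= end) {
		memcpy(*to, src, len);
		*to += len;
		if (*to == end)		/* landed exactly on the boundary */
			*to = desc;	/* cf. the checks at 5519/5656 */
	} else {
		size_t portion = end - *to;	/* cf. 6120 */

		memcpy(*to, src, portion);
		memcpy(desc, (const char *)src + portion, len - portion);
		*to = desc + (len - portion);
	}
}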
6181 IDXINCR(eq->dbidx, n, eq->sidx); in ring_eq_db()
6190 return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); in reclaimable_tx_desc()
6202 return (eq->sidx - 1); in total_available_tx_desc()
6204 return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); in total_available_tx_desc()
6210 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; in read_hw_cidx()
6253 IDXINCR(eq->cidx, ndesc, eq->sidx); in reclaim_tx_descs()
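The tail of the listing is reclamation: the hardware publishes its consumer index in the status page at desc[sidx] (6210), reclaimable space is the ring distance from the software cidx to that value (6190), total availability again reserves one slot (6202/6204), and both the doorbell index and the reclaim path advance modulo sidx (6181/6253). Reusing the macros sketched above:

static unsigned int
reclaimable(unsigned int hw_cidx, unsigned int sw_cidx, unsigned int sidx)
{
	return (IDXDIFF(hw_cidx, sw_cidx, sidx));	/* cf. 6190 */
}

static void
reclaim(unsigned int *sw_cidx, unsigned int ndesc, unsigned int sidx)
{
	IDXINCR(*sw_cidx, ndesc, sidx);			/* cf. 6253 */
}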