Lines Matching +full:cluster +full:- +full:index

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
110 SW_ZONE_SIZES = 4, /* cluster, jumbop, jumbo9k, jumbo16k */
112 SW_ZONE_SIZES = 3, /* cluster, jumbo9k, jumbo16k */
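These two hits (lines 110 and 112) are almost certainly the two arms of one conditional initializer: platforms where the page-sized jumbo cluster differs from the regular cluster get a fourth software zone. A hedged reconstruction of the surrounding block; the MJUMPAGESIZE/MCLBYTES guard is an assumption based on FreeBSD's standard mbuf cluster zones:

	/* sketch: how lines 110 and 112 likely fit together */
	#if MJUMPAGESIZE != MCLBYTES
		SW_ZONE_SIZES = 4,	/* cluster, jumbop, jumbo9k, jumbo16k */
	#else
		SW_ZONE_SIZES = 3,	/* cluster, jumbo9k, jumbo16k */
	#endif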
199 #define IS_DETACHING(vi) ((vi)->flags & VI_DETACHING)
200 #define SET_DETACHING(vi) do {(vi)->flags |= VI_DETACHING;} while (0)
201 #define CLR_DETACHING(vi) do {(vi)->flags &= ~VI_DETACHING;} while (0)
202 #define IS_BUSY(sc) ((sc)->flags & CXGBE_BUSY)
203 #define SET_BUSY(sc) do {(sc)->flags |= CXGBE_BUSY;} while (0)
204 #define CLR_BUSY(sc) do {(sc)->flags &= ~CXGBE_BUSY;} while (0)
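The BUSY trio suggests the usual pattern of serializing slow-path operations on a flag word under the adapter lock. A minimal sketch of that pattern, assuming sc->flags is protected by sc->sc_lock; the wait loop and wakeup are illustrative, not the driver's actual begin/end helpers:

	/* hypothetical usage sketch, assuming kernel context */
	ADAPTER_LOCK(sc);
	while (IS_BUSY(sc))			/* someone else owns the slow path */
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4busy", 0);
	SET_BUSY(sc);				/* claim exclusive access */
	ADAPTER_UNLOCK(sc);
	/* ... perform the operation ... */
	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);				/* release and wake waiters */
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);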
222 int16_t xact_addr_filt; /* index of exact MAC address filter */
232 int first_txq; /* index of first tx queue */
233 int rsrv_noflowq; /* Reserve queue 0 for non-flowid packets */
235 int first_rxq; /* index of first rx queue */
237 int first_ofld_txq; /* index of first offload tx queue */
239 int first_ofld_rxq; /* index of first offload rx queue */
294 enum fw_sched_params_mode mode; /* aggr or per-flow */
335 uint8_t tx_chan; /* tx TP c-channel */
336 uint8_t rx_chan; /* rx TP c-channel */
338 uint8_t rx_e_chan_map; /* rx TP e-channel bitmap */
352 #define IS_MAIN_VI(vi) ((vi) == &((vi)->pi->vi[0]))
362 uint16_t nmbuf; /* # of driver originated mbufs with ref on cluster */
377 #define IQ_PAD (IQ_ESIZE - sizeof(struct rsp_ctrl) - sizeof(struct rss_header))
425 * Crypto replies use the low bit in the 64-bit cookie of CPL_FW6_PLD as a
438 #define CPL_FW6_COOKIE_MASK (NUM_CPL_FW6_COOKIES - 1)
440 #define CPL_FW6_PLD_COOKIE(cpl) (be64toh((cpl)->data[1]) & ~CPL_FW6_COOKIE_MASK)
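Lines 425-440 describe a tag-in-pointer scheme: NUM_CPL_FW6_COOKIES is presumably a power of two, so the mask isolates the low bit(s) of the 64-bit cookie as a type tag, and CPL_FW6_PLD_COOKIE strips the tag back off. A standalone sketch of the arithmetic; the encode step and the value 2 for NUM_CPL_FW6_COOKIES are assumptions:

	#include <assert.h>
	#include <stdint.h>

	enum { NUM_CPL_FW6_COOKIES = 2 };	/* assumed: a power of 2 */
	#define CPL_FW6_COOKIE_MASK (NUM_CPL_FW6_COOKIES - 1)

	int
	main(void)
	{
		uint64_t ptr = 0xdeadbeef00ULL;	/* aligned, so low bits are free */
		uint64_t tag = 1;		/* e.g. the crypto cookie type */
		uint64_t cookie = ptr | tag;	/* pack the tag into the low bit */

		assert((cookie & CPL_FW6_COOKIE_MASK) == tag);		 /* tag back */
		assert((cookie & ~(uint64_t)CPL_FW6_COOKIE_MASK) == ptr);/* ptr back */
		return (0);
	}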
458 int8_t intr_pktc_idx; /* packet count threshold index */
463 uint16_t sidx; /* index of the entry with the status page */
464 uint16_t cidx; /* consumer index */
512 uint16_t sidx; /* index of the entry with the status page */
517 uint16_t iqid; /* cached iq->cntxt_id (see iq below) */
528 uma_zone_t zone; /* zone that this cluster comes from */
529 uint16_t size1; /* same as size of cluster: 2K/4K/9K/16K.
532 * spare in cluster = size1 - size2. */
535 uint8_t type; /* EXT_xxx type of the cluster */
569 (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) <= fl->lowat)
571 (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) >= 2 * fl->lowat)
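The * 8 in both hits indicates that fl->dbidx is kept in coarser units than fl->cidx (eight descriptors per doorbell unit, by the look of it), so both sides are scaled to descriptor granularity before measuring the distance. A worked example under that assumption, with illustrative values:

	#include <assert.h>
	#include <stdint.h>

	int
	main(void)
	{
		/* assumed: sidx = 64 doorbell units -> a 512-descriptor ring */
		uint32_t sidx = 64, dbidx = 10, cidx = 500, lowat = 32;
		uint32_t head = dbidx * 8, tail = cidx, wrap = sidx * 8;

		/* IDXDIFF (see line 1182): forward distance on the ring */
		uint32_t avail = head >= tail ? head - tail : wrap - tail + head;

		assert(avail == 92);		/* 512 - 500 + 80 */
		assert(!(avail <= lowat));	/* FL_RUNNING_LOW would be false */
		return (0);
	}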
585 uint16_t sidx; /* index of status page */
589 uint32_t cidx; /* consumer index */
590 uint32_t pidx; /* producer index */
691 /* stats for not-that-common events */
711 /* stats for not-that-common events */
799 /* stats for not-that-common events */
830 const int mshift = ffs(mask) - 1; in ofld_txq_group()
831 const uint32_t gmask = ngroup - 1; in ofld_txq_group()
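These two locals hint at how ofld_txq_group() picks a queue group: ffs(mask) - 1 gives the shift of the lowest set bit, and ngroup - 1 acts as a cheap modulus when ngroup is a power of two. A standalone illustration; the hash input and concrete values are assumptions:

	#include <assert.h>
	#include <strings.h>	/* ffs() */

	int
	main(void)
	{
		const int mask = 0x30;			/* assumed: bits 4-5 select */
		const int ngroup = 4;			/* assumed power of 2 */
		const int mshift = ffs(mask) - 1;	/* 4: shift of lowest set bit */
		const unsigned gmask = ngroup - 1;	/* 3: power-of-2 modulus */
		unsigned hash = 0xabcd;

		assert(mshift == 4);
		assert(((hash >> mshift) & gmask) == 0);  /* 0xabc & 3 == 0 */
		return (0);
	}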
836 #define INVALID_NM_RXQ_CNTXT_ID ((uint16_t)(-1))
877 #define INVALID_NM_TXQ_CNTXT_ID ((u_int)(-1))
928 struct sge_iq **iqmap; /* iq->cntxt_id to iq mapping */
929 struct sge_eq **eqmap; /* eq->cntxt_id to eq mapping */
1005 uint8_t chan_map[MAX_NCHAN]; /* tx_chan -> port_id */
1006 uint8_t port_map[MAX_NPORTS]; /* hw_port -> port_id */
1034 int bt_map; /* hw_port's that are BASE-T */
1045 int traceq; /* iq used by all tracers, -1 if none */
1078 struct mtx sfl_lock; /* same cache-line as sc_lock? but that's ok */
1118 #define ADAPTER_LOCK(sc) mtx_lock(&(sc)->sc_lock)
1119 #define ADAPTER_UNLOCK(sc) mtx_unlock(&(sc)->sc_lock)
1120 #define ADAPTER_LOCK_ASSERT_OWNED(sc) mtx_assert(&(sc)->sc_lock, MA_OWNED)
1121 #define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)
1125 (mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
1128 #define PORT_LOCK(pi) mtx_lock(&(pi)->pi_lock)
1129 #define PORT_UNLOCK(pi) mtx_unlock(&(pi)->pi_lock)
1130 #define PORT_LOCK_ASSERT_OWNED(pi) mtx_assert(&(pi)->pi_lock, MA_OWNED)
1131 #define PORT_LOCK_ASSERT_NOTOWNED(pi) mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)
1133 #define FL_LOCK(fl) mtx_lock(&(fl)->fl_lock)
1134 #define FL_TRYLOCK(fl) mtx_trylock(&(fl)->fl_lock)
1135 #define FL_UNLOCK(fl) mtx_unlock(&(fl)->fl_lock)
1136 #define FL_LOCK_ASSERT_OWNED(fl) mtx_assert(&(fl)->fl_lock, MA_OWNED)
1137 #define FL_LOCK_ASSERT_NOTOWNED(fl) mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)
1139 #define RXQ_FL_LOCK(rxq) FL_LOCK(&(rxq)->fl)
1140 #define RXQ_FL_UNLOCK(rxq) FL_UNLOCK(&(rxq)->fl)
1141 #define RXQ_FL_LOCK_ASSERT_OWNED(rxq) FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
1142 #define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)
1144 #define EQ_LOCK(eq) mtx_lock(&(eq)->eq_lock)
1145 #define EQ_TRYLOCK(eq) mtx_trylock(&(eq)->eq_lock)
1146 #define EQ_UNLOCK(eq) mtx_unlock(&(eq)->eq_lock)
1147 #define EQ_LOCK_ASSERT_OWNED(eq) mtx_assert(&(eq)->eq_lock, MA_OWNED)
1148 #define EQ_LOCK_ASSERT_NOTOWNED(eq) mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)
1150 #define TXQ_LOCK(txq) EQ_LOCK(&(txq)->eq)
1151 #define TXQ_TRYLOCK(txq) EQ_TRYLOCK(&(txq)->eq)
1152 #define TXQ_UNLOCK(txq) EQ_UNLOCK(&(txq)->eq)
1153 #define TXQ_LOCK_ASSERT_OWNED(txq) EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
1154 #define TXQ_LOCK_ASSERT_NOTOWNED(txq) EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)
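The TXQ_* macros are thin wrappers over the EQ_* ones, which shows the locking model: a tx queue is protected by the mutex embedded in its own eq. A hedged usage sketch of the resulting pattern:

	/* hypothetical usage sketch, assuming kernel context */
	TXQ_LOCK(txq);
	TXQ_LOCK_ASSERT_OWNED(txq);	/* mtx_assert on txq->eq.eq_lock */
	/* ... reserve descriptors and write the work request ... */
	TXQ_UNLOCK(txq);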
1157 for (q = &vi->adapter->sge.txq[vi->first_txq], iter = 0; \
1158 iter < vi->ntxq; ++iter, ++q)
1160 for (q = &vi->adapter->sge.rxq[vi->first_rxq], iter = 0; \
1161 iter < vi->nrxq; ++iter, ++q)
1163 for (q = &vi->adapter->sge.ofld_txq[vi->first_ofld_txq], iter = 0; \
1164 iter < vi->nofldtxq; ++iter, ++q)
1166 for (q = &vi->adapter->sge.ofld_rxq[vi->first_ofld_rxq], iter = 0; \
1167 iter < vi->nofldrxq; ++iter, ++q)
1169 for (q = &vi->adapter->sge.nm_txq[vi->first_nm_txq], iter = 0; \
1170 iter < vi->nnmtxq; ++iter, ++q)
1172 for (q = &vi->adapter->sge.nm_rxq[vi->first_nm_rxq], iter = 0; \
1173 iter < vi->nnmrxq; ++iter, ++q)
1175 for ((_vi) = (_pi)->vi, (_iter) = 0; (_iter) < (_pi)->nvi; \
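Each of the loop bodies above (lines 1157-1175) expands to a plain for loop over a contiguous slice of the adapter-wide queue arrays, starting at the vi's first_* index and running for its n* count. The macro names are cut off in these hits; for_each_txq(vi, iter, q) is the name the FreeBSD driver uses for the first one. A hedged usage sketch, with the declarations assumed:

	/* hypothetical usage, assuming kernel context */
	struct sge_txq *txq;
	int i;

	for_each_txq(vi, i, txq) {
		TXQ_LOCK(txq);
		/* ... per-queue work ... */
		TXQ_UNLOCK(txq);
	}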
1179 idx = wrap - idx > incr ? idx + incr : incr - (wrap - idx); \
1182 ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
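Both helpers do modular ring arithmetic without a divide: line 1182 is IDXDIFF, the forward distance from tail to head, and line 1179 is the body of the increment macro, advancing an index by incr with wraparound. A standalone check; the do/while wrapper and the macro name IDXINCR are taken from context rather than visible in the hits:

	#include <assert.h>
	#include <stdint.h>

	#define IDXINCR(idx, incr, wrap) do { \
		idx = wrap - idx > incr ? idx + incr : incr - (wrap - idx); \
	} while (0)
	#define IDXDIFF(head, tail, wrap) \
		((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

	int
	main(void)
	{
		uint32_t idx = 1020;

		IDXINCR(idx, 7, 1024);			/* wraps past the end */
		assert(idx == 3);			/* 7 - (1024 - 1020) */
		assert(IDXDIFF(3, 1020, 1024) == 7);	/* distance survives wrap */
		return (0);
	}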
1194 return (sc->intr_count == 1); in forwarding_intr_to_fwq()
1201 const int off_limits = atomic_load_int(&sc->error_flags) & HW_OFF_LIMITS; in hw_off_limits()
1210 const int not_ok = atomic_load_int(&sc->error_flags) & in hw_all_ok()
1220 KASSERT(m->m_pkthdr.inner_l5hlen > 0, in mbuf_nsegs()
1223 return (m->m_pkthdr.inner_l5hlen); in mbuf_nsegs()
1230 m->m_pkthdr.inner_l5hlen = nsegs; in set_mbuf_nsegs()
1242 return (m->m_pkthdr.PH_loc.eight[4]); in mbuf_cflags()
1249 m->m_pkthdr.PH_loc.eight[4] = flags; in set_mbuf_cflags()
1258 n = m->m_pkthdr.PH_loc.eight[0]; in mbuf_len16()
1271 m->m_pkthdr.PH_loc.eight[0] = len16; in set_mbuf_len16()
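These accessors stash tx bookkeeping in mbuf pkthdr fields that are otherwise unused on this path: the segment count in inner_l5hlen, len16 in PH_loc.eight[0], and flags in PH_loc.eight[4]. A standalone round-trip using a stand-in struct; the real fields live in struct mbuf's pkthdr:

	#include <assert.h>
	#include <stdint.h>

	/* stand-in for the pkthdr scratch space the driver reuses */
	struct mock_pkthdr {
		uint8_t inner_l5hlen;	 /* reused: cached DMA segment count */
		uint8_t ph_loc_eight[8]; /* reused: len16 in [0], cflags in [4] */
	};

	int
	main(void)
	{
		struct mock_pkthdr h = {0};

		h.inner_l5hlen = 5;		/* set_mbuf_nsegs() */
		h.ph_loc_eight[0] = 12;		/* set_mbuf_len16() */
		h.ph_loc_eight[4] = 0x2;	/* set_mbuf_cflags() */

		assert(h.inner_l5hlen == 5);	 /* mbuf_nsegs() */
		assert(h.ph_loc_eight[0] == 12); /* mbuf_len16() */
		assert(h.ph_loc_eight[4] == 0x2);/* mbuf_cflags() */
		return (0);
	}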
1278 MPASS(curthread == sc->reset_thread); in t4_read_reg()
1279 return bus_space_read_4(sc->bt, sc->bh, reg); in t4_read_reg()
1286 MPASS(curthread == sc->reset_thread); in t4_write_reg()
1287 bus_space_write_4(sc->bt, sc->bh, reg, val); in t4_write_reg()
1294 MPASS(curthread == sc->reset_thread); in t4_read_reg64()
1296 return bus_space_read_8(sc->bt, sc->bh, reg); in t4_read_reg64()
1298 return (uint64_t)bus_space_read_4(sc->bt, sc->bh, reg) + in t4_read_reg64()
1299 ((uint64_t)bus_space_read_4(sc->bt, sc->bh, reg + 4) << 32); in t4_read_reg64()
1308 MPASS(curthread == sc->reset_thread); in t4_write_reg64()
1310 bus_space_write_8(sc->bt, sc->bh, reg, val); in t4_write_reg64()
1312 bus_space_write_4(sc->bt, sc->bh, reg, val); in t4_write_reg64()
1313 bus_space_write_4(sc->bt, sc->bh, reg + 4, val >> 32); in t4_write_reg64()
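When no 8-byte access path is available, the 64-bit register helpers fall back to two 32-bit accesses: low word at reg, high word at reg + 4, recombined with a shift. A standalone model of that composition:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t regs[2] = { 0x11223344, 0x8899aabb };	/* reg, reg + 4 */

	static uint64_t
	read64_split(void)
	{
		/* low 32 bits first, then high 32, as in t4_read_reg64() */
		return (uint64_t)regs[0] + ((uint64_t)regs[1] << 32);
	}

	static void
	write64_split(uint64_t val)
	{
		regs[0] = (uint32_t)val;	/* low word at reg */
		regs[1] = val >> 32;		/* high word at reg + 4 */
	}

	int
	main(void)
	{
		assert(read64_split() == 0x8899aabb11223344ULL);
		write64_split(0x0102030405060708ULL);
		assert(regs[0] == 0x05060708 && regs[1] == 0x01020304);
		return (0);
	}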
1321 MPASS(curthread == sc->reset_thread); in t4_os_pci_read_cfg1()
1322 *val = pci_read_config(sc->dev, reg, 1); in t4_os_pci_read_cfg1()
1329 MPASS(curthread == sc->reset_thread); in t4_os_pci_write_cfg1()
1330 pci_write_config(sc->dev, reg, val, 1); in t4_os_pci_write_cfg1()
1338 MPASS(curthread == sc->reset_thread); in t4_os_pci_read_cfg2()
1339 *val = pci_read_config(sc->dev, reg, 2); in t4_os_pci_read_cfg2()
1346 MPASS(curthread == sc->reset_thread); in t4_os_pci_write_cfg2()
1347 pci_write_config(sc->dev, reg, val, 2); in t4_os_pci_write_cfg2()
1354 MPASS(curthread == sc->reset_thread); in t4_os_pci_read_cfg4()
1355 *val = pci_read_config(sc->dev, reg, 4); in t4_os_pci_read_cfg4()
1362 MPASS(curthread == sc->reset_thread); in t4_os_pci_write_cfg4()
1363 pci_write_config(sc->dev, reg, val, 4); in t4_os_pci_write_cfg4()
1370 return (sc->port[idx]); in adap2pinfo()
1377 bcopy(hw_addr, pi->vi[0].hw_addr, ETHER_ADDR_LEN); in t4_os_set_hw_addr()
1385 return (eq->sidx / 4); in tx_resume_threshold()
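Read literally, line 1385 puts the resume point at a quarter of the ring: a hypothetical eq with sidx = 1024 descriptors would restart transmit once 1024 / 4 = 256 slots are free.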
1393 return (sc->flags & FW_OK || !sc->use_bd); in t4_use_ldst()
1404 if (!(sc->debug_flags & DF_DUMP_MBOX) && !err) in CH_DUMP_MBOX()
1410 device_get_nameunit(sc->dev), mbox, msg, in CH_DUMP_MBOX()
1419 device_get_nameunit(sc->dev), mbox, msg, in CH_DUMP_MBOX()
1649 wr->wr_len = wr_len; in alloc_wrqe()
1650 wr->wrq = wrq; in alloc_wrqe()
1657 return (&wr->wr[0]); in wrtod()
1669 struct sge_wrq *wrq = wr->wrq; in t4_wrq_tx()
1672 if (__predict_true(wrq->eq.flags & EQ_HW_ALLOCATED)) in t4_wrq_tx()
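Together, lines 1649-1672 outline the control-path work request flow: allocate a wrapped work request, get a pointer to its payload with wrtod(), fill it in, then hand it to t4_wrq_tx(), which takes the fast path only while the hardware eq is still allocated. A hedged sketch of that sequence; the helper name, payload type, and rounding are illustrative:

	/* hypothetical helper, assuming kernel context and cxgbe internals */
	static int
	send_example_wr(struct adapter *sc, struct sge_wrq *wrq)
	{
		struct wrqe *wr;
		struct cpl_set_tcb_field *req;	/* illustrative payload type */

		wr = alloc_wrqe(roundup2(sizeof(*req), 16), wrq);
		if (wr == NULL)
			return (ENOMEM);
		req = wrtod(wr);		/* pointer to wr->wr[0] */
		/* ... fill in *req ... */
		t4_wrq_tx(sc, wr);		/* fast path while EQ_HW_ALLOCATED */
		return (0);
	}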
1695 /* Number of len16 -> number of descriptors */
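A len16 unit is 16 bytes and a tx descriptor is typically 64 bytes (EQ_ESIZE), so the conversion this comment announces is presumably a round-up divide by 4. A standalone check under that assumption:

	#include <assert.h>
	#include <sys/param.h>	/* howmany() */

	#define EQ_ESIZE 64	/* assumed descriptor size in bytes */

	int
	main(void)
	{
		/* 9 len16 units = 144 bytes -> 3 descriptors of 64 bytes */
		assert(howmany(9, EQ_ESIZE / 16) == 3);
		return (0);
	}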