Lines Matching +full:use +full:- +full:dma +full:- +full:tx

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2007-2009 Marvell Semiconductor, Inc.
8 * Redistribution and use in source and binary forms, with or without
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
47 #define MWL_TXBUF 256 /* number of TX descriptors/buffers */
50 #define MWL_TXACKBUF (MWL_TXBUF/2) /* number of TX ACK desc's/buffers */
56 #define MWL_RXBUF ((5*MWL_RXDESC)/2)/* number of RX dma buffers */
63 #define MWL_TXDESC 6 /* max tx descriptors/segments */
65 #define MWL_TXDESC 1 /* max tx descriptors/segments */
68 #define MWL_AGGR_SIZE 3839 /* max tx aggregation size */
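
/*
 * A minimal sketch of how these knobs combine (illustrative arithmetic,
 * not a macro taken from this header): each of the MWL_TXBUF software tx
 * buffers may expand into up to MWL_TXDESC h/w descriptors (scatter/gather
 * segments), so a single tx ring needs MWL_TXBUF * MWL_TXDESC descriptor
 * slots, while MWL_RXBUF deliberately over-provisions rx dma buffers
 * relative to MWL_RXDESC so the free pool rarely runs dry while received
 * frames are still held by the network stack.
 */
#define MWL_TXQ_NDESC_SKETCH	(MWL_TXBUF * MWL_TXDESC)	/* descriptor slots per tx ring */
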
74 * DMA state for tx/rx descriptors.
78 * Software backed version of tx/rx descriptors. We keep
87 bus_dmamap_t bf_dmamap; /* DMA map for descriptors */
92 struct mwl_txq *bf_txq; /* backpointer to tx q/ring */
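
/*
 * Sketch (hypothetical helper; the free-list head and bf_list linkage are
 * assumptions, only bf_dmamap, bf_txq and sc_dmat are visible above) of
 * how the software-backed tx buffers might get their per-buffer DMA maps
 * and be threaded onto a queue's free list at attach time. Requires
 * <sys/queue.h> and <machine/bus.h>.
 */
static int
mwl_txbuf_setup_sketch(struct mwl_softc *sc, struct mwl_txq *txq,
	struct mwl_txbuf *bf, int nbuf)
{
	int i, error;

	for (i = 0; i < nbuf; i++, bf++) {
		/* one DMA map per buffer */
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
		    &bf->bf_dmamap);
		if (error != 0)
			return (error);
		bf->bf_txq = txq;	/* backpointer used on tx completion */
		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);	/* assumed list names */
	}
	return (0);
}
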
97 * Common "base class" for tx/rx descriptor resources
98 * allocated using the bus dma api.
107 bus_dma_tag_t dd_dmat; /* bus DMA tag */
108 bus_dmamap_t dd_dmamap; /* DMA map for descriptors */
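
/*
 * Sketch of how a mwl_descdma block might be filled in with the bus dma
 * API; only dd_dmat and dd_dmamap appear in this excerpt, so dd_desc
 * (kva of the ring), dd_desc_paddr (its bus address), the helper names,
 * and the tag parameters are assumptions for illustration.
 */
static void
mwl_descdma_load_cb_sketch(void *arg, bus_dma_segment_t *segs, int nseg,
	int error)
{
	bus_addr_t *paddr = arg;

	if (error == 0 && nseg == 1)
		*paddr = segs[0].ds_addr;	/* record bus address of the ring */
}

static int
mwl_descdma_setup_sketch(struct mwl_softc *sc, struct mwl_descdma *dd,
	bus_size_t memsize)
{
	int error;

	/* one physically contiguous block holding all descriptors */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    memsize, 1, memsize,		/* maxsize, nsegments, maxsegsz */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &dd->dd_dmat);
	if (error != 0)
		return (error);
	error = bus_dmamem_alloc(dd->dd_dmat, (void **)&dd->dd_desc,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dd->dd_dmamap);
	if (error != 0)
		return (error);
	return (bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc,
	    memsize, mwl_descdma_load_cb_sketch, &dd->dd_desc_paddr,
	    BUS_DMA_NOWAIT));
}
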
113 * TX/RX ring definitions. There are 4 tx rings, one
116 * firmware pre-fetches descriptors. This means that we
121 struct mwl_descdma dma; /* bus dma resources */
122 struct mtx lock; /* tx q lock */
125 int txpri; /* f/w tx priority */
132 snprintf((_tq)->name, sizeof((_tq)->name), "%s_txq%u", \
133 device_get_nameunit((_sc)->sc_dev), (_tq)->qnum); \
134 mtx_init(&(_tq)->lock, (_tq)->name, NULL, MTX_DEF); \
136 #define MWL_TXQ_LOCK_DESTROY(_tq) mtx_destroy(&(_tq)->lock)
137 #define MWL_TXQ_LOCK(_tq) mtx_lock(&(_tq)->lock)
138 #define MWL_TXQ_UNLOCK(_tq) mtx_unlock(&(_tq)->lock)
139 #define MWL_TXQ_LOCK_ASSERT(_tq) mtx_assert(&(_tq)->lock, MA_OWNED)
142 bus_dmamap_sync((txq)->dma.dd_dmat, (txq)->dma.dd_dmamap, how); \
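
/*
 * Sketch of the locking and sync discipline the macros above imply when
 * handing a filled tx buffer to the h/w. The "active" bookkeeping list,
 * the bf_list linkage, and the descriptor-write step are assumptions; the
 * bus_dmamap_sync() call is what the descriptor sync macro above expands
 * to, pushing descriptor writes out before the firmware pre-fetches them.
 */
static void
mwl_tx_handoff_sketch(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	MWL_TXQ_LOCK(txq);
	/* ... write/link bf's h/w descriptor onto the ring here ... */
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);	/* assumed list names */
	bus_dmamap_sync(txq->dma.dd_dmat, txq->dma.dd_dmamap,
	    BUS_DMASYNC_PREWRITE);
	MWL_TXQ_UNLOCK(txq);
}
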
146 * RX dma buffers that are not in use are kept on a list.
156 (((const uint8_t *)(_data)) - (const uint8_t *)((_sc)->sc_rxmem))
158 ((_sc)->sc_rxmem_paddr + MWL_JUMBO_OFFSET(_sc, _data))
169 bus_dmamap_sync((sc)->sc_rxdma.dd_dmat, (sc)->sc_rxdma.dd_dmamap, how);\
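
/*
 * Sketch of handing one rx dma buffer from the jumbo pool to the h/w. The
 * bus-address arithmetic is exactly what the offset/paddr macros above
 * compute; the stand-in rx descriptor layout and helper name are
 * assumptions, and the final sync mirrors the rx descriptor sync macro.
 */
struct mwl_rxdesc_sketch {		/* stand-in for the real h/w rx descriptor */
	uint32_t	physbuffdata;	/* bus address of the rx dma buffer (LE) */
};

static void
mwl_rx_attach_buf_sketch(struct mwl_softc *sc, struct mwl_rxdesc_sketch *ds,
	const void *data)
{
	bus_addr_t paddr;

	/* offset of the buffer within the pool, rebased to the pool's paddr */
	paddr = sc->sc_rxmem_paddr +
	    ((const uint8_t *)data - (const uint8_t *)sc->sc_rxmem);
	ds->physbuffdata = htole32(paddr);
	/* make the updated descriptor visible to the device */
	bus_dmamap_sync(sc->sc_rxdma.dd_dmat, sc->sc_rxdma.dd_dmamap,
	    BUS_DMASYNC_PREWRITE);
}
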
174 * allocated/created for use. We pre-allocate the h/w stream
177 * to optimize the fast path in mwl_txstart--we precalculate
178 * the QoS control bits in the outbound frame and use those
179 * to identify which BA stream to use (assigning the h/w q to
187 const MWL_HAL_BASTREAM *bastream; /* A-MPDU BA stream */
193 bas->txq = txq; /* in mwl_bastream_setup() */
194 bas->qos = htole16(tid | IEEE80211_QOS_ACKPOLICY_BA); /* in mwl_bastream_setup() */
200 bas->qos = 0; /* in mwl_bastream_free() */
201 bas->bastream = NULL; /* in mwl_bastream_free() */
216 bas->qos; /* in mwl_bastream_match() */
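
/*
 * Sketch of the tx fast path lookup the comment above describes: compare
 * the outbound frame's QoS control halfword (already masked by the caller
 * to the TID and ack-policy bits and left in little-endian order) against
 * each precalculated per-stream qos value and route to that stream's tx
 * queue. The stand-in structs, array size and helper name are
 * illustrative; only the txq/qos/bastream fields are visible above.
 */
#define MWL_MAXBA_SKETCH	2		/* illustrative streams/node */

struct mwl_bastate_sketch {			/* mirrors the fields shown above */
	struct mwl_txq		*txq;
	uint16_t		qos;		/* precalculated QoS control (LE) */
	const MWL_HAL_BASTREAM	*bastream;
};

struct mwl_node_sketch {
	struct mwl_bastate_sketch mn_ba[MWL_MAXBA_SKETCH];
};

static __inline struct mwl_txq *
mwl_bastream_lookup_sketch(const struct mwl_node_sketch *mn, uint16_t qos_le)
{
	int i;

	for (i = 0; i < MWL_MAXBA_SKETCH; i++)
		if (mn->mn_ba[i].bastream != NULL &&
		    mn->mn_ba[i].qos == qos_le)
			return (mn->mn_ba[i].txq);
	return (NULL);		/* no BA stream; caller falls back to the WME queue */
}
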
219 /* driver-specific node state */
232 * Driver-specific vap state.
239 uint16_t mv_eapolformat; /* fixed tx rate for EAPOL */
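
/*
 * Sketch of how a fixed EAPOL tx rate such as mv_eapolformat might be
 * applied in the tx path. The M_EAPOL mbuf classification, the stand-in
 * vap struct and the "0 means firmware rate control" convention are
 * assumptions; only the mv_eapolformat field itself appears above.
 */
struct mwl_vap_sketch {
	uint16_t	mv_eapolformat;	/* fixed tx rate word for EAPOL frames */
};

static __inline uint16_t
mwl_pick_txformat_sketch(const struct mwl_vap_sketch *mvp, const struct mbuf *m)
{
	/* key-handshake frames are sent at a fixed, robust rate */
	if (m->m_flags & M_EAPOL)
		return (mvp->mv_eapolformat);
	return (0);			/* let the f/w rate control choose */
}
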
253 bus_dma_tag_t sc_dmat; /* bus DMA tag */
267 sc_rxblocked: 1;/* rx waiting for dma buffers */
273 struct mwl_hal_txrxdma sc_hwdma; /* h/w dma setup */
281 u_int16_t sc_txantenna; /* tx antenna */
311 struct mwl_descdma sc_rxdma; /* rx bus dma resources */
315 void *sc_rxmem; /* rx dma buffer pool */
316 bus_dma_tag_t sc_rxdmat; /* rx bus DMA tag */
317 bus_size_t sc_rxmemsize; /* rx dma buffer pool size */
318 bus_dmamap_t sc_rxmap; /* map for rx dma buffers */
320 mwl_jumbohead sc_rxfree; /* list of free dma buffers */
325 struct mwl_txq *sc_ac2q[5]; /* WME AC -> h/w q map */
327 struct task sc_txtask; /* tx int processing */
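
/*
 * Sketch of the WME AC -> h/w queue mapping implied by sc_ac2q[] above:
 * the tx path indexes the table with the mbuf's access category (via the
 * stock net80211 M_WME_GETAC() macro). Helper name is illustrative.
 */
static __inline struct mwl_txq *
mwl_gettxq_sketch(struct mwl_softc *sc, struct mbuf *m)
{
	return (sc->sc_ac2q[M_WME_GETAC(m)]);
}
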
342 mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
344 #define MWL_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
345 #define MWL_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
346 #define MWL_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
347 #define MWL_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
350 mtx_init(&(_sc)->sc_rxlock, device_get_nameunit((_sc)->sc_dev), \
352 #define MWL_RXFREE_DESTROY(_sc) mtx_destroy(&(_sc)->sc_rxlock)
353 #define MWL_RXFREE_LOCK(_sc) mtx_lock(&(_sc)->sc_rxlock)
354 #define MWL_RXFREE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_rxlock)
355 #define MWL_RXFREE_ASSERT(_sc) mtx_assert(&(_sc)->sc_rxlock, MA_OWNED)
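
/*
 * Sketch of the MWL_RXFREE_* lock discipline around the free rx buffer
 * list (sc_rxfree). The element type is assumed to look like
 * struct mwl_jumbo { SLIST_ENTRY(mwl_jumbo) next; ... }; neither it nor
 * the helper names appear in this excerpt. Requires <sys/queue.h>.
 */
static struct mwl_jumbo *
mwl_getrxdma_sketch(struct mwl_softc *sc)
{
	struct mwl_jumbo *buf;

	MWL_RXFREE_LOCK(sc);
	buf = SLIST_FIRST(&sc->sc_rxfree);
	if (buf != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
	else
		sc->sc_rxblocked = 1;	/* rx stalls until a buffer comes back */
	MWL_RXFREE_UNLOCK(sc);
	return (buf);
}

static void
mwl_putrxdma_sketch(struct mwl_softc *sc, struct mwl_jumbo *buf)
{
	MWL_RXFREE_LOCK(sc);
	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
	sc->sc_rxblocked = 0;		/* a blocked rx path can resume */
	MWL_RXFREE_UNLOCK(sc);
}
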