Lines Matching full:ring

60 	struct mlx4_en_tx_ring *ring;  in mlx4_en_create_tx_ring()  local
65 ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node); in mlx4_en_create_tx_ring()
66 if (!ring) { in mlx4_en_create_tx_ring()
67 ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL); in mlx4_en_create_tx_ring()
68 if (!ring) { in mlx4_en_create_tx_ring()
69 en_err(priv, "Failed allocating TX ring\n"); in mlx4_en_create_tx_ring()
87 &ring->dma_tag))) in mlx4_en_create_tx_ring()
90 ring->size = size; in mlx4_en_create_tx_ring()
91 ring->size_mask = size - 1; in mlx4_en_create_tx_ring()
92 ring->stride = stride; in mlx4_en_create_tx_ring()
93 ring->inline_thold = MAX(MIN_PKT_LEN, MIN(priv->prof->inline_thold, MAX_INLINE)); in mlx4_en_create_tx_ring()
94 mtx_init(&ring->tx_lock, "mlx4 tx", NULL, MTX_DEF); in mlx4_en_create_tx_ring()
95 mtx_init(&ring->comp_lock, "mlx4 comp", NULL, MTX_DEF); in mlx4_en_create_tx_ring()
98 ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node); in mlx4_en_create_tx_ring()
99 if (!ring->tx_info) { in mlx4_en_create_tx_ring()
100 ring->tx_info = kzalloc(tmp, GFP_KERNEL); in mlx4_en_create_tx_ring()
101 if (!ring->tx_info) { in mlx4_en_create_tx_ring()
109 err = -bus_dmamap_create(ring->dma_tag, 0, in mlx4_en_create_tx_ring()
110 &ring->tx_info[x].dma_map); in mlx4_en_create_tx_ring()
113 bus_dmamap_destroy(ring->dma_tag, in mlx4_en_create_tx_ring()
114 ring->tx_info[x].dma_map); in mlx4_en_create_tx_ring()
120 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", in mlx4_en_create_tx_ring()
121 ring->tx_info, tmp); in mlx4_en_create_tx_ring()
123 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); in mlx4_en_create_tx_ring()
126 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, in mlx4_en_create_tx_ring()
133 err = mlx4_en_map_buffer(&ring->wqres.buf); in mlx4_en_create_tx_ring()
139 ring->buf = ring->wqres.buf.direct.buf; in mlx4_en_create_tx_ring()
141 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " in mlx4_en_create_tx_ring()
142 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, in mlx4_en_create_tx_ring()
143 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); in mlx4_en_create_tx_ring()
145 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, in mlx4_en_create_tx_ring()
148 en_err(priv, "failed reserving qp for TX ring\n"); in mlx4_en_create_tx_ring()
152 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); in mlx4_en_create_tx_ring()
154 en_err(priv, "Failed allocating qp %d\n", ring->qpn); in mlx4_en_create_tx_ring()
157 ring->qp.event = mlx4_en_sqp_event; in mlx4_en_create_tx_ring()
159 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); in mlx4_en_create_tx_ring()
162 ring->bf.uar = &mdev->priv_uar; in mlx4_en_create_tx_ring()
163 ring->bf.uar->map = mdev->uar_map; in mlx4_en_create_tx_ring()
164 ring->bf_enabled = false; in mlx4_en_create_tx_ring()
166 ring->bf_enabled = true; in mlx4_en_create_tx_ring()
167 ring->queue_index = queue_idx; in mlx4_en_create_tx_ring()
169 *pring = ring; in mlx4_en_create_tx_ring()
173 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); in mlx4_en_create_tx_ring()
175 mlx4_en_unmap_buffer(&ring->wqres.buf); in mlx4_en_create_tx_ring()
177 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); in mlx4_en_create_tx_ring()
180 bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map); in mlx4_en_create_tx_ring()
182 vfree(ring->tx_info); in mlx4_en_create_tx_ring()
184 bus_dma_tag_destroy(ring->dma_tag); in mlx4_en_create_tx_ring()
186 kfree(ring); in mlx4_en_create_tx_ring()
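
The excerpts above cover mlx4_en_create_tx_ring(): the ring structure is allocated NUMA-node first with a plain kzalloc() fallback, the ring size must be a power of two so that size_mask = size - 1 can reduce the free-running prod/cons counters to slot indexes, and the inline threshold is clamped between MIN_PKT_LEN and MAX_INLINE. A minimal userspace sketch of that index and clamp arithmetic follows; the constant values are placeholders for illustration, not the driver's definitions.

    /* Sketch only: MIN_PKT_LEN/MAX_INLINE stand-ins are made-up values. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MIN_PKT_LEN_GUESS 17   /* placeholder for MIN_PKT_LEN */
    #define MAX_INLINE_GUESS  104  /* placeholder for MAX_INLINE  */
    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint32_t size = 1024;               /* ring size, must be a power of two */
        uint32_t size_mask = size - 1;      /* same derivation as ring->size_mask */
        uint32_t prod = 0xfffffffeu;        /* free-running producer counter */

        assert((size & (size - 1)) == 0);   /* power-of-two sanity check */
        printf("slot for prod=%#x is %u\n",
               (unsigned)prod, (unsigned)(prod & size_mask));

        int requested = 512;                /* e.g. priv->prof->inline_thold */
        int inline_thold = MAX(MIN_PKT_LEN_GUESS,
                               MIN(requested, MAX_INLINE_GUESS));
        printf("clamped inline_thold = %d\n", inline_thold);
        return 0;
    }
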
194 struct mlx4_en_tx_ring *ring = *pring; in mlx4_en_destroy_tx_ring() local
196 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); in mlx4_en_destroy_tx_ring()
198 if (ring->bf_enabled) in mlx4_en_destroy_tx_ring()
199 mlx4_bf_free(mdev->dev, &ring->bf); in mlx4_en_destroy_tx_ring()
200 mlx4_qp_remove(mdev->dev, &ring->qp); in mlx4_en_destroy_tx_ring()
201 mlx4_qp_free(mdev->dev, &ring->qp); in mlx4_en_destroy_tx_ring()
202 mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); in mlx4_en_destroy_tx_ring()
203 mlx4_en_unmap_buffer(&ring->wqres.buf); in mlx4_en_destroy_tx_ring()
204 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); in mlx4_en_destroy_tx_ring()
205 for (x = 0; x != ring->size; x++) in mlx4_en_destroy_tx_ring()
206 bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map); in mlx4_en_destroy_tx_ring()
207 vfree(ring->tx_info); in mlx4_en_destroy_tx_ring()
208 mtx_destroy(&ring->tx_lock); in mlx4_en_destroy_tx_ring()
209 mtx_destroy(&ring->comp_lock); in mlx4_en_destroy_tx_ring()
210 bus_dma_tag_destroy(ring->dma_tag); in mlx4_en_destroy_tx_ring()
211 kfree(ring); in mlx4_en_destroy_tx_ring()
216 struct mlx4_en_tx_ring *ring, in mlx4_en_activate_tx_ring() argument
222 ring->cqn = cq; in mlx4_en_activate_tx_ring()
223 ring->prod = 0; in mlx4_en_activate_tx_ring()
224 ring->cons = 0xffffffff; in mlx4_en_activate_tx_ring()
225 ring->last_nr_txbb = 1; in mlx4_en_activate_tx_ring()
226 ring->poll_cnt = 0; in mlx4_en_activate_tx_ring()
227 memset(ring->buf, 0, ring->buf_size); in mlx4_en_activate_tx_ring()
228 ring->watchdog_time = 0; in mlx4_en_activate_tx_ring()
230 ring->qp_state = MLX4_QP_STATE_RST; in mlx4_en_activate_tx_ring()
231 ring->doorbell_qpn = ring->qp.qpn << 8; in mlx4_en_activate_tx_ring()
233 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, in mlx4_en_activate_tx_ring()
234 ring->cqn, user_prio, &ring->context); in mlx4_en_activate_tx_ring()
235 if (ring->bf_enabled) in mlx4_en_activate_tx_ring()
236 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); in mlx4_en_activate_tx_ring()
238 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, in mlx4_en_activate_tx_ring()
239 &ring->qp, &ring->qp_state); in mlx4_en_activate_tx_ring()
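
mlx4_en_activate_tx_ring() resets the counters: prod starts at 0 while cons starts at 0xffffffff with last_nr_txbb = 1, so the very first cons += last_nr_txbb wraps cons back to 0 and the ring reads as empty. The QP number is also pre-shifted by 8 into doorbell_qpn so the hot transmit path only has to byte-swap and write it; the exact doorbell register layout is hardware-defined and not part of this listing. A small sketch of the unsigned 32-bit wrap behind the 0xffffffff initial value; the QP number below is illustrative.

    /* Sketch: unsigned wrap behind the cons = 0xffffffff initialization. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t prod = 0;
        uint32_t cons = 0xffffffffu;
        uint32_t last_nr_txbb = 1;

        cons += last_nr_txbb;               /* wraps around to 0 */
        printf("cons after wrap: %u, in flight: %u\n",
               (unsigned)cons, (unsigned)(prod - cons));

        uint32_t qpn = 0x00abcdefu;         /* illustrative QP number */
        uint32_t doorbell_qpn = qpn << 8;   /* as in ring->doorbell_qpn */
        printf("doorbell word (host order): %#010x\n", (unsigned)doorbell_qpn);
        return 0;
    }
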
244 struct mlx4_en_tx_ring *ring) in mlx4_en_deactivate_tx_ring() argument
248 mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, in mlx4_en_deactivate_tx_ring()
249 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); in mlx4_en_deactivate_tx_ring()
272 struct mlx4_en_tx_ring *ring, u32 index, u8 owner) in mlx4_en_stamp_wqe() argument
274 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; in mlx4_en_stamp_wqe()
276 (ring->buf + (index * TXBB_SIZE)); in mlx4_en_stamp_wqe()
291 struct mlx4_en_tx_ring *ring, u32 index) in mlx4_en_free_tx_desc() argument
296 tx_info = &ring->tx_info[index]; in mlx4_en_free_tx_desc()
302 bus_dmamap_sync(ring->dma_tag, tx_info->dma_map, in mlx4_en_free_tx_desc()
304 bus_dmamap_unload(ring->dma_tag, tx_info->dma_map); in mlx4_en_free_tx_desc()
311 int mlx4_en_free_tx_buf(if_t dev, struct mlx4_en_tx_ring *ring) in mlx4_en_free_tx_buf() argument
317 ring->cons += ring->last_nr_txbb; in mlx4_en_free_tx_buf()
319 ring->cons, ring->prod); in mlx4_en_free_tx_buf()
321 if ((u32) (ring->prod - ring->cons) > ring->size) { in mlx4_en_free_tx_buf()
326 while (ring->cons != ring->prod) { in mlx4_en_free_tx_buf()
327 ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, in mlx4_en_free_tx_buf()
328 ring->cons & ring->size_mask); in mlx4_en_free_tx_buf()
329 ring->cons += ring->last_nr_txbb; in mlx4_en_free_tx_buf()
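
mlx4_en_free_tx_buf() drains whatever is still outstanding: each pass reduces cons to a slot with & size_mask, frees that descriptor, and advances cons by however many TXBBs the descriptor used, while the (prod - cons) > size test catches an overrun ring even after the counters have wrapped. The standalone model below exercises only that counter arithmetic; the fixed last_nr_txbb = 1 stands in for the value mlx4_en_free_tx_desc() would return.

    /* Sketch: the drain loop's counter arithmetic with wrapped counters. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t size = 8, size_mask = size - 1;
        uint32_t cons = 0xfffffffdu, prod = 0x00000002u;   /* counters that wrapped */
        uint32_t last_nr_txbb;
        int freed = 0;

        if ((uint32_t)(prod - cons) > size)
            printf("ring overrun detected\n");

        while (cons != prod) {
            uint32_t slot = cons & size_mask;
            /* the driver would unmap and free the mbuf for this slot here */
            last_nr_txbb = 1;          /* stand-in for the descriptor's TXBB count */
            printf("freeing slot %u\n", (unsigned)slot);
            cons += last_nr_txbb;
            freed++;
        }
        printf("freed %d descriptors\n", freed);
        return 0;
    }
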
340 mlx4_en_tx_ring_is_full(struct mlx4_en_tx_ring *ring) in mlx4_en_tx_ring_is_full() argument
343 wqs = ring->size - (ring->prod - ring->cons); in mlx4_en_tx_ring_is_full()
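
mlx4_en_tx_ring_is_full() relies on the same counters: prod - cons is the number of TXBBs currently in use, so size - (prod - cons) is the free space, and the ring is reported full when that space drops below the room a maximum-size WQE needs (the exact threshold is a driver constant that is not part of this excerpt, so a placeholder is used below). The check stays correct across counter wrap because the arithmetic is unsigned 32-bit.

    /* Sketch: free-space check; the threshold is a placeholder value. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_WQEBBS_GUESS 16u   /* placeholder, not MLX4_EN_TX_WQE_MAX_WQEBBS */

    static bool tx_ring_is_full(uint32_t size, uint32_t prod, uint32_t cons)
    {
        uint32_t wqs = size - (prod - cons);   /* free TXBBs */
        return wqs < MAX_WQEBBS_GUESS;
    }

    int main(void)
    {
        /* wrapped counters: 0x10 - 0xfffffff0 == 32 in-flight TXBBs */
        printf("%d\n", tx_ring_is_full(1024, 0x00000010u, 0xfffffff0u));
        /* completely full ring */
        printf("%d\n", tx_ring_is_full(1024, 0x00000400u, 0x00000000u));
        return 0;
    }
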
352 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; in mlx4_en_process_tx_cq() local
360 u32 size_mask = ring->size_mask; in mlx4_en_process_tx_cq()
369 ring_index = ring->cons & size_mask; in mlx4_en_process_tx_cq()
393 txbbs_skipped += ring->last_nr_txbb; in mlx4_en_process_tx_cq()
394 ring_index = (ring_index + ring->last_nr_txbb) & size_mask; in mlx4_en_process_tx_cq()
396 ring->last_nr_txbb = mlx4_en_free_tx_desc( in mlx4_en_process_tx_cq()
397 priv, ring, ring_index); in mlx4_en_process_tx_cq()
398 mlx4_en_stamp_wqe(priv, ring, stamp_index, in mlx4_en_process_tx_cq()
399 !!((ring->cons + txbbs_stamp) & in mlx4_en_process_tx_cq()
400 ring->size)); in mlx4_en_process_tx_cq()
413 * the ring consumer. in mlx4_en_process_tx_cq()
418 ring->cons += txbbs_skipped; in mlx4_en_process_tx_cq()
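
mlx4_en_process_tx_cq() advances cons by the TXBBs it reclaims and re-stamps each completed WQE with an ownership bit taken from (counter & ring->size). Because size is a power of two, that bit flips exactly once per lap of the ring, which is what lets descriptors left over from the previous lap be told apart from freshly written ones. A short demonstration of the toggling:

    /* Sketch: (counter & size) flips once per lap when size is a power of two. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t size = 8;   /* power-of-two ring size */

        for (uint32_t counter = 0; counter < 4 * size; counter += size / 2)
            printf("counter=%2u slot=%u owner=%d\n",
                   (unsigned)counter,
                   (unsigned)(counter & (size - 1)),
                   !!(counter & size));
        return 0;
    }
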
427 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; in mlx4_en_tx_irq() local
429 if (priv->port_up == 0 || !spin_trylock(&ring->comp_lock)) in mlx4_en_tx_irq()
433 spin_unlock(&ring->comp_lock); in mlx4_en_tx_irq()
440 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; in mlx4_en_poll_tx_cq() local
447 if (!spin_trylock(&ring->comp_lock)) { in mlx4_en_poll_tx_cq()
452 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb); in mlx4_en_poll_tx_cq()
460 spin_unlock(&ring->comp_lock); in mlx4_en_poll_tx_cq()
466 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; in mlx4_en_xmit_poll() local
477 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) in mlx4_en_xmit_poll()
478 if (spin_trylock(&ring->comp_lock)) { in mlx4_en_xmit_poll()
480 spin_unlock(&ring->comp_lock); in mlx4_en_xmit_poll()
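
mlx4_en_xmit_poll() only attempts completion processing on every Nth call: (++poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0 is a cheap modulo test that requires MLX4_EN_TX_POLL_MODER to be a power of two, and even then the comp_lock is only trylocked so the transmit path never blocks on cleanup. A minimal model with an assumed moderation factor of 16:

    /* Sketch: every-Nth-call moderation; 16 is an assumed factor. */
    #include <stdint.h>
    #include <stdio.h>

    #define POLL_MODER 16u   /* must be a power of two for the mask trick */

    int main(void)
    {
        uint32_t poll_cnt = 0;

        for (int pkt = 0; pkt < 64; pkt++) {
            if ((++poll_cnt & (POLL_MODER - 1)) == 0)
                printf("packet %d: try to reap TX completions\n", pkt);
        }
        return 0;
    }
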
485 mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb) in mlx4_en_get_inline_hdr_size() argument
490 retval = MIN(ring->inline_thold, mb->m_len); in mlx4_en_get_inline_hdr_size()
494 retval = MIN(ring->inline_thold, mb->m_pkthdr.len); in mlx4_en_get_inline_hdr_size()
642 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; in mlx4_en_xmit() local
663 /* check if TX ring is full */ in mlx4_en_xmit()
664 if (unlikely(mlx4_en_tx_ring_is_full(ring))) { in mlx4_en_xmit()
671 KASSERT(((~ring->prod) & ring->size_mask) >= in mlx4_en_xmit()
672 (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring")); in mlx4_en_xmit()
676 (u32) (ring->prod - ring->cons - 1)); in mlx4_en_xmit()
682 owner_bit = (ring->prod & ring->size) ? in mlx4_en_xmit()
684 index = ring->prod & ring->size_mask; in mlx4_en_xmit()
686 (ring->buf + index * TXBB_SIZE); in mlx4_en_xmit()
687 tx_info = &ring->tx_info[index]; in mlx4_en_xmit()
707 ring->tx_csum++; in mlx4_en_xmit()
742 ring->oversized_packets++; in mlx4_en_xmit()
752 ring->bytes += payload_len + (num_pkts * ihs); in mlx4_en_xmit()
753 ring->packets += num_pkts; in mlx4_en_xmit()
754 ring->tso_packets++; in mlx4_en_xmit()
763 ihs = mlx4_en_get_inline_hdr_size(ring, mb); in mlx4_en_xmit()
764 ring->bytes += max_t (unsigned int, in mlx4_en_xmit()
766 ring->packets++; in mlx4_en_xmit()
775 err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map, in mlx4_en_xmit()
779 ring->defrag_attempts++; in mlx4_en_xmit()
782 ring->oversized_packets++; in mlx4_en_xmit()
787 err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map, in mlx4_en_xmit()
792 ring->oversized_packets++; in mlx4_en_xmit()
798 bus_dmamap_sync(ring->dma_tag, tx_info->dma_map, in mlx4_en_xmit()
802 bus_dmamap_unload(ring->dma_tag, tx_info->dma_map); in mlx4_en_xmit()
815 pad = (~(ring->prod + pad)) & ring->size_mask; in mlx4_en_xmit()
820 * pad in order to achieve a TX ring wraparound: in mlx4_en_xmit()
848 bf_prod = ring->prod; in mlx4_en_xmit()
888 ring->prod += tx_info->nr_txbb; in mlx4_en_xmit()
890 if (ring->bf_enabled && bf_size <= MAX_BF && in mlx4_en_xmit()
894 *(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); in mlx4_en_xmit()
906 mlx4_bf_copy(((u8 *)ring->bf.reg) + ring->bf.offset, in mlx4_en_xmit()
909 ring->bf.offset ^= ring->bf.buf_size; in mlx4_en_xmit()
918 writel(cpu_to_be32(ring->doorbell_qpn), in mlx4_en_xmit()
919 ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL); in mlx4_en_xmit()
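
In mlx4_en_xmit() the same counters drive descriptor placement: the slot is prod & size_mask, the ownership bit comes from prod & size, and (~(prod + pad)) & size_mask gives the number of TXBBs left between that position and the physical end of the ring buffer, so the driver can pad when a maximum-size WQE would otherwise cross the boundary (the threshold and the exact padding policy are in the surrounding source, not this excerpt). Whether the doorbell is rung through the BlueFlame register or a plain writel() of doorbell_qpn then depends on bf_enabled and the WQE size. The standalone check below verifies the "TXBBs to the end of the buffer" identity; the max-WQE size used in the report is an assumed value.

    /* Sketch: (~counter) & size_mask == TXBBs remaining after the current slot. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_WQEBBS_GUESS 4u   /* assumed maximum WQE size in TXBBs */

    int main(void)
    {
        uint32_t size = 16, size_mask = size - 1;

        for (uint32_t prod = 0; prod < 3 * size; prod++) {
            uint32_t slot = prod & size_mask;
            uint32_t to_end = (~prod) & size_mask;   /* as in the pad computation */

            assert(to_end == size - 1 - slot);
            if (to_end < MAX_WQEBBS_GUESS - 1)
                printf("prod=%2u (slot %2u): a max-size WQE would wrap, pad here\n",
                       (unsigned)prod, (unsigned)slot);
        }
        return 0;
    }
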
933 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; in mlx4_en_transmit_locked() local
946 if (ring->watchdog_time == 0) in mlx4_en_transmit_locked()
947 ring->watchdog_time = ticks + MLX4_EN_WATCHDOG_TIMEOUT; in mlx4_en_transmit_locked()
949 ring->watchdog_time = 0; in mlx4_en_transmit_locked()
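
In mlx4_en_transmit_locked() the excerpt shows the watchdog timestamp being armed only once (while it is still 0) after a failed send and cleared again on the success path, so a watchdog running elsewhere can compare it against ticks and react only if the stall persists. The sketch below reproduces that arm-once/clear-on-success pattern in userspace; the timeout value and the "reset the ring" reaction are assumptions for illustration, not taken from the driver.

    /* Sketch: arm-once / clear-on-success watchdog timestamp (assumed consumer). */
    #include <stdbool.h>
    #include <stdio.h>

    #define WATCHDOG_TIMEOUT 100   /* assumed, in the same units as `ticks` */

    static int ticks;              /* stand-in for the kernel tick counter */
    static int watchdog_time;      /* 0 means "not armed" */

    static void record_transmit(bool ok)
    {
        if (!ok) {
            if (watchdog_time == 0)            /* arm only once per stall */
                watchdog_time = ticks + WATCHDOG_TIMEOUT;
        } else {
            watchdog_time = 0;                 /* clear as soon as a send works */
        }
    }

    int main(void)
    {
        for (ticks = 0; ticks < 200; ticks++) {
            record_transmit(ticks >= 150);     /* ring stays full until tick 150 */
            if (watchdog_time != 0 && ticks >= watchdog_time) {
                printf("tick %d: TX ring looks stuck\n", ticks);
                watchdog_time = 0;             /* a real handler would reset the ring */
            }
        }
        return 0;
    }
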
958 struct mlx4_en_tx_ring *ring; in mlx4_en_transmit() local
974 ring = priv->tx_ring[i]; in mlx4_en_transmit()
976 spin_lock(&ring->tx_lock); in mlx4_en_transmit()
979 spin_unlock(&ring->tx_lock); in mlx4_en_transmit()
991 * Flush ring buffers.