Lines matching full:ring

49 struct mlx4_en_rx_ring *ring, in mlx4_en_init_rx_desc() argument
53 ((struct mlx4_en_rx_desc *)ring->buf) + index; in mlx4_en_init_rx_desc()
61 * If the number of used fragments does not fill up the ring in mlx4_en_init_rx_desc()
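The comment fragment above (source line 61) refers to descriptor padding: fragments beyond the ones actually in use must be neutralized so the HCA skips them. A minimal sketch of that padding loop, with the MLX4_EN_MAX_RX_SEGS bound and MLX4_EN_MEMTYPE_PAD key assumed from the mlx4 headers rather than taken from this listing:

    /* Pad unused fragments with a null address/size and the special
     * "pad" memory key so the hardware ignores them (sketch; the
     * constant names are assumptions). */
    for (i = nsegs; i < MLX4_EN_MAX_RX_SEGS; i++) {
            rx_desc->data[i].byte_count = 0;
            rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
            rx_desc->data[i].addr = 0;
    }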
74 mlx4_en_alloc_mbuf(struct mlx4_en_rx_ring *ring) in mlx4_en_alloc_mbuf() argument
79 mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size); in mlx4_en_alloc_mbuf()
81 mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size; in mlx4_en_alloc_mbuf()
92 if (mb_head->m_pkthdr.len >= ring->rx_mb_size) in mlx4_en_alloc_mbuf()
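The fragments above show the single-cluster fast path of mlx4_en_alloc_mbuf(); the check at source line 92 hints at a chained fallback when one cluster cannot hold rx_mb_size bytes. A minimal sketch of the fast path, with error handling and chaining elided (sketch_alloc_rx_mbuf is a hypothetical name):

    /* One packet-header mbuf backed by a jumbo cluster. m_getjcl()
     * requires a supported cluster size (MCLBYTES, MJUMPAGESIZE,
     * MJUM9BYTES or MJUM16BYTES). */
    static struct mbuf *
    sketch_alloc_rx_mbuf(int size)
    {
            struct mbuf *mb;

            mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
            if (mb != NULL)
                    mb->m_pkthdr.len = mb->m_len = size;
            return (mb);
    }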
110 mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_desc *rx_desc, in mlx4_en_alloc_buf() argument
123 if (unlikely(ring->spare.mbuf == NULL)) { in mlx4_en_alloc_buf()
124 mb = mlx4_en_alloc_mbuf(ring); in mlx4_en_alloc_buf()
132 err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map, in mlx4_en_alloc_buf()
133 mb, ring->spare.segs, &nsegs, BUS_DMA_NOWAIT); in mlx4_en_alloc_buf()
140 ring->spare.mbuf = mb; in mlx4_en_alloc_buf()
145 ring->spare.segs[i].ds_addr = 0; in mlx4_en_alloc_buf()
146 ring->spare.segs[i].ds_len = 0; in mlx4_en_alloc_buf()
149 bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map, in mlx4_en_alloc_buf()
155 bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, in mlx4_en_alloc_buf()
157 bus_dmamap_unload(ring->dma_tag, mb_list->dma_map); in mlx4_en_alloc_buf()
160 mb = mlx4_en_alloc_mbuf(ring); in mlx4_en_alloc_buf()
167 err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map, in mlx4_en_alloc_buf()
179 rx_desc->data[i].lkey = ring->rx_mr_key_be; in mlx4_en_alloc_buf()
190 bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD); in mlx4_en_alloc_buf()
196 mb_list->dma_map = ring->spare.dma_map; in mlx4_en_alloc_buf()
197 ring->spare.dma_map = map; in mlx4_en_alloc_buf()
200 mb_list->mbuf = ring->spare.mbuf; in mlx4_en_alloc_buf()
201 ring->spare.mbuf = NULL; in mlx4_en_alloc_buf()
205 rx_desc->data[0].addr = cpu_to_be64(ring->spare.segs[0].ds_addr); in mlx4_en_alloc_buf()
208 if (ring->spare.segs[i].ds_len != 0) { in mlx4_en_alloc_buf()
209 rx_desc->data[i].byte_count = cpu_to_be32(ring->spare.segs[i].ds_len); in mlx4_en_alloc_buf()
210 rx_desc->data[i].lkey = ring->rx_mr_key_be; in mlx4_en_alloc_buf()
211 rx_desc->data[i].addr = cpu_to_be64(ring->spare.segs[i].ds_addr); in mlx4_en_alloc_buf()
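Scattered as they are, the fragments of mlx4_en_alloc_buf() describe one coherent trick: the fresh mbuf is DMA-loaded into a dedicated spare map first, and only on success are the map and mbuf pointers swapped into the ring slot, so a failed load never leaves a slot unmapped. A simplified single-segment sketch of that idea (sketch_refill_slot is hypothetical; the real function additionally keeps a pre-loaded spare mbuf to fall back on):

    static int
    sketch_refill_slot(struct mlx4_en_rx_ring *ring,
        struct mlx4_en_rx_mbuf *mb_list)
    {
            bus_dma_segment_t seg;
            bus_dmamap_t map;
            struct mbuf *mb;
            int nsegs;

            mb = mlx4_en_alloc_mbuf(ring);
            if (mb == NULL)
                    return (ENOMEM);

            /* Load into the spare map; the slot's old mapping stays valid. */
            if (bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
                mb, &seg, &nsegs, BUS_DMA_NOWAIT) != 0) {
                    m_freem(mb);
                    return (ENOMEM);
            }

            /* Success: retire the old mapping (its mbuf is already owned
             * by the caller) and swap the loaded spare map into the slot. */
            bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
            map = mb_list->dma_map;
            mb_list->dma_map = ring->spare.dma_map;
            ring->spare.dma_map = map;
            mb_list->mbuf = mb;
            return (0);
    }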
223 mlx4_en_free_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_mbuf *mb_list) in mlx4_en_free_buf() argument
226 bus_dmamap_sync(ring->dma_tag, map, BUS_DMASYNC_POSTREAD); in mlx4_en_free_buf()
227 bus_dmamap_unload(ring->dma_tag, map); in mlx4_en_free_buf()
234 struct mlx4_en_rx_ring *ring, int index) in mlx4_en_prepare_rx_desc() argument
237 ((struct mlx4_en_rx_desc *)ring->buf) + index; in mlx4_en_prepare_rx_desc()
238 struct mlx4_en_rx_mbuf *mb_list = ring->mbuf + index; in mlx4_en_prepare_rx_desc()
242 if (mlx4_en_alloc_buf(ring, rx_desc, mb_list)) { in mlx4_en_prepare_rx_desc()
250 mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) in mlx4_en_update_rx_prod_db() argument
252 *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); in mlx4_en_update_rx_prod_db()
257 struct mlx4_en_rx_ring *ring; in mlx4_en_fill_rx_buffers() local
265 ring = priv->rx_ring[ring_ind]; in mlx4_en_fill_rx_buffers()
267 err = mlx4_en_prepare_rx_desc(priv, ring, in mlx4_en_fill_rx_buffers()
268 ring->actual_size); in mlx4_en_fill_rx_buffers()
270 if (ring->actual_size == 0) { in mlx4_en_fill_rx_buffers()
276 rounddown_pow_of_two(ring->actual_size); in mlx4_en_fill_rx_buffers()
278 "reducing ring size to %d\n", in mlx4_en_fill_rx_buffers()
279 ring->actual_size, new_size); in mlx4_en_fill_rx_buffers()
283 ring->actual_size++; in mlx4_en_fill_rx_buffers()
284 ring->prod++; in mlx4_en_fill_rx_buffers()
291 ring = priv->rx_ring[ring_ind]; in mlx4_en_fill_rx_buffers()
292 while (ring->actual_size > new_size) { in mlx4_en_fill_rx_buffers()
293 ring->actual_size--; in mlx4_en_fill_rx_buffers()
294 ring->prod--; in mlx4_en_fill_rx_buffers()
295 mlx4_en_free_buf(ring, in mlx4_en_fill_rx_buffers()
296 ring->mbuf + ring->actual_size); in mlx4_en_fill_rx_buffers()
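Read together, these fragments form the fallback path of mlx4_en_fill_rx_buffers(): if descriptor allocation stalls before the rings fill, every ring is shrunk to the same power-of-two occupancy and the excess buffers are released. Roughly reassembled from the fragments (priv->rx_ring_num is assumed as the ring-count field; error paths elided):

    new_size = rounddown_pow_of_two(ring->actual_size);
    for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
            ring = priv->rx_ring[ring_ind];
            while (ring->actual_size > new_size) {
                    ring->actual_size--;
                    ring->prod--;
                    mlx4_en_free_buf(ring, ring->mbuf + ring->actual_size);
            }
    }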
304 struct mlx4_en_rx_ring *ring) in mlx4_en_free_rx_buf() argument
309 ring->cons, ring->prod); in mlx4_en_free_rx_buf()
312 BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); in mlx4_en_free_rx_buf()
313 while (ring->cons != ring->prod) { in mlx4_en_free_rx_buf()
314 index = ring->cons & ring->size_mask; in mlx4_en_free_rx_buf()
316 mlx4_en_free_buf(ring, ring->mbuf + index); in mlx4_en_free_rx_buf()
317 ++ring->cons; in mlx4_en_free_rx_buf()
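This loop also documents the ring's index discipline: prod and cons are free-running counters, and because the ring size is a power of two, masking with size_mask (= size - 1) yields the slot index even after the counters wrap. For example:

    /* cons and prod are free-running u32 counters. */
    u32 fill  = ring->prod - ring->cons;        /* posted, unconsumed slots */
    u32 index = ring->cons & ring->size_mask;   /* slot of the next frame */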
370 struct mlx4_en_rx_ring *ring; in mlx4_en_create_rx_ring() local
375 ring = kzalloc(sizeof(struct mlx4_en_rx_ring), GFP_KERNEL); in mlx4_en_create_rx_ring()
376 if (!ring) { in mlx4_en_create_rx_ring()
377 en_err(priv, "Failed to allocate RX ring structure\n"); in mlx4_en_create_rx_ring()
394 &ring->dma_tag))) { in mlx4_en_create_rx_ring()
399 ring->prod = 0; in mlx4_en_create_rx_ring()
400 ring->cons = 0; in mlx4_en_create_rx_ring()
401 ring->size = size; in mlx4_en_create_rx_ring()
402 ring->size_mask = size - 1; in mlx4_en_create_rx_ring()
404 ring->log_stride = ilog2(sizeof(struct mlx4_en_rx_desc)); in mlx4_en_create_rx_ring()
405 ring->buf_size = (ring->size * sizeof(struct mlx4_en_rx_desc)) + TXBB_SIZE; in mlx4_en_create_rx_ring()
409 ring->mbuf = kzalloc(tmp, GFP_KERNEL); in mlx4_en_create_rx_ring()
410 if (ring->mbuf == NULL) { in mlx4_en_create_rx_ring()
415 err = -bus_dmamap_create(ring->dma_tag, 0, &ring->spare.dma_map); in mlx4_en_create_rx_ring()
420 err = -bus_dmamap_create(ring->dma_tag, 0, in mlx4_en_create_rx_ring()
421 &ring->mbuf[x].dma_map); in mlx4_en_create_rx_ring()
424 bus_dmamap_destroy(ring->dma_tag, in mlx4_en_create_rx_ring()
425 ring->mbuf[x].dma_map); in mlx4_en_create_rx_ring()
429 en_dbg(DRV, priv, "Allocated MBUF ring at addr:%p size:%d\n", in mlx4_en_create_rx_ring()
430 ring->mbuf, tmp); in mlx4_en_create_rx_ring()
432 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, in mlx4_en_create_rx_ring()
433 ring->buf_size, 2 * PAGE_SIZE); in mlx4_en_create_rx_ring()
437 err = mlx4_en_map_buffer(&ring->wqres.buf); in mlx4_en_create_rx_ring()
442 ring->buf = ring->wqres.buf.direct.buf; in mlx4_en_create_rx_ring()
443 *pring = ring; in mlx4_en_create_rx_ring()
447 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); in mlx4_en_create_rx_ring()
450 bus_dmamap_destroy(ring->dma_tag, in mlx4_en_create_rx_ring()
451 ring->mbuf[x].dma_map); in mlx4_en_create_rx_ring()
453 bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map); in mlx4_en_create_rx_ring()
455 vfree(ring->mbuf); in mlx4_en_create_rx_ring()
457 bus_dma_tag_destroy(ring->dma_tag); in mlx4_en_create_rx_ring()
459 kfree(ring); in mlx4_en_create_rx_ring()
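The tail fragments of mlx4_en_create_rx_ring() (source lines 447-459) are a classic goto-ladder unwind: each acquisition has a matching release, executed in reverse order when a later step fails. A sketch of the shape, with hypothetical label names:

    err_hwq:
            mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
    err_dma_map:
            for (x = 0; x != size; x++)
                    bus_dmamap_destroy(ring->dma_tag, ring->mbuf[x].dma_map);
            bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
    err_info:
            vfree(ring->mbuf);
    err_dma_tag:
            bus_dma_tag_destroy(ring->dma_tag);
    err_ring:
            kfree(ring);
            return (err);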
465 struct mlx4_en_rx_ring *ring; in mlx4_en_activate_rx_rings() local
473 ring = priv->rx_ring[ring_ind]; in mlx4_en_activate_rx_rings()
475 ring->prod = 0; in mlx4_en_activate_rx_rings()
476 ring->cons = 0; in mlx4_en_activate_rx_rings()
477 ring->actual_size = 0; in mlx4_en_activate_rx_rings()
478 ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn; in mlx4_en_activate_rx_rings()
479 ring->rx_mb_size = priv->rx_mb_size; in mlx4_en_activate_rx_rings()
483 __be32 *ptr = (__be32 *)ring->buf; in mlx4_en_activate_rx_rings()
487 ring->buf += TXBB_SIZE; in mlx4_en_activate_rx_rings()
490 ring->log_stride = ilog2(sizeof(struct mlx4_en_rx_desc)); in mlx4_en_activate_rx_rings()
491 ring->buf_size = ring->size * sizeof(struct mlx4_en_rx_desc); in mlx4_en_activate_rx_rings()
493 memset(ring->buf, 0, ring->buf_size); in mlx4_en_activate_rx_rings()
494 mlx4_en_update_rx_prod_db(ring); in mlx4_en_activate_rx_rings()
498 for (i = 0; i < ring->size; i++) in mlx4_en_activate_rx_rings()
499 mlx4_en_init_rx_desc(priv, ring, i); in mlx4_en_activate_rx_rings()
501 ring->rx_mr_key_be = cpu_to_be32(priv->mdev->mr.key); in mlx4_en_activate_rx_rings()
506 if (tcp_lro_init(&ring->lro)) in mlx4_en_activate_rx_rings()
509 ring->lro.ifp = priv->dev; in mlx4_en_activate_rx_rings()
520 ring = priv->rx_ring[ring_ind]; in mlx4_en_activate_rx_rings()
522 ring->size_mask = ring->actual_size - 1; in mlx4_en_activate_rx_rings()
523 mlx4_en_update_rx_prod_db(ring); in mlx4_en_activate_rx_rings()
535 ring = priv->rx_ring[ring_ind]; in mlx4_en_activate_rx_rings()
537 ring->buf -= TXBB_SIZE; in mlx4_en_activate_rx_rings()
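The activation fragments hide a subtle pointer dance: the first TXBB of the descriptor buffer is stamped with an invalid-ownership pattern and skipped, so ring->buf points past it, and both the rollback path here (source line 537) and mlx4_en_deactivate_rx_ring() below must subtract TXBB_SIZE again before the buffer is freed. A sketch, assuming the 1 << 31 stamp value the mlx4 driver uses elsewhere:

    __be32 *ptr = (__be32 *)ring->buf;
    *ptr = cpu_to_be32(1U << 31);   /* stamp the unused TXBB as invalid */
    ring->buf += TXBB_SIZE;         /* real descriptors start past it */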
550 struct mlx4_en_rx_ring *ring = *pring; in mlx4_en_destroy_rx_ring() local
553 mlx4_en_unmap_buffer(&ring->wqres.buf); in mlx4_en_destroy_rx_ring()
554 mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * sizeof(struct mlx4_en_rx_desc) + TXBB_SIZE); in mlx4_en_destroy_rx_ring()
556 bus_dmamap_destroy(ring->dma_tag, ring->mbuf[x].dma_map); in mlx4_en_destroy_rx_ring()
558 if (ring->spare.mbuf != NULL) { in mlx4_en_destroy_rx_ring()
559 bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map, in mlx4_en_destroy_rx_ring()
561 bus_dmamap_unload(ring->dma_tag, ring->spare.dma_map); in mlx4_en_destroy_rx_ring()
562 m_freem(ring->spare.mbuf); in mlx4_en_destroy_rx_ring()
564 bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map); in mlx4_en_destroy_rx_ring()
565 vfree(ring->mbuf); in mlx4_en_destroy_rx_ring()
566 bus_dma_tag_destroy(ring->dma_tag); in mlx4_en_destroy_rx_ring()
567 kfree(ring); in mlx4_en_destroy_rx_ring()
570 mlx4_en_cleanup_filters(priv, ring); in mlx4_en_destroy_rx_ring()
575 struct mlx4_en_rx_ring *ring) in mlx4_en_deactivate_rx_ring() argument
578 tcp_lro_free(&ring->lro); in mlx4_en_deactivate_rx_ring()
580 mlx4_en_free_rx_buf(priv, ring); in mlx4_en_deactivate_rx_ring()
582 ring->buf -= TXBB_SIZE; in mlx4_en_deactivate_rx_ring()
623 mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, in mlx4_en_rx_mb() argument
642 bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, in mlx4_en_rx_mb()
654 if (mlx4_en_alloc_buf(ring, rx_desc, mb_list)) in mlx4_en_rx_mb()
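These two fragments capture the RX path's buffer discipline: sync the completed buffer POSTREAD before the CPU reads it, then try to re-arm the descriptor; if re-arming fails, the frame is dropped and the existing mbuf stays in the ring, so no descriptor is ever lost. In outline:

    bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_POSTREAD);
    if (mlx4_en_alloc_buf(ring, rx_desc, mb_list))
            return (NULL);  /* drop the frame, recycle the mbuf in place */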
742 struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; in mlx4_en_process_rx_cq() local
752 u32 size_mask = ring->size_mask; in mlx4_en_process_rx_cq()
769 mb_list = ring->mbuf + index; in mlx4_en_process_rx_cq()
770 rx_desc = ((struct mlx4_en_rx_desc *)ring->buf) + index; in mlx4_en_process_rx_cq()
784 length -= ring->fcs_del; in mlx4_en_process_rx_cq()
786 mb = mlx4_en_rx_mb(priv, ring, rx_desc, mb_list, length); in mlx4_en_process_rx_cq()
788 ring->errors++; in mlx4_en_process_rx_cq()
792 ring->bytes += length; in mlx4_en_process_rx_cq()
793 ring->packets++; in mlx4_en_process_rx_cq()
827 if (ring->lro.lro_cnt != 0 && in mlx4_en_process_rx_cq()
828 tcp_lro_rx(&ring->lro, mb, 0) == 0) in mlx4_en_process_rx_cq()
853 tcp_lro_flush_all(&ring->lro); in mlx4_en_process_rx_cq()
859 ring->cons = mcq->cons_index; in mlx4_en_process_rx_cq()
860 ring->prod += polled; /* Polled descriptors were reallocated in place */ in mlx4_en_process_rx_cq()
861 mlx4_en_update_rx_prod_db(ring); in mlx4_en_process_rx_cq()
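Assembled from the fragments, the completion loop of mlx4_en_process_rx_cq() has roughly this shape (CQE ownership checks, checksum, and VLAN handling elided; length comes from the CQE):

    while (/* next CQE is owned by software */) {
            index = mcq->cons_index & size_mask;
            mb_list = ring->mbuf + index;
            rx_desc = ((struct mlx4_en_rx_desc *)ring->buf) + index;

            mb = mlx4_en_rx_mb(priv, ring, rx_desc, mb_list, length);
            if (mb == NULL) {
                    ring->errors++;
            } else {
                    ring->bytes += length;
                    ring->packets++;
                    /* hand mb to LRO or the regular input path here */
            }
            ++mcq->cons_index;
            ++polled;
    }
    tcp_lro_flush_all(&ring->lro);
    ring->cons = mcq->cons_index;
    ring->prod += polled;   /* descriptors were re-armed in place */
    mlx4_en_update_rx_prod_db(ring);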
922 struct mlx4_en_rx_ring *ring, in mlx4_en_config_rss_qp() argument
944 mlx4_en_fill_qp_context(priv, ring->actual_size, sizeof(struct mlx4_en_rx_desc), 0, 0, in mlx4_en_config_rss_qp()
945 qpn, ring->cqn, -1, context); in mlx4_en_config_rss_qp()
946 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); in mlx4_en_config_rss_qp()
951 ring->fcs_del = ETH_FCS_LEN; in mlx4_en_config_rss_qp()
953 ring->fcs_del = 0; in mlx4_en_config_rss_qp()
955 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); in mlx4_en_config_rss_qp()
960 mlx4_en_update_rx_prod_db(ring); in mlx4_en_config_rss_qp()
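The fcs_del fragments tie back to the length -= ring->fcs_del line in mlx4_en_process_rx_cq(): when the hardware delivers frames with the Ethernet FCS still attached, the driver trims those ETH_FCS_LEN (4) bytes in software. Schematically, with the capability test abstracted away:

    if (/* device delivers frames with the FCS attached */)
            ring->fcs_del = ETH_FCS_LEN;    /* trim 4 bytes per frame */
    else
            ring->fcs_del = 0;              /* hardware already stripped it */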