Lines matching +full:num +full:-rxq (drivers/net/ethernet/microsoft/mana/mana_en.c)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
38 struct gdma_queue *gdma_q = filp->private_data; in mana_dbg_q_read()
40 return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr, in mana_dbg_q_read()
41 gdma_q->queue_size); in mana_dbg_q_read()
63 apc->port_is_up = true; in mana_open()
78 if (!apc->port_is_up) in mana_close()
91 if (skb->protocol == htons(ETH_P_IP)) { in mana_checksum_info()
94 if (ip->protocol == IPPROTO_TCP) in mana_checksum_info()
97 if (ip->protocol == IPPROTO_UDP) in mana_checksum_info()
99 } else if (skb->protocol == htons(ETH_P_IPV6)) { in mana_checksum_info()
102 if (ip6->nexthdr == IPPROTO_TCP) in mana_checksum_info()
105 if (ip6->nexthdr == IPPROTO_UDP) in mana_checksum_info()
116 ash->dma_handle[sg_i] = da; in mana_add_sge()
117 ash->size[sg_i] = sge_len; in mana_add_sge()
119 tp->wqe_req.sgl[sg_i].address = da; in mana_add_sge()
120 tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey; in mana_add_sge()
121 tp->wqe_req.sgl[sg_i].size = sge_len; in mana_add_sge()
127 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; in mana_map_skb()
128 int hsg = 1; /* num of SGEs of linear part */ in mana_map_skb()
129 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_map_skb()
139 gc = gd->gdma_context; in mana_map_skb()
140 dev = gc->dev; in mana_map_skb()
144 sge1_len = skb_hlen - gso_hs; in mana_map_skb()
149 da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE); in mana_map_skb()
151 return -ENOMEM; in mana_map_skb()
153 mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey); in mana_map_skb()
157 da = dma_map_single(dev, skb->data + sge0_len, sge1_len, in mana_map_skb()
162 mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey); in mana_map_skb()
166 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mana_map_skb()
169 frag = &skb_shinfo(skb)->frags[i]; in mana_map_skb()
176 gd->gpa_mkey); in mana_map_skb()
183 netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n", in mana_map_skb()
184 skb->len); in mana_map_skb()
185 for (i = sg_i - 1; i >= hsg; i--) in mana_map_skb()
186 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], in mana_map_skb()
189 for (i = hsg - 1; i >= 0; i--) in mana_map_skb()
190 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], in mana_map_skb()
193 return -ENOMEM; in mana_map_skb()
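Note: mana_map_skb() splits the linear part of a GSO skb into two SGEs so the protocol headers (gso_hs bytes) land in their own entry, and hsg records how many SGEs the linear part used so the error path above unmaps single mappings and page mappings with the matching calls. A minimal userspace sketch of the split arithmetic (names assumed, not the driver's):

#include <stdint.h>

/* Split the linear region of skb_hlen bytes at the GSO header size.
 * Returns the number of header SGEs (hsg): 2 if a split happened, else 1. */
static int split_linear(uint32_t skb_hlen, uint32_t gso_hs,
			uint32_t *sge0_len, uint32_t *sge1_len)
{
	if (gso_hs && gso_hs < skb_hlen) {
		*sge0_len = gso_hs;            /* headers only */
		*sge1_len = skb_hlen - gso_hs; /* leading payload */
		return 2;
	}
	*sge0_len = skb_hlen;                  /* no split needed */
	*sge1_len = 0;
	return 1;
}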
206 int num_sge = 1 + skb_shinfo(skb)->nr_frags; in mana_fix_skb_head()
217 return -EINVAL; in mana_fix_skb_head()
228 if (skb->encapsulation) { in mana_get_gso_hs()
231 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in mana_get_gso_hs()
246 int gso_hs = 0; /* zero for non-GSO pkts */ in mana_start_xmit()
248 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_start_xmit()
259 if (unlikely(!apc->port_is_up)) in mana_start_xmit()
268 txq = &apc->tx_qp[txq_idx].txq; in mana_start_xmit()
269 gdma_sq = txq->gdma_sq; in mana_start_xmit()
270 cq = &apc->tx_qp[txq_idx].tx_cq; in mana_start_xmit()
271 tx_stats = &txq->stats; in mana_start_xmit()
273 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; in mana_start_xmit()
274 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; in mana_start_xmit()
276 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { in mana_start_xmit()
277 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; in mana_start_xmit()
280 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; in mana_start_xmit()
295 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
296 tx_stats->short_pkt_fmt++; in mana_start_xmit()
297 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
300 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
301 tx_stats->long_pkt_fmt++; in mana_start_xmit()
302 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
309 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; in mana_start_xmit()
311 if (skb->protocol == htons(ETH_P_IP)) in mana_start_xmit()
313 else if (skb->protocol == htons(ETH_P_IPV6)) in mana_start_xmit()
327 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
328 if (skb->encapsulation) { in mana_start_xmit()
329 tx_stats->tso_inner_packets++; in mana_start_xmit()
330 tx_stats->tso_inner_bytes += skb->len - gso_hs; in mana_start_xmit()
332 tx_stats->tso_packets++; in mana_start_xmit()
333 tx_stats->tso_bytes += skb->len - gso_hs; in mana_start_xmit()
335 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
344 pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size; in mana_start_xmit()
347 ip_hdr(skb)->tot_len = 0; in mana_start_xmit()
348 ip_hdr(skb)->check = 0; in mana_start_xmit()
349 tcp_hdr(skb)->check = in mana_start_xmit()
350 ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in mana_start_xmit()
351 ip_hdr(skb)->daddr, 0, in mana_start_xmit()
354 ipv6_hdr(skb)->payload_len = 0; in mana_start_xmit()
355 tcp_hdr(skb)->check = in mana_start_xmit()
356 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in mana_start_xmit()
357 &ipv6_hdr(skb)->daddr, 0, in mana_start_xmit()
360 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in mana_start_xmit()
363 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
364 tx_stats->csum_partial++; in mana_start_xmit()
365 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
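Note: for TSO the driver zeroes tot_len/check and seeds tcp->check with ~csum_tcpudp_magic(..., 0, ...), i.e. the folded (but not complemented) pseudo-header sum computed with a zero length, so the NIC can add each segment's length and payload checksum itself. A rough userspace illustration of that seed for IPv4 (host-order inputs; not the kernel csum helpers):

#include <stdint.h>

/* Ones'-complement sum of the TCP pseudo-header with length 0, folded
 * to 16 bits but NOT complemented -- the hardware keeps adding to it. */
static uint16_t tso_pseudo_seed(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
	uint32_t sum = (saddr >> 16) + (saddr & 0xffff) +
		       (daddr >> 16) + (daddr & 0xffff) + proto;

	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}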
401 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
402 tx_stats->mana_map_err++; in mana_start_xmit()
403 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
407 skb_queue_tail(&txq->pending_skbs, skb); in mana_start_xmit()
409 len = skb->len; in mana_start_xmit()
413 (struct gdma_posted_wqe_info *)skb->cb); in mana_start_xmit()
416 apc->eth_stats.stop_queue++; in mana_start_xmit()
420 (void)skb_dequeue_tail(&txq->pending_skbs); in mana_start_xmit()
427 atomic_inc(&txq->pending_sends); in mana_start_xmit()
429 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq); in mana_start_xmit()
434 tx_stats = &txq->stats; in mana_start_xmit()
435 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
436 tx_stats->packets++; in mana_start_xmit()
437 tx_stats->bytes += len; in mana_start_xmit()
438 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
443 apc->eth_stats.wake_queue++; in mana_start_xmit()
452 ndev->stats.tx_dropped++; in mana_start_xmit()
462 unsigned int num_queues = apc->num_queues; in mana_get_stats64()
469 if (!apc->port_is_up) in mana_get_stats64()
472 netdev_stats_to_stats64(st, &ndev->stats); in mana_get_stats64()
475 rx_stats = &apc->rxqs[q]->stats; in mana_get_stats64()
478 start = u64_stats_fetch_begin(&rx_stats->syncp); in mana_get_stats64()
479 packets = rx_stats->packets; in mana_get_stats64()
480 bytes = rx_stats->bytes; in mana_get_stats64()
481 } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); in mana_get_stats64()
483 st->rx_packets += packets; in mana_get_stats64()
484 st->rx_bytes += bytes; in mana_get_stats64()
488 tx_stats = &apc->tx_qp[q].txq.stats; in mana_get_stats64()
491 start = u64_stats_fetch_begin(&tx_stats->syncp); in mana_get_stats64()
492 packets = tx_stats->packets; in mana_get_stats64()
493 bytes = tx_stats->bytes; in mana_get_stats64()
494 } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); in mana_get_stats64()
496 st->tx_packets += packets; in mana_get_stats64()
497 st->tx_bytes += bytes; in mana_get_stats64()
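Note: both per-queue loops in mana_get_stats64() use the u64_stats seqcount pattern: snapshot packets/bytes, then retry if a writer raced with the read. A compact userspace mock of the idea (C11 atomics stand in for the kernel's u64_stats API; writers increment seq before and after updating):

#include <stdatomic.h>
#include <stdint.h>

struct stats {
	atomic_uint seq;	/* even = stable, odd = write in progress */
	uint64_t packets, bytes;
};

static void stats_read(struct stats *s, uint64_t *p, uint64_t *b)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);	/* u64_stats_fetch_begin() */
		*p = s->packets;
		*b = s->bytes;
	} while (atomic_load(&s->seq) != start || (start & 1));
						/* u64_stats_fetch_retry() */
}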
506 struct sock *sk = skb->sk; in mana_get_tx_queue()
509 txq = apc->indir_table[hash & (apc->indir_table_sz - 1)]; in mana_get_tx_queue()
512 rcu_access_pointer(sk->sk_dst_cache)) in mana_get_tx_queue()
523 if (ndev->real_num_tx_queues == 1) in mana_select_queue()
526 txq = sk_tx_queue_get(skb->sk); in mana_select_queue()
528 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) { in mana_select_queue()
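Note: mana_get_tx_queue() maps the skb hash to a queue through the RSS indirection table; because indir_table_sz is a power of two, `hash & (size - 1)` is a cheap modulo. Sketch:

#include <stdint.h>

/* Power-of-two table lookup: hash & (size - 1) == hash % size. */
static uint32_t pick_txq(const uint32_t *indir_table, uint32_t table_sz,
			 uint32_t skb_hash)
{
	return indir_table[skb_hash & (table_sz - 1)];
}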
538 /* Release pre-allocated RX buffers */
544 dev = mpc->ac->gdma_dev->gdma_context->dev; in mana_pre_dealloc_rxbufs()
546 if (!mpc->rxbufs_pre) in mana_pre_dealloc_rxbufs()
549 if (!mpc->das_pre) in mana_pre_dealloc_rxbufs()
552 while (mpc->rxbpre_total) { in mana_pre_dealloc_rxbufs()
553 i = --mpc->rxbpre_total; in mana_pre_dealloc_rxbufs()
554 dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize, in mana_pre_dealloc_rxbufs()
556 put_page(virt_to_head_page(mpc->rxbufs_pre[i])); in mana_pre_dealloc_rxbufs()
559 kfree(mpc->das_pre); in mana_pre_dealloc_rxbufs()
560 mpc->das_pre = NULL; in mana_pre_dealloc_rxbufs()
563 kfree(mpc->rxbufs_pre); in mana_pre_dealloc_rxbufs()
564 mpc->rxbufs_pre = NULL; in mana_pre_dealloc_rxbufs()
567 mpc->rxbpre_datasize = 0; in mana_pre_dealloc_rxbufs()
568 mpc->rxbpre_alloc_size = 0; in mana_pre_dealloc_rxbufs()
569 mpc->rxbpre_headroom = 0; in mana_pre_dealloc_rxbufs()
572 /* Get a buffer from the pre-allocated RX buffers */
573 static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da) in mana_get_rxbuf_pre() argument
575 struct net_device *ndev = rxq->ndev; in mana_get_rxbuf_pre()
581 if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) { in mana_get_rxbuf_pre()
582 netdev_err(ndev, "No RX pre-allocated bufs\n"); in mana_get_rxbuf_pre()
587 if (mpc->rxbpre_datasize != rxq->datasize) { in mana_get_rxbuf_pre()
589 mpc->rxbpre_datasize, rxq->datasize); in mana_get_rxbuf_pre()
593 if (mpc->rxbpre_alloc_size != rxq->alloc_size) { in mana_get_rxbuf_pre()
595 mpc->rxbpre_alloc_size, rxq->alloc_size); in mana_get_rxbuf_pre()
599 if (mpc->rxbpre_headroom != rxq->headroom) { in mana_get_rxbuf_pre()
601 mpc->rxbpre_headroom, rxq->headroom); in mana_get_rxbuf_pre()
605 mpc->rxbpre_total--; in mana_get_rxbuf_pre()
607 *da = mpc->das_pre[mpc->rxbpre_total]; in mana_get_rxbuf_pre()
608 va = mpc->rxbufs_pre[mpc->rxbpre_total]; in mana_get_rxbuf_pre()
609 mpc->rxbufs_pre[mpc->rxbpre_total] = NULL; in mana_get_rxbuf_pre()
612 if (!mpc->rxbpre_total) in mana_get_rxbuf_pre()
645 mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize, in mana_pre_alloc_rxbufs()
646 &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom); in mana_pre_alloc_rxbufs()
648 dev = mpc->ac->gdma_dev->gdma_context->dev; in mana_pre_alloc_rxbufs()
650 num_rxb = num_queues * mpc->rx_queue_size; in mana_pre_alloc_rxbufs()
652 WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n"); in mana_pre_alloc_rxbufs()
653 mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL); in mana_pre_alloc_rxbufs()
654 if (!mpc->rxbufs_pre) in mana_pre_alloc_rxbufs()
657 mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL); in mana_pre_alloc_rxbufs()
658 if (!mpc->das_pre) in mana_pre_alloc_rxbufs()
661 mpc->rxbpre_total = 0; in mana_pre_alloc_rxbufs()
664 page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size)); in mana_pre_alloc_rxbufs()
670 da = dma_map_single(dev, va + mpc->rxbpre_headroom, in mana_pre_alloc_rxbufs()
671 mpc->rxbpre_datasize, DMA_FROM_DEVICE); in mana_pre_alloc_rxbufs()
677 mpc->rxbufs_pre[i] = va; in mana_pre_alloc_rxbufs()
678 mpc->das_pre[i] = da; in mana_pre_alloc_rxbufs()
679 mpc->rxbpre_total = i + 1; in mana_pre_alloc_rxbufs()
685 netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues); in mana_pre_alloc_rxbufs()
687 return -ENOMEM; in mana_pre_alloc_rxbufs()
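Note: the allocator records rxbpre_total = i + 1 only after each buffer is fully mapped, so the teardown in mana_pre_dealloc_rxbufs() (listed earlier) unmaps exactly the buffers that were actually set up. The shape of that pattern, as a sketch with hypothetical helpers:

#include <stddef.h>

struct buf { void *va; unsigned long da; };

int setup_buffer(struct buf *b);	/* hypothetical helpers */
void teardown_buffer(struct buf *b);

/* Advance 'total' only after an element is fully initialized, so the
 * unwind path releases exactly the elements that exist. */
static int setup_all(struct buf *bufs, size_t n, size_t *total)
{
	size_t i;

	*total = 0;
	for (i = 0; i < n; i++) {
		if (setup_buffer(&bufs[i]) < 0)
			goto err;
		*total = i + 1;
	}
	return 0;
err:
	while (*total)
		teardown_buffer(&bufs[--(*total)]);
	return -1;
}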
693 unsigned int old_mtu = ndev->mtu; in mana_change_mtu()
696 /* Pre-allocate buffers to prevent failure in mana_attach later */ in mana_change_mtu()
697 err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues); in mana_change_mtu()
709 WRITE_ONCE(ndev->mtu, new_mtu); in mana_change_mtu()
714 WRITE_ONCE(ndev->mtu, old_mtu); in mana_change_mtu()
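Note: mana_change_mtu() pre-allocates RX buffers at the new size before tearing anything down, then detaches, commits the MTU, and re-attaches, rolling the MTU back if the re-attach fails; this avoids ending up with no usable RX buffers after a failed resize. The control flow, sketched with hypothetical names:

struct port { int mtu; };

int prealloc_rx_buffers(struct port *p, int mtu);	/* hypothetical */
void free_prealloc_rx_buffers(struct port *p);
void detach(struct port *p);
int attach(struct port *p);

static int change_mtu(struct port *p, int new_mtu)
{
	int old_mtu = p->mtu;
	int err;

	/* Reserve buffers for the new size first: on failure the
	 * running configuration is untouched. */
	err = prealloc_rx_buffers(p, new_mtu);
	if (err)
		return err;

	detach(p);
	p->mtu = new_mtu;

	err = attach(p);
	if (err) {
		p->mtu = old_mtu;	/* roll back, restore service */
		attach(p);
	}

	free_prealloc_rx_buffers(p);	/* whatever was not consumed */
	return err;
}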
740 debugfs_remove(apc->mana_port_debugfs); in mana_cleanup_port_context()
741 apc->mana_port_debugfs = NULL; in mana_cleanup_port_context()
742 kfree(apc->rxqs); in mana_cleanup_port_context()
743 apc->rxqs = NULL; in mana_cleanup_port_context()
748 apc->indir_table_sz = 0; in mana_cleanup_indir_table()
749 kfree(apc->indir_table); in mana_cleanup_indir_table()
750 kfree(apc->rxobj_table); in mana_cleanup_indir_table()
755 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), in mana_init_port_context()
758 return !apc->rxqs ? -ENOMEM : 0; in mana_init_port_context()
764 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_send_request()
767 struct device *dev = gc->dev; in mana_send_request()
771 req->dev_id = gc->mana.dev_id; in mana_send_request()
772 req->activity_id = atomic_inc_return(&activity_id); in mana_send_request()
776 if (err || resp->status) { in mana_send_request()
778 err, resp->status); in mana_send_request()
779 return err ? err : -EPROTO; in mana_send_request()
782 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 || in mana_send_request()
783 req->activity_id != resp->activity_id) { in mana_send_request()
785 req->dev_id.as_uint32, resp->dev_id.as_uint32, in mana_send_request()
786 req->activity_id, resp->activity_id); in mana_send_request()
787 return -EPROTO; in mana_send_request()
797 if (resp_hdr->response.msg_type != expected_code) in mana_verify_resp_hdr()
798 return -EPROTO; in mana_verify_resp_hdr()
800 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1) in mana_verify_resp_hdr()
801 return -EPROTO; in mana_verify_resp_hdr()
803 if (resp_hdr->response.msg_size < min_size) in mana_verify_resp_hdr()
804 return -EPROTO; in mana_verify_resp_hdr()
821 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_hw_vport()
824 netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err); in mana_pf_register_hw_vport()
831 netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n", in mana_pf_register_hw_vport()
833 return err ? err : -EPROTO; in mana_pf_register_hw_vport()
836 apc->port_handle = resp.hw_vport_handle; in mana_pf_register_hw_vport()
848 req.hw_vport_handle = apc->port_handle; in mana_pf_deregister_hw_vport()
850 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_hw_vport()
853 netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n", in mana_pf_deregister_hw_vport()
861 netdev_err(apc->ndev, in mana_pf_deregister_hw_vport()
874 req.vport = apc->port_handle; in mana_pf_register_filter()
875 memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN); in mana_pf_register_filter()
877 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_filter()
880 netdev_err(apc->ndev, "Failed to register filter: %d\n", err); in mana_pf_register_filter()
887 netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n", in mana_pf_register_filter()
889 return err ? err : -EPROTO; in mana_pf_register_filter()
892 apc->pf_filter_handle = resp.filter_handle; in mana_pf_register_filter()
904 req.filter_handle = apc->pf_filter_handle; in mana_pf_deregister_filter()
906 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_filter()
909 netdev_err(apc->ndev, "Failed to unregister filter: %d\n", in mana_pf_deregister_filter()
917 netdev_err(apc->ndev, in mana_pf_deregister_filter()
926 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_query_device_cfg()
929 struct device *dev = gc->dev; in mana_query_device_cfg()
953 err = -EPROTO; in mana_query_device_cfg()
960 gc->adapter_mtu = resp.adapter_mtu; in mana_query_device_cfg()
962 gc->adapter_mtu = ETH_FRAME_LEN; in mana_query_device_cfg()
964 debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu); in mana_query_device_cfg()
981 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_vport_cfg()
992 return -EPROTO; in mana_query_vport_cfg()
1001 netdev_warn(apc->ndev, in mana_query_vport_cfg()
1003 MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx); in mana_query_vport_cfg()
1007 apc->port_handle = resp.vport; in mana_query_vport_cfg()
1008 ether_addr_copy(apc->mac_addr, resp.mac_addr); in mana_query_vport_cfg()
1015 mutex_lock(&apc->vport_mutex); in mana_uncfg_vport()
1016 apc->vport_use_count--; in mana_uncfg_vport()
1017 WARN_ON(apc->vport_use_count < 0); in mana_uncfg_vport()
1018 mutex_unlock(&apc->vport_mutex); in mana_uncfg_vport()
1047 mutex_lock(&apc->vport_mutex); in mana_cfg_vport()
1048 if (apc->vport_use_count > 0) { in mana_cfg_vport()
1049 mutex_unlock(&apc->vport_mutex); in mana_cfg_vport()
1050 return -EBUSY; in mana_cfg_vport()
1052 apc->vport_use_count++; in mana_cfg_vport()
1053 mutex_unlock(&apc->vport_mutex); in mana_cfg_vport()
1057 req.vport = apc->port_handle; in mana_cfg_vport()
1061 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_cfg_vport()
1064 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err); in mana_cfg_vport()
1071 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n", in mana_cfg_vport()
1074 err = -EPROTO; in mana_cfg_vport()
1079 apc->tx_shortform_allowed = resp.short_form_allowed; in mana_cfg_vport()
1080 apc->tx_vp_offset = resp.tx_vport_offset; in mana_cfg_vport()
1082 netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n", in mana_cfg_vport()
1083 apc->port_handle, protection_dom_id, doorbell_pg_id); in mana_cfg_vport()
1099 struct net_device *ndev = apc->ndev; in mana_cfg_vport_steering()
1103 req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz); in mana_cfg_vport_steering()
1106 return -ENOMEM; in mana_cfg_vport_steering()
1108 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, in mana_cfg_vport_steering()
1111 req->hdr.req.msg_version = GDMA_MESSAGE_V2; in mana_cfg_vport_steering()
1113 req->vport = apc->port_handle; in mana_cfg_vport_steering()
1114 req->num_indir_entries = apc->indir_table_sz; in mana_cfg_vport_steering()
1115 req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2, in mana_cfg_vport_steering()
1117 req->rx_enable = rx; in mana_cfg_vport_steering()
1118 req->rss_enable = apc->rss_state; in mana_cfg_vport_steering()
1119 req->update_default_rxobj = update_default_rxobj; in mana_cfg_vport_steering()
1120 req->update_hashkey = update_key; in mana_cfg_vport_steering()
1121 req->update_indir_tab = update_tab; in mana_cfg_vport_steering()
1122 req->default_rxobj = apc->default_rxobj; in mana_cfg_vport_steering()
1123 req->cqe_coalescing_enable = 0; in mana_cfg_vport_steering()
1126 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); in mana_cfg_vport_steering()
1129 memcpy(req->indir_tab, apc->rxobj_table, in mana_cfg_vport_steering()
1130 flex_array_size(req, indir_tab, req->num_indir_entries)); in mana_cfg_vport_steering()
1132 err = mana_send_request(apc->ac, req, req_buf_size, &resp, in mana_cfg_vport_steering()
1149 err = -EPROTO; in mana_cfg_vport_steering()
1153 apc->port_handle, apc->indir_table_sz); in mana_cfg_vport_steering()
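Note: the steering request carries the indirection table as a flexible array member, so the allocation uses struct_size() and the copy uses flex_array_size(); both are overflow-checked equivalents of sizeof(*req) + n * sizeof(elem). Minimal illustration (plain C, without the kernel's overflow checking; field layout assumed):

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct steer_req {
	uint64_t vport;
	uint16_t num_indir_entries;
	uint64_t indir_tab[];		/* flexible array member */
};

static struct steer_req *alloc_req(const uint64_t *rxobjs, uint16_t n)
{
	/* struct_size(req, indir_tab, n) */
	struct steer_req *req =
		malloc(sizeof(*req) + n * sizeof(req->indir_tab[0]));

	if (!req)
		return NULL;
	req->num_indir_entries = n;
	/* flex_array_size(req, indir_tab, n) */
	memcpy(req->indir_tab, rxobjs, n * sizeof(req->indir_tab[0]));
	return req;
}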
1167 struct net_device *ndev = apc->ndev; in mana_create_wq_obj()
1174 req.wq_gdma_region = wq_spec->gdma_region; in mana_create_wq_obj()
1175 req.cq_gdma_region = cq_spec->gdma_region; in mana_create_wq_obj()
1176 req.wq_size = wq_spec->queue_size; in mana_create_wq_obj()
1177 req.cq_size = cq_spec->queue_size; in mana_create_wq_obj()
1178 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id; in mana_create_wq_obj()
1179 req.cq_parent_qid = cq_spec->attached_eq; in mana_create_wq_obj()
1181 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_create_wq_obj()
1194 err = -EPROTO; in mana_create_wq_obj()
1200 err = -EPROTO; in mana_create_wq_obj()
1205 wq_spec->queue_index = resp.wq_id; in mana_create_wq_obj()
1206 cq_spec->queue_index = resp.cq_id; in mana_create_wq_obj()
1219 struct net_device *ndev = apc->ndev; in mana_destroy_wq_obj()
1227 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_destroy_wq_obj()
1244 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_destroy_eq()
1248 if (!ac->eqs) in mana_destroy_eq()
1251 debugfs_remove_recursive(ac->mana_eqs_debugfs); in mana_destroy_eq()
1252 ac->mana_eqs_debugfs = NULL; in mana_destroy_eq()
1254 for (i = 0; i < gc->max_num_queues; i++) { in mana_destroy_eq()
1255 eq = ac->eqs[i].eq; in mana_destroy_eq()
1262 kfree(ac->eqs); in mana_destroy_eq()
1263 ac->eqs = NULL; in mana_destroy_eq()
1268 struct mana_eq eq = ac->eqs[i]; in mana_create_eq_debugfs()
1272 eq.mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs); in mana_create_eq_debugfs()
1273 debugfs_create_u32("head", 0400, eq.mana_eq_debugfs, &eq.eq->head); in mana_create_eq_debugfs()
1274 debugfs_create_u32("tail", 0400, eq.mana_eq_debugfs, &eq.eq->tail); in mana_create_eq_debugfs()
1280 struct gdma_dev *gd = ac->gdma_dev; in mana_create_eq()
1281 struct gdma_context *gc = gd->gdma_context; in mana_create_eq()
1286 ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq), in mana_create_eq()
1288 if (!ac->eqs) in mana_create_eq()
1289 return -ENOMEM; in mana_create_eq()
1295 spec.eq.context = ac->eqs; in mana_create_eq()
1298 ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs); in mana_create_eq()
1300 for (i = 0; i < gc->max_num_queues; i++) { in mana_create_eq()
1301 spec.eq.msix_index = (i + 1) % gc->num_msix_usable; in mana_create_eq()
1302 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); in mana_create_eq()
1304 dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err); in mana_create_eq()
1316 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq) in mana_fence_rq() argument
1322 init_completion(&rxq->fence_event); in mana_fence_rq()
1326 req.wq_obj_handle = rxq->rxobj; in mana_fence_rq()
1328 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_fence_rq()
1331 netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n", in mana_fence_rq()
1332 rxq->rxq_idx, err); in mana_fence_rq()
1338 netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n", in mana_fence_rq()
1339 rxq->rxq_idx, err, resp.hdr.status); in mana_fence_rq()
1341 err = -EPROTO; in mana_fence_rq()
1346 if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) { in mana_fence_rq()
1347 netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n", in mana_fence_rq()
1348 rxq->rxq_idx); in mana_fence_rq()
1349 return -ETIMEDOUT; in mana_fence_rq()
1358 struct mana_rxq *rxq; in mana_fence_rqs() local
1361 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_fence_rqs()
1362 rxq = apc->rxqs[rxq_idx]; in mana_fence_rqs()
1363 err = mana_fence_rq(apc, rxq); in mana_fence_rqs()
1376 used_space_old = wq->head - wq->tail; in mana_move_wq_tail()
1377 used_space_new = wq->head - (wq->tail + num_units); in mana_move_wq_tail()
1380 return -ERANGE; in mana_move_wq_tail()
1382 wq->tail += num_units; in mana_move_wq_tail()
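Note: mana_move_wq_tail() relies on free-running unsigned counters: head - tail yields the used space correctly even across 32-bit wrap-around, and moving the tail past the head shows up as used_space_new > used_space_old. Standalone version of the check:

#include <stdint.h>
#include <errno.h>

struct wq { uint32_t head, tail; };

static int move_tail(struct wq *wq, uint32_t num_units)
{
	uint32_t used_old = wq->head - wq->tail;
	uint32_t used_new = wq->head - (wq->tail + num_units);

	if (used_new > used_old)	/* tail would pass head */
		return -ERANGE;
	wq->tail += num_units;
	return 0;
}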
1388 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; in mana_unmap_skb()
1389 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_unmap_skb()
1390 struct device *dev = gc->dev; in mana_unmap_skb()
1394 hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1; in mana_unmap_skb()
1397 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], in mana_unmap_skb()
1400 for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++) in mana_unmap_skb()
1401 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], in mana_unmap_skb()
1407 struct gdma_comp *completions = cq->gdma_comp_buf; in mana_poll_tx_cq()
1411 struct mana_txq *txq = cq->txq; in mana_poll_tx_cq()
1422 ndev = txq->ndev; in mana_poll_tx_cq()
1425 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, in mana_poll_tx_cq()
1438 if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type != in mana_poll_tx_cq()
1442 switch (cqe_oob->cqe_hdr.cqe_type) { in mana_poll_tx_cq()
1457 cqe_oob->cqe_hdr.cqe_type); in mana_poll_tx_cq()
1459 apc->eth_stats.tx_cqe_err++; in mana_poll_tx_cq()
1468 cqe_oob->cqe_hdr.cqe_type); in mana_poll_tx_cq()
1470 apc->eth_stats.tx_cqe_unknown_type++; in mana_poll_tx_cq()
1474 if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) in mana_poll_tx_cq()
1477 skb = skb_dequeue(&txq->pending_skbs); in mana_poll_tx_cq()
1481 wqe_info = (struct gdma_posted_wqe_info *)skb->cb; in mana_poll_tx_cq()
1482 wqe_unit_cnt += wqe_info->wqe_size_in_bu; in mana_poll_tx_cq()
1486 napi_consume_skb(skb, cq->budget); in mana_poll_tx_cq()
1494 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); in mana_poll_tx_cq()
1496 gdma_wq = txq->gdma_sq; in mana_poll_tx_cq()
1502 net_txq = txq->net_txq; in mana_poll_tx_cq()
1505 /* Ensure checking txq_stopped before apc->port_is_up. */ in mana_poll_tx_cq()
1508 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { in mana_poll_tx_cq()
1510 apc->eth_stats.wake_queue++; in mana_poll_tx_cq()
1513 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) in mana_poll_tx_cq()
1516 cq->work_done = pkt_transmitted; in mana_poll_tx_cq()
1519 static void mana_post_pkt_rxq(struct mana_rxq *rxq) in mana_post_pkt_rxq() argument
1525 curr_index = rxq->buf_index++; in mana_post_pkt_rxq()
1526 if (rxq->buf_index == rxq->num_rx_buf) in mana_post_pkt_rxq()
1527 rxq->buf_index = 0; in mana_post_pkt_rxq()
1529 recv_buf_oob = &rxq->rx_oobs[curr_index]; in mana_post_pkt_rxq()
1531 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req, in mana_post_pkt_rxq()
1532 &recv_buf_oob->wqe_inf); in mana_post_pkt_rxq()
1536 WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_post_pkt_rxq()
1539 static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va, in mana_build_skb() argument
1542 struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size); in mana_build_skb()
1547 if (xdp->data_hard_start) { in mana_build_skb()
1548 u32 metasize = xdp->data - xdp->data_meta; in mana_build_skb()
1550 skb_reserve(skb, xdp->data - xdp->data_hard_start); in mana_build_skb()
1551 skb_put(skb, xdp->data_end - xdp->data); in mana_build_skb()
1557 skb_reserve(skb, rxq->headroom); in mana_build_skb()
1564 struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq) in mana_rx_skb() argument
1566 struct mana_stats_rx *rx_stats = &rxq->stats; in mana_rx_skb()
1567 struct net_device *ndev = rxq->ndev; in mana_rx_skb()
1568 uint pkt_len = cqe->ppi[0].pkt_len; in mana_rx_skb()
1569 u16 rxq_idx = rxq->rxq_idx; in mana_rx_skb()
1576 rxq->rx_cq.work_done++; in mana_rx_skb()
1577 napi = &rxq->rx_cq.napi; in mana_rx_skb()
1580 ++ndev->stats.rx_dropped; in mana_rx_skb()
1584 act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len); in mana_rx_skb()
1586 if (act == XDP_REDIRECT && !rxq->xdp_rc) in mana_rx_skb()
1592 skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp); in mana_rx_skb()
1600 skb->dev = napi->dev; in mana_rx_skb()
1602 skb->protocol = eth_type_trans(skb, ndev); in mana_rx_skb()
1606 if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) { in mana_rx_skb()
1607 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) in mana_rx_skb()
1608 skb->ip_summed = CHECKSUM_UNNECESSARY; in mana_rx_skb()
1611 if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) { in mana_rx_skb()
1612 hash_value = cqe->ppi[0].pkt_hash; in mana_rx_skb()
1614 if (cqe->rx_hashtype & MANA_HASH_L4) in mana_rx_skb()
1620 if (cqe->rx_vlantag_present) { in mana_rx_skb()
1621 u16 vlan_tci = cqe->rx_vlan_id; in mana_rx_skb()
1626 u64_stats_update_begin(&rx_stats->syncp); in mana_rx_skb()
1627 rx_stats->packets++; in mana_rx_skb()
1628 rx_stats->bytes += pkt_len; in mana_rx_skb()
1631 rx_stats->xdp_tx++; in mana_rx_skb()
1632 u64_stats_update_end(&rx_stats->syncp); in mana_rx_skb()
1645 u64_stats_update_begin(&rx_stats->syncp); in mana_rx_skb()
1646 rx_stats->xdp_drop++; in mana_rx_skb()
1647 u64_stats_update_end(&rx_stats->syncp); in mana_rx_skb()
1651 page_pool_recycle_direct(rxq->page_pool, in mana_rx_skb()
1654 WARN_ON_ONCE(rxq->xdp_save_va); in mana_rx_skb()
1656 rxq->xdp_save_va = buf_va; in mana_rx_skb()
1659 ++ndev->stats.rx_dropped; in mana_rx_skb()
1664 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, in mana_get_rxfrag() argument
1673 if (rxq->xdp_save_va) { in mana_get_rxfrag()
1674 va = rxq->xdp_save_va; in mana_get_rxfrag()
1675 rxq->xdp_save_va = NULL; in mana_get_rxfrag()
1677 page = page_pool_dev_alloc_pages(rxq->page_pool); in mana_get_rxfrag()
1685 *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, in mana_get_rxfrag()
1689 page_pool_put_full_page(rxq->page_pool, page, false); in mana_get_rxfrag()
1700 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq, in mana_refill_rx_oob() argument
1708 va = mana_get_rxfrag(rxq, dev, &da, &from_pool); in mana_refill_rx_oob()
1712 dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize, in mana_refill_rx_oob()
1714 *old_buf = rxoob->buf_va; in mana_refill_rx_oob()
1715 *old_fp = rxoob->from_pool; in mana_refill_rx_oob()
1717 rxoob->buf_va = va; in mana_refill_rx_oob()
1718 rxoob->sgl[0].address = da; in mana_refill_rx_oob()
1719 rxoob->from_pool = from_pool; in mana_refill_rx_oob()
1722 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, in mana_process_rx_cqe() argument
1725 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data; in mana_process_rx_cqe()
1726 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; in mana_process_rx_cqe()
1727 struct net_device *ndev = rxq->ndev; in mana_process_rx_cqe()
1730 struct device *dev = gc->dev; in mana_process_rx_cqe()
1737 switch (oob->cqe_hdr.cqe_type) { in mana_process_rx_cqe()
1742 ++ndev->stats.rx_dropped; in mana_process_rx_cqe()
1743 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index]; in mana_process_rx_cqe()
1749 apc->eth_stats.rx_coalesced_err++; in mana_process_rx_cqe()
1753 complete(&rxq->fence_event); in mana_process_rx_cqe()
1758 oob->cqe_hdr.cqe_type); in mana_process_rx_cqe()
1759 apc->eth_stats.rx_cqe_unknown_type++; in mana_process_rx_cqe()
1763 pktlen = oob->ppi[0].pkt_len; in mana_process_rx_cqe()
1768 rxq->gdma_id, cq->gdma_id, rxq->rxobj); in mana_process_rx_cqe()
1772 curr = rxq->buf_index; in mana_process_rx_cqe()
1773 rxbuf_oob = &rxq->rx_oobs[curr]; in mana_process_rx_cqe()
1774 WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_process_rx_cqe()
1776 mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp); in mana_process_rx_cqe()
1781 mana_rx_skb(old_buf, old_fp, oob, rxq); in mana_process_rx_cqe()
1784 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); in mana_process_rx_cqe()
1786 mana_post_pkt_rxq(rxq); in mana_process_rx_cqe()
1791 struct gdma_comp *comp = cq->gdma_comp_buf; in mana_poll_rx_cq()
1792 struct mana_rxq *rxq = cq->rxq; in mana_poll_rx_cq() local
1795 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); in mana_poll_rx_cq()
1798 rxq->xdp_flush = false; in mana_poll_rx_cq()
1804 /* verify recv cqe references the right rxq */ in mana_poll_rx_cq()
1805 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id)) in mana_poll_rx_cq()
1808 mana_process_rx_cqe(rxq, cq, &comp[i]); in mana_poll_rx_cq()
1812 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; in mana_poll_rx_cq()
1814 mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq); in mana_poll_rx_cq()
1817 if (rxq->xdp_flush) in mana_poll_rx_cq()
1826 WARN_ON_ONCE(cq->gdma_cq != gdma_queue); in mana_cq_handler()
1828 if (cq->type == MANA_CQ_TYPE_RX) in mana_cq_handler()
1833 w = cq->work_done; in mana_cq_handler()
1834 cq->work_done_since_doorbell += w; in mana_cq_handler()
1836 if (w < cq->budget) { in mana_cq_handler()
1838 cq->work_done_since_doorbell = 0; in mana_cq_handler()
1839 napi_complete_done(&cq->napi, w); in mana_cq_handler()
1840 } else if (cq->work_done_since_doorbell > in mana_cq_handler()
1841 cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) { in mana_cq_handler()
1848 cq->work_done_since_doorbell = 0; in mana_cq_handler()
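Note: mana_cq_handler() only re-arms the CQ interrupt when a poll round finishes under budget; while it stays in polling mode it still rings the doorbell (without arming) once un-acknowledged completions exceed four times the CQ's entry count, so the hardware never sees the CQ as full. Sketch (names assumed):

struct cq { unsigned int since_doorbell; };

void ring_cq_doorbell(struct cq *cq, int arm);	/* hypothetical hw call */

static void cq_done(struct cq *cq, int work_done, int budget,
		    unsigned int cq_entries)
{
	cq->since_doorbell += work_done;

	if (work_done < budget) {
		ring_cq_doorbell(cq, 1);	/* back to IRQ mode */
		cq->since_doorbell = 0;
	} else if (cq->since_doorbell > cq_entries * 4) {
		ring_cq_doorbell(cq, 0);	/* ack, stay polling */
		cq->since_doorbell = 0;
	}
}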
1859 cq->work_done = 0; in mana_poll()
1860 cq->budget = budget; in mana_poll()
1862 w = mana_cq_handler(cq, cq->gdma_cq); in mana_poll()
1871 napi_schedule_irqoff(&cq->napi); in mana_schedule_napi()
1876 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_cq()
1878 if (!cq->gdma_cq) in mana_deinit_cq()
1881 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq); in mana_deinit_cq()
1886 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_txq()
1888 if (!txq->gdma_sq) in mana_deinit_txq()
1891 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); in mana_deinit_txq()
1899 if (!apc->tx_qp) in mana_destroy_txq()
1902 for (i = 0; i < apc->num_queues; i++) { in mana_destroy_txq()
1903 debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs); in mana_destroy_txq()
1904 apc->tx_qp[i].mana_tx_debugfs = NULL; in mana_destroy_txq()
1906 napi = &apc->tx_qp[i].tx_cq.napi; in mana_destroy_txq()
1907 if (apc->tx_qp[i].txq.napi_initialized) { in mana_destroy_txq()
1911 apc->tx_qp[i].txq.napi_initialized = false; in mana_destroy_txq()
1913 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); in mana_destroy_txq()
1915 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); in mana_destroy_txq()
1917 mana_deinit_txq(apc, &apc->tx_qp[i].txq); in mana_destroy_txq()
1920 kfree(apc->tx_qp); in mana_destroy_txq()
1921 apc->tx_qp = NULL; in mana_destroy_txq()
1926 struct mana_tx_qp *tx_qp = &apc->tx_qp[idx]; in mana_create_txq_debugfs()
1929 sprintf(qnum, "TX-%d", idx); in mana_create_txq_debugfs()
1930 tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs); in mana_create_txq_debugfs()
1931 debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1932 &tx_qp->txq.gdma_sq->head); in mana_create_txq_debugfs()
1933 debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1934 &tx_qp->txq.gdma_sq->tail); in mana_create_txq_debugfs()
1935 debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1936 &tx_qp->txq.pending_skbs.qlen); in mana_create_txq_debugfs()
1937 debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1938 &tx_qp->tx_cq.gdma_cq->head); in mana_create_txq_debugfs()
1939 debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1940 &tx_qp->tx_cq.gdma_cq->tail); in mana_create_txq_debugfs()
1941 debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1942 &tx_qp->tx_cq.budget); in mana_create_txq_debugfs()
1943 debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1944 tx_qp->txq.gdma_sq, &mana_dbg_q_fops); in mana_create_txq_debugfs()
1945 debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1946 tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops); in mana_create_txq_debugfs()
1952 struct mana_context *ac = apc->ac; in mana_create_txq()
1953 struct gdma_dev *gd = ac->gdma_dev; in mana_create_txq()
1965 apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp), in mana_create_txq()
1967 if (!apc->tx_qp) in mana_create_txq()
1968 return -ENOMEM; in mana_create_txq()
1971 * apc->tx_queue_size represents the maximum number of WQEs in mana_create_txq()
1975 * as min val of apc->tx_queue_size is 128 and that would make in mana_create_txq()
1976 * txq_size 128*32 = 4096 and the other higher values of apc->tx_queue_size in mana_create_txq()
1979 txq_size = apc->tx_queue_size * 32; in mana_create_txq()
1981 cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE; in mana_create_txq()
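Note: the comment fragments above describe the sizing rule: apc->tx_queue_size counts WQEs, the SQ byte size is that count times the 32-byte basic WQE unit, and because the minimum value is 128 (giving 128 * 32 = 4096) and all allowed values are powers of two, the result is always page aligned. A worked check (COMP_ENTRY_SIZE value assumed):

#include <assert.h>
#include <stdint.h>

#define BASIC_WQE_UNIT	32	/* bytes, per the comment above */
#define COMP_ENTRY_SIZE	64	/* assumption: bytes per completion entry */

static void size_queues(uint32_t tx_queue_size,	/* number of WQEs */
			uint32_t *txq_bytes, uint32_t *cq_bytes)
{
	assert(tx_queue_size >= 128);
	assert((tx_queue_size & (tx_queue_size - 1)) == 0); /* power of 2 */

	*txq_bytes = tx_queue_size * BASIC_WQE_UNIT;   /* 128 -> 4096 */
	*cq_bytes  = tx_queue_size * COMP_ENTRY_SIZE;  /* one CQE per WQE */
}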
1983 gc = gd->gdma_context; in mana_create_txq()
1985 for (i = 0; i < apc->num_queues; i++) { in mana_create_txq()
1986 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE; in mana_create_txq()
1989 txq = &apc->tx_qp[i].txq; in mana_create_txq()
1991 u64_stats_init(&txq->stats.syncp); in mana_create_txq()
1992 txq->ndev = net; in mana_create_txq()
1993 txq->net_txq = netdev_get_tx_queue(net, i); in mana_create_txq()
1994 txq->vp_offset = apc->tx_vp_offset; in mana_create_txq()
1995 txq->napi_initialized = false; in mana_create_txq()
1996 skb_queue_head_init(&txq->pending_skbs); in mana_create_txq()
2002 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq); in mana_create_txq()
2007 cq = &apc->tx_qp[i].tx_cq; in mana_create_txq()
2008 cq->type = MANA_CQ_TYPE_TX; in mana_create_txq()
2010 cq->txq = txq; in mana_create_txq()
2017 spec.cq.parent_eq = ac->eqs[i].eq; in mana_create_txq()
2019 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_txq()
2026 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle; in mana_create_txq()
2027 wq_spec.queue_size = txq->gdma_sq->queue_size; in mana_create_txq()
2029 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; in mana_create_txq()
2030 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_txq()
2032 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_txq()
2034 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ, in mana_create_txq()
2036 &apc->tx_qp[i].tx_object); in mana_create_txq()
2041 txq->gdma_sq->id = wq_spec.queue_index; in mana_create_txq()
2042 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_txq()
2044 txq->gdma_sq->mem_info.dma_region_handle = in mana_create_txq()
2046 cq->gdma_cq->mem_info.dma_region_handle = in mana_create_txq()
2049 txq->gdma_txq_id = txq->gdma_sq->id; in mana_create_txq()
2051 cq->gdma_id = cq->gdma_cq->id; in mana_create_txq()
2053 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { in mana_create_txq()
2054 err = -EINVAL; in mana_create_txq()
2058 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_txq()
2062 netif_napi_add_tx(net, &cq->napi, mana_poll); in mana_create_txq()
2063 napi_enable(&cq->napi); in mana_create_txq()
2064 txq->napi_initialized = true; in mana_create_txq()
2066 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_txq()
2072 apc->num_queues, err); in mana_create_txq()
2078 struct mana_rxq *rxq, bool napi_initialized) in mana_destroy_rxq() argument
2081 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_destroy_rxq()
2083 struct device *dev = gc->dev; in mana_destroy_rxq()
2088 if (!rxq) in mana_destroy_rxq()
2091 debugfs_remove_recursive(rxq->mana_rx_debugfs); in mana_destroy_rxq()
2092 rxq->mana_rx_debugfs = NULL; in mana_destroy_rxq()
2094 napi = &rxq->rx_cq.napi; in mana_destroy_rxq()
2103 xdp_rxq_info_unreg(&rxq->xdp_rxq); in mana_destroy_rxq()
2105 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); in mana_destroy_rxq()
2107 mana_deinit_cq(apc, &rxq->rx_cq); in mana_destroy_rxq()
2109 if (rxq->xdp_save_va) in mana_destroy_rxq()
2110 put_page(virt_to_head_page(rxq->xdp_save_va)); in mana_destroy_rxq()
2112 for (i = 0; i < rxq->num_rx_buf; i++) { in mana_destroy_rxq()
2113 rx_oob = &rxq->rx_oobs[i]; in mana_destroy_rxq()
2115 if (!rx_oob->buf_va) in mana_destroy_rxq()
2118 dma_unmap_single(dev, rx_oob->sgl[0].address, in mana_destroy_rxq()
2119 rx_oob->sgl[0].size, DMA_FROM_DEVICE); in mana_destroy_rxq()
2121 page = virt_to_head_page(rx_oob->buf_va); in mana_destroy_rxq()
2123 if (rx_oob->from_pool) in mana_destroy_rxq()
2124 page_pool_put_full_page(rxq->page_pool, page, false); in mana_destroy_rxq()
2128 rx_oob->buf_va = NULL; in mana_destroy_rxq()
2131 page_pool_destroy(rxq->page_pool); in mana_destroy_rxq()
2133 if (rxq->gdma_rq) in mana_destroy_rxq()
2134 mana_gd_destroy_queue(gc, rxq->gdma_rq); in mana_destroy_rxq()
2136 kfree(rxq); in mana_destroy_rxq()
2140 struct mana_rxq *rxq, struct device *dev) in mana_fill_rx_oob() argument
2142 struct mana_port_context *mpc = netdev_priv(rxq->ndev); in mana_fill_rx_oob()
2147 if (mpc->rxbufs_pre) in mana_fill_rx_oob()
2148 va = mana_get_rxbuf_pre(rxq, &da); in mana_fill_rx_oob()
2150 va = mana_get_rxfrag(rxq, dev, &da, &from_pool); in mana_fill_rx_oob()
2153 return -ENOMEM; in mana_fill_rx_oob()
2155 rx_oob->buf_va = va; in mana_fill_rx_oob()
2156 rx_oob->from_pool = from_pool; in mana_fill_rx_oob()
2158 rx_oob->sgl[0].address = da; in mana_fill_rx_oob()
2159 rx_oob->sgl[0].size = rxq->datasize; in mana_fill_rx_oob()
2160 rx_oob->sgl[0].mem_key = mem_key; in mana_fill_rx_oob()
2169 struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size) in mana_alloc_rx_wqe() argument
2171 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_alloc_rx_wqe()
2173 struct device *dev = gc->dev; in mana_alloc_rx_wqe()
2177 WARN_ON(rxq->datasize == 0); in mana_alloc_rx_wqe()
2182 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_alloc_rx_wqe()
2183 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_alloc_rx_wqe()
2186 rx_oob->num_sge = 1; in mana_alloc_rx_wqe()
2188 ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq, in mana_alloc_rx_wqe()
2193 rx_oob->wqe_req.sgl = rx_oob->sgl; in mana_alloc_rx_wqe()
2194 rx_oob->wqe_req.num_sge = rx_oob->num_sge; in mana_alloc_rx_wqe()
2195 rx_oob->wqe_req.inline_oob_size = 0; in mana_alloc_rx_wqe()
2196 rx_oob->wqe_req.inline_oob_data = NULL; in mana_alloc_rx_wqe()
2197 rx_oob->wqe_req.flags = 0; in mana_alloc_rx_wqe()
2198 rx_oob->wqe_req.client_data_unit = 0; in mana_alloc_rx_wqe()
2201 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32); in mana_alloc_rx_wqe()
2208 static int mana_push_wqe(struct mana_rxq *rxq) in mana_push_wqe() argument
2214 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_push_wqe()
2215 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_push_wqe()
2217 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, in mana_push_wqe()
2218 &rx_oob->wqe_inf); in mana_push_wqe()
2220 return -ENOSPC; in mana_push_wqe()
2226 static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc) in mana_create_page_pool() argument
2228 struct mana_port_context *mpc = netdev_priv(rxq->ndev); in mana_create_page_pool()
2232 pprm.pool_size = mpc->rx_queue_size; in mana_create_page_pool()
2233 pprm.nid = gc->numa_node; in mana_create_page_pool()
2234 pprm.napi = &rxq->rx_cq.napi; in mana_create_page_pool()
2235 pprm.netdev = rxq->ndev; in mana_create_page_pool()
2236 pprm.order = get_order(rxq->alloc_size); in mana_create_page_pool()
2238 rxq->page_pool = page_pool_create(&pprm); in mana_create_page_pool()
2240 if (IS_ERR(rxq->page_pool)) { in mana_create_page_pool()
2241 ret = PTR_ERR(rxq->page_pool); in mana_create_page_pool()
2242 rxq->page_pool = NULL; in mana_create_page_pool()
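Note: pprm.order = get_order(rxq->alloc_size) asks the pool for the smallest page order whose size covers one RX buffer: order 0 for alloc_size up to 4096, order 1 up to 8192, and so on (4 KiB pages assumed). Equivalent calculation:

#include <stdint.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

/* Smallest order with (1 << (PAGE_SHIFT + order)) >= size. */
static unsigned int order_for(uint32_t size)
{
	unsigned int order = 0;

	while (((uint32_t)1 << (PAGE_SHIFT + order)) < size)
		order++;
	return order;
}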
2253 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_rxq()
2260 struct mana_rxq *rxq; in mana_create_rxq() local
2263 gc = gd->gdma_context; in mana_create_rxq()
2265 rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size), in mana_create_rxq()
2267 if (!rxq) in mana_create_rxq()
2270 rxq->ndev = ndev; in mana_create_rxq()
2271 rxq->num_rx_buf = apc->rx_queue_size; in mana_create_rxq()
2272 rxq->rxq_idx = rxq_idx; in mana_create_rxq()
2273 rxq->rxobj = INVALID_MANA_HANDLE; in mana_create_rxq()
2275 mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size, in mana_create_rxq()
2276 &rxq->headroom); in mana_create_rxq()
2279 err = mana_create_page_pool(rxq, gc); in mana_create_rxq()
2285 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); in mana_create_rxq()
2297 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); in mana_create_rxq()
2302 cq = &rxq->rx_cq; in mana_create_rxq()
2303 cq->type = MANA_CQ_TYPE_RX; in mana_create_rxq()
2304 cq->rxq = rxq; in mana_create_rxq()
2311 spec.cq.parent_eq = eq->eq; in mana_create_rxq()
2313 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_rxq()
2319 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle; in mana_create_rxq()
2320 wq_spec.queue_size = rxq->gdma_rq->queue_size; in mana_create_rxq()
2322 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; in mana_create_rxq()
2323 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_rxq()
2325 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_rxq()
2327 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ, in mana_create_rxq()
2328 &wq_spec, &cq_spec, &rxq->rxobj); in mana_create_rxq()
2332 rxq->gdma_rq->id = wq_spec.queue_index; in mana_create_rxq()
2333 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_rxq()
2335 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
2336 cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
2338 rxq->gdma_id = rxq->gdma_rq->id; in mana_create_rxq()
2339 cq->gdma_id = cq->gdma_cq->id; in mana_create_rxq()
2341 err = mana_push_wqe(rxq); in mana_create_rxq()
2345 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { in mana_create_rxq()
2346 err = -EINVAL; in mana_create_rxq()
2350 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_rxq()
2352 netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1); in mana_create_rxq()
2354 WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, in mana_create_rxq()
2355 cq->napi.napi_id)); in mana_create_rxq()
2356 WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, in mana_create_rxq()
2357 rxq->page_pool)); in mana_create_rxq()
2359 napi_enable(&cq->napi); in mana_create_rxq()
2361 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_rxq()
2364 return rxq; in mana_create_rxq()
2366 netdev_err(ndev, "Failed to create RXQ: err = %d\n", err); in mana_create_rxq()
2368 mana_destroy_rxq(apc, rxq, false); in mana_create_rxq()
2378 struct mana_rxq *rxq; in mana_create_rxq_debugfs() local
2381 rxq = apc->rxqs[idx]; in mana_create_rxq_debugfs()
2383 sprintf(qnum, "RX-%d", idx); in mana_create_rxq_debugfs()
2384 rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs); in mana_create_rxq_debugfs()
2385 debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head); in mana_create_rxq_debugfs()
2386 debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail); in mana_create_rxq_debugfs()
2387 debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf); in mana_create_rxq_debugfs()
2388 debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs, in mana_create_rxq_debugfs()
2389 &rxq->rx_cq.gdma_cq->head); in mana_create_rxq_debugfs()
2390 debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs, in mana_create_rxq_debugfs()
2391 &rxq->rx_cq.gdma_cq->tail); in mana_create_rxq_debugfs()
2392 debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget); in mana_create_rxq_debugfs()
2393 debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops); in mana_create_rxq_debugfs()
2394 debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq, in mana_create_rxq_debugfs()
2401 struct mana_context *ac = apc->ac; in mana_add_rx_queues()
2402 struct mana_rxq *rxq; in mana_add_rx_queues() local
2406 for (i = 0; i < apc->num_queues; i++) { in mana_add_rx_queues()
2407 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); in mana_add_rx_queues()
2408 if (!rxq) { in mana_add_rx_queues()
2409 err = -ENOMEM; in mana_add_rx_queues()
2410 netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err); in mana_add_rx_queues()
2414 u64_stats_init(&rxq->stats.syncp); in mana_add_rx_queues()
2416 apc->rxqs[i] = rxq; in mana_add_rx_queues()
2421 apc->default_rxobj = apc->rxqs[0]->rxobj; in mana_add_rx_queues()
2428 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_destroy_vport()
2429 struct mana_rxq *rxq; in mana_destroy_vport() local
2432 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_destroy_vport()
2433 rxq = apc->rxqs[rxq_idx]; in mana_destroy_vport()
2434 if (!rxq) in mana_destroy_vport()
2437 mana_destroy_rxq(apc, rxq, true); in mana_destroy_vport()
2438 apc->rxqs[rxq_idx] = NULL; in mana_destroy_vport()
2444 if (gd->gdma_context->is_pf) in mana_destroy_vport()
2451 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_vport()
2454 apc->default_rxobj = INVALID_MANA_HANDLE; in mana_create_vport()
2456 if (gd->gdma_context->is_pf) { in mana_create_vport()
2462 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell); in mana_create_vport()
2471 if (!apc->indir_table_sz) { in mana_rss_table_alloc()
2472 netdev_err(apc->ndev, in mana_rss_table_alloc()
2474 apc->port_idx); in mana_rss_table_alloc()
2475 return -EINVAL; in mana_rss_table_alloc()
2478 apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL); in mana_rss_table_alloc()
2479 if (!apc->indir_table) in mana_rss_table_alloc()
2480 return -ENOMEM; in mana_rss_table_alloc()
2482 apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL); in mana_rss_table_alloc()
2483 if (!apc->rxobj_table) { in mana_rss_table_alloc()
2484 kfree(apc->indir_table); in mana_rss_table_alloc()
2485 return -ENOMEM; in mana_rss_table_alloc()
2495 for (i = 0; i < apc->indir_table_sz; i++) in mana_rss_table_init()
2496 apc->indir_table[i] = in mana_rss_table_init()
2497 ethtool_rxfh_indir_default(i, apc->num_queues); in mana_rss_table_init()
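Note: the default RSS table simply spreads entries round-robin over the active queues; ethtool_rxfh_indir_default() reduces to a modulo:

#include <stdint.h>

/* ethtool_rxfh_indir_default(i, n): entry i maps to queue i % n. */
static uint32_t indir_default(uint32_t index, uint32_t n_queues)
{
	return index % n_queues;
}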
2508 for (i = 0; i < apc->indir_table_sz; i++) { in mana_config_rss()
2509 queue_idx = apc->indir_table[i]; in mana_config_rss()
2510 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; in mana_config_rss()
2527 struct net_device *ndev = apc->ndev; in mana_query_gf_stats()
2561 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_gf_stats()
2575 apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe; in mana_query_gf_stats()
2576 apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled; in mana_query_gf_stats()
2577 apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes; in mana_query_gf_stats()
2578 apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts; in mana_query_gf_stats()
2579 apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes; in mana_query_gf_stats()
2580 apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts; in mana_query_gf_stats()
2581 apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes; in mana_query_gf_stats()
2582 apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts; in mana_query_gf_stats()
2583 apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes; in mana_query_gf_stats()
2584 apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled; in mana_query_gf_stats()
2585 apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled; in mana_query_gf_stats()
2586 apc->eth_stats.hc_tx_err_inval_vportoffset_pkt = in mana_query_gf_stats()
2588 apc->eth_stats.hc_tx_err_vlan_enforcement = in mana_query_gf_stats()
2590 apc->eth_stats.hc_tx_err_eth_type_enforcement = in mana_query_gf_stats()
2592 apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement; in mana_query_gf_stats()
2593 apc->eth_stats.hc_tx_err_sqpdid_enforcement = in mana_query_gf_stats()
2595 apc->eth_stats.hc_tx_err_cqpdid_enforcement = in mana_query_gf_stats()
2597 apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation; in mana_query_gf_stats()
2598 apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob; in mana_query_gf_stats()
2599 apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes; in mana_query_gf_stats()
2600 apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts; in mana_query_gf_stats()
2601 apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes; in mana_query_gf_stats()
2602 apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts; in mana_query_gf_stats()
2603 apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes; in mana_query_gf_stats()
2604 apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts; in mana_query_gf_stats()
2605 apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes; in mana_query_gf_stats()
2606 apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma; in mana_query_gf_stats()
2612 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_init_port()
2614 int port_idx = apc->port_idx; in mana_init_port()
2623 gc = gd->gdma_context; in mana_init_port()
2626 &apc->indir_table_sz); in mana_init_port()
2634 if (apc->max_queues > max_queues) in mana_init_port()
2635 apc->max_queues = max_queues; in mana_init_port()
2637 if (apc->num_queues > apc->max_queues) in mana_init_port()
2638 apc->num_queues = apc->max_queues; in mana_init_port()
2640 eth_hw_addr_set(ndev, apc->mac_addr); in mana_init_port()
2642 apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs); in mana_init_port()
2653 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_alloc_queues()
2658 netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err); in mana_alloc_queues()
2662 err = netif_set_real_num_tx_queues(ndev, apc->num_queues); in mana_alloc_queues()
2666 apc->num_queues, err); in mana_alloc_queues()
2674 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; in mana_alloc_queues()
2676 err = netif_set_real_num_rx_queues(ndev, apc->num_queues); in mana_alloc_queues()
2680 apc->num_queues, err); in mana_alloc_queues()
2692 if (gd->gdma_context->is_pf) { in mana_alloc_queues()
2718 if (apc->port_st_save) { in mana_attach()
2726 apc->port_is_up = apc->port_st_save; in mana_attach()
2731 if (apc->port_is_up) in mana_attach()
2743 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_dealloc_queues()
2749 if (apc->port_is_up) in mana_dealloc_queues()
2750 return -EINVAL; in mana_dealloc_queues()
2754 if (gd->gdma_context->is_pf) in mana_dealloc_queues()
2757 /* No packet can be transmitted now since apc->port_is_up is false. in mana_dealloc_queues()
2758 * There is still a tiny chance that mana_poll_tx_cq() can re-enable in mana_dealloc_queues()
2759 * a txq because it may not timely see apc->port_is_up being cleared in mana_dealloc_queues()
2761 * new packets due to apc->port_is_up being false. in mana_dealloc_queues()
2763 * Drain all the in-flight TX packets. in mana_dealloc_queues()
2770 for (i = 0; i < apc->num_queues; i++) { in mana_dealloc_queues()
2771 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
2773 while (atomic_read(&txq->pending_sends) > 0 && in mana_dealloc_queues()
2778 if (atomic_read(&txq->pending_sends)) { in mana_dealloc_queues()
2779 err = pcie_flr(to_pci_dev(gd->gdma_context->dev)); in mana_dealloc_queues()
2782 err, atomic_read(&txq->pending_sends), in mana_dealloc_queues()
2783 txq->gdma_txq_id); in mana_dealloc_queues()
2789 for (i = 0; i < apc->num_queues; i++) { in mana_dealloc_queues()
2790 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
2791 while ((skb = skb_dequeue(&txq->pending_skbs))) { in mana_dealloc_queues()
2795 atomic_set(&txq->pending_sends, 0); in mana_dealloc_queues()
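Note: teardown first waits for each SQ's pending_sends to drain, bounded by a timeout; only if sends are still outstanding does it fall back to pcie_flr(), and finally it frees whatever remains on pending_skbs and zeroes the counter. The wait loop, as a userspace sketch (timeout and sleep values assumed):

#include <stdatomic.h>
#include <time.h>

/* Poll an atomic in-flight counter until it drains or a deadline passes. */
static int drain_pending(atomic_int *pending, int timeout_sec)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 2 * 1000 * 1000 };
	time_t deadline = time(NULL) + timeout_sec;

	while (atomic_load(pending) > 0 && time(NULL) < deadline)
		nanosleep(&ts, NULL);	/* cf. msleep() in the driver */

	return atomic_load(pending) ? -1 : 0;
}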
2801 apc->rss_state = TRI_STATE_FALSE; in mana_dealloc_queues()
2820 apc->port_st_save = apc->port_is_up; in mana_detach()
2821 apc->port_is_up = false; in mana_detach()
2829 if (apc->port_st_save) { in mana_detach()
2848 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_probe_port()
2854 gc->max_num_queues); in mana_probe_port()
2856 return -ENOMEM; in mana_probe_port()
2861 apc->ac = ac; in mana_probe_port()
2862 apc->ndev = ndev; in mana_probe_port()
2863 apc->max_queues = gc->max_num_queues; in mana_probe_port()
2864 apc->num_queues = gc->max_num_queues; in mana_probe_port()
2865 apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE; in mana_probe_port()
2866 apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE; in mana_probe_port()
2867 apc->port_handle = INVALID_MANA_HANDLE; in mana_probe_port()
2868 apc->pf_filter_handle = INVALID_MANA_HANDLE; in mana_probe_port()
2869 apc->port_idx = port_idx; in mana_probe_port()
2871 mutex_init(&apc->vport_mutex); in mana_probe_port()
2872 apc->vport_use_count = 0; in mana_probe_port()
2874 ndev->netdev_ops = &mana_devops; in mana_probe_port()
2875 ndev->ethtool_ops = &mana_ethtool_ops; in mana_probe_port()
2876 ndev->mtu = ETH_DATA_LEN; in mana_probe_port()
2877 ndev->max_mtu = gc->adapter_mtu - ETH_HLEN; in mana_probe_port()
2878 ndev->min_mtu = ETH_MIN_MTU; in mana_probe_port()
2879 ndev->needed_headroom = MANA_HEADROOM; in mana_probe_port()
2880 ndev->dev_port = port_idx; in mana_probe_port()
2881 SET_NETDEV_DEV(ndev, gc->dev); in mana_probe_port()
2887 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE); in mana_probe_port()
2899 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; in mana_probe_port()
2900 ndev->hw_features |= NETIF_F_RXCSUM; in mana_probe_port()
2901 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in mana_probe_port()
2902 ndev->hw_features |= NETIF_F_RXHASH; in mana_probe_port()
2903 ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | in mana_probe_port()
2905 ndev->vlan_features = ndev->features; in mana_probe_port()
2938 struct auxiliary_device *adev = gd->adev; in remove_adev()
2939 int id = adev->id; in remove_adev()
2945 gd->adev = NULL; in remove_adev()
2956 return -ENOMEM; in add_adev()
2958 adev = &madev->adev; in add_adev()
2962 adev->id = ret; in add_adev()
2964 adev->name = "rdma"; in add_adev()
2965 adev->dev.parent = gd->gdma_context->dev; in add_adev()
2966 adev->dev.release = adev_release; in add_adev()
2967 madev->mdev = gd; in add_adev()
2979 gd->adev = adev; in add_adev()
2980 dev_dbg(gd->gdma_context->dev, in add_adev()
2988 mana_adev_idx_free(adev->id); in add_adev()
2998 struct gdma_context *gc = gd->gdma_context; in mana_probe()
2999 struct mana_context *ac = gd->driver_data; in mana_probe()
3000 struct device *dev = gc->dev; in mana_probe()
3016 return -ENOMEM; in mana_probe()
3018 ac->gdma_dev = gd; in mana_probe()
3019 gd->driver_data = ac; in mana_probe()
3034 ac->num_ports = num_ports; in mana_probe()
3036 if (ac->num_ports != num_ports) { in mana_probe()
3037 dev_err(dev, "The number of vPorts changed: %d->%d\n", in mana_probe()
3038 ac->num_ports, num_ports); in mana_probe()
3039 err = -EPROTO; in mana_probe()
3044 if (ac->num_ports == 0) in mana_probe()
3047 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV) in mana_probe()
3048 ac->num_ports = MAX_PORTS_IN_MANA_DEV; in mana_probe()
3051 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
3052 err = mana_probe_port(ac, i, &ac->ports[i]); in mana_probe()
3064 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
3066 err = mana_attach(ac->ports[i]); in mana_probe()
3086 gd, gd->dev_id.as_uint32, ac->num_ports, in mana_probe()
3087 gd->dev_id.type, gd->dev_id.instance); in mana_probe()
3096 struct gdma_context *gc = gd->gdma_context; in mana_remove()
3097 struct mana_context *ac = gd->driver_data; in mana_remove()
3099 struct device *dev = gc->dev; in mana_remove()
3105 if (gd->adev) in mana_remove()
3108 for (i = 0; i < ac->num_ports; i++) { in mana_remove()
3109 ndev = ac->ports[i]; in mana_remove()
3148 gd->driver_data = NULL; in mana_remove()
3149 gd->gdma_context = NULL; in mana_remove()
3160 if (port_index >= ac->num_ports) in mana_get_primary_netdev()
3166 ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]); in mana_get_primary_netdev()
3170 ndev = ac->ports[port_index]; in mana_get_primary_netdev()