Lines Matching +full:rx +full:- +full:eq
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
37 struct gdma_queue *gdma_q = filp->private_data; in mana_dbg_q_read()
39 return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr, in mana_dbg_q_read()
40 gdma_q->queue_size); in mana_dbg_q_read()
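The mana_dbg_q_read() hits above show the usual debugfs pattern of dumping a queue's backing memory through simple_read_from_buffer(). A minimal sketch of that pattern follows; "struct dump_ctx" and the handler names are illustrative stand-ins, not MANA definitions.

	/* Illustrative debugfs read handler that dumps a buffer kept in
	 * file->private_data, in the style of mana_dbg_q_read().
	 */
	#include <linux/debugfs.h>
	#include <linux/fs.h>
	#include <linux/module.h>

	struct dump_ctx {		/* hypothetical context, not a MANA struct */
		void *buf;
		size_t len;
	};

	static ssize_t dump_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *pos)
	{
		struct dump_ctx *ctx = filp->private_data;

		/* Copies at most 'count' bytes starting at *pos and advances *pos. */
		return simple_read_from_buffer(ubuf, count, pos, ctx->buf, ctx->len);
	}

	static const struct file_operations dump_fops = {
		.owner = THIS_MODULE,
		.open = simple_open,	/* sets private_data from inode->i_private */
		.read = dump_read,
	};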
60 apc->port_is_up = true; in mana_open()
75 if (!apc->port_is_up) in mana_close()
88 if (skb->protocol == htons(ETH_P_IP)) { in mana_checksum_info()
91 if (ip->protocol == IPPROTO_TCP) in mana_checksum_info()
94 if (ip->protocol == IPPROTO_UDP) in mana_checksum_info()
96 } else if (skb->protocol == htons(ETH_P_IPV6)) { in mana_checksum_info()
99 if (ip6->nexthdr == IPPROTO_TCP) in mana_checksum_info()
102 if (ip6->nexthdr == IPPROTO_UDP) in mana_checksum_info()
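mana_checksum_info() above branches first on skb->protocol and then on the L4 protocol to decide which checksum offload to request. A self-contained sketch of that dispatch shape (the enum and return values here are made up for illustration):

	/* Illustrative IPv4/IPv6 + TCP/UDP dispatch mirroring the shape of
	 * mana_checksum_info(); the csum_kind return codes are examples only.
	 */
	#include <linux/if_ether.h>
	#include <linux/in.h>
	#include <linux/ip.h>
	#include <linux/ipv6.h>
	#include <linux/skbuff.h>

	enum csum_kind { CSUM_NONE, CSUM_TCP, CSUM_UDP };	/* hypothetical */

	static enum csum_kind classify_csum(struct sk_buff *skb)
	{
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip = ip_hdr(skb);

			if (ip->protocol == IPPROTO_TCP)
				return CSUM_TCP;
			if (ip->protocol == IPPROTO_UDP)
				return CSUM_UDP;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6 = ipv6_hdr(skb);

			if (ip6->nexthdr == IPPROTO_TCP)
				return CSUM_TCP;
			if (ip6->nexthdr == IPPROTO_UDP)
				return CSUM_UDP;
		}
		return CSUM_NONE;
	}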
113 ash->dma_handle[sg_i] = da; in mana_add_sge()
114 ash->size[sg_i] = sge_len; in mana_add_sge()
116 tp->wqe_req.sgl[sg_i].address = da; in mana_add_sge()
117 tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey; in mana_add_sge()
118 tp->wqe_req.sgl[sg_i].size = sge_len; in mana_add_sge()
124 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; in mana_map_skb()
126 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_map_skb()
136 gc = gd->gdma_context; in mana_map_skb()
137 dev = gc->dev; in mana_map_skb()
141 sge1_len = skb_hlen - gso_hs; in mana_map_skb()
146 da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE); in mana_map_skb()
148 return -ENOMEM; in mana_map_skb()
150 mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey); in mana_map_skb()
154 da = dma_map_single(dev, skb->data + sge0_len, sge1_len, in mana_map_skb()
159 mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey); in mana_map_skb()
163 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mana_map_skb()
166 frag = &skb_shinfo(skb)->frags[i]; in mana_map_skb()
173 gd->gpa_mkey); in mana_map_skb()
179 for (i = sg_i - 1; i >= hsg; i--) in mana_map_skb()
180 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], in mana_map_skb()
183 for (i = hsg - 1; i >= 0; i--) in mana_map_skb()
184 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], in mana_map_skb()
187 return -ENOMEM; in mana_map_skb()
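The mana_map_skb() hits above follow the standard streaming-DMA pattern for TX: dma_map_single()/skb_frag_dma_map() each piece, check dma_mapping_error(), and unwind every earlier mapping on failure. A minimal sketch of that map-then-unwind flow, using a hypothetical fixed-size handle array rather than the driver's mana_skb_head:

	/* Illustrative map-and-unwind for an skb header plus page fragments.
	 * 'struct tx_map' and MAX_SGE are stand-ins, not MANA definitions.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	#define MAX_SGE 30			/* arbitrary for the example */

	struct tx_map {
		dma_addr_t da[MAX_SGE];
		u32 len[MAX_SGE];
	};

	static int map_skb_for_tx(struct device *dev, struct sk_buff *skb,
				  struct tx_map *m)
	{
		int i, n = 0;

		if (1 + skb_shinfo(skb)->nr_frags > MAX_SGE)
			return -EINVAL;

		m->da[n] = dma_map_single(dev, skb->data, skb_headlen(skb),
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, m->da[n]))
			return -ENOMEM;
		m->len[n++] = skb_headlen(skb);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			m->da[n] = skb_frag_dma_map(dev, frag, 0,
						    skb_frag_size(frag),
						    DMA_TO_DEVICE);
			if (dma_mapping_error(dev, m->da[n]))
				goto unwind;
			m->len[n++] = skb_frag_size(frag);
		}
		return n;		/* number of mapped SGEs */

	unwind:
		/* Undo fragments first, then the linear header mapping. */
		while (--n > 0)
			dma_unmap_page(dev, m->da[n], m->len[n], DMA_TO_DEVICE);
		dma_unmap_single(dev, m->da[0], m->len[0], DMA_TO_DEVICE);
		return -ENOMEM;
	}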
200 int num_sge = 1 + skb_shinfo(skb)->nr_frags; in mana_fix_skb_head()
211 return -EINVAL; in mana_fix_skb_head()
222 if (skb->encapsulation) { in mana_get_gso_hs()
225 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in mana_get_gso_hs()
240 int gso_hs = 0; /* zero for non-GSO pkts */ in mana_start_xmit()
242 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_start_xmit()
253 if (unlikely(!apc->port_is_up)) in mana_start_xmit()
259 txq = &apc->tx_qp[txq_idx].txq; in mana_start_xmit()
260 gdma_sq = txq->gdma_sq; in mana_start_xmit()
261 cq = &apc->tx_qp[txq_idx].tx_cq; in mana_start_xmit()
262 tx_stats = &txq->stats; in mana_start_xmit()
264 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; in mana_start_xmit()
265 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; in mana_start_xmit()
267 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { in mana_start_xmit()
268 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; in mana_start_xmit()
271 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; in mana_start_xmit()
286 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
287 tx_stats->short_pkt_fmt++; in mana_start_xmit()
288 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
291 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
292 tx_stats->long_pkt_fmt++; in mana_start_xmit()
293 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
300 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; in mana_start_xmit()
302 if (skb->protocol == htons(ETH_P_IP)) in mana_start_xmit()
304 else if (skb->protocol == htons(ETH_P_IPV6)) in mana_start_xmit()
318 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
319 if (skb->encapsulation) { in mana_start_xmit()
320 tx_stats->tso_inner_packets++; in mana_start_xmit()
321 tx_stats->tso_inner_bytes += skb->len - gso_hs; in mana_start_xmit()
323 tx_stats->tso_packets++; in mana_start_xmit()
324 tx_stats->tso_bytes += skb->len - gso_hs; in mana_start_xmit()
326 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
335 pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size; in mana_start_xmit()
338 ip_hdr(skb)->tot_len = 0; in mana_start_xmit()
339 ip_hdr(skb)->check = 0; in mana_start_xmit()
340 tcp_hdr(skb)->check = in mana_start_xmit()
341 ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in mana_start_xmit()
342 ip_hdr(skb)->daddr, 0, in mana_start_xmit()
345 ipv6_hdr(skb)->payload_len = 0; in mana_start_xmit()
346 tcp_hdr(skb)->check = in mana_start_xmit()
347 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in mana_start_xmit()
348 &ipv6_hdr(skb)->daddr, 0, in mana_start_xmit()
351 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in mana_start_xmit()
354 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
355 tx_stats->csum_partial++; in mana_start_xmit()
356 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
392 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
393 tx_stats->mana_map_err++; in mana_start_xmit()
394 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
398 skb_queue_tail(&txq->pending_skbs, skb); in mana_start_xmit()
400 len = skb->len; in mana_start_xmit()
404 (struct gdma_posted_wqe_info *)skb->cb); in mana_start_xmit()
407 apc->eth_stats.stop_queue++; in mana_start_xmit()
411 (void)skb_dequeue_tail(&txq->pending_skbs); in mana_start_xmit()
418 atomic_inc(&txq->pending_sends); in mana_start_xmit()
420 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq); in mana_start_xmit()
425 tx_stats = &txq->stats; in mana_start_xmit()
426 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
427 tx_stats->packets++; in mana_start_xmit()
428 tx_stats->bytes += len; in mana_start_xmit()
429 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
434 apc->eth_stats.wake_queue++; in mana_start_xmit()
443 ndev->stats.tx_dropped++; in mana_start_xmit()
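Within the mana_start_xmit() hits above, TSO preparation zeroes the IP length fields and seeds tcp_hdr(skb)->check with the pseudo-header checksum via csum_tcpudp_magic()/csum_ipv6_magic(), which the hardware completes during segmentation. A compact sketch of just that step, assuming linear, already-validated IPv4/IPv6 + TCP headers:

	/* Illustrative pseudo-header checksum seeding for TSO, as in
	 * mana_start_xmit(); assumes validated IPv4/IPv6 + TCP headers.
	 */
	#include <linux/if_ether.h>
	#include <linux/in.h>
	#include <linux/ip.h>
	#include <linux/ipv6.h>
	#include <linux/tcp.h>
	#include <net/checksum.h>
	#include <net/ip6_checksum.h>

	static void seed_tso_pseudo_csum(struct sk_buff *skb)
	{
		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0,
						   IPPROTO_TCP, 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0,
						 IPPROTO_TCP, 0);
		}
	}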
453 unsigned int num_queues = apc->num_queues; in mana_get_stats64()
460 if (!apc->port_is_up) in mana_get_stats64()
463 netdev_stats_to_stats64(st, &ndev->stats); in mana_get_stats64()
466 rx_stats = &apc->rxqs[q]->stats; in mana_get_stats64()
469 start = u64_stats_fetch_begin(&rx_stats->syncp); in mana_get_stats64()
470 packets = rx_stats->packets; in mana_get_stats64()
471 bytes = rx_stats->bytes; in mana_get_stats64()
472 } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); in mana_get_stats64()
474 st->rx_packets += packets; in mana_get_stats64()
475 st->rx_bytes += bytes; in mana_get_stats64()
479 tx_stats = &apc->tx_qp[q].txq.stats; in mana_get_stats64()
482 start = u64_stats_fetch_begin(&tx_stats->syncp); in mana_get_stats64()
483 packets = tx_stats->packets; in mana_get_stats64()
484 bytes = tx_stats->bytes; in mana_get_stats64()
485 } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); in mana_get_stats64()
487 st->tx_packets += packets; in mana_get_stats64()
488 st->tx_bytes += bytes; in mana_get_stats64()
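The mana_get_stats64() hits above use the u64_stats_sync seqcount pattern: writers bracket updates with u64_stats_update_begin()/end(), readers retry with u64_stats_fetch_begin()/u64_stats_fetch_retry() until they observe a consistent snapshot. A minimal sketch of both sides, with a hypothetical per-queue counter struct (the syncp must be u64_stats_init()'d at setup, as the driver does per queue):

	/* Illustrative u64_stats_sync usage (writer + reader), matching the
	 * pattern in mana_get_stats64(); 'struct q_stats' is a stand-in.
	 */
	#include <linux/u64_stats_sync.h>

	struct q_stats {
		u64 packets;
		u64 bytes;
		struct u64_stats_sync syncp;
	};

	static void q_stats_add(struct q_stats *s, u64 len)	/* hot path */
	{
		u64_stats_update_begin(&s->syncp);
		s->packets++;
		s->bytes += len;
		u64_stats_update_end(&s->syncp);
	}

	static void q_stats_read(struct q_stats *s, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			*pkts = s->packets;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}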
497 struct sock *sk = skb->sk; in mana_get_tx_queue()
500 txq = apc->indir_table[hash & (apc->indir_table_sz - 1)]; in mana_get_tx_queue()
503 rcu_access_pointer(sk->sk_dst_cache)) in mana_get_tx_queue()
514 if (ndev->real_num_tx_queues == 1) in mana_select_queue()
517 txq = sk_tx_queue_get(skb->sk); in mana_select_queue()
519 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) { in mana_select_queue()
529 /* Release pre-allocated RX buffers */
535 dev = mpc->ac->gdma_dev->gdma_context->dev; in mana_pre_dealloc_rxbufs()
537 if (!mpc->rxbufs_pre) in mana_pre_dealloc_rxbufs()
540 if (!mpc->das_pre) in mana_pre_dealloc_rxbufs()
543 while (mpc->rxbpre_total) { in mana_pre_dealloc_rxbufs()
544 i = --mpc->rxbpre_total; in mana_pre_dealloc_rxbufs()
545 dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize, in mana_pre_dealloc_rxbufs()
547 put_page(virt_to_head_page(mpc->rxbufs_pre[i])); in mana_pre_dealloc_rxbufs()
550 kfree(mpc->das_pre); in mana_pre_dealloc_rxbufs()
551 mpc->das_pre = NULL; in mana_pre_dealloc_rxbufs()
554 kfree(mpc->rxbufs_pre); in mana_pre_dealloc_rxbufs()
555 mpc->rxbufs_pre = NULL; in mana_pre_dealloc_rxbufs()
558 mpc->rxbpre_datasize = 0; in mana_pre_dealloc_rxbufs()
559 mpc->rxbpre_alloc_size = 0; in mana_pre_dealloc_rxbufs()
560 mpc->rxbpre_headroom = 0; in mana_pre_dealloc_rxbufs()
563 /* Get a buffer from the pre-allocated RX buffers */
566 struct net_device *ndev = rxq->ndev; in mana_get_rxbuf_pre()
572 if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) { in mana_get_rxbuf_pre()
573 netdev_err(ndev, "No RX pre-allocated bufs\n"); in mana_get_rxbuf_pre()
578 if (mpc->rxbpre_datasize != rxq->datasize) { in mana_get_rxbuf_pre()
580 mpc->rxbpre_datasize, rxq->datasize); in mana_get_rxbuf_pre()
584 if (mpc->rxbpre_alloc_size != rxq->alloc_size) { in mana_get_rxbuf_pre()
586 mpc->rxbpre_alloc_size, rxq->alloc_size); in mana_get_rxbuf_pre()
590 if (mpc->rxbpre_headroom != rxq->headroom) { in mana_get_rxbuf_pre()
592 mpc->rxbpre_headroom, rxq->headroom); in mana_get_rxbuf_pre()
596 mpc->rxbpre_total--; in mana_get_rxbuf_pre()
598 *da = mpc->das_pre[mpc->rxbpre_total]; in mana_get_rxbuf_pre()
599 va = mpc->rxbufs_pre[mpc->rxbpre_total]; in mana_get_rxbuf_pre()
600 mpc->rxbufs_pre[mpc->rxbpre_total] = NULL; in mana_get_rxbuf_pre()
603 if (!mpc->rxbpre_total) in mana_get_rxbuf_pre()
609 /* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
636 mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize, in mana_pre_alloc_rxbufs()
637 &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom); in mana_pre_alloc_rxbufs()
639 dev = mpc->ac->gdma_dev->gdma_context->dev; in mana_pre_alloc_rxbufs()
641 num_rxb = num_queues * mpc->rx_queue_size; in mana_pre_alloc_rxbufs()
643 WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n"); in mana_pre_alloc_rxbufs()
644 mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL); in mana_pre_alloc_rxbufs()
645 if (!mpc->rxbufs_pre) in mana_pre_alloc_rxbufs()
648 mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL); in mana_pre_alloc_rxbufs()
649 if (!mpc->das_pre) in mana_pre_alloc_rxbufs()
652 mpc->rxbpre_total = 0; in mana_pre_alloc_rxbufs()
655 if (mpc->rxbpre_alloc_size > PAGE_SIZE) { in mana_pre_alloc_rxbufs()
656 va = netdev_alloc_frag(mpc->rxbpre_alloc_size); in mana_pre_alloc_rxbufs()
663 get_order(mpc->rxbpre_alloc_size)) { in mana_pre_alloc_rxbufs()
675 da = dma_map_single(dev, va + mpc->rxbpre_headroom, in mana_pre_alloc_rxbufs()
676 mpc->rxbpre_datasize, DMA_FROM_DEVICE); in mana_pre_alloc_rxbufs()
682 mpc->rxbufs_pre[i] = va; in mana_pre_alloc_rxbufs()
683 mpc->das_pre[i] = da; in mana_pre_alloc_rxbufs()
684 mpc->rxbpre_total = i + 1; in mana_pre_alloc_rxbufs()
691 return -ENOMEM; in mana_pre_alloc_rxbufs()
697 unsigned int old_mtu = ndev->mtu; in mana_change_mtu()
700 /* Pre-allocate buffers to prevent failure in mana_attach later */ in mana_change_mtu()
701 err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues); in mana_change_mtu()
713 WRITE_ONCE(ndev->mtu, new_mtu); in mana_change_mtu()
718 WRITE_ONCE(ndev->mtu, old_mtu); in mana_change_mtu()
743 * We are sure the apc->mana_port_debugfs remove will not in mana_cleanup_port_context()
746 debugfs_remove(apc->mana_port_debugfs); in mana_cleanup_port_context()
747 kfree(apc->rxqs); in mana_cleanup_port_context()
748 apc->rxqs = NULL; in mana_cleanup_port_context()
753 apc->indir_table_sz = 0; in mana_cleanup_indir_table()
754 kfree(apc->indir_table); in mana_cleanup_indir_table()
755 kfree(apc->rxobj_table); in mana_cleanup_indir_table()
760 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), in mana_init_port_context()
763 return !apc->rxqs ? -ENOMEM : 0; in mana_init_port_context()
769 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_send_request()
772 struct device *dev = gc->dev; in mana_send_request()
776 req->dev_id = gc->mana.dev_id; in mana_send_request()
777 req->activity_id = atomic_inc_return(&activity_id); in mana_send_request()
781 if (err || resp->status) { in mana_send_request()
783 err, resp->status); in mana_send_request()
784 return err ? err : -EPROTO; in mana_send_request()
787 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 || in mana_send_request()
788 req->activity_id != resp->activity_id) { in mana_send_request()
790 req->dev_id.as_uint32, resp->dev_id.as_uint32, in mana_send_request()
791 req->activity_id, resp->activity_id); in mana_send_request()
792 return -EPROTO; in mana_send_request()
802 if (resp_hdr->response.msg_type != expected_code) in mana_verify_resp_hdr()
803 return -EPROTO; in mana_verify_resp_hdr()
805 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1) in mana_verify_resp_hdr()
806 return -EPROTO; in mana_verify_resp_hdr()
808 if (resp_hdr->response.msg_size < min_size) in mana_verify_resp_hdr()
809 return -EPROTO; in mana_verify_resp_hdr()
826 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_hw_vport()
829 netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err); in mana_pf_register_hw_vport()
836 netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n", in mana_pf_register_hw_vport()
838 return err ? err : -EPROTO; in mana_pf_register_hw_vport()
841 apc->port_handle = resp.hw_vport_handle; in mana_pf_register_hw_vport()
853 req.hw_vport_handle = apc->port_handle; in mana_pf_deregister_hw_vport()
855 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_hw_vport()
858 netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n", in mana_pf_deregister_hw_vport()
866 netdev_err(apc->ndev, in mana_pf_deregister_hw_vport()
879 req.vport = apc->port_handle; in mana_pf_register_filter()
880 memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN); in mana_pf_register_filter()
882 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_filter()
885 netdev_err(apc->ndev, "Failed to register filter: %d\n", err); in mana_pf_register_filter()
892 netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n", in mana_pf_register_filter()
894 return err ? err : -EPROTO; in mana_pf_register_filter()
897 apc->pf_filter_handle = resp.filter_handle; in mana_pf_register_filter()
909 req.filter_handle = apc->pf_filter_handle; in mana_pf_deregister_filter()
911 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_filter()
914 netdev_err(apc->ndev, "Failed to unregister filter: %d\n", in mana_pf_deregister_filter()
922 netdev_err(apc->ndev, in mana_pf_deregister_filter()
931 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_query_device_cfg()
934 struct device *dev = gc->dev; in mana_query_device_cfg()
958 err = -EPROTO; in mana_query_device_cfg()
965 gc->adapter_mtu = resp.adapter_mtu; in mana_query_device_cfg()
967 gc->adapter_mtu = ETH_FRAME_LEN; in mana_query_device_cfg()
969 debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu); in mana_query_device_cfg()
986 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_vport_cfg()
997 return -EPROTO; in mana_query_vport_cfg()
1006 netdev_warn(apc->ndev, in mana_query_vport_cfg()
1008 MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx); in mana_query_vport_cfg()
1012 apc->port_handle = resp.vport; in mana_query_vport_cfg()
1013 ether_addr_copy(apc->mac_addr, resp.mac_addr); in mana_query_vport_cfg()
1020 mutex_lock(&apc->vport_mutex); in mana_uncfg_vport()
1021 apc->vport_use_count--; in mana_uncfg_vport()
1022 WARN_ON(apc->vport_use_count < 0); in mana_uncfg_vport()
1023 mutex_unlock(&apc->vport_mutex); in mana_uncfg_vport()
1052 mutex_lock(&apc->vport_mutex); in mana_cfg_vport()
1053 if (apc->vport_use_count > 0) { in mana_cfg_vport()
1054 mutex_unlock(&apc->vport_mutex); in mana_cfg_vport()
1055 return -EBUSY; in mana_cfg_vport()
1057 apc->vport_use_count++; in mana_cfg_vport()
1058 mutex_unlock(&apc->vport_mutex); in mana_cfg_vport()
1062 req.vport = apc->port_handle; in mana_cfg_vport()
1066 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_cfg_vport()
1069 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err); in mana_cfg_vport()
1076 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n", in mana_cfg_vport()
1079 err = -EPROTO; in mana_cfg_vport()
1084 apc->tx_shortform_allowed = resp.short_form_allowed; in mana_cfg_vport()
1085 apc->tx_vp_offset = resp.tx_vport_offset; in mana_cfg_vport()
1087 netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n", in mana_cfg_vport()
1088 apc->port_handle, protection_dom_id, doorbell_pg_id); in mana_cfg_vport()
1098 enum TRI_STATE rx, in mana_cfg_vport_steering() argument
1104 struct net_device *ndev = apc->ndev; in mana_cfg_vport_steering()
1108 req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz); in mana_cfg_vport_steering()
1111 return -ENOMEM; in mana_cfg_vport_steering()
1113 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, in mana_cfg_vport_steering()
1116 req->hdr.req.msg_version = GDMA_MESSAGE_V2; in mana_cfg_vport_steering()
1118 req->vport = apc->port_handle; in mana_cfg_vport_steering()
1119 req->num_indir_entries = apc->indir_table_sz; in mana_cfg_vport_steering()
1120 req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2, in mana_cfg_vport_steering()
1122 req->rx_enable = rx; in mana_cfg_vport_steering()
1123 req->rss_enable = apc->rss_state; in mana_cfg_vport_steering()
1124 req->update_default_rxobj = update_default_rxobj; in mana_cfg_vport_steering()
1125 req->update_hashkey = update_key; in mana_cfg_vport_steering()
1126 req->update_indir_tab = update_tab; in mana_cfg_vport_steering()
1127 req->default_rxobj = apc->default_rxobj; in mana_cfg_vport_steering()
1128 req->cqe_coalescing_enable = 0; in mana_cfg_vport_steering()
1131 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); in mana_cfg_vport_steering()
1134 memcpy(req->indir_tab, apc->rxobj_table, in mana_cfg_vport_steering()
1135 flex_array_size(req, indir_tab, req->num_indir_entries)); in mana_cfg_vport_steering()
1137 err = mana_send_request(apc->ac, req, req_buf_size, &resp, in mana_cfg_vport_steering()
1140 netdev_err(ndev, "Failed to configure vPort RX: %d\n", err); in mana_cfg_vport_steering()
1147 netdev_err(ndev, "vPort RX configuration failed: %d\n", err); in mana_cfg_vport_steering()
1152 netdev_err(ndev, "vPort RX configuration failed: 0x%x\n", in mana_cfg_vport_steering()
1154 err = -EPROTO; in mana_cfg_vport_steering()
1158 apc->port_handle, apc->indir_table_sz); in mana_cfg_vport_steering()
1172 struct net_device *ndev = apc->ndev; in mana_create_wq_obj()
1179 req.wq_gdma_region = wq_spec->gdma_region; in mana_create_wq_obj()
1180 req.cq_gdma_region = cq_spec->gdma_region; in mana_create_wq_obj()
1181 req.wq_size = wq_spec->queue_size; in mana_create_wq_obj()
1182 req.cq_size = cq_spec->queue_size; in mana_create_wq_obj()
1183 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id; in mana_create_wq_obj()
1184 req.cq_parent_qid = cq_spec->attached_eq; in mana_create_wq_obj()
1186 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_create_wq_obj()
1199 err = -EPROTO; in mana_create_wq_obj()
1205 err = -EPROTO; in mana_create_wq_obj()
1210 wq_spec->queue_index = resp.wq_id; in mana_create_wq_obj()
1211 cq_spec->queue_index = resp.cq_id; in mana_create_wq_obj()
1224 struct net_device *ndev = apc->ndev; in mana_destroy_wq_obj()
1232 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_destroy_wq_obj()
1249 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_destroy_eq()
1250 struct gdma_queue *eq; in mana_destroy_eq() local
1253 if (!ac->eqs) in mana_destroy_eq()
1256 debugfs_remove_recursive(ac->mana_eqs_debugfs); in mana_destroy_eq()
1258 for (i = 0; i < gc->max_num_queues; i++) { in mana_destroy_eq()
1259 eq = ac->eqs[i].eq; in mana_destroy_eq()
1260 if (!eq) in mana_destroy_eq()
1263 mana_gd_destroy_queue(gc, eq); in mana_destroy_eq()
1266 kfree(ac->eqs); in mana_destroy_eq()
1267 ac->eqs = NULL; in mana_destroy_eq()
1272 struct mana_eq eq = ac->eqs[i]; in mana_create_eq_debugfs() local
1275 sprintf(eqnum, "eq%d", i); in mana_create_eq_debugfs()
1276 eq.mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs); in mana_create_eq_debugfs()
1277 debugfs_create_u32("head", 0400, eq.mana_eq_debugfs, &eq.eq->head); in mana_create_eq_debugfs()
1278 debugfs_create_u32("tail", 0400, eq.mana_eq_debugfs, &eq.eq->tail); in mana_create_eq_debugfs()
1279 debugfs_create_file("eq_dump", 0400, eq.mana_eq_debugfs, eq.eq, &mana_dbg_q_fops); in mana_create_eq_debugfs()
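mana_create_eq_debugfs() above exposes a queue's head/tail indices as read-only debugfs u32 files plus a raw dump file. A minimal sketch of the same layout under an assumed parent directory; the struct, directory name, and dump fops are stand-ins:

	/* Illustrative debugfs layout for one queue, in the style of
	 * mana_create_eq_debugfs(); 'parent' and 'struct q' are stand-ins.
	 */
	#include <linux/debugfs.h>
	#include <linux/kernel.h>

	struct q {
		u32 head;
		u32 tail;
	};

	static struct dentry *q_debugfs_add(struct dentry *parent, int idx,
					    struct q *q,
					    const struct file_operations *dump_fops)
	{
		char name[24];
		struct dentry *dir;

		snprintf(name, sizeof(name), "eq%d", idx);
		dir = debugfs_create_dir(name, parent);

		/* Read-only views of the producer/consumer indices. */
		debugfs_create_u32("head", 0400, dir, &q->head);
		debugfs_create_u32("tail", 0400, dir, &q->tail);
		debugfs_create_file("eq_dump", 0400, dir, q, dump_fops);

		return dir;
	}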
1284 struct gdma_dev *gd = ac->gdma_dev; in mana_create_eq()
1285 struct gdma_context *gc = gd->gdma_context; in mana_create_eq()
1290 ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq), in mana_create_eq()
1292 if (!ac->eqs) in mana_create_eq()
1293 return -ENOMEM; in mana_create_eq()
1298 spec.eq.callback = NULL; in mana_create_eq()
1299 spec.eq.context = ac->eqs; in mana_create_eq()
1300 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; in mana_create_eq()
1302 ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs); in mana_create_eq()
1304 for (i = 0; i < gc->max_num_queues; i++) { in mana_create_eq()
1305 spec.eq.msix_index = (i + 1) % gc->num_msix_usable; in mana_create_eq()
1306 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); in mana_create_eq()
1324 init_completion(&rxq->fence_event); in mana_fence_rq()
1328 req.wq_obj_handle = rxq->rxobj; in mana_fence_rq()
1330 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_fence_rq()
1333 netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n", in mana_fence_rq()
1334 rxq->rxq_idx, err); in mana_fence_rq()
1340 netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n", in mana_fence_rq()
1341 rxq->rxq_idx, err, resp.hdr.status); in mana_fence_rq()
1343 err = -EPROTO; in mana_fence_rq()
1348 if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) { in mana_fence_rq()
1349 netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n", in mana_fence_rq()
1350 rxq->rxq_idx); in mana_fence_rq()
1351 return -ETIMEDOUT; in mana_fence_rq()
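mana_fence_rq() above arms a completion, issues the fence request, and waits up to ten seconds; the RX CQE path (the CQE_RX_OBJECT_FENCE case in mana_process_rx_cqe(), visible further down) signals it with complete(). A minimal sketch of that request/complete handshake with hypothetical names; issue_fence_request() stands in for the real hardware exchange:

	/* Illustrative completion-based fence wait, as in mana_fence_rq(). */
	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	struct fence_ctx {
		struct completion done;
	};

	/* Called from the waiting context. */
	static int fence_and_wait(struct fence_ctx *ctx,
				  int (*issue_fence_request)(struct fence_ctx *))
	{
		int err;

		init_completion(&ctx->done);

		err = issue_fence_request(ctx);		/* posts the fence to HW */
		if (err)
			return err;

		if (!wait_for_completion_timeout(&ctx->done, 10 * HZ))
			return -ETIMEDOUT;

		return 0;
	}

	/* Called from the CQE handler when the fence completion arrives. */
	static void fence_done(struct fence_ctx *ctx)
	{
		complete(&ctx->done);
	}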
1363 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_fence_rqs()
1364 rxq = apc->rxqs[rxq_idx]; in mana_fence_rqs()
1378 used_space_old = wq->head - wq->tail; in mana_move_wq_tail()
1379 used_space_new = wq->head - (wq->tail + num_units); in mana_move_wq_tail()
1382 return -ERANGE; in mana_move_wq_tail()
1384 wq->tail += num_units; in mana_move_wq_tail()
1390 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; in mana_unmap_skb()
1391 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_unmap_skb()
1392 struct device *dev = gc->dev; in mana_unmap_skb()
1396 hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1; in mana_unmap_skb()
1399 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], in mana_unmap_skb()
1402 for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++) in mana_unmap_skb()
1403 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], in mana_unmap_skb()
1409 struct gdma_comp *completions = cq->gdma_comp_buf; in mana_poll_tx_cq()
1413 struct mana_txq *txq = cq->txq; in mana_poll_tx_cq()
1424 ndev = txq->ndev; in mana_poll_tx_cq()
1427 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, in mana_poll_tx_cq()
1440 if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type != in mana_poll_tx_cq()
1444 switch (cqe_oob->cqe_hdr.cqe_type) { in mana_poll_tx_cq()
1459 cqe_oob->cqe_hdr.cqe_type); in mana_poll_tx_cq()
1461 apc->eth_stats.tx_cqe_err++; in mana_poll_tx_cq()
1470 cqe_oob->cqe_hdr.cqe_type); in mana_poll_tx_cq()
1472 apc->eth_stats.tx_cqe_unknown_type++; in mana_poll_tx_cq()
1476 if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) in mana_poll_tx_cq()
1479 skb = skb_dequeue(&txq->pending_skbs); in mana_poll_tx_cq()
1483 wqe_info = (struct gdma_posted_wqe_info *)skb->cb; in mana_poll_tx_cq()
1484 wqe_unit_cnt += wqe_info->wqe_size_in_bu; in mana_poll_tx_cq()
1488 napi_consume_skb(skb, cq->budget); in mana_poll_tx_cq()
1496 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); in mana_poll_tx_cq()
1498 gdma_wq = txq->gdma_sq; in mana_poll_tx_cq()
1504 net_txq = txq->net_txq; in mana_poll_tx_cq()
1507 /* Ensure checking txq_stopped before apc->port_is_up. */ in mana_poll_tx_cq()
1510 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { in mana_poll_tx_cq()
1512 apc->eth_stats.wake_queue++; in mana_poll_tx_cq()
1515 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) in mana_poll_tx_cq()
1518 cq->work_done = pkt_transmitted; in mana_poll_tx_cq()
1527 curr_index = rxq->buf_index++; in mana_post_pkt_rxq()
1528 if (rxq->buf_index == rxq->num_rx_buf) in mana_post_pkt_rxq()
1529 rxq->buf_index = 0; in mana_post_pkt_rxq()
1531 recv_buf_oob = &rxq->rx_oobs[curr_index]; in mana_post_pkt_rxq()
1533 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req, in mana_post_pkt_rxq()
1534 &recv_buf_oob->wqe_inf); in mana_post_pkt_rxq()
1538 WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_post_pkt_rxq()
1544 struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size); in mana_build_skb()
1549 if (xdp->data_hard_start) { in mana_build_skb()
1550 skb_reserve(skb, xdp->data - xdp->data_hard_start); in mana_build_skb()
1551 skb_put(skb, xdp->data_end - xdp->data); in mana_build_skb()
1555 skb_reserve(skb, rxq->headroom); in mana_build_skb()
1564 struct mana_stats_rx *rx_stats = &rxq->stats; in mana_rx_skb()
1565 struct net_device *ndev = rxq->ndev; in mana_rx_skb()
1566 uint pkt_len = cqe->ppi[0].pkt_len; in mana_rx_skb()
1567 u16 rxq_idx = rxq->rxq_idx; in mana_rx_skb()
1574 rxq->rx_cq.work_done++; in mana_rx_skb()
1575 napi = &rxq->rx_cq.napi; in mana_rx_skb()
1578 ++ndev->stats.rx_dropped; in mana_rx_skb()
1584 if (act == XDP_REDIRECT && !rxq->xdp_rc) in mana_rx_skb()
1598 skb->dev = napi->dev; in mana_rx_skb()
1600 skb->protocol = eth_type_trans(skb, ndev); in mana_rx_skb()
1604 if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) { in mana_rx_skb()
1605 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) in mana_rx_skb()
1606 skb->ip_summed = CHECKSUM_UNNECESSARY; in mana_rx_skb()
1609 if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) { in mana_rx_skb()
1610 hash_value = cqe->ppi[0].pkt_hash; in mana_rx_skb()
1612 if (cqe->rx_hashtype & MANA_HASH_L4) in mana_rx_skb()
1618 if (cqe->rx_vlantag_present) { in mana_rx_skb()
1619 u16 vlan_tci = cqe->rx_vlan_id; in mana_rx_skb()
1624 u64_stats_update_begin(&rx_stats->syncp); in mana_rx_skb()
1625 rx_stats->packets++; in mana_rx_skb()
1626 rx_stats->bytes += pkt_len; in mana_rx_skb()
1629 rx_stats->xdp_tx++; in mana_rx_skb()
1630 u64_stats_update_end(&rx_stats->syncp); in mana_rx_skb()
1643 u64_stats_update_begin(&rx_stats->syncp); in mana_rx_skb()
1644 rx_stats->xdp_drop++; in mana_rx_skb()
1645 u64_stats_update_end(&rx_stats->syncp); in mana_rx_skb()
1649 page_pool_recycle_direct(rxq->page_pool, in mana_rx_skb()
1652 WARN_ON_ONCE(rxq->xdp_save_va); in mana_rx_skb()
1654 rxq->xdp_save_va = buf_va; in mana_rx_skb()
1657 ++ndev->stats.rx_dropped; in mana_rx_skb()
1671 if (rxq->xdp_save_va) { in mana_get_rxfrag()
1672 va = rxq->xdp_save_va; in mana_get_rxfrag()
1673 rxq->xdp_save_va = NULL; in mana_get_rxfrag()
1674 } else if (rxq->alloc_size > PAGE_SIZE) { in mana_get_rxfrag()
1676 va = napi_alloc_frag(rxq->alloc_size); in mana_get_rxfrag()
1678 va = netdev_alloc_frag(rxq->alloc_size); in mana_get_rxfrag()
1685 if (compound_order(page) < get_order(rxq->alloc_size)) { in mana_get_rxfrag()
1690 page = page_pool_dev_alloc_pages(rxq->page_pool); in mana_get_rxfrag()
1698 *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, in mana_get_rxfrag()
1702 page_pool_put_full_page(rxq->page_pool, page, false); in mana_get_rxfrag()
1712 /* Allocate frag for rx buffer, and save the old buf */
1725 dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize, in mana_refill_rx_oob()
1727 *old_buf = rxoob->buf_va; in mana_refill_rx_oob()
1728 *old_fp = rxoob->from_pool; in mana_refill_rx_oob()
1730 rxoob->buf_va = va; in mana_refill_rx_oob()
1731 rxoob->sgl[0].address = da; in mana_refill_rx_oob()
1732 rxoob->from_pool = from_pool; in mana_refill_rx_oob()
1738 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data; in mana_process_rx_cqe()
1739 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; in mana_process_rx_cqe()
1740 struct net_device *ndev = rxq->ndev; in mana_process_rx_cqe()
1743 struct device *dev = gc->dev; in mana_process_rx_cqe()
1750 switch (oob->cqe_hdr.cqe_type) { in mana_process_rx_cqe()
1755 ++ndev->stats.rx_dropped; in mana_process_rx_cqe()
1756 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index]; in mana_process_rx_cqe()
1761 netdev_err(ndev, "RX coalescing is unsupported\n"); in mana_process_rx_cqe()
1762 apc->eth_stats.rx_coalesced_err++; in mana_process_rx_cqe()
1766 complete(&rxq->fence_event); in mana_process_rx_cqe()
1770 netdev_err(ndev, "Unknown RX CQE type = %d\n", in mana_process_rx_cqe()
1771 oob->cqe_hdr.cqe_type); in mana_process_rx_cqe()
1772 apc->eth_stats.rx_cqe_unknown_type++; in mana_process_rx_cqe()
1776 pktlen = oob->ppi[0].pkt_len; in mana_process_rx_cqe()
1780 netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n", in mana_process_rx_cqe()
1781 rxq->gdma_id, cq->gdma_id, rxq->rxobj); in mana_process_rx_cqe()
1785 curr = rxq->buf_index; in mana_process_rx_cqe()
1786 rxbuf_oob = &rxq->rx_oobs[curr]; in mana_process_rx_cqe()
1787 WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_process_rx_cqe()
1797 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); in mana_process_rx_cqe()
1804 struct gdma_comp *comp = cq->gdma_comp_buf; in mana_poll_rx_cq()
1805 struct mana_rxq *rxq = cq->rxq; in mana_poll_rx_cq()
1808 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); in mana_poll_rx_cq()
1811 rxq->xdp_flush = false; in mana_poll_rx_cq()
1818 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id)) in mana_poll_rx_cq()
1825 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; in mana_poll_rx_cq()
1827 mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq); in mana_poll_rx_cq()
1830 if (rxq->xdp_flush) in mana_poll_rx_cq()
1839 WARN_ON_ONCE(cq->gdma_cq != gdma_queue); in mana_cq_handler()
1841 if (cq->type == MANA_CQ_TYPE_RX) in mana_cq_handler()
1846 w = cq->work_done; in mana_cq_handler()
1847 cq->work_done_since_doorbell += w; in mana_cq_handler()
1849 if (w < cq->budget) { in mana_cq_handler()
1851 cq->work_done_since_doorbell = 0; in mana_cq_handler()
1852 napi_complete_done(&cq->napi, w); in mana_cq_handler()
1853 } else if (cq->work_done_since_doorbell > in mana_cq_handler()
1854 cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) { in mana_cq_handler()
1861 cq->work_done_since_doorbell = 0; in mana_cq_handler()
1872 cq->work_done = 0; in mana_poll()
1873 cq->budget = budget; in mana_poll()
1875 w = mana_cq_handler(cq, cq->gdma_cq); in mana_poll()
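mana_cq_handler()/mana_poll() above follow the standard NAPI contract: process up to 'budget' completions, call napi_complete_done() only when under budget, and re-arm the completion queue afterwards. A hedged sketch of that contract; poll_cq() and rearm_cq() are stubs standing in for the driver's CQE draining and doorbell ring:

	/* Illustrative NAPI poll callback following the pattern visible in
	 * mana_poll()/mana_cq_handler(); the two helpers are stubs.
	 */
	#include <linux/netdevice.h>

	struct my_cq {				/* hypothetical */
		struct napi_struct napi;
	};

	static int poll_cq(struct my_cq *cq, int budget)
	{
		/* Stub: the real driver drains RX/TX CQEs here. */
		return 0;
	}

	static void rearm_cq(struct my_cq *cq)
	{
		/* Stub: the real driver rings the CQ doorbell with the ARM bit. */
	}

	static int my_napi_poll(struct napi_struct *napi, int budget)
	{
		struct my_cq *cq = container_of(napi, struct my_cq, napi);
		int work_done = poll_cq(cq, budget);

		/* Only complete (and allow IRQ re-arm) when under budget. */
		if (work_done < budget && napi_complete_done(napi, work_done))
			rearm_cq(cq);

		return min(work_done, budget);
	}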
1884 napi_schedule_irqoff(&cq->napi); in mana_schedule_napi()
1889 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_cq()
1891 if (!cq->gdma_cq) in mana_deinit_cq()
1894 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq); in mana_deinit_cq()
1899 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_txq()
1901 if (!txq->gdma_sq) in mana_deinit_txq()
1904 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); in mana_deinit_txq()
1912 if (!apc->tx_qp) in mana_destroy_txq()
1915 for (i = 0; i < apc->num_queues; i++) { in mana_destroy_txq()
1916 debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs); in mana_destroy_txq()
1918 napi = &apc->tx_qp[i].tx_cq.napi; in mana_destroy_txq()
1919 if (apc->tx_qp[i].txq.napi_initialized) { in mana_destroy_txq()
1923 apc->tx_qp[i].txq.napi_initialized = false; in mana_destroy_txq()
1925 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); in mana_destroy_txq()
1927 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); in mana_destroy_txq()
1929 mana_deinit_txq(apc, &apc->tx_qp[i].txq); in mana_destroy_txq()
1932 kfree(apc->tx_qp); in mana_destroy_txq()
1933 apc->tx_qp = NULL; in mana_destroy_txq()
1938 struct mana_tx_qp *tx_qp = &apc->tx_qp[idx]; in mana_create_txq_debugfs()
1941 sprintf(qnum, "TX-%d", idx); in mana_create_txq_debugfs()
1942 tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs); in mana_create_txq_debugfs()
1943 debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1944 &tx_qp->txq.gdma_sq->head); in mana_create_txq_debugfs()
1945 debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1946 &tx_qp->txq.gdma_sq->tail); in mana_create_txq_debugfs()
1947 debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1948 &tx_qp->txq.pending_skbs.qlen); in mana_create_txq_debugfs()
1949 debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1950 &tx_qp->tx_cq.gdma_cq->head); in mana_create_txq_debugfs()
1951 debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1952 &tx_qp->tx_cq.gdma_cq->tail); in mana_create_txq_debugfs()
1953 debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1954 &tx_qp->tx_cq.budget); in mana_create_txq_debugfs()
1955 debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1956 tx_qp->txq.gdma_sq, &mana_dbg_q_fops); in mana_create_txq_debugfs()
1957 debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs, in mana_create_txq_debugfs()
1958 tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops); in mana_create_txq_debugfs()
1964 struct mana_context *ac = apc->ac; in mana_create_txq()
1965 struct gdma_dev *gd = ac->gdma_dev; in mana_create_txq()
1977 apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp), in mana_create_txq()
1979 if (!apc->tx_qp) in mana_create_txq()
1980 return -ENOMEM; in mana_create_txq()
1983 * apc->tx_queue_size represents the maximum number of WQEs in mana_create_txq()
1987 * as min val of apc->tx_queue_size is 128 and that would make in mana_create_txq()
1988 * txq_size 128*32 = 4096 and the other higher values of apc->tx_queue_size in mana_create_txq()
1991 txq_size = apc->tx_queue_size * 32; in mana_create_txq()
1993 cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE; in mana_create_txq()
1995 gc = gd->gdma_context; in mana_create_txq()
1997 for (i = 0; i < apc->num_queues; i++) { in mana_create_txq()
1998 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE; in mana_create_txq()
2001 txq = &apc->tx_qp[i].txq; in mana_create_txq()
2003 u64_stats_init(&txq->stats.syncp); in mana_create_txq()
2004 txq->ndev = net; in mana_create_txq()
2005 txq->net_txq = netdev_get_tx_queue(net, i); in mana_create_txq()
2006 txq->vp_offset = apc->tx_vp_offset; in mana_create_txq()
2007 txq->napi_initialized = false; in mana_create_txq()
2008 skb_queue_head_init(&txq->pending_skbs); in mana_create_txq()
2014 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq); in mana_create_txq()
2019 cq = &apc->tx_qp[i].tx_cq; in mana_create_txq()
2020 cq->type = MANA_CQ_TYPE_TX; in mana_create_txq()
2022 cq->txq = txq; in mana_create_txq()
2029 spec.cq.parent_eq = ac->eqs[i].eq; in mana_create_txq()
2031 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_txq()
2038 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle; in mana_create_txq()
2039 wq_spec.queue_size = txq->gdma_sq->queue_size; in mana_create_txq()
2041 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; in mana_create_txq()
2042 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_txq()
2044 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_txq()
2046 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ, in mana_create_txq()
2048 &apc->tx_qp[i].tx_object); in mana_create_txq()
2053 txq->gdma_sq->id = wq_spec.queue_index; in mana_create_txq()
2054 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_txq()
2056 txq->gdma_sq->mem_info.dma_region_handle = in mana_create_txq()
2058 cq->gdma_cq->mem_info.dma_region_handle = in mana_create_txq()
2061 txq->gdma_txq_id = txq->gdma_sq->id; in mana_create_txq()
2063 cq->gdma_id = cq->gdma_cq->id; in mana_create_txq()
2065 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { in mana_create_txq()
2066 err = -EINVAL; in mana_create_txq()
2070 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_txq()
2074 netif_napi_add_tx(net, &cq->napi, mana_poll); in mana_create_txq()
2075 napi_enable(&cq->napi); in mana_create_txq()
2076 txq->napi_initialized = true; in mana_create_txq()
2078 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_txq()
2091 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_destroy_rxq()
2093 struct device *dev = gc->dev; in mana_destroy_rxq()
2101 debugfs_remove_recursive(rxq->mana_rx_debugfs); in mana_destroy_rxq()
2103 napi = &rxq->rx_cq.napi; in mana_destroy_rxq()
2112 xdp_rxq_info_unreg(&rxq->xdp_rxq); in mana_destroy_rxq()
2114 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); in mana_destroy_rxq()
2116 mana_deinit_cq(apc, &rxq->rx_cq); in mana_destroy_rxq()
2118 if (rxq->xdp_save_va) in mana_destroy_rxq()
2119 put_page(virt_to_head_page(rxq->xdp_save_va)); in mana_destroy_rxq()
2121 for (i = 0; i < rxq->num_rx_buf; i++) { in mana_destroy_rxq()
2122 rx_oob = &rxq->rx_oobs[i]; in mana_destroy_rxq()
2124 if (!rx_oob->buf_va) in mana_destroy_rxq()
2127 dma_unmap_single(dev, rx_oob->sgl[0].address, in mana_destroy_rxq()
2128 rx_oob->sgl[0].size, DMA_FROM_DEVICE); in mana_destroy_rxq()
2130 page = virt_to_head_page(rx_oob->buf_va); in mana_destroy_rxq()
2132 if (rx_oob->from_pool) in mana_destroy_rxq()
2133 page_pool_put_full_page(rxq->page_pool, page, false); in mana_destroy_rxq()
2137 rx_oob->buf_va = NULL; in mana_destroy_rxq()
2140 page_pool_destroy(rxq->page_pool); in mana_destroy_rxq()
2142 if (rxq->gdma_rq) in mana_destroy_rxq()
2143 mana_gd_destroy_queue(gc, rxq->gdma_rq); in mana_destroy_rxq()
2151 struct mana_port_context *mpc = netdev_priv(rxq->ndev); in mana_fill_rx_oob()
2156 if (mpc->rxbufs_pre) in mana_fill_rx_oob()
2162 return -ENOMEM; in mana_fill_rx_oob()
2164 rx_oob->buf_va = va; in mana_fill_rx_oob()
2165 rx_oob->from_pool = from_pool; in mana_fill_rx_oob()
2167 rx_oob->sgl[0].address = da; in mana_fill_rx_oob()
2168 rx_oob->sgl[0].size = rxq->datasize; in mana_fill_rx_oob()
2169 rx_oob->sgl[0].mem_key = mem_key; in mana_fill_rx_oob()
2180 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_alloc_rx_wqe()
2182 struct device *dev = gc->dev; in mana_alloc_rx_wqe()
2186 WARN_ON(rxq->datasize == 0); in mana_alloc_rx_wqe()
2191 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_alloc_rx_wqe()
2192 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_alloc_rx_wqe()
2195 rx_oob->num_sge = 1; in mana_alloc_rx_wqe()
2197 ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq, in mana_alloc_rx_wqe()
2202 rx_oob->wqe_req.sgl = rx_oob->sgl; in mana_alloc_rx_wqe()
2203 rx_oob->wqe_req.num_sge = rx_oob->num_sge; in mana_alloc_rx_wqe()
2204 rx_oob->wqe_req.inline_oob_size = 0; in mana_alloc_rx_wqe()
2205 rx_oob->wqe_req.inline_oob_data = NULL; in mana_alloc_rx_wqe()
2206 rx_oob->wqe_req.flags = 0; in mana_alloc_rx_wqe()
2207 rx_oob->wqe_req.client_data_unit = 0; in mana_alloc_rx_wqe()
2210 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32); in mana_alloc_rx_wqe()
2223 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_push_wqe()
2224 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_push_wqe()
2226 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, in mana_push_wqe()
2227 &rx_oob->wqe_inf); in mana_push_wqe()
2229 return -ENOSPC; in mana_push_wqe()
2237 struct mana_port_context *mpc = netdev_priv(rxq->ndev); in mana_create_page_pool()
2241 pprm.pool_size = mpc->rx_queue_size; in mana_create_page_pool()
2242 pprm.nid = gc->numa_node; in mana_create_page_pool()
2243 pprm.napi = &rxq->rx_cq.napi; in mana_create_page_pool()
2244 pprm.netdev = rxq->ndev; in mana_create_page_pool()
2246 rxq->page_pool = page_pool_create(&pprm); in mana_create_page_pool()
2248 if (IS_ERR(rxq->page_pool)) { in mana_create_page_pool()
2249 ret = PTR_ERR(rxq->page_pool); in mana_create_page_pool()
2250 rxq->page_pool = NULL; in mana_create_page_pool()
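mana_create_page_pool() above builds a page_pool sized to the RX ring and bound to the queue's NAPI context and NUMA node. A hedged sketch of the same setup; the parameter values and the header path (as in recent kernels) are assumptions for illustration:

	/* Illustrative page_pool creation for an RX queue, mirroring
	 * mana_create_page_pool(); sizes and the napi pointer are examples.
	 */
	#include <linux/netdevice.h>
	#include <net/page_pool/helpers.h>

	static struct page_pool *rxq_create_pool(struct net_device *ndev,
						 struct napi_struct *napi,
						 unsigned int ring_size, int nid)
	{
		struct page_pool_params pprm = {
			.pool_size = ring_size,	/* one page per RX descriptor */
			.nid = nid,		/* allocate near the consuming CPU */
			.napi = napi,		/* enables lockless direct recycling */
			.netdev = ndev,
		};

		return page_pool_create(&pprm);	/* returns ERR_PTR() on failure */
	}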
2258 u32 rxq_idx, struct mana_eq *eq, in mana_create_rxq() argument
2261 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_rxq()
2271 gc = gd->gdma_context; in mana_create_rxq()
2273 rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size), in mana_create_rxq()
2278 rxq->ndev = ndev; in mana_create_rxq()
2279 rxq->num_rx_buf = apc->rx_queue_size; in mana_create_rxq()
2280 rxq->rxq_idx = rxq_idx; in mana_create_rxq()
2281 rxq->rxobj = INVALID_MANA_HANDLE; in mana_create_rxq()
2283 mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size, in mana_create_rxq()
2284 &rxq->headroom); in mana_create_rxq()
2286 /* Create page pool for RX queue */ in mana_create_rxq()
2305 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); in mana_create_rxq()
2310 cq = &rxq->rx_cq; in mana_create_rxq()
2311 cq->type = MANA_CQ_TYPE_RX; in mana_create_rxq()
2312 cq->rxq = rxq; in mana_create_rxq()
2319 spec.cq.parent_eq = eq->eq; in mana_create_rxq()
2321 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_rxq()
2327 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle; in mana_create_rxq()
2328 wq_spec.queue_size = rxq->gdma_rq->queue_size; in mana_create_rxq()
2330 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; in mana_create_rxq()
2331 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_rxq()
2333 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_rxq()
2335 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ, in mana_create_rxq()
2336 &wq_spec, &cq_spec, &rxq->rxobj); in mana_create_rxq()
2340 rxq->gdma_rq->id = wq_spec.queue_index; in mana_create_rxq()
2341 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_rxq()
2343 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
2344 cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
2346 rxq->gdma_id = rxq->gdma_rq->id; in mana_create_rxq()
2347 cq->gdma_id = cq->gdma_cq->id; in mana_create_rxq()
2353 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { in mana_create_rxq()
2354 err = -EINVAL; in mana_create_rxq()
2358 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_rxq()
2360 netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1); in mana_create_rxq()
2362 WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, in mana_create_rxq()
2363 cq->napi.napi_id)); in mana_create_rxq()
2364 WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, in mana_create_rxq()
2365 rxq->page_pool)); in mana_create_rxq()
2367 napi_enable(&cq->napi); in mana_create_rxq()
2369 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_rxq()
2389 rxq = apc->rxqs[idx]; in mana_create_rxq_debugfs()
2391 sprintf(qnum, "RX-%d", idx); in mana_create_rxq_debugfs()
2392 rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs); in mana_create_rxq_debugfs()
2393 debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head); in mana_create_rxq_debugfs()
2394 debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail); in mana_create_rxq_debugfs()
2395 debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf); in mana_create_rxq_debugfs()
2396 debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs, in mana_create_rxq_debugfs()
2397 &rxq->rx_cq.gdma_cq->head); in mana_create_rxq_debugfs()
2398 debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs, in mana_create_rxq_debugfs()
2399 &rxq->rx_cq.gdma_cq->tail); in mana_create_rxq_debugfs()
2400 debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget); in mana_create_rxq_debugfs()
2401 debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops); in mana_create_rxq_debugfs()
2402 debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq, in mana_create_rxq_debugfs()
2409 struct mana_context *ac = apc->ac; in mana_add_rx_queues()
2414 for (i = 0; i < apc->num_queues; i++) { in mana_add_rx_queues()
2415 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); in mana_add_rx_queues()
2417 err = -ENOMEM; in mana_add_rx_queues()
2421 u64_stats_init(&rxq->stats.syncp); in mana_add_rx_queues()
2423 apc->rxqs[i] = rxq; in mana_add_rx_queues()
2428 apc->default_rxobj = apc->rxqs[0]->rxobj; in mana_add_rx_queues()
2435 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_destroy_vport()
2439 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_destroy_vport()
2440 rxq = apc->rxqs[rxq_idx]; in mana_destroy_vport()
2445 apc->rxqs[rxq_idx] = NULL; in mana_destroy_vport()
2451 if (gd->gdma_context->is_pf) in mana_destroy_vport()
2458 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_vport()
2461 apc->default_rxobj = INVALID_MANA_HANDLE; in mana_create_vport()
2463 if (gd->gdma_context->is_pf) { in mana_create_vport()
2469 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell); in mana_create_vport()
2478 if (!apc->indir_table_sz) { in mana_rss_table_alloc()
2479 netdev_err(apc->ndev, in mana_rss_table_alloc()
2481 apc->port_idx); in mana_rss_table_alloc()
2482 return -EINVAL; in mana_rss_table_alloc()
2485 apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL); in mana_rss_table_alloc()
2486 if (!apc->indir_table) in mana_rss_table_alloc()
2487 return -ENOMEM; in mana_rss_table_alloc()
2489 apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL); in mana_rss_table_alloc()
2490 if (!apc->rxobj_table) { in mana_rss_table_alloc()
2491 kfree(apc->indir_table); in mana_rss_table_alloc()
2492 return -ENOMEM; in mana_rss_table_alloc()
2502 for (i = 0; i < apc->indir_table_sz; i++) in mana_rss_table_init()
2503 apc->indir_table[i] = in mana_rss_table_init()
2504 ethtool_rxfh_indir_default(i, apc->num_queues); in mana_rss_table_init()
2507 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, in mana_config_rss() argument
2515 for (i = 0; i < apc->indir_table_sz; i++) { in mana_config_rss()
2516 queue_idx = apc->indir_table[i]; in mana_config_rss()
2517 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; in mana_config_rss()
2521 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab); in mana_config_rss()
2534 struct net_device *ndev = apc->ndev; in mana_query_gf_stats()
2568 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_gf_stats()
2582 apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe; in mana_query_gf_stats()
2583 apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled; in mana_query_gf_stats()
2584 apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes; in mana_query_gf_stats()
2585 apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts; in mana_query_gf_stats()
2586 apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes; in mana_query_gf_stats()
2587 apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts; in mana_query_gf_stats()
2588 apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes; in mana_query_gf_stats()
2589 apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts; in mana_query_gf_stats()
2590 apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes; in mana_query_gf_stats()
2591 apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled; in mana_query_gf_stats()
2592 apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled; in mana_query_gf_stats()
2593 apc->eth_stats.hc_tx_err_inval_vportoffset_pkt = in mana_query_gf_stats()
2595 apc->eth_stats.hc_tx_err_vlan_enforcement = in mana_query_gf_stats()
2597 apc->eth_stats.hc_tx_err_eth_type_enforcement = in mana_query_gf_stats()
2599 apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement; in mana_query_gf_stats()
2600 apc->eth_stats.hc_tx_err_sqpdid_enforcement = in mana_query_gf_stats()
2602 apc->eth_stats.hc_tx_err_cqpdid_enforcement = in mana_query_gf_stats()
2604 apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation; in mana_query_gf_stats()
2605 apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob; in mana_query_gf_stats()
2606 apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes; in mana_query_gf_stats()
2607 apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts; in mana_query_gf_stats()
2608 apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes; in mana_query_gf_stats()
2609 apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts; in mana_query_gf_stats()
2610 apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes; in mana_query_gf_stats()
2611 apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts; in mana_query_gf_stats()
2612 apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes; in mana_query_gf_stats()
2613 apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma; in mana_query_gf_stats()
2619 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_init_port()
2621 int port_idx = apc->port_idx; in mana_init_port()
2630 gc = gd->gdma_context; in mana_init_port()
2633 &apc->indir_table_sz); in mana_init_port()
2641 if (apc->max_queues > max_queues) in mana_init_port()
2642 apc->max_queues = max_queues; in mana_init_port()
2644 if (apc->num_queues > apc->max_queues) in mana_init_port()
2645 apc->num_queues = apc->max_queues; in mana_init_port()
2647 eth_hw_addr_set(ndev, apc->mac_addr); in mana_init_port()
2649 apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs); in mana_init_port()
2660 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_alloc_queues()
2667 err = netif_set_real_num_tx_queues(ndev, apc->num_queues); in mana_alloc_queues()
2675 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; in mana_alloc_queues()
2677 err = netif_set_real_num_rx_queues(ndev, apc->num_queues); in mana_alloc_queues()
2687 if (gd->gdma_context->is_pf) { in mana_alloc_queues()
2713 if (apc->port_st_save) { in mana_attach()
2721 apc->port_is_up = apc->port_st_save; in mana_attach()
2726 if (apc->port_is_up) in mana_attach()
2738 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_dealloc_queues()
2744 if (apc->port_is_up) in mana_dealloc_queues()
2745 return -EINVAL; in mana_dealloc_queues()
2749 if (gd->gdma_context->is_pf) in mana_dealloc_queues()
2752 /* No packet can be transmitted now since apc->port_is_up is false. in mana_dealloc_queues()
2753 * There is still a tiny chance that mana_poll_tx_cq() can re-enable in mana_dealloc_queues()
2754 * a txq because it may not timely see apc->port_is_up being cleared in mana_dealloc_queues()
2756 * new packets due to apc->port_is_up being false. in mana_dealloc_queues()
2758 * Drain all the in-flight TX packets. in mana_dealloc_queues()
2765 for (i = 0; i < apc->num_queues; i++) { in mana_dealloc_queues()
2766 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
2768 while (atomic_read(&txq->pending_sends) > 0 && in mana_dealloc_queues()
2773 if (atomic_read(&txq->pending_sends)) { in mana_dealloc_queues()
2774 err = pcie_flr(to_pci_dev(gd->gdma_context->dev)); in mana_dealloc_queues()
2777 err, atomic_read(&txq->pending_sends), in mana_dealloc_queues()
2778 txq->gdma_txq_id); in mana_dealloc_queues()
2784 for (i = 0; i < apc->num_queues; i++) { in mana_dealloc_queues()
2785 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
2786 while ((skb = skb_dequeue(&txq->pending_skbs))) { in mana_dealloc_queues()
2790 atomic_set(&txq->pending_sends, 0); in mana_dealloc_queues()
2796 apc->rss_state = TRI_STATE_FALSE; in mana_dealloc_queues()
2815 apc->port_st_save = apc->port_is_up; in mana_detach()
2816 apc->port_is_up = false; in mana_detach()
2824 if (apc->port_st_save) { in mana_detach()
2841 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_probe_port()
2847 gc->max_num_queues); in mana_probe_port()
2849 return -ENOMEM; in mana_probe_port()
2854 apc->ac = ac; in mana_probe_port()
2855 apc->ndev = ndev; in mana_probe_port()
2856 apc->max_queues = gc->max_num_queues; in mana_probe_port()
2857 apc->num_queues = gc->max_num_queues; in mana_probe_port()
2858 apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE; in mana_probe_port()
2859 apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE; in mana_probe_port()
2860 apc->port_handle = INVALID_MANA_HANDLE; in mana_probe_port()
2861 apc->pf_filter_handle = INVALID_MANA_HANDLE; in mana_probe_port()
2862 apc->port_idx = port_idx; in mana_probe_port()
2864 mutex_init(&apc->vport_mutex); in mana_probe_port()
2865 apc->vport_use_count = 0; in mana_probe_port()
2867 ndev->netdev_ops = &mana_devops; in mana_probe_port()
2868 ndev->ethtool_ops = &mana_ethtool_ops; in mana_probe_port()
2869 ndev->mtu = ETH_DATA_LEN; in mana_probe_port()
2870 ndev->max_mtu = gc->adapter_mtu - ETH_HLEN; in mana_probe_port()
2871 ndev->min_mtu = ETH_MIN_MTU; in mana_probe_port()
2872 ndev->needed_headroom = MANA_HEADROOM; in mana_probe_port()
2873 ndev->dev_port = port_idx; in mana_probe_port()
2874 SET_NETDEV_DEV(ndev, gc->dev); in mana_probe_port()
2878 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE); in mana_probe_port()
2890 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; in mana_probe_port()
2891 ndev->hw_features |= NETIF_F_RXCSUM; in mana_probe_port()
2892 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in mana_probe_port()
2893 ndev->hw_features |= NETIF_F_RXHASH; in mana_probe_port()
2894 ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | in mana_probe_port()
2896 ndev->vlan_features = ndev->features; in mana_probe_port()
2929 struct auxiliary_device *adev = gd->adev; in remove_adev()
2930 int id = adev->id; in remove_adev()
2936 gd->adev = NULL; in remove_adev()
2947 return -ENOMEM; in add_adev()
2949 adev = &madev->adev; in add_adev()
2953 adev->id = ret; in add_adev()
2955 adev->name = "rdma"; in add_adev()
2956 adev->dev.parent = gd->gdma_context->dev; in add_adev()
2957 adev->dev.release = adev_release; in add_adev()
2958 madev->mdev = gd; in add_adev()
2970 gd->adev = adev; in add_adev()
2977 mana_adev_idx_free(adev->id); in add_adev()
2987 struct gdma_context *gc = gd->gdma_context; in mana_probe()
2988 struct mana_context *ac = gd->driver_data; in mana_probe()
2989 struct device *dev = gc->dev; in mana_probe()
3005 return -ENOMEM; in mana_probe()
3007 ac->gdma_dev = gd; in mana_probe()
3008 gd->driver_data = ac; in mana_probe()
3021 ac->num_ports = num_ports; in mana_probe()
3023 if (ac->num_ports != num_ports) { in mana_probe()
3024 dev_err(dev, "The number of vPorts changed: %d->%d\n", in mana_probe()
3025 ac->num_ports, num_ports); in mana_probe()
3026 err = -EPROTO; in mana_probe()
3031 if (ac->num_ports == 0) in mana_probe()
3034 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV) in mana_probe()
3035 ac->num_ports = MAX_PORTS_IN_MANA_DEV; in mana_probe()
3038 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
3039 err = mana_probe_port(ac, i, &ac->ports[i]); in mana_probe()
3051 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
3053 err = mana_attach(ac->ports[i]); in mana_probe()
3077 struct gdma_context *gc = gd->gdma_context; in mana_remove()
3078 struct mana_context *ac = gd->driver_data; in mana_remove()
3080 struct device *dev = gc->dev; in mana_remove()
3086 if (gd->adev) in mana_remove()
3089 for (i = 0; i < ac->num_ports; i++) { in mana_remove()
3090 ndev = ac->ports[i]; in mana_remove()
3129 gd->driver_data = NULL; in mana_remove()
3130 gd->gdma_context = NULL; in mana_remove()
3140 if (port_index >= ac->num_ports) in mana_get_primary_netdev_rcu()
3144 if (ac->ports[port_index]->flags & IFF_SLAVE) in mana_get_primary_netdev_rcu()
3145 ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]); in mana_get_primary_netdev_rcu()
3147 ndev = ac->ports[port_index]; in mana_get_primary_netdev_rcu()