Lines Matching +full:mbox +full:- +full:rx
1 // SPDX-License-Identifier: GPL-2.0
39 struct otx2_nic *priv = rep->mdev; in rvu_rep_mcam_flow_init()
43 rep->flow_cfg = kcalloc(1, sizeof(struct otx2_flow_config), GFP_KERNEL); in rvu_rep_mcam_flow_init()
45 if (!rep->flow_cfg) in rvu_rep_mcam_flow_init()
46 return -ENOMEM; in rvu_rep_mcam_flow_init()
50 rep->flow_cfg->flow_ent = kcalloc(count, sizeof(u16), GFP_KERNEL); in rvu_rep_mcam_flow_init()
51 if (!rep->flow_cfg->flow_ent) in rvu_rep_mcam_flow_init()
52 return -ENOMEM; in rvu_rep_mcam_flow_init()
55 req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox); in rvu_rep_mcam_flow_init()
59 req->hdr.pcifunc = rep->pcifunc; in rvu_rep_mcam_flow_init()
60 req->contig = false; in rvu_rep_mcam_flow_init()
61 req->ref_entry = 0; in rvu_rep_mcam_flow_init()
62 req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ? in rvu_rep_mcam_flow_init()
63 NPC_MAX_NONCONTIG_ENTRIES : count - allocated; in rvu_rep_mcam_flow_init()
65 if (otx2_sync_mbox_msg(&priv->mbox)) in rvu_rep_mcam_flow_init()
69 (&priv->mbox.mbox, 0, &req->hdr); in rvu_rep_mcam_flow_init()
73 for (ent = 0; ent < rsp->count; ent++) in rvu_rep_mcam_flow_init()
74 rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; in rvu_rep_mcam_flow_init()
76 allocated += rsp->count; in rvu_rep_mcam_flow_init()
78 if (rsp->count != req->count) in rvu_rep_mcam_flow_init()
82 /* Multiple MCAM entry alloc requests could result in non-sequential in rvu_rep_mcam_flow_init()
88 sort(&rep->flow_cfg->flow_ent[0], allocated, in rvu_rep_mcam_flow_init()
89 sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL); in rvu_rep_mcam_flow_init()
91 mutex_unlock(&priv->mbox.lock); in rvu_rep_mcam_flow_init()
93 rep->flow_cfg->max_flows = allocated; in rvu_rep_mcam_flow_init()
96 rep->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; in rvu_rep_mcam_flow_init()
97 rep->flags |= OTX2_FLAG_NTUPLE_SUPPORT; in rvu_rep_mcam_flow_init()
98 rep->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; in rvu_rep_mcam_flow_init()
101 INIT_LIST_HEAD(&rep->flow_cfg->flow_list); in rvu_rep_mcam_flow_init()
102 INIT_LIST_HEAD(&rep->flow_cfg->flow_list_tc); in rvu_rep_mcam_flow_init()
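
The rvu_rep_mcam_flow_init() matches above are single lines pulled out of an allocation loop, so the loop structure itself is not visible in this listing. The sketch below is a hedged reconstruction of how those lines plausibly fit together; the helper name rep_alloc_mcam_entries() is hypothetical, and the while framing, the IS_ERR() check and the response-struct name are assumptions added for illustration, not matched lines.

    /* Hedged reconstruction of the non-contiguous MCAM entry allocation loop
     * suggested by the matches above; loop framing, response type and error
     * handling are assumptions, not verbatim driver code.
     */
    static int rep_alloc_mcam_entries(struct rep_dev *rep, int count)
    {
        struct npc_mcam_alloc_entry_req *req;
        struct npc_mcam_alloc_entry_rsp *rsp;
        struct otx2_nic *priv = rep->mdev;
        int allocated = 0, ent;

        mutex_lock(&priv->mbox.lock);
        while (allocated < count) {
            req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox);
            if (!req)
                break;

            req->hdr.pcifunc = rep->pcifunc;   /* allocate for the represented PF/VF */
            req->contig = false;               /* entries need not be contiguous */
            req->ref_entry = 0;
            req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
                    NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

            if (otx2_sync_mbox_msg(&priv->mbox))
                break;

            rsp = (struct npc_mcam_alloc_entry_rsp *)
                  otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
            if (IS_ERR(rsp))
                break;

            for (ent = 0; ent < rsp->count; ent++)
                rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

            allocated += rsp->count;
            if (rsp->count != req->count)      /* AF returned fewer entries; stop asking */
                break;
        }

        /* Multiple requests may return out-of-order indices; keep the list sorted. */
        sort(&rep->flow_cfg->flow_ent[0], allocated,
             sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
        mutex_unlock(&priv->mbox.lock);
        return allocated;
    }
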
110 struct otx2_nic *priv = rep->mdev; in rvu_rep_setup_tc_cb()
112 if (!(rep->flags & RVU_REP_VF_INITIALIZED)) in rvu_rep_setup_tc_cb()
113 return -EINVAL; in rvu_rep_setup_tc_cb()
115 if (!(rep->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) in rvu_rep_setup_tc_cb()
118 priv->netdev = rep->netdev; in rvu_rep_setup_tc_cb()
119 priv->flags = rep->flags; in rvu_rep_setup_tc_cb()
120 priv->pcifunc = rep->pcifunc; in rvu_rep_setup_tc_cb()
121 priv->flow_cfg = rep->flow_cfg; in rvu_rep_setup_tc_cb()
127 return -EOPNOTSUPP; in rvu_rep_setup_tc_cb()
144 return -EOPNOTSUPP; in rvu_rep_setup_tc()
153 struct otx2_nic *priv = rep->mdev; in rvu_rep_sp_stats64()
156 u16 qidx = rep->rep_id; in rvu_rep_sp_stats64()
159 rq = &priv->qset.rq[qidx]; in rvu_rep_sp_stats64()
162 sq = &priv->qset.sq[qidx]; in rvu_rep_sp_stats64()
164 stats->tx_bytes = sq->stats.bytes; in rvu_rep_sp_stats64()
165 stats->tx_packets = sq->stats.pkts; in rvu_rep_sp_stats64()
166 stats->rx_bytes = rq->stats.bytes; in rvu_rep_sp_stats64()
167 stats->rx_packets = rq->stats.pkts; in rvu_rep_sp_stats64()
184 return -EINVAL; in rvu_rep_get_offload_stats()
193 ether_addr_copy(hw_addr, rep->mac); in rvu_rep_dl_port_fn_hw_addr_get()
203 struct otx2_nic *priv = rep->mdev; in rvu_rep_dl_port_fn_hw_addr_set()
206 eth_hw_addr_set(rep->netdev, hw_addr); in rvu_rep_dl_port_fn_hw_addr_set()
207 ether_addr_copy(rep->mac, hw_addr); in rvu_rep_dl_port_fn_hw_addr_set()
210 evt.pcifunc = rep->pcifunc; in rvu_rep_dl_port_fn_hw_addr_set()
224 struct pci_dev *pdev = priv->pdev; in rvu_rep_devlink_set_switch_id()
229 ppid->id_len = sizeof(id); in rvu_rep_devlink_set_switch_id()
230 put_unaligned_be64(id, &ppid->id); in rvu_rep_devlink_set_switch_id()
235 devlink_port_unregister(&rep->dl_port); in rvu_rep_devlink_port_unregister()
241 struct otx2_nic *priv = rep->mdev; in rvu_rep_devlink_port_register()
242 struct devlink *dl = priv->dl->dl; in rvu_rep_devlink_port_register()
245 if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) { in rvu_rep_devlink_port_register()
247 attrs.phys.port_number = rvu_get_pf(priv->pdev, rep->pcifunc); in rvu_rep_devlink_port_register()
250 attrs.pci_vf.pf = rvu_get_pf(priv->pdev, rep->pcifunc); in rvu_rep_devlink_port_register()
251 attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK; in rvu_rep_devlink_port_register()
255 devlink_port_attrs_set(&rep->dl_port, &attrs); in rvu_rep_devlink_port_register()
257 err = devl_port_register_with_ops(dl, &rep->dl_port, rep->rep_id, in rvu_rep_devlink_port_register()
260 dev_err(rep->mdev->dev, "devlink_port_register failed: %d\n", in rvu_rep_devlink_port_register()
271 for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) in rvu_rep_get_repid()
272 if (priv->rep_pf_map[rep_id] == pcifunc) in rvu_rep_get_repid()
274 return -EINVAL; in rvu_rep_get_repid()
282 mutex_lock(&priv->mbox.lock); in rvu_rep_notify_pfvf()
283 req = otx2_mbox_alloc_msg_rep_event_notify(&priv->mbox); in rvu_rep_notify_pfvf()
285 mutex_unlock(&priv->mbox.lock); in rvu_rep_notify_pfvf()
286 return -ENOMEM; in rvu_rep_notify_pfvf()
288 req->event = event; in rvu_rep_notify_pfvf()
289 req->pcifunc = data->pcifunc; in rvu_rep_notify_pfvf()
291 memcpy(&req->evt_data, &data->evt_data, sizeof(struct rep_evt_data)); in rvu_rep_notify_pfvf()
292 otx2_sync_mbox_msg(&priv->mbox); in rvu_rep_notify_pfvf()
293 mutex_unlock(&priv->mbox.lock); in rvu_rep_notify_pfvf()
303 rep_id = rvu_rep_get_repid(priv, info->pcifunc); in rvu_rep_state_evt_handler()
304 rep = priv->reps[rep_id]; in rvu_rep_state_evt_handler()
305 if (info->evt_data.vf_state) in rvu_rep_state_evt_handler()
306 rep->flags |= RVU_REP_VF_INITIALIZED; in rvu_rep_state_evt_handler()
308 rep->flags &= ~RVU_REP_VF_INITIALIZED; in rvu_rep_state_evt_handler()
313 if (info->event & RVU_EVENT_PFVF_STATE) in rvu_event_up_notify()
321 struct otx2_nic *priv = rep->mdev; in rvu_rep_change_mtu()
325 dev->mtu, new_mtu); in rvu_rep_change_mtu()
326 dev->mtu = new_mtu; in rvu_rep_change_mtu()
329 evt.pcifunc = rep->pcifunc; in rvu_rep_change_mtu()
345 priv = rep->mdev; in rvu_rep_get_stats()
347 mutex_lock(&priv->mbox.lock); in rvu_rep_get_stats()
348 req = otx2_mbox_alloc_msg_nix_lf_stats(&priv->mbox); in rvu_rep_get_stats()
350 mutex_unlock(&priv->mbox.lock); in rvu_rep_get_stats()
353 req->pcifunc = rep->pcifunc; in rvu_rep_get_stats()
354 err = otx2_sync_mbox_msg_busy_poll(&priv->mbox); in rvu_rep_get_stats()
359 otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr); in rvu_rep_get_stats()
366 stats = &rep->stats; in rvu_rep_get_stats()
367 stats->rx_bytes = rsp->rx.octs; in rvu_rep_get_stats()
368 stats->rx_frames = rsp->rx.ucast + rsp->rx.bcast + in rvu_rep_get_stats()
369 rsp->rx.mcast; in rvu_rep_get_stats()
370 stats->rx_drops = rsp->rx.drop; in rvu_rep_get_stats()
371 stats->rx_mcast_frames = rsp->rx.mcast; in rvu_rep_get_stats()
372 stats->tx_bytes = rsp->tx.octs; in rvu_rep_get_stats()
373 stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast; in rvu_rep_get_stats()
374 stats->tx_drops = rsp->tx.drop; in rvu_rep_get_stats()
376 mutex_unlock(&priv->mbox.lock); in rvu_rep_get_stats()
384 if (!(rep->flags & RVU_REP_VF_INITIALIZED)) in rvu_rep_get_stats64()
387 stats->rx_packets = rep->stats.rx_frames; in rvu_rep_get_stats64()
388 stats->rx_bytes = rep->stats.rx_bytes; in rvu_rep_get_stats64()
389 stats->rx_dropped = rep->stats.rx_drops; in rvu_rep_get_stats64()
390 stats->multicast = rep->stats.rx_mcast_frames; in rvu_rep_get_stats64()
392 stats->tx_packets = rep->stats.tx_frames; in rvu_rep_get_stats64()
393 stats->tx_bytes = rep->stats.tx_bytes; in rvu_rep_get_stats64()
394 stats->tx_dropped = rep->stats.tx_drops; in rvu_rep_get_stats64()
396 schedule_delayed_work(&rep->stats_wrk, msecs_to_jiffies(100)); in rvu_rep_get_stats64()
403 mutex_lock(&priv->mbox.lock); in rvu_eswitch_config()
404 req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox); in rvu_eswitch_config()
406 mutex_unlock(&priv->mbox.lock); in rvu_eswitch_config()
407 return -ENOMEM; in rvu_eswitch_config()
409 req->ena = ena; in rvu_eswitch_config()
410 otx2_sync_mbox_msg(&priv->mbox); in rvu_eswitch_config()
411 mutex_unlock(&priv->mbox.lock); in rvu_eswitch_config()
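
Several functions in this listing (rvu_rep_notify_pfvf(), rvu_rep_get_stats(), rvu_eswitch_config(), rvu_get_rep_cnt()) follow the same synchronous AF-mailbox sequence: take priv->mbox.lock, allocate the request message, fill it, sync, unlock. A minimal sketch of that sequence, modelled on the rvu_eswitch_config() matches just above, is given below; the signature, braces and return values are not in the matched lines and are assumptions.

    /* Hedged sketch of the synchronous mbox request pattern; lines not present
     * in the matches above are assumptions.
     */
    static int rvu_eswitch_config(struct otx2_nic *priv, bool ena)
    {
        struct esw_cfg_req *req;               /* request type assumed from the alloc helper name */

        mutex_lock(&priv->mbox.lock);          /* one outstanding AF request at a time */
        req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox);
        if (!req) {
            mutex_unlock(&priv->mbox.lock);    /* never return with the mbox lock held */
            return -ENOMEM;
        }
        req->ena = ena;                        /* request payload: enable/disable the eswitch */
        otx2_sync_mbox_msg(&priv->mbox);       /* send and wait for the AF reply */
        mutex_unlock(&priv->mbox.lock);
        return 0;
    }

Callers that need data back, such as rvu_rep_get_stats() and rvu_get_rep_cnt(), additionally fetch the reply with otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr) before dropping the lock, as their matches show.
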
418 struct otx2_nic *pf = rep->mdev; in rvu_rep_xmit()
422 sq = &pf->qset.sq[rep->rep_id]; in rvu_rep_xmit()
425 if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) { in rvu_rep_xmit()
430 if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb) in rvu_rep_xmit()
431 > sq->sqe_thresh) in rvu_rep_xmit()
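
The rvu_rep_xmit() matches show the SQ append call and the free-descriptor check, but not the queue stop/wake handling around them. The sketch below fills in that framing under the usual otx2 TX backpressure pattern; the stop/wake calls, the barrier and the return codes are assumptions rather than matched lines.

    /* Hedged sketch of the TX backpressure framing implied by the matches. */
    if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) {
        netif_tx_stop_queue(txq);              /* SQ had no room for this skb */

        /* Re-check free SQE space in case SQBs were released meanwhile */
        smp_mb();
        if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
                        > sq->sqe_thresh)
            netif_tx_wake_queue(txq);

        return NETDEV_TX_BUSY;                 /* ask the stack to retry later */
    }

    return NETDEV_TX_OK;
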
442 struct otx2_nic *priv = rep->mdev; in rvu_rep_open()
445 if (!(rep->flags & RVU_REP_VF_INITIALIZED)) in rvu_rep_open()
453 evt.pcifunc = rep->pcifunc; in rvu_rep_open()
461 struct otx2_nic *priv = rep->mdev; in rvu_rep_stop()
464 if (!(rep->flags & RVU_REP_VF_INITIALIZED)) in rvu_rep_stop()
471 evt.pcifunc = rep->pcifunc; in rvu_rep_stop()
490 struct otx2_qset *qset = &priv->qset; in rvu_rep_napi_init()
492 struct otx2_hw *hw = &priv->hw; in rvu_rep_napi_init()
496 qset->napi = kcalloc(hw->cint_cnt, sizeof(*cq_poll), GFP_KERNEL); in rvu_rep_napi_init()
497 if (!qset->napi) in rvu_rep_napi_init()
498 return -ENOMEM; in rvu_rep_napi_init()
501 for (qidx = 0; qidx < hw->cint_cnt; qidx++) { in rvu_rep_napi_init()
502 cq_poll = &qset->napi[qidx]; in rvu_rep_napi_init()
503 cq_poll->cint_idx = qidx; in rvu_rep_napi_init()
504 cq_poll->cq_ids[CQ_RX] = in rvu_rep_napi_init()
505 (qidx < hw->rx_queues) ? qidx : CINT_INVALID_CQ; in rvu_rep_napi_init()
506 cq_poll->cq_ids[CQ_TX] = (qidx < hw->tx_queues) ? in rvu_rep_napi_init()
507 qidx + hw->rx_queues : in rvu_rep_napi_init()
509 cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; in rvu_rep_napi_init()
510 cq_poll->cq_ids[CQ_QOS] = CINT_INVALID_CQ; in rvu_rep_napi_init()
512 cq_poll->dev = (void *)priv; in rvu_rep_napi_init()
513 netif_napi_add(priv->reps[qidx]->netdev, &cq_poll->napi, in rvu_rep_napi_init()
515 napi_enable(&cq_poll->napi); in rvu_rep_napi_init()
518 vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START; in rvu_rep_napi_init()
519 for (qidx = 0; qidx < hw->cint_cnt; qidx++) { in rvu_rep_napi_init()
520 irq_name = &hw->irq_name[vec * NAME_SIZE]; in rvu_rep_napi_init()
522 snprintf(irq_name, NAME_SIZE, "rep%d-rxtx-%d", qidx, qidx); in rvu_rep_napi_init()
524 err = request_irq(pci_irq_vector(priv->pdev, vec), in rvu_rep_napi_init()
526 &qset->napi[qidx]); in rvu_rep_napi_init()
539 priv->flags &= ~OTX2_FLAG_INTF_DOWN; in rvu_rep_napi_init()
550 struct otx2_qset *qset = &priv->qset; in rvu_rep_free_cq_rsrc()
555 vec = priv->hw.nix_msixoff + NIX_LF_CINT_VEC_START; in rvu_rep_free_cq_rsrc()
556 for (qidx = 0; qidx < priv->hw.cint_cnt; qidx++) { in rvu_rep_free_cq_rsrc()
560 synchronize_irq(pci_irq_vector(priv->pdev, vec)); in rvu_rep_free_cq_rsrc()
562 cq_poll = &qset->napi[qidx]; in rvu_rep_free_cq_rsrc()
563 napi_synchronize(&cq_poll->napi); in rvu_rep_free_cq_rsrc()
566 otx2_free_cints(priv, priv->hw.cint_cnt); in rvu_rep_free_cq_rsrc()
572 struct otx2_qset *qset = &priv->qset; in rvu_rep_rsrc_free()
576 for (wrk = 0; wrk < priv->qset.cq_cnt; wrk++) { in rvu_rep_rsrc_free()
577 work = &priv->refill_wrk[wrk].pool_refill_work; in rvu_rep_rsrc_free()
580 devm_kfree(priv->dev, priv->refill_wrk); in rvu_rep_rsrc_free()
588 struct otx2_qset *qset = &priv->qset; in rvu_rep_rsrc_init()
595 priv->hw.max_mtu = otx2_get_max_mtu(priv); in rvu_rep_rsrc_init()
596 priv->tx_max_pktlen = priv->hw.max_mtu + OTX2_ETH_HLEN; in rvu_rep_rsrc_init()
597 priv->rbsize = ALIGN(priv->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM; in rvu_rep_rsrc_init()
604 err = otx2_hw_set_mtu(priv, priv->hw.max_mtu); in rvu_rep_rsrc_init()
606 dev_err(priv->dev, "Failed to set HW MTU\n"); in rvu_rep_rsrc_init()
623 priv->flags |= OTX2_FLAG_INTF_DOWN; in rvu_rep_destroy()
625 for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) { in rvu_rep_destroy()
626 rep = priv->reps[rep_id]; in rvu_rep_destroy()
627 unregister_netdev(rep->netdev); in rvu_rep_destroy()
629 free_netdev(rep->netdev); in rvu_rep_destroy()
630 kfree(rep->flow_cfg); in rvu_rep_destroy()
632 kfree(priv->reps); in rvu_rep_destroy()
638 int rep_cnt = priv->rep_cnt; in rvu_rep_create()
646 return -ENOMEM; in rvu_rep_create()
648 priv->reps = kcalloc(rep_cnt, sizeof(struct rep_dev *), GFP_KERNEL); in rvu_rep_create()
649 if (!priv->reps) in rvu_rep_create()
650 return -ENOMEM; in rvu_rep_create()
658 err = -ENOMEM; in rvu_rep_create()
663 priv->reps[rep_id] = rep; in rvu_rep_create()
664 rep->mdev = priv; in rvu_rep_create()
665 rep->netdev = ndev; in rvu_rep_create()
666 rep->rep_id = rep_id; in rvu_rep_create()
668 ndev->min_mtu = OTX2_MIN_MTU; in rvu_rep_create()
669 ndev->max_mtu = priv->hw.max_mtu; in rvu_rep_create()
670 ndev->netdev_ops = &rvu_rep_netdev_ops; in rvu_rep_create()
671 pcifunc = priv->rep_pf_map[rep_id]; in rvu_rep_create()
672 rep->pcifunc = pcifunc; in rvu_rep_create()
674 snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d", in rvu_rep_create()
675 rvu_get_pf(priv->pdev, pcifunc), in rvu_rep_create()
678 ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | in rvu_rep_create()
682 ndev->hw_features |= NETIF_F_HW_TC; in rvu_rep_create()
683 ndev->features |= ndev->hw_features; in rvu_rep_create()
691 SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port); in rvu_rep_create()
701 INIT_DELAYED_WORK(&rep->stats_wrk, rvu_rep_get_stats); in rvu_rep_create()
710 while (--rep_id >= 0) { in rvu_rep_create()
711 rep = priv->reps[rep_id]; in rvu_rep_create()
712 unregister_netdev(rep->netdev); in rvu_rep_create()
714 free_netdev(rep->netdev); in rvu_rep_create()
716 kfree(priv->reps); in rvu_rep_create()
728 mutex_lock(&priv->mbox.lock); in rvu_get_rep_cnt()
729 req = otx2_mbox_alloc_msg_get_rep_cnt(&priv->mbox); in rvu_get_rep_cnt()
731 mutex_unlock(&priv->mbox.lock); in rvu_get_rep_cnt()
732 return -ENOMEM; in rvu_get_rep_cnt()
734 err = otx2_sync_mbox_msg(&priv->mbox); in rvu_get_rep_cnt()
738 msghdr = otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr); in rvu_get_rep_cnt()
745 priv->hw.tx_queues = rsp->rep_cnt; in rvu_get_rep_cnt()
746 priv->hw.rx_queues = rsp->rep_cnt; in rvu_get_rep_cnt()
747 priv->rep_cnt = rsp->rep_cnt; in rvu_get_rep_cnt()
748 for (rep = 0; rep < priv->rep_cnt; rep++) in rvu_get_rep_cnt()
749 priv->rep_pf_map[rep] = rsp->rep_pf_map[rep]; in rvu_get_rep_cnt()
752 mutex_unlock(&priv->mbox.lock); in rvu_get_rep_cnt()
758 struct device *dev = &pdev->dev; in rvu_rep_probe()
785 err = -ENOMEM; in rvu_rep_probe()
790 priv->pdev = pdev; in rvu_rep_probe()
791 priv->dev = dev; in rvu_rep_probe()
792 priv->flags |= OTX2_FLAG_INTF_DOWN; in rvu_rep_probe()
793 priv->flags |= OTX2_FLAG_REP_MODE_ENABLED; in rvu_rep_probe()
795 hw = &priv->hw; in rvu_rep_probe()
796 hw->pdev = pdev; in rvu_rep_probe()
797 hw->max_queues = OTX2_MAX_CQ_CNT; in rvu_rep_probe()
798 hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN; in rvu_rep_probe()
799 hw->xqe_size = 128; in rvu_rep_probe()
805 priv->iommu_domain = iommu_get_domain_for_dev(dev); in rvu_rep_probe()
818 if (priv->hw.lmt_info) in rvu_rep_probe()
819 free_percpu(priv->hw.lmt_info); in rvu_rep_probe()
820 if (test_bit(CN10K_LMTST, &priv->hw.cap_flag)) in rvu_rep_probe()
821 qmem_free(priv->dev, priv->dync_lmt); in rvu_rep_probe()
822 otx2_detach_resources(&priv->mbox); in rvu_rep_probe()
836 if (!(priv->flags & OTX2_FLAG_INTF_DOWN)) in rvu_rep_remove()
838 otx2_detach_resources(&priv->mbox); in rvu_rep_remove()
839 if (priv->hw.lmt_info) in rvu_rep_remove()
840 free_percpu(priv->hw.lmt_info); in rvu_rep_remove()
841 if (test_bit(CN10K_LMTST, &priv->hw.cap_flag)) in rvu_rep_remove()
842 qmem_free(priv->dev, priv->dync_lmt); in rvu_rep_remove()
845 pci_free_irq_vectors(priv->pdev); in rvu_rep_remove()