Lines matching refs:apc (identifier cross-reference over the MANA Ethernet driver, drivers/net/ethernet/microsoft/mana/mana_en.c)

51 static bool mana_en_need_log(struct mana_port_context *apc, int err)  in mana_en_need_log()  argument
53 if (apc && apc->ac && apc->ac->gdma_dev && in mana_en_need_log()
54 apc->ac->gdma_dev->gdma_context) in mana_en_need_log()
55 return mana_need_log(apc->ac->gdma_dev->gdma_context, err); in mana_en_need_log()
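Only the lines referencing apc are matched above; a sketch of the full helper follows, assuming the conventional fallback of logging when the GDMA context chain cannot be walked (the final return is an assumption, not shown in the match):

        static bool mana_en_need_log(struct mana_port_context *apc, int err)
        {
                /* Walk apc -> adapter context -> GDMA device -> GDMA context,
                 * guarding every link, and defer to the core helper if possible.
                 */
                if (apc && apc->ac && apc->ac->gdma_dev &&
                    apc->ac->gdma_dev->gdma_context)
                        return mana_need_log(apc->ac->gdma_dev->gdma_context, err);

                /* No context to consult: assumed fallback is to log. */
                return true;
        }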
64 struct mana_port_context *apc = netdev_priv(ndev); in mana_open() local
73 apc->port_is_up = true; in mana_open()
86 struct mana_port_context *apc = netdev_priv(ndev); in mana_close() local
88 if (!apc->port_is_up) in mana_close()
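A pattern repeated in every ndo handler above: apc is the driver-private region allocated alongside the net_device, so it is recovered with netdev_priv() rather than stored globally. A condensed sketch of mana_open()/mana_close() built only from the matched lines (the elided bodies are assumptions):

        static int mana_open(struct net_device *ndev)
        {
                struct mana_port_context *apc = netdev_priv(ndev);

                /* ... allocate and attach queues, then mark the port live ... */
                apc->port_is_up = true;
                return 0;
        }

        static int mana_close(struct net_device *ndev)
        {
                struct mana_port_context *apc = netdev_priv(ndev);

                if (!apc->port_is_up)
                        return 0;
                /* ... tear down queues ... */
                return 0;
        }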
134 static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc, in mana_map_skb() argument
139 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_map_skb()
193 netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n", in mana_map_skb()
255 struct mana_port_context *apc = netdev_priv(ndev); in mana_start_xmit() local
258 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_start_xmit()
269 if (unlikely(!apc->port_is_up)) in mana_start_xmit()
278 txq = &apc->tx_qp[txq_idx].txq; in mana_start_xmit()
280 cq = &apc->tx_qp[txq_idx].tx_cq; in mana_start_xmit()
410 if (mana_map_skb(skb, apc, &pkg, gso_hs)) { in mana_start_xmit()
427 apc->eth_stats.stop_queue++; in mana_start_xmit()
457 apc->eth_stats.wake_queue++; in mana_start_xmit()
475 struct mana_port_context *apc = netdev_priv(ndev); in mana_get_stats64() local
476 unsigned int num_queues = apc->num_queues; in mana_get_stats64()
483 if (!apc->port_is_up) in mana_get_stats64()
489 rx_stats = &apc->rxqs[q]->stats; in mana_get_stats64()
502 tx_stats = &apc->tx_qp[q].txq.stats; in mana_get_stats64()
518 struct mana_port_context *apc = netdev_priv(ndev); in mana_get_tx_queue() local
523 txq = apc->indir_table[hash & (apc->indir_table_sz - 1)]; in mana_get_tx_queue()
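The queue selection masks the flow hash with indir_table_sz - 1, which only yields a valid slot when the table size is a power of two. A minimal sketch of the lookup (the helper name is hypothetical):

        /* Map a flow hash to a TX queue via the RSS indirection table.
         * Correct only while apc->indir_table_sz is a power of two.
         */
        static u32 mana_hash_to_txq(struct mana_port_context *apc, u32 hash)
        {
                return apc->indir_table[hash & (apc->indir_table_sz - 1)];
        }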
740 struct mana_port_context *apc = netdev_priv(binding->netdev); in mana_shaper_set() local
749 if (apc->handle.id && shaper->handle.id != apc->handle.id) { in mana_shaper_set()
763 err = mana_query_link_cfg(apc); in mana_shaper_set()
764 old_speed = (err) ? SPEED_UNKNOWN : apc->speed; in mana_shaper_set()
767 err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE); in mana_shaper_set()
768 apc->speed = (err) ? old_speed : rate; in mana_shaper_set()
769 apc->handle = (err) ? apc->handle : shaper->handle; in mana_shaper_set()
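mana_shaper_set() snapshots the current speed before attempting the clamp so a failure leaves apc consistent: speed reverts to the old value and the stored handle is untouched. The rollback logic, condensed from the matched lines:

        err = mana_query_link_cfg(apc);
        old_speed = err ? SPEED_UNKNOWN : apc->speed;

        err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE);
        /* On failure, keep the previous speed and shaper handle. */
        apc->speed  = err ? old_speed : rate;
        apc->handle = err ? apc->handle : shaper->handle;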
779 struct mana_port_context *apc = netdev_priv(binding->netdev); in mana_shaper_del() local
782 err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE); in mana_shaper_del()
786 apc->handle.id = 0; in mana_shaper_del()
787 apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC; in mana_shaper_del()
788 apc->speed = 0; in mana_shaper_del()
821 static void mana_cleanup_port_context(struct mana_port_context *apc) in mana_cleanup_port_context() argument
827 debugfs_remove(apc->mana_port_debugfs); in mana_cleanup_port_context()
828 apc->mana_port_debugfs = NULL; in mana_cleanup_port_context()
829 kfree(apc->rxqs); in mana_cleanup_port_context()
830 apc->rxqs = NULL; in mana_cleanup_port_context()
833 static void mana_cleanup_indir_table(struct mana_port_context *apc) in mana_cleanup_indir_table() argument
835 apc->indir_table_sz = 0; in mana_cleanup_indir_table()
836 kfree(apc->indir_table); in mana_cleanup_indir_table()
837 kfree(apc->rxobj_table); in mana_cleanup_indir_table()
840 static int mana_init_port_context(struct mana_port_context *apc) in mana_init_port_context() argument
842 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), in mana_init_port_context()
845 return !apc->rxqs ? -ENOMEM : 0; in mana_init_port_context()
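mana_init_port_context() and mana_cleanup_port_context() form an alloc/free pair around the per-port RX queue pointer array; note that cleanup NULLs the pointer after kfree() so a later teardown pass is idempotent. Condensed from the lines above:

        static int mana_init_port_context(struct mana_port_context *apc)
        {
                /* One mana_rxq pointer per configured queue, zero-initialized. */
                apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
                                    GFP_KERNEL);
                return !apc->rxqs ? -ENOMEM : 0;
        }

        static void mana_cleanup_port_context(struct mana_port_context *apc)
        {
                debugfs_remove(apc->mana_port_debugfs);
                apc->mana_port_debugfs = NULL;
                kfree(apc->rxqs);
                apc->rxqs = NULL;       /* make repeated cleanup safe */
        }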
901 static int mana_pf_register_hw_vport(struct mana_port_context *apc) in mana_pf_register_hw_vport() argument
913 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_hw_vport()
916 netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err); in mana_pf_register_hw_vport()
923 netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n", in mana_pf_register_hw_vport()
928 apc->port_handle = resp.hw_vport_handle; in mana_pf_register_hw_vport()
932 static void mana_pf_deregister_hw_vport(struct mana_port_context *apc) in mana_pf_deregister_hw_vport() argument
940 req.hw_vport_handle = apc->port_handle; in mana_pf_deregister_hw_vport()
942 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_hw_vport()
945 if (mana_en_need_log(apc, err)) in mana_pf_deregister_hw_vport()
946 netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n", in mana_pf_deregister_hw_vport()
955 netdev_err(apc->ndev, in mana_pf_deregister_hw_vport()
960 static int mana_pf_register_filter(struct mana_port_context *apc) in mana_pf_register_filter() argument
968 req.vport = apc->port_handle; in mana_pf_register_filter()
969 memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN); in mana_pf_register_filter()
971 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_filter()
974 netdev_err(apc->ndev, "Failed to register filter: %d\n", err); in mana_pf_register_filter()
981 netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n", in mana_pf_register_filter()
986 apc->pf_filter_handle = resp.filter_handle; in mana_pf_register_filter()
990 static void mana_pf_deregister_filter(struct mana_port_context *apc) in mana_pf_deregister_filter() argument
998 req.filter_handle = apc->pf_filter_handle; in mana_pf_deregister_filter()
1000 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_filter()
1003 if (mana_en_need_log(apc, err)) in mana_pf_deregister_filter()
1004 netdev_err(apc->ndev, "Failed to unregister filter: %d\n", in mana_pf_deregister_filter()
1013 netdev_err(apc->ndev, in mana_pf_deregister_filter()
1070 static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index, in mana_query_vport_cfg() argument
1082 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_vport_cfg()
1102 netdev_warn(apc->ndev, in mana_query_vport_cfg()
1104 MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx); in mana_query_vport_cfg()
1108 apc->port_handle = resp.vport; in mana_query_vport_cfg()
1109 ether_addr_copy(apc->mac_addr, resp.mac_addr); in mana_query_vport_cfg()
1114 void mana_uncfg_vport(struct mana_port_context *apc) in mana_uncfg_vport() argument
1116 mutex_lock(&apc->vport_mutex); in mana_uncfg_vport()
1117 apc->vport_use_count--; in mana_uncfg_vport()
1118 WARN_ON(apc->vport_use_count < 0); in mana_uncfg_vport()
1119 mutex_unlock(&apc->vport_mutex); in mana_uncfg_vport()
1123 int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, in mana_cfg_vport() argument
1148 mutex_lock(&apc->vport_mutex); in mana_cfg_vport()
1149 if (apc->vport_use_count > 0) { in mana_cfg_vport()
1150 mutex_unlock(&apc->vport_mutex); in mana_cfg_vport()
1153 apc->vport_use_count++; in mana_cfg_vport()
1154 mutex_unlock(&apc->vport_mutex); in mana_cfg_vport()
1158 req.vport = apc->port_handle; in mana_cfg_vport()
1162 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_cfg_vport()
1165 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err); in mana_cfg_vport()
1172 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n", in mana_cfg_vport()
1180 apc->tx_shortform_allowed = resp.short_form_allowed; in mana_cfg_vport()
1181 apc->tx_vp_offset = resp.tx_vport_offset; in mana_cfg_vport()
1183 netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n", in mana_cfg_vport()
1184 apc->port_handle, protection_dom_id, doorbell_pg_id); in mana_cfg_vport()
1187 mana_uncfg_vport(apc); in mana_cfg_vport()
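The vport is guarded by a mutex-protected use count: mana_cfg_vport() fails fast if another user already holds it, and mana_uncfg_vport() warns on underflow. A sketch of the acquire/release protocol; the -EBUSY return is an assumption (the matched lines show only the early unlock-and-return):

        /* acquire (in mana_cfg_vport) */
        mutex_lock(&apc->vport_mutex);
        if (apc->vport_use_count > 0) {
                mutex_unlock(&apc->vport_mutex);
                return -EBUSY;          /* assumed error code */
        }
        apc->vport_use_count++;
        mutex_unlock(&apc->vport_mutex);

        /* release (mana_uncfg_vport) */
        mutex_lock(&apc->vport_mutex);
        apc->vport_use_count--;
        WARN_ON(apc->vport_use_count < 0);
        mutex_unlock(&apc->vport_mutex);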
1193 static int mana_cfg_vport_steering(struct mana_port_context *apc, in mana_cfg_vport_steering() argument
1200 struct net_device *ndev = apc->ndev; in mana_cfg_vport_steering()
1204 req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz); in mana_cfg_vport_steering()
1214 req->vport = apc->port_handle; in mana_cfg_vport_steering()
1215 req->num_indir_entries = apc->indir_table_sz; in mana_cfg_vport_steering()
1219 req->rss_enable = apc->rss_state; in mana_cfg_vport_steering()
1223 req->default_rxobj = apc->default_rxobj; in mana_cfg_vport_steering()
1227 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); in mana_cfg_vport_steering()
1230 memcpy(req->indir_tab, apc->rxobj_table, in mana_cfg_vport_steering()
1233 err = mana_send_request(apc->ac, req, req_buf_size, &resp, in mana_cfg_vport_steering()
1236 if (mana_en_need_log(apc, err)) in mana_cfg_vport_steering()
1256 apc->port_handle, apc->indir_table_sz); in mana_cfg_vport_steering()
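The steering request ends in a flexible array of indirection entries, so the buffer is sized with struct_size() and the RX-object table is copied in behind the fixed header. A sketch, assuming req is heap-allocated and indir_tab is the flexible member (the allocation itself is not in the matched lines):

        req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
        req = kzalloc(req_buf_size, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->vport = apc->port_handle;
        req->num_indir_entries = apc->indir_table_sz;
        /* One mana_handle_t per indirection slot. */
        memcpy(req->indir_tab, apc->rxobj_table,
               apc->indir_table_sz * sizeof(mana_handle_t));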
1262 int mana_query_link_cfg(struct mana_port_context *apc) in mana_query_link_cfg() argument
1264 struct net_device *ndev = apc->ndev; in mana_query_link_cfg()
1272 req.vport = apc->port_handle; in mana_query_link_cfg()
1275 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_link_cfg()
1302 apc->speed = resp.link_speed_mbps; in mana_query_link_cfg()
1303 apc->max_speed = resp.qos_speed_mbps; in mana_query_link_cfg()
1307 int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed, in mana_set_bw_clamp() argument
1312 struct net_device *ndev = apc->ndev; in mana_set_bw_clamp()
1317 req.vport = apc->port_handle; in mana_set_bw_clamp()
1321 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_set_bw_clamp()
1351 int mana_create_wq_obj(struct mana_port_context *apc, in mana_create_wq_obj() argument
1359 struct net_device *ndev = apc->ndev; in mana_create_wq_obj()
1373 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_create_wq_obj()
1406 void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, in mana_destroy_wq_obj() argument
1411 struct net_device *ndev = apc->ndev; in mana_destroy_wq_obj()
1419 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_destroy_wq_obj()
1422 if (mana_en_need_log(apc, err)) in mana_destroy_wq_obj()
1510 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq) in mana_fence_rq() argument
1522 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_fence_rq()
1525 netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n", in mana_fence_rq()
1532 netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n", in mana_fence_rq()
1541 netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n", in mana_fence_rq()
1549 static void mana_fence_rqs(struct mana_port_context *apc) in mana_fence_rqs() argument
1555 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_fence_rqs()
1556 rxq = apc->rxqs[rxq_idx]; in mana_fence_rqs()
1557 err = mana_fence_rq(apc, rxq); in mana_fence_rqs()
1580 static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) in mana_unmap_skb() argument
1583 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_unmap_skb()
1606 struct mana_port_context *apc; in mana_poll_tx_cq() local
1617 apc = netdev_priv(ndev); in mana_poll_tx_cq()
1653 apc->eth_stats.tx_cqe_err++; in mana_poll_tx_cq()
1664 apc->eth_stats.tx_cqe_unknown_type++; in mana_poll_tx_cq()
1678 mana_unmap_skb(skb, apc); in mana_poll_tx_cq()
1702 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { in mana_poll_tx_cq()
1704 apc->eth_stats.wake_queue++; in mana_poll_tx_cq()
1923 struct mana_port_context *apc; in mana_process_rx_cqe() local
1929 apc = netdev_priv(ndev); in mana_process_rx_cqe()
1943 apc->eth_stats.rx_coalesced_err++; in mana_process_rx_cqe()
1953 apc->eth_stats.rx_cqe_unknown_type++; in mana_process_rx_cqe()
2068 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq) in mana_deinit_cq() argument
2070 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_cq()
2078 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq) in mana_deinit_txq() argument
2080 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_txq()
2088 static void mana_destroy_txq(struct mana_port_context *apc) in mana_destroy_txq() argument
2093 if (!apc->tx_qp) in mana_destroy_txq()
2096 for (i = 0; i < apc->num_queues; i++) { in mana_destroy_txq()
2097 debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs); in mana_destroy_txq()
2098 apc->tx_qp[i].mana_tx_debugfs = NULL; in mana_destroy_txq()
2100 napi = &apc->tx_qp[i].tx_cq.napi; in mana_destroy_txq()
2101 if (apc->tx_qp[i].txq.napi_initialized) { in mana_destroy_txq()
2107 apc->tx_qp[i].txq.napi_initialized = false; in mana_destroy_txq()
2109 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); in mana_destroy_txq()
2111 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); in mana_destroy_txq()
2113 mana_deinit_txq(apc, &apc->tx_qp[i].txq); in mana_destroy_txq()
2116 kfree(apc->tx_qp); in mana_destroy_txq()
2117 apc->tx_qp = NULL; in mana_destroy_txq()
2120 static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx) in mana_create_txq_debugfs() argument
2122 struct mana_tx_qp *tx_qp = &apc->tx_qp[idx]; in mana_create_txq_debugfs()
2126 tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs); in mana_create_txq_debugfs()
2145 static int mana_create_txq(struct mana_port_context *apc, in mana_create_txq() argument
2148 struct mana_context *ac = apc->ac; in mana_create_txq()
2161 apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp), in mana_create_txq()
2163 if (!apc->tx_qp) in mana_create_txq()
2175 txq_size = apc->tx_queue_size * 32; in mana_create_txq()
2177 cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE; in mana_create_txq()
2181 for (i = 0; i < apc->num_queues; i++) { in mana_create_txq()
2182 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE; in mana_create_txq()
2185 txq = &apc->tx_qp[i].txq; in mana_create_txq()
2190 txq->vp_offset = apc->tx_vp_offset; in mana_create_txq()
2203 cq = &apc->tx_qp[i].tx_cq; in mana_create_txq()
2230 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ, in mana_create_txq()
2232 &apc->tx_qp[i].tx_object); in mana_create_txq()
2256 mana_create_txq_debugfs(apc, i); in mana_create_txq()
2271 apc->num_queues, err); in mana_create_txq()
2272 mana_destroy_txq(apc); in mana_create_txq()
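The SQ buffer is sized at 32 bytes per entry, which is presumably the minimum WQE size, so tx_queue_size bounds the number of WQEs the queue can hold; the completion queue is sized to match, one COMP_ENTRY_SIZE slot per entry. Condensed from the matched lines, with the 32-byte rationale stated as an assumption:

        /* 32 bytes is assumed to be the minimum WQE size, so
         * apc->tx_queue_size caps the number of WQEs the SQ can hold.
         */
        txq_size = apc->tx_queue_size * 32;

        /* One completion entry per possible WQE. */
        cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;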
2276 static void mana_destroy_rxq(struct mana_port_context *apc, in mana_destroy_rxq() argument
2280 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_destroy_rxq()
2305 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); in mana_destroy_rxq()
2307 mana_deinit_cq(apc, &rxq->rx_cq); in mana_destroy_rxq()
2368 static int mana_alloc_rx_wqe(struct mana_port_context *apc, in mana_alloc_rx_wqe() argument
2371 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_alloc_rx_wqe()
2388 ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq, in mana_alloc_rx_wqe()
2449 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, in mana_create_rxq() argument
2453 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_rxq()
2465 rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size), in mana_create_rxq()
2471 rxq->num_rx_buf = apc->rx_queue_size; in mana_create_rxq()
2485 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); in mana_create_rxq()
2527 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ, in mana_create_rxq()
2572 mana_destroy_rxq(apc, rxq, false); in mana_create_rxq()
2575 mana_deinit_cq(apc, cq); in mana_create_rxq()
2580 static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx) in mana_create_rxq_debugfs() argument
2585 rxq = apc->rxqs[idx]; in mana_create_rxq_debugfs()
2588 rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs); in mana_create_rxq_debugfs()
2602 static int mana_add_rx_queues(struct mana_port_context *apc, in mana_add_rx_queues() argument
2605 struct mana_context *ac = apc->ac; in mana_add_rx_queues()
2610 for (i = 0; i < apc->num_queues; i++) { in mana_add_rx_queues()
2611 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); in mana_add_rx_queues()
2620 apc->rxqs[i] = rxq; in mana_add_rx_queues()
2622 mana_create_rxq_debugfs(apc, i); in mana_add_rx_queues()
2625 apc->default_rxobj = apc->rxqs[0]->rxobj; in mana_add_rx_queues()
2630 static void mana_destroy_vport(struct mana_port_context *apc) in mana_destroy_vport() argument
2632 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_destroy_vport()
2636 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_destroy_vport()
2637 rxq = apc->rxqs[rxq_idx]; in mana_destroy_vport()
2641 mana_destroy_rxq(apc, rxq, true); in mana_destroy_vport()
2642 apc->rxqs[rxq_idx] = NULL; in mana_destroy_vport()
2645 mana_destroy_txq(apc); in mana_destroy_vport()
2646 mana_uncfg_vport(apc); in mana_destroy_vport()
2648 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) in mana_destroy_vport()
2649 mana_pf_deregister_hw_vport(apc); in mana_destroy_vport()
2652 static int mana_create_vport(struct mana_port_context *apc, in mana_create_vport() argument
2655 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_vport()
2658 apc->default_rxobj = INVALID_MANA_HANDLE; in mana_create_vport()
2660 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) { in mana_create_vport()
2661 err = mana_pf_register_hw_vport(apc); in mana_create_vport()
2666 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell); in mana_create_vport()
2670 return mana_create_txq(apc, net); in mana_create_vport()
2673 static int mana_rss_table_alloc(struct mana_port_context *apc) in mana_rss_table_alloc() argument
2675 if (!apc->indir_table_sz) { in mana_rss_table_alloc()
2676 netdev_err(apc->ndev, in mana_rss_table_alloc()
2678 apc->port_idx); in mana_rss_table_alloc()
2682 apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL); in mana_rss_table_alloc()
2683 if (!apc->indir_table) in mana_rss_table_alloc()
2686 apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL); in mana_rss_table_alloc()
2687 if (!apc->rxobj_table) { in mana_rss_table_alloc()
2688 kfree(apc->indir_table); in mana_rss_table_alloc()
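mana_rss_table_alloc() allocates the queue-index table and the parallel RX-object table together, unwinding the first allocation if the second fails. A sketch with assumed return values at the points the matched lines elide:

        static int mana_rss_table_alloc(struct mana_port_context *apc)
        {
                if (!apc->indir_table_sz)
                        return -EINVAL;         /* assumed error code */

                apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32),
                                           GFP_KERNEL);
                if (!apc->indir_table)
                        return -ENOMEM;

                apc->rxobj_table = kcalloc(apc->indir_table_sz,
                                           sizeof(mana_handle_t), GFP_KERNEL);
                if (!apc->rxobj_table) {
                        kfree(apc->indir_table);        /* unwind first alloc */
                        return -ENOMEM;
                }

                return 0;
        }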
2695 static void mana_rss_table_init(struct mana_port_context *apc) in mana_rss_table_init() argument
2699 for (i = 0; i < apc->indir_table_sz; i++) in mana_rss_table_init()
2700 apc->indir_table[i] = in mana_rss_table_init()
2701 ethtool_rxfh_indir_default(i, apc->num_queues); in mana_rss_table_init()
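ethtool_rxfh_indir_default() is the stock round-robin spread, equivalent to index % num_queues, so a freshly initialized table cycles evenly over the active RX queues:

        /* ethtool_rxfh_indir_default(i, n) == i % n:
         * slot i steers to queue i % num_queues.
         */
        for (i = 0; i < apc->indir_table_sz; i++)
                apc->indir_table[i] =
                        ethtool_rxfh_indir_default(i, apc->num_queues);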
2704 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, in mana_config_rss() argument
2712 for (i = 0; i < apc->indir_table_sz; i++) { in mana_config_rss()
2713 queue_idx = apc->indir_table[i]; in mana_config_rss()
2714 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; in mana_config_rss()
2718 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab); in mana_config_rss()
2722 mana_fence_rqs(apc); in mana_config_rss()
2727 void mana_query_gf_stats(struct mana_port_context *apc) in mana_query_gf_stats() argument
2731 struct net_device *ndev = apc->ndev; in mana_query_gf_stats()
2765 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_gf_stats()
2779 apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe; in mana_query_gf_stats()
2780 apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled; in mana_query_gf_stats()
2781 apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes; in mana_query_gf_stats()
2782 apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts; in mana_query_gf_stats()
2783 apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes; in mana_query_gf_stats()
2784 apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts; in mana_query_gf_stats()
2785 apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes; in mana_query_gf_stats()
2786 apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts; in mana_query_gf_stats()
2787 apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes; in mana_query_gf_stats()
2788 apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled; in mana_query_gf_stats()
2789 apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled; in mana_query_gf_stats()
2790 apc->eth_stats.hc_tx_err_inval_vportoffset_pkt = in mana_query_gf_stats()
2792 apc->eth_stats.hc_tx_err_vlan_enforcement = in mana_query_gf_stats()
2794 apc->eth_stats.hc_tx_err_eth_type_enforcement = in mana_query_gf_stats()
2796 apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement; in mana_query_gf_stats()
2797 apc->eth_stats.hc_tx_err_sqpdid_enforcement = in mana_query_gf_stats()
2799 apc->eth_stats.hc_tx_err_cqpdid_enforcement = in mana_query_gf_stats()
2801 apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation; in mana_query_gf_stats()
2802 apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob; in mana_query_gf_stats()
2803 apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes; in mana_query_gf_stats()
2804 apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts; in mana_query_gf_stats()
2805 apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes; in mana_query_gf_stats()
2806 apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts; in mana_query_gf_stats()
2807 apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes; in mana_query_gf_stats()
2808 apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts; in mana_query_gf_stats()
2809 apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes; in mana_query_gf_stats()
2810 apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma; in mana_query_gf_stats()
2813 void mana_query_phy_stats(struct mana_port_context *apc) in mana_query_phy_stats() argument
2817 struct net_device *ndev = apc->ndev; in mana_query_phy_stats()
2822 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_phy_stats()
2837 apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy; in mana_query_phy_stats()
2838 apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy; in mana_query_phy_stats()
2841 apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy; in mana_query_phy_stats()
2842 apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy; in mana_query_phy_stats()
2843 apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy; in mana_query_phy_stats()
2844 apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy; in mana_query_phy_stats()
2845 apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy; in mana_query_phy_stats()
2846 apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy; in mana_query_phy_stats()
2847 apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy; in mana_query_phy_stats()
2848 apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy; in mana_query_phy_stats()
2849 apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy; in mana_query_phy_stats()
2850 apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy; in mana_query_phy_stats()
2851 apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy; in mana_query_phy_stats()
2852 apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy; in mana_query_phy_stats()
2853 apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy; in mana_query_phy_stats()
2854 apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy; in mana_query_phy_stats()
2855 apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy; in mana_query_phy_stats()
2856 apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy; in mana_query_phy_stats()
2859 apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy; in mana_query_phy_stats()
2860 apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy; in mana_query_phy_stats()
2861 apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy; in mana_query_phy_stats()
2862 apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy; in mana_query_phy_stats()
2863 apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy; in mana_query_phy_stats()
2864 apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy; in mana_query_phy_stats()
2865 apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy; in mana_query_phy_stats()
2866 apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy; in mana_query_phy_stats()
2867 apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy; in mana_query_phy_stats()
2868 apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy; in mana_query_phy_stats()
2869 apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy; in mana_query_phy_stats()
2870 apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy; in mana_query_phy_stats()
2871 apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy; in mana_query_phy_stats()
2872 apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy; in mana_query_phy_stats()
2873 apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy; in mana_query_phy_stats()
2874 apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy; in mana_query_phy_stats()
2877 apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy; in mana_query_phy_stats()
2878 apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy; in mana_query_phy_stats()
2879 apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy; in mana_query_phy_stats()
2880 apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy; in mana_query_phy_stats()
2881 apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy; in mana_query_phy_stats()
2882 apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy; in mana_query_phy_stats()
2883 apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy; in mana_query_phy_stats()
2884 apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy; in mana_query_phy_stats()
2885 apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy; in mana_query_phy_stats()
2886 apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy; in mana_query_phy_stats()
2887 apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy; in mana_query_phy_stats()
2888 apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy; in mana_query_phy_stats()
2889 apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy; in mana_query_phy_stats()
2890 apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy; in mana_query_phy_stats()
2891 apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy; in mana_query_phy_stats()
2892 apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy; in mana_query_phy_stats()
2897 struct mana_port_context *apc = netdev_priv(ndev); in mana_init_port() local
2898 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_init_port()
2900 int port_idx = apc->port_idx; in mana_init_port()
2905 err = mana_init_port_context(apc); in mana_init_port()
2911 err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq, in mana_init_port()
2912 &apc->indir_table_sz); in mana_init_port()
2920 if (apc->max_queues > max_queues) in mana_init_port()
2921 apc->max_queues = max_queues; in mana_init_port()
2923 if (apc->num_queues > apc->max_queues) in mana_init_port()
2924 apc->num_queues = apc->max_queues; in mana_init_port()
2926 eth_hw_addr_set(ndev, apc->mac_addr); in mana_init_port()
2928 apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs); in mana_init_port()
2932 mana_cleanup_port_context(apc); in mana_init_port()
2938 struct mana_port_context *apc = netdev_priv(ndev); in mana_alloc_queues() local
2939 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_alloc_queues()
2942 err = mana_create_vport(apc, ndev); in mana_alloc_queues()
2944 netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err); in mana_alloc_queues()
2948 err = netif_set_real_num_tx_queues(ndev, apc->num_queues); in mana_alloc_queues()
2952 apc->num_queues, err); in mana_alloc_queues()
2956 err = mana_add_rx_queues(apc, ndev); in mana_alloc_queues()
2960 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; in mana_alloc_queues()
2962 err = netif_set_real_num_rx_queues(ndev, apc->num_queues); in mana_alloc_queues()
2966 apc->num_queues, err); in mana_alloc_queues()
2970 mana_rss_table_init(apc); in mana_alloc_queues()
2972 err = mana_config_rss(apc, TRI_STATE_TRUE, true, true); in mana_alloc_queues()
2978 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) { in mana_alloc_queues()
2979 err = mana_pf_register_filter(apc); in mana_alloc_queues()
2984 mana_chn_setxdp(apc, mana_xdp_get(apc)); in mana_alloc_queues()
2989 mana_destroy_vport(apc); in mana_alloc_queues()
2995 struct mana_port_context *apc = netdev_priv(ndev); in mana_attach() local
3004 if (apc->port_st_save) { in mana_attach()
3007 mana_cleanup_port_context(apc); in mana_attach()
3012 apc->port_is_up = apc->port_st_save; in mana_attach()
3017 if (apc->port_is_up) in mana_attach()
3027 struct mana_port_context *apc = netdev_priv(ndev); in mana_dealloc_queues() local
3029 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_dealloc_queues()
3035 if (apc->port_is_up) in mana_dealloc_queues()
3038 mana_chn_setxdp(apc, NULL); in mana_dealloc_queues()
3040 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) in mana_dealloc_queues()
3041 mana_pf_deregister_filter(apc); in mana_dealloc_queues()
3056 for (i = 0; i < apc->num_queues; i++) { in mana_dealloc_queues()
3057 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
3075 for (i = 0; i < apc->num_queues; i++) { in mana_dealloc_queues()
3076 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
3078 mana_unmap_skb(skb, apc); in mana_dealloc_queues()
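Teardown drains each TX queue's list of pending skbs, unmapping and freeing whatever the hardware never completed. A sketch of the drain loop; the pending_skbs queue name and the final counter reset are assumptions beyond the matched lines:

        for (i = 0; i < apc->num_queues; i++) {
                txq = &apc->tx_qp[i].txq;
                /* Unmap and free every skb still awaiting completion. */
                while ((skb = skb_dequeue(&txq->pending_skbs))) {
                        mana_unmap_skb(skb, apc);
                        dev_kfree_skb_any(skb);
                }
                atomic_set(&txq->pending_sends, 0);
        }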
3087 apc->rss_state = TRI_STATE_FALSE; in mana_dealloc_queues()
3088 err = mana_config_rss(apc, TRI_STATE_FALSE, false, false); in mana_dealloc_queues()
3089 if (err && mana_en_need_log(apc, err)) in mana_dealloc_queues()
3093 mana_destroy_vport(apc); in mana_dealloc_queues()
3100 struct mana_port_context *apc = netdev_priv(ndev); in mana_detach() local
3105 apc->port_st_save = apc->port_is_up; in mana_detach()
3106 apc->port_is_up = false; in mana_detach()
3114 if (apc->port_st_save) { in mana_detach()
3124 mana_cleanup_port_context(apc); in mana_detach()
3134 struct mana_port_context *apc; in mana_probe_port() local
3145 apc = netdev_priv(ndev); in mana_probe_port()
3146 apc->ac = ac; in mana_probe_port()
3147 apc->ndev = ndev; in mana_probe_port()
3148 apc->max_queues = gc->max_num_queues; in mana_probe_port()
3149 apc->num_queues = gc->max_num_queues; in mana_probe_port()
3150 apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE; in mana_probe_port()
3151 apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE; in mana_probe_port()
3152 apc->port_handle = INVALID_MANA_HANDLE; in mana_probe_port()
3153 apc->pf_filter_handle = INVALID_MANA_HANDLE; in mana_probe_port()
3154 apc->port_idx = port_idx; in mana_probe_port()
3156 mutex_init(&apc->vport_mutex); in mana_probe_port()
3157 apc->vport_use_count = 0; in mana_probe_port()
3172 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE); in mana_probe_port()
3178 err = mana_rss_table_alloc(apc); in mana_probe_port()
3201 debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed); in mana_probe_port()
3206 mana_cleanup_indir_table(apc); in mana_probe_port()
3208 mana_cleanup_port_context(apc); in mana_probe_port()
3452 struct mana_port_context *apc; in mana_remove() local
3464 apc = netdev_priv(ndev); in mana_remove()
3488 mana_cleanup_indir_table(apc); in mana_remove()