Lines matching the identifier `bp` (full-word match)

28 static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,  in hwrm_cfa_vfr_alloc()  argument
35 rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_ALLOC); in hwrm_cfa_vfr_alloc()
40 resp = hwrm_req_hold(bp, req); in hwrm_cfa_vfr_alloc()
41 rc = hwrm_req_send(bp, req); in hwrm_cfa_vfr_alloc()
45 netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", in hwrm_cfa_vfr_alloc()
48 hwrm_req_drop(bp, req); in hwrm_cfa_vfr_alloc()
51 netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); in hwrm_cfa_vfr_alloc()
55 static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) in hwrm_cfa_vfr_free() argument
60 rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_FREE); in hwrm_cfa_vfr_free()
63 rc = hwrm_req_send(bp, req); in hwrm_cfa_vfr_free()
66 netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); in hwrm_cfa_vfr_free()
70 static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, in bnxt_hwrm_vfr_qcfg() argument
78 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); in bnxt_hwrm_vfr_qcfg()
82 req->fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid); in bnxt_hwrm_vfr_qcfg()
83 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_vfr_qcfg()
84 rc = hwrm_req_send(bp, req); in bnxt_hwrm_vfr_qcfg()
92 hwrm_req_drop(bp, req); in bnxt_hwrm_vfr_qcfg()
99 struct bnxt *bp = vf_rep->bp; in bnxt_vf_rep_open() local
102 if (netif_running(bp->dev)) { in bnxt_vf_rep_open()
153 struct bnxt *bp = vf_rep->bp; in bnxt_vf_rep_setup_tc_block_cb() local
154 int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; in bnxt_vf_rep_setup_tc_block_cb()
156 if (!bnxt_tc_flower_enabled(vf_rep->bp) || in bnxt_vf_rep_setup_tc_block_cb()
157 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) in bnxt_vf_rep_setup_tc_block_cb()
162 return bnxt_tc_setup_flower(bp, vf_fid, type_data); in bnxt_vf_rep_setup_tc_block_cb()
186 struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) in bnxt_get_vf_rep() argument
190 if (cfa_code && bp->cfa_code_map && BNXT_PF(bp)) { in bnxt_get_vf_rep()
191 vf_idx = bp->cfa_code_map[cfa_code]; in bnxt_get_vf_rep()
193 return bp->vf_reps[vf_idx]->dev; in bnxt_get_vf_rep()
198 void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) in bnxt_vf_rep_rx() argument
212 struct pci_dev *pf_pdev = vf_rep->bp->pdev; in bnxt_vf_rep_get_phys_port_name()
236 return bnxt_get_port_parent_id(vf_rep->bp->dev, ppid); in bnxt_vf_rep_get_port_parent_id()
262 void bnxt_vf_reps_close(struct bnxt *bp) in bnxt_vf_reps_close() argument
267 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) in bnxt_vf_reps_close()
270 num_vfs = pci_num_vf(bp->pdev); in bnxt_vf_reps_close()
272 vf_rep = bp->vf_reps[i]; in bnxt_vf_reps_close()
282 void bnxt_vf_reps_open(struct bnxt *bp) in bnxt_vf_reps_open() argument
286 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) in bnxt_vf_reps_open()
289 for (i = 0; i < pci_num_vf(bp->pdev); i++) { in bnxt_vf_reps_open()
291 if (bp->vf_reps[i]->tx_cfa_action != CFA_HANDLE_INVALID) in bnxt_vf_reps_open()
292 bnxt_vf_rep_open(bp->vf_reps[i]->dev); in bnxt_vf_reps_open()
296 static void __bnxt_free_one_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep) in __bnxt_free_one_vf_rep() argument
306 hwrm_cfa_vfr_free(bp, vf_rep->vf_idx); in __bnxt_free_one_vf_rep()
311 static void __bnxt_vf_reps_destroy(struct bnxt *bp) in __bnxt_vf_reps_destroy() argument
313 u16 num_vfs = pci_num_vf(bp->pdev); in __bnxt_vf_reps_destroy()
318 vf_rep = bp->vf_reps[i]; in __bnxt_vf_reps_destroy()
320 __bnxt_free_one_vf_rep(bp, vf_rep); in __bnxt_vf_reps_destroy()
332 kfree(bp->vf_reps); in __bnxt_vf_reps_destroy()
333 bp->vf_reps = NULL; in __bnxt_vf_reps_destroy()
336 void bnxt_vf_reps_destroy(struct bnxt *bp) in bnxt_vf_reps_destroy() argument
340 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) in bnxt_vf_reps_destroy()
343 if (!bp->vf_reps) in bnxt_vf_reps_destroy()
349 netdev_lock(bp->dev); in bnxt_vf_reps_destroy()
350 if (netif_running(bp->dev)) { in bnxt_vf_reps_destroy()
351 bnxt_close_nic(bp, false, false); in bnxt_vf_reps_destroy()
355 kfree(bp->cfa_code_map); in bnxt_vf_reps_destroy()
356 bp->cfa_code_map = NULL; in bnxt_vf_reps_destroy()
362 bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; in bnxt_vf_reps_destroy()
363 bnxt_open_nic(bp, false, false); in bnxt_vf_reps_destroy()
364 bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; in bnxt_vf_reps_destroy()
366 netdev_unlock(bp->dev); in bnxt_vf_reps_destroy()
371 __bnxt_vf_reps_destroy(bp); in bnxt_vf_reps_destroy()
379 void bnxt_vf_reps_free(struct bnxt *bp) in bnxt_vf_reps_free() argument
381 u16 num_vfs = pci_num_vf(bp->pdev); in bnxt_vf_reps_free()
384 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) in bnxt_vf_reps_free()
388 __bnxt_free_one_vf_rep(bp, bp->vf_reps[i]); in bnxt_vf_reps_free()
391 static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, in bnxt_alloc_vf_rep() argument
395 if (hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, &vf_rep->tx_cfa_action, in bnxt_alloc_vf_rep()
406 vf_rep->dst->u.port_info.lower_dev = bp->dev; in bnxt_alloc_vf_rep()
416 int bnxt_vf_reps_alloc(struct bnxt *bp) in bnxt_vf_reps_alloc() argument
418 u16 *cfa_code_map = bp->cfa_code_map, num_vfs = pci_num_vf(bp->pdev); in bnxt_vf_reps_alloc()
422 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) in bnxt_vf_reps_alloc()
432 vf_rep = bp->vf_reps[i]; in bnxt_vf_reps_alloc()
435 rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map); in bnxt_vf_reps_alloc()
443 netdev_info(bp->dev, "%s error=%d\n", __func__, rc); in bnxt_vf_reps_alloc()
444 bnxt_vf_reps_free(bp); in bnxt_vf_reps_alloc()
463 static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, in bnxt_vf_rep_netdev_init() argument
466 struct net_device *pf_dev = bp->dev; in bnxt_vf_rep_netdev_init()
469 SET_NETDEV_DEV(dev, &bp->pdev->dev); in bnxt_vf_rep_netdev_init()
480 bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx, in bnxt_vf_rep_netdev_init()
484 if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu)) in bnxt_vf_rep_netdev_init()
489 int bnxt_vf_reps_create(struct bnxt *bp) in bnxt_vf_reps_create() argument
491 u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev); in bnxt_vf_reps_create()
496 if (!(bp->flags & BNXT_FLAG_DSN_VALID)) in bnxt_vf_reps_create()
499 bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); in bnxt_vf_reps_create()
500 if (!bp->vf_reps) in bnxt_vf_reps_create()
504 cfa_code_map = kmalloc_array(MAX_CFA_CODE, sizeof(*bp->cfa_code_map), in bnxt_vf_reps_create()
521 bp->vf_reps[i] = vf_rep; in bnxt_vf_reps_create()
523 vf_rep->bp = bp; in bnxt_vf_reps_create()
527 rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map); in bnxt_vf_reps_create()
531 bnxt_vf_rep_netdev_init(bp, vf_rep, dev); in bnxt_vf_reps_create()
541 bp->cfa_code_map = cfa_code_map; in bnxt_vf_reps_create()
542 netif_keep_dst(bp->dev); in bnxt_vf_reps_create()
546 netdev_info(bp->dev, "%s error=%d\n", __func__, rc); in bnxt_vf_reps_create()
548 __bnxt_vf_reps_destroy(bp); in bnxt_vf_reps_create()
555 struct bnxt *bp = bnxt_get_bp_from_dl(devlink); in bnxt_dl_eswitch_mode_get() local
557 *mode = bp->eswitch_mode; in bnxt_dl_eswitch_mode_get()
564 struct bnxt *bp = bnxt_get_bp_from_dl(devlink); in bnxt_dl_eswitch_mode_set() local
567 if (bp->eswitch_mode == mode) { in bnxt_dl_eswitch_mode_set()
568 netdev_info(bp->dev, "already in %s eswitch mode\n", in bnxt_dl_eswitch_mode_set()
576 bnxt_vf_reps_destroy(bp); in bnxt_dl_eswitch_mode_set()
580 if (bp->hwrm_spec_code < 0x10803) { in bnxt_dl_eswitch_mode_set()
581 netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n"); in bnxt_dl_eswitch_mode_set()
586 if (pci_num_vf(bp->pdev) > 0) in bnxt_dl_eswitch_mode_set()
587 ret = bnxt_vf_reps_create(bp); in bnxt_dl_eswitch_mode_set()
595 bp->eswitch_mode = mode; in bnxt_dl_eswitch_mode_set()