Lines Matching refs:mvdev
147 static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx) in is_index_valid() argument
149 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) { in is_index_valid()
150 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in is_index_valid()
156 return idx <= mvdev->max_idx; in is_index_valid()
170 mlx5_vdpa_info(mvdev, "%s\n", #_feature); \
176 mlx5_vdpa_info(mvdev, "%s\n", #_status); \
180 static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev) in mlx5_vdpa_is_little_endian() argument
183 (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1)); in mlx5_vdpa_is_little_endian()
186 static u16 mlx5vdpa16_to_cpu(struct mlx5_vdpa_dev *mvdev, __virtio16 val) in mlx5vdpa16_to_cpu() argument
188 return __virtio16_to_cpu(mlx5_vdpa_is_little_endian(mvdev), val); in mlx5vdpa16_to_cpu()
191 static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val) in cpu_to_mlx5vdpa16() argument
193 return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val); in cpu_to_mlx5vdpa16()
196 static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev) in ctrl_vq_idx() argument
198 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) in ctrl_vq_idx()
201 return mvdev->max_vqs; in ctrl_vq_idx()
204 static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx) in is_ctrl_vq_idx() argument
206 return idx == ctrl_vq_idx(mvdev); in is_ctrl_vq_idx()
209 static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set) in print_status() argument
212 mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n", in print_status()
218 mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get"); in print_status()
220 mlx5_vdpa_info(mvdev, "driver resets the device\n"); in print_status()
232 static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set) in print_features() argument
235 mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n", in print_features()
241 mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads"); in print_features()
243 mlx5_vdpa_info(mvdev, "all feature bits are cleared\n"); in print_features()
283 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis() local
290 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
292 mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err); in create_tis()
299 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
312 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
313 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
329 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
330 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
335 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
381 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
395 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
397 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
409 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
411 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
416 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
422 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
435 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
451 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
461 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
471 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
485 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
486 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
487 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
489 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
532 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
558 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
559 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
591 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
607 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
628 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
635 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
639 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
643 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
650 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in read_umem_params()
665 mlx5_vdpa_warn(&ndev->mvdev, in read_umem_params()
715 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
742 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
750 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
752 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
788 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
825 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
865 static bool counters_supported(const struct mlx5_vdpa_dev *mvdev) in counters_supported() argument
867 return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) & in counters_supported()
871 static bool msix_mode_supported(struct mlx5_vdpa_dev *mvdev) in msix_mode_supported() argument
873 return MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, event_mode) & in msix_mode_supported()
875 pci_msix_can_alloc_dyn(mvdev->mdev->pdev); in msix_mode_supported()
884 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_virtqueue() local
887 u64 features = filled ? mvdev->actual_features : mvdev->mlx_features; in create_virtqueue()
910 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
944 vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; in create_virtqueue()
948 vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; in create_virtqueue()
950 MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) in create_virtqueue()
956 vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; in create_virtqueue()
960 vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; in create_virtqueue()
971 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
972 if (counters_supported(&ndev->mvdev)) in create_virtqueue()
975 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
984 mlx5_vdpa_get_mr(mvdev, vq_mr); in create_virtqueue()
988 MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) { in create_virtqueue()
989 mlx5_vdpa_get_mr(mvdev, vq_desc_mr); in create_virtqueue()
1011 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
1014 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
1015 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
1021 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->vq_mr); in destroy_virtqueue()
1024 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->desc_mr); in destroy_virtqueue()
1054 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1066 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1083 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1101 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1145 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1206 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in fill_query_virtqueue_cmd()
1226 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in query_virtqueues() local
1231 WARN(start_vq + num_vqs > mvdev->max_vqs, "query vq range invalid [%d, %d), max_vqs: %u\n", in query_virtqueues()
1232 start_vq, start_vq + num_vqs, mvdev->max_vqs); in query_virtqueues()
1249 err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs); in query_virtqueues()
1251 mlx5_vdpa_err(mvdev, "error issuing query cmd for vq range [%d, %d): %d\n", in query_virtqueues()
1261 mlx5_vdpa_err(mvdev, "query vq %d failed, err: %d\n", vq_idx, err); in query_virtqueues()
1278 return ndev->mvdev.vdev.config->resume; in is_resumable()
1311 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in fill_modify_virtqueue_cmd() local
1323 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in fill_modify_virtqueue_cmd()
1345 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in fill_modify_virtqueue_cmd()
1348 u16 mlx_features = get_features(ndev->mvdev.actual_features); in fill_modify_virtqueue_cmd()
1357 vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; in fill_modify_virtqueue_cmd()
1366 desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; in fill_modify_virtqueue_cmd()
1368 if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) in fill_modify_virtqueue_cmd()
1381 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in modify_virtqueue_end() local
1384 unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]; in modify_virtqueue_end()
1385 struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid]; in modify_virtqueue_end()
1387 mlx5_vdpa_put_mr(mvdev, mvq->vq_mr); in modify_virtqueue_end()
1388 mlx5_vdpa_get_mr(mvdev, vq_mr); in modify_virtqueue_end()
1393 unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]; in modify_virtqueue_end()
1394 struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid]; in modify_virtqueue_end()
1396 mlx5_vdpa_put_mr(mvdev, mvq->desc_mr); in modify_virtqueue_end()
1397 mlx5_vdpa_get_mr(mvdev, desc_mr); in modify_virtqueue_end()
1414 if (!counters_supported(&ndev->mvdev)) in counter_set_alloc()
1421 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_alloc()
1423 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_alloc()
1437 if (!counters_supported(&ndev->mvdev)) in counter_set_dealloc()
1442 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid); in counter_set_dealloc()
1444 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in counter_set_dealloc()
1445 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1470 dev_name(&ndev->mvdev.vdev.dev), mvq->index); in alloc_vector()
1559 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in modify_virtqueues() local
1564 WARN(start_vq + num_vqs > mvdev->max_vqs, "modify vq range invalid [%d, %d), max_vqs: %u\n", in modify_virtqueues()
1565 start_vq, start_vq + num_vqs, mvdev->max_vqs); in modify_virtqueues()
1602 err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs); in modify_virtqueues()
1604 mlx5_vdpa_err(mvdev, "error issuing modify cmd for vq range [%d, %d)\n", in modify_virtqueues()
1617 mlx5_vdpa_err(mvdev, "modify vq %d failed, state: %d -> %d, err: %d\n", in modify_virtqueues()
1683 if (start_vq >= ndev->mvdev.max_vqs) in resume_vqs()
1704 mlx5_vdpa_warn(&ndev->mvdev, "vq %d is not resumable\n", mvq->index); in resume_vqs()
1711 mlx5_vdpa_err(&ndev->mvdev, "resume vq %u called from bad state %d\n", in resume_vqs()
1756 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1766 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1791 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid); in modify_rqt()
1801 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); in modify_rqt()
1811 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1834 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1851 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1863 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1883 node->ucast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1887 node->mcast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1898 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in add_steering_counters()
1909 mlx5_fc_destroy(ndev->mvdev.mdev, node->mcast_counter.counter); in remove_steering_counters()
1910 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in remove_steering_counters()
1939 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) { in mlx5_vdpa_add_mac_vlan_rules()
2098 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in setup_steering()
2100 mlx5_vdpa_err(&ndev->mvdev, "failed to get flow namespace\n"); in setup_steering()
2106 mlx5_vdpa_err(&ndev->mvdev, "failed to create flow table\n"); in setup_steering()
2130 static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd) in handle_ctrl_mac() argument
2132 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mac()
2133 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mac()
2139 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in handle_ctrl_mac()
2156 mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n", in handle_ctrl_mac()
2163 mlx5_vdpa_warn(mvdev, "failed to insert new MAC %pM into MPFS table\n", in handle_ctrl_mac()
2180 mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n"); in handle_ctrl_mac()
2184 mlx5_vdpa_warn(mvdev, "restore mac failed: Original MAC is zero\n"); in handle_ctrl_mac()
2192 mlx5_vdpa_warn(mvdev, "restore mac failed: delete MAC %pM from MPFS table failed\n", in handle_ctrl_mac()
2197 mlx5_vdpa_warn(mvdev, "restore mac failed: insert old MAC %pM into MPFS table failed\n", in handle_ctrl_mac()
2204 mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n"); in handle_ctrl_mac()
2219 static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps) in change_num_qps() argument
2221 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in change_num_qps()
2268 static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd) in handle_ctrl_mq() argument
2270 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mq()
2272 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mq()
2288 if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) in handle_ctrl_mq()
2295 newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs); in handle_ctrl_mq()
2305 if (!change_num_qps(mvdev, newqps)) in handle_ctrl_mq()
2316 static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd) in handle_ctrl_vlan() argument
2318 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_vlan()
2320 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_vlan()
2325 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) in handle_ctrl_vlan()
2334 id = mlx5vdpa16_to_cpu(mvdev, vlan); in handle_ctrl_vlan()
2345 id = mlx5vdpa16_to_cpu(mvdev, vlan); in handle_ctrl_vlan()
2361 struct mlx5_vdpa_dev *mvdev; in mlx5_cvq_kick_handler() local
2368 mvdev = wqent->mvdev; in mlx5_cvq_kick_handler()
2369 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_cvq_kick_handler()
2370 cvq = &mvdev->cvq; in mlx5_cvq_kick_handler()
2374 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_cvq_kick_handler()
2377 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_cvq_kick_handler()
2396 status = handle_ctrl_mac(mvdev, ctrl.cmd); in mlx5_cvq_kick_handler()
2399 status = handle_ctrl_mq(mvdev, ctrl.cmd); in mlx5_cvq_kick_handler()
2402 status = handle_ctrl_vlan(mvdev, ctrl.cmd); in mlx5_cvq_kick_handler()
2420 queue_work(mvdev->wq, &wqent->work); in mlx5_cvq_kick_handler()
2430 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_kick_vq() local
2431 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq()
2434 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_kick_vq()
2437 if (unlikely(is_ctrl_vq_idx(mvdev, idx))) { in mlx5_vdpa_kick_vq()
2438 if (!mvdev->wq || !mvdev->cvq.ready) in mlx5_vdpa_kick_vq()
2441 queue_work(mvdev->wq, &ndev->cvq_ent.work); in mlx5_vdpa_kick_vq()
2449 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
2455 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_address() local
2456 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address()
2459 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_address()
2462 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_address()
2463 mvdev->cvq.desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
2464 mvdev->cvq.device_addr = device_area; in mlx5_vdpa_set_vq_address()
2465 mvdev->cvq.driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
2479 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_num() local
2480 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num()
2483 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_num()
2486 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_num()
2487 struct mlx5_control_vq *cvq = &mvdev->cvq; in mlx5_vdpa_set_vq_num()
2500 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_cb() local
2501 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb()
2504 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_set_vq_cb()
2505 mvdev->cvq.event_cb = *cb; in mlx5_vdpa_set_vq_cb()
2518 static void set_cvq_ready(struct mlx5_vdpa_dev *mvdev, bool ready) in set_cvq_ready() argument
2520 struct mlx5_control_vq *cvq = &mvdev->cvq; in set_cvq_ready()
2531 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_ready() local
2532 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready()
2535 if (!mvdev->actual_features) in mlx5_vdpa_set_vq_ready()
2538 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_ready()
2541 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_ready()
2542 set_cvq_ready(mvdev, ready); in mlx5_vdpa_set_vq_ready()
2549 } else if (mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_vq_ready()
2559 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_ready() local
2560 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready()
2562 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_get_vq_ready()
2565 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_get_vq_ready()
2566 return mvdev->cvq.ready; in mlx5_vdpa_get_vq_ready()
2574 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_state() local
2575 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state()
2578 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_state()
2581 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_state()
2582 mvdev->cvq.vring.last_avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2588 mlx5_vdpa_warn(mvdev, "can't modify available index\n"); in mlx5_vdpa_set_vq_state()
2601 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_state() local
2602 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state()
2607 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_get_vq_state()
2610 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_get_vq_state()
2611 state->split.avail_index = mvdev->cvq.vring.last_avail_idx; in mlx5_vdpa_get_vq_state()
2631 mlx5_vdpa_err(mvdev, "failed to query virtqueue\n"); in mlx5_vdpa_get_vq_state()
2645 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_group() local
2647 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_get_vq_group()
2655 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_desc_group() local
2657 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_get_vq_desc_group()
2712 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_device_features() local
2713 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_device_features()
2715 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_device_features()
2716 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_device_features()
2719 static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features) in verify_driver_features() argument
2741 static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev, bool filled) in setup_virtqueues() argument
2743 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_virtqueues()
2747 for (i = 0; i < mvdev->max_vqs; i++) { in setup_virtqueues()
2766 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) in teardown_virtqueues()
2770 static void update_cvq_info(struct mlx5_vdpa_dev *mvdev) in update_cvq_info() argument
2772 if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) { in update_cvq_info()
2773 if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) { in update_cvq_info()
2775 mvdev->max_idx = mvdev->max_vqs; in update_cvq_info()
2780 mvdev->max_idx = 2; in update_cvq_info()
2784 mvdev->max_idx = 1; in update_cvq_info()
2807 static bool get_link_state(struct mlx5_vdpa_dev *mvdev) in get_link_state() argument
2809 if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) == in get_link_state()
2819 struct mlx5_vdpa_dev *mvdev; in update_carrier() local
2823 mvdev = wqent->mvdev; in update_carrier()
2824 ndev = to_mlx5_vdpa_ndev(mvdev); in update_carrier()
2825 if (get_link_state(mvdev)) in update_carrier()
2826 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in update_carrier()
2828 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in update_carrier()
2844 wqent->mvdev = &ndev->mvdev; in queue_link_work()
2846 queue_work(ndev->mvdev.wq, &wqent->work); in queue_link_work()
2856 if (ndev->mvdev.suspended) in event_handler()
2878 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_STATUS))) in register_link_notifier()
2882 mlx5_notifier_register(ndev->mvdev.mdev, &ndev->nb); in register_link_notifier()
2893 mlx5_notifier_unregister(ndev->mvdev.mdev, &ndev->nb); in unregister_link_notifier()
2894 if (ndev->mvdev.wq) in unregister_link_notifier()
2895 flush_workqueue(ndev->mvdev.wq); in unregister_link_notifier()
2905 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_driver_features() local
2906 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_driver_features()
2907 u64 old_features = mvdev->actual_features; in mlx5_vdpa_set_driver_features()
2911 print_features(mvdev, features, true); in mlx5_vdpa_set_driver_features()
2913 err = verify_driver_features(mvdev, features); in mlx5_vdpa_set_driver_features()
2917 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_driver_features()
2920 if (get_features(old_features) != get_features(mvdev->actual_features)) { in mlx5_vdpa_set_driver_features()
2921 for (int i = 0; i < mvdev->max_vqs; ++i) { in mlx5_vdpa_set_driver_features()
2936 diff_features = mvdev->mlx_features ^ mvdev->actual_features; in mlx5_vdpa_set_driver_features()
2939 update_cvq_info(mvdev); in mlx5_vdpa_set_driver_features()
2945 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_config_cb() local
2946 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_config_cb()
2969 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_status() local
2970 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status()
2972 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
2973 return ndev->mvdev.status; in mlx5_vdpa_get_status()
3004 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
3015 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
3027 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
3044 static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, in mlx5_vdpa_change_map() argument
3048 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_change_map()
3061 mlx5_vdpa_update_mr(mvdev, new_mr, asid); in mlx5_vdpa_change_map()
3063 for (int i = 0; i < mvdev->max_vqs; i++) in mlx5_vdpa_change_map()
3067 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended) in mlx5_vdpa_change_map()
3085 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in setup_vq_resources() local
3091 mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n"); in setup_vq_resources()
3101 err = setup_virtqueues(mvdev, filled); in setup_vq_resources()
3103 mlx5_vdpa_warn(mvdev, "setup_virtqueues\n"); in setup_vq_resources()
3109 mlx5_vdpa_warn(mvdev, "create_rqt\n"); in setup_vq_resources()
3115 mlx5_vdpa_warn(mvdev, "create_tir\n"); in setup_vq_resources()
3121 mlx5_vdpa_warn(mvdev, "setup_steering\n"); in setup_vq_resources()
3158 static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev) in setup_cvq_vring() argument
3160 struct mlx5_control_vq *cvq = &mvdev->cvq; in setup_cvq_vring()
3163 if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) { in setup_cvq_vring()
3166 err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, in setup_cvq_vring()
3180 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_status() local
3181 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status()
3184 print_status(mvdev, status, true); in mlx5_vdpa_set_status()
3188 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
3190 err = setup_cvq_vring(mvdev); in mlx5_vdpa_set_status()
3192 mlx5_vdpa_warn(mvdev, "failed to setup control VQ vring\n"); in mlx5_vdpa_set_status()
3203 mlx5_vdpa_warn(mvdev, "failed to resume VQs\n"); in mlx5_vdpa_set_status()
3209 mlx5_vdpa_warn(mvdev, "failed to setup driver\n"); in mlx5_vdpa_set_status()
3214 mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n"); in mlx5_vdpa_set_status()
3219 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
3226 mlx5_vdpa_clean_mrs(&ndev->mvdev); in mlx5_vdpa_set_status()
3227 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
3232 static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev) in init_group_to_asid_map() argument
3238 mvdev->mres.group2asid[i] = 0; in init_group_to_asid_map()
3241 static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev) in needs_vqs_reset() argument
3243 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in needs_vqs_reset()
3246 if (mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) in needs_vqs_reset()
3262 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_compat_reset() local
3263 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_compat_reset()
3266 print_status(mvdev, 0, true); in mlx5_vdpa_compat_reset()
3267 mlx5_vdpa_info(mvdev, "performing device reset\n"); in mlx5_vdpa_compat_reset()
3271 vq_reset = needs_vqs_reset(mvdev); in mlx5_vdpa_compat_reset()
3278 mlx5_vdpa_clean_mrs(&ndev->mvdev); in mlx5_vdpa_compat_reset()
3279 ndev->mvdev.status = 0; in mlx5_vdpa_compat_reset()
3280 ndev->mvdev.suspended = false; in mlx5_vdpa_compat_reset()
3282 ndev->mvdev.cvq.ready = false; in mlx5_vdpa_compat_reset()
3283 ndev->mvdev.cvq.received_desc = 0; in mlx5_vdpa_compat_reset()
3284 ndev->mvdev.cvq.completed_desc = 0; in mlx5_vdpa_compat_reset()
3285 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); in mlx5_vdpa_compat_reset()
3286 ndev->mvdev.actual_features = 0; in mlx5_vdpa_compat_reset()
3287 init_group_to_asid_map(mvdev); in mlx5_vdpa_compat_reset()
3288 ++mvdev->generation; in mlx5_vdpa_compat_reset()
3291 MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_compat_reset()
3292 if (mlx5_vdpa_create_dma_mr(mvdev)) in mlx5_vdpa_compat_reset()
3293 mlx5_vdpa_err(mvdev, "create MR failed\n"); in mlx5_vdpa_compat_reset()
3315 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_config() local
3316 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config()
3330 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_generation() local
3332 return mvdev->generation; in mlx5_vdpa_get_generation()
3335 static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, in set_map_data() argument
3345 new_mr = mlx5_vdpa_create_mr(mvdev, iotlb); in set_map_data()
3348 mlx5_vdpa_err(mvdev, "create map failed(%d)\n", err); in set_map_data()
3356 if (!mvdev->mres.mr[asid]) { in set_map_data()
3357 mlx5_vdpa_update_mr(mvdev, new_mr, asid); in set_map_data()
3359 err = mlx5_vdpa_change_map(mvdev, new_mr, asid); in set_map_data()
3361 mlx5_vdpa_err(mvdev, "change map failed(%d)\n", err); in set_map_data()
3366 return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid); in set_map_data()
3369 mlx5_vdpa_put_mr(mvdev, new_mr); in set_map_data()
3376 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_map() local
3377 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_map()
3381 err = set_map_data(mvdev, iotlb, asid); in mlx5_vdpa_set_map()
3388 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_reset_map() local
3389 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_reset_map()
3393 err = mlx5_vdpa_reset_mr(mvdev, asid); in mlx5_vdpa_reset_map()
3400 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_get_vq_dma_dev() local
3402 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_get_vq_dma_dev()
3405 return mvdev->vdev.dma_dev; in mlx5_get_vq_dma_dev()
3413 if (!msix_mode_supported(&ndev->mvdev)) in free_irqs()
3422 pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map); in free_irqs()
3429 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_free() local
3433 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
3436 mlx5_vdpa_clean_mrs(mvdev); in mlx5_vdpa_free()
3437 mlx5_vdpa_destroy_mr_resources(&ndev->mvdev); in mlx5_vdpa_free()
3438 mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx); in mlx5_vdpa_free()
3441 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in mlx5_vdpa_free()
3444 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
3452 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_get_vq_notification() local
3457 if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx)) in mlx5_get_vq_notification()
3464 if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT) in mlx5_get_vq_notification()
3467 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_notification()
3468 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr; in mlx5_get_vq_notification()
3476 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_get_vq_irq() local
3477 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_irq()
3480 if (!is_index_valid(mvdev, idx)) in mlx5_get_vq_irq()
3483 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_get_vq_irq()
3495 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_driver_features() local
3497 return mvdev->actual_features; in mlx5_vdpa_get_driver_features()
3509 if (!counters_supported(&ndev->mvdev)) in counter_set_query()
3519 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_query()
3522 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_query()
3536 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vendor_vq_stats() local
3537 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vendor_vq_stats()
3545 if (!is_index_valid(mvdev, idx)) { in mlx5_vdpa_get_vendor_vq_stats()
3551 if (idx == ctrl_vq_idx(mvdev)) { in mlx5_vdpa_get_vendor_vq_stats()
3552 cvq = &mvdev->cvq; in mlx5_vdpa_get_vendor_vq_stats()
3587 static void mlx5_vdpa_cvq_suspend(struct mlx5_vdpa_dev *mvdev) in mlx5_vdpa_cvq_suspend() argument
3591 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_vdpa_cvq_suspend()
3594 cvq = &mvdev->cvq; in mlx5_vdpa_cvq_suspend()
3600 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_suspend() local
3601 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_suspend()
3604 mlx5_vdpa_info(mvdev, "suspending device\n"); in mlx5_vdpa_suspend()
3608 mlx5_vdpa_cvq_suspend(mvdev); in mlx5_vdpa_suspend()
3609 mvdev->suspended = true; in mlx5_vdpa_suspend()
3617 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_resume() local
3621 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_resume()
3623 mlx5_vdpa_info(mvdev, "resuming device\n"); in mlx5_vdpa_resume()
3626 mvdev->suspended = false; in mlx5_vdpa_resume()
3637 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_set_group_asid() local
3643 mvdev->mres.group2asid[group] = asid; in mlx5_set_group_asid()
3645 mutex_lock(&mvdev->mres.lock); in mlx5_set_group_asid()
3646 if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid]) in mlx5_set_group_asid()
3647 err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid); in mlx5_set_group_asid()
3648 mutex_unlock(&mvdev->mres.lock); in mlx5_set_group_asid()
3712 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_fixed_resources()
3716 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_fixed_resources()
3729 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_fixed_resources()
3741 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_fixed_resources()
3750 for (i = 0; i < ndev->mvdev.max_vqs; ++i) { in mvqs_set_defaults()
3795 if (!msix_mode_supported(&ndev->mvdev)) in allocate_irqs()
3798 if (!ndev->mvdev.mdev->pdev) in allocate_irqs()
3801 ndev->irqp.entries = kcalloc(ndev->mvdev.max_vqs, sizeof(*ndev->irqp.entries), GFP_KERNEL); in allocate_irqs()
3806 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in allocate_irqs()
3809 dev_name(&ndev->mvdev.vdev.dev), i); in allocate_irqs()
3810 ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL); in allocate_irqs()
3824 struct mlx5_vdpa_dev *mvdev; in mlx5_vdpa_dev_add() local
3879 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mgtdev->vdpa_ops, in mlx5_vdpa_dev_add()
3884 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
3885 mvdev = &ndev->mvdev; in mlx5_vdpa_dev_add()
3886 mvdev->mdev = mdev; in mlx5_vdpa_dev_add()
3912 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu); in mlx5_vdpa_dev_add()
3916 if (get_link_state(mvdev)) in mlx5_vdpa_dev_add()
3917 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3919 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3947 mlx5_vdpa_warn(&ndev->mvdev, in mlx5_vdpa_dev_add()
3954 config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2); in mlx5_vdpa_dev_add()
3960 mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx); in mlx5_vdpa_dev_add()
3962 ndev->mvdev.mlx_features = device_features; in mlx5_vdpa_dev_add()
3963 mvdev->vdev.dma_dev = &mdev->pdev->dev; in mlx5_vdpa_dev_add()
3964 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3968 err = mlx5_vdpa_init_mr_resources(mvdev); in mlx5_vdpa_dev_add()
3972 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_dev_add()
3973 err = mlx5_vdpa_create_dma_mr(mvdev); in mlx5_vdpa_dev_add()
3982 ndev->cvq_ent.mvdev = mvdev; in mlx5_vdpa_dev_add()
3984 mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq"); in mlx5_vdpa_dev_add()
3985 if (!mvdev->wq) { in mlx5_vdpa_dev_add()
3990 mvdev->vdev.mdev = &mgtdev->mgtdev; in mlx5_vdpa_dev_add()
3991 err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1); in mlx5_vdpa_dev_add()
4010 _vdpa_unregister_device(&mvdev->vdev); in mlx5_vdpa_dev_add()
4012 destroy_workqueue(mvdev->wq); in mlx5_vdpa_dev_add()
4014 put_device(&mvdev->vdev.dev); in mlx5_vdpa_dev_add()
4021 struct mlx5_vdpa_dev *mvdev = to_mvdev(dev); in mlx5_vdpa_dev_del() local
4022 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_dev_del()
4032 wq = mvdev->wq; in mlx5_vdpa_dev_del()
4033 mvdev->wq = NULL; in mlx5_vdpa_dev_del()
4043 struct mlx5_vdpa_dev *mvdev; in mlx5_vdpa_set_attr() local
4048 mvdev = to_mvdev(dev); in mlx5_vdpa_set_attr()
4049 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_attr()
4050 mdev = mvdev->mdev; in mlx5_vdpa_set_attr()
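
Every entry above reaches the same core object in one of two ways: functions handed a struct mlx5_vdpa_dev *mvdev (or deriving it from the vdpa_device via to_mvdev()) use it directly, while net-specific code reaches it through the embedding wrapper as ndev->mvdev and converts back with to_mlx5_vdpa_ndev(). Below is a minimal, self-contained sketch of that relationship, not the driver's actual definitions: the struct bodies are abridged and illustrative only (the real ones live in the mlx5 vDPA driver sources), and container_of is redefined here purely so the snippet builds outside the kernel tree.

    #include <stddef.h>

    /* Userspace stand-in for the kernel macro of the same name. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct vdpa_device { int dummy; };  /* placeholder for the real vdpa core struct */

    /* Core mlx5 vDPA state: the "mvdev" the listing refers to (fields abridged). */
    struct mlx5_vdpa_dev {
            struct vdpa_device vdev;    /* embedded device registered with the vdpa core */
            unsigned long long actual_features;
            unsigned short max_idx;
    };

    /* Net wrapper: the "ndev" that embeds the core state (fields abridged). */
    struct mlx5_vdpa_net {
            struct mlx5_vdpa_dev mvdev;
    };

    /* vdpa ops receive a vdpa_device; walk back up the embeddings to driver state. */
    static struct mlx5_vdpa_dev *to_mvdev(struct vdpa_device *vdev)
    {
            return container_of(vdev, struct mlx5_vdpa_dev, vdev);
    }

    static struct mlx5_vdpa_net *to_mlx5_vdpa_ndev(struct mlx5_vdpa_dev *mvdev)
    {
            return container_of(mvdev, struct mlx5_vdpa_net, mvdev);
    }

    /* Typical usage pattern seen throughout the listing. */
    static void example(struct vdpa_device *vdev)
    {
            struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
            struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);

            (void)ndev->mvdev.actual_features;  /* same storage as mvdev->actual_features */
    }

Because mvdev is embedded in mlx5_vdpa_net, the two spellings in the listing (mvdev->... and ndev->mvdev...) name the same storage; which one appears depends only on whether the containing function was given the core device or the net wrapper.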