Lines matching +full:ctx +full:- +full:asid — hits from the mlx5 vDPA net driver (drivers/vdpa/mlx5/net/mlx5_vnet.c); each entry shows the source line number, the matched line, and its enclosing function.
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
47 #define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
149 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) { in is_index_valid()
150 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in is_index_valid()
156 return idx <= mvdev->max_idx; in is_index_valid()
179 /* TODO: cross-endian support */
183 (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1)); in mlx5_vdpa_is_little_endian()
198 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) in ctrl_vq_idx()
201 return mvdev->max_vqs; in ctrl_vq_idx()
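
The two helpers above encode the virtqueue index layout. A minimal sketch of that layout follows, using hypothetical demo_* names (not driver code): without VIRTIO_NET_F_MQ there is a single queue pair, so the control vq sits at index 2; with MQ it follows all data vqs at max_vqs.

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the ctrl_vq_idx() rule seen above. */
static unsigned int demo_ctrl_vq_idx(bool has_mq, unsigned int max_vqs)
{
        return has_mq ? max_vqs : 2;
}

int main(void)
{
        printf("ctrl vq (no MQ)      = %u\n", demo_ctrl_vq_idx(false, 16));
        printf("ctrl vq (MQ, 16 vqs) = %u\n", demo_ctrl_vq_idx(true, 16));
        return 0;
}
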
283 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis()
288 tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); in create_tis()
289 MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn); in create_tis()
290 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
299 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
307 struct mlx5_frag_buf *frag_buf = &buf->frag_buf; in cq_frag_buf_alloc()
312 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
313 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
317 mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc); in cq_frag_buf_alloc()
319 buf->cqe_size = MLX5_VDPA_CQE_SIZE; in cq_frag_buf_alloc()
320 buf->nent = nent; in cq_frag_buf_alloc()
327 struct mlx5_frag_buf *frag_buf = &umem->frag_buf; in umem_frag_buf_alloc()
329 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
330 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
335 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
340 return mlx5_frag_buf_get_wqe(&vcq->buf.fbc, n); in get_cqe()
349 for (i = 0; i < buf->nent; i++) { in cq_frag_buf_init()
352 cqe64->op_own = MLX5_CQE_INVALID << 4; in cq_frag_buf_init()
358 struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1)); in get_sw_cqe()
361 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe))) in get_sw_cqe()
369 vqp->head += n; in rx_post()
370 vqp->db.db[0] = cpu_to_be32(vqp->head); in rx_post()
380 vqp = fw ? &mvq->fwqp : &mvq->vqqp; in qp_prepare()
381 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
383 if (vqp->fw) { in qp_prepare()
395 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
397 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
398 MLX5_SET(qpc, qpc, log_page_size, vqp->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in qp_prepare()
400 MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn); in qp_prepare()
404 mlx5_fill_page_frag_array(&vqp->frag_buf, pas); in qp_prepare()
409 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
410 num_ent * sizeof(struct mlx5_wqe_data_seg), &vqp->frag_buf, in rq_buf_alloc()
411 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
416 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
422 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
429 if (!vqp->fw) { in qp_create()
430 vqp = &mvq->vqqp; in qp_create()
431 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
435 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
438 inlen += vqp->frag_buf.npages * sizeof(__be64); in qp_create()
443 err = -ENOMEM; in qp_create()
447 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
451 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
453 if (!vqp->fw) in qp_create()
454 MLX5_SET64(qpc, qpc, dbr_addr, vqp->db.dma); in qp_create()
461 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
462 vqp->mqp.qpn = MLX5_GET(create_qp_out, out, qpn); in qp_create()
464 if (!vqp->fw) in qp_create()
465 rx_post(vqp, mvq->num_ent); in qp_create()
470 if (!vqp->fw) in qp_create()
471 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
473 if (!vqp->fw) in qp_create()
484 MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn); in qp_destroy()
485 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
486 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
487 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
488 if (!vqp->fw) { in qp_destroy()
489 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
496 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
505 return -EAGAIN; in mlx5_vdpa_poll_one()
507 vcq->mcq.cons_index++; in mlx5_vdpa_poll_one()
513 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_handle_completions()
516 event_cb = &ndev->event_cbs[mvq->index]; in mlx5_vdpa_handle_completions()
517 mlx5_cq_set_ci(&mvq->cq.mcq); in mlx5_vdpa_handle_completions()
523 rx_post(&mvq->vqqp, num); in mlx5_vdpa_handle_completions()
524 if (event_cb->callback) in mlx5_vdpa_handle_completions()
525 event_cb->callback(event_cb->private); in mlx5_vdpa_handle_completions()
531 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp()
532 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
535 while (!mlx5_vdpa_poll_one(&mvq->cq)) { in mlx5_vdpa_cq_comp()
537 if (num > mvq->num_ent / 2) { in mlx5_vdpa_cq_comp()
552 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in mlx5_vdpa_cq_comp()
557 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create()
558 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
559 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
561 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_create()
569 err = mlx5_db_alloc(mdev, &vcq->db); in cq_create()
573 vcq->mcq.set_ci_db = vcq->db.db; in cq_create()
574 vcq->mcq.arm_db = vcq->db.db + 1; in cq_create()
575 vcq->mcq.cqe_sz = 64; in cq_create()
576 vcq->mcq.comp = mlx5_vdpa_cq_comp; in cq_create()
577 vcq->cqe = num_ent; in cq_create()
579 err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent); in cq_create()
583 cq_frag_buf_init(vcq, &vcq->buf); in cq_create()
586 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * vcq->buf.frag_buf.npages; in cq_create()
589 err = -ENOMEM; in cq_create()
593 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
595 mlx5_fill_page_frag_array(&vcq->buf.frag_buf, pas); in cq_create()
598 MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in cq_create()
609 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
611 MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma); in cq_create()
613 err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out)); in cq_create()
617 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in cq_create()
624 cq_frag_buf_free(ndev, &vcq->buf); in cq_create()
626 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
632 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy()
633 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
634 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_destroy()
636 if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) { in cq_destroy()
637 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
640 cq_frag_buf_free(ndev, &vcq->buf); in cq_destroy()
641 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
648 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in read_umem_params()
657 return -ENOMEM; in read_umem_params()
663 mlx5_vdpa_warn(&ndev->mvdev, in read_umem_params()
670 ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a); in read_umem_params()
671 ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b); in read_umem_params()
673 ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a); in read_umem_params()
674 ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b); in read_umem_params()
676 ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a); in read_umem_params()
677 ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b); in read_umem_params()
692 p_a = ndev->umem_1_buffer_param_a; in set_umem_size()
693 p_b = ndev->umem_1_buffer_param_b; in set_umem_size()
694 *umemp = &mvq->umem1; in set_umem_size()
697 p_a = ndev->umem_2_buffer_param_a; in set_umem_size()
698 p_b = ndev->umem_2_buffer_param_b; in set_umem_size()
699 *umemp = &mvq->umem2; in set_umem_size()
702 p_a = ndev->umem_3_buffer_param_a; in set_umem_size()
703 p_b = ndev->umem_3_buffer_param_b; in set_umem_size()
704 *umemp = &mvq->umem3; in set_umem_size()
708 (*umemp)->size = p_a * mvq->num_ent + p_b; in set_umem_size()
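
The line above is the whole umem sizing rule: firmware exposes an (a, b) parameter pair per umem and the driver scales it by the queue size. A small worked example with made-up parameter values (demo_* names are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* size = param_a * queue_size + param_b, computed per umem (1, 2 and 3). */
static uint64_t demo_umem_size(uint32_t param_a, uint32_t param_b, uint16_t num_ent)
{
        return (uint64_t)param_a * num_ent + param_b;
}

int main(void)
{
        /* e.g. a = 128, b = 4096, queue size 256 -> 36864 bytes */
        printf("umem size = %llu bytes\n",
               (unsigned long long)demo_umem_size(128, 4096, 256));
        return 0;
}
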
713 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
727 err = umem_frag_buf_alloc(ndev, umem, umem->size); in create_umem()
731 inlen = MLX5_ST_SZ_BYTES(create_umem_in) + MLX5_ST_SZ_BYTES(mtt) * umem->frag_buf.npages; in create_umem()
735 err = -ENOMEM; in create_umem()
740 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
742 MLX5_SET(umem, um, log_page_size, umem->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in create_umem()
743 MLX5_SET64(umem, um, num_of_mtt, umem->frag_buf.npages); in create_umem()
746 mlx5_fill_page_frag_array_perm(&umem->frag_buf, pas, MLX5_MTT_PERM_RW); in create_umem()
748 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
750 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
755 umem->id = MLX5_GET(create_umem_out, out, umem_id); in create_umem()
774 umem = &mvq->umem1; in umem_destroy()
777 umem = &mvq->umem2; in umem_destroy()
780 umem = &mvq->umem3; in umem_destroy()
785 MLX5_SET(destroy_umem_in, in, umem_id, umem->id); in umem_destroy()
786 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
805 for (num--; num > 0; num--) in umems_create()
815 for (num = 3; num > 0; num--) in umems_destroy()
823 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
865 return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) & in counters_supported()
871 return MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, event_mode) & in msix_mode_supported()
873 pci_msix_can_alloc_dyn(mvdev->mdev->pdev); in msix_mode_supported()
882 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_virtqueue()
885 u64 features = filled ? mvdev->actual_features : mvdev->mlx_features; in create_virtqueue()
899 err = -ENOMEM; in create_virtqueue()
908 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
918 if (vq_is_tx(mvq->index)) in create_virtqueue()
919 MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn); in create_virtqueue()
921 if (mvq->map.virq) { in create_virtqueue()
923 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->map.index); in create_virtqueue()
926 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn); in create_virtqueue()
929 MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index); in create_virtqueue()
930 MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent); in create_virtqueue()
935 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in create_virtqueue()
936 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); in create_virtqueue()
938 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in create_virtqueue()
939 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in create_virtqueue()
940 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in create_virtqueue()
942 vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; in create_virtqueue()
944 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey); in create_virtqueue()
946 vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; in create_virtqueue()
948 MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) in create_virtqueue()
949 MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey); in create_virtqueue()
954 vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; in create_virtqueue()
956 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY; in create_virtqueue()
958 vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; in create_virtqueue()
960 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; in create_virtqueue()
963 MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id); in create_virtqueue()
964 MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size); in create_virtqueue()
965 MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id); in create_virtqueue()
966 MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size); in create_virtqueue()
967 MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id); in create_virtqueue()
968 MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size); in create_virtqueue()
969 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
970 if (counters_supported(&ndev->mvdev)) in create_virtqueue()
971 MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id); in create_virtqueue()
973 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
977 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT; in create_virtqueue()
979 mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in create_virtqueue()
983 mvq->vq_mr = vq_mr; in create_virtqueue()
986 MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) { in create_virtqueue()
988 mvq->desc_mr = vq_desc_mr; in create_virtqueue()
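
Several hits above resolve a memory region through mres.mr[mres.group2asid[group]]. Below is a standalone sketch of that two-step lookup (vq group -> ASID -> MR); the demo_* structs and constants are simplified stand-ins, with DEMO_NUM_AS set to 2 here only to mirror what MLX5_VDPA_NUM_AS bounds in the hits.

#include <stddef.h>
#include <stdio.h>

#define DEMO_NUM_GROUPS 3  /* data vq, data vq descriptors, control vq */
#define DEMO_NUM_AS     2  /* stand-in for MLX5_VDPA_NUM_AS */

struct demo_mr { unsigned int mkey; };

struct demo_mres {
        unsigned int group2asid[DEMO_NUM_GROUPS]; /* vq group -> ASID */
        struct demo_mr *mr[DEMO_NUM_AS];          /* ASID -> memory region */
};

/* Pick the MR whose mkey gets programmed into the virtqueue context. */
static struct demo_mr *demo_group_mr(struct demo_mres *mres, unsigned int group)
{
        unsigned int asid = mres->group2asid[group];

        return asid < DEMO_NUM_AS ? mres->mr[asid] : NULL;
}

int main(void)
{
        struct demo_mr mr0 = { .mkey = 0x100 };
        struct demo_mres mres = { .group2asid = { 0, 0, 1 }, .mr = { &mr0, NULL } };
        struct demo_mr *mr = demo_group_mr(&mres, 0);

        printf("data vq group uses mkey 0x%x\n", mr ? mr->mkey : 0);
        return 0;
}
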
1008 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id); in destroy_virtqueue()
1009 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
1012 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
1013 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
1016 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in destroy_virtqueue()
1019 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->vq_mr); in destroy_virtqueue()
1020 mvq->vq_mr = NULL; in destroy_virtqueue()
1022 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->desc_mr); in destroy_virtqueue()
1023 mvq->desc_mr = NULL; in destroy_virtqueue()
1028 return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn; in get_rqpn()
1033 return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn; in get_qpn()
1052 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1064 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1081 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1099 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1141 return -ENOMEM; in modify_qp()
1143 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1199 void *cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr); in fill_query_virtqueue_cmd()
1203 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in fill_query_virtqueue_cmd()
1204 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in fill_query_virtqueue_cmd()
1211 void *obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, cmd->out, obj_context); in query_virtqueue_end()
1214 attr->state = MLX5_GET(virtio_net_q_object, obj_context, state); in query_virtqueue_end()
1215 attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index); in query_virtqueue_end()
1216 attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index); in query_virtqueue_end()
1224 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in query_virtqueues()
1229 WARN(start_vq + num_vqs > mvdev->max_vqs, "query vq range invalid [%d, %d), max_vqs: %u\n", in query_virtqueues()
1230 start_vq, start_vq + num_vqs, mvdev->max_vqs); in query_virtqueues()
1235 err = -ENOMEM; in query_virtqueues()
1244 fill_query_virtqueue_cmd(ndev, &ndev->vqs[start_vq + i], &cmd_mem[i]); in query_virtqueues()
1247 err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs); in query_virtqueues()
1258 if (cmd->err) { in query_virtqueues()
1259 mlx5_vdpa_err(mvdev, "query vq %d failed, err: %d\n", vq_idx, cmd->err); in query_virtqueues()
1261 err = cmd->err; in query_virtqueues()
1276 return ndev->mvdev.vdev.config->resume; in is_resumable()
1297 if (mvq->modified_fields & ~MLX5_VIRTQ_MODIFY_MASK_STATE) in modifiable_virtqueue_fields()
1298 return mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT || in modifiable_virtqueue_fields()
1299 mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND; in modifiable_virtqueue_fields()
1309 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in fill_modify_virtqueue_cmd()
1316 cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr); in fill_modify_virtqueue_cmd()
1320 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in fill_modify_virtqueue_cmd()
1321 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in fill_modify_virtqueue_cmd()
1323 obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, obj_context); in fill_modify_virtqueue_cmd()
1326 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) in fill_modify_virtqueue_cmd()
1329 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) { in fill_modify_virtqueue_cmd()
1330 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in fill_modify_virtqueue_cmd()
1331 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in fill_modify_virtqueue_cmd()
1332 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in fill_modify_virtqueue_cmd()
1335 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX) in fill_modify_virtqueue_cmd()
1336 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in fill_modify_virtqueue_cmd()
1338 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX) in fill_modify_virtqueue_cmd()
1339 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); in fill_modify_virtqueue_cmd()
1341 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_QUEUE_VIRTIO_VERSION) in fill_modify_virtqueue_cmd()
1343 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in fill_modify_virtqueue_cmd()
1345 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_QUEUE_FEATURES) { in fill_modify_virtqueue_cmd()
1346 u16 mlx_features = get_features(ndev->mvdev.actual_features); in fill_modify_virtqueue_cmd()
1354 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { in fill_modify_virtqueue_cmd()
1355 vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; in fill_modify_virtqueue_cmd()
1358 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey); in fill_modify_virtqueue_cmd()
1360 mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY; in fill_modify_virtqueue_cmd()
1363 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { in fill_modify_virtqueue_cmd()
1364 desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; in fill_modify_virtqueue_cmd()
1366 if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) in fill_modify_virtqueue_cmd()
1367 MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey); in fill_modify_virtqueue_cmd()
1369 mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; in fill_modify_virtqueue_cmd()
1372 MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields); in fill_modify_virtqueue_cmd()
1379 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in modify_virtqueue_end()
1381 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { in modify_virtqueue_end()
1382 unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]; in modify_virtqueue_end() local
1383 struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid]; in modify_virtqueue_end()
1385 mlx5_vdpa_put_mr(mvdev, mvq->vq_mr); in modify_virtqueue_end()
1387 mvq->vq_mr = vq_mr; in modify_virtqueue_end()
1390 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { in modify_virtqueue_end()
1391 unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]; in modify_virtqueue_end() local
1392 struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid]; in modify_virtqueue_end()
1394 mlx5_vdpa_put_mr(mvdev, mvq->desc_mr); in modify_virtqueue_end()
1396 mvq->desc_mr = desc_mr; in modify_virtqueue_end()
1399 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) in modify_virtqueue_end()
1400 mvq->fw_state = state; in modify_virtqueue_end()
1402 mvq->modified_fields = 0; in modify_virtqueue_end()
1412 if (!counters_supported(&ndev->mvdev)) in counter_set_alloc()
1419 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_alloc()
1421 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_alloc()
1425 mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in counter_set_alloc()
1435 if (!counters_supported(&ndev->mvdev)) in counter_set_dealloc()
1439 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id); in counter_set_dealloc()
1440 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid); in counter_set_dealloc()
1442 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in counter_set_dealloc()
1443 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1450 if (cb->callback) in mlx5_vdpa_int_handler()
1451 return cb->callback(cb->private); in mlx5_vdpa_int_handler()
1459 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in alloc_vector()
1464 for (i = 0; i < irqp->num_ent; i++) { in alloc_vector()
1465 ent = &irqp->entries[i]; in alloc_vector()
1466 if (!ent->used) { in alloc_vector()
1467 snprintf(ent->name, MLX5_VDPA_IRQ_NAME_LEN, "%s-vq-%d", in alloc_vector()
1468 dev_name(&ndev->mvdev.vdev.dev), mvq->index); in alloc_vector()
1469 ent->dev_id = &ndev->event_cbs[mvq->index]; in alloc_vector()
1470 err = request_irq(ent->map.virq, mlx5_vdpa_int_handler, 0, in alloc_vector()
1471 ent->name, ent->dev_id); in alloc_vector()
1475 ent->used = true; in alloc_vector()
1476 mvq->map = ent->map; in alloc_vector()
1485 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in dealloc_vector()
1488 for (i = 0; i < irqp->num_ent; i++) in dealloc_vector()
1489 if (mvq->map.virq == irqp->entries[i].map.virq) { in dealloc_vector()
1490 free_irq(mvq->map.virq, irqp->entries[i].dev_id); in dealloc_vector()
1491 irqp->entries[i].used = false; in dealloc_vector()
1500 u16 idx = mvq->index; in setup_vq()
1503 if (mvq->initialized) in setup_vq()
1506 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1510 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1514 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1531 mvq->initialized = true; in setup_vq()
1533 if (mvq->ready) { in setup_vq()
1547 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1549 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
1557 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in modify_virtqueues()
1562 WARN(start_vq + num_vqs > mvdev->max_vqs, "modify vq range invalid [%d, %d), max_vqs: %u\n", in modify_virtqueues()
1563 start_vq, start_vq + num_vqs, mvdev->max_vqs); in modify_virtqueues()
1568 err = -ENOMEM; in modify_virtqueues()
1577 mvq = &ndev->vqs[vq_idx]; in modify_virtqueues()
1580 err = -EINVAL; in modify_virtqueues()
1584 if (mvq->fw_state != state) { in modify_virtqueues()
1585 if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) { in modify_virtqueues()
1586 err = -EINVAL; in modify_virtqueues()
1590 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE; in modify_virtqueues()
1593 cmd->in = &cmd_mem[i].in; in modify_virtqueues()
1594 cmd->inlen = sizeof(cmd_mem[i].in); in modify_virtqueues()
1595 cmd->out = &cmd_mem[i].out; in modify_virtqueues()
1596 cmd->outlen = sizeof(cmd_mem[i].out); in modify_virtqueues()
1600 err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs); in modify_virtqueues()
1612 mvq = &ndev->vqs[vq_idx]; in modify_virtqueues()
1614 if (cmd->err) { in modify_virtqueues()
1615 mlx5_vdpa_err(mvdev, "modify vq %d failed, state: %d -> %d, err: %d\n", in modify_virtqueues()
1616 vq_idx, mvq->fw_state, state, err); in modify_virtqueues()
1618 err = cmd->err; in modify_virtqueues()
1638 if (start_vq >= ndev->cur_num_vqs) in suspend_vqs()
1639 return -EINVAL; in suspend_vqs()
1641 mvq = &ndev->vqs[start_vq]; in suspend_vqs()
1642 if (!mvq->initialized) in suspend_vqs()
1645 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in suspend_vqs()
1654 return -ENOMEM; in suspend_vqs()
1661 mvq = &ndev->vqs[vq_idx]; in suspend_vqs()
1662 mvq->avail_idx = attrs[i].available_index; in suspend_vqs()
1663 mvq->used_idx = attrs[i].used_index; in suspend_vqs()
1673 return suspend_vqs(ndev, mvq->index, 1); in suspend_vq()
1681 if (start_vq >= ndev->mvdev.max_vqs) in resume_vqs()
1682 return -EINVAL; in resume_vqs()
1684 mvq = &ndev->vqs[start_vq]; in resume_vqs()
1685 if (!mvq->initialized) in resume_vqs()
1688 if (mvq->index >= ndev->cur_num_vqs) in resume_vqs()
1691 switch (mvq->fw_state) { in resume_vqs()
1696 err = modify_virtqueues(ndev, start_vq, num_vqs, mvq->fw_state); in resume_vqs()
1702 mlx5_vdpa_warn(&ndev->mvdev, "vq %d is not resumable\n", mvq->index); in resume_vqs()
1703 return -EINVAL; in resume_vqs()
1709 mlx5_vdpa_err(&ndev->mvdev, "resume vq %u called from bad state %d\n", in resume_vqs()
1710 mvq->index, mvq->fw_state); in resume_vqs()
1711 return -EINVAL; in resume_vqs()
1719 return resume_vqs(ndev, mvq->index, 1); in resume_vq()
1724 if (!mvq->initialized) in teardown_vq()
1728 mvq->modified_fields = 0; in teardown_vq()
1732 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1733 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1734 cq_destroy(ndev, mvq->index); in teardown_vq()
1735 mvq->initialized = false; in teardown_vq()
1740 int rqt_table_size = roundup_pow_of_two(ndev->rqt_size); in create_rqt()
1741 int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2); in create_rqt()
1752 return -ENOMEM; in create_rqt()
1754 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1761 list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id); in create_rqt()
1764 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
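
The create_rqt() hit fills the RSS indirection table by wrapping over the receive virtqueues with a modulo, so a power-of-two table always points at valid queues. A tiny sketch of that fill pattern with example sizes; the j += 2 stepping over even (receive) vq indices is assumed from the surrounding driver code, not shown in the hits.

#include <stdio.h>

int main(void)
{
        unsigned int cur_num_vqs = 6;  /* 3 queue pairs -> RX vqs 0, 2, 4 */
        unsigned int rqt_entries = 4;  /* roundup_pow_of_two(cur_num_vqs / 2) */
        unsigned int i, j;

        for (i = 0, j = 0; i < rqt_entries; i++, j += 2)
                printf("rqt[%u] -> virtqueue %u\n", i, j % cur_num_vqs);
        return 0;
}
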
1787 return -ENOMEM; in modify_rqt()
1789 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid); in modify_rqt()
1791 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); in modify_rqt()
1796 list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id); in modify_rqt()
1799 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); in modify_rqt()
1809 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1830 return -ENOMEM; in create_tir()
1832 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1833 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); in create_tir()
1846 MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn); in create_tir()
1847 MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn); in create_tir()
1849 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1861 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1881 node->ucast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1882 if (IS_ERR(node->ucast_counter.counter)) in add_steering_counters()
1883 return PTR_ERR(node->ucast_counter.counter); in add_steering_counters()
1885 node->mcast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1886 if (IS_ERR(node->mcast_counter.counter)) { in add_steering_counters()
1887 err = PTR_ERR(node->mcast_counter.counter); in add_steering_counters()
1892 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; in add_steering_counters()
1896 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in add_steering_counters()
1907 mlx5_fc_destroy(ndev->mvdev.mdev, node->mcast_counter.counter); in remove_steering_counters()
1908 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in remove_steering_counters()
1927 return -ENOMEM; in mlx5_vdpa_add_mac_vlan_rules()
1929 vid = key2vid(node->macvlan); in mlx5_vdpa_add_mac_vlan_rules()
1930 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; in mlx5_vdpa_add_mac_vlan_rules()
1931 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); in mlx5_vdpa_add_mac_vlan_rules()
1932 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); in mlx5_vdpa_add_mac_vlan_rules()
1937 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) { in mlx5_vdpa_add_mac_vlan_rules()
1941 if (node->tagged) { in mlx5_vdpa_add_mac_vlan_rules()
1947 dests[0].tir_num = ndev->res.tirn; in mlx5_vdpa_add_mac_vlan_rules()
1953 dests[1].counter = node->ucast_counter.counter; in mlx5_vdpa_add_mac_vlan_rules()
1955 node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1956 if (IS_ERR(node->ucast_rule)) { in mlx5_vdpa_add_mac_vlan_rules()
1957 err = PTR_ERR(node->ucast_rule); in mlx5_vdpa_add_mac_vlan_rules()
1962 dests[1].counter = node->mcast_counter.counter; in mlx5_vdpa_add_mac_vlan_rules()
1969 node->mcast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1970 if (IS_ERR(node->mcast_rule)) { in mlx5_vdpa_add_mac_vlan_rules()
1971 err = PTR_ERR(node->mcast_rule); in mlx5_vdpa_add_mac_vlan_rules()
1979 mlx5_del_flow_rules(node->ucast_rule); in mlx5_vdpa_add_mac_vlan_rules()
1991 mlx5_del_flow_rules(node->ucast_rule); in mlx5_vdpa_del_mac_vlan_rules()
1992 mlx5_del_flow_rules(node->mcast_rule); in mlx5_vdpa_del_mac_vlan_rules()
2019 hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) { in mac_vlan_lookup()
2020 if (pos->macvlan == value) in mac_vlan_lookup()
2035 return -EEXIST; in mac_vlan_add()
2039 return -ENOMEM; in mac_vlan_add()
2041 ptr->tagged = tagged; in mac_vlan_add()
2042 ptr->macvlan = val; in mac_vlan_add()
2043 ptr->ndev = ndev; in mac_vlan_add()
2044 err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, ptr); in mac_vlan_add()
2049 hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]); in mac_vlan_add()
2065 hlist_del(&ptr->hlist); in mac_vlan_del()
2078 hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) { in clear_mac_vlan_table()
2079 hlist_del(&pos->hlist); in clear_mac_vlan_table()
2096 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in setup_steering()
2098 mlx5_vdpa_err(&ndev->mvdev, "failed to get flow namespace\n"); in setup_steering()
2099 return -EOPNOTSUPP; in setup_steering()
2102 ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); in setup_steering()
2103 if (IS_ERR(ndev->rxft)) { in setup_steering()
2104 mlx5_vdpa_err(&ndev->mvdev, "failed to create flow table\n"); in setup_steering()
2105 return PTR_ERR(ndev->rxft); in setup_steering()
2109 err = mac_vlan_add(ndev, ndev->config.mac, 0, false); in setup_steering()
2117 mlx5_destroy_flow_table(ndev->rxft); in setup_steering()
2125 mlx5_destroy_flow_table(ndev->rxft); in teardown_steering()
2131 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mac()
2137 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in handle_ctrl_mac()
2140 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)mac, ETH_ALEN); in handle_ctrl_mac()
2144 if (!memcmp(ndev->config.mac, mac, 6)) { in handle_ctrl_mac()
2152 if (!is_zero_ether_addr(ndev->config.mac)) { in handle_ctrl_mac()
2153 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
2155 ndev->config.mac); in handle_ctrl_mac()
2169 memcpy(mac_back, ndev->config.mac, ETH_ALEN); in handle_ctrl_mac()
2171 memcpy(ndev->config.mac, mac, ETH_ALEN); in handle_ctrl_mac()
2177 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) { in handle_ctrl_mac()
2189 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
2191 ndev->config.mac); in handle_ctrl_mac()
2199 memcpy(ndev->config.mac, mac_back, ETH_ALEN); in handle_ctrl_mac()
2201 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) in handle_ctrl_mac()
2220 int cur_vqs = ndev->cur_num_vqs; in change_num_qps()
2231 suspend_vqs(ndev, new_vqs, cur_vqs - new_vqs); in change_num_qps()
2234 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
2237 ndev->cur_num_vqs = new_vqs; in change_num_qps()
2239 ndev->cur_num_vqs = new_vqs; in change_num_qps()
2242 err = setup_vq(ndev, &ndev->vqs[i], false); in change_num_qps()
2247 err = resume_vqs(ndev, cur_vqs, new_vqs - cur_vqs); in change_num_qps()
2258 for (--i; i >= cur_vqs; --i) in change_num_qps()
2259 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
2261 ndev->cur_num_vqs = cur_vqs; in change_num_qps()
2270 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mq()
2277 /* This mq feature check aligns with pre-existing userspace in handle_ctrl_mq()
2281 * request down to a non-mq device that may cause kernel to in handle_ctrl_mq()
2284 * changing the number of vqs on a non-mq device. in handle_ctrl_mq()
2289 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq)); in handle_ctrl_mq()
2295 newqps > ndev->rqt_size) in handle_ctrl_mq()
2298 if (ndev->cur_num_vqs == 2 * newqps) { in handle_ctrl_mq()
2318 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_vlan()
2323 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) in handle_ctrl_vlan()
2328 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan)); in handle_ctrl_vlan()
2333 if (mac_vlan_add(ndev, ndev->config.mac, id, true)) in handle_ctrl_vlan()
2339 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan)); in handle_ctrl_vlan()
2344 mac_vlan_del(ndev, ndev->config.mac, id, true); in handle_ctrl_vlan()
2366 mvdev = wqent->mvdev; in mlx5_cvq_kick_handler()
2368 cvq = &mvdev->cvq; in mlx5_cvq_kick_handler()
2370 down_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2372 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_cvq_kick_handler()
2375 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_cvq_kick_handler()
2378 if (!cvq->ready) in mlx5_cvq_kick_handler()
2382 err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head, in mlx5_cvq_kick_handler()
2387 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &ctrl, sizeof(ctrl)); in mlx5_cvq_kick_handler()
2391 cvq->received_desc++; in mlx5_cvq_kick_handler()
2409 write = vringh_iov_push_iotlb(&cvq->vring, &cvq->wiov, &status, sizeof(status)); in mlx5_cvq_kick_handler()
2410 vringh_complete_iotlb(&cvq->vring, cvq->head, write); in mlx5_cvq_kick_handler()
2411 vringh_kiov_cleanup(&cvq->riov); in mlx5_cvq_kick_handler()
2412 vringh_kiov_cleanup(&cvq->wiov); in mlx5_cvq_kick_handler()
2414 if (vringh_need_notify_iotlb(&cvq->vring)) in mlx5_cvq_kick_handler()
2415 vringh_notify(&cvq->vring); in mlx5_cvq_kick_handler()
2417 cvq->completed_desc++; in mlx5_cvq_kick_handler()
2418 queue_work(mvdev->wq, &wqent->work); in mlx5_cvq_kick_handler()
2423 up_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2436 if (!mvdev->wq || !mvdev->cvq.ready) in mlx5_vdpa_kick_vq()
2439 queue_work(mvdev->wq, &ndev->cvq_ent.work); in mlx5_vdpa_kick_vq()
2443 mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
2444 if (unlikely(!mvq->ready)) in mlx5_vdpa_kick_vq()
2447 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
2458 return -EINVAL; in mlx5_vdpa_set_vq_address()
2461 mvdev->cvq.desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
2462 mvdev->cvq.device_addr = device_area; in mlx5_vdpa_set_vq_address()
2463 mvdev->cvq.driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
2467 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
2468 mvq->desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
2469 mvq->device_addr = device_area; in mlx5_vdpa_set_vq_address()
2470 mvq->driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
2471 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS; in mlx5_vdpa_set_vq_address()
2485 struct mlx5_control_vq *cvq = &mvdev->cvq; in mlx5_vdpa_set_vq_num()
2487 cvq->vring.vring.num = num; in mlx5_vdpa_set_vq_num()
2491 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
2492 ndev->needs_teardown |= num != mvq->num_ent; in mlx5_vdpa_set_vq_num()
2493 mvq->num_ent = num; in mlx5_vdpa_set_vq_num()
2501 ndev->event_cbs[idx] = *cb; in mlx5_vdpa_set_vq_cb()
2503 mvdev->cvq.event_cb = *cb; in mlx5_vdpa_set_vq_cb()
2510 if (!cvq->event_cb.callback) in mlx5_cvq_notify()
2513 cvq->event_cb.callback(cvq->event_cb.private); in mlx5_cvq_notify()
2518 struct mlx5_control_vq *cvq = &mvdev->cvq; in set_cvq_ready()
2520 cvq->ready = ready; in set_cvq_ready()
2524 cvq->vring.notify = mlx5_cvq_notify; in set_cvq_ready()
2533 if (!mvdev->actual_features) in mlx5_vdpa_set_vq_ready()
2544 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
2547 } else if (mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_vq_ready()
2552 mvq->ready = ready; in mlx5_vdpa_set_vq_ready()
2564 return mvdev->cvq.ready; in mlx5_vdpa_get_vq_ready()
2566 return ndev->vqs[idx].ready; in mlx5_vdpa_get_vq_ready()
2577 return -EINVAL; in mlx5_vdpa_set_vq_state()
2580 mvdev->cvq.vring.last_avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2584 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
2585 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) { in mlx5_vdpa_set_vq_state()
2587 return -EINVAL; in mlx5_vdpa_set_vq_state()
2590 mvq->used_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2591 mvq->avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2592 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX | in mlx5_vdpa_set_vq_state()
2606 return -EINVAL; in mlx5_vdpa_get_vq_state()
2609 state->split.avail_index = mvdev->cvq.vring.last_avail_idx; in mlx5_vdpa_get_vq_state()
2613 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
2618 if (!mvq->initialized) { in mlx5_vdpa_get_vq_state()
2623 state->split.avail_index = mvq->used_idx; in mlx5_vdpa_get_vq_state()
2627 err = query_virtqueues(ndev, mvq->index, 1, &attr); in mlx5_vdpa_get_vq_state()
2632 state->split.avail_index = attr.used_index; in mlx5_vdpa_get_vq_state()
2713 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_device_features()
2714 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_device_features()
2721 return -EOPNOTSUPP; in verify_driver_features()
2734 return -EINVAL; in verify_driver_features()
2745 for (i = 0; i < mvdev->max_vqs; i++) { in setup_virtqueues()
2746 err = setup_vq(ndev, &ndev->vqs[i], filled); in setup_virtqueues()
2754 for (--i; i >= 0; i--) in setup_virtqueues()
2755 teardown_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2764 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) in teardown_virtqueues()
2765 teardown_vq(ndev, &ndev->vqs[i]); in teardown_virtqueues()
2773 mvdev->max_idx = mvdev->max_vqs; in update_cvq_info()
2778 mvdev->max_idx = 2; in update_cvq_info()
2782 mvdev->max_idx = 1; in update_cvq_info()
2807 if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) == in get_link_state()
2821 mvdev = wqent->mvdev; in update_carrier()
2824 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in update_carrier()
2826 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in update_carrier()
2828 if (ndev->config_cb.callback) in update_carrier()
2829 ndev->config_cb.callback(ndev->config_cb.private); in update_carrier()
2840 return -ENOMEM; in queue_link_work()
2842 wqent->mvdev = &ndev->mvdev; in queue_link_work()
2843 INIT_WORK(&wqent->work, update_carrier); in queue_link_work()
2844 queue_work(ndev->mvdev.wq, &wqent->work); in queue_link_work()
2854 if (ndev->mvdev.suspended) in event_handler()
2858 switch (eqe->sub_type) { in event_handler()
2876 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_STATUS))) in register_link_notifier()
2879 ndev->nb.notifier_call = event_handler; in register_link_notifier()
2880 mlx5_notifier_register(ndev->mvdev.mdev, &ndev->nb); in register_link_notifier()
2881 ndev->nb_registered = true; in register_link_notifier()
2887 if (!ndev->nb_registered) in unregister_link_notifier()
2890 ndev->nb_registered = false; in unregister_link_notifier()
2891 mlx5_notifier_unregister(ndev->mvdev.mdev, &ndev->nb); in unregister_link_notifier()
2892 if (ndev->mvdev.wq) in unregister_link_notifier()
2893 flush_workqueue(ndev->mvdev.wq); in unregister_link_notifier()
2905 u64 old_features = mvdev->actual_features; in mlx5_vdpa_set_driver_features()
2915 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_driver_features()
2918 if (get_features(old_features) != get_features(mvdev->actual_features)) { in mlx5_vdpa_set_driver_features()
2919 for (int i = 0; i < mvdev->max_vqs; ++i) { in mlx5_vdpa_set_driver_features()
2920 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i]; in mlx5_vdpa_set_driver_features()
2922 mvq->modified_fields |= ( in mlx5_vdpa_set_driver_features()
2934 diff_features = mvdev->mlx_features ^ mvdev->actual_features; in mlx5_vdpa_set_driver_features()
2935 ndev->needs_teardown = !!(diff_features & NEEDS_TEARDOWN_MASK); in mlx5_vdpa_set_driver_features()
2946 ndev->config_cb = *cb; in mlx5_vdpa_set_config_cb()
2970 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
2971 return ndev->mvdev.status; in mlx5_vdpa_get_status()
2976 struct mlx5_vq_restore_info *ri = &mvq->ri; in save_channel_info()
2980 if (mvq->initialized) { in save_channel_info()
2981 err = query_virtqueues(ndev, mvq->index, 1, &attr); in save_channel_info()
2986 ri->avail_index = attr.available_index; in save_channel_info()
2987 ri->used_index = attr.used_index; in save_channel_info()
2988 ri->ready = mvq->ready; in save_channel_info()
2989 ri->num_ent = mvq->num_ent; in save_channel_info()
2990 ri->desc_addr = mvq->desc_addr; in save_channel_info()
2991 ri->device_addr = mvq->device_addr; in save_channel_info()
2992 ri->driver_addr = mvq->driver_addr; in save_channel_info()
2993 ri->map = mvq->map; in save_channel_info()
2994 ri->restore = true; in save_channel_info()
3002 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
3003 memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri)); in save_channels_info()
3004 save_channel_info(ndev, &ndev->vqs[i]); in save_channels_info()
3013 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
3014 memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in mlx5_clear_vqs()
3025 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
3026 mvq = &ndev->vqs[i]; in restore_channels_info()
3027 ri = &mvq->ri; in restore_channels_info()
3028 if (!ri->restore) in restore_channels_info()
3031 mvq->avail_idx = ri->avail_index; in restore_channels_info()
3032 mvq->used_idx = ri->used_index; in restore_channels_info()
3033 mvq->ready = ri->ready; in restore_channels_info()
3034 mvq->num_ent = ri->num_ent; in restore_channels_info()
3035 mvq->desc_addr = ri->desc_addr; in restore_channels_info()
3036 mvq->device_addr = ri->device_addr; in restore_channels_info()
3037 mvq->driver_addr = ri->driver_addr; in restore_channels_info()
3038 mvq->map = ri->map; in restore_channels_info()
3044 unsigned int asid) in mlx5_vdpa_change_map() argument
3050 suspend_vqs(ndev, 0, ndev->cur_num_vqs); in mlx5_vdpa_change_map()
3059 mlx5_vdpa_update_mr(mvdev, new_mr, asid); in mlx5_vdpa_change_map()
3061 for (int i = 0; i < mvdev->max_vqs; i++) in mlx5_vdpa_change_map()
3062 ndev->vqs[i].modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY | in mlx5_vdpa_change_map()
3065 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended) in mlx5_vdpa_change_map()
3075 resume_vqs(ndev, 0, ndev->cur_num_vqs); in mlx5_vdpa_change_map()
3083 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in setup_vq_resources()
3086 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in setup_vq_resources()
3088 if (ndev->setup) { in setup_vq_resources()
3122 ndev->setup = true; in setup_vq_resources()
3142 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in teardown_vq_resources()
3144 if (!ndev->setup) in teardown_vq_resources()
3152 ndev->setup = false; in teardown_vq_resources()
3153 ndev->needs_teardown = false; in teardown_vq_resources()
3158 struct mlx5_control_vq *cvq = &mvdev->cvq; in setup_cvq_vring()
3161 if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) { in setup_cvq_vring()
3162 u16 idx = cvq->vring.last_avail_idx; in setup_cvq_vring()
3164 err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, in setup_cvq_vring()
3165 cvq->vring.vring.num, false, in setup_cvq_vring()
3166 (struct vring_desc *)(uintptr_t)cvq->desc_addr, in setup_cvq_vring()
3167 (struct vring_avail *)(uintptr_t)cvq->driver_addr, in setup_cvq_vring()
3168 (struct vring_used *)(uintptr_t)cvq->device_addr); in setup_cvq_vring()
3171 cvq->vring.last_avail_idx = cvq->vring.last_used_idx = idx; in setup_cvq_vring()
3184 down_write(&ndev->reslock); in mlx5_vdpa_set_status()
3186 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
3195 if (ndev->needs_teardown) in mlx5_vdpa_set_status()
3198 if (ndev->setup) { in mlx5_vdpa_set_status()
3199 err = resume_vqs(ndev, 0, ndev->cur_num_vqs); in mlx5_vdpa_set_status()
3217 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
3218 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
3224 mlx5_vdpa_clean_mrs(&ndev->mvdev); in mlx5_vdpa_set_status()
3225 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
3227 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
3234 /* default mapping all groups are mapped to asid 0 */ in init_group_to_asid_map()
3236 mvdev->mres.group2asid[i] = 0; in init_group_to_asid_map()
3242 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[0]; in needs_vqs_reset()
3244 if (mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) in needs_vqs_reset()
3247 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT) in needs_vqs_reset()
3250 return mvq->modified_fields & ( in needs_vqs_reset()
3267 down_write(&ndev->reslock); in mlx5_vdpa_compat_reset()
3276 mlx5_vdpa_clean_mrs(&ndev->mvdev); in mlx5_vdpa_compat_reset()
3277 ndev->mvdev.status = 0; in mlx5_vdpa_compat_reset()
3278 ndev->mvdev.suspended = false; in mlx5_vdpa_compat_reset()
3279 ndev->cur_num_vqs = MLX5V_DEFAULT_VQ_COUNT; in mlx5_vdpa_compat_reset()
3280 ndev->mvdev.cvq.ready = false; in mlx5_vdpa_compat_reset()
3281 ndev->mvdev.cvq.received_desc = 0; in mlx5_vdpa_compat_reset()
3282 ndev->mvdev.cvq.completed_desc = 0; in mlx5_vdpa_compat_reset()
3283 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); in mlx5_vdpa_compat_reset()
3284 ndev->mvdev.actual_features = 0; in mlx5_vdpa_compat_reset()
3286 ++mvdev->generation; in mlx5_vdpa_compat_reset()
3289 MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_compat_reset()
3295 up_write(&ndev->reslock); in mlx5_vdpa_compat_reset()
3317 memcpy(buf, (u8 *)&ndev->config + offset, len); in mlx5_vdpa_get_config()
3330 return mvdev->generation; in mlx5_vdpa_get_generation()
3334 unsigned int asid) in set_map_data() argument
3339 if (asid >= MLX5_VDPA_NUM_AS) in set_map_data()
3340 return -EINVAL; in set_map_data()
3354 if (!mvdev->mres.mr[asid]) { in set_map_data()
3355 mlx5_vdpa_update_mr(mvdev, new_mr, asid); in set_map_data()
3357 err = mlx5_vdpa_change_map(mvdev, new_mr, asid); in set_map_data()
3364 return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid); in set_map_data()
3371 static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid, in mlx5_vdpa_set_map() argument
3376 int err = -EINVAL; in mlx5_vdpa_set_map()
3378 down_write(&ndev->reslock); in mlx5_vdpa_set_map()
3379 err = set_map_data(mvdev, iotlb, asid); in mlx5_vdpa_set_map()
3380 up_write(&ndev->reslock); in mlx5_vdpa_set_map()
3384 static int mlx5_vdpa_reset_map(struct vdpa_device *vdev, unsigned int asid) in mlx5_vdpa_reset_map() argument
3390 down_write(&ndev->reslock); in mlx5_vdpa_reset_map()
3391 err = mlx5_vdpa_reset_mr(mvdev, asid); in mlx5_vdpa_reset_map()
3392 up_write(&ndev->reslock); in mlx5_vdpa_reset_map()
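
The set_map_data() hits above distinguish the first mapping on an ASID from a remap: when no MR exists for that ASID yet, the new MR is installed directly; otherwise the heavier mlx5_vdpa_change_map() path runs (the change_map hits earlier suspend the vqs, update the MR, mark the mkeys for modification, and resume). A compact sketch of just that decision, with hypothetical demo_* names:

#include <stdbool.h>
#include <stdio.h>

enum demo_map_path { DEMO_INSTALL_MR, DEMO_CHANGE_MAP };

/* First map on an ASID installs the MR; a live ASID takes the change-map path. */
static enum demo_map_path demo_pick_map_path(bool asid_has_mr)
{
        return asid_has_mr ? DEMO_CHANGE_MAP : DEMO_INSTALL_MR;
}

int main(void)
{
        printf("fresh ASID:  %s\n",
               demo_pick_map_path(false) == DEMO_INSTALL_MR ? "install MR" : "change map");
        printf("mapped ASID: %s\n",
               demo_pick_map_path(true) == DEMO_CHANGE_MAP ? "change map" : "install MR");
        return 0;
}
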
3402 map.dma_dev = &vdev->dev; in mlx5_get_vq_map()
3404 map.dma_dev = mvdev->vdev.vmap.dma_dev; in mlx5_get_vq_map()
3414 if (!msix_mode_supported(&ndev->mvdev)) in free_irqs()
3417 if (!ndev->irqp.entries) in free_irqs()
3420 for (i = ndev->irqp.num_ent - 1; i >= 0; i--) { in free_irqs()
3421 ent = ndev->irqp.entries + i; in free_irqs()
3422 if (ent->map.virq) in free_irqs()
3423 pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map); in free_irqs()
3425 kfree(ndev->irqp.entries); in free_irqs()
3441 mlx5_vdpa_destroy_mr_resources(&ndev->mvdev); in mlx5_vdpa_free()
3442 if (!is_zero_ether_addr(ndev->config.mac)) { in mlx5_vdpa_free()
3443 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in mlx5_vdpa_free()
3444 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); in mlx5_vdpa_free()
3446 mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx); in mlx5_vdpa_free()
3447 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
3449 kfree(ndev->event_cbs); in mlx5_vdpa_free()
3450 kfree(ndev->vqs); in mlx5_vdpa_free()
3467 if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT) in mlx5_get_vq_notification()
3471 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr; in mlx5_get_vq_notification()
3484 return -EINVAL; in mlx5_get_vq_irq()
3487 return -EOPNOTSUPP; in mlx5_get_vq_irq()
3489 mvq = &ndev->vqs[idx]; in mlx5_get_vq_irq()
3490 if (!mvq->map.virq) in mlx5_get_vq_irq()
3491 return -EOPNOTSUPP; in mlx5_get_vq_irq()
3493 return mvq->map.virq; in mlx5_get_vq_irq()
3500 return mvdev->actual_features; in mlx5_vdpa_get_driver_features()
3509 void *ctx; in counter_set_query() local
3512 if (!counters_supported(&ndev->mvdev)) in counter_set_query()
3513 return -EOPNOTSUPP; in counter_set_query()
3515 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in counter_set_query()
3516 return -EAGAIN; in counter_set_query()
3522 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_query()
3523 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id); in counter_set_query()
3525 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_query()
3529 ctx = MLX5_ADDR_OF(query_virtio_q_counters_out, out, counters); in counter_set_query()
3530 *received_desc = MLX5_GET64(virtio_q_counters, ctx, received_desc); in counter_set_query()
3531 *completed_desc = MLX5_GET64(virtio_q_counters, ctx, completed_desc); in counter_set_query()
3547 down_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3550 err = -EINVAL; in mlx5_vdpa_get_vendor_vq_stats()
3555 cvq = &mvdev->cvq; in mlx5_vdpa_get_vendor_vq_stats()
3556 received_desc = cvq->received_desc; in mlx5_vdpa_get_vendor_vq_stats()
3557 completed_desc = cvq->completed_desc; in mlx5_vdpa_get_vendor_vq_stats()
3561 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vendor_vq_stats()
3569 err = -EMSGSIZE; in mlx5_vdpa_get_vendor_vq_stats()
3586 up_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3594 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_vdpa_cvq_suspend()
3597 cvq = &mvdev->cvq; in mlx5_vdpa_cvq_suspend()
3598 cvq->ready = false; in mlx5_vdpa_cvq_suspend()
3609 down_write(&ndev->reslock); in mlx5_vdpa_suspend()
3610 err = suspend_vqs(ndev, 0, ndev->cur_num_vqs); in mlx5_vdpa_suspend()
3612 mvdev->suspended = true; in mlx5_vdpa_suspend()
3613 up_write(&ndev->reslock); in mlx5_vdpa_suspend()
3628 down_write(&ndev->reslock); in mlx5_vdpa_resume()
3629 mvdev->suspended = false; in mlx5_vdpa_resume()
3630 err = resume_vqs(ndev, 0, ndev->cur_num_vqs); in mlx5_vdpa_resume()
3632 up_write(&ndev->reslock); in mlx5_vdpa_resume()
3638 unsigned int asid) in mlx5_set_group_asid() argument
3644 return -EINVAL; in mlx5_set_group_asid()
3646 mvdev->mres.group2asid[group] = asid; in mlx5_set_group_asid()
3648 mutex_lock(&mvdev->mres.lock); in mlx5_set_group_asid()
3649 if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid]) in mlx5_set_group_asid()
3650 err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid); in mlx5_set_group_asid()
3651 mutex_unlock(&mvdev->mres.lock); in mlx5_set_group_asid()
3705 *mtu = hw_mtu - MLX5V_ETH_HARD_MTU; in query_mtu()
3711 struct mlx5_vdpa_net_resources *res = &ndev->res; in alloc_fixed_resources()
3714 if (res->valid) { in alloc_fixed_resources()
3715 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_fixed_resources()
3716 return -EEXIST; in alloc_fixed_resources()
3719 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_fixed_resources()
3727 res->valid = true; in alloc_fixed_resources()
3732 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_fixed_resources()
3738 struct mlx5_vdpa_net_resources *res = &ndev->res; in free_fixed_resources()
3740 if (!res->valid) in free_fixed_resources()
3744 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_fixed_resources()
3745 res->valid = false; in free_fixed_resources()
3753 for (i = 0; i < ndev->mvdev.max_vqs; ++i) { in mvqs_set_defaults()
3754 mvq = &ndev->vqs[i]; in mvqs_set_defaults()
3756 mvq->index = i; in mvqs_set_defaults()
3757 mvq->ndev = ndev; in mvqs_set_defaults()
3758 mvq->fwqp.fw = true; in mvqs_set_defaults()
3759 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in mvqs_set_defaults()
3760 mvq->num_ent = MLX5V_DEFAULT_VQ_SIZE; in mvqs_set_defaults()
3779 return -ENOMEM; in config_func_mtu()
3798 if (!msix_mode_supported(&ndev->mvdev)) in allocate_irqs()
3801 if (!ndev->mvdev.mdev->pdev) in allocate_irqs()
3804 ndev->irqp.entries = kcalloc(ndev->mvdev.max_vqs, sizeof(*ndev->irqp.entries), GFP_KERNEL); in allocate_irqs()
3805 if (!ndev->irqp.entries) in allocate_irqs()
3809 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in allocate_irqs()
3810 ent = ndev->irqp.entries + i; in allocate_irqs()
3811 snprintf(ent->name, MLX5_VDPA_IRQ_NAME_LEN, "%s-vq-%d", in allocate_irqs()
3812 dev_name(&ndev->mvdev.vdev.dev), i); in allocate_irqs()
3813 ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL); in allocate_irqs()
3814 if (!ent->map.virq) in allocate_irqs()
3817 ndev->irqp.num_ent++; in allocate_irqs()
3835 if (mgtdev->ndev) in mlx5_vdpa_dev_add()
3836 return -ENOSPC; in mlx5_vdpa_dev_add()
3838 mdev = mgtdev->madev->mdev; in mlx5_vdpa_dev_add()
3839 device_features = mgtdev->mgtdev.supported_features; in mlx5_vdpa_dev_add()
3840 if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) { in mlx5_vdpa_dev_add()
3841 if (add_config->device_features & ~device_features) { in mlx5_vdpa_dev_add()
3842 dev_warn(mdev->device, in mlx5_vdpa_dev_add()
3844 add_config->device_features, device_features); in mlx5_vdpa_dev_add()
3845 return -EINVAL; in mlx5_vdpa_dev_add()
3847 device_features &= add_config->device_features; in mlx5_vdpa_dev_add()
3853 dev_warn(mdev->device, in mlx5_vdpa_dev_add()
3856 return -EOPNOTSUPP; in mlx5_vdpa_dev_add()
3861 dev_warn(mdev->device, "missing support for split virtqueues\n"); in mlx5_vdpa_dev_add()
3862 return -EOPNOTSUPP; in mlx5_vdpa_dev_add()
3868 dev_warn(mdev->device, in mlx5_vdpa_dev_add()
3871 return -EAGAIN; in mlx5_vdpa_dev_add()
3874 if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) { in mlx5_vdpa_dev_add()
3875 if (add_config->net.max_vq_pairs > max_vqs / 2) in mlx5_vdpa_dev_add()
3876 return -EINVAL; in mlx5_vdpa_dev_add()
3877 max_vqs = min_t(u32, max_vqs, 2 * add_config->net.max_vq_pairs); in mlx5_vdpa_dev_add()
3882 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mgtdev->vdpa_ops, in mlx5_vdpa_dev_add()
3887 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
3888 mvdev = &ndev->mvdev; in mlx5_vdpa_dev_add()
3889 mvdev->mdev = mdev; in mlx5_vdpa_dev_add()
3891 mvdev->actual_features = in mlx5_vdpa_dev_add()
3894 mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx); in mlx5_vdpa_dev_add()
3896 ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3897 ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3898 if (!ndev->vqs || !ndev->event_cbs) { in mlx5_vdpa_dev_add()
3899 err = -ENOMEM; in mlx5_vdpa_dev_add()
3902 ndev->cur_num_vqs = MLX5V_DEFAULT_VQ_COUNT; in mlx5_vdpa_dev_add()
3906 init_rwsem(&ndev->reslock); in mlx5_vdpa_dev_add()
3907 config = &ndev->config; in mlx5_vdpa_dev_add()
3909 if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) { in mlx5_vdpa_dev_add()
3910 err = config_func_mtu(mdev, add_config->net.mtu); in mlx5_vdpa_dev_add()
3920 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu); in mlx5_vdpa_dev_add()
3925 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3927 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3930 if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) { in mlx5_vdpa_dev_add()
3931 memcpy(ndev->config.mac, add_config->net.mac, ETH_ALEN); in mlx5_vdpa_dev_add()
3933 } else if ((add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) == 0 || in mlx5_vdpa_dev_add()
3935 err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac); in mlx5_vdpa_dev_add()
3940 if (!is_zero_ether_addr(config->mac)) { in mlx5_vdpa_dev_add()
3941 pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev)); in mlx5_vdpa_dev_add()
3942 err = mlx5_mpfs_add_mac(pfmdev, config->mac); in mlx5_vdpa_dev_add()
3945 } else if ((add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) == 0) { in mlx5_vdpa_dev_add()
3955 mlx5_vdpa_warn(&ndev->mvdev, in mlx5_vdpa_dev_add()
3957 err = -EINVAL; in mlx5_vdpa_dev_add()
3962 config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2); in mlx5_vdpa_dev_add()
3963 ndev->rqt_size = max_vqs / 2; in mlx5_vdpa_dev_add()
3965 ndev->rqt_size = 1; in mlx5_vdpa_dev_add()
3968 ndev->mvdev.mlx_features = device_features; in mlx5_vdpa_dev_add()
3969 mvdev->vdev.vmap.dma_dev = &mdev->pdev->dev; in mlx5_vdpa_dev_add()
3970 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3978 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_dev_add()
3988 ndev->cvq_ent.mvdev = mvdev; in mlx5_vdpa_dev_add()
3989 INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler); in mlx5_vdpa_dev_add()
3990 mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq"); in mlx5_vdpa_dev_add()
3991 if (!mvdev->wq) { in mlx5_vdpa_dev_add()
3992 err = -ENOMEM; in mlx5_vdpa_dev_add()
3996 mvdev->vdev.mdev = &mgtdev->mgtdev; in mlx5_vdpa_dev_add()
3997 err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1); in mlx5_vdpa_dev_add()
4001 mgtdev->ndev = ndev; in mlx5_vdpa_dev_add()
4003 /* For virtio-vdpa, the device was set up during device register. */ in mlx5_vdpa_dev_add()
4004 if (ndev->setup) in mlx5_vdpa_dev_add()
4007 down_write(&ndev->reslock); in mlx5_vdpa_dev_add()
4009 up_write(&ndev->reslock); in mlx5_vdpa_dev_add()
4016 _vdpa_unregister_device(&mvdev->vdev); in mlx5_vdpa_dev_add()
4018 destroy_workqueue(mvdev->wq); in mlx5_vdpa_dev_add()
4020 put_device(&mvdev->vdev.dev); in mlx5_vdpa_dev_add()
4034 down_write(&ndev->reslock); in mlx5_vdpa_dev_del()
4036 up_write(&ndev->reslock); in mlx5_vdpa_dev_del()
4038 wq = mvdev->wq; in mlx5_vdpa_dev_del()
4039 mvdev->wq = NULL; in mlx5_vdpa_dev_del()
4041 mgtdev->ndev = NULL; in mlx5_vdpa_dev_del()
4052 int err = -EOPNOTSUPP; in mlx5_vdpa_set_attr()
4056 mdev = mvdev->mdev; in mlx5_vdpa_set_attr()
4057 config = &ndev->config; in mlx5_vdpa_set_attr()
4059 down_write(&ndev->reslock); in mlx5_vdpa_set_attr()
4060 if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) { in mlx5_vdpa_set_attr()
4061 pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev)); in mlx5_vdpa_set_attr()
4062 err = mlx5_mpfs_add_mac(pfmdev, config->mac); in mlx5_vdpa_set_attr()
4064 ether_addr_copy(config->mac, add_config->net.mac); in mlx5_vdpa_set_attr()
4067 up_write(&ndev->reslock); in mlx5_vdpa_set_attr()
4087 struct mlx5_core_dev *mdev = madev->mdev; in mlx5v_probe()
4093 return -ENOMEM; in mlx5v_probe()
4095 mgtdev->mgtdev.ops = &mdev_ops; in mlx5v_probe()
4096 mgtdev->mgtdev.device = mdev->device; in mlx5v_probe()
4097 mgtdev->mgtdev.id_table = id_table; in mlx5v_probe()
4098 mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | in mlx5v_probe()
4102 mgtdev->mgtdev.max_supported_vqs = in mlx5v_probe()
4104 mgtdev->mgtdev.supported_features = get_supported_features(mdev); in mlx5v_probe()
4105 mgtdev->madev = madev; in mlx5v_probe()
4106 mgtdev->vdpa_ops = mlx5_vdpa_ops; in mlx5v_probe()
4109 mgtdev->vdpa_ops.get_vq_desc_group = NULL; in mlx5v_probe()
4112 mgtdev->vdpa_ops.resume = NULL; in mlx5v_probe()
4114 err = vdpa_mgmtdev_register(&mgtdev->mgtdev); in mlx5v_probe()
4132 vdpa_mgmtdev_unregister(&mgtdev->mgtdev); in mlx5v_remove()