Lines Matching +full:mpfs +full:- +full:can

1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
20 #include <linux/mlx5/mpfs.h>
47 #define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
149 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) { in is_index_valid()
150 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in is_index_valid()
156 return idx <= mvdev->max_idx; in is_index_valid()
179 /* TODO: cross-endian support */
183 (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1)); in mlx5_vdpa_is_little_endian()
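
Line 183 above is the feature-bit half of the driver's little-endian test (the TODO at line 179 notes cross-endian is unhandled). For context, a hedged reconstruction of mlx5_vdpa_is_little_endian() consistent with that line; the virtio_legacy_is_little_endian() operand is assumed, not taken from the matched lines:

        /* Assumed reconstruction around line 183: VERSION_1 devices are always
         * little endian, legacy devices follow virtio_legacy_is_little_endian().
         */
        static bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
        {
                return virtio_legacy_is_little_endian() ||
                       (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
        }
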
198 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) in ctrl_vq_idx()
201 return mvdev->max_vqs; in ctrl_vq_idx()
283 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis()
289 MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn); in create_tis()
290 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
299 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
307 struct mlx5_frag_buf *frag_buf = &buf->frag_buf; in cq_frag_buf_alloc()
312 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
313 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
317 mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc); in cq_frag_buf_alloc()
319 buf->cqe_size = MLX5_VDPA_CQE_SIZE; in cq_frag_buf_alloc()
320 buf->nent = nent; in cq_frag_buf_alloc()
327 struct mlx5_frag_buf *frag_buf = &umem->frag_buf; in umem_frag_buf_alloc()
329 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
330 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
335 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
340 return mlx5_frag_buf_get_wqe(&vcq->buf.fbc, n); in get_cqe()
349 for (i = 0; i < buf->nent; i++) { in cq_frag_buf_init()
352 cqe64->op_own = MLX5_CQE_INVALID << 4; in cq_frag_buf_init()
358 struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1)); in get_sw_cqe()
361 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe))) in get_sw_cqe()
369 vqp->head += n; in rx_post()
370 vqp->db.db[0] = cpu_to_be32(vqp->head); in rx_post()
380 vqp = fw ? &mvq->fwqp : &mvq->vqqp; in qp_prepare()
381 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
383 if (vqp->fw) { in qp_prepare()
385 * use so we can skip part of the params as they will be chosen by firmware in qp_prepare()
395 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
397 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
398 MLX5_SET(qpc, qpc, log_page_size, vqp->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in qp_prepare()
400 MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn); in qp_prepare()
404 mlx5_fill_page_frag_array(&vqp->frag_buf, pas); in qp_prepare()
409 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
410 num_ent * sizeof(struct mlx5_wqe_data_seg), &vqp->frag_buf, in rq_buf_alloc()
411 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
416 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
422 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
429 if (!vqp->fw) { in qp_create()
430 vqp = &mvq->vqqp; in qp_create()
431 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
435 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
438 inlen += vqp->frag_buf.npages * sizeof(__be64); in qp_create()
443 err = -ENOMEM; in qp_create()
447 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
451 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
453 if (!vqp->fw) in qp_create()
454 MLX5_SET64(qpc, qpc, dbr_addr, vqp->db.dma); in qp_create()
461 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
462 vqp->mqp.qpn = MLX5_GET(create_qp_out, out, qpn); in qp_create()
464 if (!vqp->fw) in qp_create()
465 rx_post(vqp, mvq->num_ent); in qp_create()
470 if (!vqp->fw) in qp_create()
471 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
473 if (!vqp->fw) in qp_create()
484 MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn); in qp_destroy()
485 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
486 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
487 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
488 if (!vqp->fw) { in qp_destroy()
489 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
496 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
505 return -EAGAIN; in mlx5_vdpa_poll_one()
507 vcq->mcq.cons_index++; in mlx5_vdpa_poll_one()
513 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_handle_completions()
516 event_cb = &ndev->event_cbs[mvq->index]; in mlx5_vdpa_handle_completions()
517 mlx5_cq_set_ci(&mvq->cq.mcq); in mlx5_vdpa_handle_completions()
523 rx_post(&mvq->vqqp, num); in mlx5_vdpa_handle_completions()
524 if (event_cb->callback) in mlx5_vdpa_handle_completions()
525 event_cb->callback(event_cb->private); in mlx5_vdpa_handle_completions()
531 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp()
532 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
535 while (!mlx5_vdpa_poll_one(&mvq->cq)) { in mlx5_vdpa_cq_comp()
537 if (num > mvq->num_ent / 2) { in mlx5_vdpa_cq_comp()
552 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in mlx5_vdpa_cq_comp()
557 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create()
558 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
559 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
561 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_create()
569 err = mlx5_db_alloc(mdev, &vcq->db); in cq_create()
573 vcq->mcq.set_ci_db = vcq->db.db; in cq_create()
574 vcq->mcq.arm_db = vcq->db.db + 1; in cq_create()
575 vcq->mcq.cqe_sz = 64; in cq_create()
577 err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent); in cq_create()
581 cq_frag_buf_init(vcq, &vcq->buf); in cq_create()
584 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * vcq->buf.frag_buf.npages; in cq_create()
587 err = -ENOMEM; in cq_create()
591 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
593 mlx5_fill_page_frag_array(&vcq->buf.frag_buf, pas); in cq_create()
596 MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in cq_create()
607 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
609 MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma); in cq_create()
611 err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out)); in cq_create()
615 vcq->mcq.comp = mlx5_vdpa_cq_comp; in cq_create()
616 vcq->cqe = num_ent; in cq_create()
617 vcq->mcq.set_ci_db = vcq->db.db; in cq_create()
618 vcq->mcq.arm_db = vcq->db.db + 1; in cq_create()
619 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in cq_create()
626 cq_frag_buf_free(ndev, &vcq->buf); in cq_create()
628 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
634 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy()
635 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
636 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_destroy()
638 if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) { in cq_destroy()
639 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
642 cq_frag_buf_free(ndev, &vcq->buf); in cq_destroy()
643 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
650 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in read_umem_params()
659 return -ENOMEM; in read_umem_params()
665 mlx5_vdpa_warn(&ndev->mvdev, in read_umem_params()
672 ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a); in read_umem_params()
673 ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b); in read_umem_params()
675 ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a); in read_umem_params()
676 ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b); in read_umem_params()
678 ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a); in read_umem_params()
679 ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b); in read_umem_params()
694 p_a = ndev->umem_1_buffer_param_a; in set_umem_size()
695 p_b = ndev->umem_1_buffer_param_b; in set_umem_size()
696 *umemp = &mvq->umem1; in set_umem_size()
699 p_a = ndev->umem_2_buffer_param_a; in set_umem_size()
700 p_b = ndev->umem_2_buffer_param_b; in set_umem_size()
701 *umemp = &mvq->umem2; in set_umem_size()
704 p_a = ndev->umem_3_buffer_param_a; in set_umem_size()
705 p_b = ndev->umem_3_buffer_param_b; in set_umem_size()
706 *umemp = &mvq->umem3; in set_umem_size()
710 (*umemp)->size = p_a * mvq->num_ent + p_b; in set_umem_size()
715 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
729 err = umem_frag_buf_alloc(ndev, umem, umem->size); in create_umem()
733 inlen = MLX5_ST_SZ_BYTES(create_umem_in) + MLX5_ST_SZ_BYTES(mtt) * umem->frag_buf.npages; in create_umem()
737 err = -ENOMEM; in create_umem()
742 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
744 MLX5_SET(umem, um, log_page_size, umem->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in create_umem()
745 MLX5_SET64(umem, um, num_of_mtt, umem->frag_buf.npages); in create_umem()
748 mlx5_fill_page_frag_array_perm(&umem->frag_buf, pas, MLX5_MTT_PERM_RW); in create_umem()
750 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
752 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
757 umem->id = MLX5_GET(create_umem_out, out, umem_id); in create_umem()
776 umem = &mvq->umem1; in umem_destroy()
779 umem = &mvq->umem2; in umem_destroy()
782 umem = &mvq->umem3; in umem_destroy()
787 MLX5_SET(destroy_umem_in, in, umem_id, umem->id); in umem_destroy()
788 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
807 for (num--; num > 0; num--) in umems_create()
817 for (num = 3; num > 0; num--) in umems_destroy()
825 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
867 return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) & in counters_supported()
873 return MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, event_mode) & in msix_mode_supported()
875 pci_msix_can_alloc_dyn(mvdev->mdev->pdev); in msix_mode_supported()
884 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_virtqueue()
887 u64 features = filled ? mvdev->actual_features : mvdev->mlx_features; in create_virtqueue()
901 err = -ENOMEM; in create_virtqueue()
910 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
920 if (vq_is_tx(mvq->index)) in create_virtqueue()
921 MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn); in create_virtqueue()
923 if (mvq->map.virq) { in create_virtqueue()
925 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->map.index); in create_virtqueue()
928 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn); in create_virtqueue()
931 MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index); in create_virtqueue()
932 MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent); in create_virtqueue()
937 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in create_virtqueue()
938 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); in create_virtqueue()
940 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in create_virtqueue()
941 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in create_virtqueue()
942 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in create_virtqueue()
944 vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; in create_virtqueue()
946 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey); in create_virtqueue()
948 vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; in create_virtqueue()
950 MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) in create_virtqueue()
951 MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey); in create_virtqueue()
956 vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; in create_virtqueue()
958 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY; in create_virtqueue()
960 vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; in create_virtqueue()
962 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; in create_virtqueue()
965 MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id); in create_virtqueue()
966 MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size); in create_virtqueue()
967 MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id); in create_virtqueue()
968 MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size); in create_virtqueue()
969 MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id); in create_virtqueue()
970 MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size); in create_virtqueue()
971 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
972 if (counters_supported(&ndev->mvdev)) in create_virtqueue()
973 MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id); in create_virtqueue()
975 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
979 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT; in create_virtqueue()
981 mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in create_virtqueue()
985 mvq->vq_mr = vq_mr; in create_virtqueue()
988 MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) { in create_virtqueue()
990 mvq->desc_mr = vq_desc_mr; in create_virtqueue()
1010 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id); in destroy_virtqueue()
1011 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
1014 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
1015 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
1018 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in destroy_virtqueue()
1021 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->vq_mr); in destroy_virtqueue()
1022 mvq->vq_mr = NULL; in destroy_virtqueue()
1024 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->desc_mr); in destroy_virtqueue()
1025 mvq->desc_mr = NULL; in destroy_virtqueue()
1030 return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn; in get_rqpn()
1035 return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn; in get_qpn()
1054 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1066 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1083 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1101 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1143 return -ENOMEM; in modify_qp()
1145 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1201 void *cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr); in fill_query_virtqueue_cmd()
1205 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in fill_query_virtqueue_cmd()
1206 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in fill_query_virtqueue_cmd()
1213 void *obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, cmd->out, obj_context); in query_virtqueue_end()
1216 attr->state = MLX5_GET(virtio_net_q_object, obj_context, state); in query_virtqueue_end()
1217 attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index); in query_virtqueue_end()
1218 attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index); in query_virtqueue_end()
1226 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in query_virtqueues()
1231 WARN(start_vq + num_vqs > mvdev->max_vqs, "query vq range invalid [%d, %d), max_vqs: %u\n", in query_virtqueues()
1232 start_vq, start_vq + num_vqs, mvdev->max_vqs); in query_virtqueues()
1237 err = -ENOMEM; in query_virtqueues()
1246 fill_query_virtqueue_cmd(ndev, &ndev->vqs[start_vq + i], &cmd_mem[i]); in query_virtqueues()
1249 err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs); in query_virtqueues()
1260 if (cmd->err) { in query_virtqueues()
1263 err = cmd->err; in query_virtqueues()
1278 return ndev->mvdev.vdev.config->resume; in is_resumable()
1299 if (mvq->modified_fields & ~MLX5_VIRTQ_MODIFY_MASK_STATE) in modifiable_virtqueue_fields()
1300 return mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT || in modifiable_virtqueue_fields()
1301 mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND; in modifiable_virtqueue_fields()
1311 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in fill_modify_virtqueue_cmd()
1318 cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr); in fill_modify_virtqueue_cmd()
1322 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in fill_modify_virtqueue_cmd()
1323 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in fill_modify_virtqueue_cmd()
1325 obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, obj_context); in fill_modify_virtqueue_cmd()
1328 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) in fill_modify_virtqueue_cmd()
1331 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) { in fill_modify_virtqueue_cmd()
1332 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in fill_modify_virtqueue_cmd()
1333 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in fill_modify_virtqueue_cmd()
1334 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in fill_modify_virtqueue_cmd()
1337 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX) in fill_modify_virtqueue_cmd()
1338 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in fill_modify_virtqueue_cmd()
1340 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX) in fill_modify_virtqueue_cmd()
1341 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); in fill_modify_virtqueue_cmd()
1343 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_QUEUE_VIRTIO_VERSION) in fill_modify_virtqueue_cmd()
1345 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in fill_modify_virtqueue_cmd()
1347 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_QUEUE_FEATURES) { in fill_modify_virtqueue_cmd()
1348 u16 mlx_features = get_features(ndev->mvdev.actual_features); in fill_modify_virtqueue_cmd()
1356 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { in fill_modify_virtqueue_cmd()
1357 vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; in fill_modify_virtqueue_cmd()
1360 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey); in fill_modify_virtqueue_cmd()
1362 mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY; in fill_modify_virtqueue_cmd()
1365 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { in fill_modify_virtqueue_cmd()
1366 desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; in fill_modify_virtqueue_cmd()
1368 if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) in fill_modify_virtqueue_cmd()
1369 MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey); in fill_modify_virtqueue_cmd()
1371 mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; in fill_modify_virtqueue_cmd()
1374 MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields); in fill_modify_virtqueue_cmd()
1381 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in modify_virtqueue_end()
1383 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { in modify_virtqueue_end()
1384 unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]; in modify_virtqueue_end()
1385 struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid]; in modify_virtqueue_end()
1387 mlx5_vdpa_put_mr(mvdev, mvq->vq_mr); in modify_virtqueue_end()
1389 mvq->vq_mr = vq_mr; in modify_virtqueue_end()
1392 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { in modify_virtqueue_end()
1393 unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]; in modify_virtqueue_end()
1394 struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid]; in modify_virtqueue_end()
1396 mlx5_vdpa_put_mr(mvdev, mvq->desc_mr); in modify_virtqueue_end()
1398 mvq->desc_mr = desc_mr; in modify_virtqueue_end()
1401 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) in modify_virtqueue_end()
1402 mvq->fw_state = state; in modify_virtqueue_end()
1404 mvq->modified_fields = 0; in modify_virtqueue_end()
1414 if (!counters_supported(&ndev->mvdev)) in counter_set_alloc()
1421 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_alloc()
1423 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_alloc()
1427 mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in counter_set_alloc()
1437 if (!counters_supported(&ndev->mvdev)) in counter_set_dealloc()
1441 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id); in counter_set_dealloc()
1442 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid); in counter_set_dealloc()
1444 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in counter_set_dealloc()
1445 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1452 if (cb->callback) in mlx5_vdpa_int_handler()
1453 return cb->callback(cb->private); in mlx5_vdpa_int_handler()
1461 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in alloc_vector()
1466 for (i = 0; i < irqp->num_ent; i++) { in alloc_vector()
1467 ent = &irqp->entries[i]; in alloc_vector()
1468 if (!ent->used) { in alloc_vector()
1469 snprintf(ent->name, MLX5_VDPA_IRQ_NAME_LEN, "%s-vq-%d", in alloc_vector()
1470 dev_name(&ndev->mvdev.vdev.dev), mvq->index); in alloc_vector()
1471 ent->dev_id = &ndev->event_cbs[mvq->index]; in alloc_vector()
1472 err = request_irq(ent->map.virq, mlx5_vdpa_int_handler, 0, in alloc_vector()
1473 ent->name, ent->dev_id); in alloc_vector()
1477 ent->used = true; in alloc_vector()
1478 mvq->map = ent->map; in alloc_vector()
1487 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in dealloc_vector()
1490 for (i = 0; i < irqp->num_ent; i++) in dealloc_vector()
1491 if (mvq->map.virq == irqp->entries[i].map.virq) { in dealloc_vector()
1492 free_irq(mvq->map.virq, irqp->entries[i].dev_id); in dealloc_vector()
1493 irqp->entries[i].used = false; in dealloc_vector()
1502 u16 idx = mvq->index; in setup_vq()
1505 if (mvq->initialized) in setup_vq()
1508 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1512 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1516 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1533 mvq->initialized = true; in setup_vq()
1535 if (mvq->ready) { in setup_vq()
1549 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1551 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
1559 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in modify_virtqueues()
1564 WARN(start_vq + num_vqs > mvdev->max_vqs, "modify vq range invalid [%d, %d), max_vqs: %u\n", in modify_virtqueues()
1565 start_vq, start_vq + num_vqs, mvdev->max_vqs); in modify_virtqueues()
1570 err = -ENOMEM; in modify_virtqueues()
1579 mvq = &ndev->vqs[vq_idx]; in modify_virtqueues()
1582 err = -EINVAL; in modify_virtqueues()
1586 if (mvq->fw_state != state) { in modify_virtqueues()
1587 if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) { in modify_virtqueues()
1588 err = -EINVAL; in modify_virtqueues()
1592 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE; in modify_virtqueues()
1595 cmd->in = &cmd_mem[i].in; in modify_virtqueues()
1596 cmd->inlen = sizeof(cmd_mem[i].in); in modify_virtqueues()
1597 cmd->out = &cmd_mem[i].out; in modify_virtqueues()
1598 cmd->outlen = sizeof(cmd_mem[i].out); in modify_virtqueues()
1602 err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs); in modify_virtqueues()
1614 mvq = &ndev->vqs[vq_idx]; in modify_virtqueues()
1616 if (cmd->err) { in modify_virtqueues()
1617 mlx5_vdpa_err(mvdev, "modify vq %d failed, state: %d -> %d, err: %d\n", in modify_virtqueues()
1618 vq_idx, mvq->fw_state, state, err); in modify_virtqueues()
1620 err = cmd->err; in modify_virtqueues()
1640 if (start_vq >= ndev->cur_num_vqs) in suspend_vqs()
1641 return -EINVAL; in suspend_vqs()
1643 mvq = &ndev->vqs[start_vq]; in suspend_vqs()
1644 if (!mvq->initialized) in suspend_vqs()
1647 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in suspend_vqs()
1656 return -ENOMEM; in suspend_vqs()
1663 mvq = &ndev->vqs[vq_idx]; in suspend_vqs()
1664 mvq->avail_idx = attrs[i].available_index; in suspend_vqs()
1665 mvq->used_idx = attrs[i].used_index; in suspend_vqs()
1675 return suspend_vqs(ndev, mvq->index, 1); in suspend_vq()
1683 if (start_vq >= ndev->mvdev.max_vqs) in resume_vqs()
1684 return -EINVAL; in resume_vqs()
1686 mvq = &ndev->vqs[start_vq]; in resume_vqs()
1687 if (!mvq->initialized) in resume_vqs()
1690 if (mvq->index >= ndev->cur_num_vqs) in resume_vqs()
1693 switch (mvq->fw_state) { in resume_vqs()
1696 * This should be fixed soon. After that, a single command can be used. in resume_vqs()
1698 err = modify_virtqueues(ndev, start_vq, num_vqs, mvq->fw_state); in resume_vqs()
1704 mlx5_vdpa_warn(&ndev->mvdev, "vq %d is not resumable\n", mvq->index); in resume_vqs()
1705 return -EINVAL; in resume_vqs()
1711 mlx5_vdpa_err(&ndev->mvdev, "resume vq %u called from bad state %d\n", in resume_vqs()
1712 mvq->index, mvq->fw_state); in resume_vqs()
1713 return -EINVAL; in resume_vqs()
1721 return resume_vqs(ndev, mvq->index, 1); in resume_vq()
1726 if (!mvq->initialized) in teardown_vq()
1730 mvq->modified_fields = 0; in teardown_vq()
1734 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1735 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1736 cq_destroy(ndev, mvq->index); in teardown_vq()
1737 mvq->initialized = false; in teardown_vq()
1742 int rqt_table_size = roundup_pow_of_two(ndev->rqt_size); in create_rqt()
1743 int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2); in create_rqt()
1754 return -ENOMEM; in create_rqt()
1756 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1763 list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id); in create_rqt()
1766 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1789 return -ENOMEM; in modify_rqt()
1791 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid); in modify_rqt()
1798 list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id); in modify_rqt()
1801 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); in modify_rqt()
1811 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1832 return -ENOMEM; in create_tir()
1834 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1848 MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn); in create_tir()
1849 MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn); in create_tir()
1851 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1863 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1883 node->ucast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1884 if (IS_ERR(node->ucast_counter.counter)) in add_steering_counters()
1885 return PTR_ERR(node->ucast_counter.counter); in add_steering_counters()
1887 node->mcast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1888 if (IS_ERR(node->mcast_counter.counter)) { in add_steering_counters()
1889 err = PTR_ERR(node->mcast_counter.counter); in add_steering_counters()
1894 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; in add_steering_counters()
1898 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in add_steering_counters()
1909 mlx5_fc_destroy(ndev->mvdev.mdev, node->mcast_counter.counter); in remove_steering_counters()
1910 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in remove_steering_counters()
1929 return -ENOMEM; in mlx5_vdpa_add_mac_vlan_rules()
1931 vid = key2vid(node->macvlan); in mlx5_vdpa_add_mac_vlan_rules()
1932 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; in mlx5_vdpa_add_mac_vlan_rules()
1933 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); in mlx5_vdpa_add_mac_vlan_rules()
1934 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); in mlx5_vdpa_add_mac_vlan_rules()
1939 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) { in mlx5_vdpa_add_mac_vlan_rules()
1943 if (node->tagged) { in mlx5_vdpa_add_mac_vlan_rules()
1949 dests[0].tir_num = ndev->res.tirn; in mlx5_vdpa_add_mac_vlan_rules()
1955 dests[1].counter_id = mlx5_fc_id(node->ucast_counter.counter); in mlx5_vdpa_add_mac_vlan_rules()
1957 node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1958 if (IS_ERR(node->ucast_rule)) { in mlx5_vdpa_add_mac_vlan_rules()
1959 err = PTR_ERR(node->ucast_rule); in mlx5_vdpa_add_mac_vlan_rules()
1964 dests[1].counter_id = mlx5_fc_id(node->mcast_counter.counter); in mlx5_vdpa_add_mac_vlan_rules()
1971 node->mcast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1972 if (IS_ERR(node->mcast_rule)) { in mlx5_vdpa_add_mac_vlan_rules()
1973 err = PTR_ERR(node->mcast_rule); in mlx5_vdpa_add_mac_vlan_rules()
1981 mlx5_del_flow_rules(node->ucast_rule); in mlx5_vdpa_add_mac_vlan_rules()
1993 mlx5_del_flow_rules(node->ucast_rule); in mlx5_vdpa_del_mac_vlan_rules()
1994 mlx5_del_flow_rules(node->mcast_rule); in mlx5_vdpa_del_mac_vlan_rules()
2021 hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) { in mac_vlan_lookup()
2022 if (pos->macvlan == value) in mac_vlan_lookup()
2037 return -EEXIST; in mac_vlan_add()
2041 return -ENOMEM; in mac_vlan_add()
2043 ptr->tagged = tagged; in mac_vlan_add()
2044 ptr->macvlan = val; in mac_vlan_add()
2045 ptr->ndev = ndev; in mac_vlan_add()
2046 err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, ptr); in mac_vlan_add()
2051 hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]); in mac_vlan_add()
2067 hlist_del(&ptr->hlist); in mac_vlan_del()
2080 hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) { in clear_mac_vlan_table()
2081 hlist_del(&pos->hlist); in clear_mac_vlan_table()
2098 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in setup_steering()
2100 mlx5_vdpa_err(&ndev->mvdev, "failed to get flow namespace\n"); in setup_steering()
2101 return -EOPNOTSUPP; in setup_steering()
2104 ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); in setup_steering()
2105 if (IS_ERR(ndev->rxft)) { in setup_steering()
2106 mlx5_vdpa_err(&ndev->mvdev, "failed to create flow table\n"); in setup_steering()
2107 return PTR_ERR(ndev->rxft); in setup_steering()
2111 err = mac_vlan_add(ndev, ndev->config.mac, 0, false); in setup_steering()
2119 mlx5_destroy_flow_table(ndev->rxft); in setup_steering()
2127 mlx5_destroy_flow_table(ndev->rxft); in teardown_steering()
2133 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mac()
2139 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in handle_ctrl_mac()
2142 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)mac, ETH_ALEN); in handle_ctrl_mac()
2146 if (!memcmp(ndev->config.mac, mac, 6)) { in handle_ctrl_mac()
2154 if (!is_zero_ether_addr(ndev->config.mac)) { in handle_ctrl_mac()
2155 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
2156 mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n", in handle_ctrl_mac()
2157 ndev->config.mac); in handle_ctrl_mac()
2163 mlx5_vdpa_warn(mvdev, "failed to insert new MAC %pM into MPFS table\n", in handle_ctrl_mac()
2171 memcpy(mac_back, ndev->config.mac, ETH_ALEN); in handle_ctrl_mac()
2173 memcpy(ndev->config.mac, mac, ETH_ALEN); in handle_ctrl_mac()
2179 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) { in handle_ctrl_mac()
2191 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
2192 mlx5_vdpa_warn(mvdev, "restore mac failed: delete MAC %pM from MPFS table failed\n", in handle_ctrl_mac()
2193 ndev->config.mac); in handle_ctrl_mac()
2197 mlx5_vdpa_warn(mvdev, "restore mac failed: insert old MAC %pM into MPFS table failed\n", in handle_ctrl_mac()
2201 memcpy(ndev->config.mac, mac_back, ETH_ALEN); in handle_ctrl_mac()
2203 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) in handle_ctrl_mac()
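
The handle_ctrl_mac() lines above show the MPFS bookkeeping for a MAC change: the old address is deleted from the physical function's MPFS table, the new one is inserted, and on a later failure the original entry (and steering rule) is restored. A minimal sketch of that delete/add/rollback pattern; update_mpfs_mac(), old_mac and new_mac are hypothetical names for illustration, not helpers from the driver:

        #include <linux/etherdevice.h>
        #include <linux/mlx5/driver.h>
        #include <linux/mlx5/mpfs.h>

        /* Hedged sketch of the MPFS update pattern; pfmdev is the physical
         * function's mlx5_core_dev, obtained in the lines above via
         * pci_get_drvdata(pci_physfn(...)).
         */
        static int update_mpfs_mac(struct mlx5_core_dev *pfmdev, u8 *old_mac, u8 *new_mac)
        {
                int err;

                /* Remove the stale entry first, unless no MAC was programmed yet. */
                if (!is_zero_ether_addr(old_mac)) {
                        err = mlx5_mpfs_del_mac(pfmdev, old_mac);
                        if (err)
                                return err;
                }

                err = mlx5_mpfs_add_mac(pfmdev, new_mac);
                if (err && !is_zero_ether_addr(old_mac))
                        /* Best-effort rollback: re-insert the previous address. */
                        mlx5_mpfs_add_mac(pfmdev, old_mac);

                return err;
        }
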
2222 int cur_vqs = ndev->cur_num_vqs; in change_num_qps()
2233 suspend_vqs(ndev, new_vqs, cur_vqs - new_vqs); in change_num_qps()
2236 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
2239 ndev->cur_num_vqs = new_vqs; in change_num_qps()
2241 ndev->cur_num_vqs = new_vqs; in change_num_qps()
2244 err = setup_vq(ndev, &ndev->vqs[i], false); in change_num_qps()
2249 err = resume_vqs(ndev, cur_vqs, new_vqs - cur_vqs); in change_num_qps()
2260 for (--i; i >= cur_vqs; --i) in change_num_qps()
2261 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
2263 ndev->cur_num_vqs = cur_vqs; in change_num_qps()
2272 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mq()
2279 /* This mq feature check aligns with pre-existing userspace in handle_ctrl_mq()
2283 * request down to a non-mq device that may cause kernel to in handle_ctrl_mq()
2286 * changing the number of vqs on a non-mq device. in handle_ctrl_mq()
2291 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq)); in handle_ctrl_mq()
2297 newqps > ndev->rqt_size) in handle_ctrl_mq()
2300 if (ndev->cur_num_vqs == 2 * newqps) { in handle_ctrl_mq()
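
The comment fragments from handle_ctrl_mq() above explain why a queue-pair change must be refused on a device that did not negotiate VIRTIO_NET_F_MQ, and line 2297 bounds the request by rqt_size. A hedged sketch of that validation, reusing the MLX5_FEATURE() macro from line 47; vq_pairs_request_valid() is a hypothetical wrapper relying on the driver's own headers:

        /* Only honour VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET when MQ was negotiated
         * and the requested pair count fits the negotiated RQT size.
         */
        static bool vq_pairs_request_valid(struct mlx5_vdpa_dev *mvdev,
                                           struct mlx5_vdpa_net *ndev, u16 newqps)
        {
                if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ))
                        return false;

                return newqps >= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN &&
                       newqps <= ndev->rqt_size;
        }
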
2320 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_vlan()
2325 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) in handle_ctrl_vlan()
2330 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan)); in handle_ctrl_vlan()
2335 if (mac_vlan_add(ndev, ndev->config.mac, id, true)) in handle_ctrl_vlan()
2341 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan)); in handle_ctrl_vlan()
2346 mac_vlan_del(ndev, ndev->config.mac, id, true); in handle_ctrl_vlan()
2368 mvdev = wqent->mvdev; in mlx5_cvq_kick_handler()
2370 cvq = &mvdev->cvq; in mlx5_cvq_kick_handler()
2372 down_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2374 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_cvq_kick_handler()
2377 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_cvq_kick_handler()
2380 if (!cvq->ready) in mlx5_cvq_kick_handler()
2384 err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head, in mlx5_cvq_kick_handler()
2389 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &ctrl, sizeof(ctrl)); in mlx5_cvq_kick_handler()
2393 cvq->received_desc++; in mlx5_cvq_kick_handler()
2411 write = vringh_iov_push_iotlb(&cvq->vring, &cvq->wiov, &status, sizeof(status)); in mlx5_cvq_kick_handler()
2412 vringh_complete_iotlb(&cvq->vring, cvq->head, write); in mlx5_cvq_kick_handler()
2413 vringh_kiov_cleanup(&cvq->riov); in mlx5_cvq_kick_handler()
2414 vringh_kiov_cleanup(&cvq->wiov); in mlx5_cvq_kick_handler()
2416 if (vringh_need_notify_iotlb(&cvq->vring)) in mlx5_cvq_kick_handler()
2417 vringh_notify(&cvq->vring); in mlx5_cvq_kick_handler()
2419 cvq->completed_desc++; in mlx5_cvq_kick_handler()
2420 queue_work(mvdev->wq, &wqent->work); in mlx5_cvq_kick_handler()
2425 up_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2438 if (!mvdev->wq || !mvdev->cvq.ready) in mlx5_vdpa_kick_vq()
2441 queue_work(mvdev->wq, &ndev->cvq_ent.work); in mlx5_vdpa_kick_vq()
2445 mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
2446 if (unlikely(!mvq->ready)) in mlx5_vdpa_kick_vq()
2449 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
2460 return -EINVAL; in mlx5_vdpa_set_vq_address()
2463 mvdev->cvq.desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
2464 mvdev->cvq.device_addr = device_area; in mlx5_vdpa_set_vq_address()
2465 mvdev->cvq.driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
2469 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
2470 mvq->desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
2471 mvq->device_addr = device_area; in mlx5_vdpa_set_vq_address()
2472 mvq->driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
2473 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS; in mlx5_vdpa_set_vq_address()
2487 struct mlx5_control_vq *cvq = &mvdev->cvq; in mlx5_vdpa_set_vq_num()
2489 cvq->vring.vring.num = num; in mlx5_vdpa_set_vq_num()
2493 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
2494 ndev->needs_teardown = num != mvq->num_ent; in mlx5_vdpa_set_vq_num()
2495 mvq->num_ent = num; in mlx5_vdpa_set_vq_num()
2503 ndev->event_cbs[idx] = *cb; in mlx5_vdpa_set_vq_cb()
2505 mvdev->cvq.event_cb = *cb; in mlx5_vdpa_set_vq_cb()
2512 if (!cvq->event_cb.callback) in mlx5_cvq_notify()
2515 cvq->event_cb.callback(cvq->event_cb.private); in mlx5_cvq_notify()
2520 struct mlx5_control_vq *cvq = &mvdev->cvq; in set_cvq_ready()
2522 cvq->ready = ready; in set_cvq_ready()
2526 cvq->vring.notify = mlx5_cvq_notify; in set_cvq_ready()
2535 if (!mvdev->actual_features) in mlx5_vdpa_set_vq_ready()
2546 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
2549 } else if (mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_vq_ready()
2554 mvq->ready = ready; in mlx5_vdpa_set_vq_ready()
2566 return mvdev->cvq.ready; in mlx5_vdpa_get_vq_ready()
2568 return ndev->vqs[idx].ready; in mlx5_vdpa_get_vq_ready()
2579 return -EINVAL; in mlx5_vdpa_set_vq_state()
2582 mvdev->cvq.vring.last_avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2586 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
2587 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) { in mlx5_vdpa_set_vq_state()
2588 mlx5_vdpa_warn(mvdev, "can't modify available index\n"); in mlx5_vdpa_set_vq_state()
2589 return -EINVAL; in mlx5_vdpa_set_vq_state()
2592 mvq->used_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2593 mvq->avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2594 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX | in mlx5_vdpa_set_vq_state()
2608 return -EINVAL; in mlx5_vdpa_get_vq_state()
2611 state->split.avail_index = mvdev->cvq.vring.last_avail_idx; in mlx5_vdpa_get_vq_state()
2615 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
2620 if (!mvq->initialized) { in mlx5_vdpa_get_vq_state()
2625 state->split.avail_index = mvq->used_idx; in mlx5_vdpa_get_vq_state()
2629 err = query_virtqueues(ndev, mvq->index, 1, &attr); in mlx5_vdpa_get_vq_state()
2634 state->split.avail_index = attr.used_index; in mlx5_vdpa_get_vq_state()
2715 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_device_features()
2716 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_device_features()
2723 return -EOPNOTSUPP; in verify_driver_features()
2736 return -EINVAL; in verify_driver_features()
2747 for (i = 0; i < mvdev->max_vqs; i++) { in setup_virtqueues()
2748 err = setup_vq(ndev, &ndev->vqs[i], filled); in setup_virtqueues()
2756 for (--i; i >= 0; i--) in setup_virtqueues()
2757 teardown_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2766 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) in teardown_virtqueues()
2767 teardown_vq(ndev, &ndev->vqs[i]); in teardown_virtqueues()
2775 mvdev->max_idx = mvdev->max_vqs; in update_cvq_info()
2780 mvdev->max_idx = 2; in update_cvq_info()
2784 mvdev->max_idx = 1; in update_cvq_info()
2809 if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) == in get_link_state()
2823 mvdev = wqent->mvdev; in update_carrier()
2826 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in update_carrier()
2828 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in update_carrier()
2830 if (ndev->config_cb.callback) in update_carrier()
2831 ndev->config_cb.callback(ndev->config_cb.private); in update_carrier()
2842 return -ENOMEM; in queue_link_work()
2844 wqent->mvdev = &ndev->mvdev; in queue_link_work()
2845 INIT_WORK(&wqent->work, update_carrier); in queue_link_work()
2846 queue_work(ndev->mvdev.wq, &wqent->work); in queue_link_work()
2856 if (ndev->mvdev.suspended) in event_handler()
2860 switch (eqe->sub_type) { in event_handler()
2878 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_STATUS))) in register_link_notifier()
2881 ndev->nb.notifier_call = event_handler; in register_link_notifier()
2882 mlx5_notifier_register(ndev->mvdev.mdev, &ndev->nb); in register_link_notifier()
2883 ndev->nb_registered = true; in register_link_notifier()
2889 if (!ndev->nb_registered) in unregister_link_notifier()
2892 ndev->nb_registered = false; in unregister_link_notifier()
2893 mlx5_notifier_unregister(ndev->mvdev.mdev, &ndev->nb); in unregister_link_notifier()
2894 if (ndev->mvdev.wq) in unregister_link_notifier()
2895 flush_workqueue(ndev->mvdev.wq); in unregister_link_notifier()
2907 u64 old_features = mvdev->actual_features; in mlx5_vdpa_set_driver_features()
2917 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_driver_features()
2920 if (get_features(old_features) != get_features(mvdev->actual_features)) { in mlx5_vdpa_set_driver_features()
2921 for (int i = 0; i < mvdev->max_vqs; ++i) { in mlx5_vdpa_set_driver_features()
2922 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i]; in mlx5_vdpa_set_driver_features()
2924 mvq->modified_fields |= ( in mlx5_vdpa_set_driver_features()
2936 diff_features = mvdev->mlx_features ^ mvdev->actual_features; in mlx5_vdpa_set_driver_features()
2937 ndev->needs_teardown = !!(diff_features & NEEDS_TEARDOWN_MASK); in mlx5_vdpa_set_driver_features()
2948 ndev->config_cb = *cb; in mlx5_vdpa_set_config_cb()
2972 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
2973 return ndev->mvdev.status; in mlx5_vdpa_get_status()
2978 struct mlx5_vq_restore_info *ri = &mvq->ri; in save_channel_info()
2982 if (mvq->initialized) { in save_channel_info()
2983 err = query_virtqueues(ndev, mvq->index, 1, &attr); in save_channel_info()
2988 ri->avail_index = attr.available_index; in save_channel_info()
2989 ri->used_index = attr.used_index; in save_channel_info()
2990 ri->ready = mvq->ready; in save_channel_info()
2991 ri->num_ent = mvq->num_ent; in save_channel_info()
2992 ri->desc_addr = mvq->desc_addr; in save_channel_info()
2993 ri->device_addr = mvq->device_addr; in save_channel_info()
2994 ri->driver_addr = mvq->driver_addr; in save_channel_info()
2995 ri->map = mvq->map; in save_channel_info()
2996 ri->restore = true; in save_channel_info()
3004 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
3005 memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri)); in save_channels_info()
3006 save_channel_info(ndev, &ndev->vqs[i]); in save_channels_info()
3015 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
3016 memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in mlx5_clear_vqs()
3027 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
3028 mvq = &ndev->vqs[i]; in restore_channels_info()
3029 ri = &mvq->ri; in restore_channels_info()
3030 if (!ri->restore) in restore_channels_info()
3033 mvq->avail_idx = ri->avail_index; in restore_channels_info()
3034 mvq->used_idx = ri->used_index; in restore_channels_info()
3035 mvq->ready = ri->ready; in restore_channels_info()
3036 mvq->num_ent = ri->num_ent; in restore_channels_info()
3037 mvq->desc_addr = ri->desc_addr; in restore_channels_info()
3038 mvq->device_addr = ri->device_addr; in restore_channels_info()
3039 mvq->driver_addr = ri->driver_addr; in restore_channels_info()
3040 mvq->map = ri->map; in restore_channels_info()
3052 suspend_vqs(ndev, 0, ndev->cur_num_vqs); in mlx5_vdpa_change_map()
3063 for (int i = 0; i < mvdev->max_vqs; i++) in mlx5_vdpa_change_map()
3064 ndev->vqs[i].modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY | in mlx5_vdpa_change_map()
3067 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended) in mlx5_vdpa_change_map()
3077 resume_vqs(ndev, 0, ndev->cur_num_vqs); in mlx5_vdpa_change_map()
3085 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in setup_vq_resources()
3088 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in setup_vq_resources()
3090 if (ndev->setup) { in setup_vq_resources()
3124 ndev->setup = true; in setup_vq_resources()
3144 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in teardown_vq_resources()
3146 if (!ndev->setup) in teardown_vq_resources()
3154 ndev->setup = false; in teardown_vq_resources()
3155 ndev->needs_teardown = false; in teardown_vq_resources()
3160 struct mlx5_control_vq *cvq = &mvdev->cvq; in setup_cvq_vring()
3163 if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) { in setup_cvq_vring()
3164 u16 idx = cvq->vring.last_avail_idx; in setup_cvq_vring()
3166 err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, in setup_cvq_vring()
3167 cvq->vring.vring.num, false, in setup_cvq_vring()
3168 (struct vring_desc *)(uintptr_t)cvq->desc_addr, in setup_cvq_vring()
3169 (struct vring_avail *)(uintptr_t)cvq->driver_addr, in setup_cvq_vring()
3170 (struct vring_used *)(uintptr_t)cvq->device_addr); in setup_cvq_vring()
3173 cvq->vring.last_avail_idx = cvq->vring.last_used_idx = idx; in setup_cvq_vring()
3186 down_write(&ndev->reslock); in mlx5_vdpa_set_status()
3188 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
3197 if (ndev->needs_teardown) in mlx5_vdpa_set_status()
3200 if (ndev->setup) { in mlx5_vdpa_set_status()
3201 err = resume_vqs(ndev, 0, ndev->cur_num_vqs); in mlx5_vdpa_set_status()
3219 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
3220 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
3226 mlx5_vdpa_clean_mrs(&ndev->mvdev); in mlx5_vdpa_set_status()
3227 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
3229 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
3238 mvdev->mres.group2asid[i] = 0; in init_group_to_asid_map()
3244 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[0]; in needs_vqs_reset()
3246 if (mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) in needs_vqs_reset()
3249 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT) in needs_vqs_reset()
3252 return mvq->modified_fields & ( in needs_vqs_reset()
3269 down_write(&ndev->reslock); in mlx5_vdpa_compat_reset()
3278 mlx5_vdpa_clean_mrs(&ndev->mvdev); in mlx5_vdpa_compat_reset()
3279 ndev->mvdev.status = 0; in mlx5_vdpa_compat_reset()
3280 ndev->mvdev.suspended = false; in mlx5_vdpa_compat_reset()
3281 ndev->cur_num_vqs = MLX5V_DEFAULT_VQ_COUNT; in mlx5_vdpa_compat_reset()
3282 ndev->mvdev.cvq.ready = false; in mlx5_vdpa_compat_reset()
3283 ndev->mvdev.cvq.received_desc = 0; in mlx5_vdpa_compat_reset()
3284 ndev->mvdev.cvq.completed_desc = 0; in mlx5_vdpa_compat_reset()
3285 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); in mlx5_vdpa_compat_reset()
3286 ndev->mvdev.actual_features = 0; in mlx5_vdpa_compat_reset()
3288 ++mvdev->generation; in mlx5_vdpa_compat_reset()
3291 MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_compat_reset()
3297 up_write(&ndev->reslock); in mlx5_vdpa_compat_reset()
3319 memcpy(buf, (u8 *)&ndev->config + offset, len); in mlx5_vdpa_get_config()
3332 return mvdev->generation; in mlx5_vdpa_get_generation()
3342 return -EINVAL; in set_map_data()
3356 if (!mvdev->mres.mr[asid]) { in set_map_data()
3378 int err = -EINVAL; in mlx5_vdpa_set_map()
3380 down_write(&ndev->reslock); in mlx5_vdpa_set_map()
3382 up_write(&ndev->reslock); in mlx5_vdpa_set_map()
3392 down_write(&ndev->reslock); in mlx5_vdpa_reset_map()
3394 up_write(&ndev->reslock); in mlx5_vdpa_reset_map()
3403 return &vdev->dev; in mlx5_get_vq_dma_dev()
3405 return mvdev->vdev.dma_dev; in mlx5_get_vq_dma_dev()
3413 if (!msix_mode_supported(&ndev->mvdev)) in free_irqs()
3416 if (!ndev->irqp.entries) in free_irqs()
3419 for (i = ndev->irqp.num_ent - 1; i >= 0; i--) { in free_irqs()
3420 ent = ndev->irqp.entries + i; in free_irqs()
3421 if (ent->map.virq) in free_irqs()
3422 pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map); in free_irqs()
3424 kfree(ndev->irqp.entries); in free_irqs()
3437 mlx5_vdpa_destroy_mr_resources(&ndev->mvdev); in mlx5_vdpa_free()
3438 mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx); in mlx5_vdpa_free()
3440 if (!is_zero_ether_addr(ndev->config.mac)) { in mlx5_vdpa_free()
3441 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in mlx5_vdpa_free()
3442 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); in mlx5_vdpa_free()
3444 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
3446 kfree(ndev->event_cbs); in mlx5_vdpa_free()
3447 kfree(ndev->vqs); in mlx5_vdpa_free()
3464 if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT) in mlx5_get_vq_notification()
3468 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr; in mlx5_get_vq_notification()
3481 return -EINVAL; in mlx5_get_vq_irq()
3484 return -EOPNOTSUPP; in mlx5_get_vq_irq()
3486 mvq = &ndev->vqs[idx]; in mlx5_get_vq_irq()
3487 if (!mvq->map.virq) in mlx5_get_vq_irq()
3488 return -EOPNOTSUPP; in mlx5_get_vq_irq()
3490 return mvq->map.virq; in mlx5_get_vq_irq()
3497 return mvdev->actual_features; in mlx5_vdpa_get_driver_features()
3509 if (!counters_supported(&ndev->mvdev)) in counter_set_query()
3510 return -EOPNOTSUPP; in counter_set_query()
3512 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in counter_set_query()
3513 return -EAGAIN; in counter_set_query()
3519 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_query()
3520 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id); in counter_set_query()
3522 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_query()
3544 down_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3547 err = -EINVAL; in mlx5_vdpa_get_vendor_vq_stats()
3552 cvq = &mvdev->cvq; in mlx5_vdpa_get_vendor_vq_stats()
3553 received_desc = cvq->received_desc; in mlx5_vdpa_get_vendor_vq_stats()
3554 completed_desc = cvq->completed_desc; in mlx5_vdpa_get_vendor_vq_stats()
3558 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vendor_vq_stats()
3566 err = -EMSGSIZE; in mlx5_vdpa_get_vendor_vq_stats()
3583 up_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3591 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_vdpa_cvq_suspend()
3594 cvq = &mvdev->cvq; in mlx5_vdpa_cvq_suspend()
3595 cvq->ready = false; in mlx5_vdpa_cvq_suspend()
3606 down_write(&ndev->reslock); in mlx5_vdpa_suspend()
3607 err = suspend_vqs(ndev, 0, ndev->cur_num_vqs); in mlx5_vdpa_suspend()
3609 mvdev->suspended = true; in mlx5_vdpa_suspend()
3610 up_write(&ndev->reslock); in mlx5_vdpa_suspend()
3625 down_write(&ndev->reslock); in mlx5_vdpa_resume()
3626 mvdev->suspended = false; in mlx5_vdpa_resume()
3627 err = resume_vqs(ndev, 0, ndev->cur_num_vqs); in mlx5_vdpa_resume()
3629 up_write(&ndev->reslock); in mlx5_vdpa_resume()
3641 return -EINVAL; in mlx5_set_group_asid()
3643 mvdev->mres.group2asid[group] = asid; in mlx5_set_group_asid()
3645 mutex_lock(&mvdev->mres.lock); in mlx5_set_group_asid()
3646 if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid]) in mlx5_set_group_asid()
3647 err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid); in mlx5_set_group_asid()
3648 mutex_unlock(&mvdev->mres.lock); in mlx5_set_group_asid()
3702 *mtu = hw_mtu - MLX5V_ETH_HARD_MTU; in query_mtu()
3708 struct mlx5_vdpa_net_resources *res = &ndev->res; in alloc_fixed_resources()
3711 if (res->valid) { in alloc_fixed_resources()
3712 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_fixed_resources()
3713 return -EEXIST; in alloc_fixed_resources()
3716 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_fixed_resources()
3724 res->valid = true; in alloc_fixed_resources()
3729 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_fixed_resources()
3735 struct mlx5_vdpa_net_resources *res = &ndev->res; in free_fixed_resources()
3737 if (!res->valid) in free_fixed_resources()
3741 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_fixed_resources()
3742 res->valid = false; in free_fixed_resources()
3750 for (i = 0; i < ndev->mvdev.max_vqs; ++i) { in mvqs_set_defaults()
3751 mvq = &ndev->vqs[i]; in mvqs_set_defaults()
3753 mvq->index = i; in mvqs_set_defaults()
3754 mvq->ndev = ndev; in mvqs_set_defaults()
3755 mvq->fwqp.fw = true; in mvqs_set_defaults()
3756 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in mvqs_set_defaults()
3757 mvq->num_ent = MLX5V_DEFAULT_VQ_SIZE; in mvqs_set_defaults()
3776 return -ENOMEM; in config_func_mtu()
3795 if (!msix_mode_supported(&ndev->mvdev)) in allocate_irqs()
3798 if (!ndev->mvdev.mdev->pdev) in allocate_irqs()
3801 ndev->irqp.entries = kcalloc(ndev->mvdev.max_vqs, sizeof(*ndev->irqp.entries), GFP_KERNEL); in allocate_irqs()
3802 if (!ndev->irqp.entries) in allocate_irqs()
3806 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in allocate_irqs()
3807 ent = ndev->irqp.entries + i; in allocate_irqs()
3808 snprintf(ent->name, MLX5_VDPA_IRQ_NAME_LEN, "%s-vq-%d", in allocate_irqs()
3809 dev_name(&ndev->mvdev.vdev.dev), i); in allocate_irqs()
3810 ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL); in allocate_irqs()
3811 if (!ent->map.virq) in allocate_irqs()
3814 ndev->irqp.num_ent++; in allocate_irqs()
3832 if (mgtdev->ndev) in mlx5_vdpa_dev_add()
3833 return -ENOSPC; in mlx5_vdpa_dev_add()
3835 mdev = mgtdev->madev->mdev; in mlx5_vdpa_dev_add()
3836 device_features = mgtdev->mgtdev.supported_features; in mlx5_vdpa_dev_add()
3837 if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) { in mlx5_vdpa_dev_add()
3838 if (add_config->device_features & ~device_features) { in mlx5_vdpa_dev_add()
3839 dev_warn(mdev->device, in mlx5_vdpa_dev_add()
3841 add_config->device_features, device_features); in mlx5_vdpa_dev_add()
3842 return -EINVAL; in mlx5_vdpa_dev_add()
3844 device_features &= add_config->device_features; in mlx5_vdpa_dev_add()
3850 dev_warn(mdev->device, in mlx5_vdpa_dev_add()
3853 return -EOPNOTSUPP; in mlx5_vdpa_dev_add()
3858 dev_warn(mdev->device, "missing support for split virtqueues\n"); in mlx5_vdpa_dev_add()
3859 return -EOPNOTSUPP; in mlx5_vdpa_dev_add()
3865 dev_warn(mdev->device, in mlx5_vdpa_dev_add()
3868 return -EAGAIN; in mlx5_vdpa_dev_add()
3871 if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) { in mlx5_vdpa_dev_add()
3872 if (add_config->net.max_vq_pairs > max_vqs / 2) in mlx5_vdpa_dev_add()
3873 return -EINVAL; in mlx5_vdpa_dev_add()
3874 max_vqs = min_t(u32, max_vqs, 2 * add_config->net.max_vq_pairs); in mlx5_vdpa_dev_add()
3879 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mgtdev->vdpa_ops, in mlx5_vdpa_dev_add()
3884 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
3885 mvdev = &ndev->mvdev; in mlx5_vdpa_dev_add()
3886 mvdev->mdev = mdev; in mlx5_vdpa_dev_add()
3888 ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3889 ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3890 if (!ndev->vqs || !ndev->event_cbs) { in mlx5_vdpa_dev_add()
3891 err = -ENOMEM; in mlx5_vdpa_dev_add()
3894 ndev->cur_num_vqs = MLX5V_DEFAULT_VQ_COUNT; in mlx5_vdpa_dev_add()
3898 init_rwsem(&ndev->reslock); in mlx5_vdpa_dev_add()
3899 config = &ndev->config; in mlx5_vdpa_dev_add()
3901 if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) { in mlx5_vdpa_dev_add()
3902 err = config_func_mtu(mdev, add_config->net.mtu); in mlx5_vdpa_dev_add()
3912 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu); in mlx5_vdpa_dev_add()
3917 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3919 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3922 if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) { in mlx5_vdpa_dev_add()
3923 memcpy(ndev->config.mac, add_config->net.mac, ETH_ALEN); in mlx5_vdpa_dev_add()
3925 } else if ((add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) == 0 || in mlx5_vdpa_dev_add()
3927 err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac); in mlx5_vdpa_dev_add()
3932 if (!is_zero_ether_addr(config->mac)) { in mlx5_vdpa_dev_add()
3933 pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev)); in mlx5_vdpa_dev_add()
3934 err = mlx5_mpfs_add_mac(pfmdev, config->mac); in mlx5_vdpa_dev_add()
3937 } else if ((add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) == 0) { in mlx5_vdpa_dev_add()
3947 mlx5_vdpa_warn(&ndev->mvdev, in mlx5_vdpa_dev_add()
3949 err = -EINVAL; in mlx5_vdpa_dev_add()
3954 config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2); in mlx5_vdpa_dev_add()
3955 ndev->rqt_size = max_vqs / 2; in mlx5_vdpa_dev_add()
3957 ndev->rqt_size = 1; in mlx5_vdpa_dev_add()
3960 mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx); in mlx5_vdpa_dev_add()
3962 ndev->mvdev.mlx_features = device_features; in mlx5_vdpa_dev_add()
3963 mvdev->vdev.dma_dev = &mdev->pdev->dev; in mlx5_vdpa_dev_add()
3964 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3972 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_dev_add()
3982 ndev->cvq_ent.mvdev = mvdev; in mlx5_vdpa_dev_add()
3983 INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler); in mlx5_vdpa_dev_add()
3984 mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq"); in mlx5_vdpa_dev_add()
3985 if (!mvdev->wq) { in mlx5_vdpa_dev_add()
3986 err = -ENOMEM; in mlx5_vdpa_dev_add()
3990 mvdev->vdev.mdev = &mgtdev->mgtdev; in mlx5_vdpa_dev_add()
3991 err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1); in mlx5_vdpa_dev_add()
3995 mgtdev->ndev = ndev; in mlx5_vdpa_dev_add()
3997 /* For virtio-vdpa, the device was set up during device register. */ in mlx5_vdpa_dev_add()
3998 if (ndev->setup) in mlx5_vdpa_dev_add()
4001 down_write(&ndev->reslock); in mlx5_vdpa_dev_add()
4003 up_write(&ndev->reslock); in mlx5_vdpa_dev_add()
4010 _vdpa_unregister_device(&mvdev->vdev); in mlx5_vdpa_dev_add()
4012 destroy_workqueue(mvdev->wq); in mlx5_vdpa_dev_add()
4014 put_device(&mvdev->vdev.dev); in mlx5_vdpa_dev_add()
4028 down_write(&ndev->reslock); in mlx5_vdpa_dev_del()
4030 up_write(&ndev->reslock); in mlx5_vdpa_dev_del()
4032 wq = mvdev->wq; in mlx5_vdpa_dev_del()
4033 mvdev->wq = NULL; in mlx5_vdpa_dev_del()
4035 mgtdev->ndev = NULL; in mlx5_vdpa_dev_del()
4046 int err = -EOPNOTSUPP; in mlx5_vdpa_set_attr()
4050 mdev = mvdev->mdev; in mlx5_vdpa_set_attr()
4051 config = &ndev->config; in mlx5_vdpa_set_attr()
4053 down_write(&ndev->reslock); in mlx5_vdpa_set_attr()
4054 if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) { in mlx5_vdpa_set_attr()
4055 pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev)); in mlx5_vdpa_set_attr()
4056 err = mlx5_mpfs_add_mac(pfmdev, config->mac); in mlx5_vdpa_set_attr()
4058 ether_addr_copy(config->mac, add_config->net.mac); in mlx5_vdpa_set_attr()
4061 up_write(&ndev->reslock); in mlx5_vdpa_set_attr()
4081 struct mlx5_core_dev *mdev = madev->mdev; in mlx5v_probe()
4087 return -ENOMEM; in mlx5v_probe()
4089 mgtdev->mgtdev.ops = &mdev_ops; in mlx5v_probe()
4090 mgtdev->mgtdev.device = mdev->device; in mlx5v_probe()
4091 mgtdev->mgtdev.id_table = id_table; in mlx5v_probe()
4092 mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | in mlx5v_probe()
4096 mgtdev->mgtdev.max_supported_vqs = in mlx5v_probe()
4098 mgtdev->mgtdev.supported_features = get_supported_features(mdev); in mlx5v_probe()
4099 mgtdev->madev = madev; in mlx5v_probe()
4100 mgtdev->vdpa_ops = mlx5_vdpa_ops; in mlx5v_probe()
4103 mgtdev->vdpa_ops.get_vq_desc_group = NULL; in mlx5v_probe()
4106 mgtdev->vdpa_ops.resume = NULL; in mlx5v_probe()
4108 err = vdpa_mgmtdev_register(&mgtdev->mgtdev); in mlx5v_probe()
4126 vdpa_mgmtdev_unregister(&mgtdev->mgtdev); in mlx5v_remove()