xref: /linux/drivers/vdpa/mlx5/core/mlx5_vdpa.h (revision 58d4d50e758ab1e880b30ba815d733d46f5cbfac)
1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2020 Mellanox Technologies Ltd. */
3 
4 #ifndef __MLX5_VDPA_H__
5 #define __MLX5_VDPA_H__
6 
7 #include <linux/etherdevice.h>
8 #include <linux/vringh.h>
9 #include <linux/vdpa.h>
10 #include <linux/mlx5/driver.h>
11 
/* Fixed L2 overhead on top of the IP MTU: Ethernet header + one VLAN tag + FCS. */
#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
13 
/* One direct memory region: a single contiguous mapping covering the
 * range [start, end) with the access permissions in @perm, backed by a
 * scatter-gather table. Direct MRs are chained under an indirect
 * mlx5_vdpa_mr via @list.
 */
struct mlx5_vdpa_direct_mr {
	u64 start;			/* start of the mapped range */
	u64 end;			/* end of the mapped range (presumably exclusive — confirm against users) */
	u32 perm;			/* access permissions for this range */
	u32 mr;				/* mkey of the HW MR created for this range */
	struct sg_table sg_head;	/* SG table describing the backing pages */
	int log_size;			/* NOTE(review): presumably log2 of the MR page size — confirm */
	int nsg;			/* number of SG entries allocated */
	int nent;			/* number of SG entries actually mapped */
	struct list_head list;		/* entry in the parent indirect MR's ->head */
	u64 offset;			/* NOTE(review): offset of this region within the parent mapping — confirm */
};
26 
/* An indirect memory key aggregating a set of direct MRs (or a
 * user-provided MR), together with the iotlb it was built from.
 * Lifetime is managed by @refcount via mlx5_vdpa_get_mr()/put_mr().
 */
struct mlx5_vdpa_mr {
	u32 mkey;			/* HW memory key of the indirect MR */

	/* list of direct MRs descendants of this indirect mr */
	struct list_head head;
	unsigned long num_directs;	/* number of direct MRs on @head */
	unsigned long num_klms;		/* number of KLM entries in the indirect mkey */

	struct vhost_iotlb *iotlb;	/* iotlb this MR was created from */

	bool user_mr;			/* true if built from a userspace iotlb rather than DMA */

	refcount_t refcount;
	struct list_head mr_list;	/* entry in mlx5_vdpa_mr_resources->mr_list_head */
};
42 
/* Per-device HW resources allocated once at setup time
 * (see mlx5_vdpa_alloc_resources()/mlx5_vdpa_free_resources()).
 */
struct mlx5_vdpa_resources {
	u32 pdn;			/* protection domain number */
	struct mlx5_uars_page *uar;	/* UAR page used for doorbells */
	void __iomem *kick_addr;	/* mapped doorbell address for kicking VQs */
	u64 phys_kick_addr;		/* physical address of the kick doorbell */
	u16 uid;			/* user context id owning these objects */
	u32 null_mkey;			/* NOTE(review): presumably mkey for unmapped access — confirm */
	bool valid;			/* true once resources were successfully allocated */
};
52 
/* State of the emulated control virtqueue (CVQ). The CVQ is processed
 * in software with vringh rather than offloaded to HW.
 */
struct mlx5_control_vq {
	struct vhost_iotlb *iotlb;	/* address translations for the CVQ address space */
	/* spinlock to synchronize iommu table */
	spinlock_t iommu_lock;
	struct vringh vring;		/* vringh accessor for the CVQ ring */
	bool ready;			/* queue was marked ready by the driver */
	u64 desc_addr;			/* guest address of the descriptor area */
	u64 device_addr;		/* guest address of the device (used) area */
	u64 driver_addr;		/* guest address of the driver (avail) area */
	struct vdpa_callback event_cb;	/* callback to signal CVQ completions */
	struct vringh_kiov riov;	/* kiov for reading command buffers */
	struct vringh_kiov wiov;	/* kiov for writing status back */
	unsigned short head;		/* head index of the descriptor in flight */
	unsigned int received_desc;	/* count of descriptors received from the driver */
	unsigned int completed_desc;	/* count of descriptors completed back to the driver */
};
69 
/* Work item queued on mlx5_vdpa_dev->wq, carrying the owning device. */
struct mlx5_vdpa_wq_ent {
	struct work_struct work;
	struct mlx5_vdpa_dev *mvdev;
};
74 
/* Virtqueue group indices reported to the vdpa core. */
enum {
	MLX5_VDPA_DATAVQ_GROUP,		/* data virtqueues */
	MLX5_VDPA_CVQ_GROUP,		/* control virtqueue */
	MLX5_VDPA_DATAVQ_DESC_GROUP,	/* descriptor area of the data virtqueues */
	MLX5_VDPA_NUMVQ_GROUPS		/* number of groups; must stay last */
};
81 
/* Number of address spaces the device exposes (indexes mres.mr[]). */
enum {
	MLX5_VDPA_NUM_AS = 2
};
85 
/* Per-device memory-region bookkeeping, protected by @lock. */
struct mlx5_vdpa_mr_resources {
	struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];		/* active MR per address space */
	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];	/* VQ group -> address space mapping */
	struct list_head mr_list_head;				/* all MRs of this device (via mr->mr_list) */
	struct mutex lock;					/* serializes MR creation/teardown and the fields above */
};
92 
/* Core mlx5 vdpa device state, embedding the generic vdpa_device.
 * Containers of this struct (e.g. the net flavor) wrap it further.
 */
struct mlx5_vdpa_dev {
	struct vdpa_device vdev;		/* generic vdpa device (must allow container_of) */
	struct mlx5_core_dev *mdev;		/* underlying mlx5 core device */
	struct mlx5_vdpa_resources res;		/* per-device HW resources */
	struct mlx5_vdpa_mr_resources mres;	/* memory-region bookkeeping */

	u64 mlx_features;			/* features the device can offer */
	u64 actual_features;			/* features negotiated with the driver */
	u8 status;				/* VIRTIO device status byte */
	u32 max_vqs;				/* maximum number of virtqueues supported */
	u16 max_idx;				/* NOTE(review): presumably highest usable VQ index — confirm */
	u32 generation;				/* bumped on device config changes */

	struct mlx5_control_vq cvq;		/* software-emulated control VQ */
	struct workqueue_struct *wq;		/* workqueue for deferred work (mlx5_vdpa_wq_ent) */
	bool suspended;				/* device is in suspended state */

	struct mlx5_async_ctx async_ctx;	/* context for async FW commands */
};
112 
/* One asynchronous FW command: input/output mailboxes plus completion
 * tracking. Used in batches by mlx5_vdpa_exec_async_cmds().
 */
struct mlx5_vdpa_async_cmd {
	int err;			/* command status, set on completion */
	struct mlx5_async_work cb_work;	/* async completion work */
	struct completion cmd_done;	/* signaled when the command finishes */

	void *in;			/* command input mailbox */
	size_t inlen;			/* length of @in */

	void *out;			/* command output mailbox */
	size_t outlen;			/* length of @out */
};
124 
/* TIS/RQT/TIR object management (transport interface send, RQ tables,
 * transport interface receive). Create/modify take FW command input in
 * @in; created object numbers are returned through the out parameter.
 */
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn);
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn);
int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn);
void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn);

/* Transport domain and per-device resource setup/teardown. */
int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn);
void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn);
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev);

/* Memory key / memory region management. MRs are refcounted:
 * get/put pair with create; update installs @mr for address space @asid.
 */
int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
			  int inlen);
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
					 struct vhost_iotlb *iotlb);
void mlx5_vdpa_clean_mrs(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
		      struct mlx5_vdpa_mr *mr);
void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
		      struct mlx5_vdpa_mr *mr);
void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
			 struct mlx5_vdpa_mr *mr,
			 unsigned int asid);
int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
				struct vhost_iotlb *iotlb,
				unsigned int asid);
int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev);
int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid);

/* Submit @num_cmds FW commands asynchronously and wait for them all. */
int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev,
			      struct mlx5_vdpa_async_cmd *cmds,
			      int num_cmds);
157 
/* Error-level log prefixed with function, line and current pid. */
#define mlx5_vdpa_err(__dev, format, ...)                                                          \
	dev_err((__dev)->mdev->device, "%s:%d:(pid %d) error: " format, __func__, __LINE__,        \
		 current->pid, ##__VA_ARGS__)
161 
162 
/* Warning-level log prefixed with function, line and current pid. */
#define mlx5_vdpa_warn(__dev, format, ...)                                                         \
	dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__,     \
		 current->pid, ##__VA_ARGS__)
166 
/* Info-level log prefixed with function, line and current pid. */
#define mlx5_vdpa_info(__dev, format, ...)                                                         \
	dev_info((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__,             \
		 current->pid, ##__VA_ARGS__)
170 
/* Debug-level log prefixed with function, line and current pid.
 * Fix: the kernel has no dev_debug(); the correct API is dev_dbg()
 * (see include/linux/dev_printk.h). The typo compiled only because
 * this macro was never expanded anywhere.
 */
#define mlx5_vdpa_dbg(__dev, format, ...)                                                          \
	dev_dbg((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__,              \
		current->pid, ##__VA_ARGS__)
174 
175 #endif /* __MLX5_VDPA_H__ */
176