xref: /linux/drivers/vdpa/mlx5/core/mlx5_vdpa.h (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2020 Mellanox Technologies Ltd. */
3 
4 #ifndef __MLX5_VDPA_H__
5 #define __MLX5_VDPA_H__
6 
7 #include <linux/etherdevice.h>
8 #include <linux/vringh.h>
9 #include <linux/vdpa.h>
10 #include <linux/mlx5/driver.h>
11 
/* Fixed Ethernet framing overhead added on top of the MTU:
 * L2 header + one VLAN tag + frame check sequence.
 */
#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
13 
/* One direct memory region (MR) covering a single contiguous range of
 * mapped memory. Direct MRs are linked (via @list) under an indirect
 * struct mlx5_vdpa_mr, which aggregates them.
 */
struct mlx5_vdpa_direct_mr {
	u64 start;		/* start address of the covered range */
	u64 end;		/* end address of the covered range */
	u32 perm;		/* access permissions for this mapping */
	u32 mr;			/* hardware MR handle -- NOTE(review): presumably an mkey; confirm against core/mr.c */
	struct sg_table sg_head;	/* scatter-gather table backing the range */
	int log_size;		/* NOTE(review): log2 of the mapping granularity -- confirm */
	int nsg;		/* number of scatter-gather segments requested */
	int nent;		/* number of entries actually mapped -- TODO confirm vs. @nsg */
	struct list_head list;	/* linkage on the parent indirect MR's list */
	u64 offset;		/* offset of this region within the parent mapping -- TODO confirm */
};
26 
/* An indirect memory region aggregating the direct MRs that describe one
 * iotlb's mappings. Lifetime is reference-counted; see mlx5_vdpa_get_mr()
 * and mlx5_vdpa_put_mr().
 */
struct mlx5_vdpa_mr {
	u32 mkey;		/* hardware memory key of this (indirect) MR */

	/* list of direct MRs descendants of this indirect mr */
	struct list_head head;
	unsigned long num_directs;	/* number of direct MRs on @head -- TODO confirm */
	unsigned long num_klms;		/* NOTE(review): presumably KLM entries of the indirect mkey; confirm */

	struct vhost_iotlb *iotlb;	/* iotlb this MR was built from -- TODO confirm ownership */

	bool user_mr;		/* NOTE(review): looks like "backed by userspace mapping" -- confirm */

	refcount_t refcount;	/* taken/dropped via mlx5_vdpa_get_mr()/mlx5_vdpa_put_mr() */
	struct list_head mr_list;	/* linkage on mres mr_list_head / mr_gc_list_head -- TODO confirm */
};
42 
/* Per-device firmware resources, set up once via mlx5_vdpa_alloc_resources()
 * and torn down via mlx5_vdpa_free_resources().
 */
struct mlx5_vdpa_resources {
	u32 pdn;		/* protection domain number */
	struct mlx5_uars_page *uar;	/* UAR page -- NOTE(review): presumably used for doorbells; confirm */
	void __iomem *kick_addr;	/* ioremapped kick (doorbell) address */
	u64 phys_kick_addr;	/* physical address behind @kick_addr */
	u16 uid;		/* firmware user context id -- TODO confirm */
	u32 null_mkey;		/* NOTE(review): presumably the device's null mkey; confirm */
	bool valid;		/* true once the resources above are allocated */
};
52 
/* State of the control virtqueue (CVQ), processed in software via vringh. */
struct mlx5_control_vq {
	struct vhost_iotlb *iotlb;	/* translations used when accessing the CVQ rings */
	/* spinlock to synchronize iommu table */
	spinlock_t iommu_lock;
	struct vringh vring;	/* host-side vring accessor for the CVQ */
	bool ready;		/* queue has been configured and enabled */
	u64 desc_addr;		/* guest address of the descriptor area */
	u64 device_addr;	/* guest address of the device (used) area */
	u64 driver_addr;	/* guest address of the driver (avail) area */
	struct vdpa_callback event_cb;	/* callback to signal CVQ completions */
	struct vringh_kiov riov;	/* scratch kiov for reads from the guest */
	struct vringh_kiov wiov;	/* scratch kiov for writes to the guest */
	unsigned short head;	/* head index of the descriptor being processed */
	unsigned int received_desc;	/* descriptors received from the driver */
	unsigned int completed_desc;	/* descriptors completed back to the driver */
};
69 
/* Workqueue entry carrying the owning device to its work handler. */
struct mlx5_vdpa_wq_ent {
	struct work_struct work;	/* embedded work item */
	struct mlx5_vdpa_dev *mvdev;	/* device the handler operates on */
};
74 
/* Virtqueue group indices reported to the vdpa core. */
enum {
	MLX5_VDPA_DATAVQ_GROUP,		/* data virtqueues */
	MLX5_VDPA_CVQ_GROUP,		/* control virtqueue */
	MLX5_VDPA_DATAVQ_DESC_GROUP,	/* NOTE(review): presumably data-VQ descriptor area group; confirm */
	MLX5_VDPA_NUMVQ_GROUPS		/* number of groups -- must stay last */
};
81 
/* Number of address spaces the device exposes (sizes mres.mr[] below). */
enum {
	MLX5_VDPA_NUM_AS = 2
};
85 
/* Bookkeeping for the device's memory regions: the per-address-space active
 * MRs, the group-to-ASID mapping, and a deferred (garbage-collected)
 * destruction path driven by a dedicated workqueue.
 */
struct mlx5_vdpa_mr_resources {
	struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];	/* active MR per address space */
	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];	/* ASID assigned to each VQ group */

	/* Pre-deletion mr list */
	struct list_head mr_list_head;

	/* Deferred mr list */
	struct list_head mr_gc_list_head;
	struct workqueue_struct *wq_gc;		/* workqueue running the GC work below */
	struct delayed_work gc_dwork_ent;	/* delayed work draining mr_gc_list_head -- TODO confirm */

	struct mutex lock;	/* protects the lists and mr[]/group2asid -- NOTE(review): confirm scope */

	atomic_t shutdown;	/* set on teardown -- NOTE(review): presumably stops GC rearm; confirm */
};
102 
/* Core per-device state shared by the mlx5 vDPA driver; embeds the generic
 * struct vdpa_device as its first member.
 */
struct mlx5_vdpa_dev {
	struct vdpa_device vdev;	/* generic vdpa device (must stay first for container_of) -- TODO confirm */
	struct mlx5_core_dev *mdev;	/* underlying mlx5 core device */
	struct mlx5_vdpa_resources res;	/* per-device firmware resources */
	struct mlx5_vdpa_mr_resources mres;	/* memory-region bookkeeping */

	u64 mlx_features;	/* features the device can offer -- TODO confirm vs. negotiated */
	u64 actual_features;	/* features negotiated with the driver */
	u8 status;		/* virtio device status byte */
	u32 max_vqs;		/* maximum number of virtqueues supported */
	u16 max_idx;		/* NOTE(review): presumably highest usable vq index; confirm */
	u32 generation;		/* config generation counter -- TODO confirm */

	struct mlx5_control_vq cvq;	/* software-processed control VQ state */
	struct workqueue_struct *wq;	/* workqueue for deferred device work */
	bool suspended;		/* device is in the suspended state */

	struct mlx5_async_ctx async_ctx;	/* context for async firmware commands */
};
122 
/* One asynchronous firmware command; batches of these are issued through
 * mlx5_vdpa_exec_async_cmds().
 */
struct mlx5_vdpa_async_cmd {
	int err;			/* completion status of this command */
	struct mlx5_async_work cb_work;	/* async completion callback work */
	struct completion cmd_done;	/* signaled when the command finishes */

	void *in;		/* command input mailbox */
	size_t inlen;		/* size of @in in bytes */

	void *out;		/* command output mailbox */
	size_t outlen;		/* size of @out in bytes */
};
134 
/* TIS/RQT/TIR object and transport-domain helpers (thin firmware-command
 * wrappers; 0 on success, negative errno on failure -- TODO confirm).
 */
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn);
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn);
int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn);
void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn);
int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn);
void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn);

/* Per-device resource setup/teardown (fills/clears mvdev->res). */
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev);

/* Memory-key and memory-region management (see mvdev->mres). */
int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
			  int inlen);
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
					 struct vhost_iotlb *iotlb);
int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_clean_mrs(struct mlx5_vdpa_dev *mvdev);
/* Reference counting of an MR (pairs with mr->refcount). */
void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
		      struct mlx5_vdpa_mr *mr);
void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
		      struct mlx5_vdpa_mr *mr);
void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
			 struct mlx5_vdpa_mr *mr,
			 unsigned int asid);
int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
				struct vhost_iotlb *iotlb,
				unsigned int asid);
int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev);
int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid);

/* Issue @num_cmds async firmware commands and wait for their completion --
 * TODO confirm blocking semantics against resources.c.
 */
int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev,
			      struct mlx5_vdpa_async_cmd *cmds,
			      int num_cmds);
169 
/* Error-level device log, prefixed with function, line and current pid. */
#define mlx5_vdpa_err(__dev, format, ...)                                                          \
	dev_err((__dev)->mdev->device, "%s:%d:(pid %d) error: " format, __func__, __LINE__,        \
		 current->pid, ##__VA_ARGS__)
173 
174 
/* Warning-level device log, prefixed with function, line and current pid. */
#define mlx5_vdpa_warn(__dev, format, ...)                                                         \
	dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__,     \
		 current->pid, ##__VA_ARGS__)
178 
/* Info-level device log, prefixed with function, line and current pid. */
#define mlx5_vdpa_info(__dev, format, ...)                                                         \
	dev_info((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__,             \
		 current->pid, ##__VA_ARGS__)
182 
/* Debug-level device log, prefixed with function, line and current pid.
 *
 * Fixed: the original expanded to dev_debug(), which does not exist in the
 * kernel's dev_printk family -- the debug helper is dev_dbg(). The typo was
 * latent only because the macro had no call sites; any use would have
 * failed to compile.
 */
#define mlx5_vdpa_dbg(__dev, format, ...)                                                          \
	dev_dbg((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__,              \
		current->pid, ##__VA_ARGS__)
186 
187 #endif /* __MLX5_VDPA_H__ */
188