// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/iova.h>
#include <linux/mlx5/driver.h>
#include "mlx5_vdpa.h"

static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	int err;

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	MLX5_SET(alloc_pd_in, in, uid, uid);

	err = mlx5_cmd_exec_inout(mdev, alloc_pd, in, out);
	if (!err)
		*pdn = MLX5_GET(alloc_pd_out, out, pd);

	return err;
}

static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
	struct mlx5_core_dev *mdev = dev->mdev;

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, uid, uid);
	return mlx5_cmd_exec_in(mdev, dealloc_pd, in);
}

static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(mdev, query_special_contexts, in, out);
	if (!err)
		*null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey);
	return err;
}

static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
{
	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
	int inlen;
	void *in;
	int err;

	/* The device accepts UMEM commands with uid 0; no user context needed. */
	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0))
		return 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
		return -EOPNOTSUPP;

	inlen = MLX5_ST_SZ_BYTES(create_uctx_in);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(create_uctx_in, in, uctx.cap, MLX5_UCTX_CAP_RAW_TX);

	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
	kfree(in);
	if (!err)
		*uid = MLX5_GET(create_uctx_out, out, uid);

	return err;
}

static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
{
	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};

	if (!uid)
		return;

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out));
}

int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn)
{
	u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
	int err;

	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
	MLX5_SET(create_tis_in, in, uid, mvdev->res.uid);
	err = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out);
	if (!err)
		*tisn = MLX5_GET(create_tis_out, out, tisn);

	return err;
}

void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in);
}
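/*
 * Illustrative sketch, not part of the original file: a typical caller of
 * the TIS helpers above zeroes a create_tis_in buffer, points the TIS
 * context at a transport domain obtained from
 * mlx5_vdpa_alloc_transport_domain(), and lets mlx5_vdpa_create_tis()
 * stamp the opcode and uid. The function name and the tdn parameter are
 * hypothetical.
 */
static inline int example_create_tis(struct mlx5_vdpa_dev *mvdev, u32 tdn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	/* Bind the TIS to the vdpa device's transport domain. */
	MLX5_SET(tisc, tisc, transport_domain, tdn);
	return mlx5_vdpa_create_tis(mvdev, in, tisn);
}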
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn)
{
	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
	int err;

	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
	MLX5_SET(create_rqt_in, in, uid, mvdev->res.uid);
	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
	if (!err)
		*rqtn = MLX5_GET(create_rqt_out, out, rqtn);

	return err;
}

int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn)
{
	u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {};

	MLX5_SET(modify_rqt_in, in, uid, mvdev->res.uid);
	MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	return mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
}

void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in);
}

int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn)
{
	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
	int err;

	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	MLX5_SET(create_tir_in, in, uid, mvdev->res.uid);
	err = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out);
	if (!err)
		*tirn = MLX5_GET(create_tir_out, out, tirn);

	return err;
}

void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in);
}

int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
	int err;

	MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid);

	err = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out);
	if (!err)
		*tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);

	return err;
}

void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};

	MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid);
	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
	mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
}

int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
			  int inlen)
{
	u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
	u32 mkey_index;
	int err;

	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);

	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, lout, sizeof(lout));
	if (err)
		return err;

	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
	*mkey = mlx5_idx_to_mkey(mkey_index);
	return 0;
}
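/*
 * Illustrative sketch, not part of the original file: callers of
 * mlx5_vdpa_create_mkey() build the memory key context (mkc) themselves;
 * the helper only stamps the opcode and uid. The skeleton below shows the
 * calling convention for a physical-address-mode key scoped to the vdpa
 * PD. The function name is hypothetical, and real users also program the
 * start address, length, and any translation entries they need.
 */
static inline int example_create_pa_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);

	err = mlx5_vdpa_create_mkey(mvdev, mkey, in, inlen);
	kfree(in);
	return err;
}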
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey)
{
	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};

	MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey));
	return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}

static int init_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
	/* The control VQ is emulated in software; back its vringh with an iotlb. */
	mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0);
	if (!mvdev->cvq.iotlb)
		return -ENOMEM;

	spin_lock_init(&mvdev->cvq.iommu_lock);
	vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);

	return 0;
}

static void cleanup_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
	vhost_iotlb_free(mvdev->cvq.iotlb);
}

int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
	u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
	struct mlx5_vdpa_resources *res = &mvdev->res;
	struct mlx5_core_dev *mdev = mvdev->mdev;
	u64 kick_addr;
	int err;

	if (res->valid) {
		mlx5_vdpa_warn(mvdev, "resources already allocated\n");
		return -EINVAL;
	}
	res->uar = mlx5_get_uars_page(mdev);
	if (IS_ERR(res->uar)) {
		err = PTR_ERR(res->uar);
		goto err_uars;
	}

	err = create_uctx(mvdev, &res->uid);
	if (err)
		goto err_uctx;

	err = alloc_pd(mvdev, &res->pdn, res->uid);
	if (err)
		goto err_pd;

	err = get_null_mkey(mvdev, &res->null_mkey);
	if (err)
		goto err_key;

	/* Map the virtqueue doorbell page from the device BAR. */
	kick_addr = mdev->bar_addr + offset;
	res->phys_kick_addr = kick_addr;

	res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
	if (!res->kick_addr) {
		err = -ENOMEM;
		goto err_key;
	}

	err = init_ctrl_vq(mvdev);
	if (err)
		goto err_ctrl;

	res->valid = true;

	return 0;

err_ctrl:
	iounmap(res->kick_addr);
err_key:
	dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
	destroy_uctx(mvdev, res->uid);
err_uctx:
	mlx5_put_uars_page(mdev, res->uar);
err_uars:
	return err;
}

void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_resources *res = &mvdev->res;

	if (!res->valid)
		return;

	cleanup_ctrl_vq(mvdev);
	iounmap(res->kick_addr);
	res->kick_addr = NULL;
	dealloc_pd(mvdev, res->pdn, res->uid);
	destroy_uctx(mvdev, res->uid);
	mlx5_put_uars_page(mvdev->mdev, res->uar);
	res->valid = false;
}

static void virtqueue_cmd_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5_vdpa_async_cmd *cmd =
		container_of(context, struct mlx5_vdpa_async_cmd, cb_work);

	cmd->err = mlx5_cmd_check(context->ctx->dev, status, cmd->in, cmd->out);
	complete(&cmd->cmd_done);
}
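/*
 * Issue one command on the async command interface. -EBUSY from
 * mlx5_cmd_exec_cb() means the command queue is saturated: if some of our
 * own commands are still in flight, wait for the oldest one to complete
 * and retry; otherwise the queue is occupied by other users, so fall back
 * to the synchronous API for this command and count it as completed.
 */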
static int issue_async_cmd(struct mlx5_vdpa_dev *mvdev,
			   struct mlx5_vdpa_async_cmd *cmds,
			   int issued,
			   int *completed)
{
	struct mlx5_vdpa_async_cmd *cmd = &cmds[issued];
	int err;

retry:
	err = mlx5_cmd_exec_cb(&mvdev->async_ctx,
			       cmd->in, cmd->inlen,
			       cmd->out, cmd->outlen,
			       virtqueue_cmd_callback,
			       &cmd->cb_work);
	if (err == -EBUSY) {
		if (*completed < issued) {
			/* Throttled by own commands: wait for oldest completion. */
			wait_for_completion(&cmds[*completed].cmd_done);
			(*completed)++;

			goto retry;
		} else {
			/* Throttled by external commands: switch to sync API. */
			err = mlx5_cmd_exec(mvdev->mdev,
					    cmd->in, cmd->inlen,
					    cmd->out, cmd->outlen);
			if (!err)
				(*completed)++;
		}
	}

	return err;
}

int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev,
			      struct mlx5_vdpa_async_cmd *cmds,
			      int num_cmds)
{
	int completed = 0;
	int issued = 0;
	int err = 0;

	for (int i = 0; i < num_cmds; i++)
		init_completion(&cmds[i].cmd_done);

	while (issued < num_cmds) {
		err = issue_async_cmd(mvdev, cmds, issued, &completed);
		if (err) {
			mlx5_vdpa_err(mvdev, "error issuing command %d of %d: %d\n",
				      issued, num_cmds, err);
			break;
		}

		issued++;
	}

	/* Drain everything already issued, even after an issue failure. */
	while (completed < issued)
		wait_for_completion(&cmds[completed++].cmd_done);

	return err;
}
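/*
 * Illustrative batching sketch, not part of the original file: each entry
 * must carry a fully built command payload -- mlx5_vdpa_exec_async_cmds()
 * only drives execution and completion, it does not set opcodes or uids.
 * Per-command status lands in cmds[i].err; the return value reports the
 * first issue failure, if any. The function name and the ins/outs buffer
 * arrays are hypothetical.
 */
static inline int example_exec_batch(struct mlx5_vdpa_dev *mvdev,
				     void **ins, int inlen,
				     void **outs, int outlen, int num)
{
	struct mlx5_vdpa_async_cmd *cmds;
	int err;
	int i;

	cmds = kvcalloc(num, sizeof(*cmds), GFP_KERNEL);
	if (!cmds)
		return -ENOMEM;

	/* Point each async command at its caller-provided in/out buffers. */
	for (i = 0; i < num; i++) {
		cmds[i].in = ins[i];
		cmds[i].inlen = inlen;
		cmds[i].out = outs[i];
		cmds[i].outlen = outlen;
	}

	err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, num);

	/* Surface the first per-command firmware error, if issuing succeeded. */
	for (i = 0; !err && i < num; i++)
		err = cmds[i].err;

	kvfree(cmds);
	return err;
}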