// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/mlx5/qp.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
#include "srq.h"

static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_frag_buf_get_wqe(&srq->fbc, n);
}

static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_srq_attr *in,
			   struct ib_udata *udata, int buf_size)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd = {};
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	size_t ucmdlen;
	int err;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	ucmdlen = min(udata->inlen, sizeof(ucmd));

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	if (in->type != IB_SRQT_BASIC) {
		err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}
	in->umem = srq->umem;

	err = mlx5_ib_db_map_user(ucontext, ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_umem;
	}

	in->uid = (in->type != IB_SRQT_XRC) ? to_mpd(pd)->uid : 0;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type != IB_SRQT_BASIC)
		in->user_index = uidx;

	return 0;

err_umem:
	ib_umem_release(srq->umem);

	return err;
}
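
/*
 * Kernel-path buffer setup.  Each WQE begins with an mlx5_wqe_srq_next_seg
 * whose next_wqe_index points at the next entry, so the init loop below
 * threads every WQE into a circular free list: mlx5_ib_post_srq_recv()
 * pops entries from srq->head and mlx5_ib_free_srq_wqe() pushes completed
 * WQEs back at srq->tail.
 */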
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_srq_attr *in, int buf_size)
{
	int err;
	int i;
	struct mlx5_wqe_srq_next_seg *next;

	err = mlx5_db_alloc(dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	if (mlx5_frag_buf_alloc_node(dev->mdev, buf_size, &srq->buf,
				     dev->mdev->priv.numa_node)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}

	mlx5_init_fbc(srq->buf.frags, srq->msrq.wqe_shift, ilog2(srq->msrq.max),
		      &srq->fbc);

	srq->head = 0;
	srq->tail = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
	in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_frag_array(&srq->buf, in->pas);

	srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = 0;

	in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type != IB_SRQT_BASIC)
		in->user_index = MLX5_IB_DEFAULT_UIDX;

	return 0;

err_in:
	kvfree(in->pas);

err_buf:
	mlx5_frag_buf_free(dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(dev->mdev, &srq->db);
	return err;
}

static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			     struct ib_udata *udata)
{
	mlx5_ib_db_unmap_user(
		rdma_udata_to_drv_context(
			udata,
			struct mlx5_ib_ucontext,
			ibucontext),
		&srq->db);
	ib_umem_release(srq->umem);
}

static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kvfree(srq->wrid);
	mlx5_frag_buf_free(dev->mdev, &srq->buf);
	mlx5_db_free(dev->mdev, &srq->db);
}
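
/*
 * SRQ creation entry point for both the user and kernel paths.  One extra
 * WQE slot is allocated (max_wr + 1, rounded up to a power of two) because
 * the free list treats head == tail as "SRQ full"; the spare entry is
 * hidden again by returning max_wr = msrq.max - 1.  The WQE stride is one
 * next-segment header plus max_sge data segments, rounded up to a power of
 * two no smaller than 32 bytes: with the 16-byte mlx5 segments, max_sge = 3
 * gives 16 + 3 * 16 = 64 bytes, i.e. wqe_shift = 6.  The SRQ context takes
 * the stride as log2 of 16-byte units, hence the wqe_shift - 4 passed to
 * the firmware command.
 */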
int mlx5_ib_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_srq->device);
	struct mlx5_ib_srq *srq = to_msrq(ib_srq);
	size_t desc_size;
	size_t buf_size;
	int err;
	struct mlx5_srq_attr in = {};
	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
	__u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
			   sizeof(struct mlx5_wqe_data_seg);

	if (init_attr->srq_type != IB_SRQT_BASIC &&
	    init_attr->srq_type != IB_SRQT_XRC &&
	    init_attr->srq_type != IB_SRQT_TM)
		return -EOPNOTSUPP;

	/* Sanity check SRQ and sge size before proceeding */
	if (init_attr->attr.max_wr >= max_srq_wqes ||
	    init_attr->attr.max_sge > max_sge_sz) {
		mlx5_ib_dbg(dev, "max_wr %d, wr_cap %d, max_sge %d, sge_cap %d\n",
			    init_attr->attr.max_wr, max_srq_wqes,
			    init_attr->attr.max_sge, max_sge_sz);
		return -EINVAL;
	}

	err = mlx5_ib_dev_res_cq_init(dev);
	if (err)
		return err;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
		return -EINVAL;

	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(size_t, 32, desc_size);
	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
		return -EINVAL;

	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
				     sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	if (buf_size < desc_size)
		return -EINVAL;

	in.type = init_attr->srq_type;

	if (udata)
		err = create_srq_user(ib_srq->pd, srq, &in, udata, buf_size);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size);

	if (err) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     udata ? "user" : "kernel", err);
		return err;
	}

	in.log_size = ilog2(srq->msrq.max);
	in.wqe_shift = srq->msrq.wqe_shift - 4;
	if (srq->wq_sig)
		in.flags |= MLX5_SRQ_FLAG_WQ_SIG;

	if (init_attr->srq_type == IB_SRQT_XRC && init_attr->ext.xrc.xrcd)
		in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
	else
		in.xrcd = dev->devr.xrcdn0;

	if (init_attr->srq_type == IB_SRQT_TM) {
		in.tm_log_list_size =
			ilog2(init_attr->ext.tag_matching.max_num_tags) + 1;
		if (in.tm_log_list_size >
		    MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) {
			mlx5_ib_dbg(dev, "TM SRQ max_num_tags exceeding limit\n");
			err = -EINVAL;
			goto err_usr_kern_srq;
		}
		in.flags |= MLX5_SRQ_FLAG_RNDV;
	}

	if (ib_srq_has_cq(init_attr->srq_type))
		in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;
	else
		in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;

	in.pd = to_mpd(ib_srq->pd)->pdn;
	in.db_record = srq->db.dma;
	err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
	kvfree(in.pas);
	if (err) {
		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
		goto err_usr_kern_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (udata) {
		struct mlx5_ib_create_srq_resp resp = {
			.srqn = srq->msrq.srqn,
		};

		if (ib_copy_to_udata(udata, &resp, min(udata->outlen,
				     sizeof(resp)))) {
			mlx5_ib_dbg(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}
	}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return 0;

err_core:
	mlx5_cmd_destroy_srq(dev, &srq->msrq);

err_usr_kern_srq:
	if (udata)
		destroy_srq_user(ib_srq->pd, srq, udata);
	else
		destroy_srq_kernel(dev, srq);

	return err;
}

int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs yet */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}
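
/*
 * Arming above programs the SRQ limit watermark (lwm): when the number of
 * outstanding receive WQEs drops below srq_limit, the device fires
 * MLX5_EVENT_TYPE_SRQ_RQ_LIMIT, which mlx5_ib_srq_event() maps to
 * IB_EVENT_SRQ_LIMIT_REACHED.  mlx5_ib_query_srq() below reads the current
 * watermark back as out->lwm.
 */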
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	struct mlx5_srq_attr *out;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = out->lwm;
	srq_attr->max_wr = srq->msrq.max - 1;
	srq_attr->max_sge = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}

int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);
	int ret;

	ret = mlx5_cmd_destroy_srq(dev, &msrq->msrq);
	if (ret)
		return ret;

	if (udata)
		destroy_srq_user(srq->pd, msrq, udata);
	else
		destroy_srq_kernel(dev, msrq);
	return 0;
}

void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	/* Return the WQE to the tail of the free list. */
	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			/* SRQ is full */
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
		}

		/* Terminate the scatter list if it is shorter than the WQE. */
		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey = dev->mkeys.terminate_scatter_list_mkey;
			scat[i].addr = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}
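
/*
 * Illustrative sketch only, not part of this driver: how a kernel ULP might
 * post a single-SGE receive through the verbs layer, which lands in
 * mlx5_ib_post_srq_recv() above.  buf, buf_len, dma_addr, lkey and srq are
 * hypothetical caller-supplied names.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = buf_len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = (uintptr_t)buf,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	const struct ib_recv_wr *bad_wr;
 *	int err = ib_post_srq_recv(srq, &wr, &bad_wr);
 */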