1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
3
4 #include <linux/module.h>
5 #include <linux/printk.h>
6 #include <rdma/ib_addr.h>
7 #include <rdma/ib_cache.h>
8 #include <rdma/ib_user_verbs.h>
9 #include <ionic_api.h>
10
11 #include "ionic_fw.h"
12 #include "ionic_ibdev.h"
13
/* Mark ECN-capable: set the two low (ECN) bits of tos/traffic class to
 * ECT(0) (binary 10).
 */
#define ionic_set_ecn(tos) (((tos) | 2u) & ~1u)
/* Clear both ECN bits of tos/traffic class (not ECN-capable) */
#define ionic_clear_ecn(tos) ((tos) & ~3u)
16
ionic_validate_qdesc(struct ionic_qdesc * q)17 static int ionic_validate_qdesc(struct ionic_qdesc *q)
18 {
19 if (!q->addr || !q->size || !q->mask ||
20 !q->depth_log2 || !q->stride_log2)
21 return -EINVAL;
22
23 if (q->addr & (PAGE_SIZE - 1))
24 return -EINVAL;
25
26 if (q->mask != BIT(q->depth_log2) - 1)
27 return -EINVAL;
28
29 if (q->size < BIT_ULL(q->depth_log2 + q->stride_log2))
30 return -EINVAL;
31
32 return 0;
33 }
34
static u32 ionic_get_eqid(struct ionic_ibdev *dev, u32 comp_vector, u8 udma_idx)
{
	/* One EQ per vector per udma; the first EQ group is reserved for
	 * async events, so completion vectors map to groups starting at one.
	 */
	u32 eqs_per_udma = dev->lif_cfg.eq_count / dev->lif_cfg.udma_count;
	u32 grp = comp_vector % (eqs_per_udma - 1) + 1;

	return grp * dev->lif_cfg.udma_count + udma_idx;
}
44
/* Reserve a cq bit id in the caller's udma partition and translate it to
 * the device cqid.  Returns 0 on success or a negative errno.
 */
static int ionic_get_cqid(struct ionic_ibdev *dev, u32 *cqid, u8 udma_idx)
{
	unsigned int per_udma, first, last;
	int bitid;

	per_udma = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
	first = per_udma * udma_idx;
	last = first + per_udma;

	bitid = ionic_resid_get_shared(&dev->inuse_cqid, first, last);
	if (bitid < 0)
		return bitid;

	/* cq_base is zero or a multiple of two queue groups */
	*cqid = dev->lif_cfg.cq_base +
		ionic_bitid_to_qid(bitid, dev->lif_cfg.udma_qgrp_shift,
				   dev->half_cqid_udma_shift);

	return 0;
}
66
static void ionic_put_cqid(struct ionic_ibdev *dev, u32 cqid)
{
	u32 bitid;

	/* Reverse the cqid translation done by ionic_get_cqid() */
	bitid = ionic_qid_to_bitid(cqid - dev->lif_cfg.cq_base,
				   dev->lif_cfg.udma_qgrp_shift,
				   dev->half_cqid_udma_shift);
	ionic_resid_put(&dev->inuse_cqid, bitid);
}
75
/*
 * ionic_create_cq_common() - Set up one ionic_cq half of a vcq.
 * @vcq:	Containing verbs cq.
 * @buf:	Out: page table buffer describing the cq queue memory.
 * @attr:	Verbs cq attributes (cqe count, comp_vector).
 * @ctx:	User context, or NULL for a kernel cq.
 * @udata:	Non-NULL for a user cq backed by req_cq's memory.
 * @req_cq:	User queue descriptor (consulted only with udata).
 * @resp_cqid:	Out: cqid reported to userspace (written only with udata).
 * @udma_idx:	Which udma half of the vcq to set up.
 *
 * Allocates a cqid, binds the cq to an event queue, sets up the queue
 * memory (user umem or kernel dma queue), builds the page table buf,
 * and publishes the cq in the device cq table.
 *
 * Return: 0 on success, negative errno on failure (fully unwound).
 */
int ionic_create_cq_common(struct ionic_vcq *vcq,
			   struct ionic_tbl_buf *buf,
			   const struct ib_cq_init_attr *attr,
			   struct ionic_ctx *ctx,
			   struct ib_udata *udata,
			   struct ionic_qdesc *req_cq,
			   __u32 *resp_cqid,
			   int udma_idx)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(vcq->ibcq.device);
	struct ionic_cq *cq = &vcq->cq[udma_idx];
	void *entry;
	int rc;

	cq->vcq = vcq;

	/* depth plus grace entries must fit in 16 bits */
	if (attr->cqe < 1 || attr->cqe + IONIC_CQ_GRACE > 0xffff) {
		rc = -EINVAL;
		goto err_args;
	}

	rc = ionic_get_cqid(dev, &cq->cqid, udma_idx);
	if (rc)
		goto err_args;

	cq->eqid = ionic_get_eqid(dev, attr->comp_vector, udma_idx);

	spin_lock_init(&cq->lock);
	INIT_LIST_HEAD(&cq->poll_sq);
	INIT_LIST_HEAD(&cq->flush_sq);
	INIT_LIST_HEAD(&cq->flush_rq);

	if (udata) {
		/* User cq: pin the queue memory described by req_cq */
		rc = ionic_validate_qdesc(req_cq);
		if (rc)
			goto err_qdesc;

		cq->umem = ib_umem_get(&dev->ibdev, req_cq->addr, req_cq->size,
				       IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(cq->umem)) {
			rc = PTR_ERR(cq->umem);
			goto err_qdesc;
		}

		/* no kernel mapping; only the geometry is tracked */
		cq->q.ptr = NULL;
		cq->q.size = req_cq->size;
		cq->q.mask = req_cq->mask;
		cq->q.depth_log2 = req_cq->depth_log2;
		cq->q.stride_log2 = req_cq->stride_log2;

		*resp_cqid = cq->cqid;
	} else {
		/* Kernel cq: allocate dma-able queue memory */
		rc = ionic_queue_init(&cq->q, dev->lif_cfg.hwdev,
				      attr->cqe + IONIC_CQ_GRACE,
				      sizeof(struct ionic_v1_cqe));
		if (rc)
			goto err_q_init;

		ionic_queue_dbell_init(&cq->q, cq->cqid);
		cq->color = true;
		cq->credit = cq->q.mask;
	}

	rc = ionic_pgtbl_init(dev, buf, cq->umem, cq->q.dma, 1, PAGE_SIZE);
	if (rc)
		goto err_pgtbl_init;

	init_completion(&cq->cq_rel_comp);
	kref_init(&cq->cq_kref);

	/* publish the cq for lookup by event handlers */
	entry = xa_store_irq(&dev->cq_tbl, cq->cqid, cq, GFP_KERNEL);
	if (entry) {
		/* a non-error old entry means the cqid was already occupied */
		if (!xa_is_err(entry))
			rc = -EINVAL;
		else
			rc = xa_err(entry);

		goto err_xa;
	}

	return 0;

err_xa:
	ionic_pgtbl_unbuf(dev, buf);
err_pgtbl_init:
	if (!udata)
		ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
err_q_init:
	/* NOTE(review): relies on cq->umem being NULL when ib_umem_get()
	 * was never reached — presumably the vcq is zero-allocated by the
	 * caller; verify.
	 */
	if (cq->umem)
		ib_umem_release(cq->umem);
err_qdesc:
	ionic_put_cqid(dev, cq->cqid);
err_args:
	cq->vcq = NULL;

	return rc;
}
173
/*
 * ionic_destroy_cq_common() - Tear down one ionic_cq half of a vcq.
 *
 * Unpublishes the cq from the device table, drops the initial reference
 * and waits for any remaining users to release theirs, then frees the
 * queue memory and returns the cqid.  A cq that was never set up
 * (cq->vcq == NULL) is ignored.
 */
void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq)
{
	if (!cq->vcq)
		return;

	xa_erase_irq(&dev->cq_tbl, cq->cqid);

	/* wait until all outstanding references are gone */
	kref_put(&cq->cq_kref, ionic_cq_complete);
	wait_for_completion(&cq->cq_rel_comp);

	if (cq->umem)
		ib_umem_release(cq->umem);
	else
		ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);

	ionic_put_cqid(dev, cq->cqid);

	/* mark the cq as torn down */
	cq->vcq = NULL;
}
193
ionic_validate_qdesc_zero(struct ionic_qdesc * q)194 static int ionic_validate_qdesc_zero(struct ionic_qdesc *q)
195 {
196 if (q->addr || q->size || q->mask || q->depth_log2 || q->stride_log2)
197 return -EINVAL;
198
199 return 0;
200 }
201
/* Reserve a pd id; returns 0 or a negative errno */
static int ionic_get_pdid(struct ionic_ibdev *dev, u32 *pdid)
{
	int bitid = ionic_resid_get(&dev->inuse_pdid);

	if (bitid < 0)
		return bitid;

	*pdid = bitid;

	return 0;
}
213
/* Reserve an ah id; returns 0 or a negative errno */
static int ionic_get_ahid(struct ionic_ibdev *dev, u32 *ahid)
{
	int bitid = ionic_resid_get(&dev->inuse_ahid);

	if (bitid < 0)
		return bitid;

	*ahid = bitid;

	return 0;
}
225
/* Reserve an mr index and combine it with the next key to form an mrid */
static int ionic_get_mrid(struct ionic_ibdev *dev, u32 *mrid)
{
	int index;

	/* wrap to 1, skip reserved lkey */
	index = ionic_resid_get_shared(&dev->inuse_mrid, 1,
				       dev->inuse_mrid.inuse_size);
	if (index < 0)
		return index;

	*mrid = ionic_mrid(index, dev->next_mrkey++);

	return 0;
}
239
/*
 * ionic_get_gsi_qpid() - Reserve the well-known GSI qpid for this device.
 * @dev:	Ionic rdma device.
 * @qpid:	Out: always IB_QPT_GSI on success.
 *
 * Return: 0 on success, negative errno if the id is unavailable.
 */
static int ionic_get_gsi_qpid(struct ionic_ibdev *dev, u32 *qpid)
{
	/* rc was previously dead-initialized to 0; it is always assigned
	 * by the call below before being read.
	 */
	int rc;

	rc = ionic_resid_get_shared(&dev->inuse_qpid, IB_QPT_GSI,
				    IB_QPT_GSI + 1);
	if (rc < 0)
		return rc;

	*qpid = IB_QPT_GSI;

	return 0;
}
251
/*
 * ionic_get_qpid() - Reserve a qpid from one of the allowed udmas.
 * @dev:	Ionic rdma device.
 * @qpid:	Out: allocated device qpid.
 * @udma_idx:	Out: udma index the qpid was taken from.
 * @udma_mask:	Bitmask of acceptable udma indices.
 *
 * Alternates the preferred udma between calls to balance allocations
 * across the udma halves, then tries each allowed udma in that order.
 *
 * Return: 0 on success, negative errno if no id could be reserved.
 */
static int ionic_get_qpid(struct ionic_ibdev *dev, u32 *qpid,
			  u8 *udma_idx, u8 udma_mask)
{
	unsigned int size, base, bound;
	int udma_i, udma_x, udma_ix;
	int rc = -EINVAL;

	/* flip the starting udma for the next caller */
	udma_x = dev->next_qpid_udma_idx;

	dev->next_qpid_udma_idx ^= dev->lif_cfg.udma_count - 1;

	for (udma_i = 0; udma_i < dev->lif_cfg.udma_count; ++udma_i) {
		/* xor walks every udma exactly once, starting at udma_x */
		udma_ix = udma_i ^ udma_x;

		if (!(udma_mask & BIT(udma_ix)))
			continue;

		/* each udma owns an equal slice of the qpid space */
		size = dev->lif_cfg.qp_count / dev->lif_cfg.udma_count;
		base = size * udma_ix;
		bound = base + size;

		/* skip reserved SMI and GSI qpids in group zero */
		if (!base)
			base = 2;

		rc = ionic_resid_get_shared(&dev->inuse_qpid, base, bound);
		if (rc >= 0) {
			*qpid = ionic_bitid_to_qid(rc,
						   dev->lif_cfg.udma_qgrp_shift,
						   dev->half_qpid_udma_shift);
			*udma_idx = udma_ix;

			rc = 0;
			break;
		}
	}

	return rc;
}
291
/* Reserve a doorbell id and compute the physical address of its
 * doorbell page.  Returns 0 or a negative errno.
 */
static int ionic_get_dbid(struct ionic_ibdev *dev, u32 *dbid, phys_addr_t *addr)
{
	int dbpage_num;
	int bitid;

	/* wrap to 1, skip kernel reserved */
	bitid = ionic_resid_get_shared(&dev->inuse_dbid, 1,
				       dev->inuse_dbid.inuse_size);
	if (bitid < 0)
		return bitid;

	/* page index within the bar: dbid_count pages per lif, then ours */
	dbpage_num = (dev->lif_cfg.lif_hw_index * dev->lif_cfg.dbid_count) +
		     bitid;
	*addr = dev->lif_cfg.db_phys + ((phys_addr_t)dbpage_num << PAGE_SHIFT);

	*dbid = bitid;

	return 0;
}
309
/* Return a pd id to the pool */
static void ionic_put_pdid(struct ionic_ibdev *dev, u32 pdid)
{
	ionic_resid_put(&dev->inuse_pdid, pdid);
}
314
/* Return an ah id to the pool */
static void ionic_put_ahid(struct ionic_ibdev *dev, u32 ahid)
{
	ionic_resid_put(&dev->inuse_ahid, ahid);
}
319
/* Return an mr id to the pool; only the index part is pooled */
static void ionic_put_mrid(struct ionic_ibdev *dev, u32 mrid)
{
	ionic_resid_put(&dev->inuse_mrid, ionic_mrid_index(mrid));
}
324
static void ionic_put_qpid(struct ionic_ibdev *dev, u32 qpid)
{
	/* Reverse the qpid translation done by ionic_get_qpid() */
	u32 bitid = ionic_qid_to_bitid(qpid,
				       dev->lif_cfg.udma_qgrp_shift,
				       dev->half_qpid_udma_shift);

	ionic_resid_put(&dev->inuse_qpid, bitid);
}
333
/* Return a doorbell id to the pool */
static void ionic_put_dbid(struct ionic_ibdev *dev, u32 dbid)
{
	ionic_resid_put(&dev->inuse_dbid, dbid);
}
338
339 static struct rdma_user_mmap_entry*
ionic_mmap_entry_insert(struct ionic_ctx * ctx,unsigned long size,unsigned long pfn,u8 mmap_flags,u64 * offset)340 ionic_mmap_entry_insert(struct ionic_ctx *ctx, unsigned long size,
341 unsigned long pfn, u8 mmap_flags, u64 *offset)
342 {
343 struct ionic_mmap_entry *entry;
344 int rc;
345
346 entry = kzalloc_obj(*entry);
347 if (!entry)
348 return NULL;
349
350 entry->size = size;
351 entry->pfn = pfn;
352 entry->mmap_flags = mmap_flags;
353
354 rc = rdma_user_mmap_entry_insert(&ctx->ibctx, &entry->rdma_entry,
355 entry->size);
356 if (rc) {
357 kfree(entry);
358 return NULL;
359 }
360
361 if (offset)
362 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
363
364 return &entry->rdma_entry;
365 }
366
/*
 * ionic_alloc_ucontext() - Set up driver state for a user context.
 *
 * Reserves a doorbell id for the context, exposes its doorbell page to
 * userspace through an rdma mmap entry, and reports device parameters
 * in the uverbs response.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
	struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
	struct ionic_ctx_resp resp = {};
	struct ionic_ctx_req req;
	phys_addr_t db_phys = 0;
	int rc;

	rc = ib_copy_from_udata(&req, udata, sizeof(req));
	if (rc)
		return rc;

	/* try to allocate dbid for user ctx */
	rc = ionic_get_dbid(dev, &ctx->dbid, &db_phys);
	if (rc < 0)
		return rc;

	ibdev_dbg(&dev->ibdev, "user space dbid %u\n", ctx->dbid);

	/* map a whole page; the in-page offset is reported separately */
	ctx->mmap_dbell = ionic_mmap_entry_insert(ctx, PAGE_SIZE,
						  PHYS_PFN(db_phys), 0, NULL);
	if (!ctx->mmap_dbell) {
		rc = -ENOMEM;
		goto err_mmap_dbell;
	}

	resp.page_shift = PAGE_SHIFT;

	/* doorbell offset within the mapped page */
	resp.dbell_offset = db_phys & ~PAGE_MASK;

	resp.version = dev->lif_cfg.rdma_version;
	resp.qp_opcodes = dev->lif_cfg.qp_opcodes;
	resp.admin_opcodes = dev->lif_cfg.admin_opcodes;

	resp.sq_qtype = dev->lif_cfg.sq_qtype;
	resp.rq_qtype = dev->lif_cfg.rq_qtype;
	resp.cq_qtype = dev->lif_cfg.cq_qtype;
	resp.admin_qtype = dev->lif_cfg.aq_qtype;
	resp.max_stride = dev->lif_cfg.max_stride;
	resp.max_spec = IONIC_SPEC_HIGH;

	resp.udma_count = dev->lif_cfg.udma_count;
	resp.expdb_mask = dev->lif_cfg.expdb_mask;

	/* advertise express doorbell support per queue type */
	if (dev->lif_cfg.sq_expdb)
		resp.expdb_qtypes |= IONIC_EXPDB_SQ;
	if (dev->lif_cfg.rq_expdb)
		resp.expdb_qtypes |= IONIC_EXPDB_RQ;

	rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (rc)
		goto err_resp;

	return 0;

err_resp:
	rdma_user_mmap_entry_remove(ctx->mmap_dbell);
err_mmap_dbell:
	ionic_put_dbid(dev, ctx->dbid);

	return rc;
}
430
ionic_dealloc_ucontext(struct ib_ucontext * ibctx)431 void ionic_dealloc_ucontext(struct ib_ucontext *ibctx)
432 {
433 struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
434 struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
435
436 rdma_user_mmap_entry_remove(ctx->mmap_dbell);
437 ionic_put_dbid(dev, ctx->dbid);
438 }
439
ionic_mmap(struct ib_ucontext * ibctx,struct vm_area_struct * vma)440 int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma)
441 {
442 struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
443 struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
444 struct rdma_user_mmap_entry *rdma_entry;
445 struct ionic_mmap_entry *ionic_entry;
446 int rc = 0;
447
448 rdma_entry = rdma_user_mmap_entry_get(&ctx->ibctx, vma);
449 if (!rdma_entry) {
450 ibdev_dbg(&dev->ibdev, "not found %#lx\n",
451 vma->vm_pgoff << PAGE_SHIFT);
452 return -EINVAL;
453 }
454
455 ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
456 rdma_entry);
457
458 ibdev_dbg(&dev->ibdev, "writecombine? %d\n",
459 ionic_entry->mmap_flags & IONIC_MMAP_WC);
460 if (ionic_entry->mmap_flags & IONIC_MMAP_WC)
461 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
462 else
463 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
464
465 ibdev_dbg(&dev->ibdev, "remap st %#lx pf %#lx sz %#lx\n",
466 vma->vm_start, ionic_entry->pfn, ionic_entry->size);
467 rc = rdma_user_mmap_io(&ctx->ibctx, vma, ionic_entry->pfn,
468 ionic_entry->size, vma->vm_page_prot,
469 rdma_entry);
470 if (rc)
471 ibdev_dbg(&dev->ibdev, "remap failed %d\n", rc);
472
473 rdma_user_mmap_entry_put(rdma_entry);
474 return rc;
475 }
476
ionic_mmap_free(struct rdma_user_mmap_entry * rdma_entry)477 void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
478 {
479 struct ionic_mmap_entry *ionic_entry;
480
481 ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
482 rdma_entry);
483 kfree(ionic_entry);
484 }
485
ionic_alloc_pd(struct ib_pd * ibpd,struct ib_udata * udata)486 int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
487 {
488 struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
489 struct ionic_pd *pd = to_ionic_pd(ibpd);
490
491 return ionic_get_pdid(dev, &pd->pdid);
492 }
493
ionic_dealloc_pd(struct ib_pd * ibpd,struct ib_udata * udata)494 int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
495 {
496 struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
497 struct ionic_pd *pd = to_ionic_pd(ibpd);
498
499 ionic_put_pdid(dev, pd->pdid);
500
501 return 0;
502 }
503
/*
 * ionic_build_hdr() - Build the RoCEv2 packet header template for an AH.
 * @dev:	Ionic rdma device.
 * @hdr:	Out: ud header to initialize and fill.
 * @attr:	Address attributes; must be RoCE type with a GRH.
 * @sport:	UDP source port to place in the template.
 * @want_ecn:	True to mark ECN-capable (ECT(0)) in tos/traffic class,
 *		false to clear the ECN bits.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ionic_build_hdr(struct ionic_ibdev *dev,
			   struct ib_ud_header *hdr,
			   const struct rdma_ah_attr *attr,
			   u16 sport, bool want_ecn)
{
	const struct ib_global_route *grh;
	enum rdma_network_type net;
	u8 smac[ETH_ALEN];
	u16 vlan;
	int rc;

	/* only RoCE addressing with a GRH is supported */
	if (attr->ah_flags != IB_AH_GRH)
		return -EINVAL;
	if (attr->type != RDMA_AH_ATTR_TYPE_ROCE)
		return -EINVAL;

	grh = rdma_ah_read_grh(attr);

	/* resolve source mac and vlan from the sgid entry */
	rc = rdma_read_gid_l2_fields(grh->sgid_attr, &vlan, smac);
	if (rc)
		return rc;

	net = rdma_gid_attr_network_type(grh->sgid_attr);

	/* vlan == 0xffff means the sgid carries no vlan */
	rc = ib_ud_header_init(0,	/* no payload */
			       0,	/* no lrh */
			       1,	/* yes eth */
			       vlan != 0xffff,
			       0,	/* no grh */
			       net == RDMA_NETWORK_IPV4 ? 4 : 6,
			       1,	/* yes udp */
			       0,	/* no imm */
			       hdr);
	if (rc)
		return rc;

	ether_addr_copy(hdr->eth.smac_h, smac);
	ether_addr_copy(hdr->eth.dmac_h, attr->roce.dmac);

	if (net == RDMA_NETWORK_IPV4) {
		hdr->eth.type = cpu_to_be16(ETH_P_IP);
		hdr->ip4.frag_off = cpu_to_be16(0x4000);	/* don't fragment */
		hdr->ip4.ttl = grh->hop_limit;
		/* NOTE(review): tot_len placeholder 0xffff — presumably
		 * rewritten per packet; verify against hw behavior.
		 */
		hdr->ip4.tot_len = cpu_to_be16(0xffff);
		/* v4 addresses live in the last four gid bytes */
		hdr->ip4.saddr =
			*(const __be32 *)(grh->sgid_attr->gid.raw + 12);
		hdr->ip4.daddr = *(const __be32 *)(grh->dgid.raw + 12);

		if (want_ecn)
			hdr->ip4.tos = ionic_set_ecn(grh->traffic_class);
		else
			hdr->ip4.tos = ionic_clear_ecn(grh->traffic_class);
	} else {
		hdr->eth.type = cpu_to_be16(ETH_P_IPV6);
		hdr->grh.flow_label = cpu_to_be32(grh->flow_label);
		hdr->grh.hop_limit = grh->hop_limit;
		hdr->grh.source_gid = grh->sgid_attr->gid;
		hdr->grh.destination_gid = grh->dgid;

		if (want_ecn)
			hdr->grh.traffic_class =
				ionic_set_ecn(grh->traffic_class);
		else
			hdr->grh.traffic_class =
				ionic_clear_ecn(grh->traffic_class);
	}

	if (vlan != 0xffff) {
		/* carry the service level in the vlan priority bits */
		vlan |= rdma_ah_get_sl(attr) << VLAN_PRIO_SHIFT;
		hdr->vlan.tag = cpu_to_be16(vlan);
		hdr->vlan.type = hdr->eth.type;
		hdr->eth.type = cpu_to_be16(ETH_P_8021Q);
	}

	hdr->udp.sport = cpu_to_be16(sport);
	hdr->udp.dport = cpu_to_be16(ROCE_V2_UDP_DPORT);

	return 0;
}
583
/*
 * ionic_set_ah_attr() - Recover rdma_ah_attr fields from a packed ud header.
 *
 * Inverse of ionic_build_hdr(); used by ionic_query_ah() to report the
 * attributes an AH was created with.
 */
static void ionic_set_ah_attr(struct ionic_ibdev *dev,
			      struct rdma_ah_attr *ah_attr,
			      struct ib_ud_header *hdr,
			      int sgid_index)
{
	u32 flow_label;
	u16 vlan = 0;
	u8 tos, ttl;

	if (hdr->vlan_present)
		vlan = be16_to_cpu(hdr->vlan.tag);

	if (hdr->ipv4_present) {
		flow_label = 0;
		ttl = hdr->ip4.ttl;
		tos = hdr->ip4.tos;
		/* synthesize an ipv4-mapped gid (::ffff:a.b.c.d) from daddr */
		*(__be16 *)(hdr->grh.destination_gid.raw + 10) = cpu_to_be16(0xffff);
		*(__be32 *)(hdr->grh.destination_gid.raw + 12) = hdr->ip4.daddr;
	} else {
		flow_label = be32_to_cpu(hdr->grh.flow_label);
		ttl = hdr->grh.hop_limit;
		tos = hdr->grh.traffic_class;
	}

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
	if (hdr->eth_present)
		ether_addr_copy(ah_attr->roce.dmac, hdr->eth.dmac_h);
	/* the service level was folded into the vlan priority bits */
	rdma_ah_set_sl(ah_attr, vlan >> VLAN_PRIO_SHIFT);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_grh(ah_attr, NULL, flow_label, sgid_index, ttl, tos);
	rdma_ah_set_dgid_raw(ah_attr, &hdr->grh.destination_gid);
}
617
/*
 * ionic_create_ah_cmd() - Build the AH header template and create the AH
 * on the device via an admin command.
 * @dev:	Ionic rdma device.
 * @ah:		Driver ah state; ah->hdr receives the header template.
 * @pd:		Protection domain the ah belongs to.
 * @attr:	Destination address attributes.
 * @flags:	RDMA_CREATE_AH_SLEEPABLE selects blocking vs busy-wait.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ionic_create_ah_cmd(struct ionic_ibdev *dev,
			       struct ionic_ah *ah,
			       struct ionic_pd *pd,
			       struct rdma_ah_attr *attr,
			       u32 flags)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_CREATE_AH,
			.len = cpu_to_le16(IONIC_ADMIN_CREATE_AH_IN_V1_LEN),
			.cmd.create_ah = {
				.pd_id = cpu_to_le32(pd->pdid),
				.dbid_flags = cpu_to_le16(dev->lif_cfg.dbid),
				.id_ver = cpu_to_le32(ah->ahid),
			}
		}
	};
	enum ionic_admin_flags admin_flags = 0;
	dma_addr_t hdr_dma = 0;
	void *hdr_buf;
	gfp_t gfp = GFP_ATOMIC;
	int rc, hdr_len = 0;

	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_AH)
		return -EBADRQC;

	/* non-sleepable callers busy-wait for the admin completion */
	if (flags & RDMA_CREATE_AH_SLEEPABLE)
		gfp = GFP_KERNEL;
	else
		admin_flags |= IONIC_ADMIN_F_BUSYWAIT;

	/* ECN bits are left clear in the template */
	rc = ionic_build_hdr(dev, &ah->hdr, attr, IONIC_ROCE_UDP_SPORT, false);
	if (rc)
		return rc;

	/* select the checksum profile matching the header layout */
	if (ah->hdr.eth.type == cpu_to_be16(ETH_P_8021Q)) {
		if (ah->hdr.vlan.type == cpu_to_be16(ETH_P_IP))
			wr.wqe.cmd.create_ah.csum_profile =
				IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP;
		else
			wr.wqe.cmd.create_ah.csum_profile =
				IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP;
	} else {
		if (ah->hdr.eth.type == cpu_to_be16(ETH_P_IP))
			wr.wqe.cmd.create_ah.csum_profile =
				IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
		else
			wr.wqe.cmd.create_ah.csum_profile =
				IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
	}

	ah->sgid_index = rdma_ah_read_grh(attr)->sgid_index;

	hdr_buf = kmalloc(PAGE_SIZE, gfp);
	if (!hdr_buf)
		return -ENOMEM;

	/* pack the template; exclude the trailing BTH and DETH bytes */
	hdr_len = ib_ud_header_pack(&ah->hdr, hdr_buf);
	hdr_len -= IB_BTH_BYTES;
	hdr_len -= IB_DETH_BYTES;
	ibdev_dbg(&dev->ibdev, "roce packet header template\n");
	print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
			     hdr_buf, hdr_len, true);

	/* hand the template to the device for the duration of the command */
	hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
				 DMA_TO_DEVICE);

	rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
	if (rc)
		goto err_dma;

	wr.wqe.cmd.create_ah.dma_addr = cpu_to_le64(hdr_dma);
	wr.wqe.cmd.create_ah.length = cpu_to_le32(hdr_len);

	ionic_admin_post(dev, &wr);
	rc = ionic_admin_wait(dev, &wr, admin_flags);

	dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
			 DMA_TO_DEVICE);
err_dma:
	kfree(hdr_buf);

	return rc;
}
703
/*
 * ionic_destroy_ah_cmd() - Issue an admin command to destroy an AH.
 * @dev:	Ionic rdma device.
 * @ahid:	Device ah id to destroy.
 * @flags:	RDMA_CREATE_AH_SLEEPABLE selects blocking vs busy-wait.
 *
 * Return: 0 (including when the device-side destroy fails; see below),
 * or -EBADRQC when the opcode is unsupported.
 */
static int ionic_destroy_ah_cmd(struct ionic_ibdev *dev, u32 ahid, u32 flags)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_DESTROY_AH,
			.len = cpu_to_le16(IONIC_ADMIN_DESTROY_AH_IN_V1_LEN),
			.cmd.destroy_ah = {
				.ah_id = cpu_to_le32(ahid),
			},
		}
	};
	enum ionic_admin_flags admin_flags = IONIC_ADMIN_F_TEARDOWN;

	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_AH)
		return -EBADRQC;

	if (!(flags & RDMA_CREATE_AH_SLEEPABLE))
		admin_flags |= IONIC_ADMIN_F_BUSYWAIT;

	ionic_admin_post(dev, &wr);
	/* result intentionally ignored, see comment below */
	ionic_admin_wait(dev, &wr, admin_flags);

	/* No host-memory resource is associated with ah, so it is ok
	 * to "succeed" and complete this destroy ah on the host.
	 */
	return 0;
}
732
ionic_create_ah(struct ib_ah * ibah,struct rdma_ah_init_attr * init_attr,struct ib_udata * udata)733 int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
734 struct ib_udata *udata)
735 {
736 struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
737 struct rdma_ah_attr *attr = init_attr->ah_attr;
738 struct ionic_pd *pd = to_ionic_pd(ibah->pd);
739 struct ionic_ah *ah = to_ionic_ah(ibah);
740 struct ionic_ah_resp resp = {};
741 u32 flags = init_attr->flags;
742 int rc;
743
744 rc = ionic_get_ahid(dev, &ah->ahid);
745 if (rc)
746 return rc;
747
748 rc = ionic_create_ah_cmd(dev, ah, pd, attr, flags);
749 if (rc)
750 goto err_cmd;
751
752 if (udata) {
753 resp.ahid = ah->ahid;
754
755 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
756 if (rc)
757 goto err_resp;
758 }
759
760 return 0;
761
762 err_resp:
763 ionic_destroy_ah_cmd(dev, ah->ahid, flags);
764 err_cmd:
765 ionic_put_ahid(dev, ah->ahid);
766 return rc;
767 }
768
ionic_query_ah(struct ib_ah * ibah,struct rdma_ah_attr * ah_attr)769 int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
770 {
771 struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
772 struct ionic_ah *ah = to_ionic_ah(ibah);
773
774 ionic_set_ah_attr(dev, ah_attr, &ah->hdr, ah->sgid_index);
775
776 return 0;
777 }
778
int ionic_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
	struct ionic_ah *ah = to_ionic_ah(ibah);
	int rc = ionic_destroy_ah_cmd(dev, ah->ahid, flags);

	if (rc)
		return rc;

	/* device destroy succeeded; return the ah id to the pool */
	ionic_put_ahid(dev, ah->ahid);

	return 0;
}
793
/*
 * ionic_create_mr_cmd() - Issue an admin command to create a device mr.
 * @dev:	Ionic rdma device.
 * @pd:		Protection domain for the mr.
 * @mr:		Driver mr state; mr->buf describes the page table.
 * @addr:	IOVA of the region.
 * @length:	Length of the region in bytes.
 *
 * On success marks mr->created so teardown knows a device-side destroy
 * is required.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ionic_create_mr_cmd(struct ionic_ibdev *dev,
			       struct ionic_pd *pd,
			       struct ionic_mr *mr,
			       u64 addr,
			       u64 length)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_CREATE_MR,
			.len = cpu_to_le16(IONIC_ADMIN_CREATE_MR_IN_V1_LEN),
			.cmd.create_mr = {
				.va = cpu_to_le64(addr),
				.length = cpu_to_le64(length),
				.pd_id = cpu_to_le32(pd->pdid),
				.page_size_log2 = mr->buf.page_size_log2,
				/* ~0: let the device pick the table index */
				.tbl_index = cpu_to_le32(~0),
				.map_count = cpu_to_le32(mr->buf.tbl_pages),
				.dma_addr = ionic_pgtbl_dma(&mr->buf, addr),
				.dbid_flags = cpu_to_le16(mr->flags),
				.id_ver = cpu_to_le32(mr->mrid),
			}
		}
	};
	int rc;

	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_MR)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);
	rc = ionic_admin_wait(dev, &wr, 0);
	if (!rc)
		mr->created = true;

	return rc;
}
830
/*
 * ionic_destroy_mr_cmd() - Issue an admin command to destroy a device mr.
 *
 * Return: 0 on success, -EBADRQC when the opcode is unsupported, or the
 * admin command's error.
 */
static int ionic_destroy_mr_cmd(struct ionic_ibdev *dev, u32 mrid)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_DESTROY_MR,
			.len = cpu_to_le16(IONIC_ADMIN_DESTROY_MR_IN_V1_LEN),
			.cmd.destroy_mr = {
				.mr_id = cpu_to_le32(mrid),
			},
		}
	};

	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_MR)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);

	return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
}
851
/*
 * ionic_get_dma_mr() - Get an mr using the reserved DMA lkey/rkey.
 *
 * No device command is issued; the reserved key values are used as-is.
 * NOTE(review): the pd is flagged IONIC_QPF_PRIVILEGED here — presumably
 * because the DMA mr grants access beyond user-registered memory; verify
 * against the qp-create path that consumes this flag.
 */
struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct ionic_pd *pd = to_ionic_pd(ibpd);
	struct ionic_mr *mr;

	mr = kzalloc_obj(*mr);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.lkey = IONIC_DMA_LKEY;
	mr->ibmr.rkey = IONIC_DMA_RKEY;

	if (pd)
		pd->flags |= IONIC_QPF_PRIVILEGED;

	return &mr->ibmr;
}
869
/*
 * ionic_reg_user_mr() - Register a user memory region.
 * @ibpd:	Protection domain.
 * @start:	Userspace virtual address of the region.
 * @length:	Length in bytes.
 * @addr:	IOVA for remote access.
 * @access:	IB access flags.
 * @dmah:	DMA handle; not supported, must be NULL.
 * @udata:	User verbs data (unused here).
 *
 * Pins the user memory, builds a page table at the best supported page
 * size, and creates the mr on the device.
 *
 * Return: the new ib_mr, or an ERR_PTR on failure.
 */
struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				u64 addr, int access, struct ib_dmah *dmah,
				struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
	struct ionic_pd *pd = to_ionic_pd(ibpd);
	struct ionic_mr *mr;
	unsigned long pg_sz;
	int rc;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	mr = kzalloc_obj(*mr);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rc = ionic_get_mrid(dev, &mr->mrid);
	if (rc)
		goto err_mrid;

	mr->ibmr.lkey = mr->mrid;
	mr->ibmr.rkey = mr->mrid;
	mr->ibmr.iova = addr;
	mr->ibmr.length = length;

	mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);

	mr->umem = ib_umem_get(&dev->ibdev, start, length, access);
	if (IS_ERR(mr->umem)) {
		rc = PTR_ERR(mr->umem);
		goto err_umem;
	}

	/* choose the largest device-supported page size for the umem */
	pg_sz = ib_umem_find_best_pgsz(mr->umem,
				       dev->lif_cfg.page_size_supported,
				       addr);
	if (!pg_sz) {
		rc = -EINVAL;
		goto err_pgtbl;
	}

	rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
	if (rc)
		goto err_pgtbl;

	rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
	if (rc)
		goto err_cmd;

	/* the staging page table buf is no longer needed after the
	 * create command completes
	 */
	ionic_pgtbl_unbuf(dev, &mr->buf);

	return &mr->ibmr;

err_cmd:
	ionic_pgtbl_unbuf(dev, &mr->buf);
err_pgtbl:
	ib_umem_release(mr->umem);
err_umem:
	ionic_put_mrid(dev, mr->mrid);
err_mrid:
	kfree(mr);
	return ERR_PTR(rc);
}
934
/*
 * ionic_reg_user_mr_dmabuf() - Register an mr backed by a dma-buf.
 * @ibpd:	Protection domain.
 * @offset:	Offset into the dma-buf.
 * @length:	Length in bytes.
 * @addr:	IOVA for remote access.
 * @fd:		dma-buf file descriptor.
 * @access:	IB access flags.
 * @dmah:	DMA handle; not supported, must be NULL.
 * @attrs:	uverbs attr bundle (unused here).
 *
 * Same flow as ionic_reg_user_mr() except the memory comes from a
 * pinned dma-buf instead of a userspace mapping.
 *
 * Return: the new ib_mr, or an ERR_PTR on failure.
 */
struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
				       u64 length, u64 addr, int fd, int access,
				       struct ib_dmah *dmah,
				       struct uverbs_attr_bundle *attrs)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
	struct ionic_pd *pd = to_ionic_pd(ibpd);
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ionic_mr *mr;
	u64 pg_sz;
	int rc;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	mr = kzalloc_obj(*mr);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rc = ionic_get_mrid(dev, &mr->mrid);
	if (rc)
		goto err_mrid;

	mr->ibmr.lkey = mr->mrid;
	mr->ibmr.rkey = mr->mrid;
	mr->ibmr.iova = addr;
	mr->ibmr.length = length;

	mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);

	umem_dmabuf = ib_umem_dmabuf_get_pinned(&dev->ibdev, offset, length,
						fd, access);
	if (IS_ERR(umem_dmabuf)) {
		rc = PTR_ERR(umem_dmabuf);
		goto err_umem;
	}

	mr->umem = &umem_dmabuf->umem;

	/* choose the largest device-supported page size for the umem */
	pg_sz = ib_umem_find_best_pgsz(mr->umem,
				       dev->lif_cfg.page_size_supported,
				       addr);
	if (!pg_sz) {
		rc = -EINVAL;
		goto err_pgtbl;
	}

	rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
	if (rc)
		goto err_pgtbl;

	rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
	if (rc)
		goto err_cmd;

	/* the staging page table buf is no longer needed after the
	 * create command completes
	 */
	ionic_pgtbl_unbuf(dev, &mr->buf);

	return &mr->ibmr;

err_cmd:
	ionic_pgtbl_unbuf(dev, &mr->buf);
err_pgtbl:
	ib_umem_release(mr->umem);
err_umem:
	ionic_put_mrid(dev, mr->mrid);
err_mrid:
	kfree(mr);
	return ERR_PTR(rc);
}
1004
/*
 * ionic_dereg_mr() - Deregister and free a memory region.
 *
 * Return: 0 on success; a failed device destroy leaves the mr intact
 * and returns the error so deregistration can be retried.
 */
int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
	struct ionic_mr *mr = to_ionic_mr(ibmr);
	int rc;

	/* NOTE(review): a zero lkey is treated as holding no device, page
	 * table, umem, or mrid resources — presumably the reserved DMA mr
	 * case; verify that IONIC_DMA_LKEY is zero.
	 */
	if (!mr->ibmr.lkey)
		goto out;

	/* only destroy on the device if creation completed */
	if (mr->created) {
		rc = ionic_destroy_mr_cmd(dev, mr->mrid);
		if (rc)
			return rc;
	}

	ionic_pgtbl_unbuf(dev, &mr->buf);

	if (mr->umem)
		ib_umem_release(mr->umem);

	ionic_put_mrid(dev, mr->mrid);

out:
	kfree(mr);

	return 0;
}
1032
/*
 * ionic_alloc_mr() - Allocate an mr for fast-register (IB_MR_TYPE_MEM_REG).
 * @ibpd:	Protection domain.
 * @type:	Must be IB_MR_TYPE_MEM_REG.
 * @max_sg:	Maximum number of pages the mr may map.
 *
 * Creates a physical mr on the device with an empty page table sized
 * for max_sg entries; pages are filled in later by ionic_map_mr_sg().
 *
 * Return: the new ib_mr, or an ERR_PTR on failure.
 */
struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
			     u32 max_sg)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
	struct ionic_pd *pd = to_ionic_pd(ibpd);
	struct ionic_mr *mr;
	int rc;

	if (type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = kzalloc_obj(*mr);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rc = ionic_get_mrid(dev, &mr->mrid);
	if (rc)
		goto err_mrid;

	mr->ibmr.lkey = mr->mrid;
	mr->ibmr.rkey = mr->mrid;

	mr->flags = IONIC_MRF_PHYS_MR;

	/* mr->umem is NULL here; allocate an empty table of max_sg slots */
	rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, max_sg, PAGE_SIZE);
	if (rc)
		goto err_pgtbl;

	/* no pages mapped yet; ionic_map_mr_sg() will populate the table */
	mr->buf.tbl_pages = 0;

	rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
	if (rc)
		goto err_cmd;

	return &mr->ibmr;

err_cmd:
	ionic_pgtbl_unbuf(dev, &mr->buf);
err_pgtbl:
	ionic_put_mrid(dev, mr->mrid);
err_mrid:
	kfree(mr);
	return ERR_PTR(rc);
}
1077
/* Per-page callback for ib_sg_to_pages(): append one dma address to the
 * mr's page table.
 */
static int ionic_map_mr_page(struct ib_mr *ibmr, u64 dma)
{
	struct ionic_mr *mr = to_ionic_mr(ibmr);
	struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);

	ibdev_dbg(&dev->ibdev, "dma %p\n", (void *)dma);

	return ionic_pgtbl_page(&mr->buf, dma);
}
1086
/*
 * ionic_map_mr_sg() - Populate a fast-register mr's page table from an sg
 * list.
 *
 * Bracketed by dma syncs so the cpu-side writes into the table buffer
 * are visible to the device afterwards.
 *
 * Return: number of sg entries mapped, or a negative errno.
 */
int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		    unsigned int *sg_offset)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
	struct ionic_mr *mr = to_ionic_mr(ibmr);
	int rc;

	/* mr must be allocated using ib_alloc_mr() */
	if (unlikely(!mr->buf.tbl_limit))
		return -EINVAL;

	/* restart the table; pages are appended by ionic_map_mr_page() */
	mr->buf.tbl_pages = 0;

	if (mr->buf.tbl_buf)
		dma_sync_single_for_cpu(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
					mr->buf.tbl_size, DMA_TO_DEVICE);

	ibdev_dbg(&dev->ibdev, "sg %p nent %d\n", sg, sg_nents);
	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ionic_map_mr_page);

	mr->buf.page_size_log2 = order_base_2(ibmr->page_size);

	if (mr->buf.tbl_buf)
		dma_sync_single_for_device(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
					   mr->buf.tbl_size, DMA_TO_DEVICE);

	return rc;
}
1115
ionic_alloc_mw(struct ib_mw * ibmw,struct ib_udata * udata)1116 int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
1117 {
1118 struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
1119 struct ionic_pd *pd = to_ionic_pd(ibmw->pd);
1120 struct ionic_mr *mr = to_ionic_mw(ibmw);
1121 int rc;
1122
1123 rc = ionic_get_mrid(dev, &mr->mrid);
1124 if (rc)
1125 return rc;
1126
1127 mr->ibmw.rkey = mr->mrid;
1128
1129 if (mr->ibmw.type == IB_MW_TYPE_1)
1130 mr->flags = IONIC_MRF_MW_1;
1131 else
1132 mr->flags = IONIC_MRF_MW_2;
1133
1134 rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
1135 if (rc)
1136 goto err_cmd;
1137
1138 return 0;
1139
1140 err_cmd:
1141 ionic_put_mrid(dev, mr->mrid);
1142 return rc;
1143 }
1144
ionic_dealloc_mw(struct ib_mw * ibmw)1145 int ionic_dealloc_mw(struct ib_mw *ibmw)
1146 {
1147 struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
1148 struct ionic_mr *mr = to_ionic_mw(ibmw);
1149 int rc;
1150
1151 rc = ionic_destroy_mr_cmd(dev, mr->mrid);
1152 if (rc)
1153 return rc;
1154
1155 ionic_put_mrid(dev, mr->mrid);
1156
1157 return 0;
1158 }
1159
/* Issue the admin command that creates one completion queue on the device.
 * The whole wqe is built up front from the cq state and its page-table
 * buffer; tbl_index is set to ~0 (presumably "no preallocated table slot" —
 * confirm against the fw interface spec).  The dbid binds the cq to the
 * user context's doorbell page (or the kernel's, for ctx == NULL).
 */
static int ionic_create_cq_cmd(struct ionic_ibdev *dev,
			       struct ionic_ctx *ctx,
			       struct ionic_cq *cq,
			       struct ionic_tbl_buf *buf)
{
	const u16 dbid = ionic_ctx_dbid(dev, ctx);
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_CREATE_CQ,
			.len = cpu_to_le16(IONIC_ADMIN_CREATE_CQ_IN_V1_LEN),
			.cmd.create_cq = {
				.eq_id = cpu_to_le32(cq->eqid),
				.depth_log2 = cq->q.depth_log2,
				.stride_log2 = cq->q.stride_log2,
				.page_size_log2 = buf->page_size_log2,
				.tbl_index = cpu_to_le32(~0),
				.map_count = cpu_to_le32(buf->tbl_pages),
				.dma_addr = ionic_pgtbl_dma(buf, 0),
				.dbid_flags = cpu_to_le16(dbid),
				.id_ver = cpu_to_le32(cq->cqid),
			}
		}
	};

	/* firmware must advertise this admin opcode */
	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_CQ)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);

	return ionic_admin_wait(dev, &wr, 0);
}
1192
/* Issue the admin command that destroys a completion queue on the device. */
static int ionic_destroy_cq_cmd(struct ionic_ibdev *dev, u32 cqid)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe.op = IONIC_V1_ADMIN_DESTROY_CQ,
		.wqe.len = cpu_to_le16(IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN),
		.wqe.cmd.destroy_cq.cq_id = cpu_to_le32(cqid),
	};

	/* firmware must advertise this admin opcode */
	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_CQ)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);

	/* teardown flag: proceed even if the admin queue is failing */
	return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
}
1213
ionic_create_cq(struct ib_cq * ibcq,const struct ib_cq_init_attr * attr,struct uverbs_attr_bundle * attrs)1214 int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1215 struct uverbs_attr_bundle *attrs)
1216 {
1217 struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
1218 struct ib_udata *udata = &attrs->driver_udata;
1219 struct ionic_ctx *ctx =
1220 rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
1221 struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
1222 struct ionic_tbl_buf buf = {};
1223 struct ionic_cq_resp resp = {};
1224 struct ionic_cq_req req;
1225 int udma_idx = 0, rc;
1226
1227 if (udata) {
1228 rc = ib_copy_from_udata(&req, udata, sizeof(req));
1229 if (rc)
1230 return rc;
1231 }
1232
1233 vcq->udma_mask = BIT(dev->lif_cfg.udma_count) - 1;
1234
1235 if (udata)
1236 vcq->udma_mask &= req.udma_mask;
1237
1238 if (!vcq->udma_mask) {
1239 rc = -EINVAL;
1240 goto err_init;
1241 }
1242
1243 for (; udma_idx < dev->lif_cfg.udma_count; ++udma_idx) {
1244 if (!(vcq->udma_mask & BIT(udma_idx)))
1245 continue;
1246
1247 rc = ionic_create_cq_common(vcq, &buf, attr, ctx, udata,
1248 &req.cq[udma_idx],
1249 &resp.cqid[udma_idx],
1250 udma_idx);
1251 if (rc)
1252 goto err_init;
1253
1254 rc = ionic_create_cq_cmd(dev, ctx, &vcq->cq[udma_idx], &buf);
1255 if (rc)
1256 goto err_cmd;
1257
1258 ionic_pgtbl_unbuf(dev, &buf);
1259 }
1260
1261 vcq->ibcq.cqe = attr->cqe;
1262
1263 if (udata) {
1264 resp.udma_mask = vcq->udma_mask;
1265
1266 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1267 if (rc)
1268 goto err_resp;
1269 }
1270
1271 return 0;
1272
1273 err_resp:
1274 while (udma_idx) {
1275 --udma_idx;
1276 if (!(vcq->udma_mask & BIT(udma_idx)))
1277 continue;
1278 ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
1279 err_cmd:
1280 ionic_pgtbl_unbuf(dev, &buf);
1281 ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
1282 err_init:
1283 ;
1284 }
1285
1286 return rc;
1287 }
1288
ionic_destroy_cq(struct ib_cq * ibcq,struct ib_udata * udata)1289 int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1290 {
1291 struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
1292 struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
1293 int udma_idx, rc_tmp, rc = 0;
1294
1295 for (udma_idx = dev->lif_cfg.udma_count; udma_idx; ) {
1296 --udma_idx;
1297
1298 if (!(vcq->udma_mask & BIT(udma_idx)))
1299 continue;
1300
1301 rc_tmp = ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
1302 if (rc_tmp) {
1303 if (!rc)
1304 rc = rc_tmp;
1305
1306 continue;
1307 }
1308
1309 ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
1310 }
1311
1312 return rc;
1313 }
1314
pd_remote_privileged(struct ib_pd * pd)1315 static bool pd_remote_privileged(struct ib_pd *pd)
1316 {
1317 return pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
1318 }
1319
/* Issue the admin command that creates a queue pair on the device.
 * Access flags and sqd-notify are zero at create time (set later by
 * modify); the sq/rq sections of the wqe are only populated for queues
 * the qp actually has.  The *_tbl_index fields are set to ~0
 * (presumably "no preallocated table slot" — confirm against fw spec).
 */
static int ionic_create_qp_cmd(struct ionic_ibdev *dev,
			       struct ionic_pd *pd,
			       struct ionic_cq *send_cq,
			       struct ionic_cq *recv_cq,
			       struct ionic_qp *qp,
			       struct ionic_tbl_buf *sq_buf,
			       struct ionic_tbl_buf *rq_buf,
			       struct ib_qp_init_attr *attr)
{
	/* doorbell id comes from the owning user context, if any */
	const u16 dbid = ionic_obj_dbid(dev, pd->ibpd.uobject);
	const u32 flags = to_ionic_qp_flags(0, 0,
					    qp->sq_cmb & IONIC_CMB_ENABLE,
					    qp->rq_cmb & IONIC_CMB_ENABLE,
					    qp->sq_spec, qp->rq_spec,
					    pd->flags & IONIC_QPF_PRIVILEGED,
					    pd_remote_privileged(&pd->ibpd));
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_CREATE_QP,
			.len = cpu_to_le16(IONIC_ADMIN_CREATE_QP_IN_V1_LEN),
			.cmd.create_qp = {
				.pd_id = cpu_to_le32(pd->pdid),
				.priv_flags = cpu_to_be32(flags),
				.type_state = to_ionic_qp_type(attr->qp_type),
				.dbid_flags = cpu_to_le16(dbid),
				.id_ver = cpu_to_le32(qp->qpid),
			}
		}
	};

	/* firmware must advertise this admin opcode */
	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_QP)
		return -EBADRQC;

	if (qp->has_sq) {
		wr.wqe.cmd.create_qp.sq_cq_id = cpu_to_le32(send_cq->cqid);
		wr.wqe.cmd.create_qp.sq_depth_log2 = qp->sq.depth_log2;
		wr.wqe.cmd.create_qp.sq_stride_log2 = qp->sq.stride_log2;
		wr.wqe.cmd.create_qp.sq_page_size_log2 = sq_buf->page_size_log2;
		wr.wqe.cmd.create_qp.sq_tbl_index_xrcd_id = cpu_to_le32(~0);
		wr.wqe.cmd.create_qp.sq_map_count =
			cpu_to_le32(sq_buf->tbl_pages);
		wr.wqe.cmd.create_qp.sq_dma_addr = ionic_pgtbl_dma(sq_buf, 0);
	}

	if (qp->has_rq) {
		wr.wqe.cmd.create_qp.rq_cq_id = cpu_to_le32(recv_cq->cqid);
		wr.wqe.cmd.create_qp.rq_depth_log2 = qp->rq.depth_log2;
		wr.wqe.cmd.create_qp.rq_stride_log2 = qp->rq.stride_log2;
		wr.wqe.cmd.create_qp.rq_page_size_log2 = rq_buf->page_size_log2;
		wr.wqe.cmd.create_qp.rq_tbl_index_srq_id = cpu_to_le32(~0);
		wr.wqe.cmd.create_qp.rq_map_count =
			cpu_to_le32(rq_buf->tbl_pages);
		wr.wqe.cmd.create_qp.rq_dma_addr = ionic_pgtbl_dma(rq_buf, 0);
	}

	ionic_admin_post(dev, &wr);

	return ionic_admin_wait(dev, &wr, 0);
}
1380
/* Issue the admin command that modifies a queue pair.  For IB_QP_AV, a
 * RoCE packet header template is packed into a DMA-mapped bounce buffer
 * that the device copies from; the buffer is unmapped and freed after the
 * command completes regardless of its result.
 */
static int ionic_modify_qp_cmd(struct ionic_ibdev *dev,
			       struct ionic_pd *pd,
			       struct ionic_qp *qp,
			       struct ib_qp_attr *attr,
			       int mask)
{
	const u32 flags = to_ionic_qp_flags(attr->qp_access_flags,
					    attr->en_sqd_async_notify,
					    qp->sq_cmb & IONIC_CMB_ENABLE,
					    qp->rq_cmb & IONIC_CMB_ENABLE,
					    qp->sq_spec, qp->rq_spec,
					    pd->flags & IONIC_QPF_PRIVILEGED,
					    pd_remote_privileged(qp->ibqp.pd));
	const u8 state = to_ionic_qp_modify_state(attr->qp_state,
						  attr->cur_qp_state);
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_MODIFY_QP,
			.len = cpu_to_le16(IONIC_ADMIN_MODIFY_QP_IN_V1_LEN),
			.cmd.mod_qp = {
				.attr_mask = cpu_to_be32(mask),
				.access_flags = cpu_to_be16(flags),
				.rq_psn = cpu_to_le32(attr->rq_psn),
				.sq_psn = cpu_to_le32(attr->sq_psn),
				.rate_limit_kbps =
					cpu_to_le32(attr->rate_limit),
				/* enum ib_mtu to log2 bytes: IB_MTU_256(1) -> 8 */
				.pmtu = (attr->path_mtu + 7),
				/* low nibble retry_cnt, high nibble rnr_retry */
				.retry = (attr->retry_cnt |
					  (attr->rnr_retry << 4)),
				.rnr_timer = attr->min_rnr_timer,
				.retry_timeout = attr->timeout,
				.type_state = state,
				.id_ver = cpu_to_le32(qp->qpid),
			}
		}
	};
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	void *hdr_buf = NULL;
	dma_addr_t hdr_dma = 0;
	int rc, hdr_len = 0;
	u16 sport;

	/* firmware must advertise this admin opcode */
	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_MODIFY_QP)
		return -EBADRQC;

	if ((mask & IB_QP_MAX_DEST_RD_ATOMIC) && attr->max_dest_rd_atomic) {
		/* Note, round up/down was already done for allocating
		 * resources on the device. The allocation order is in cache
		 * line size. We can't use the order of the resource
		 * allocation to determine the order wqes here, because for
		 * queue length <= one cache line it is not distinct.
		 *
		 * Therefore, order wqes is computed again here.
		 *
		 * Account for hole and round up to the next order.
		 */
		wr.wqe.cmd.mod_qp.rsq_depth =
			order_base_2(attr->max_dest_rd_atomic + 1);
		wr.wqe.cmd.mod_qp.rsq_index = cpu_to_le32(~0);
	}

	if ((mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
		/* Account for hole and round down to the next order */
		wr.wqe.cmd.mod_qp.rrq_depth =
			order_base_2(attr->max_rd_atomic + 2) - 1;
		wr.wqe.cmd.mod_qp.rrq_index = cpu_to_le32(~0);
	}

	/* the qkey_dest_qpn field is overloaded by connection type */
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		wr.wqe.cmd.mod_qp.qkey_dest_qpn =
			cpu_to_le32(attr->dest_qp_num);
	else
		wr.wqe.cmd.mod_qp.qkey_dest_qpn = cpu_to_le32(attr->qkey);

	if (mask & IB_QP_AV) {
		if (!qp->hdr)
			return -ENOMEM;

		sport = rdma_get_udp_sport(grh->flow_label,
					   qp->qpid,
					   attr->dest_qp_num);

		rc = ionic_build_hdr(dev, qp->hdr, &attr->ah_attr, sport, true);
		if (rc)
			return rc;

		qp->sgid_index = grh->sgid_index;

		hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!hdr_buf)
			return -ENOMEM;

		/* pack the template; BTH/DETH are not part of it
		 * (presumably appended by the device — confirm vs fw spec)
		 */
		hdr_len = ib_ud_header_pack(qp->hdr, hdr_buf);
		hdr_len -= IB_BTH_BYTES;
		hdr_len -= IB_DETH_BYTES;
		ibdev_dbg(&dev->ibdev, "roce packet header template\n");
		print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
				     hdr_buf, hdr_len, true);

		hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
					 DMA_TO_DEVICE);

		rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
		if (rc)
			goto err_dma;

		if (qp->hdr->ipv4_present) {
			wr.wqe.cmd.mod_qp.tfp_csum_profile =
				qp->hdr->vlan_present ?
					IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP :
					IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
		} else {
			wr.wqe.cmd.mod_qp.tfp_csum_profile =
				qp->hdr->vlan_present ?
					IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP :
					IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
		}

		/* header length rides in the top byte of ah_id_len */
		wr.wqe.cmd.mod_qp.ah_id_len =
			cpu_to_le32(qp->ahid | (hdr_len << 24));
		wr.wqe.cmd.mod_qp.dma_addr = cpu_to_le64(hdr_dma);

		wr.wqe.cmd.mod_qp.en_pcp = attr->ah_attr.sl;
		/* DSCP is the upper six bits of the traffic class */
		wr.wqe.cmd.mod_qp.ip_dscp = grh->traffic_class >> 2;
	}

	ionic_admin_post(dev, &wr);

	rc = ionic_admin_wait(dev, &wr, 0);

	/* unmap/free the header template whether or not the command passed */
	if (mask & IB_QP_AV)
		dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
				 DMA_TO_DEVICE);
err_dma:
	if (mask & IB_QP_AV)
		kfree(hdr_buf);

	return rc;
}
1521
ionic_query_qp_cmd(struct ionic_ibdev * dev,struct ionic_qp * qp,struct ib_qp_attr * attr,int mask)1522 static int ionic_query_qp_cmd(struct ionic_ibdev *dev,
1523 struct ionic_qp *qp,
1524 struct ib_qp_attr *attr,
1525 int mask)
1526 {
1527 struct ionic_admin_wr wr = {
1528 .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
1529 .wqe = {
1530 .op = IONIC_V1_ADMIN_QUERY_QP,
1531 .len = cpu_to_le16(IONIC_ADMIN_QUERY_QP_IN_V1_LEN),
1532 .cmd.query_qp = {
1533 .id_ver = cpu_to_le32(qp->qpid),
1534 },
1535 }
1536 };
1537 struct ionic_v1_admin_query_qp_sq *query_sqbuf;
1538 struct ionic_v1_admin_query_qp_rq *query_rqbuf;
1539 dma_addr_t query_sqdma;
1540 dma_addr_t query_rqdma;
1541 dma_addr_t hdr_dma = 0;
1542 void *hdr_buf = NULL;
1543 int flags, rc;
1544
1545 if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_QUERY_QP)
1546 return -EBADRQC;
1547
1548 if (qp->has_sq) {
1549 bool expdb = !!(qp->sq_cmb & IONIC_CMB_EXPDB);
1550
1551 attr->cap.max_send_sge =
1552 ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
1553 qp->sq_spec,
1554 expdb);
1555 attr->cap.max_inline_data =
1556 ionic_v1_send_wqe_max_data(qp->sq.stride_log2, expdb);
1557 }
1558
1559 if (qp->has_rq) {
1560 attr->cap.max_recv_sge =
1561 ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
1562 qp->rq_spec,
1563 qp->rq_cmb & IONIC_CMB_EXPDB);
1564 }
1565
1566 query_sqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1567 if (!query_sqbuf)
1568 return -ENOMEM;
1569
1570 query_rqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1571 if (!query_rqbuf) {
1572 rc = -ENOMEM;
1573 goto err_rqbuf;
1574 }
1575
1576 query_sqdma = dma_map_single(dev->lif_cfg.hwdev, query_sqbuf, PAGE_SIZE,
1577 DMA_FROM_DEVICE);
1578 rc = dma_mapping_error(dev->lif_cfg.hwdev, query_sqdma);
1579 if (rc)
1580 goto err_sqdma;
1581
1582 query_rqdma = dma_map_single(dev->lif_cfg.hwdev, query_rqbuf, PAGE_SIZE,
1583 DMA_FROM_DEVICE);
1584 rc = dma_mapping_error(dev->lif_cfg.hwdev, query_rqdma);
1585 if (rc)
1586 goto err_rqdma;
1587
1588 if (mask & IB_QP_AV) {
1589 hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1590 if (!hdr_buf) {
1591 rc = -ENOMEM;
1592 goto err_hdrbuf;
1593 }
1594
1595 hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf,
1596 PAGE_SIZE, DMA_FROM_DEVICE);
1597 rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
1598 if (rc)
1599 goto err_hdrdma;
1600 }
1601
1602 wr.wqe.cmd.query_qp.sq_dma_addr = cpu_to_le64(query_sqdma);
1603 wr.wqe.cmd.query_qp.rq_dma_addr = cpu_to_le64(query_rqdma);
1604 wr.wqe.cmd.query_qp.hdr_dma_addr = cpu_to_le64(hdr_dma);
1605 wr.wqe.cmd.query_qp.ah_id = cpu_to_le32(qp->ahid);
1606
1607 ionic_admin_post(dev, &wr);
1608
1609 rc = ionic_admin_wait(dev, &wr, 0);
1610
1611 if (rc)
1612 goto err_hdrdma;
1613
1614 flags = be16_to_cpu(query_sqbuf->access_perms_flags |
1615 query_rqbuf->access_perms_flags);
1616
1617 print_hex_dump_debug("sqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
1618 query_sqbuf, sizeof(*query_sqbuf), true);
1619 print_hex_dump_debug("rqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
1620 query_rqbuf, sizeof(*query_rqbuf), true);
1621 ibdev_dbg(&dev->ibdev, "query qp %u state_pmtu %#x flags %#x",
1622 qp->qpid, query_rqbuf->state_pmtu, flags);
1623
1624 attr->qp_state = from_ionic_qp_state(query_rqbuf->state_pmtu >> 4);
1625 attr->cur_qp_state = attr->qp_state;
1626 attr->path_mtu = (query_rqbuf->state_pmtu & 0xf) - 7;
1627 attr->path_mig_state = IB_MIG_MIGRATED;
1628 attr->qkey = be32_to_cpu(query_sqbuf->qkey_dest_qpn);
1629 attr->rq_psn = be32_to_cpu(query_sqbuf->rq_psn);
1630 attr->sq_psn = be32_to_cpu(query_rqbuf->sq_psn);
1631 attr->dest_qp_num = attr->qkey;
1632 attr->qp_access_flags = from_ionic_qp_flags(flags);
1633 attr->pkey_index = 0;
1634 attr->alt_pkey_index = 0;
1635 attr->en_sqd_async_notify = !!(flags & IONIC_QPF_SQD_NOTIFY);
1636 attr->sq_draining = !!(flags & IONIC_QPF_SQ_DRAINING);
1637 attr->max_rd_atomic = BIT(query_rqbuf->rrq_depth) - 1;
1638 attr->max_dest_rd_atomic = BIT(query_rqbuf->rsq_depth) - 1;
1639 attr->min_rnr_timer = query_sqbuf->rnr_timer;
1640 attr->port_num = 0;
1641 attr->timeout = query_sqbuf->retry_timeout;
1642 attr->retry_cnt = query_rqbuf->retry_rnrtry & 0xf;
1643 attr->rnr_retry = query_rqbuf->retry_rnrtry >> 4;
1644 attr->alt_port_num = 0;
1645 attr->alt_timeout = 0;
1646 attr->rate_limit = be32_to_cpu(query_sqbuf->rate_limit_kbps);
1647
1648 if (mask & IB_QP_AV)
1649 ionic_set_ah_attr(dev, &attr->ah_attr,
1650 qp->hdr, qp->sgid_index);
1651
1652 err_hdrdma:
1653 if (mask & IB_QP_AV) {
1654 dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma,
1655 PAGE_SIZE, DMA_FROM_DEVICE);
1656 kfree(hdr_buf);
1657 }
1658 err_hdrbuf:
1659 dma_unmap_single(dev->lif_cfg.hwdev, query_rqdma, sizeof(*query_rqbuf),
1660 DMA_FROM_DEVICE);
1661 err_rqdma:
1662 dma_unmap_single(dev->lif_cfg.hwdev, query_sqdma, sizeof(*query_sqbuf),
1663 DMA_FROM_DEVICE);
1664 err_sqdma:
1665 kfree(query_rqbuf);
1666 err_rqbuf:
1667 kfree(query_sqbuf);
1668
1669 return rc;
1670 }
1671
/* Issue the admin command that destroys a queue pair on the device. */
static int ionic_destroy_qp_cmd(struct ionic_ibdev *dev, u32 qpid)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe.op = IONIC_V1_ADMIN_DESTROY_QP,
		.wqe.len = cpu_to_le16(IONIC_ADMIN_DESTROY_QP_IN_V1_LEN),
		.wqe.cmd.destroy_qp.qp_id = cpu_to_le32(qpid),
	};

	/* firmware must advertise this admin opcode */
	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_QP)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);

	/* teardown flag: proceed even if the admin queue is failing */
	return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
}
1692
ionic_expdb_wqe_size_supported(struct ionic_ibdev * dev,uint32_t wqe_size)1693 static bool ionic_expdb_wqe_size_supported(struct ionic_ibdev *dev,
1694 uint32_t wqe_size)
1695 {
1696 switch (wqe_size) {
1697 case 64: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_64;
1698 case 128: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_128;
1699 case 256: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_256;
1700 case 512: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_512;
1701 }
1702
1703 return false;
1704 }
1705
/* Try to place the send queue in controller memory (cmb).  Unsupported
 * cmb flags are negotiated away unless the caller set IONIC_CMB_REQUIRE,
 * in which case any shortfall falls through to not_in_cmb and disables
 * cmb entirely.  This function cannot fail: host-memory placement is the
 * fallback, and qp->sq_cmb reflects the final outcome.
 */
static void ionic_qp_sq_init_cmb(struct ionic_ibdev *dev,
				 struct ionic_qp *qp,
				 struct ib_udata *udata,
				 int max_data)
{
	u8 expdb_stride_log2 = 0;
	bool expdb;
	int rc;

	if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
		goto not_in_cmb;

	/* drop unsupported flags, unless they were required */
	if (qp->sq_cmb & ~IONIC_CMB_SUPPORTED) {
		if (qp->sq_cmb & IONIC_CMB_REQUIRE)
			goto not_in_cmb;

		qp->sq_cmb &= IONIC_CMB_SUPPORTED;
	}

	/* express doorbell must be supported by the device for sq */
	if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.sq_expdb) {
		if (qp->sq_cmb & IONIC_CMB_REQUIRE)
			goto not_in_cmb;

		qp->sq_cmb &= ~IONIC_CMB_EXPDB;
	}

	/* allocation order (pages) derived from the queue size */
	qp->sq_cmb_order = order_base_2(qp->sq.size / PAGE_SIZE);

	if (qp->sq_cmb_order >= IONIC_SQCMB_ORDER)
		goto not_in_cmb;

	if (qp->sq_cmb & IONIC_CMB_EXPDB)
		expdb_stride_log2 = qp->sq.stride_log2;

	rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->sq_cmb_pgid,
			   &qp->sq_cmb_addr, qp->sq_cmb_order,
			   expdb_stride_log2, &expdb);
	if (rc)
		goto not_in_cmb;

	/* the allocation may not have expdb even if we asked for it */
	if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !expdb) {
		if (qp->sq_cmb & IONIC_CMB_REQUIRE)
			goto err_map;

		qp->sq_cmb &= ~IONIC_CMB_EXPDB;
	}

	return;

err_map:
	ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
not_in_cmb:
	if (qp->sq_cmb & IONIC_CMB_REQUIRE)
		ibdev_dbg(&dev->ibdev, "could not place sq in cmb as required\n");

	/* reset to "host memory" state */
	qp->sq_cmb = 0;
	qp->sq_cmb_order = IONIC_RES_INVALID;
	qp->sq_cmb_pgid = 0;
	qp->sq_cmb_addr = 0;
}
1766
ionic_qp_sq_destroy_cmb(struct ionic_ibdev * dev,struct ionic_ctx * ctx,struct ionic_qp * qp)1767 static void ionic_qp_sq_destroy_cmb(struct ionic_ibdev *dev,
1768 struct ionic_ctx *ctx,
1769 struct ionic_qp *qp)
1770 {
1771 if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
1772 return;
1773
1774 if (ctx)
1775 rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
1776
1777 ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
1778 }
1779
/* Initialize the send queue of a qp.  For user qps (udata), the ring is
 * user memory pinned via ib_umem; for kernel qps the ring, per-wqe meta
 * array, and msn index are allocated by the driver.  Afterward, cmb
 * placement is attempted and the page table buffer for the create
 * command is built.  Return: 0 or negative errno.
 */
static int ionic_qp_sq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
			    struct ionic_qp *qp, struct ionic_qdesc *sq,
			    struct ionic_tbl_buf *buf, int max_wr, int max_sge,
			    int max_data, int sq_spec, struct ib_udata *udata)
{
	u32 wqe_size;
	int rc = 0;

	qp->sq_msn_prod = 0;
	qp->sq_msn_cons = 0;

	/* no sq: clear the table buf; user must pass an all-zero qdesc */
	if (!qp->has_sq) {
		if (buf) {
			buf->tbl_buf = NULL;
			buf->tbl_limit = 0;
			buf->tbl_pages = 0;
		}
		if (udata)
			rc = ionic_validate_qdesc_zero(sq);

		return rc;
	}

	rc = -EINVAL;

	/* depth is a 16-bit quantity on the device */
	if (max_wr < 0 || max_wr > 0xffff)
		return rc;

	if (max_sge < 1)
		return rc;

	if (max_sge > min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0,
						    qp->sq_cmb &
						    IONIC_CMB_EXPDB),
			  IONIC_SPEC_HIGH))
		return rc;

	if (max_data < 0)
		return rc;

	if (max_data > ionic_v1_send_wqe_max_data(dev->lif_cfg.max_stride,
						  qp->sq_cmb & IONIC_CMB_EXPDB))
		return rc;

	if (udata) {
		/* user qp: adopt the user-described ring, pin its pages */
		rc = ionic_validate_qdesc(sq);
		if (rc)
			return rc;

		qp->sq_spec = sq_spec;

		qp->sq.ptr = NULL;
		qp->sq.size = sq->size;
		qp->sq.mask = sq->mask;
		qp->sq.depth_log2 = sq->depth_log2;
		qp->sq.stride_log2 = sq->stride_log2;

		qp->sq_meta = NULL;
		qp->sq_msn_idx = NULL;

		qp->sq_umem = ib_umem_get(&dev->ibdev, sq->addr, sq->size, 0);
		if (IS_ERR(qp->sq_umem))
			return PTR_ERR(qp->sq_umem);
	} else {
		/* kernel qp: size and allocate the ring ourselves */
		qp->sq_umem = NULL;

		qp->sq_spec = ionic_v1_use_spec_sge(max_sge, sq_spec);
		if (sq_spec && !qp->sq_spec)
			ibdev_dbg(&dev->ibdev,
				  "init sq: max_sge %u disables spec\n",
				  max_sge);

		/* try the expdb stride first; fall back if unsupported */
		if (qp->sq_cmb & IONIC_CMB_EXPDB) {
			wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
							      qp->sq_spec,
							      true);

			if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
				qp->sq_cmb &= ~IONIC_CMB_EXPDB;
		}

		if (!(qp->sq_cmb & IONIC_CMB_EXPDB))
			wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
							      qp->sq_spec,
							      false);

		rc = ionic_queue_init(&qp->sq, dev->lif_cfg.hwdev,
				      max_wr, wqe_size);
		if (rc)
			return rc;

		ionic_queue_dbell_init(&qp->sq, qp->qpid);

		/* one meta entry per wqe slot (mask + 1 == depth) */
		qp->sq_meta = kmalloc_objs(*qp->sq_meta, (u32)qp->sq.mask + 1);
		if (!qp->sq_meta) {
			rc = -ENOMEM;
			goto err_sq_meta;
		}

		qp->sq_msn_idx = kmalloc_array((u32)qp->sq.mask + 1,
					       sizeof(*qp->sq_msn_idx),
					       GFP_KERNEL);
		if (!qp->sq_msn_idx) {
			rc = -ENOMEM;
			goto err_sq_msn;
		}
	}

	/* best-effort cmb placement; clears qp->sq_cmb if it fails */
	ionic_qp_sq_init_cmb(dev, qp, udata, max_data);

	if (qp->sq_cmb & IONIC_CMB_ENABLE)
		rc = ionic_pgtbl_init(dev, buf, NULL,
				      (u64)qp->sq_cmb_pgid << PAGE_SHIFT,
				      1, PAGE_SIZE);
	else
		rc = ionic_pgtbl_init(dev, buf,
				      qp->sq_umem, qp->sq.dma, 1, PAGE_SIZE);
	if (rc)
		goto err_sq_tbl;

	return 0;

err_sq_tbl:
	ionic_qp_sq_destroy_cmb(dev, ctx, qp);
	kfree(qp->sq_msn_idx);
err_sq_msn:
	kfree(qp->sq_meta);
err_sq_meta:
	if (qp->sq_umem)
		ib_umem_release(qp->sq_umem);
	else
		ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
	return rc;
}
1914
ionic_qp_sq_destroy(struct ionic_ibdev * dev,struct ionic_ctx * ctx,struct ionic_qp * qp)1915 static void ionic_qp_sq_destroy(struct ionic_ibdev *dev,
1916 struct ionic_ctx *ctx,
1917 struct ionic_qp *qp)
1918 {
1919 if (!qp->has_sq)
1920 return;
1921
1922 ionic_qp_sq_destroy_cmb(dev, ctx, qp);
1923
1924 kfree(qp->sq_msn_idx);
1925 kfree(qp->sq_meta);
1926
1927 if (qp->sq_umem)
1928 ib_umem_release(qp->sq_umem);
1929 else
1930 ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
1931 }
1932
/* Try to place the receive queue in controller memory (cmb).  Mirrors
 * ionic_qp_sq_init_cmb(): unsupported flags are negotiated away unless
 * IONIC_CMB_REQUIRE is set, and any shortfall disables cmb and falls
 * back to host memory.  Cannot fail; qp->rq_cmb reflects the outcome.
 */
static void ionic_qp_rq_init_cmb(struct ionic_ibdev *dev,
				 struct ionic_qp *qp,
				 struct ib_udata *udata)
{
	u8 expdb_stride_log2 = 0;
	bool expdb;
	int rc;

	if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
		goto not_in_cmb;

	/* drop unsupported flags, unless they were required */
	if (qp->rq_cmb & ~IONIC_CMB_SUPPORTED) {
		if (qp->rq_cmb & IONIC_CMB_REQUIRE)
			goto not_in_cmb;

		qp->rq_cmb &= IONIC_CMB_SUPPORTED;
	}

	/* express doorbell must be supported by the device for rq */
	if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.rq_expdb) {
		if (qp->rq_cmb & IONIC_CMB_REQUIRE)
			goto not_in_cmb;

		qp->rq_cmb &= ~IONIC_CMB_EXPDB;
	}

	/* allocation order (pages) derived from the queue size */
	qp->rq_cmb_order = order_base_2(qp->rq.size / PAGE_SIZE);

	if (qp->rq_cmb_order >= IONIC_RQCMB_ORDER)
		goto not_in_cmb;

	if (qp->rq_cmb & IONIC_CMB_EXPDB)
		expdb_stride_log2 = qp->rq.stride_log2;

	rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->rq_cmb_pgid,
			   &qp->rq_cmb_addr, qp->rq_cmb_order,
			   expdb_stride_log2, &expdb);
	if (rc)
		goto not_in_cmb;

	/* the allocation may not have expdb even if we asked for it */
	if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !expdb) {
		if (qp->rq_cmb & IONIC_CMB_REQUIRE)
			goto err_map;

		qp->rq_cmb &= ~IONIC_CMB_EXPDB;
	}

	return;

err_map:
	ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
not_in_cmb:
	if (qp->rq_cmb & IONIC_CMB_REQUIRE)
		ibdev_dbg(&dev->ibdev, "could not place rq in cmb as required\n");

	/* reset to "host memory" state */
	qp->rq_cmb = 0;
	qp->rq_cmb_order = IONIC_RES_INVALID;
	qp->rq_cmb_pgid = 0;
	qp->rq_cmb_addr = 0;
}
1992
ionic_qp_rq_destroy_cmb(struct ionic_ibdev * dev,struct ionic_ctx * ctx,struct ionic_qp * qp)1993 static void ionic_qp_rq_destroy_cmb(struct ionic_ibdev *dev,
1994 struct ionic_ctx *ctx,
1995 struct ionic_qp *qp)
1996 {
1997 if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
1998 return;
1999
2000 if (ctx)
2001 rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
2002
2003 ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
2004 }
2005
/* Initialize the receive queue of a qp.  For user qps (udata), the ring
 * is user memory pinned via ib_umem; for kernel qps the ring and per-wqe
 * meta free-list are allocated by the driver.  Afterward, cmb placement
 * is attempted and the page table buffer for the create command is
 * built.  Return: 0 or negative errno.
 */
static int ionic_qp_rq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
			    struct ionic_qp *qp, struct ionic_qdesc *rq,
			    struct ionic_tbl_buf *buf, int max_wr, int max_sge,
			    int rq_spec, struct ib_udata *udata)
{
	int rc = 0, i;
	u32 wqe_size;

	/* no rq: clear the table buf; user must pass an all-zero qdesc */
	if (!qp->has_rq) {
		if (buf) {
			buf->tbl_buf = NULL;
			buf->tbl_limit = 0;
			buf->tbl_pages = 0;
		}
		if (udata)
			rc = ionic_validate_qdesc_zero(rq);

		return rc;
	}

	rc = -EINVAL;

	/* depth is a 16-bit quantity on the device */
	if (max_wr < 0 || max_wr > 0xffff)
		return rc;

	if (max_sge < 1)
		return rc;

	if (max_sge > min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
			  IONIC_SPEC_HIGH))
		return rc;

	if (udata) {
		/* user qp: adopt the user-described ring, pin its pages */
		rc = ionic_validate_qdesc(rq);
		if (rc)
			return rc;

		qp->rq_spec = rq_spec;

		qp->rq.ptr = NULL;
		qp->rq.size = rq->size;
		qp->rq.mask = rq->mask;
		qp->rq.depth_log2 = rq->depth_log2;
		qp->rq.stride_log2 = rq->stride_log2;

		qp->rq_meta = NULL;

		qp->rq_umem = ib_umem_get(&dev->ibdev, rq->addr, rq->size, 0);
		if (IS_ERR(qp->rq_umem))
			return PTR_ERR(qp->rq_umem);
	} else {
		/* kernel qp: size and allocate the ring ourselves */
		qp->rq_umem = NULL;

		qp->rq_spec = ionic_v1_use_spec_sge(max_sge, rq_spec);
		if (rq_spec && !qp->rq_spec)
			ibdev_dbg(&dev->ibdev,
				  "init rq: max_sge %u disables spec\n",
				  max_sge);

		/* try the expdb stride first; fall back if unsupported */
		if (qp->rq_cmb & IONIC_CMB_EXPDB) {
			wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
							      qp->rq_spec,
							      true);

			if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
				qp->rq_cmb &= ~IONIC_CMB_EXPDB;
		}

		if (!(qp->rq_cmb & IONIC_CMB_EXPDB))
			wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
							      qp->rq_spec,
							      false);

		rc = ionic_queue_init(&qp->rq, dev->lif_cfg.hwdev,
				      max_wr, wqe_size);
		if (rc)
			return rc;

		ionic_queue_dbell_init(&qp->rq, qp->qpid);

		/* one meta entry per wqe slot (mask + 1 == depth) */
		qp->rq_meta = kmalloc_objs(*qp->rq_meta, (u32)qp->rq.mask + 1);
		if (!qp->rq_meta) {
			rc = -ENOMEM;
			goto err_rq_meta;
		}

		/* link the meta entries into a singly-linked free list */
		for (i = 0; i < qp->rq.mask; ++i)
			qp->rq_meta[i].next = &qp->rq_meta[i + 1];
		qp->rq_meta[i].next = IONIC_META_LAST;
		qp->rq_meta_head = &qp->rq_meta[0];
	}

	/* best-effort cmb placement; clears qp->rq_cmb if it fails */
	ionic_qp_rq_init_cmb(dev, qp, udata);

	if (qp->rq_cmb & IONIC_CMB_ENABLE)
		rc = ionic_pgtbl_init(dev, buf, NULL,
				      (u64)qp->rq_cmb_pgid << PAGE_SHIFT,
				      1, PAGE_SIZE);
	else
		rc = ionic_pgtbl_init(dev, buf,
				      qp->rq_umem, qp->rq.dma, 1, PAGE_SIZE);
	if (rc)
		goto err_rq_tbl;

	return 0;

err_rq_tbl:
	ionic_qp_rq_destroy_cmb(dev, ctx, qp);
	kfree(qp->rq_meta);
err_rq_meta:
	if (qp->rq_umem)
		ib_umem_release(qp->rq_umem);
	else
		ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
	return rc;
}
2122
ionic_qp_rq_destroy(struct ionic_ibdev * dev,struct ionic_ctx * ctx,struct ionic_qp * qp)2123 static void ionic_qp_rq_destroy(struct ionic_ibdev *dev,
2124 struct ionic_ctx *ctx,
2125 struct ionic_qp *qp)
2126 {
2127 if (!qp->has_rq)
2128 return;
2129
2130 ionic_qp_rq_destroy_cmb(dev, ctx, qp);
2131
2132 kfree(qp->rq_meta);
2133
2134 if (qp->rq_umem)
2135 ib_umem_release(qp->rq_umem);
2136 else
2137 ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
2138 }
2139
/**
 * ionic_create_qp() - Allocate and program a queue pair on the device.
 * @ibqp:	New ib_qp embedded in the driver's ionic_qp (core-allocated).
 * @attr:	Requested QP type, capacities, and completion queues.
 * @udata:	User-verbs request/response buffers; NULL for kernel QPs.
 *
 * Copies the userspace request (if any), reserves a qpid (dedicated GSI
 * id, or one constrained to a udma reachable by both CQs), builds the
 * SQ/RQ rings, issues the firmware create command, maps any requested
 * controller-memory-buffer (CMB) regions for userspace, and publishes
 * the QP in the device's qp_tbl.  On failure every step is unwound in
 * reverse via the goto ladder at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		    struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
	struct ionic_tbl_buf sq_buf = {}, rq_buf = {};
	struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
	struct ionic_qp *qp = to_ionic_qp(ibqp);
	struct ionic_ctx *ctx =
		rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
	struct ionic_qp_resp resp = {};
	struct ionic_qp_req req = {};
	struct ionic_cq *cq;
	u8 udma_mask;
	void *entry;
	int rc;

	if (udata) {
		rc = ib_copy_from_udata(&req, udata, sizeof(req));
		if (rc)
			return rc;
	} else {
		/* kernel QPs default to the highest work-request spec */
		req.sq_spec = IONIC_SPEC_HIGH;
		req.rq_spec = IONIC_SPEC_HIGH;
	}

	/* only RC, UC, UD and GSI types are supported; SMI is excluded */
	if (attr->qp_type == IB_QPT_SMI || attr->qp_type > IB_QPT_UD)
		return -EOPNOTSUPP;

	qp->state = IB_QPS_RESET;

	INIT_LIST_HEAD(&qp->cq_poll_sq);
	INIT_LIST_HEAD(&qp->cq_flush_sq);
	INIT_LIST_HEAD(&qp->cq_flush_rq);

	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);

	qp->has_sq = 1;
	qp->has_rq = 1;

	if (attr->qp_type == IB_QPT_GSI) {
		rc = ionic_get_gsi_qpid(dev, &qp->qpid);
	} else {
		/* the QP's udma must be reachable by both of its CQs */
		udma_mask = BIT(dev->lif_cfg.udma_count) - 1;

		if (qp->has_sq)
			udma_mask &= to_ionic_vcq(attr->send_cq)->udma_mask;

		if (qp->has_rq)
			udma_mask &= to_ionic_vcq(attr->recv_cq)->udma_mask;

		/* userspace may further restrict the udma choice */
		if (udata && req.udma_mask)
			udma_mask &= req.udma_mask;

		if (!udma_mask)
			return -EINVAL;

		rc = ionic_get_qpid(dev, &qp->qpid, &qp->udma_idx, udma_mask);
	}
	if (rc)
		return rc;

	qp->sig_all = attr->sq_sig_type == IB_SIGNAL_ALL_WR;
	/* only RC QPs keep a resident header template and AH id */
	qp->has_ah = attr->qp_type == IB_QPT_RC;

	if (qp->has_ah) {
		/* zeroed allocation sized for the header template */
		qp->hdr = kzalloc_obj(*qp->hdr);
		if (!qp->hdr) {
			rc = -ENOMEM;
			goto err_ah_alloc;
		}

		rc = ionic_get_ahid(dev, &qp->ahid);
		if (rc)
			goto err_ahid;
	}

	if (udata) {
		/* honor CMB requests only when explicitly enabled */
		if (req.rq_cmb & IONIC_CMB_ENABLE)
			qp->rq_cmb = req.rq_cmb;

		if (req.sq_cmb & IONIC_CMB_ENABLE)
			qp->sq_cmb = req.sq_cmb;
	}

	rc = ionic_qp_sq_init(dev, ctx, qp, &req.sq, &sq_buf,
			      attr->cap.max_send_wr, attr->cap.max_send_sge,
			      attr->cap.max_inline_data, req.sq_spec, udata);
	if (rc)
		goto err_sq;

	rc = ionic_qp_rq_init(dev, ctx, qp, &req.rq, &rq_buf,
			      attr->cap.max_recv_wr, attr->cap.max_recv_sge,
			      req.rq_spec, udata);
	if (rc)
		goto err_rq;

	rc = ionic_create_qp_cmd(dev, pd,
				 to_ionic_vcq_cq(attr->send_cq, qp->udma_idx),
				 to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx),
				 qp, &sq_buf, &rq_buf, attr);
	if (rc)
		goto err_cmd;

	if (udata) {
		resp.qpid = qp->qpid;
		resp.udma_idx = qp->udma_idx;

		if (qp->sq_cmb & IONIC_CMB_ENABLE) {
			bool wc;

			/* both cache attributes set is ambiguous; fall back */
			if ((qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
				(IONIC_CMB_WC | IONIC_CMB_UC)) {
				ibdev_dbg(&dev->ibdev,
					  "Both sq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
				qp->sq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
			}

			/* write-combined unless UC was the sole request */
			wc = (qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
				!= IONIC_CMB_UC;

			/* let userspace know the mapping */
			if (wc)
				qp->sq_cmb |= IONIC_CMB_WC;
			else
				qp->sq_cmb |= IONIC_CMB_UC;

			qp->mmap_sq_cmb =
				ionic_mmap_entry_insert(ctx,
							qp->sq.size,
							PHYS_PFN(qp->sq_cmb_addr),
							wc ? IONIC_MMAP_WC : 0,
							&resp.sq_cmb_offset);
			if (!qp->mmap_sq_cmb) {
				rc = -ENOMEM;
				goto err_mmap_sq;
			}

			resp.sq_cmb = qp->sq_cmb;
		}

		if (qp->rq_cmb & IONIC_CMB_ENABLE) {
			bool wc;

			/* both cache attributes set is ambiguous; fall back */
			if ((qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
				(IONIC_CMB_WC | IONIC_CMB_UC)) {
				ibdev_dbg(&dev->ibdev,
					  "Both rq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
				qp->rq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
			}

			/*
			 * With express doorbells the default (no flag) is an
			 * uncached mapping; otherwise default write-combined.
			 */
			if (qp->rq_cmb & IONIC_CMB_EXPDB)
				wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
					== IONIC_CMB_WC;
			else
				wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
					!= IONIC_CMB_UC;

			/* let userspace know the mapping */
			if (wc)
				qp->rq_cmb |= IONIC_CMB_WC;
			else
				qp->rq_cmb |= IONIC_CMB_UC;

			qp->mmap_rq_cmb =
				ionic_mmap_entry_insert(ctx,
							qp->rq.size,
							PHYS_PFN(qp->rq_cmb_addr),
							wc ? IONIC_MMAP_WC : 0,
							&resp.rq_cmb_offset);
			if (!qp->mmap_rq_cmb) {
				rc = -ENOMEM;
				goto err_mmap_rq;
			}

			resp.rq_cmb = qp->rq_cmb;
		}

		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc)
			goto err_resp;
	}

	/* the page-table buffers are no longer needed after the command */
	ionic_pgtbl_unbuf(dev, &rq_buf);
	ionic_pgtbl_unbuf(dev, &sq_buf);

	qp->ibqp.qp_num = qp->qpid;

	init_completion(&qp->qp_rel_comp);
	kref_init(&qp->qp_kref);

	/* publish the QP; a non-NULL return means the slot was taken or err */
	entry = xa_store_irq(&dev->qp_tbl, qp->qpid, qp, GFP_KERNEL);
	if (entry) {
		if (!xa_is_err(entry))
			rc = -EINVAL;
		else
			rc = xa_err(entry);

		goto err_resp;
	}

	if (qp->has_sq) {
		cq = to_ionic_vcq_cq(attr->send_cq, qp->udma_idx);

		/* report the actual capacities back to the caller */
		attr->cap.max_send_wr = qp->sq.mask;
		attr->cap.max_send_sge =
			ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
						  qp->sq_spec,
						  qp->sq_cmb & IONIC_CMB_EXPDB);
		attr->cap.max_inline_data =
			ionic_v1_send_wqe_max_data(qp->sq.stride_log2,
						   qp->sq_cmb &
						   IONIC_CMB_EXPDB);
		qp->sq_cqid = cq->cqid;
	}

	if (qp->has_rq) {
		cq = to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx);

		attr->cap.max_recv_wr = qp->rq.mask;
		attr->cap.max_recv_sge =
			ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
						  qp->rq_spec,
						  qp->rq_cmb & IONIC_CMB_EXPDB);
		qp->rq_cqid = cq->cqid;
	}

	return 0;

err_resp:
	if (udata && (qp->rq_cmb & IONIC_CMB_ENABLE))
		rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
err_mmap_rq:
	if (udata && (qp->sq_cmb & IONIC_CMB_ENABLE))
		rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
err_mmap_sq:
	ionic_destroy_qp_cmd(dev, qp->qpid);
err_cmd:
	ionic_pgtbl_unbuf(dev, &rq_buf);
	ionic_qp_rq_destroy(dev, ctx, qp);
err_rq:
	ionic_pgtbl_unbuf(dev, &sq_buf);
	ionic_qp_sq_destroy(dev, ctx, qp);
err_sq:
	if (qp->has_ah)
		ionic_put_ahid(dev, qp->ahid);
err_ahid:
	kfree(qp->hdr);
err_ah_alloc:
	ionic_put_qpid(dev, qp->qpid);
	return rc;
}
2392
ionic_notify_flush_cq(struct ionic_cq * cq)2393 void ionic_notify_flush_cq(struct ionic_cq *cq)
2394 {
2395 if (cq->flush && cq->vcq->ibcq.comp_handler)
2396 cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
2397 cq->vcq->ibcq.cq_context);
2398 }
2399
ionic_notify_qp_cqs(struct ionic_ibdev * dev,struct ionic_qp * qp)2400 static void ionic_notify_qp_cqs(struct ionic_ibdev *dev, struct ionic_qp *qp)
2401 {
2402 if (qp->ibqp.send_cq)
2403 ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.send_cq,
2404 qp->udma_idx));
2405 if (qp->ibqp.recv_cq && qp->ibqp.recv_cq != qp->ibqp.send_cq)
2406 ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.recv_cq,
2407 qp->udma_idx));
2408 }
2409
/*
 * ionic_flush_qp() - Mark a QP's queues for flushing after an error.
 * @dev:	The ionic RDMA device (unused here; kept for call symmetry).
 * @qp:		The queue pair entering the error state.
 *
 * For each work queue the CQ lock is taken first and the queue lock
 * nested inside it.  The queue's flush flag is always set; a non-empty
 * queue is additionally moved onto its CQ's flush list so the remaining
 * work requests can be completed in error by a later poll.
 */
void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
{
	unsigned long irqflags;
	struct ionic_cq *cq;

	if (qp->ibqp.send_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);

		/* Hold the CQ lock and QP sq_lock to set up flush */
		spin_lock_irqsave(&cq->lock, irqflags);
		spin_lock(&qp->sq_lock);
		qp->sq_flush = true;
		if (!ionic_queue_empty(&qp->sq)) {
			/* outstanding SQ entries need flush completions */
			cq->flush = true;
			list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
		}
		spin_unlock(&qp->sq_lock);
		spin_unlock_irqrestore(&cq->lock, irqflags);
	}

	if (qp->ibqp.recv_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);

		/* Hold the CQ lock and QP rq_lock to set up flush */
		spin_lock_irqsave(&cq->lock, irqflags);
		spin_lock(&qp->rq_lock);
		qp->rq_flush = true;
		if (!ionic_queue_empty(&qp->rq)) {
			/* outstanding RQ entries need flush completions */
			cq->flush = true;
			list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
		}
		spin_unlock(&qp->rq_lock);
		spin_unlock_irqrestore(&cq->lock, irqflags);
	}
}
2445
/*
 * Scrub a CQ of any not-yet-polled completions belonging to @qpid,
 * leaving admin-type entries untouched.  Walks forward from the
 * current producer position until the color bit no longer matches.
 */
static void ionic_clean_cq(struct ionic_cq *cq, u32 qpid)
{
	struct ionic_v1_cqe *qcqe;
	bool color;
	int prod;

	if (!cq->q.ptr)
		return;

	prod = cq->q.prod;
	color = cq->color;

	for (;;) {
		int qtf, qid, type;

		qcqe = ionic_queue_at(&cq->q, prod);
		if (ionic_v1_cqe_color(qcqe) != color)
			break;

		qtf = ionic_v1_cqe_qtf(qcqe);
		qid = ionic_v1_cqe_qtf_qid(qtf);
		type = ionic_v1_cqe_qtf_type(qtf);

		if (qid == qpid && type != IONIC_V1_CQE_TYPE_ADMIN)
			ionic_v1_cqe_clean(qcqe);

		prod = ionic_queue_next(&cq->q, prod);
		color = ionic_color_wrap(prod, color);
	}
}
2472
/*
 * ionic_reset_qp() - Return a QP's software state to the reset state.
 * @dev:	The ionic RDMA device.
 * @qp:		The queue pair being moved to IB_QPS_RESET.
 *
 * Scrubs any of this QP's completions still sitting in its CQs, then
 * clears the flush flags, ring indices, and the recv metadata free
 * list.  Interrupts are disabled once around the whole sequence so the
 * CQ and queue spinlocks can be taken with plain spin_lock().
 */
static void ionic_reset_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
{
	unsigned long irqflags;
	struct ionic_cq *cq;
	int i;

	local_irq_save(irqflags);

	if (qp->ibqp.send_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
		spin_lock(&cq->lock);
		ionic_clean_cq(cq, qp->qpid);
		spin_unlock(&cq->lock);
	}

	if (qp->ibqp.recv_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
		spin_lock(&cq->lock);
		ionic_clean_cq(cq, qp->qpid);
		spin_unlock(&cq->lock);
	}

	if (qp->has_sq) {
		spin_lock(&qp->sq_lock);
		qp->sq_flush = false;
		qp->sq_flush_rcvd = false;
		/* reset message sequence numbers and ring positions */
		qp->sq_msn_prod = 0;
		qp->sq_msn_cons = 0;
		qp->sq.prod = 0;
		qp->sq.cons = 0;
		spin_unlock(&qp->sq_lock);
	}

	if (qp->has_rq) {
		spin_lock(&qp->rq_lock);
		qp->rq_flush = false;
		qp->rq.prod = 0;
		qp->rq.cons = 0;
		if (qp->rq_meta) {
			/* rebuild the linked free list of recv metadata */
			for (i = 0; i < qp->rq.mask; ++i)
				qp->rq_meta[i].next = &qp->rq_meta[i + 1];
			qp->rq_meta[i].next = IONIC_META_LAST;
		}
		qp->rq_meta_head = &qp->rq_meta[0];
		spin_unlock(&qp->rq_lock);
	}

	local_irq_restore(irqflags);
}
2522
ionic_qp_cur_state_is_ok(enum ib_qp_state q_state,enum ib_qp_state attr_state)2523 static bool ionic_qp_cur_state_is_ok(enum ib_qp_state q_state,
2524 enum ib_qp_state attr_state)
2525 {
2526 if (q_state == attr_state)
2527 return true;
2528
2529 if (attr_state == IB_QPS_ERR)
2530 return true;
2531
2532 if (attr_state == IB_QPS_SQE)
2533 return q_state == IB_QPS_RTS || q_state == IB_QPS_SQD;
2534
2535 return false;
2536 }
2537
ionic_check_modify_qp(struct ionic_qp * qp,struct ib_qp_attr * attr,int mask)2538 static int ionic_check_modify_qp(struct ionic_qp *qp, struct ib_qp_attr *attr,
2539 int mask)
2540 {
2541 enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
2542 attr->cur_qp_state : qp->state;
2543 enum ib_qp_state next_state = (mask & IB_QP_STATE) ?
2544 attr->qp_state : cur_state;
2545
2546 if ((mask & IB_QP_CUR_STATE) &&
2547 !ionic_qp_cur_state_is_ok(qp->state, attr->cur_qp_state))
2548 return -EINVAL;
2549
2550 if (!ib_modify_qp_is_ok(cur_state, next_state, qp->ibqp.qp_type, mask))
2551 return -EINVAL;
2552
2553 /* unprivileged qp not allowed privileged qkey */
2554 if ((mask & IB_QP_QKEY) && (attr->qkey & 0x80000000) &&
2555 qp->ibqp.uobject)
2556 return -EPERM;
2557
2558 return 0;
2559 }
2560
ionic_modify_qp(struct ib_qp * ibqp,struct ib_qp_attr * attr,int mask,struct ib_udata * udata)2561 int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
2562 struct ib_udata *udata)
2563 {
2564 struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
2565 struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
2566 struct ionic_qp *qp = to_ionic_qp(ibqp);
2567 int rc;
2568
2569 rc = ionic_check_modify_qp(qp, attr, mask);
2570 if (rc)
2571 return rc;
2572
2573 if (mask & IB_QP_CAP)
2574 return -EINVAL;
2575
2576 rc = ionic_modify_qp_cmd(dev, pd, qp, attr, mask);
2577 if (rc)
2578 return rc;
2579
2580 if (mask & IB_QP_STATE) {
2581 qp->state = attr->qp_state;
2582
2583 if (attr->qp_state == IB_QPS_ERR) {
2584 ionic_flush_qp(dev, qp);
2585 ionic_notify_qp_cqs(dev, qp);
2586 } else if (attr->qp_state == IB_QPS_RESET) {
2587 ionic_reset_qp(dev, qp);
2588 }
2589 }
2590
2591 return 0;
2592 }
2593
ionic_query_qp(struct ib_qp * ibqp,struct ib_qp_attr * attr,int mask,struct ib_qp_init_attr * init_attr)2594 int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2595 int mask, struct ib_qp_init_attr *init_attr)
2596 {
2597 struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
2598 struct ionic_qp *qp = to_ionic_qp(ibqp);
2599 int rc;
2600
2601 memset(attr, 0, sizeof(*attr));
2602 memset(init_attr, 0, sizeof(*init_attr));
2603
2604 rc = ionic_query_qp_cmd(dev, qp, attr, mask);
2605 if (rc)
2606 return rc;
2607
2608 if (qp->has_sq)
2609 attr->cap.max_send_wr = qp->sq.mask;
2610
2611 if (qp->has_rq)
2612 attr->cap.max_recv_wr = qp->rq.mask;
2613
2614 init_attr->event_handler = ibqp->event_handler;
2615 init_attr->qp_context = ibqp->qp_context;
2616 init_attr->send_cq = ibqp->send_cq;
2617 init_attr->recv_cq = ibqp->recv_cq;
2618 init_attr->srq = ibqp->srq;
2619 init_attr->xrcd = ibqp->xrcd;
2620 init_attr->cap = attr->cap;
2621 init_attr->sq_sig_type = qp->sig_all ?
2622 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
2623 init_attr->qp_type = ibqp->qp_type;
2624 init_attr->create_flags = 0;
2625 init_attr->port_num = 0;
2626 init_attr->rwq_ind_tbl = ibqp->rwq_ind_tbl;
2627 init_attr->source_qpn = 0;
2628
2629 return rc;
2630 }
2631
/*
 * ionic_destroy_qp() - Destroy a queue pair.
 * @ibqp:	The queue pair to destroy.
 * @udata:	User verbs data; NULL for kernel QPs.
 *
 * Destroys the QP in firmware first (and bails out if that fails, so a
 * live device QP is never left without its software state), unpublishes
 * it from qp_tbl, waits for all outstanding references to drop, scrubs
 * both CQs and unlinks the QP from their poll/flush lists, then frees
 * the rings and all reserved ids.
 *
 * Return: 0 on success, negative errno if the firmware destroy fails.
 */
int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct ionic_ctx *ctx =
		rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
	struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
	struct ionic_qp *qp = to_ionic_qp(ibqp);
	unsigned long irqflags;
	struct ionic_cq *cq;
	int rc;

	rc = ionic_destroy_qp_cmd(dev, qp->qpid);
	if (rc)
		return rc;

	/* no new lookups can find the QP after this */
	xa_erase_irq(&dev->qp_tbl, qp->qpid);

	/* drop the initial reference and wait out any remaining holders */
	kref_put(&qp->qp_kref, ionic_qp_complete);
	wait_for_completion(&qp->qp_rel_comp);

	if (qp->ibqp.send_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
		spin_lock_irqsave(&cq->lock, irqflags);
		ionic_clean_cq(cq, qp->qpid);
		list_del(&qp->cq_poll_sq);
		list_del(&qp->cq_flush_sq);
		spin_unlock_irqrestore(&cq->lock, irqflags);
	}

	if (qp->ibqp.recv_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
		spin_lock_irqsave(&cq->lock, irqflags);
		ionic_clean_cq(cq, qp->qpid);
		list_del(&qp->cq_flush_rq);
		spin_unlock_irqrestore(&cq->lock, irqflags);
	}

	ionic_qp_rq_destroy(dev, ctx, qp);
	ionic_qp_sq_destroy(dev, ctx, qp);
	if (qp->has_ah) {
		ionic_put_ahid(dev, qp->ahid);
		kfree(qp->hdr);
	}
	ionic_put_qpid(dev, qp->qpid);

	return 0;
}
2678