1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
3
4 #include <linux/module.h>
5 #include <linux/printk.h>
6 #include <rdma/ib_addr.h>
7 #include <rdma/ib_cache.h>
8 #include <rdma/ib_user_verbs.h>
9 #include <ionic_api.h>
10
11 #include "ionic_fw.h"
12 #include "ionic_ibdev.h"
13
/* Mark tos as ECN-capable: set bit 1 and clear bit 0 of the ECN field */
#define ionic_set_ecn(tos) (((tos) | 2u) & ~1u)
/* Clear both ECN bits of tos (not ECN-capable transport) */
#define ionic_clear_ecn(tos) ((tos) & ~3u)
16
ionic_validate_qdesc(struct ionic_qdesc * q)17 static int ionic_validate_qdesc(struct ionic_qdesc *q)
18 {
19 if (!q->addr || !q->size || !q->mask ||
20 !q->depth_log2 || !q->stride_log2)
21 return -EINVAL;
22
23 if (q->addr & (PAGE_SIZE - 1))
24 return -EINVAL;
25
26 if (q->mask != BIT(q->depth_log2) - 1)
27 return -EINVAL;
28
29 if (q->size < BIT_ULL(q->depth_log2 + q->stride_log2))
30 return -EINVAL;
31
32 return 0;
33 }
34
/* Map a completion vector and udma index to an event queue id.
 *
 * There is one EQ per vector per udma, and the first EQ group is
 * reserved for async events; completion vectors wrap around the
 * remaining groups.
 */
static u32 ionic_get_eqid(struct ionic_ibdev *dev, u32 comp_vector, u8 udma_idx)
{
	u32 comp_vec_count = dev->lif_cfg.eq_count / dev->lif_cfg.udma_count - 1;
	u32 grp = comp_vector % comp_vec_count + 1;

	return grp * dev->lif_cfg.udma_count + udma_idx;
}
44
/* Reserve a cq id from the half of the cqid space belonging to udma_idx.
 * On success, writes the device-visible cqid to *cqid and returns zero;
 * otherwise returns the negative error from the resource allocator.
 */
static int ionic_get_cqid(struct ionic_ibdev *dev, u32 *cqid, u8 udma_idx)
{
	unsigned int per_udma = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
	unsigned int first = per_udma * udma_idx;
	int bit;

	bit = ionic_resid_get_shared(&dev->inuse_cqid, first, first + per_udma);
	if (bit < 0)
		return bit;

	/* cq_base is zero or a multiple of two queue groups */
	*cqid = dev->lif_cfg.cq_base +
		ionic_bitid_to_qid(bit, dev->lif_cfg.udma_qgrp_shift,
				   dev->half_cqid_udma_shift);

	return 0;
}
66
/* Release a cq id previously reserved by ionic_get_cqid() */
static void ionic_put_cqid(struct ionic_ibdev *dev, u32 cqid)
{
	u32 bitid;

	/* invert the bitid-to-qid mapping used at allocation */
	bitid = ionic_qid_to_bitid(cqid - dev->lif_cfg.cq_base,
				   dev->lif_cfg.udma_qgrp_shift,
				   dev->half_cqid_udma_shift);
	ionic_resid_put(&dev->inuse_cqid, bitid);
}
75
/*
 * ionic_create_cq_common() - Initialize one cq of a vcq for one udma.
 * @vcq:	Parent verbs cq container.
 * @buf:	Page table buffer, filled here for the caller's admin command.
 * @attr:	Verbs cq attributes (cqe count, completion vector).
 * @ctx:	User context, or NULL for a kernel cq.
 * @udata:	Non-NULL for a user cq; selects the user-memory path.
 * @req_cq:	User queue descriptor (validated only when udata is set).
 * @resp_cqid:	Out: cqid reported back to userspace (user path only).
 * @udma_idx:	Which udma half this cq belongs to.
 *
 * User path: validate and pin the userspace ring via ib_umem_get().
 * Kernel path: allocate the ring with ionic_queue_init() and prime the
 * doorbell, color and credits.  Either way the cq is entered into the
 * device-wide cq_tbl xarray so the event path can find it.
 *
 * Return: 0 on success, negative errno on failure.  On failure all
 * partially acquired resources are released via the shared unwind
 * labels below (which serve both the user and kernel paths).
 */
int ionic_create_cq_common(struct ionic_vcq *vcq,
			   struct ionic_tbl_buf *buf,
			   const struct ib_cq_init_attr *attr,
			   struct ionic_ctx *ctx,
			   struct ib_udata *udata,
			   struct ionic_qdesc *req_cq,
			   __u32 *resp_cqid,
			   int udma_idx)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(vcq->ibcq.device);
	struct ionic_cq *cq = &vcq->cq[udma_idx];
	void *entry;
	int rc;

	cq->vcq = vcq;

	/* depth plus grace entries must still fit in 16 bits */
	if (attr->cqe < 1 || attr->cqe + IONIC_CQ_GRACE > 0xffff) {
		rc = -EINVAL;
		goto err_args;
	}

	rc = ionic_get_cqid(dev, &cq->cqid, udma_idx);
	if (rc)
		goto err_args;

	cq->eqid = ionic_get_eqid(dev, attr->comp_vector, udma_idx);

	spin_lock_init(&cq->lock);
	INIT_LIST_HEAD(&cq->poll_sq);
	INIT_LIST_HEAD(&cq->flush_sq);
	INIT_LIST_HEAD(&cq->flush_rq);

	if (udata) {
		rc = ionic_validate_qdesc(req_cq);
		if (rc)
			goto err_qdesc;

		/* pin the userspace ring; device will write completions here */
		cq->umem = ib_umem_get(&dev->ibdev, req_cq->addr, req_cq->size,
				       IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(cq->umem)) {
			rc = PTR_ERR(cq->umem);
			goto err_qdesc;
		}

		/* no kernel mapping; geometry mirrors the user descriptor */
		cq->q.ptr = NULL;
		cq->q.size = req_cq->size;
		cq->q.mask = req_cq->mask;
		cq->q.depth_log2 = req_cq->depth_log2;
		cq->q.stride_log2 = req_cq->stride_log2;

		*resp_cqid = cq->cqid;
	} else {
		rc = ionic_queue_init(&cq->q, dev->lif_cfg.hwdev,
				      attr->cqe + IONIC_CQ_GRACE,
				      sizeof(struct ionic_v1_cqe));
		if (rc)
			goto err_q_init;

		ionic_queue_dbell_init(&cq->q, cq->cqid);
		cq->color = true;
		cq->credit = cq->q.mask;
	}

	/* NOTE(review): kernel path relies on cq->umem being NULL here —
	 * presumably the vcq is zero-initialized by the caller; confirm.
	 */
	rc = ionic_pgtbl_init(dev, buf, cq->umem, cq->q.dma, 1, PAGE_SIZE);
	if (rc)
		goto err_pgtbl_init;

	init_completion(&cq->cq_rel_comp);
	kref_init(&cq->cq_kref);

	/* publish in the lookup table; any existing entry is an error */
	entry = xa_store_irq(&dev->cq_tbl, cq->cqid, cq, GFP_KERNEL);
	if (entry) {
		if (!xa_is_err(entry))
			rc = -EINVAL;
		else
			rc = xa_err(entry);

		goto err_xa;
	}

	return 0;

err_xa:
	ionic_pgtbl_unbuf(dev, buf);
err_pgtbl_init:
	if (!udata)
		ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
err_q_init:
	if (cq->umem)
		ib_umem_release(cq->umem);
err_qdesc:
	ionic_put_cqid(dev, cq->cqid);
err_args:
	cq->vcq = NULL;

	return rc;
}
173
/*
 * ionic_destroy_cq_common() - Tear down one cq of a vcq.
 *
 * A cq with no vcq back-pointer was never created (or already torn
 * down), so this is safe to call on a partially constructed vcq.
 * The cq is removed from the lookup xarray before waiting, so no new
 * references can be taken; then the initial kref is dropped and we
 * block until any concurrent users release theirs.
 */
void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq)
{
	if (!cq->vcq)
		return;

	xa_erase_irq(&dev->cq_tbl, cq->cqid);

	kref_put(&cq->cq_kref, ionic_cq_complete);
	wait_for_completion(&cq->cq_rel_comp);

	/* umem set only for user cqs; kernel cqs own a dma ring instead */
	if (cq->umem)
		ib_umem_release(cq->umem);
	else
		ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);

	ionic_put_cqid(dev, cq->cqid);

	cq->vcq = NULL;
}
193
ionic_validate_qdesc_zero(struct ionic_qdesc * q)194 static int ionic_validate_qdesc_zero(struct ionic_qdesc *q)
195 {
196 if (q->addr || q->size || q->mask || q->depth_log2 || q->stride_log2)
197 return -EINVAL;
198
199 return 0;
200 }
201
/* Reserve a protection domain id; 0 on success with *pdid set */
static int ionic_get_pdid(struct ionic_ibdev *dev, u32 *pdid)
{
	int bit = ionic_resid_get(&dev->inuse_pdid);

	if (bit < 0)
		return bit;

	*pdid = bit;

	return 0;
}
213
/* Reserve an address handle id; 0 on success with *ahid set */
static int ionic_get_ahid(struct ionic_ibdev *dev, u32 *ahid)
{
	int bit = ionic_resid_get(&dev->inuse_ahid);

	if (bit < 0)
		return bit;

	*ahid = bit;

	return 0;
}
225
/* Reserve an mr id, combining the index with a rolling key byte */
static int ionic_get_mrid(struct ionic_ibdev *dev, u32 *mrid)
{
	int bit;

	/* wrap to 1, skip reserved lkey */
	bit = ionic_resid_get_shared(&dev->inuse_mrid, 1,
				     dev->inuse_mrid.inuse_size);
	if (bit < 0)
		return bit;

	*mrid = ionic_mrid(bit, dev->next_mrkey++);

	return 0;
}
239
/* Claim the single reserved GSI qpid slot */
static int ionic_get_gsi_qpid(struct ionic_ibdev *dev, u32 *qpid)
{
	int rc;

	rc = ionic_resid_get_shared(&dev->inuse_qpid, IB_QPT_GSI,
				    IB_QPT_GSI + 1);
	if (rc < 0)
		return rc;

	*qpid = IB_QPT_GSI;

	return 0;
}
251
/*
 * ionic_get_qpid() - Reserve a qp id from one of the allowed udma halves.
 * @dev:	ionic device.
 * @qpid:	Out: device-visible qp id.
 * @udma_idx:	Out: udma half the id was taken from.
 * @udma_mask:	Bitmask of udma halves the caller will accept.
 *
 * next_qpid_udma_idx alternates the starting half between calls
 * (toggled via XOR with udma_count - 1) to spread qps across udmas;
 * the loop then tries each permitted half in that rotated order.
 *
 * Return: 0 on success, negative errno if no id is available in any
 * permitted half (-EINVAL if udma_mask selected no half at all).
 */
static int ionic_get_qpid(struct ionic_ibdev *dev, u32 *qpid,
			  u8 *udma_idx, u8 udma_mask)
{
	unsigned int size, base, bound;
	int udma_i, udma_x, udma_ix;
	int rc = -EINVAL;

	udma_x = dev->next_qpid_udma_idx;

	dev->next_qpid_udma_idx ^= dev->lif_cfg.udma_count - 1;

	for (udma_i = 0; udma_i < dev->lif_cfg.udma_count; ++udma_i) {
		/* visit halves starting from the rotated index */
		udma_ix = udma_i ^ udma_x;

		if (!(udma_mask & BIT(udma_ix)))
			continue;

		size = dev->lif_cfg.qp_count / dev->lif_cfg.udma_count;
		base = size * udma_ix;
		bound = base + size;

		/* skip reserved SMI and GSI qpids in group zero */
		if (!base)
			base = 2;

		rc = ionic_resid_get_shared(&dev->inuse_qpid, base, bound);
		if (rc >= 0) {
			*qpid = ionic_bitid_to_qid(rc,
						   dev->lif_cfg.udma_qgrp_shift,
						   dev->half_qpid_udma_shift);
			*udma_idx = udma_ix;

			rc = 0;
			break;
		}
	}

	return rc;
}
291
/* Reserve a doorbell id and compute the physical address of its
 * doorbell page within this lif's doorbell region.
 */
static int ionic_get_dbid(struct ionic_ibdev *dev, u32 *dbid, phys_addr_t *addr)
{
	int bit, dbpage_num;

	/* wrap to 1, skip kernel reserved */
	bit = ionic_resid_get_shared(&dev->inuse_dbid, 1,
				     dev->inuse_dbid.inuse_size);
	if (bit < 0)
		return bit;

	dbpage_num = dev->lif_cfg.lif_hw_index * dev->lif_cfg.dbid_count + bit;
	*addr = dev->lif_cfg.db_phys + ((phys_addr_t)dbpage_num << PAGE_SHIFT);
	*dbid = bit;

	return 0;
}
309
/* Release a pd id reserved by ionic_get_pdid() */
static void ionic_put_pdid(struct ionic_ibdev *dev, u32 pdid)
{
	ionic_resid_put(&dev->inuse_pdid, pdid);
}
314
/* Release an ah id reserved by ionic_get_ahid() */
static void ionic_put_ahid(struct ionic_ibdev *dev, u32 ahid)
{
	ionic_resid_put(&dev->inuse_ahid, ahid);
}
319
/* Release an mr id; strip the key byte to recover the resource index */
static void ionic_put_mrid(struct ionic_ibdev *dev, u32 mrid)
{
	ionic_resid_put(&dev->inuse_mrid, ionic_mrid_index(mrid));
}
324
/* Release a qp id; invert the bitid-to-qid mapping used at allocation */
static void ionic_put_qpid(struct ionic_ibdev *dev, u32 qpid)
{
	u32 bitid = ionic_qid_to_bitid(qpid,
				       dev->lif_cfg.udma_qgrp_shift,
				       dev->half_qpid_udma_shift);

	ionic_resid_put(&dev->inuse_qpid, bitid);
}
333
/* Release a doorbell id reserved by ionic_get_dbid() */
static void ionic_put_dbid(struct ionic_ibdev *dev, u32 dbid)
{
	ionic_resid_put(&dev->inuse_dbid, dbid);
}
338
339 static struct rdma_user_mmap_entry*
ionic_mmap_entry_insert(struct ionic_ctx * ctx,unsigned long size,unsigned long pfn,u8 mmap_flags,u64 * offset)340 ionic_mmap_entry_insert(struct ionic_ctx *ctx, unsigned long size,
341 unsigned long pfn, u8 mmap_flags, u64 *offset)
342 {
343 struct ionic_mmap_entry *entry;
344 int rc;
345
346 entry = kzalloc_obj(*entry);
347 if (!entry)
348 return NULL;
349
350 entry->size = size;
351 entry->pfn = pfn;
352 entry->mmap_flags = mmap_flags;
353
354 rc = rdma_user_mmap_entry_insert(&ctx->ibctx, &entry->rdma_entry,
355 entry->size);
356 if (rc) {
357 kfree(entry);
358 return NULL;
359 }
360
361 if (offset)
362 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
363
364 return &entry->rdma_entry;
365 }
366
/*
 * ionic_alloc_ucontext() - Set up a userspace verbs context.
 *
 * Allocates a doorbell id for the context, registers an mmap entry so
 * userspace can map its doorbell page, and reports device capabilities
 * (queue types, opcodes, udma count, express-doorbell support) back to
 * the provider library via udata.
 *
 * Return: 0 on success, negative errno on failure; partial setup is
 * unwound via the labels below.
 */
int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
	struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
	struct ionic_ctx_resp resp = {};
	struct ionic_ctx_req req;
	phys_addr_t db_phys = 0;
	int rc;

	rc = ib_copy_from_udata(&req, udata, sizeof(req));
	if (rc)
		return rc;

	/* try to allocate dbid for user ctx */
	rc = ionic_get_dbid(dev, &ctx->dbid, &db_phys);
	if (rc < 0)
		return rc;

	ibdev_dbg(&dev->ibdev, "user space dbid %u\n", ctx->dbid);

	/* expose the doorbell page to userspace via mmap */
	ctx->mmap_dbell = ionic_mmap_entry_insert(ctx, PAGE_SIZE,
						  PHYS_PFN(db_phys), 0, NULL);
	if (!ctx->mmap_dbell) {
		rc = -ENOMEM;
		goto err_mmap_dbell;
	}

	resp.page_shift = PAGE_SHIFT;

	/* offset of the doorbell within its page */
	resp.dbell_offset = db_phys & ~PAGE_MASK;

	resp.version = dev->lif_cfg.rdma_version;
	resp.qp_opcodes = dev->lif_cfg.qp_opcodes;
	resp.admin_opcodes = dev->lif_cfg.admin_opcodes;

	resp.sq_qtype = dev->lif_cfg.sq_qtype;
	resp.rq_qtype = dev->lif_cfg.rq_qtype;
	resp.cq_qtype = dev->lif_cfg.cq_qtype;
	resp.admin_qtype = dev->lif_cfg.aq_qtype;
	resp.max_stride = dev->lif_cfg.max_stride;
	resp.max_spec = IONIC_SPEC_HIGH;

	resp.udma_count = dev->lif_cfg.udma_count;
	resp.expdb_mask = dev->lif_cfg.expdb_mask;

	if (dev->lif_cfg.sq_expdb)
		resp.expdb_qtypes |= IONIC_EXPDB_SQ;
	if (dev->lif_cfg.rq_expdb)
		resp.expdb_qtypes |= IONIC_EXPDB_RQ;

	rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (rc)
		goto err_resp;

	return 0;

err_resp:
	rdma_user_mmap_entry_remove(ctx->mmap_dbell);
err_mmap_dbell:
	ionic_put_dbid(dev, ctx->dbid);

	return rc;
}
430
ionic_dealloc_ucontext(struct ib_ucontext * ibctx)431 void ionic_dealloc_ucontext(struct ib_ucontext *ibctx)
432 {
433 struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
434 struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
435
436 rdma_user_mmap_entry_remove(ctx->mmap_dbell);
437 ionic_put_dbid(dev, ctx->dbid);
438 }
439
/*
 * ionic_mmap() - Map a previously registered device region into userspace.
 *
 * Looks up the mmap entry matching the vma's offset, selects write-
 * combining or uncached page protection based on the entry's flags,
 * and remaps the region with rdma_user_mmap_io().  The entry reference
 * taken by the lookup is always dropped before returning.
 *
 * Return: 0 on success, -EINVAL if no entry matches, or the error from
 * rdma_user_mmap_io().
 */
int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
	struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
	struct rdma_user_mmap_entry *rdma_entry;
	struct ionic_mmap_entry *ionic_entry;
	int rc = 0;

	rdma_entry = rdma_user_mmap_entry_get(&ctx->ibctx, vma);
	if (!rdma_entry) {
		ibdev_dbg(&dev->ibdev, "not found %#lx\n",
			  vma->vm_pgoff << PAGE_SHIFT);
		return -EINVAL;
	}

	ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
				   rdma_entry);

	ibdev_dbg(&dev->ibdev, "writecombine? %d\n",
		  ionic_entry->mmap_flags & IONIC_MMAP_WC);
	if (ionic_entry->mmap_flags & IONIC_MMAP_WC)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	ibdev_dbg(&dev->ibdev, "remap st %#lx pf %#lx sz %#lx\n",
		  vma->vm_start, ionic_entry->pfn, ionic_entry->size);
	rc = rdma_user_mmap_io(&ctx->ibctx, vma, ionic_entry->pfn,
			       ionic_entry->size, vma->vm_page_prot,
			       rdma_entry);
	if (rc)
		ibdev_dbg(&dev->ibdev, "remap failed %d\n", rc);

	rdma_user_mmap_entry_put(rdma_entry);
	return rc;
}
476
ionic_mmap_free(struct rdma_user_mmap_entry * rdma_entry)477 void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
478 {
479 struct ionic_mmap_entry *ionic_entry;
480
481 ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
482 rdma_entry);
483 kfree(ionic_entry);
484 }
485
ionic_alloc_pd(struct ib_pd * ibpd,struct ib_udata * udata)486 int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
487 {
488 struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
489 struct ionic_pd *pd = to_ionic_pd(ibpd);
490
491 return ionic_get_pdid(dev, &pd->pdid);
492 }
493
ionic_dealloc_pd(struct ib_pd * ibpd,struct ib_udata * udata)494 int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
495 {
496 struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
497 struct ionic_pd *pd = to_ionic_pd(ibpd);
498
499 ionic_put_pdid(dev, pd->pdid);
500
501 return 0;
502 }
503
/*
 * ionic_build_hdr() - Build the RoCEv2 packet header template for an ah.
 * @dev:	ionic device.
 * @hdr:	Out: UD header to populate.
 * @attr:	Address handle attributes; must be RoCE type with a GRH.
 * @sport:	UDP source port to stamp into the header.
 * @want_ecn:	Whether to mark the header ECN-capable.
 *
 * Resolves the source MAC and vlan from the sgid entry, then fills in
 * ethernet, IPv4-or-GRH, optional vlan, and UDP portions of the header.
 *
 * Return: 0 on success, negative errno on invalid attributes or gid
 * resolution failure.
 */
static int ionic_build_hdr(struct ionic_ibdev *dev,
			   struct ib_ud_header *hdr,
			   const struct rdma_ah_attr *attr,
			   u16 sport, bool want_ecn)
{
	const struct ib_global_route *grh;
	enum rdma_network_type net;
	u16 vlan;
	int rc;

	/* require exactly the GRH flag, and a RoCE address handle */
	if (attr->ah_flags != IB_AH_GRH)
		return -EINVAL;
	if (attr->type != RDMA_AH_ATTR_TYPE_ROCE)
		return -EINVAL;

	grh = rdma_ah_read_grh(attr);

	rc = rdma_read_gid_l2_fields(grh->sgid_attr, &vlan, &hdr->eth.smac_h[0]);
	if (rc)
		return rc;

	net = rdma_gid_attr_network_type(grh->sgid_attr);

	/* vlan == 0xffff means no vlan on the sgid's netdev */
	rc = ib_ud_header_init(0,	/* no payload */
			       0,	/* no lrh */
			       1,	/* yes eth */
			       vlan != 0xffff,
			       0,	/* no grh */
			       net == RDMA_NETWORK_IPV4 ? 4 : 6,
			       1,	/* yes udp */
			       0,	/* no imm */
			       hdr);
	if (rc)
		return rc;

	ether_addr_copy(hdr->eth.dmac_h, attr->roce.dmac);

	if (net == RDMA_NETWORK_IPV4) {
		hdr->eth.type = cpu_to_be16(ETH_P_IP);
		hdr->ip4.frag_off = cpu_to_be16(0x4000);	/* don't fragment */
		hdr->ip4.ttl = grh->hop_limit;
		hdr->ip4.tot_len = cpu_to_be16(0xffff);
		/* v4 addresses live in the last 4 bytes of v4-mapped gids */
		hdr->ip4.saddr =
			*(const __be32 *)(grh->sgid_attr->gid.raw + 12);
		hdr->ip4.daddr = *(const __be32 *)(grh->dgid.raw + 12);

		if (want_ecn)
			hdr->ip4.tos = ionic_set_ecn(grh->traffic_class);
		else
			hdr->ip4.tos = ionic_clear_ecn(grh->traffic_class);
	} else {
		hdr->eth.type = cpu_to_be16(ETH_P_IPV6);
		hdr->grh.flow_label = cpu_to_be32(grh->flow_label);
		hdr->grh.hop_limit = grh->hop_limit;
		hdr->grh.source_gid = grh->sgid_attr->gid;
		hdr->grh.destination_gid = grh->dgid;

		if (want_ecn)
			hdr->grh.traffic_class =
				ionic_set_ecn(grh->traffic_class);
		else
			hdr->grh.traffic_class =
				ionic_clear_ecn(grh->traffic_class);
	}

	if (vlan != 0xffff) {
		/* fold the service level into the vlan priority bits */
		vlan |= rdma_ah_get_sl(attr) << VLAN_PRIO_SHIFT;
		hdr->vlan.tag = cpu_to_be16(vlan);
		hdr->vlan.type = hdr->eth.type;
		hdr->eth.type = cpu_to_be16(ETH_P_8021Q);
	}

	hdr->udp.sport = cpu_to_be16(sport);
	hdr->udp.dport = cpu_to_be16(ROCE_V2_UDP_DPORT);

	return 0;
}
581
/*
 * ionic_set_ah_attr() - Reconstruct ah attributes from a header template.
 *
 * Inverse of ionic_build_hdr(): recovers dmac, sl (from vlan priority),
 * flow label, hop limit and traffic class from the cached ib_ud_header.
 * For IPv4 headers, the destination gid is rewritten in @hdr as a
 * v4-mapped gid (::ffff:a.b.c.d) before being copied out.
 */
static void ionic_set_ah_attr(struct ionic_ibdev *dev,
			      struct rdma_ah_attr *ah_attr,
			      struct ib_ud_header *hdr,
			      int sgid_index)
{
	u32 flow_label;
	u16 vlan = 0;
	u8 tos, ttl;

	if (hdr->vlan_present)
		vlan = be16_to_cpu(hdr->vlan.tag);

	if (hdr->ipv4_present) {
		flow_label = 0;
		ttl = hdr->ip4.ttl;
		tos = hdr->ip4.tos;
		/* synthesize a v4-mapped destination gid from the v4 daddr */
		*(__be16 *)(hdr->grh.destination_gid.raw + 10) = cpu_to_be16(0xffff);
		*(__be32 *)(hdr->grh.destination_gid.raw + 12) = hdr->ip4.daddr;
	} else {
		flow_label = be32_to_cpu(hdr->grh.flow_label);
		ttl = hdr->grh.hop_limit;
		tos = hdr->grh.traffic_class;
	}

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
	if (hdr->eth_present)
		ether_addr_copy(ah_attr->roce.dmac, hdr->eth.dmac_h);
	/* sl was encoded in the vlan priority bits by ionic_build_hdr() */
	rdma_ah_set_sl(ah_attr, vlan >> VLAN_PRIO_SHIFT);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_grh(ah_attr, NULL, flow_label, sgid_index, ttl, tos);
	rdma_ah_set_dgid_raw(ah_attr, &hdr->grh.destination_gid);
}
615
/*
 * ionic_create_ah_cmd() - Create an address handle in the device.
 *
 * Builds the RoCE header template, packs it into a DMA-mapped buffer
 * (BTH and DETH trimmed off the end), selects the checksum profile
 * matching the header layout, and posts a create_ah admin command.
 * When the caller cannot sleep (no RDMA_CREATE_AH_SLEEPABLE), atomic
 * allocation and busy-wait admin completion are used instead.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ionic_create_ah_cmd(struct ionic_ibdev *dev,
			       struct ionic_ah *ah,
			       struct ionic_pd *pd,
			       struct rdma_ah_attr *attr,
			       u32 flags)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_CREATE_AH,
			.len = cpu_to_le16(IONIC_ADMIN_CREATE_AH_IN_V1_LEN),
			.cmd.create_ah = {
				.pd_id = cpu_to_le32(pd->pdid),
				.dbid_flags = cpu_to_le16(dev->lif_cfg.dbid),
				.id_ver = cpu_to_le32(ah->ahid),
			}
		}
	};
	enum ionic_admin_flags admin_flags = 0;
	dma_addr_t hdr_dma = 0;
	void *hdr_buf;
	gfp_t gfp = GFP_ATOMIC;
	int rc, hdr_len = 0;

	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_AH)
		return -EBADRQC;

	if (flags & RDMA_CREATE_AH_SLEEPABLE)
		gfp = GFP_KERNEL;
	else
		admin_flags |= IONIC_ADMIN_F_BUSYWAIT;

	rc = ionic_build_hdr(dev, &ah->hdr, attr, IONIC_ROCE_UDP_SPORT, false);
	if (rc)
		return rc;

	/* pick the checksum profile matching vlan/no-vlan and v4/v6 */
	if (ah->hdr.eth.type == cpu_to_be16(ETH_P_8021Q)) {
		if (ah->hdr.vlan.type == cpu_to_be16(ETH_P_IP))
			wr.wqe.cmd.create_ah.csum_profile =
				IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP;
		else
			wr.wqe.cmd.create_ah.csum_profile =
				IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP;
	} else {
		if (ah->hdr.eth.type == cpu_to_be16(ETH_P_IP))
			wr.wqe.cmd.create_ah.csum_profile =
				IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
		else
			wr.wqe.cmd.create_ah.csum_profile =
				IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
	}

	ah->sgid_index = rdma_ah_read_grh(attr)->sgid_index;

	hdr_buf = kmalloc(PAGE_SIZE, gfp);
	if (!hdr_buf)
		return -ENOMEM;

	/* the device wants the template without BTH and DETH */
	hdr_len = ib_ud_header_pack(&ah->hdr, hdr_buf);
	hdr_len -= IB_BTH_BYTES;
	hdr_len -= IB_DETH_BYTES;
	ibdev_dbg(&dev->ibdev, "roce packet header template\n");
	print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
			     hdr_buf, hdr_len, true);

	hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
				 DMA_TO_DEVICE);

	rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
	if (rc)
		goto err_dma;

	wr.wqe.cmd.create_ah.dma_addr = cpu_to_le64(hdr_dma);
	wr.wqe.cmd.create_ah.length = cpu_to_le32(hdr_len);

	ionic_admin_post(dev, &wr);
	rc = ionic_admin_wait(dev, &wr, admin_flags);

	/* device has consumed the template; safe to unmap and free */
	dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
			 DMA_TO_DEVICE);
err_dma:
	kfree(hdr_buf);

	return rc;
}
701
/*
 * ionic_destroy_ah_cmd() - Destroy an address handle in the device.
 *
 * Posts a destroy_ah admin command with teardown semantics, busy-
 * waiting when the caller cannot sleep.  The admin wait status is
 * deliberately ignored (see comment below).
 */
static int ionic_destroy_ah_cmd(struct ionic_ibdev *dev, u32 ahid, u32 flags)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_DESTROY_AH,
			.len = cpu_to_le16(IONIC_ADMIN_DESTROY_AH_IN_V1_LEN),
			.cmd.destroy_ah = {
				.ah_id = cpu_to_le32(ahid),
			},
		}
	};
	enum ionic_admin_flags admin_flags = IONIC_ADMIN_F_TEARDOWN;

	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_AH)
		return -EBADRQC;

	if (!(flags & RDMA_CREATE_AH_SLEEPABLE))
		admin_flags |= IONIC_ADMIN_F_BUSYWAIT;

	ionic_admin_post(dev, &wr);
	ionic_admin_wait(dev, &wr, admin_flags);

	/* No host-memory resource is associated with ah, so it is ok
	 * to "succeed" and complete this destroy ah on the host.
	 */
	return 0;
}
730
/*
 * ionic_create_ah() - Verbs create address handle entry point.
 *
 * Reserves an ah id, creates the ah in the device, and for user callers
 * reports the ahid back through udata.  On any failure the device ah
 * and the reserved id are released in reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
		    struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
	struct rdma_ah_attr *attr = init_attr->ah_attr;
	struct ionic_pd *pd = to_ionic_pd(ibah->pd);
	struct ionic_ah *ah = to_ionic_ah(ibah);
	struct ionic_ah_resp resp = {};
	u32 flags = init_attr->flags;
	int rc;

	rc = ionic_get_ahid(dev, &ah->ahid);
	if (rc)
		return rc;

	rc = ionic_create_ah_cmd(dev, ah, pd, attr, flags);
	if (rc)
		goto err_cmd;

	if (udata) {
		resp.ahid = ah->ahid;

		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc)
			goto err_resp;
	}

	return 0;

err_resp:
	ionic_destroy_ah_cmd(dev, ah->ahid, flags);
err_cmd:
	ionic_put_ahid(dev, ah->ahid);
	return rc;
}
766
ionic_query_ah(struct ib_ah * ibah,struct rdma_ah_attr * ah_attr)767 int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
768 {
769 struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
770 struct ionic_ah *ah = to_ionic_ah(ibah);
771
772 ionic_set_ah_attr(dev, ah_attr, &ah->hdr, ah->sgid_index);
773
774 return 0;
775 }
776
/* Destroy the ah in the device, then release its reserved id */
int ionic_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
	struct ionic_ah *ah = to_ionic_ah(ibah);
	int rc = ionic_destroy_ah_cmd(dev, ah->ahid, flags);

	if (rc)
		return rc;

	ionic_put_ahid(dev, ah->ahid);

	return 0;
}
791
/*
 * ionic_create_mr_cmd() - Create a memory region in the device.
 *
 * Posts a create_mr admin command describing the region's va, length,
 * pd, page size and page table (taken from mr->buf), then waits for
 * completion.  On success, marks the mr created so teardown knows to
 * issue a destroy command.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ionic_create_mr_cmd(struct ionic_ibdev *dev,
			       struct ionic_pd *pd,
			       struct ionic_mr *mr,
			       u64 addr,
			       u64 length)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_CREATE_MR,
			.len = cpu_to_le16(IONIC_ADMIN_CREATE_MR_IN_V1_LEN),
			.cmd.create_mr = {
				.va = cpu_to_le64(addr),
				.length = cpu_to_le64(length),
				.pd_id = cpu_to_le32(pd->pdid),
				.page_size_log2 = mr->buf.page_size_log2,
				/* ~0: let the device choose the table index */
				.tbl_index = cpu_to_le32(~0),
				.map_count = cpu_to_le32(mr->buf.tbl_pages),
				.dma_addr = ionic_pgtbl_dma(&mr->buf, addr),
				.dbid_flags = cpu_to_le16(mr->flags),
				.id_ver = cpu_to_le32(mr->mrid),
			}
		}
	};
	int rc;

	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_MR)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);
	rc = ionic_admin_wait(dev, &wr, 0);
	if (!rc)
		mr->created = true;

	return rc;
}
828
/*
 * ionic_destroy_mr_cmd() - Destroy a memory region in the device.
 *
 * Posts a destroy_mr admin command with teardown semantics and returns
 * the admin wait status.
 */
static int ionic_destroy_mr_cmd(struct ionic_ibdev *dev, u32 mrid)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_DESTROY_MR,
			.len = cpu_to_le16(IONIC_ADMIN_DESTROY_MR_IN_V1_LEN),
			.cmd.destroy_mr = {
				.mr_id = cpu_to_le32(mrid),
			},
		}
	};

	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_MR)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);

	return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
}
849
ionic_get_dma_mr(struct ib_pd * ibpd,int access)850 struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access)
851 {
852 struct ionic_pd *pd = to_ionic_pd(ibpd);
853 struct ionic_mr *mr;
854
855 mr = kzalloc_obj(*mr);
856 if (!mr)
857 return ERR_PTR(-ENOMEM);
858
859 mr->ibmr.lkey = IONIC_DMA_LKEY;
860 mr->ibmr.rkey = IONIC_DMA_RKEY;
861
862 if (pd)
863 pd->flags |= IONIC_QPF_PRIVILEGED;
864
865 return &mr->ibmr;
866 }
867
/*
 * ionic_reg_user_mr() - Register a userspace memory region.
 *
 * Reserves an mr id, pins the user memory, selects the best supported
 * page size, builds the page table and issues the create_mr admin
 * command.  The host-side page table buffer is released once the
 * device has consumed it (on success and failure alike).
 *
 * Return: the new ib_mr, or ERR_PTR on failure.  DMA handles are not
 * supported (-EOPNOTSUPP).
 */
struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				u64 addr, int access, struct ib_dmah *dmah,
				struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
	struct ionic_pd *pd = to_ionic_pd(ibpd);
	struct ionic_mr *mr;
	unsigned long pg_sz;
	int rc;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	mr = kzalloc_obj(*mr);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rc = ionic_get_mrid(dev, &mr->mrid);
	if (rc)
		goto err_mrid;

	mr->ibmr.lkey = mr->mrid;
	mr->ibmr.rkey = mr->mrid;
	mr->ibmr.iova = addr;
	mr->ibmr.length = length;

	mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);

	mr->umem = ib_umem_get(&dev->ibdev, start, length, access);
	if (IS_ERR(mr->umem)) {
		rc = PTR_ERR(mr->umem);
		goto err_umem;
	}

	/* largest page size the device and this mapping both support */
	pg_sz = ib_umem_find_best_pgsz(mr->umem,
				       dev->lif_cfg.page_size_supported,
				       addr);
	if (!pg_sz) {
		rc = -EINVAL;
		goto err_pgtbl;
	}

	rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
	if (rc)
		goto err_pgtbl;

	rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
	if (rc)
		goto err_cmd;

	/* device owns the mapping now; drop the host-side table buffer */
	ionic_pgtbl_unbuf(dev, &mr->buf);

	return &mr->ibmr;

err_cmd:
	ionic_pgtbl_unbuf(dev, &mr->buf);
err_pgtbl:
	ib_umem_release(mr->umem);
err_umem:
	ionic_put_mrid(dev, mr->mrid);
err_mrid:
	kfree(mr);
	return ERR_PTR(rc);
}
932
/*
 * ionic_reg_user_mr_dmabuf() - Register a dma-buf backed memory region.
 *
 * Same flow as ionic_reg_user_mr(), but the memory comes from a pinned
 * dma-buf (fd/offset) rather than a userspace virtual range.
 *
 * Return: the new ib_mr, or ERR_PTR on failure.  DMA handles are not
 * supported (-EOPNOTSUPP).
 */
struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
				       u64 length, u64 addr, int fd, int access,
				       struct ib_dmah *dmah,
				       struct uverbs_attr_bundle *attrs)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
	struct ionic_pd *pd = to_ionic_pd(ibpd);
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ionic_mr *mr;
	u64 pg_sz;
	int rc;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	mr = kzalloc_obj(*mr);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rc = ionic_get_mrid(dev, &mr->mrid);
	if (rc)
		goto err_mrid;

	mr->ibmr.lkey = mr->mrid;
	mr->ibmr.rkey = mr->mrid;
	mr->ibmr.iova = addr;
	mr->ibmr.length = length;

	mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);

	umem_dmabuf = ib_umem_dmabuf_get_pinned(&dev->ibdev, offset, length,
						fd, access);
	if (IS_ERR(umem_dmabuf)) {
		rc = PTR_ERR(umem_dmabuf);
		goto err_umem;
	}

	mr->umem = &umem_dmabuf->umem;

	/* largest page size the device and this mapping both support */
	pg_sz = ib_umem_find_best_pgsz(mr->umem,
				       dev->lif_cfg.page_size_supported,
				       addr);
	if (!pg_sz) {
		rc = -EINVAL;
		goto err_pgtbl;
	}

	rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
	if (rc)
		goto err_pgtbl;

	rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
	if (rc)
		goto err_cmd;

	/* device owns the mapping now; drop the host-side table buffer */
	ionic_pgtbl_unbuf(dev, &mr->buf);

	return &mr->ibmr;

err_cmd:
	ionic_pgtbl_unbuf(dev, &mr->buf);
err_pgtbl:
	ib_umem_release(mr->umem);
err_umem:
	ionic_put_mrid(dev, mr->mrid);
err_mrid:
	kfree(mr);
	return ERR_PTR(rc);
}
1002
/*
 * ionic_dereg_mr() - Deregister a memory region.
 *
 * An mr with no lkey never acquired device or pinned-memory resources,
 * so only the host structure is freed.  Otherwise the device mr is
 * destroyed (only if it was actually created), the page table buffer
 * and pinned memory are released, and the mr id is returned.
 *
 * Return: 0 on success; the destroy command's error otherwise, in
 * which case the mr is left intact for a retry.
 */
int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
	struct ionic_mr *mr = to_ionic_mr(ibmr);
	int rc;

	if (!mr->ibmr.lkey)
		goto out;

	if (mr->created) {
		rc = ionic_destroy_mr_cmd(dev, mr->mrid);
		if (rc)
			return rc;
	}

	ionic_pgtbl_unbuf(dev, &mr->buf);

	if (mr->umem)
		ib_umem_release(mr->umem);

	ionic_put_mrid(dev, mr->mrid);

out:
	kfree(mr);

	return 0;
}
1030
/*
 * ionic_alloc_mr() - Allocate a fast-registration memory region.
 *
 * Only IB_MR_TYPE_MEM_REG is supported.  A page table sized for max_sg
 * entries is allocated up front (mr->umem is NULL here, so the table is
 * empty), then the device mr is created with zero va/length; pages are
 * filled in later by ionic_map_mr_sg().
 *
 * Return: the new ib_mr, or ERR_PTR on failure.
 */
struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
			     u32 max_sg)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
	struct ionic_pd *pd = to_ionic_pd(ibpd);
	struct ionic_mr *mr;
	int rc;

	if (type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = kzalloc_obj(*mr);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rc = ionic_get_mrid(dev, &mr->mrid);
	if (rc)
		goto err_mrid;

	mr->ibmr.lkey = mr->mrid;
	mr->ibmr.rkey = mr->mrid;

	mr->flags = IONIC_MRF_PHYS_MR;

	rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, max_sg, PAGE_SIZE);
	if (rc)
		goto err_pgtbl;

	/* report no mapped pages yet; capacity (tbl_limit) is retained */
	mr->buf.tbl_pages = 0;

	rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
	if (rc)
		goto err_cmd;

	return &mr->ibmr;

err_cmd:
	ionic_pgtbl_unbuf(dev, &mr->buf);
err_pgtbl:
	ionic_put_mrid(dev, mr->mrid);
err_mrid:
	kfree(mr);
	return ERR_PTR(rc);
}
1075
/* ib_sg_to_pages() callback: append one dma page to the mr page table */
static int ionic_map_mr_page(struct ib_mr *ibmr, u64 dma)
{
	struct ionic_mr *mr = to_ionic_mr(ibmr);

	ibdev_dbg(&to_ionic_ibdev(ibmr->device)->ibdev, "dma %p\n",
		  (void *)dma);

	return ionic_pgtbl_page(&mr->buf, dma);
}
1084
/*
 * ionic_map_mr_sg() - Fill a fast-reg mr's page table from a scatterlist.
 *
 * Resets the page count, then lets ib_sg_to_pages() call
 * ionic_map_mr_page() for each page.  When the page table lives in a
 * DMA-mapped buffer, CPU/device ownership is handed over around the
 * update with dma_sync_single_for_{cpu,device}().
 *
 * Return: number of sg entries mapped, or a negative errno.  Rejects
 * an mr not allocated via ib_alloc_mr() (no table capacity).
 */
int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		    unsigned int *sg_offset)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
	struct ionic_mr *mr = to_ionic_mr(ibmr);
	int rc;

	/* mr must be allocated using ib_alloc_mr() */
	if (unlikely(!mr->buf.tbl_limit))
		return -EINVAL;

	mr->buf.tbl_pages = 0;

	if (mr->buf.tbl_buf)
		dma_sync_single_for_cpu(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
					mr->buf.tbl_size, DMA_TO_DEVICE);

	ibdev_dbg(&dev->ibdev, "sg %p nent %d\n", sg, sg_nents);
	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ionic_map_mr_page);

	mr->buf.page_size_log2 = order_base_2(ibmr->page_size);

	if (mr->buf.tbl_buf)
		dma_sync_single_for_device(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
					   mr->buf.tbl_size, DMA_TO_DEVICE);

	return rc;
}
1113
/*
 * ionic_alloc_mw() - Allocate a memory window.
 *
 * A memory window is represented in the device as an mr with a type-1
 * or type-2 window flag; it is created with zero va/length and bound
 * later.  The reserved mr id doubles as the window's rkey.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
	struct ionic_pd *pd = to_ionic_pd(ibmw->pd);
	struct ionic_mr *mr = to_ionic_mw(ibmw);
	int rc;

	rc = ionic_get_mrid(dev, &mr->mrid);
	if (rc)
		return rc;

	mr->ibmw.rkey = mr->mrid;

	if (mr->ibmw.type == IB_MW_TYPE_1)
		mr->flags = IONIC_MRF_MW_1;
	else
		mr->flags = IONIC_MRF_MW_2;

	rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
	if (rc)
		goto err_cmd;

	return 0;

err_cmd:
	ionic_put_mrid(dev, mr->mrid);
	return rc;
}
1142
ionic_dealloc_mw(struct ib_mw * ibmw)1143 int ionic_dealloc_mw(struct ib_mw *ibmw)
1144 {
1145 struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
1146 struct ionic_mr *mr = to_ionic_mw(ibmw);
1147 int rc;
1148
1149 rc = ionic_destroy_mr_cmd(dev, mr->mrid);
1150 if (rc)
1151 return rc;
1152
1153 ionic_put_mrid(dev, mr->mrid);
1154
1155 return 0;
1156 }
1157
/*
 * ionic_create_cq_cmd() - Issue a CREATE_CQ admin command to the device.
 * @dev: ionic RDMA device.
 * @ctx: user context, or NULL for a kernel CQ; selects the doorbell id.
 * @cq: driver CQ state supplying eqid, cqid and queue geometry.
 * @buf: page table describing the CQ ring memory.
 *
 * Builds the admin wqe on the stack, posts it, and waits for completion.
 *
 * Return: 0 on success, -EBADRQC if firmware lacks the opcode, or an
 * error from ionic_admin_wait().
 */
static int ionic_create_cq_cmd(struct ionic_ibdev *dev,
			       struct ionic_ctx *ctx,
			       struct ionic_cq *cq,
			       struct ionic_tbl_buf *buf)
{
	const u16 dbid = ionic_ctx_dbid(dev, ctx);
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_CREATE_CQ,
			.len = cpu_to_le16(IONIC_ADMIN_CREATE_CQ_IN_V1_LEN),
			.cmd.create_cq = {
				.eq_id = cpu_to_le32(cq->eqid),
				.depth_log2 = cq->q.depth_log2,
				.stride_log2 = cq->q.stride_log2,
				.page_size_log2 = buf->page_size_log2,
				/* ~0 presumably means "no preassigned table
				 * index" - TODO confirm against fw interface
				 */
				.tbl_index = cpu_to_le32(~0),
				.map_count = cpu_to_le32(buf->tbl_pages),
				.dma_addr = ionic_pgtbl_dma(buf, 0),
				.dbid_flags = cpu_to_le16(dbid),
				.id_ver = cpu_to_le32(cq->cqid),
			}
		}
	};

	/* reject if this firmware does not implement the opcode */
	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_CQ)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);

	return ionic_admin_wait(dev, &wr, 0);
}
1190
/*
 * ionic_destroy_cq_cmd() - Issue a DESTROY_CQ admin command.
 * @dev: ionic RDMA device.
 * @cqid: device id of the CQ to destroy.
 *
 * Return: 0 on success, -EBADRQC if firmware lacks the opcode, or an
 * error from ionic_admin_wait().
 */
static int ionic_destroy_cq_cmd(struct ionic_ibdev *dev, u32 cqid)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
	};

	/* bail out before building the wqe if the opcode is unsupported */
	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_CQ)
		return -EBADRQC;

	wr.wqe.op = IONIC_V1_ADMIN_DESTROY_CQ;
	wr.wqe.len = cpu_to_le16(IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN);
	wr.wqe.cmd.destroy_cq.cq_id = cpu_to_le32(cqid);

	ionic_admin_post(dev, &wr);

	/* wait with the teardown flag, as destroy may run during teardown */
	return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
}
1211
ionic_create_cq(struct ib_cq * ibcq,const struct ib_cq_init_attr * attr,struct uverbs_attr_bundle * attrs)1212 int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1213 struct uverbs_attr_bundle *attrs)
1214 {
1215 struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
1216 struct ib_udata *udata = &attrs->driver_udata;
1217 struct ionic_ctx *ctx =
1218 rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
1219 struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
1220 struct ionic_tbl_buf buf = {};
1221 struct ionic_cq_resp resp;
1222 struct ionic_cq_req req;
1223 int udma_idx = 0, rc;
1224
1225 if (udata) {
1226 rc = ib_copy_from_udata(&req, udata, sizeof(req));
1227 if (rc)
1228 return rc;
1229 }
1230
1231 vcq->udma_mask = BIT(dev->lif_cfg.udma_count) - 1;
1232
1233 if (udata)
1234 vcq->udma_mask &= req.udma_mask;
1235
1236 if (!vcq->udma_mask) {
1237 rc = -EINVAL;
1238 goto err_init;
1239 }
1240
1241 for (; udma_idx < dev->lif_cfg.udma_count; ++udma_idx) {
1242 if (!(vcq->udma_mask & BIT(udma_idx)))
1243 continue;
1244
1245 rc = ionic_create_cq_common(vcq, &buf, attr, ctx, udata,
1246 &req.cq[udma_idx],
1247 &resp.cqid[udma_idx],
1248 udma_idx);
1249 if (rc)
1250 goto err_init;
1251
1252 rc = ionic_create_cq_cmd(dev, ctx, &vcq->cq[udma_idx], &buf);
1253 if (rc)
1254 goto err_cmd;
1255
1256 ionic_pgtbl_unbuf(dev, &buf);
1257 }
1258
1259 vcq->ibcq.cqe = attr->cqe;
1260
1261 if (udata) {
1262 resp.udma_mask = vcq->udma_mask;
1263
1264 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1265 if (rc)
1266 goto err_resp;
1267 }
1268
1269 return 0;
1270
1271 err_resp:
1272 while (udma_idx) {
1273 --udma_idx;
1274 if (!(vcq->udma_mask & BIT(udma_idx)))
1275 continue;
1276 ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
1277 err_cmd:
1278 ionic_pgtbl_unbuf(dev, &buf);
1279 ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
1280 err_init:
1281 ;
1282 }
1283
1284 return rc;
1285 }
1286
ionic_destroy_cq(struct ib_cq * ibcq,struct ib_udata * udata)1287 int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1288 {
1289 struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
1290 struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
1291 int udma_idx, rc_tmp, rc = 0;
1292
1293 for (udma_idx = dev->lif_cfg.udma_count; udma_idx; ) {
1294 --udma_idx;
1295
1296 if (!(vcq->udma_mask & BIT(udma_idx)))
1297 continue;
1298
1299 rc_tmp = ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
1300 if (rc_tmp) {
1301 if (!rc)
1302 rc = rc_tmp;
1303
1304 continue;
1305 }
1306
1307 ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
1308 }
1309
1310 return rc;
1311 }
1312
pd_remote_privileged(struct ib_pd * pd)1313 static bool pd_remote_privileged(struct ib_pd *pd)
1314 {
1315 return pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
1316 }
1317
/*
 * ionic_create_qp_cmd() - Issue a CREATE_QP admin command to the device.
 * @dev: ionic RDMA device.
 * @pd: protection domain; supplies pdid, doorbell id and privilege flags.
 * @send_cq: device CQ for the send queue (used only if qp->has_sq).
 * @recv_cq: device CQ for the receive queue (used only if qp->has_rq).
 * @qp: driver QP state; supplies qpid, queue geometry and CMB/spec flags.
 * @sq_buf: page table for the send queue ring.
 * @rq_buf: page table for the receive queue ring.
 * @attr: verbs init attributes (QP type).
 *
 * The sq/rq sections of the wqe are filled in only for queues the QP
 * actually has; absent queues leave those fields zero.
 *
 * Return: 0 on success, -EBADRQC if firmware lacks the opcode, or an
 * error from ionic_admin_wait().
 */
static int ionic_create_qp_cmd(struct ionic_ibdev *dev,
			       struct ionic_pd *pd,
			       struct ionic_cq *send_cq,
			       struct ionic_cq *recv_cq,
			       struct ionic_qp *qp,
			       struct ionic_tbl_buf *sq_buf,
			       struct ionic_tbl_buf *rq_buf,
			       struct ib_qp_init_attr *attr)
{
	const u16 dbid = ionic_obj_dbid(dev, pd->ibpd.uobject);
	/* access flags and sqd notify are zero at create; set via modify */
	const u32 flags = to_ionic_qp_flags(0, 0,
					    qp->sq_cmb & IONIC_CMB_ENABLE,
					    qp->rq_cmb & IONIC_CMB_ENABLE,
					    qp->sq_spec, qp->rq_spec,
					    pd->flags & IONIC_QPF_PRIVILEGED,
					    pd_remote_privileged(&pd->ibpd));
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_CREATE_QP,
			.len = cpu_to_le16(IONIC_ADMIN_CREATE_QP_IN_V1_LEN),
			.cmd.create_qp = {
				.pd_id = cpu_to_le32(pd->pdid),
				/* NOTE: priv_flags is big-endian, unlike the
				 * little-endian fields around it
				 */
				.priv_flags = cpu_to_be32(flags),
				.type_state = to_ionic_qp_type(attr->qp_type),
				.dbid_flags = cpu_to_le16(dbid),
				.id_ver = cpu_to_le32(qp->qpid),
			}
		}
	};

	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_QP)
		return -EBADRQC;

	if (qp->has_sq) {
		wr.wqe.cmd.create_qp.sq_cq_id = cpu_to_le32(send_cq->cqid);
		wr.wqe.cmd.create_qp.sq_depth_log2 = qp->sq.depth_log2;
		wr.wqe.cmd.create_qp.sq_stride_log2 = qp->sq.stride_log2;
		wr.wqe.cmd.create_qp.sq_page_size_log2 = sq_buf->page_size_log2;
		/* ~0: no preassigned table index / xrcd - TODO confirm */
		wr.wqe.cmd.create_qp.sq_tbl_index_xrcd_id = cpu_to_le32(~0);
		wr.wqe.cmd.create_qp.sq_map_count =
			cpu_to_le32(sq_buf->tbl_pages);
		wr.wqe.cmd.create_qp.sq_dma_addr = ionic_pgtbl_dma(sq_buf, 0);
	}

	if (qp->has_rq) {
		wr.wqe.cmd.create_qp.rq_cq_id = cpu_to_le32(recv_cq->cqid);
		wr.wqe.cmd.create_qp.rq_depth_log2 = qp->rq.depth_log2;
		wr.wqe.cmd.create_qp.rq_stride_log2 = qp->rq.stride_log2;
		wr.wqe.cmd.create_qp.rq_page_size_log2 = rq_buf->page_size_log2;
		wr.wqe.cmd.create_qp.rq_tbl_index_srq_id = cpu_to_le32(~0);
		wr.wqe.cmd.create_qp.rq_map_count =
			cpu_to_le32(rq_buf->tbl_pages);
		wr.wqe.cmd.create_qp.rq_dma_addr = ionic_pgtbl_dma(rq_buf, 0);
	}

	ionic_admin_post(dev, &wr);

	return ionic_admin_wait(dev, &wr, 0);
}
1378
/*
 * ionic_modify_qp_cmd() - Issue a MODIFY_QP admin command.
 * @dev: ionic RDMA device.
 * @pd: protection domain of the QP (privilege flags).
 * @qp: driver QP state.
 * @attr: verbs modify attributes.
 * @mask: IB_QP_* bits indicating which attributes are being modified.
 *
 * When IB_QP_AV is set, a RoCE packet header template is built from the
 * AH attributes, packed into a temporary buffer, and handed to the
 * device by DMA address; the buffer is unmapped and freed once the
 * admin command completes.
 *
 * Return: 0 on success or negative errno.
 */
static int ionic_modify_qp_cmd(struct ionic_ibdev *dev,
			       struct ionic_pd *pd,
			       struct ionic_qp *qp,
			       struct ib_qp_attr *attr,
			       int mask)
{
	const u32 flags = to_ionic_qp_flags(attr->qp_access_flags,
					    attr->en_sqd_async_notify,
					    qp->sq_cmb & IONIC_CMB_ENABLE,
					    qp->rq_cmb & IONIC_CMB_ENABLE,
					    qp->sq_spec, qp->rq_spec,
					    pd->flags & IONIC_QPF_PRIVILEGED,
					    pd_remote_privileged(qp->ibqp.pd));
	const u8 state = to_ionic_qp_modify_state(attr->qp_state,
						  attr->cur_qp_state);
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = IONIC_V1_ADMIN_MODIFY_QP,
			.len = cpu_to_le16(IONIC_ADMIN_MODIFY_QP_IN_V1_LEN),
			.cmd.mod_qp = {
				.attr_mask = cpu_to_be32(mask),
				.access_flags = cpu_to_be16(flags),
				.rq_psn = cpu_to_le32(attr->rq_psn),
				.sq_psn = cpu_to_le32(attr->sq_psn),
				.rate_limit_kbps =
					cpu_to_le32(attr->rate_limit),
				/* device pmtu is log2 of bytes:
				 * IB_MTU_256 == 1 maps to 8
				 */
				.pmtu = (attr->path_mtu + 7),
				/* retry_cnt in low nibble, rnr_retry high */
				.retry = (attr->retry_cnt |
					  (attr->rnr_retry << 4)),
				.rnr_timer = attr->min_rnr_timer,
				.retry_timeout = attr->timeout,
				.type_state = state,
				.id_ver = cpu_to_le32(qp->qpid),
			}
		}
	};
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	void *hdr_buf = NULL;
	dma_addr_t hdr_dma = 0;
	int rc, hdr_len = 0;
	u16 sport;

	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_MODIFY_QP)
		return -EBADRQC;

	if ((mask & IB_QP_MAX_DEST_RD_ATOMIC) && attr->max_dest_rd_atomic) {
		/* Note, round up/down was already done for allocating
		 * resources on the device. The allocation order is in cache
		 * line size. We can't use the order of the resource
		 * allocation to determine the order wqes here, because for
		 * queue length <= one cache line it is not distinct.
		 *
		 * Therefore, order wqes is computed again here.
		 *
		 * Account for hole and round up to the next order.
		 */
		wr.wqe.cmd.mod_qp.rsq_depth =
			order_base_2(attr->max_dest_rd_atomic + 1);
		wr.wqe.cmd.mod_qp.rsq_index = cpu_to_le32(~0);
	}

	if ((mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
		/* Account for hole and round down to the next order */
		wr.wqe.cmd.mod_qp.rrq_depth =
			order_base_2(attr->max_rd_atomic + 2) - 1;
		wr.wqe.cmd.mod_qp.rrq_index = cpu_to_le32(~0);
	}

	/* connected QPs carry the destination qpn; datagram QPs the qkey */
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		wr.wqe.cmd.mod_qp.qkey_dest_qpn =
			cpu_to_le32(attr->dest_qp_num);
	else
		wr.wqe.cmd.mod_qp.qkey_dest_qpn = cpu_to_le32(attr->qkey);

	if (mask & IB_QP_AV) {
		/* qp->hdr is allocated at create time only for has_ah QPs */
		if (!qp->hdr)
			return -ENOMEM;

		sport = rdma_get_udp_sport(grh->flow_label,
					   qp->qpid,
					   attr->dest_qp_num);

		rc = ionic_build_hdr(dev, qp->hdr, &attr->ah_attr, sport, true);
		if (rc)
			return rc;

		qp->sgid_index = grh->sgid_index;

		hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!hdr_buf)
			return -ENOMEM;

		/* device template excludes the trailing BTH and DETH bytes */
		hdr_len = ib_ud_header_pack(qp->hdr, hdr_buf);
		hdr_len -= IB_BTH_BYTES;
		hdr_len -= IB_DETH_BYTES;
		ibdev_dbg(&dev->ibdev, "roce packet header template\n");
		print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
				     hdr_buf, hdr_len, true);

		hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
					 DMA_TO_DEVICE);

		rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
		if (rc)
			goto err_dma;

		if (qp->hdr->ipv4_present) {
			wr.wqe.cmd.mod_qp.tfp_csum_profile =
				qp->hdr->vlan_present ?
					IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP :
					IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
		} else {
			wr.wqe.cmd.mod_qp.tfp_csum_profile =
				qp->hdr->vlan_present ?
					IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP :
					IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
		}

		/* header length is packed into the top byte of ah_id_len */
		wr.wqe.cmd.mod_qp.ah_id_len =
			cpu_to_le32(qp->ahid | (hdr_len << 24));
		wr.wqe.cmd.mod_qp.dma_addr = cpu_to_le64(hdr_dma);

		wr.wqe.cmd.mod_qp.en_pcp = attr->ah_attr.sl;
		wr.wqe.cmd.mod_qp.ip_dscp = grh->traffic_class >> 2;
	}

	ionic_admin_post(dev, &wr);

	rc = ionic_admin_wait(dev, &wr, 0);

	/* the success path falls through err_dma after unmapping; the
	 * label is entered directly only when the mapping itself failed
	 */
	if (mask & IB_QP_AV)
		dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
				 DMA_TO_DEVICE);
err_dma:
	if (mask & IB_QP_AV)
		kfree(hdr_buf);

	return rc;
}
1519
ionic_query_qp_cmd(struct ionic_ibdev * dev,struct ionic_qp * qp,struct ib_qp_attr * attr,int mask)1520 static int ionic_query_qp_cmd(struct ionic_ibdev *dev,
1521 struct ionic_qp *qp,
1522 struct ib_qp_attr *attr,
1523 int mask)
1524 {
1525 struct ionic_admin_wr wr = {
1526 .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
1527 .wqe = {
1528 .op = IONIC_V1_ADMIN_QUERY_QP,
1529 .len = cpu_to_le16(IONIC_ADMIN_QUERY_QP_IN_V1_LEN),
1530 .cmd.query_qp = {
1531 .id_ver = cpu_to_le32(qp->qpid),
1532 },
1533 }
1534 };
1535 struct ionic_v1_admin_query_qp_sq *query_sqbuf;
1536 struct ionic_v1_admin_query_qp_rq *query_rqbuf;
1537 dma_addr_t query_sqdma;
1538 dma_addr_t query_rqdma;
1539 dma_addr_t hdr_dma = 0;
1540 void *hdr_buf = NULL;
1541 int flags, rc;
1542
1543 if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_QUERY_QP)
1544 return -EBADRQC;
1545
1546 if (qp->has_sq) {
1547 bool expdb = !!(qp->sq_cmb & IONIC_CMB_EXPDB);
1548
1549 attr->cap.max_send_sge =
1550 ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
1551 qp->sq_spec,
1552 expdb);
1553 attr->cap.max_inline_data =
1554 ionic_v1_send_wqe_max_data(qp->sq.stride_log2, expdb);
1555 }
1556
1557 if (qp->has_rq) {
1558 attr->cap.max_recv_sge =
1559 ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
1560 qp->rq_spec,
1561 qp->rq_cmb & IONIC_CMB_EXPDB);
1562 }
1563
1564 query_sqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1565 if (!query_sqbuf)
1566 return -ENOMEM;
1567
1568 query_rqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1569 if (!query_rqbuf) {
1570 rc = -ENOMEM;
1571 goto err_rqbuf;
1572 }
1573
1574 query_sqdma = dma_map_single(dev->lif_cfg.hwdev, query_sqbuf, PAGE_SIZE,
1575 DMA_FROM_DEVICE);
1576 rc = dma_mapping_error(dev->lif_cfg.hwdev, query_sqdma);
1577 if (rc)
1578 goto err_sqdma;
1579
1580 query_rqdma = dma_map_single(dev->lif_cfg.hwdev, query_rqbuf, PAGE_SIZE,
1581 DMA_FROM_DEVICE);
1582 rc = dma_mapping_error(dev->lif_cfg.hwdev, query_rqdma);
1583 if (rc)
1584 goto err_rqdma;
1585
1586 if (mask & IB_QP_AV) {
1587 hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1588 if (!hdr_buf) {
1589 rc = -ENOMEM;
1590 goto err_hdrbuf;
1591 }
1592
1593 hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf,
1594 PAGE_SIZE, DMA_FROM_DEVICE);
1595 rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
1596 if (rc)
1597 goto err_hdrdma;
1598 }
1599
1600 wr.wqe.cmd.query_qp.sq_dma_addr = cpu_to_le64(query_sqdma);
1601 wr.wqe.cmd.query_qp.rq_dma_addr = cpu_to_le64(query_rqdma);
1602 wr.wqe.cmd.query_qp.hdr_dma_addr = cpu_to_le64(hdr_dma);
1603 wr.wqe.cmd.query_qp.ah_id = cpu_to_le32(qp->ahid);
1604
1605 ionic_admin_post(dev, &wr);
1606
1607 rc = ionic_admin_wait(dev, &wr, 0);
1608
1609 if (rc)
1610 goto err_hdrdma;
1611
1612 flags = be16_to_cpu(query_sqbuf->access_perms_flags |
1613 query_rqbuf->access_perms_flags);
1614
1615 print_hex_dump_debug("sqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
1616 query_sqbuf, sizeof(*query_sqbuf), true);
1617 print_hex_dump_debug("rqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
1618 query_rqbuf, sizeof(*query_rqbuf), true);
1619 ibdev_dbg(&dev->ibdev, "query qp %u state_pmtu %#x flags %#x",
1620 qp->qpid, query_rqbuf->state_pmtu, flags);
1621
1622 attr->qp_state = from_ionic_qp_state(query_rqbuf->state_pmtu >> 4);
1623 attr->cur_qp_state = attr->qp_state;
1624 attr->path_mtu = (query_rqbuf->state_pmtu & 0xf) - 7;
1625 attr->path_mig_state = IB_MIG_MIGRATED;
1626 attr->qkey = be32_to_cpu(query_sqbuf->qkey_dest_qpn);
1627 attr->rq_psn = be32_to_cpu(query_sqbuf->rq_psn);
1628 attr->sq_psn = be32_to_cpu(query_rqbuf->sq_psn);
1629 attr->dest_qp_num = attr->qkey;
1630 attr->qp_access_flags = from_ionic_qp_flags(flags);
1631 attr->pkey_index = 0;
1632 attr->alt_pkey_index = 0;
1633 attr->en_sqd_async_notify = !!(flags & IONIC_QPF_SQD_NOTIFY);
1634 attr->sq_draining = !!(flags & IONIC_QPF_SQ_DRAINING);
1635 attr->max_rd_atomic = BIT(query_rqbuf->rrq_depth) - 1;
1636 attr->max_dest_rd_atomic = BIT(query_rqbuf->rsq_depth) - 1;
1637 attr->min_rnr_timer = query_sqbuf->rnr_timer;
1638 attr->port_num = 0;
1639 attr->timeout = query_sqbuf->retry_timeout;
1640 attr->retry_cnt = query_rqbuf->retry_rnrtry & 0xf;
1641 attr->rnr_retry = query_rqbuf->retry_rnrtry >> 4;
1642 attr->alt_port_num = 0;
1643 attr->alt_timeout = 0;
1644 attr->rate_limit = be32_to_cpu(query_sqbuf->rate_limit_kbps);
1645
1646 if (mask & IB_QP_AV)
1647 ionic_set_ah_attr(dev, &attr->ah_attr,
1648 qp->hdr, qp->sgid_index);
1649
1650 err_hdrdma:
1651 if (mask & IB_QP_AV) {
1652 dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma,
1653 PAGE_SIZE, DMA_FROM_DEVICE);
1654 kfree(hdr_buf);
1655 }
1656 err_hdrbuf:
1657 dma_unmap_single(dev->lif_cfg.hwdev, query_rqdma, sizeof(*query_rqbuf),
1658 DMA_FROM_DEVICE);
1659 err_rqdma:
1660 dma_unmap_single(dev->lif_cfg.hwdev, query_sqdma, sizeof(*query_sqbuf),
1661 DMA_FROM_DEVICE);
1662 err_sqdma:
1663 kfree(query_rqbuf);
1664 err_rqbuf:
1665 kfree(query_sqbuf);
1666
1667 return rc;
1668 }
1669
/*
 * ionic_destroy_qp_cmd() - Issue a DESTROY_QP admin command.
 * @dev: ionic RDMA device.
 * @qpid: device id of the QP to destroy.
 *
 * Return: 0 on success, -EBADRQC if firmware lacks the opcode, or an
 * error from ionic_admin_wait().
 */
static int ionic_destroy_qp_cmd(struct ionic_ibdev *dev, u32 qpid)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
	};

	/* bail out before building the wqe if the opcode is unsupported */
	if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_QP)
		return -EBADRQC;

	wr.wqe.op = IONIC_V1_ADMIN_DESTROY_QP;
	wr.wqe.len = cpu_to_le16(IONIC_ADMIN_DESTROY_QP_IN_V1_LEN);
	wr.wqe.cmd.destroy_qp.qp_id = cpu_to_le32(qpid);

	ionic_admin_post(dev, &wr);

	/* wait with the teardown flag, as destroy may run during teardown */
	return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
}
1690
ionic_expdb_wqe_size_supported(struct ionic_ibdev * dev,uint32_t wqe_size)1691 static bool ionic_expdb_wqe_size_supported(struct ionic_ibdev *dev,
1692 uint32_t wqe_size)
1693 {
1694 switch (wqe_size) {
1695 case 64: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_64;
1696 case 128: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_128;
1697 case 256: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_256;
1698 case 512: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_512;
1699 }
1700
1701 return false;
1702 }
1703
/*
 * ionic_qp_sq_init_cmb() - Try to place the send queue in controller
 * memory (CMB).
 * @dev: ionic RDMA device.
 * @qp: QP whose sq_cmb flags request CMB placement.
 * @udata: user data (currently unused here).
 * @max_data: max inline data (currently unused here).
 *
 * Best-effort: each unsupported capability is dropped from sq_cmb and
 * placement continues, unless IONIC_CMB_REQUIRE is set, in which case
 * any shortfall disables CMB entirely.  On any failure the qp's CMB
 * fields are reset to "not in CMB" and the caller falls back to host
 * memory; this function cannot itself fail.
 */
static void ionic_qp_sq_init_cmb(struct ionic_ibdev *dev,
				 struct ionic_qp *qp,
				 struct ib_udata *udata,
				 int max_data)
{
	u8 expdb_stride_log2 = 0;
	bool expdb;
	int rc;

	if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
		goto not_in_cmb;

	/* drop unknown capability bits, unless the user requires them */
	if (qp->sq_cmb & ~IONIC_CMB_SUPPORTED) {
		if (qp->sq_cmb & IONIC_CMB_REQUIRE)
			goto not_in_cmb;

		qp->sq_cmb &= IONIC_CMB_SUPPORTED;
	}

	/* express doorbell needs device-side sq support */
	if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.sq_expdb) {
		if (qp->sq_cmb & IONIC_CMB_REQUIRE)
			goto not_in_cmb;

		qp->sq_cmb &= ~IONIC_CMB_EXPDB;
	}

	qp->sq_cmb_order = order_base_2(qp->sq.size / PAGE_SIZE);

	/* refuse queues too large for the CMB allocator */
	if (qp->sq_cmb_order >= IONIC_SQCMB_ORDER)
		goto not_in_cmb;

	if (qp->sq_cmb & IONIC_CMB_EXPDB)
		expdb_stride_log2 = qp->sq.stride_log2;

	rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->sq_cmb_pgid,
			   &qp->sq_cmb_addr, qp->sq_cmb_order,
			   expdb_stride_log2, &expdb);
	if (rc)
		goto not_in_cmb;

	/* the allocation may come back without expdb capability */
	if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !expdb) {
		if (qp->sq_cmb & IONIC_CMB_REQUIRE)
			goto err_map;

		qp->sq_cmb &= ~IONIC_CMB_EXPDB;
	}

	return;

err_map:
	ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
not_in_cmb:
	if (qp->sq_cmb & IONIC_CMB_REQUIRE)
		ibdev_dbg(&dev->ibdev, "could not place sq in cmb as required\n");

	/* reset to "not in CMB" so the caller uses host memory */
	qp->sq_cmb = 0;
	qp->sq_cmb_order = IONIC_RES_INVALID;
	qp->sq_cmb_pgid = 0;
	qp->sq_cmb_addr = 0;
}
1764
ionic_qp_sq_destroy_cmb(struct ionic_ibdev * dev,struct ionic_ctx * ctx,struct ionic_qp * qp)1765 static void ionic_qp_sq_destroy_cmb(struct ionic_ibdev *dev,
1766 struct ionic_ctx *ctx,
1767 struct ionic_qp *qp)
1768 {
1769 if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
1770 return;
1771
1772 if (ctx)
1773 rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
1774
1775 ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
1776 }
1777
/*
 * ionic_qp_sq_init() - Initialize the send queue of a QP.
 * @dev: ionic RDMA device.
 * @ctx: user context, or NULL for a kernel QP.
 * @qp: QP under construction; has_sq and sq_cmb must already be set.
 * @sq: user-provided queue descriptor (validated only when @udata).
 * @buf: page table output describing the ring for the create command.
 * @max_wr: requested send queue depth.
 * @max_sge: requested max scatter/gather entries per wqe.
 * @max_data: requested max inline data bytes.
 * @sq_spec: requested speculative sge count.
 * @udata: non-NULL for a userspace QP (ring memory comes from umem);
 *         NULL for a kernel QP (ring allocated by the driver).
 *
 * Return: 0 on success or negative errno.
 */
static int ionic_qp_sq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
			    struct ionic_qp *qp, struct ionic_qdesc *sq,
			    struct ionic_tbl_buf *buf, int max_wr, int max_sge,
			    int max_data, int sq_spec, struct ib_udata *udata)
{
	u32 wqe_size;
	int rc = 0;

	qp->sq_msn_prod = 0;
	qp->sq_msn_cons = 0;

	/* no send queue: clear the table buf and, for user QPs, insist
	 * the provided descriptor is all-zero
	 */
	if (!qp->has_sq) {
		if (buf) {
			buf->tbl_buf = NULL;
			buf->tbl_limit = 0;
			buf->tbl_pages = 0;
		}
		if (udata)
			rc = ionic_validate_qdesc_zero(sq);

		return rc;
	}

	rc = -EINVAL;

	/* depth must fit a 16-bit queue index */
	if (max_wr < 0 || max_wr > 0xffff)
		return rc;

	if (max_sge < 1)
		return rc;

	if (max_sge > min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0,
						    qp->sq_cmb &
						    IONIC_CMB_EXPDB),
			  IONIC_SPEC_HIGH))
		return rc;

	if (max_data < 0)
		return rc;

	if (max_data > ionic_v1_send_wqe_max_data(dev->lif_cfg.max_stride,
						  qp->sq_cmb & IONIC_CMB_EXPDB))
		return rc;

	if (udata) {
		/* user QP: adopt the umem-backed ring described by @sq */
		rc = ionic_validate_qdesc(sq);
		if (rc)
			return rc;

		qp->sq_spec = sq_spec;

		qp->sq.ptr = NULL;
		qp->sq.size = sq->size;
		qp->sq.mask = sq->mask;
		qp->sq.depth_log2 = sq->depth_log2;
		qp->sq.stride_log2 = sq->stride_log2;

		/* userspace tracks its own wqe metadata */
		qp->sq_meta = NULL;
		qp->sq_msn_idx = NULL;

		qp->sq_umem = ib_umem_get(&dev->ibdev, sq->addr, sq->size, 0);
		if (IS_ERR(qp->sq_umem))
			return PTR_ERR(qp->sq_umem);
	} else {
		/* kernel QP: size and allocate the ring ourselves */
		qp->sq_umem = NULL;

		qp->sq_spec = ionic_v1_use_spec_sge(max_sge, sq_spec);
		if (sq_spec && !qp->sq_spec)
			ibdev_dbg(&dev->ibdev,
				  "init sq: max_sge %u disables spec\n",
				  max_sge);

		/* express doorbell constrains the wqe stride to the
		 * supported sizes; fall back to the normal stride if not
		 */
		if (qp->sq_cmb & IONIC_CMB_EXPDB) {
			wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
							      qp->sq_spec,
							      true);

			if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
				qp->sq_cmb &= ~IONIC_CMB_EXPDB;
		}

		if (!(qp->sq_cmb & IONIC_CMB_EXPDB))
			wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
							      qp->sq_spec,
							      false);

		rc = ionic_queue_init(&qp->sq, dev->lif_cfg.hwdev,
				      max_wr, wqe_size);
		if (rc)
			return rc;

		ionic_queue_dbell_init(&qp->sq, qp->qpid);

		/* one meta entry per wqe slot (mask + 1 == depth) */
		qp->sq_meta = kmalloc_objs(*qp->sq_meta, (u32)qp->sq.mask + 1);
		if (!qp->sq_meta) {
			rc = -ENOMEM;
			goto err_sq_meta;
		}

		qp->sq_msn_idx = kmalloc_array((u32)qp->sq.mask + 1,
					       sizeof(*qp->sq_msn_idx),
					       GFP_KERNEL);
		if (!qp->sq_msn_idx) {
			rc = -ENOMEM;
			goto err_sq_msn;
		}
	}

	/* best-effort CMB placement; clears sq_cmb flags on failure */
	ionic_qp_sq_init_cmb(dev, qp, udata, max_data);

	if (qp->sq_cmb & IONIC_CMB_ENABLE)
		rc = ionic_pgtbl_init(dev, buf, NULL,
				      (u64)qp->sq_cmb_pgid << PAGE_SHIFT,
				      1, PAGE_SIZE);
	else
		rc = ionic_pgtbl_init(dev, buf,
				      qp->sq_umem, qp->sq.dma, 1, PAGE_SIZE);
	if (rc)
		goto err_sq_tbl;

	return 0;

err_sq_tbl:
	ionic_qp_sq_destroy_cmb(dev, ctx, qp);
	kfree(qp->sq_msn_idx);
err_sq_msn:
	kfree(qp->sq_meta);
err_sq_meta:
	if (qp->sq_umem)
		ib_umem_release(qp->sq_umem);
	else
		ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
	return rc;
}
1912
ionic_qp_sq_destroy(struct ionic_ibdev * dev,struct ionic_ctx * ctx,struct ionic_qp * qp)1913 static void ionic_qp_sq_destroy(struct ionic_ibdev *dev,
1914 struct ionic_ctx *ctx,
1915 struct ionic_qp *qp)
1916 {
1917 if (!qp->has_sq)
1918 return;
1919
1920 ionic_qp_sq_destroy_cmb(dev, ctx, qp);
1921
1922 kfree(qp->sq_msn_idx);
1923 kfree(qp->sq_meta);
1924
1925 if (qp->sq_umem)
1926 ib_umem_release(qp->sq_umem);
1927 else
1928 ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
1929 }
1930
/*
 * ionic_qp_rq_init_cmb() - Try to place the receive queue in controller
 * memory (CMB).
 * @dev: ionic RDMA device.
 * @qp: QP whose rq_cmb flags request CMB placement.
 * @udata: user data (currently unused here).
 *
 * Mirrors ionic_qp_sq_init_cmb(): best-effort capability degradation
 * unless IONIC_CMB_REQUIRE is set, and on any failure the qp's rq CMB
 * fields are reset so the caller falls back to host memory.  Cannot
 * itself fail.
 */
static void ionic_qp_rq_init_cmb(struct ionic_ibdev *dev,
				 struct ionic_qp *qp,
				 struct ib_udata *udata)
{
	u8 expdb_stride_log2 = 0;
	bool expdb;
	int rc;

	if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
		goto not_in_cmb;

	/* drop unknown capability bits, unless the user requires them */
	if (qp->rq_cmb & ~IONIC_CMB_SUPPORTED) {
		if (qp->rq_cmb & IONIC_CMB_REQUIRE)
			goto not_in_cmb;

		qp->rq_cmb &= IONIC_CMB_SUPPORTED;
	}

	/* express doorbell needs device-side rq support */
	if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.rq_expdb) {
		if (qp->rq_cmb & IONIC_CMB_REQUIRE)
			goto not_in_cmb;

		qp->rq_cmb &= ~IONIC_CMB_EXPDB;
	}

	qp->rq_cmb_order = order_base_2(qp->rq.size / PAGE_SIZE);

	/* refuse queues too large for the CMB allocator */
	if (qp->rq_cmb_order >= IONIC_RQCMB_ORDER)
		goto not_in_cmb;

	if (qp->rq_cmb & IONIC_CMB_EXPDB)
		expdb_stride_log2 = qp->rq.stride_log2;

	rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->rq_cmb_pgid,
			   &qp->rq_cmb_addr, qp->rq_cmb_order,
			   expdb_stride_log2, &expdb);
	if (rc)
		goto not_in_cmb;

	/* the allocation may come back without expdb capability */
	if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !expdb) {
		if (qp->rq_cmb & IONIC_CMB_REQUIRE)
			goto err_map;

		qp->rq_cmb &= ~IONIC_CMB_EXPDB;
	}

	return;

err_map:
	ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
not_in_cmb:
	if (qp->rq_cmb & IONIC_CMB_REQUIRE)
		ibdev_dbg(&dev->ibdev, "could not place rq in cmb as required\n");

	/* reset to "not in CMB" so the caller uses host memory */
	qp->rq_cmb = 0;
	qp->rq_cmb_order = IONIC_RES_INVALID;
	qp->rq_cmb_pgid = 0;
	qp->rq_cmb_addr = 0;
}
1990
ionic_qp_rq_destroy_cmb(struct ionic_ibdev * dev,struct ionic_ctx * ctx,struct ionic_qp * qp)1991 static void ionic_qp_rq_destroy_cmb(struct ionic_ibdev *dev,
1992 struct ionic_ctx *ctx,
1993 struct ionic_qp *qp)
1994 {
1995 if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
1996 return;
1997
1998 if (ctx)
1999 rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
2000
2001 ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
2002 }
2003
/*
 * ionic_qp_rq_init() - Initialize the receive queue of a QP.
 * @dev: ionic RDMA device.
 * @ctx: user context, or NULL for a kernel QP.
 * @qp: QP under construction; has_rq and rq_cmb must already be set.
 * @rq: user-provided queue descriptor (validated only when @udata).
 * @buf: page table output describing the ring for the create command.
 * @max_wr: requested receive queue depth.
 * @max_sge: requested max scatter/gather entries per wqe.
 * @rq_spec: requested speculative sge count.
 * @udata: non-NULL for a userspace QP (ring memory comes from umem);
 *         NULL for a kernel QP (ring allocated by the driver).
 *
 * Return: 0 on success or negative errno.
 */
static int ionic_qp_rq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
			    struct ionic_qp *qp, struct ionic_qdesc *rq,
			    struct ionic_tbl_buf *buf, int max_wr, int max_sge,
			    int rq_spec, struct ib_udata *udata)
{
	int rc = 0, i;
	u32 wqe_size;

	/* no receive queue: clear the table buf and, for user QPs, insist
	 * the provided descriptor is all-zero
	 */
	if (!qp->has_rq) {
		if (buf) {
			buf->tbl_buf = NULL;
			buf->tbl_limit = 0;
			buf->tbl_pages = 0;
		}
		if (udata)
			rc = ionic_validate_qdesc_zero(rq);

		return rc;
	}

	rc = -EINVAL;

	/* depth must fit a 16-bit queue index */
	if (max_wr < 0 || max_wr > 0xffff)
		return rc;

	if (max_sge < 1)
		return rc;

	if (max_sge > min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
			  IONIC_SPEC_HIGH))
		return rc;

	if (udata) {
		/* user QP: adopt the umem-backed ring described by @rq */
		rc = ionic_validate_qdesc(rq);
		if (rc)
			return rc;

		qp->rq_spec = rq_spec;

		qp->rq.ptr = NULL;
		qp->rq.size = rq->size;
		qp->rq.mask = rq->mask;
		qp->rq.depth_log2 = rq->depth_log2;
		qp->rq.stride_log2 = rq->stride_log2;

		/* userspace tracks its own wqe metadata */
		qp->rq_meta = NULL;

		qp->rq_umem = ib_umem_get(&dev->ibdev, rq->addr, rq->size, 0);
		if (IS_ERR(qp->rq_umem))
			return PTR_ERR(qp->rq_umem);
	} else {
		/* kernel QP: size and allocate the ring ourselves */
		qp->rq_umem = NULL;

		qp->rq_spec = ionic_v1_use_spec_sge(max_sge, rq_spec);
		if (rq_spec && !qp->rq_spec)
			ibdev_dbg(&dev->ibdev,
				  "init rq: max_sge %u disables spec\n",
				  max_sge);

		/* express doorbell constrains the wqe stride to the
		 * supported sizes; fall back to the normal stride if not
		 */
		if (qp->rq_cmb & IONIC_CMB_EXPDB) {
			wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
							      qp->rq_spec,
							      true);

			if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
				qp->rq_cmb &= ~IONIC_CMB_EXPDB;
		}

		if (!(qp->rq_cmb & IONIC_CMB_EXPDB))
			wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
							      qp->rq_spec,
							      false);

		rc = ionic_queue_init(&qp->rq, dev->lif_cfg.hwdev,
				      max_wr, wqe_size);
		if (rc)
			return rc;

		ionic_queue_dbell_init(&qp->rq, qp->qpid);

		/* one meta entry per wqe slot (mask + 1 == depth) */
		qp->rq_meta = kmalloc_objs(*qp->rq_meta, (u32)qp->rq.mask + 1);
		if (!qp->rq_meta) {
			rc = -ENOMEM;
			goto err_rq_meta;
		}

		/* thread the meta entries into a free list, last one
		 * terminated with IONIC_META_LAST
		 */
		for (i = 0; i < qp->rq.mask; ++i)
			qp->rq_meta[i].next = &qp->rq_meta[i + 1];
		qp->rq_meta[i].next = IONIC_META_LAST;
		qp->rq_meta_head = &qp->rq_meta[0];
	}

	/* best-effort CMB placement; clears rq_cmb flags on failure */
	ionic_qp_rq_init_cmb(dev, qp, udata);

	if (qp->rq_cmb & IONIC_CMB_ENABLE)
		rc = ionic_pgtbl_init(dev, buf, NULL,
				      (u64)qp->rq_cmb_pgid << PAGE_SHIFT,
				      1, PAGE_SIZE);
	else
		rc = ionic_pgtbl_init(dev, buf,
				      qp->rq_umem, qp->rq.dma, 1, PAGE_SIZE);
	if (rc)
		goto err_rq_tbl;

	return 0;

err_rq_tbl:
	ionic_qp_rq_destroy_cmb(dev, ctx, qp);
	kfree(qp->rq_meta);
err_rq_meta:
	if (qp->rq_umem)
		ib_umem_release(qp->rq_umem);
	else
		ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
	return rc;
}
2120
ionic_qp_rq_destroy(struct ionic_ibdev * dev,struct ionic_ctx * ctx,struct ionic_qp * qp)2121 static void ionic_qp_rq_destroy(struct ionic_ibdev *dev,
2122 struct ionic_ctx *ctx,
2123 struct ionic_qp *qp)
2124 {
2125 if (!qp->has_rq)
2126 return;
2127
2128 ionic_qp_rq_destroy_cmb(dev, ctx, qp);
2129
2130 kfree(qp->rq_meta);
2131
2132 if (qp->rq_umem)
2133 ib_umem_release(qp->rq_umem);
2134 else
2135 ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
2136 }
2137
/**
 * ionic_create_qp() - Create a queue pair on the ionic device.
 * @ibqp:  Core verbs QP (embedded in struct ionic_qp), partly initialized.
 * @attr:  Requested QP type, capabilities, and attached send/recv CQs.
 * @udata: User-space parameters, or NULL for a kernel QP (e.g. GSI).
 *
 * Flow: choose a qpid (constrained to udma indices usable by both CQs),
 * allocate AH state for RC QPs, build the SQ and RQ (optionally placed in
 * controller memory, "CMB"), issue the firmware create command, expose CMB
 * mmap regions and ids to userspace, then publish the QP in dev->qp_tbl.
 * Unwinds in reverse order via the goto chain on any failure.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		    struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
	struct ionic_tbl_buf sq_buf = {}, rq_buf = {};
	struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
	struct ionic_qp *qp = to_ionic_qp(ibqp);
	struct ionic_ctx *ctx =
		rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
	struct ionic_qp_resp resp = {};
	struct ionic_qp_req req = {};
	struct ionic_cq *cq;
	u8 udma_mask;
	void *entry;
	int rc;

	if (udata) {
		rc = ib_copy_from_udata(&req, udata, sizeof(req));
		if (rc)
			return rc;
	} else {
		/* Kernel QPs get the default work-request spec sizes. */
		req.sq_spec = IONIC_SPEC_HIGH;
		req.rq_spec = IONIC_SPEC_HIGH;
	}

	/* Only GSI, RC, UC and UD types are supported; SMI is not. */
	if (attr->qp_type == IB_QPT_SMI || attr->qp_type > IB_QPT_UD)
		return -EOPNOTSUPP;

	qp->state = IB_QPS_RESET;

	INIT_LIST_HEAD(&qp->cq_poll_sq);
	INIT_LIST_HEAD(&qp->cq_flush_sq);
	INIT_LIST_HEAD(&qp->cq_flush_rq);

	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);

	qp->has_sq = 1;
	qp->has_rq = 1;

	if (attr->qp_type == IB_QPT_GSI) {
		rc = ionic_get_gsi_qpid(dev, &qp->qpid);
	} else {
		/* Restrict qpid selection to udma indices usable by both
		 * attached CQs and, for user QPs, by the requested mask.
		 */
		udma_mask = BIT(dev->lif_cfg.udma_count) - 1;

		if (qp->has_sq)
			udma_mask &= to_ionic_vcq(attr->send_cq)->udma_mask;

		if (qp->has_rq)
			udma_mask &= to_ionic_vcq(attr->recv_cq)->udma_mask;

		if (udata && req.udma_mask)
			udma_mask &= req.udma_mask;

		if (!udma_mask)
			return -EINVAL;

		rc = ionic_get_qpid(dev, &qp->qpid, &qp->udma_idx, udma_mask);
	}
	if (rc)
		return rc;

	qp->sig_all = attr->sq_sig_type == IB_SIGNAL_ALL_WR;
	/* Only RC QPs keep an address handle and header template. */
	qp->has_ah = attr->qp_type == IB_QPT_RC;

	if (qp->has_ah) {
		/* Zeroed allocation of the AH header template. */
		qp->hdr = kzalloc_obj(*qp->hdr);
		if (!qp->hdr) {
			rc = -ENOMEM;
			goto err_ah_alloc;
		}

		rc = ionic_get_ahid(dev, &qp->ahid);
		if (rc)
			goto err_ahid;
	}

	if (udata) {
		/* Honor userspace's controller-memory-buffer requests. */
		if (req.rq_cmb & IONIC_CMB_ENABLE)
			qp->rq_cmb = req.rq_cmb;

		if (req.sq_cmb & IONIC_CMB_ENABLE)
			qp->sq_cmb = req.sq_cmb;
	}

	rc = ionic_qp_sq_init(dev, ctx, qp, &req.sq, &sq_buf,
			      attr->cap.max_send_wr, attr->cap.max_send_sge,
			      attr->cap.max_inline_data, req.sq_spec, udata);
	if (rc)
		goto err_sq;

	rc = ionic_qp_rq_init(dev, ctx, qp, &req.rq, &rq_buf,
			      attr->cap.max_recv_wr, attr->cap.max_recv_sge,
			      req.rq_spec, udata);
	if (rc)
		goto err_rq;

	rc = ionic_create_qp_cmd(dev, pd,
				 to_ionic_vcq_cq(attr->send_cq, qp->udma_idx),
				 to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx),
				 qp, &sq_buf, &rq_buf, attr);
	if (rc)
		goto err_cmd;

	if (udata) {
		resp.qpid = qp->qpid;
		resp.udma_idx = qp->udma_idx;

		if (qp->sq_cmb & IONIC_CMB_ENABLE) {
			bool wc;

			/* WC and UC together is contradictory; fall back to
			 * the driver default mapping (WC, see below).
			 */
			if ((qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
			    (IONIC_CMB_WC | IONIC_CMB_UC)) {
				ibdev_dbg(&dev->ibdev,
					  "Both sq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
				qp->sq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
			}

			/* Write-combined unless UC was explicitly requested. */
			wc = (qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
				!= IONIC_CMB_UC;

			/* let userspace know the mapping */
			if (wc)
				qp->sq_cmb |= IONIC_CMB_WC;
			else
				qp->sq_cmb |= IONIC_CMB_UC;

			qp->mmap_sq_cmb =
				ionic_mmap_entry_insert(ctx,
							qp->sq.size,
							PHYS_PFN(qp->sq_cmb_addr),
							wc ? IONIC_MMAP_WC : 0,
							&resp.sq_cmb_offset);
			if (!qp->mmap_sq_cmb) {
				rc = -ENOMEM;
				goto err_mmap_sq;
			}

			resp.sq_cmb = qp->sq_cmb;
		}

		if (qp->rq_cmb & IONIC_CMB_ENABLE) {
			bool wc;

			if ((qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
			    (IONIC_CMB_WC | IONIC_CMB_UC)) {
				ibdev_dbg(&dev->ibdev,
					  "Both rq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
				qp->rq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
			}

			/* With express doorbell the default flips: map WC
			 * only when WC was explicitly requested.
			 */
			if (qp->rq_cmb & IONIC_CMB_EXPDB)
				wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
					== IONIC_CMB_WC;
			else
				wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
					!= IONIC_CMB_UC;

			/* let userspace know the mapping */
			if (wc)
				qp->rq_cmb |= IONIC_CMB_WC;
			else
				qp->rq_cmb |= IONIC_CMB_UC;

			qp->mmap_rq_cmb =
				ionic_mmap_entry_insert(ctx,
							qp->rq.size,
							PHYS_PFN(qp->rq_cmb_addr),
							wc ? IONIC_MMAP_WC : 0,
							&resp.rq_cmb_offset);
			if (!qp->mmap_rq_cmb) {
				rc = -ENOMEM;
				goto err_mmap_rq;
			}

			resp.rq_cmb = qp->rq_cmb;
		}

		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc)
			goto err_resp;
	}

	/* Release the temporary page-table buffers built for the command. */
	ionic_pgtbl_unbuf(dev, &rq_buf);
	ionic_pgtbl_unbuf(dev, &sq_buf);

	qp->ibqp.qp_num = qp->qpid;

	init_completion(&qp->qp_rel_comp);
	kref_init(&qp->qp_kref);

	/* Publish in the device table; the slot must have been empty. */
	entry = xa_store_irq(&dev->qp_tbl, qp->qpid, qp, GFP_KERNEL);
	if (entry) {
		if (!xa_is_err(entry))
			rc = -EINVAL;
		else
			rc = xa_err(entry);

		goto err_resp;
	}

	/* Report the actually-allocated capabilities back to the caller. */
	if (qp->has_sq) {
		cq = to_ionic_vcq_cq(attr->send_cq, qp->udma_idx);

		attr->cap.max_send_wr = qp->sq.mask;
		attr->cap.max_send_sge =
			ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
						  qp->sq_spec,
						  qp->sq_cmb & IONIC_CMB_EXPDB);
		attr->cap.max_inline_data =
			ionic_v1_send_wqe_max_data(qp->sq.stride_log2,
						   qp->sq_cmb &
						   IONIC_CMB_EXPDB);
		qp->sq_cqid = cq->cqid;
	}

	if (qp->has_rq) {
		cq = to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx);

		attr->cap.max_recv_wr = qp->rq.mask;
		attr->cap.max_recv_sge =
			ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
						  qp->rq_spec,
						  qp->rq_cmb & IONIC_CMB_EXPDB);
		qp->rq_cqid = cq->cqid;
	}

	return 0;

err_resp:
	if (udata && (qp->rq_cmb & IONIC_CMB_ENABLE))
		rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
err_mmap_rq:
	if (udata && (qp->sq_cmb & IONIC_CMB_ENABLE))
		rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
err_mmap_sq:
	ionic_destroy_qp_cmd(dev, qp->qpid);
err_cmd:
	ionic_pgtbl_unbuf(dev, &rq_buf);
	ionic_qp_rq_destroy(dev, ctx, qp);
err_rq:
	ionic_pgtbl_unbuf(dev, &sq_buf);
	ionic_qp_sq_destroy(dev, ctx, qp);
err_sq:
	if (qp->has_ah)
		ionic_put_ahid(dev, qp->ahid);
err_ahid:
	kfree(qp->hdr);
err_ah_alloc:
	ionic_put_qpid(dev, qp->qpid);
	return rc;
}
2390
ionic_notify_flush_cq(struct ionic_cq * cq)2391 void ionic_notify_flush_cq(struct ionic_cq *cq)
2392 {
2393 if (cq->flush && cq->vcq->ibcq.comp_handler)
2394 cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
2395 cq->vcq->ibcq.cq_context);
2396 }
2397
ionic_notify_qp_cqs(struct ionic_ibdev * dev,struct ionic_qp * qp)2398 static void ionic_notify_qp_cqs(struct ionic_ibdev *dev, struct ionic_qp *qp)
2399 {
2400 if (qp->ibqp.send_cq)
2401 ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.send_cq,
2402 qp->udma_idx));
2403 if (qp->ibqp.recv_cq && qp->ibqp.recv_cq != qp->ibqp.send_cq)
2404 ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.recv_cq,
2405 qp->udma_idx));
2406 }
2407
/**
 * ionic_flush_qp() - Arm software flushing of a QP's queues.
 * @dev: ionic RDMA device.
 * @qp:  QP entering the error state.
 *
 * For each attached CQ, takes the CQ lock and then the queue lock (this
 * cq-then-queue nesting order must be consistent with the poll path —
 * NOTE(review): inferred from the nesting here, confirm against the CQ
 * poll code), marks the queue as flushing, and if the queue still holds
 * unconsumed work links the QP onto the CQ's flush list so the remaining
 * entries complete as flushed.
 */
void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
{
	unsigned long irqflags;
	struct ionic_cq *cq;

	if (qp->ibqp.send_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);

		/* Hold the CQ lock and QP sq_lock to set up flush */
		spin_lock_irqsave(&cq->lock, irqflags);
		spin_lock(&qp->sq_lock);
		qp->sq_flush = true;
		if (!ionic_queue_empty(&qp->sq)) {
			/* only a non-empty queue needs flush completions */
			cq->flush = true;
			list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
		}
		spin_unlock(&qp->sq_lock);
		spin_unlock_irqrestore(&cq->lock, irqflags);
	}

	if (qp->ibqp.recv_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);

		/* Hold the CQ lock and QP rq_lock to set up flush */
		spin_lock_irqsave(&cq->lock, irqflags);
		spin_lock(&qp->rq_lock);
		qp->rq_flush = true;
		if (!ionic_queue_empty(&qp->rq)) {
			cq->flush = true;
			list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
		}
		spin_unlock(&qp->rq_lock);
		spin_unlock_irqrestore(&cq->lock, irqflags);
	}
}
2443
/**
 * ionic_clean_cq() - Invalidate pending CQEs that belong to one QP.
 * @cq:   Completion queue to scrub (caller holds cq->lock).
 * @qpid: QP id whose completions must not be delivered.
 *
 * Starting at cq->q.prod (NOTE(review): presumably the driver's
 * next-to-poll position — confirm queue index semantics), walk forward
 * while CQEs still carry the current phase color, i.e. over valid entries
 * not yet consumed, and clean every non-admin CQE addressed to @qpid.
 */
static void ionic_clean_cq(struct ionic_cq *cq, u32 qpid)
{
	struct ionic_v1_cqe *qcqe;
	int prod, qtf, qid, type;
	bool color;

	/* nothing to scrub if the queue was never mapped */
	if (!cq->q.ptr)
		return;

	color = cq->color;
	prod = cq->q.prod;
	qcqe = ionic_queue_at(&cq->q, prod);

	while (color == ionic_v1_cqe_color(qcqe)) {
		qtf = ionic_v1_cqe_qtf(qcqe);
		qid = ionic_v1_cqe_qtf_qid(qtf);
		type = ionic_v1_cqe_qtf_type(qtf);

		if (qid == qpid && type != IONIC_V1_CQE_TYPE_ADMIN)
			ionic_v1_cqe_clean(qcqe);

		prod = ionic_queue_next(&cq->q, prod);
		qcqe = ionic_queue_at(&cq->q, prod);
		/* flip the expected color each time the index wraps */
		color = ionic_color_wrap(prod, color);
	}
}
2470
/**
 * ionic_reset_qp() - Return a QP to reset state in software.
 * @dev: ionic RDMA device.
 * @qp:  QP transitioning to IB_QPS_RESET.
 *
 * Scrubs the QP's CQEs from both attached CQs, clears flush state, and
 * rewinds all queue indices.  Interrupts are disabled once around the
 * whole sequence so the plain spin_lock() calls below are irq-safe.
 */
static void ionic_reset_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
{
	unsigned long irqflags;
	struct ionic_cq *cq;
	int i;

	local_irq_save(irqflags);

	if (qp->ibqp.send_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
		spin_lock(&cq->lock);
		ionic_clean_cq(cq, qp->qpid);
		spin_unlock(&cq->lock);
	}

	if (qp->ibqp.recv_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
		spin_lock(&cq->lock);
		ionic_clean_cq(cq, qp->qpid);
		spin_unlock(&cq->lock);
	}

	if (qp->has_sq) {
		spin_lock(&qp->sq_lock);
		qp->sq_flush = false;
		qp->sq_flush_rcvd = false;
		/* rewind MSN and ring indices to empty */
		qp->sq_msn_prod = 0;
		qp->sq_msn_cons = 0;
		qp->sq.prod = 0;
		qp->sq.cons = 0;
		spin_unlock(&qp->sq_lock);
	}

	if (qp->has_rq) {
		spin_lock(&qp->rq_lock);
		qp->rq_flush = false;
		qp->rq.prod = 0;
		qp->rq.cons = 0;
		if (qp->rq_meta) {
			/* rebuild the rq metadata free list: each entry
			 * chains to the next, last is the sentinel
			 */
			for (i = 0; i < qp->rq.mask; ++i)
				qp->rq_meta[i].next = &qp->rq_meta[i + 1];
			qp->rq_meta[i].next = IONIC_META_LAST;
		}
		/* free-list head; stays NULL when rq_meta is NULL */
		qp->rq_meta_head = &qp->rq_meta[0];
		spin_unlock(&qp->rq_lock);
	}

	local_irq_restore(irqflags);
}
2520
ionic_qp_cur_state_is_ok(enum ib_qp_state q_state,enum ib_qp_state attr_state)2521 static bool ionic_qp_cur_state_is_ok(enum ib_qp_state q_state,
2522 enum ib_qp_state attr_state)
2523 {
2524 if (q_state == attr_state)
2525 return true;
2526
2527 if (attr_state == IB_QPS_ERR)
2528 return true;
2529
2530 if (attr_state == IB_QPS_SQE)
2531 return q_state == IB_QPS_RTS || q_state == IB_QPS_SQD;
2532
2533 return false;
2534 }
2535
ionic_check_modify_qp(struct ionic_qp * qp,struct ib_qp_attr * attr,int mask)2536 static int ionic_check_modify_qp(struct ionic_qp *qp, struct ib_qp_attr *attr,
2537 int mask)
2538 {
2539 enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
2540 attr->cur_qp_state : qp->state;
2541 enum ib_qp_state next_state = (mask & IB_QP_STATE) ?
2542 attr->qp_state : cur_state;
2543
2544 if ((mask & IB_QP_CUR_STATE) &&
2545 !ionic_qp_cur_state_is_ok(qp->state, attr->cur_qp_state))
2546 return -EINVAL;
2547
2548 if (!ib_modify_qp_is_ok(cur_state, next_state, qp->ibqp.qp_type, mask))
2549 return -EINVAL;
2550
2551 /* unprivileged qp not allowed privileged qkey */
2552 if ((mask & IB_QP_QKEY) && (attr->qkey & 0x80000000) &&
2553 qp->ibqp.uobject)
2554 return -EPERM;
2555
2556 return 0;
2557 }
2558
/**
 * ionic_modify_qp() - Modify QP attributes and transition its state.
 * @ibqp:  QP to modify.
 * @attr:  Attribute values, selected by @mask.
 * @mask:  IB_QP_* bits indicating which attributes are valid.
 * @udata: User data (unused here beyond the core verbs contract).
 *
 * Validates the transition, rejects capability resizing (IB_QP_CAP),
 * issues the firmware modify command, then mirrors the new state in
 * software: ERR arms flushing and notifies the CQs, RESET clears all
 * software queue state.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		    struct ib_udata *udata)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
	struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
	struct ionic_qp *qp = to_ionic_qp(ibqp);
	int rc;

	rc = ionic_check_modify_qp(qp, attr, mask);
	if (rc)
		return rc;

	/* queue resize via modify is not supported */
	if (mask & IB_QP_CAP)
		return -EINVAL;

	rc = ionic_modify_qp_cmd(dev, pd, qp, attr, mask);
	if (rc)
		return rc;

	if (mask & IB_QP_STATE) {
		qp->state = attr->qp_state;

		if (attr->qp_state == IB_QPS_ERR) {
			ionic_flush_qp(dev, qp);
			ionic_notify_qp_cqs(dev, qp);
		} else if (attr->qp_state == IB_QPS_RESET) {
			ionic_reset_qp(dev, qp);
		}
	}

	return 0;
}
2591
ionic_query_qp(struct ib_qp * ibqp,struct ib_qp_attr * attr,int mask,struct ib_qp_init_attr * init_attr)2592 int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2593 int mask, struct ib_qp_init_attr *init_attr)
2594 {
2595 struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
2596 struct ionic_qp *qp = to_ionic_qp(ibqp);
2597 int rc;
2598
2599 memset(attr, 0, sizeof(*attr));
2600 memset(init_attr, 0, sizeof(*init_attr));
2601
2602 rc = ionic_query_qp_cmd(dev, qp, attr, mask);
2603 if (rc)
2604 return rc;
2605
2606 if (qp->has_sq)
2607 attr->cap.max_send_wr = qp->sq.mask;
2608
2609 if (qp->has_rq)
2610 attr->cap.max_recv_wr = qp->rq.mask;
2611
2612 init_attr->event_handler = ibqp->event_handler;
2613 init_attr->qp_context = ibqp->qp_context;
2614 init_attr->send_cq = ibqp->send_cq;
2615 init_attr->recv_cq = ibqp->recv_cq;
2616 init_attr->srq = ibqp->srq;
2617 init_attr->xrcd = ibqp->xrcd;
2618 init_attr->cap = attr->cap;
2619 init_attr->sq_sig_type = qp->sig_all ?
2620 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
2621 init_attr->qp_type = ibqp->qp_type;
2622 init_attr->create_flags = 0;
2623 init_attr->port_num = 0;
2624 init_attr->rwq_ind_tbl = ibqp->rwq_ind_tbl;
2625 init_attr->source_qpn = 0;
2626
2627 return rc;
2628 }
2629
/**
 * ionic_destroy_qp() - Destroy a QP and release its resources.
 * @ibqp:  QP to destroy.
 * @udata: User context, or NULL for a kernel QP.
 *
 * Order matters: destroy in firmware first (and bail out, leaving the QP
 * intact, if that fails), unpublish from dev->qp_tbl, wait for the last
 * reference to drop, scrub remaining CQEs and list linkage under the CQ
 * locks, then free queues, AH state, and finally the qpid.
 *
 * Return: 0 on success, negative errno if the firmware destroy failed.
 */
int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct ionic_ctx *ctx =
		rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
	struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
	struct ionic_qp *qp = to_ionic_qp(ibqp);
	unsigned long irqflags;
	struct ionic_cq *cq;
	int rc;

	rc = ionic_destroy_qp_cmd(dev, qp->qpid);
	if (rc)
		return rc;

	xa_erase_irq(&dev->qp_tbl, qp->qpid);

	/* drop the table's reference and wait for all holders to finish */
	kref_put(&qp->qp_kref, ionic_qp_complete);
	wait_for_completion(&qp->qp_rel_comp);

	if (qp->ibqp.send_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
		spin_lock_irqsave(&cq->lock, irqflags);
		ionic_clean_cq(cq, qp->qpid);
		list_del(&qp->cq_poll_sq);
		list_del(&qp->cq_flush_sq);
		spin_unlock_irqrestore(&cq->lock, irqflags);
	}

	if (qp->ibqp.recv_cq) {
		cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
		spin_lock_irqsave(&cq->lock, irqflags);
		ionic_clean_cq(cq, qp->qpid);
		list_del(&qp->cq_flush_rq);
		spin_unlock_irqrestore(&cq->lock, irqflags);
	}

	ionic_qp_rq_destroy(dev, ctx, qp);
	ionic_qp_sq_destroy(dev, ctx, qp);
	if (qp->has_ah) {
		ionic_put_ahid(dev, qp->ahid);
		kfree(qp->hdr);
	}
	ionic_put_qpid(dev, qp->qpid);

	return 0;
}
2676