// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"
#include <linux/pci.h>

void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
			 u32 port)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count--;
	WARN_ON(pd->vport_use_count < 0);

	if (!pd->vport_use_count)
		mana_uncfg_vport(mpc);

	mutex_unlock(&pd->vport_mutex);
}

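/*
 * Configure the vport for this PD. The vport is configured in hardware only
 * for the first user; later callers just bump the reference count under
 * pd->vport_mutex and reuse the existing configuration.
 */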
int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
		      u32 doorbell_id)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;
	int err;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count++;
	if (pd->vport_use_count > 1) {
		ibdev_dbg(&dev->ib_dev,
			  "Skip as this PD has already configured a vport\n");
		mutex_unlock(&pd->vport_mutex);
		return 0;
	}

	err = mana_cfg_vport(mpc, pd->pdn, doorbell_id);
	if (err) {
		pd->vport_use_count--;
		mutex_unlock(&pd->vport_mutex);

		ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n", err);
		return err;
	}

	mutex_unlock(&pd->vport_mutex);

	pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
	pd->tx_vp_offset = mpc->tx_vp_offset;

	ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n",
		  mpc->port_handle, pd->pdn, doorbell_id);

	return 0;
}

int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_create_pd_resp resp = {};
	struct gdma_create_pd_req req = {};
	enum gdma_pd_flags flags = 0;
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
			     sizeof(resp));

	if (!udata)
		flags |= GDMA_PD_FLAG_ALLOW_GPA_MR;

	req.flags = flags;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to get pd_id err %d status %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	pd->pd_handle = resp.pd_handle;
	pd->pdn = resp.pd_id;
	ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
		  pd->pd_handle, pd->pdn);

	mutex_init(&pd->vport_mutex);
	pd->vport_use_count = 0;
	return 0;
}

int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_destory_pd_resp resp = {};
	struct gdma_destroy_pd_req req = {};
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
			     sizeof(resp));

	req.pd_handle = pd->pd_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to destroy pd_handle 0x%llx err %d status %u",
			  pd->pd_handle, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;
	}

	return err;
}

static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
					 int doorbell_page)
{
	struct gdma_destroy_resource_range_req req = {};
	struct gdma_resp_hdr resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.allocated_resources = doorbell_page;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.status) {
		dev_err(gc->dev,
			"Failed to destroy doorbell page: ret %d, 0x%x\n",
			err, resp.status);
		return err ?: -EPROTO;
	}

	return 0;
}

static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
					  int *doorbell_page)
{
	struct gdma_allocate_resource_range_req req = {};
	struct gdma_allocate_resource_range_resp resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;

	/* Have GDMA start searching from 0 */
	req.allocated_resources = 0;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev,
			"Failed to allocate doorbell page: ret %d, 0x%x\n",
			err, resp.hdr.status);
		return err ?: -EPROTO;
	}

	*doorbell_page = resp.allocated_resources;

	return 0;
}

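/*
 * Each user context gets its own doorbell page, allocated through the GDMA
 * resource-range interface above; the page index is mapped to user space
 * later in mana_ib_mmap().
 */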
int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
			   struct ib_udata *udata)
{
	struct mana_ib_ucontext *ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int doorbell_page;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	/* Allocate a doorbell page index */
	ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
	if (ret) {
		ibdev_dbg(ibdev, "Failed to allocate doorbell page %d\n", ret);
		return ret;
	}

	ibdev_dbg(ibdev, "Doorbell page allocated %d\n", doorbell_page);

	ucontext->doorbell = doorbell_page;

	return 0;
}

void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
	if (ret)
		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}

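/*
 * Queue helpers: kernel queues are backed by GDMA-allocated memory, while
 * user queues are backed by a pinned umem registered as a zero-offset DMA
 * region. In both cases mana_ib ends up owning the resulting GDMA region
 * handle and releases it in mana_ib_destroy_queue().
 */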
int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
				struct mana_ib_queue *queue)
{
	struct gdma_queue_spec spec = {};
	int err;

	queue->id = INVALID_QUEUE_ID;
	queue->gdma_region = GDMA_INVALID_DMA_REGION;
	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = size;
	err = mana_gd_create_mana_wq_cq(mdev->gdma_dev, &spec, &queue->kmem);
	if (err)
		return err;
	/* take ownership into mana_ib from mana */
	queue->gdma_region = queue->kmem->mem_info.dma_region_handle;
	queue->kmem->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
	return 0;
}

int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
			 struct mana_ib_queue *queue)
{
	struct ib_umem *umem;
	int err;

	queue->umem = NULL;
	queue->id = INVALID_QUEUE_ID;
	queue->gdma_region = GDMA_INVALID_DMA_REGION;

	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
		return err;
	}

	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
		goto free_umem;
	}
	queue->umem = umem;

	ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);

	return 0;
free_umem:
	ib_umem_release(umem);
	return err;
}

void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
{
	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
	ib_umem_release(queue->umem);
	if (queue->kmem)
		mana_gd_destroy_queue(mdev_to_gc(mdev), queue->kmem);
}

static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
			    struct gdma_context *gc,
			    struct gdma_create_dma_region_req *create_req,
			    size_t num_pages, mana_handle_t *gdma_region,
			    u32 expected_status)
{
	struct gdma_create_dma_region_resp create_resp = {};
	unsigned int create_req_msg_size;
	int err;

	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages);
	create_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, create_req_msg_size, create_req,
				   sizeof(create_resp), &create_resp);
	if (err || create_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to create DMA region: %d, 0x%x\n",
			  err, create_resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	*gdma_region = create_resp.dma_region_handle;
	ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
		  *gdma_region);

	return 0;
}

static int
mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
			  struct gdma_dma_region_add_pages_req *add_req,
			  unsigned int num_pages, u32 expected_status)
{
	unsigned int add_req_msg_size =
		struct_size(add_req, page_addr_list, num_pages);
	struct gdma_general_resp add_resp = {};
	int err;

	mana_gd_init_req_hdr(&add_req->hdr, GDMA_DMA_REGION_ADD_PAGES,
			     add_req_msg_size, sizeof(add_resp));
	add_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, add_req_msg_size, add_req,
				   sizeof(add_resp), &add_resp);
	if (err || add_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to add pages to DMA region: %d, 0x%x\n",
			  err, add_resp.hdr.status);

		if (!err)
			err = -EPROTO;

		return err;
	}

	return 0;
}

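/*
 * Create a GDMA DMA region for a umem. The page address list may not fit in
 * a single HWC request, so it is sent as one GDMA_CREATE_DMA_REGION message
 * followed by as many GDMA_DMA_REGION_ADD_PAGES messages as needed, each
 * sized to fit within the HWC maximum request size.
 */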
static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					mana_handle_t *gdma_region, unsigned long page_sz)
{
	struct gdma_dma_region_add_pages_req *add_req = NULL;
	size_t num_pages_processed = 0, num_pages_to_handle;
	struct gdma_create_dma_region_req *create_req;
	unsigned int create_req_msg_size;
	struct hw_channel_context *hwc;
	struct ib_block_iter biter;
	size_t max_pgs_add_cmd = 0;
	size_t max_pgs_create_cmd;
	struct gdma_context *gc;
	size_t num_pages_total;
	unsigned int tail = 0;
	u64 *page_addr_list;
	void *request_buf;
	int err = 0;

	gc = mdev_to_gc(dev);
	hwc = gc->hwc.driver_data;

	num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);

	max_pgs_create_cmd =
		(hwc->max_req_msg_size - sizeof(*create_req)) / sizeof(u64);
	num_pages_to_handle =
		min_t(size_t, num_pages_total, max_pgs_create_cmd);
	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages_to_handle);

	request_buf = kzalloc(hwc->max_req_msg_size, GFP_KERNEL);
	if (!request_buf)
		return -ENOMEM;

	create_req = request_buf;
	mana_gd_init_req_hdr(&create_req->hdr, GDMA_CREATE_DMA_REGION,
			     create_req_msg_size,
			     sizeof(struct gdma_create_dma_region_resp));

	create_req->length = umem->length;
	create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
	create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
	create_req->page_count = num_pages_total;

	ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
		  umem->length, num_pages_total);

	ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
		  page_sz, create_req->offset_in_page);

	ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u\n",
		  num_pages_to_handle, create_req->gdma_page_type);

	page_addr_list = create_req->page_addr_list;
	rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
		u32 expected_status = 0;

		page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
		if (tail < num_pages_to_handle)
			continue;

		if (num_pages_processed + num_pages_to_handle <
		    num_pages_total)
			expected_status = GDMA_STATUS_MORE_ENTRIES;

		if (!num_pages_processed) {
			/* First create message */
			err = mana_ib_gd_first_dma_region(dev, gc, create_req,
							  tail, gdma_region,
							  expected_status);
			if (err)
				goto out;

			max_pgs_add_cmd = (hwc->max_req_msg_size -
				sizeof(*add_req)) / sizeof(u64);

			add_req = request_buf;
			add_req->dma_region_handle = *gdma_region;
			add_req->reserved3 = 0;
			page_addr_list = add_req->page_addr_list;
		} else {
			/* Subsequent add-pages messages */
			err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
							expected_status);
			if (err)
				break;
		}

		num_pages_processed += tail;
		tail = 0;

		/* The remaining pages to create */
		num_pages_to_handle =
			min_t(size_t,
			      num_pages_total - num_pages_processed,
			      max_pgs_add_cmd);
	}

	if (err)
		mana_ib_gd_destroy_dma_region(dev, *gdma_region);

out:
	kfree(request_buf);
	return err;
}

int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
			      mana_handle_t *gdma_region, u64 virt)
{
	unsigned long page_sz;

	page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					   mana_handle_t *gdma_region)
{
	unsigned long page_sz;

	/* Hardware requires dma region to align to chosen page size */
	page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
	struct gdma_context *gc = mdev_to_gc(dev);

	ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);

	return mana_gd_destroy_dma_region(gc, gdma_region);
}

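/*
 * mmap maps the doorbell page that was reserved for this user context in
 * mana_ib_alloc_ucontext(); the mapping is write-combined and exactly one
 * page long.
 */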
int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	if (vma->vm_pgoff != 0) {
		ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
		return -EINVAL;
	}

	/* Map to the page indexed by ucontext->doorbell */
	pfn = (gc->phys_db_page_base +
	       gc->db_page_size * mana_ucontext->doorbell) >>
	      PAGE_SHIFT;
	prot = pgprot_writecombine(vma->vm_page_prot);

	ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
				NULL);
	if (ret)
		ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
	else
		ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
			  pfn, PAGE_SIZE, ret);

	return ret;
}

int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	if (mana_ib_is_rnic(dev)) {
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	} else {
		immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
	}

	return 0;
}

int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			 struct ib_udata *uhw)
{
	struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct pci_dev *pdev = to_pci_dev(mdev_to_gc(dev)->dev);

	memset(props, 0, sizeof(*props));
	props->vendor_id = pdev->vendor;
	props->vendor_part_id = dev->gdma_dev->dev_id.type;
	props->max_mr_size = MANA_IB_MAX_MR_SIZE;
	props->page_size_cap = dev->adapter_caps.page_size_cap;
	props->max_qp = dev->adapter_caps.max_qp_count;
	props->max_qp_wr = dev->adapter_caps.max_qp_wr;
	props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = dev->adapter_caps.max_send_sge_count;
	props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;
	props->max_sge_rd = dev->adapter_caps.max_recv_sge_count;
	props->max_cq = dev->adapter_caps.max_cq_count;
	props->max_cqe = dev->adapter_caps.max_qp_wr;
	props->max_mr = dev->adapter_caps.max_mr_count;
	props->max_pd = dev->adapter_caps.max_pd_count;
	props->max_qp_rd_atom = dev->adapter_caps.max_inbound_read_limit;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_qp_init_rd_atom = dev->adapter_caps.max_outbound_read_limit;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_ah = INT_MAX;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = MANA_CA_ACK_DELAY;
	if (!mana_ib_is_rnic(dev))
		props->raw_packet_caps = IB_RAW_PACKET_CAP_IP_CSUM;

	return 0;
}

int mana_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props)
{
	struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct net_device *ndev = mana_ib_get_netdev(ibdev, port);

	if (!ndev)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(ndev->mtu);

	if (netif_carrier_ok(ndev) && netif_running(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		props->state = IB_PORT_DOWN;
		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_EDR;
	props->pkey_tbl_len = 1;
	if (mana_ib_is_rnic(dev)) {
		props->gid_tbl_len = 16;
		props->port_cap_flags = IB_PORT_CM_SUP;
		props->ip_gids = true;
	}

	return 0;
}

enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
	if (index != 0)
		return -EINVAL;
	*pkey = IB_DEFAULT_PKEY_FULL;
	return 0;
}

int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
		      union ib_gid *gid)
{
	/* This version doesn't return GID properties */
	return 0;
}

void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
{
	struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
	struct mana_ib_query_adapter_caps_resp resp = {};
	struct mana_ib_query_adapter_caps_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
			     sizeof(resp));
	req.hdr.resp.msg_version = GDMA_MESSAGE_V4;
	req.hdr.dev_id = dev->gdma_dev->dev_id;

	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
				   &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&dev->ib_dev,
			  "Failed to query adapter caps err %d", err);
		return err;
	}

	caps->max_sq_id = resp.max_sq_id;
	caps->max_rq_id = resp.max_rq_id;
	caps->max_cq_id = resp.max_cq_id;
	caps->max_qp_count = resp.max_qp_count;
	caps->max_cq_count = resp.max_cq_count;
	caps->max_mr_count = resp.max_mr_count;
	caps->max_pd_count = resp.max_pd_count;
	caps->max_inbound_read_limit = resp.max_inbound_read_limit;
	caps->max_outbound_read_limit = resp.max_outbound_read_limit;
	caps->mw_count = resp.mw_count;
	caps->max_srq_count = resp.max_srq_count;
	caps->max_qp_wr = min_t(u32,
				resp.max_requester_sq_size / GDMA_MAX_SQE_SIZE,
				resp.max_requester_rq_size / GDMA_MAX_RQE_SIZE);
	caps->max_inline_data_size = resp.max_inline_data_size;
	caps->max_send_sge_count = resp.max_send_sge_count;
	caps->max_recv_sge_count = resp.max_recv_sge_count;
	caps->feature_flags = resp.feature_flags;

	caps->page_size_cap = PAGE_SZ_BM;
	if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
		caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);

	return 0;
}

int mana_eth_query_adapter_caps(struct mana_ib_dev *dev)
{
	struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(resp));

	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&dev->ib_dev,
			  "Failed to query adapter caps err %d", err);
		return err;
	}

	caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq);
	caps->max_cq_count = resp.max_cq;
	caps->max_mr_count = resp.max_mst;
	caps->max_pd_count = 0x6000;
	caps->max_qp_wr = min_t(u32,
				0x100000 / GDMA_MAX_SQE_SIZE,
				0x100000 / GDMA_MAX_RQE_SIZE);
	caps->max_send_sge_count = 30;
	caps->max_recv_sge_count = 15;
	caps->page_size_cap = PAGE_SZ_BM;

	return 0;
}

static void
mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
{
	struct mana_ib_dev *mdev = (struct mana_ib_dev *)ctx;
	struct mana_ib_qp *qp;
	struct ib_event ev;
	u32 qpn;

	switch (event->type) {
	case GDMA_EQE_RNIC_QP_FATAL:
		qpn = event->details[0];
		qp = mana_get_qp_ref(mdev, qpn, false);
		if (!qp)
			break;
		if (qp->ibqp.event_handler) {
			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_QP_FATAL;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
		mana_put_qp_ref(qp);
		break;
	default:
		break;
	}
}

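/*
 * One EQ (fatal_err_eq) carries RNIC fatal-error events and is serviced by
 * mana_ib_event_handler(); the remaining EQs, one per completion vector, have
 * no callback and are spread across the usable MSI-X vectors.
 */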
int mana_ib_create_eqs(struct mana_ib_dev *mdev)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_queue_spec spec = {};
	int err, i;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = mana_ib_event_handler;
	spec.eq.context = mdev;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
	spec.eq.msix_index = 0;

	err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq);
	if (err)
		return err;

	mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *),
			    GFP_KERNEL);
	if (!mdev->eqs) {
		err = -ENOMEM;
		goto destroy_fatal_eq;
	}
	spec.eq.callback = NULL;
	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
		err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
		if (err)
			goto destroy_eqs;
	}

	return 0;

destroy_eqs:
	while (i-- > 0)
		mana_gd_destroy_queue(gc, mdev->eqs[i]);
	kfree(mdev->eqs);
destroy_fatal_eq:
	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
	return err;
}

void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	int i;

	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);

	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
		mana_gd_destroy_queue(gc, mdev->eqs[i]);

	kfree(mdev->eqs);
}

int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
{
	struct mana_rnic_create_adapter_resp resp = {};
	struct mana_rnic_create_adapter_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.notify_eq_id = mdev->fatal_err_eq->id;

	if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
		req.feature_flags |= MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
		return err;
	}
	mdev->adapter_handle = resp.adapter;

	return 0;
}

int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
{
	struct mana_rnic_destroy_adapter_resp resp = {};
	struct mana_rnic_destroy_adapter_req req = {};
	struct gdma_context *gc;
	int err;

	gc = mdev_to_gc(mdev);
	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d", err);
		return err;
	}

	return 0;
}

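/*
 * GID table changes are forwarded to the RNIC as IP address configuration
 * requests; only RoCE v2 (IPv4/IPv6) GIDs are supported, and the GID bytes
 * are copied into the request via copy_in_reverse().
 */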
int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
	struct mana_rnic_config_addr_resp resp = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_config_addr_req req = {};
	int err;

	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
		return -EINVAL;
	}

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = ADDR_OP_ADD;
	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
	struct mana_rnic_config_addr_resp resp = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_config_addr_req req = {};
	int err;

	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
		return -EINVAL;
	}

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = ADDR_OP_REMOVE;
	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
{
	struct mana_rnic_config_mac_addr_resp resp = {};
	struct mana_rnic_config_mac_addr_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = op;
	copy_in_reverse(req.mac_addr, mac, ETH_ALEN);

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config Mac addr err %d", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_cq_resp resp = {};
	struct mana_rnic_create_cq_req req = {};
	int err;

	if (!mdev->eqs)
		return -EINVAL;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.gdma_region = cq->queue.gdma_region;
	req.eq_id = mdev->eqs[cq->comp_vector]->id;
	req.doorbell_page = doorbell;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create cq err %d", err);
		return err;
	}

	cq->queue.id = resp.cq_id;
	cq->cq_handle = resp.cq_handle;
	/* The GDMA region is now owned by the CQ handle */
	cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

	return 0;
}

int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_destroy_cq_resp resp = {};
	struct mana_rnic_destroy_cq_req req = {};
	int err;

	if (cq->cq_handle == INVALID_MANA_HANDLE)
		return 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.cq_handle = cq->cq_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d", err);
		return err;
	}

	return 0;
}

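/*
 * RC and UD QP creation passes one DMA region per queue type to the RNIC;
 * on success the regions are owned by the returned QP handle, so the local
 * region handles are invalidated to avoid a double free on teardown.
 */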
int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
			    struct ib_qp_init_attr *attr, u32 doorbell, u64 flags)
{
	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_qp_resp resp = {};
	struct mana_rnic_create_qp_req req = {};
	int err, i;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.pd_handle = pd->pd_handle;
	req.send_cq_handle = send_cq->cq_handle;
	req.recv_cq_handle = recv_cq->cq_handle;
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++)
		req.dma_region[i] = qp->rc_qp.queues[i].gdma_region;
	req.doorbell_page = doorbell;
	req.max_send_wr = attr->cap.max_send_wr;
	req.max_recv_wr = attr->cap.max_recv_wr;
	req.max_send_sge = attr->cap.max_send_sge;
	req.max_recv_sge = attr->cap.max_recv_sge;
	req.flags = flags;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create rc qp err %d", err);
		return err;
	}
	qp->qp_handle = resp.rc_qp_handle;
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++) {
		qp->rc_qp.queues[i].id = resp.queue_ids[i];
		/* The GDMA regions are now owned by the RNIC QP handle */
		qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
	}
	return 0;
}

int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	struct mana_rnic_destroy_rc_qp_resp resp = {0};
	struct mana_rnic_destroy_rc_qp_req req = {0};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.rc_qp_handle = qp->qp_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy rc qp err %d", err);
		return err;
	}
	return 0;
}

int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
			    struct ib_qp_init_attr *attr, u32 doorbell, u32 type)
{
	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_udqp_resp resp = {};
	struct mana_rnic_create_udqp_req req = {};
	int err, i;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.pd_handle = pd->pd_handle;
	req.send_cq_handle = send_cq->cq_handle;
	req.recv_cq_handle = recv_cq->cq_handle;
	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++)
		req.dma_region[i] = qp->ud_qp.queues[i].gdma_region;
	req.doorbell_page = doorbell;
	req.max_send_wr = attr->cap.max_send_wr;
	req.max_recv_wr = attr->cap.max_recv_wr;
	req.max_send_sge = attr->cap.max_send_sge;
	req.max_recv_sge = attr->cap.max_recv_sge;
	req.qp_type = type;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create ud qp err %d", err);
		return err;
	}
	qp->qp_handle = resp.qp_handle;
	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++) {
		qp->ud_qp.queues[i].id = resp.queue_ids[i];
		/* The GDMA regions are now owned by the RNIC QP handle */
		qp->ud_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
	}
	return 0;
}

int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	struct mana_rnic_destroy_udqp_resp resp = {0};
	struct mana_rnic_destroy_udqp_req req = {0};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.qp_handle = qp->qp_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy ud qp err %d", err);
		return err;
	}
	return 0;
}