// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"
#include <linux/pci.h>

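/*
 * Vport configuration is shared per protection domain: the first
 * mana_ib_cfg_vport() call on a PD configures the vport through the
 * Ethernet driver (mana_cfg_vport()), later calls only take another
 * reference, and mana_ib_uncfg_vport() unconfigures the vport once
 * vport_use_count drops back to zero.
 */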
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
			 u32 port)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count--;
	WARN_ON(pd->vport_use_count < 0);

	if (!pd->vport_use_count)
		mana_uncfg_vport(mpc);

	mutex_unlock(&pd->vport_mutex);
}

int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
		      u32 doorbell_id)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;
	int err;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count++;
	if (pd->vport_use_count > 1) {
		ibdev_dbg(&dev->ib_dev,
			  "Skip as this PD is already configured for the vport\n");
		mutex_unlock(&pd->vport_mutex);
		return 0;
	}

	err = mana_cfg_vport(mpc, pd->pdn, doorbell_id);
	if (err) {
		pd->vport_use_count--;
		mutex_unlock(&pd->vport_mutex);

		ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n", err);
		return err;
	}

	mutex_unlock(&pd->vport_mutex);

	pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
	pd->tx_vp_offset = mpc->tx_vp_offset;

	ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n",
		  mpc->port_handle, pd->pdn, doorbell_id);

	return 0;
}

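/*
 * Allocate a protection domain on the adapter with a GDMA_CREATE_PD
 * request. For kernel PDs (no udata) the GDMA_PD_FLAG_ALLOW_GPA_MR flag
 * is set, which presumably permits MRs that reference guest physical
 * addresses directly.
 */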
int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_create_pd_resp resp = {};
	struct gdma_create_pd_req req = {};
	enum gdma_pd_flags flags = 0;
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
			     sizeof(resp));

	if (!udata)
		flags |= GDMA_PD_FLAG_ALLOW_GPA_MR;

	req.flags = flags;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to get pd_id err %d status %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	pd->pd_handle = resp.pd_handle;
	pd->pdn = resp.pd_id;
	ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
		  pd->pd_handle, pd->pdn);

	mutex_init(&pd->vport_mutex);
	pd->vport_use_count = 0;
	return 0;
}

int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_destory_pd_resp resp = {};
	struct gdma_destroy_pd_req req = {};
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
			     sizeof(resp));

	req.pd_handle = pd->pd_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to destroy pd_handle 0x%llx err %d status %u",
			  pd->pd_handle, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;
	}

	return err;
}

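/*
 * Doorbell pages are managed as a generic GDMA resource range. The two
 * helpers below allocate and free a single GDMA_RESOURCE_DOORBELL_PAGE
 * entry; the alignment request scales the allocation so that a doorbell
 * page presumably lines up with a full CPU page even when PAGE_SIZE is
 * larger than MANA_PAGE_SIZE.
 */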
static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
					 int doorbell_page)
{
	struct gdma_destroy_resource_range_req req = {};
	struct gdma_resp_hdr resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.allocated_resources = doorbell_page;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.status) {
		dev_err(gc->dev,
			"Failed to destroy doorbell page: ret %d, 0x%x\n",
			err, resp.status);
		return err ?: -EPROTO;
	}

	return 0;
}

static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
					  int *doorbell_page)
{
	struct gdma_allocate_resource_range_req req = {};
	struct gdma_allocate_resource_range_resp resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;

	/* Have GDMA start searching from 0 */
	req.allocated_resources = 0;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev,
			"Failed to allocate doorbell page: ret %d, 0x%x\n",
			err, resp.hdr.status);
		return err ?: -EPROTO;
	}

	*doorbell_page = resp.allocated_resources;

	return 0;
}

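/*
 * Each user context owns one doorbell page index: it is allocated here,
 * mapped into the process by mana_ib_mmap() and returned to the hardware
 * in mana_ib_dealloc_ucontext().
 */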
int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
			   struct ib_udata *udata)
{
	struct mana_ib_ucontext *ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int doorbell_page;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	/* Allocate a doorbell page index */
	ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
	if (ret) {
		ibdev_dbg(ibdev, "Failed to allocate doorbell page %d\n", ret);
		return ret;
	}

	ibdev_dbg(ibdev, "Doorbell page allocated %d\n", doorbell_page);

	ucontext->doorbell = doorbell_page;

	return 0;
}

void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
	if (ret)
		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}

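/*
 * Create a kernel-owned queue through the mana Ethernet driver and move
 * ownership of its DMA region handle into mana_ib, so the region is
 * released from the RDMA side (see mana_ib_destroy_queue()) instead of by
 * the Ethernet driver.
 */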
int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
				struct mana_ib_queue *queue)
{
	struct gdma_queue_spec spec = {};
	int err;

	queue->id = INVALID_QUEUE_ID;
	queue->gdma_region = GDMA_INVALID_DMA_REGION;
	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = size;
	err = mana_gd_create_mana_wq_cq(mdev->gdma_dev, &spec, &queue->kmem);
	if (err)
		return err;
	/* mana_ib takes ownership of the DMA region from the mana Ethernet driver */
	queue->gdma_region = queue->kmem->mem_info.dma_region_handle;
	queue->kmem->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
	return 0;
}

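/*
 * Create a queue backed by user memory: pin the buffer with ib_umem_get()
 * and register it as a zero-offset DMA region. The hardware queue id is
 * assigned later, when the queue is bound to a CQ or QP.
 */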
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
			 struct mana_ib_queue *queue)
{
	struct ib_umem *umem;
	int err;

	queue->umem = NULL;
	queue->id = INVALID_QUEUE_ID;
	queue->gdma_region = GDMA_INVALID_DMA_REGION;

	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %pe\n", umem);
		return PTR_ERR(umem);
	}

	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
		goto free_umem;
	}
	queue->umem = umem;

	ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);

	return 0;
free_umem:
	ib_umem_release(umem);
	return err;
}

void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
{
	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
	ib_umem_release(queue->umem);
	if (queue->kmem)
		mana_gd_destroy_queue(mdev_to_gc(mdev), queue->kmem);
}

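/*
 * Helpers for the two-step DMA region registration: GDMA_CREATE_DMA_REGION
 * creates the region and carries the first batch of page addresses, and any
 * remaining pages follow in GDMA_DMA_REGION_ADD_PAGES requests. Intermediate
 * messages are expected to complete with GDMA_STATUS_MORE_ENTRIES, the final
 * one with status 0.
 */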
static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
			    struct gdma_context *gc,
			    struct gdma_create_dma_region_req *create_req,
			    size_t num_pages, mana_handle_t *gdma_region,
			    u32 expected_status)
{
	struct gdma_create_dma_region_resp create_resp = {};
	unsigned int create_req_msg_size;
	int err;

	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages);
	create_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, create_req_msg_size, create_req,
				   sizeof(create_resp), &create_resp);
	if (err || create_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to create DMA region: %d, 0x%x\n",
			  err, create_resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	*gdma_region = create_resp.dma_region_handle;
	ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
		  *gdma_region);

	return 0;
}

static int
mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
			  struct gdma_dma_region_add_pages_req *add_req,
			  unsigned int num_pages, u32 expected_status)
{
	unsigned int add_req_msg_size =
		struct_size(add_req, page_addr_list, num_pages);
	struct gdma_general_resp add_resp = {};
	int err;

	mana_gd_init_req_hdr(&add_req->hdr, GDMA_DMA_REGION_ADD_PAGES,
			     add_req_msg_size, sizeof(add_resp));
	add_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, add_req_msg_size, add_req,
				   sizeof(add_resp), &add_resp);
	if (err || add_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to add pages to DMA region: %d, 0x%x\n",
			  err, add_resp.hdr.status);

		if (!err)
			err = -EPROTO;

		return err;
	}

	return 0;
}

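/*
 * Register a umem-backed DMA region with the hardware. The page address
 * array for the whole region may not fit into a single hardware channel
 * (HWC) message, so the pages are streamed in chunks: as many as fit are
 * sent with the create request and the rest via add-pages requests, all
 * reusing one request buffer of hwc->max_req_msg_size bytes.
 */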
static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					mana_handle_t *gdma_region, unsigned long page_sz)
{
	struct gdma_dma_region_add_pages_req *add_req = NULL;
	size_t num_pages_processed = 0, num_pages_to_handle;
	struct gdma_create_dma_region_req *create_req;
	unsigned int create_req_msg_size;
	struct hw_channel_context *hwc;
	struct ib_block_iter biter;
	size_t max_pgs_add_cmd = 0;
	size_t max_pgs_create_cmd;
	struct gdma_context *gc;
	size_t num_pages_total;
	unsigned int tail = 0;
	u64 *page_addr_list;
	void *request_buf;
	int err = 0;

	gc = mdev_to_gc(dev);
	hwc = gc->hwc.driver_data;

	num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);

	max_pgs_create_cmd =
		(hwc->max_req_msg_size - sizeof(*create_req)) / sizeof(u64);
	num_pages_to_handle =
		min_t(size_t, num_pages_total, max_pgs_create_cmd);
	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages_to_handle);

	request_buf = kzalloc(hwc->max_req_msg_size, GFP_KERNEL);
	if (!request_buf)
		return -ENOMEM;

	create_req = request_buf;
	mana_gd_init_req_hdr(&create_req->hdr, GDMA_CREATE_DMA_REGION,
			     create_req_msg_size,
			     sizeof(struct gdma_create_dma_region_resp));

	create_req->length = umem->length;
	create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
	create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
	create_req->page_count = num_pages_total;

	ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
		  umem->length, num_pages_total);

	ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
		  page_sz, create_req->offset_in_page);

	ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u",
		  num_pages_to_handle, create_req->gdma_page_type);

	page_addr_list = create_req->page_addr_list;
	rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
		u32 expected_status = 0;

		page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
		if (tail < num_pages_to_handle)
			continue;

		if (num_pages_processed + num_pages_to_handle <
		    num_pages_total)
			expected_status = GDMA_STATUS_MORE_ENTRIES;

		if (!num_pages_processed) {
			/* First create message */
			err = mana_ib_gd_first_dma_region(dev, gc, create_req,
							  tail, gdma_region,
							  expected_status);
			if (err)
				goto out;

			max_pgs_add_cmd = (hwc->max_req_msg_size -
				sizeof(*add_req)) / sizeof(u64);

			add_req = request_buf;
			add_req->dma_region_handle = *gdma_region;
			add_req->reserved3 = 0;
			page_addr_list = add_req->page_addr_list;
		} else {
			/* Subsequent messages add pages to the region */
			err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
							expected_status);
			if (err)
				break;
		}

		num_pages_processed += tail;
		tail = 0;

		/* Pages remaining for the next add-pages message */
		num_pages_to_handle =
			min_t(size_t,
			      num_pages_total - num_pages_processed,
			      max_pgs_add_cmd);
	}

	if (err)
		mana_ib_gd_destroy_dma_region(dev, *gdma_region);

out:
	kfree(request_buf);
	return err;
}

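/*
 * Two wrappers choose the page size for a region: the MR path picks the
 * best page size for the given virtual address, while queues built from
 * user memory use the zero-offset variant because the hardware expects
 * those regions to start at offset zero within the chosen page size.
 */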
int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
			      mana_handle_t *gdma_region, u64 virt)
{
	unsigned long page_sz;

	page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					  mana_handle_t *gdma_region)
{
	unsigned long page_sz;

	/* Hardware requires dma region to align to chosen page size */
	page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
	struct gdma_context *gc = mdev_to_gc(dev);

	ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);

	return mana_gd_destroy_dma_region(gc, gdma_region);
}

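/*
 * mmap() on this device maps exactly one thing: the doorbell page reserved
 * for the calling user context, at vm_pgoff 0 and with write-combining, so
 * that user space can ring doorbells without a kernel transition.
 */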
int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	if (vma->vm_pgoff != 0) {
		ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
		return -EINVAL;
	}

	/* Map to the page indexed by ucontext->doorbell */
	pfn = (gc->phys_db_page_base +
	       gc->db_page_size * mana_ucontext->doorbell) >>
	      PAGE_SHIFT;
	prot = pgprot_writecombine(vma->vm_page_prot);

	ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
				NULL);
	if (ret)
		ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
	else
		ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
			  pfn, PAGE_SIZE, ret);

	return ret;
}

int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	if (mana_ib_is_rnic(dev)) {
		if (port_num == 1) {
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
			immutable->max_mad_size = IB_MGMT_MAD_SIZE;
		} else {
			immutable->core_cap_flags = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP
						    | RDMA_CORE_CAP_ETH_AH;
			immutable->max_mad_size = 0;
		}
	} else {
		immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
	}

	return 0;
}

int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			 struct ib_udata *uhw)
{
	struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct pci_dev *pdev = to_pci_dev(mdev_to_gc(dev)->dev);

	memset(props, 0, sizeof(*props));
	props->vendor_id = pdev->vendor;
	props->vendor_part_id = dev->gdma_dev->dev_id.type;
	props->max_mr_size = MANA_IB_MAX_MR_SIZE;
	props->page_size_cap = dev->adapter_caps.page_size_cap;
	props->max_qp = dev->adapter_caps.max_qp_count;
	props->max_qp_wr = dev->adapter_caps.max_qp_wr;
	props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = dev->adapter_caps.max_send_sge_count;
	props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;
	props->max_sge_rd = dev->adapter_caps.max_recv_sge_count;
	props->max_cq = dev->adapter_caps.max_cq_count;
	props->max_cqe = dev->adapter_caps.max_qp_wr;
	props->max_mr = dev->adapter_caps.max_mr_count;
	props->max_pd = dev->adapter_caps.max_pd_count;
	props->max_qp_rd_atom = dev->adapter_caps.max_inbound_read_limit;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_qp_init_rd_atom = dev->adapter_caps.max_outbound_read_limit;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_ah = INT_MAX;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = MANA_CA_ACK_DELAY;
	if (!mana_ib_is_rnic(dev))
		props->raw_packet_caps = IB_RAW_PACKET_CAP_IP_CSUM;

	return 0;
}

int mana_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props)
{
	struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct net_device *ndev = mana_ib_get_netdev(ibdev, port);

	if (!ndev)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(ndev->mtu);

	if (netif_carrier_ok(ndev) && netif_running(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		props->state = IB_PORT_DOWN;
		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_EDR;
	props->pkey_tbl_len = 1;
	if (mana_ib_is_rnic(dev)) {
		props->gid_tbl_len = 16;
		props->ip_gids = true;
		if (port == 1)
			props->port_cap_flags = IB_PORT_CM_SUP;
	}

	return 0;
}

enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
	if (index != 0)
		return -EINVAL;
	*pkey = IB_DEFAULT_PKEY_FULL;
	return 0;
}

int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
		      union ib_gid *gid)
{
	/* This version doesn't return GID properties */
	return 0;
}

void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

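/*
 * Query the RNIC limits over the hardware channel and cache them in
 * dev->adapter_caps; the values are later reported by
 * mana_ib_query_device(). max_qp_wr is derived from the requester queue
 * sizes divided by the maximum WQE sizes.
 */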
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
{
	struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
	struct mana_ib_query_adapter_caps_resp resp = {};
	struct mana_ib_query_adapter_caps_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
			     sizeof(resp));
	req.hdr.resp.msg_version = GDMA_MESSAGE_V4;
	req.hdr.dev_id = dev->gdma_dev->dev_id;

	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
				   &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&dev->ib_dev,
			  "Failed to query adapter caps err %d", err);
		return err;
	}

	caps->max_sq_id = resp.max_sq_id;
	caps->max_rq_id = resp.max_rq_id;
	caps->max_cq_id = resp.max_cq_id;
	caps->max_qp_count = resp.max_qp_count;
	caps->max_cq_count = resp.max_cq_count;
	caps->max_mr_count = resp.max_mr_count;
	caps->max_pd_count = resp.max_pd_count;
	caps->max_inbound_read_limit = resp.max_inbound_read_limit;
	caps->max_outbound_read_limit = resp.max_outbound_read_limit;
	caps->mw_count = resp.mw_count;
	caps->max_srq_count = resp.max_srq_count;
	caps->max_qp_wr = min_t(u32,
				resp.max_requester_sq_size / GDMA_MAX_SQE_SIZE,
				resp.max_requester_rq_size / GDMA_MAX_RQE_SIZE);
	caps->max_inline_data_size = resp.max_inline_data_size;
	caps->max_send_sge_count = resp.max_send_sge_count;
	caps->max_recv_sge_count = resp.max_recv_sge_count;
	caps->feature_flags = resp.feature_flags;

	caps->page_size_cap = PAGE_SZ_BM;
	if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
		caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);

	return 0;
}

int mana_eth_query_adapter_caps(struct mana_ib_dev *dev)
{
	struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(resp));

	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&dev->ib_dev,
			  "Failed to query adapter caps err %d", err);
		return err;
	}

	caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq);
	caps->max_cq_count = resp.max_cq;
	caps->max_mr_count = resp.max_mst;
	caps->max_pd_count = 0x6000;
	caps->max_qp_wr = min_t(u32,
				0x100000 / GDMA_MAX_SQE_SIZE,
				0x100000 / GDMA_MAX_RQE_SIZE);
	caps->max_send_sge_count = 30;
	caps->max_recv_sge_count = 15;
	caps->page_size_cap = PAGE_SZ_BM;

	return 0;
}

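/*
 * EQ callback for the fatal-error event queue: on a GDMA_EQE_RNIC_QP_FATAL
 * event the affected QP is looked up by number and, if the consumer
 * registered an event handler, an IB_EVENT_QP_FATAL event is delivered to
 * it.
 */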
static void
mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
{
	struct mana_ib_dev *mdev = (struct mana_ib_dev *)ctx;
	struct mana_ib_qp *qp;
	struct ib_event ev;
	u32 qpn;

	switch (event->type) {
	case GDMA_EQE_RNIC_QP_FATAL:
		qpn = event->details[0];
		qp = mana_get_qp_ref(mdev, qpn, false);
		if (!qp)
			break;
		if (qp->ibqp.event_handler) {
			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_QP_FATAL;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
		mana_put_qp_ref(qp);
		break;
	default:
		break;
	}
}

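/*
 * Create one EQ for fatal-error notifications (with mana_ib_event_handler
 * attached) plus one EQ per completion vector for CQs, spreading the
 * completion EQs across the usable MSI-X vectors starting at index 1.
 */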
int mana_ib_create_eqs(struct mana_ib_dev *mdev)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_queue_spec spec = {};
	int err, i;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = mana_ib_event_handler;
	spec.eq.context = mdev;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
	spec.eq.msix_index = 0;

	err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq);
	if (err)
		return err;

	mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *),
			    GFP_KERNEL);
	if (!mdev->eqs) {
		err = -ENOMEM;
		goto destroy_fatal_eq;
	}
	spec.eq.callback = NULL;
	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
		err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
		if (err)
			goto destroy_eqs;
	}

	return 0;

destroy_eqs:
	while (i-- > 0)
		mana_gd_destroy_queue(gc, mdev->eqs[i]);
	kfree(mdev->eqs);
destroy_fatal_eq:
	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
	return err;
}

void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	int i;

	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);

	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
		mana_gd_destroy_queue(gc, mdev->eqs[i]);

	kfree(mdev->eqs);
}

int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
{
	struct mana_rnic_create_adapter_resp resp = {};
	struct mana_rnic_create_adapter_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.notify_eq_id = mdev->fatal_err_eq->id;

	if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
		req.feature_flags |= MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
		return err;
	}
	mdev->adapter_handle = resp.adapter;

	return 0;
}

int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
{
	struct mana_rnic_destroy_adapter_resp resp = {};
	struct mana_rnic_destroy_adapter_req req = {};
	struct gdma_context *gc;
	int err;

	gc = mdev_to_gc(mdev);
	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d", err);
		return err;
	}

	return 0;
}

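/*
 * RoCE v2 GID table management: GIDs are programmed into the adapter as
 * IPv4/IPv6 addresses via MANA_IB_CONFIG_IP_ADDR, so only IP-based GID
 * types are accepted; copy_in_reverse() presumably produces the byte order
 * the hardware expects.
 */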
int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
	struct mana_rnic_config_addr_resp resp = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_config_addr_req req = {};
	int err;

	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
		return -EINVAL;
	}

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = ADDR_OP_ADD;
	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
	struct mana_rnic_config_addr_resp resp = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_config_addr_req req = {};
	int err;

	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
		return -EINVAL;
	}

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = ADDR_OP_REMOVE;
	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
{
	struct mana_rnic_config_mac_addr_resp resp = {};
	struct mana_rnic_config_mac_addr_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = op;
	copy_in_reverse(req.mac_addr, mac, ETH_ALEN);

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config Mac addr err %d", err);
		return err;
	}

	return 0;
}

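/*
 * Create the hardware CQ object for a completion queue whose ring buffer
 * was registered as a DMA region earlier. The region handle is consumed by
 * this call, so it is marked invalid here and released through the CQ
 * handle instead.
 */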
int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_cq_resp resp = {};
	struct mana_rnic_create_cq_req req = {};
	int err;

	if (!mdev->eqs)
		return -EINVAL;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.gdma_region = cq->queue.gdma_region;
	req.eq_id = mdev->eqs[cq->comp_vector]->id;
	req.doorbell_page = doorbell;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create cq err %d", err);
		return err;
	}

	cq->queue.id = resp.cq_id;
	cq->cq_handle = resp.cq_handle;
	/* The GDMA region is now owned by the CQ handle */
	cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

	return 0;
}

int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_destroy_cq_resp resp = {};
	struct mana_rnic_destroy_cq_req req = {};
	int err;

	if (cq->cq_handle == INVALID_MANA_HANDLE)
		return 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.cq_handle = cq->cq_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d", err);
		return err;
	}

	return 0;
}

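/*
 * RC and UD/GSI QPs are created with a single firmware request carrying
 * the PD, the send/receive CQ handles, the per-queue DMA regions and the
 * doorbell page. On success the regions become owned by the returned QP
 * handle and the hardware queue ids are recorded per queue.
 */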
int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
			    struct ib_qp_init_attr *attr, u32 doorbell, u64 flags)
{
	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_qp_resp resp = {};
	struct mana_rnic_create_qp_req req = {};
	int err, i;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.pd_handle = pd->pd_handle;
	req.send_cq_handle = send_cq->cq_handle;
	req.recv_cq_handle = recv_cq->cq_handle;
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++)
		req.dma_region[i] = qp->rc_qp.queues[i].gdma_region;
	req.doorbell_page = doorbell;
	req.max_send_wr = attr->cap.max_send_wr;
	req.max_recv_wr = attr->cap.max_recv_wr;
	req.max_send_sge = attr->cap.max_send_sge;
	req.max_recv_sge = attr->cap.max_recv_sge;
	req.flags = flags;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create rc qp err %d", err);
		return err;
	}
	qp->qp_handle = resp.rc_qp_handle;
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++) {
		qp->rc_qp.queues[i].id = resp.queue_ids[i];
		/* The GDMA regions are now owned by the RNIC QP handle */
		qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
	}
	return 0;
}

int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	struct mana_rnic_destroy_rc_qp_resp resp = {0};
	struct mana_rnic_destroy_rc_qp_req req = {0};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.rc_qp_handle = qp->qp_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy rc qp err %d", err);
		return err;
	}
	return 0;
}

int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
			    struct ib_qp_init_attr *attr, u32 doorbell, u32 type)
{
	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_udqp_resp resp = {};
	struct mana_rnic_create_udqp_req req = {};
	int err, i;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.pd_handle = pd->pd_handle;
	req.send_cq_handle = send_cq->cq_handle;
	req.recv_cq_handle = recv_cq->cq_handle;
	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++)
		req.dma_region[i] = qp->ud_qp.queues[i].gdma_region;
	req.doorbell_page = doorbell;
	req.max_send_wr = attr->cap.max_send_wr;
	req.max_recv_wr = attr->cap.max_recv_wr;
	req.max_send_sge = attr->cap.max_send_sge;
	req.max_recv_sge = attr->cap.max_recv_sge;
	req.qp_type = type;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create ud qp err %d", err);
		return err;
	}
	qp->qp_handle = resp.qp_handle;
	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++) {
		qp->ud_qp.queues[i].id = resp.queue_ids[i];
		/* The GDMA regions are now owned by the RNIC QP handle */
		qp->ud_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
	}
	return 0;
}

int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	struct mana_rnic_destroy_udqp_resp resp = {0};
	struct mana_rnic_destroy_udqp_req req = {0};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.qp_handle = qp->qp_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy ud qp err %d", err);
		return err;
	}
	return 0;
}