xref: /linux/drivers/infiniband/hw/mana/main.c (revision 7255fcc80d4b525cc10cfaaf7f485830d4ed2000)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

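/*
 * Drop one vport reference held by this PD; when the last user goes away,
 * release the hardware vport via mana_uncfg_vport().
 */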
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
			 u32 port)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count--;
	WARN_ON(pd->vport_use_count < 0);

	if (!pd->vport_use_count)
		mana_uncfg_vport(mpc);

	mutex_unlock(&pd->vport_mutex);
}

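/*
 * Bind the hardware vport of this port to the PD and doorbell. The binding
 * is reference-counted per PD, so only the first caller issues the actual
 * mana_cfg_vport() request; later callers just take another reference.
 */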
int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
		      u32 doorbell_id)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;
	int err;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count++;
	if (pd->vport_use_count > 1) {
		ibdev_dbg(&dev->ib_dev,
			  "Skip as this PD is already configured for a vport\n");
		mutex_unlock(&pd->vport_mutex);
		return 0;
	}

	err = mana_cfg_vport(mpc, pd->pdn, doorbell_id);
	if (err) {
		pd->vport_use_count--;
		mutex_unlock(&pd->vport_mutex);

		ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n", err);
		return err;
	}

	mutex_unlock(&pd->vport_mutex);

	pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
	pd->tx_vp_offset = mpc->tx_vp_offset;

	ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n",
		  mpc->port_handle, pd->pdn, doorbell_id);

	return 0;
}

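/*
 * Allocate a protection domain: send GDMA_CREATE_PD to the device and record
 * the returned PD handle and id in the mana_ib_pd.
 */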
int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_create_pd_resp resp = {};
	struct gdma_create_pd_req req = {};
	enum gdma_pd_flags flags = 0;
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
			     sizeof(resp));

	req.flags = flags;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to get pd_id err %d status %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	pd->pd_handle = resp.pd_handle;
	pd->pdn = resp.pd_id;
	ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
		  pd->pd_handle, pd->pdn);

	mutex_init(&pd->vport_mutex);
	pd->vport_use_count = 0;
	return 0;
}

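/*
 * Free a protection domain by sending GDMA_DESTROY_PD for the handle that
 * was returned at allocation time.
 */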
int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_destory_pd_resp resp = {};
	struct gdma_destroy_pd_req req = {};
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
			     sizeof(resp));

	req.pd_handle = pd->pd_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to destroy pd_handle 0x%llx err %d status %u\n",
			  pd->pd_handle, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;
	}

	return err;
}

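/*
 * Return a previously allocated doorbell page index to the GDMA doorbell
 * resource range.
 */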
static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
					 int doorbell_page)
{
	struct gdma_destroy_resource_range_req req = {};
	struct gdma_resp_hdr resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.allocated_resources = doorbell_page;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.status) {
		dev_err(gc->dev,
			"Failed to destroy doorbell page: ret %d, 0x%x\n",
			err, resp.status);
		return err ?: -EPROTO;
	}

	return 0;
}

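/*
 * Reserve a single doorbell page index from the GDMA doorbell resource
 * range; the index is later used to map the page to user space.
 */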
static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
					  int *doorbell_page)
{
	struct gdma_allocate_resource_range_req req = {};
	struct gdma_allocate_resource_range_resp resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.alignment = 1;

	/* Have GDMA start searching from 0 */
	req.allocated_resources = 0;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev,
			"Failed to allocate doorbell page: ret %d, 0x%x\n",
			err, resp.hdr.status);
		return err ?: -EPROTO;
	}

	*doorbell_page = resp.allocated_resources;

	return 0;
}

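/*
 * Each user context gets its own doorbell page; the index allocated here is
 * what mana_ib_mmap() later maps into the process' address space.
 */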
int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
			   struct ib_udata *udata)
{
	struct mana_ib_ucontext *ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int doorbell_page;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	/* Allocate a doorbell page index */
	ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
	if (ret) {
		ibdev_dbg(ibdev, "Failed to allocate doorbell page %d\n", ret);
		return ret;
	}

	ibdev_dbg(ibdev, "Doorbell page allocated %d\n", doorbell_page);

	ucontext->doorbell = doorbell_page;

	return 0;
}

void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
	if (ret)
		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}

static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
			    struct gdma_context *gc,
			    struct gdma_create_dma_region_req *create_req,
			    size_t num_pages, mana_handle_t *gdma_region,
			    u32 expected_status)
{
	struct gdma_create_dma_region_resp create_resp = {};
	unsigned int create_req_msg_size;
	int err;

	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages);
	create_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, create_req_msg_size, create_req,
				   sizeof(create_resp), &create_resp);
	if (err || create_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to create DMA region: %d, 0x%x\n",
			  err, create_resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	*gdma_region = create_resp.dma_region_handle;
	ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
		  *gdma_region);

	return 0;
}

static int
mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
			  struct gdma_dma_region_add_pages_req *add_req,
			  unsigned int num_pages, u32 expected_status)
{
	unsigned int add_req_msg_size =
		struct_size(add_req, page_addr_list, num_pages);
	struct gdma_general_resp add_resp = {};
	int err;

	mana_gd_init_req_hdr(&add_req->hdr, GDMA_DMA_REGION_ADD_PAGES,
			     add_req_msg_size, sizeof(add_resp));
	add_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, add_req_msg_size, add_req,
				   sizeof(add_resp), &add_resp);
	if (err || add_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to add pages to DMA region: %d, 0x%x\n",
			  err, add_resp.hdr.status);

		if (!err)
			err = -EPROTO;

		return err;
	}

	return 0;
}

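/*
 * Register the umem with the hardware as a DMA region. The page list may not
 * fit in a single HWC message, so the region is built in chunks: the first
 * GDMA_CREATE_DMA_REGION request carries as many page addresses as fit in
 * hwc->max_req_msg_size, and any remaining pages are appended with
 * GDMA_DMA_REGION_ADD_PAGES requests against the returned handle. Every
 * message except the last is expected to complete with
 * GDMA_STATUS_MORE_ENTRIES.
 */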
static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					mana_handle_t *gdma_region, unsigned long page_sz)
{
	struct gdma_dma_region_add_pages_req *add_req = NULL;
	size_t num_pages_processed = 0, num_pages_to_handle;
	struct gdma_create_dma_region_req *create_req;
	unsigned int create_req_msg_size;
	struct hw_channel_context *hwc;
	struct ib_block_iter biter;
	size_t max_pgs_add_cmd = 0;
	size_t max_pgs_create_cmd;
	struct gdma_context *gc;
	size_t num_pages_total;
	unsigned int tail = 0;
	u64 *page_addr_list;
	void *request_buf;
	int err;

	gc = mdev_to_gc(dev);
	hwc = gc->hwc.driver_data;

	num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);

	max_pgs_create_cmd =
		(hwc->max_req_msg_size - sizeof(*create_req)) / sizeof(u64);
	num_pages_to_handle =
		min_t(size_t, num_pages_total, max_pgs_create_cmd);
	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages_to_handle);

	request_buf = kzalloc(hwc->max_req_msg_size, GFP_KERNEL);
	if (!request_buf)
		return -ENOMEM;

	create_req = request_buf;
	mana_gd_init_req_hdr(&create_req->hdr, GDMA_CREATE_DMA_REGION,
			     create_req_msg_size,
			     sizeof(struct gdma_create_dma_region_resp));

	create_req->length = umem->length;
	create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
	create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
	create_req->page_count = num_pages_total;

	ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
		  umem->length, num_pages_total);

	ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
		  page_sz, create_req->offset_in_page);

	ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u\n",
		  num_pages_to_handle, create_req->gdma_page_type);

	page_addr_list = create_req->page_addr_list;
	rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
		u32 expected_status = 0;

		page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
		if (tail < num_pages_to_handle)
			continue;

		if (num_pages_processed + num_pages_to_handle <
		    num_pages_total)
			expected_status = GDMA_STATUS_MORE_ENTRIES;

		if (!num_pages_processed) {
			/* First create message */
			err = mana_ib_gd_first_dma_region(dev, gc, create_req,
							  tail, gdma_region,
							  expected_status);
			if (err)
				goto out;

			max_pgs_add_cmd = (hwc->max_req_msg_size -
				sizeof(*add_req)) / sizeof(u64);

			add_req = request_buf;
			add_req->dma_region_handle = *gdma_region;
			add_req->reserved3 = 0;
			page_addr_list = add_req->page_addr_list;
		} else {
			/* Subsequent messages add pages to the region */
			err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
							expected_status);
			if (err)
				break;
		}

		num_pages_processed += tail;
		tail = 0;

		/* The remaining pages to create */
		num_pages_to_handle =
			min_t(size_t,
			      num_pages_total - num_pages_processed,
			      max_pgs_add_cmd);
	}

	if (err)
		mana_ib_gd_destroy_dma_region(dev, *gdma_region);

out:
	kfree(request_buf);
	return err;
}

int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
			      mana_handle_t *gdma_region, u64 virt)
{
	unsigned long page_sz;

	page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					  mana_handle_t *gdma_region)
{
	unsigned long page_sz;

	/* Hardware requires the DMA region to align to the chosen page size */
	page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
	struct gdma_context *gc = mdev_to_gc(dev);

	ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);

	return mana_gd_destroy_dma_region(gc, gdma_region);
}

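/*
 * Map the doorbell page owned by this user context into the caller's address
 * space as write-combined I/O memory. Only offset 0 is supported.
 */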
int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	if (vma->vm_pgoff != 0) {
		ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
		return -EINVAL;
	}

	/* Map to the page indexed by ucontext->doorbell */
	pfn = (gc->phys_db_page_base +
	       gc->db_page_size * mana_ucontext->doorbell) >>
	      PAGE_SHIFT;
	prot = pgprot_writecombine(vma->vm_page_prot);

	ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot,
				NULL);
	if (ret)
		ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
	else
		ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n",
			  pfn, gc->db_page_size, ret);

	return ret;
}

int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	/*
	 * This version only supports RAW_PACKET;
	 * other fields need to be filled in for other port types.
	 */
	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;

	return 0;
}

int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			 struct ib_udata *uhw)
{
	struct mana_ib_dev *dev = container_of(ibdev,
			struct mana_ib_dev, ib_dev);

	props->max_qp = dev->adapter_caps.max_qp_count;
	props->max_qp_wr = dev->adapter_caps.max_qp_wr;
	props->max_cq = dev->adapter_caps.max_cq_count;
	props->max_cqe = dev->adapter_caps.max_qp_wr;
	props->max_mr = dev->adapter_caps.max_mr_count;
	props->max_mr_size = MANA_IB_MAX_MR_SIZE;
	props->max_send_sge = dev->adapter_caps.max_send_sge_count;
	props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;

	return 0;
}

int mana_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props)
{
	/* This version doesn't return port properties */
	return 0;
}

int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
		      union ib_gid *gid)
{
	/* This version doesn't return GID properties */
	return 0;
}

void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

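/*
 * Issue MANA_IB_GET_ADAPTER_CAP and cache the returned limits in
 * dev->adapter_caps; these cached values back mana_ib_query_device().
 */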
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
{
	struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
	struct mana_ib_query_adapter_caps_resp resp = {};
	struct mana_ib_query_adapter_caps_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
			     sizeof(resp));
	req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
	req.hdr.dev_id = dev->gdma_dev->dev_id;

	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
				   &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&dev->ib_dev,
			  "Failed to query adapter caps err %d\n", err);
		return err;
	}

	caps->max_sq_id = resp.max_sq_id;
	caps->max_rq_id = resp.max_rq_id;
	caps->max_cq_id = resp.max_cq_id;
	caps->max_qp_count = resp.max_qp_count;
	caps->max_cq_count = resp.max_cq_count;
	caps->max_mr_count = resp.max_mr_count;
	caps->max_pd_count = resp.max_pd_count;
	caps->max_inbound_read_limit = resp.max_inbound_read_limit;
	caps->max_outbound_read_limit = resp.max_outbound_read_limit;
	caps->mw_count = resp.mw_count;
	caps->max_srq_count = resp.max_srq_count;
	caps->max_qp_wr = min_t(u32,
				resp.max_requester_sq_size / GDMA_MAX_SQE_SIZE,
				resp.max_requester_rq_size / GDMA_MAX_RQE_SIZE);
	caps->max_inline_data_size = resp.max_inline_data_size;
	caps->max_send_sge_count = resp.max_send_sge_count;
	caps->max_recv_sge_count = resp.max_recv_sge_count;

	return 0;
}