xref: /linux/drivers/infiniband/hw/mana/mr.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
			IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND | IB_ZERO_BASED)

#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)

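/*
 * Translate IB verbs access flags into the GDMA MR access flags expected by
 * the hardware. Local read access is always granted; the remaining bits are
 * set only when the corresponding IB flag is requested.
 */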
static enum gdma_mr_access_flags
mana_ib_verbs_to_gdma_access_flags(int access_flags)
{
	enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;

	if (access_flags & IB_ACCESS_LOCAL_WRITE)
		flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		flags |= GDMA_ACCESS_FLAG_REMOTE_READ;

	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;

	if (access_flags & IB_ACCESS_MW_BIND)
		flags |= GDMA_ACCESS_FLAG_BIND_MW;

	return flags;
}

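/*
 * Send a GDMA_CREATE_MR request for the given MR type and parameters and
 * record the returned lkey, rkey and MR handle in @mr.
 */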
static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
				struct gdma_create_mr_params *mr_params)
{
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(resp));
	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
	req.pd_handle = mr_params->pd_handle;
	req.mr_type = mr_params->mr_type;

	switch (mr_params->mr_type) {
	case GDMA_MR_TYPE_GPA:
		break;
	case GDMA_MR_TYPE_GVA:
		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
		req.gva.virtual_address = mr_params->gva.virtual_address;
		req.gva.access_flags = mr_params->gva.access_flags;
		break;
	case GDMA_MR_TYPE_ZBVA:
		req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
		req.zbva.access_flags = mr_params->zbva.access_flags;
		break;
	case GDMA_MR_TYPE_DM:
		req.da_ext.length = mr_params->da.length;
		req.da.dm_handle = mr_params->da.dm_handle;
		req.da.offset = mr_params->da.offset;
		req.da.access_flags = mr_params->da.access_flags;
		break;
	default:
		ibdev_dbg(&dev->ib_dev,
			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
			  req.mr_type);
		return -EINVAL;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err)
		return err;

	mr->ibmr.lkey = resp.lkey;
	mr->ibmr.rkey = resp.rkey;
	mr->mr_handle = resp.mr_handle;

	return 0;
}

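/* Send a GDMA_DESTROY_MR request for a previously created MR handle. */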
static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
	struct gdma_destroy_mr_response resp = {};
	struct gdma_destroy_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
			     sizeof(resp));

	req.mr_handle = mr_handle;

	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
}

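/*
 * Register a user memory region: pin the user pages, create a DMA region
 * covering them, then create a GVA MR (or a zero-based MR when IB_ZERO_BASED
 * is requested) over that region.
 */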
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_dmah *dmah,
				  struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	ibdev_dbg(ibdev,
		  "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
		  start, iova, length, access_flags);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(ibdev, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(ibdev,
			  "Failed to get umem for register user-mr, %pe\n",
			  mr->umem);
		goto err_free;
	}

	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	ibdev_dbg(ibdev,
		  "created dma region for user-mr 0x%llx\n",
		  dma_region_handle);

	mr_params.pd_handle = pd->pd_handle;
	if (access_flags & IB_ZERO_BASED) {
		mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
		mr_params.zbva.dma_region_handle = dma_region_handle;
		mr_params.zbva.access_flags =
			mana_ib_verbs_to_gdma_access_flags(access_flags);
	} else {
		mr_params.mr_type = GDMA_MR_TYPE_GVA;
		mr_params.gva.dma_region_handle = dma_region_handle;
		mr_params.gva.virtual_address = iova;
		mr_params.gva.access_flags =
			mana_ib_verbs_to_gdma_access_flags(access_flags);
	}

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

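/*
 * Register a user memory region backed by a dma-buf: pin the dma-buf pages,
 * create a DMA region covering them, then create a GVA MR over that region.
 */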
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
					 u64 iova, int fd, int access_flags,
					 struct ib_dmah *dmah,
					 struct uverbs_attr_bundle *attrs)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EOPNOTSUPP);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
	if (IS_ERR(umem_dmabuf)) {
		err = PTR_ERR(umem_dmabuf);
		ibdev_dbg(ibdev, "Failed to get dmabuf umem, %pe\n",
			  umem_dmabuf);
		goto err_free;
	}

	mr->umem = &umem_dmabuf->umem;

	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GVA;
	mr_params.gva.dma_region_handle = dma_region_handle;
	mr_params.gva.virtual_address = iova;
	mr_params.gva.access_flags =
		mana_ib_verbs_to_gdma_access_flags(access_flags);

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

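/* Allocate the PD's DMA MR by creating a GPA-type MR with the hardware. */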
struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	if (access_flags & ~VALID_DMA_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GPA;

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_free;

	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

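/*
 * Memory windows are created through the same GDMA_CREATE_MR command as MRs,
 * using the MW1/MW2 MR types; only the rkey and handle are kept.
 */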
static int mana_ib_gd_create_mw(struct mana_ib_dev *dev, struct mana_ib_pd *pd, struct ib_mw *ibmw)
{
	struct mana_ib_mw *mw = container_of(ibmw, struct mana_ib_mw, ibmw);
	struct gdma_context *gc = mdev_to_gc(dev);
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req), sizeof(resp));
	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
	req.pd_handle = pd->pd_handle;

	switch (mw->ibmw.type) {
	case IB_MW_TYPE_1:
		req.mr_type = GDMA_MR_TYPE_MW1;
		break;
	case IB_MW_TYPE_2:
		req.mr_type = GDMA_MR_TYPE_MW2;
		break;
	default:
		return -EINVAL;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err)
		return err;

	mw->ibmw.rkey = resp.rkey;
	mw->mw_handle = resp.mr_handle;

	return 0;
}

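/* Allocate a type 1 or type 2 memory window on the MW's PD. */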
int mana_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibmw->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_pd *pd = container_of(ibmw->pd, struct mana_ib_pd, ibpd);

	return mana_ib_gd_create_mw(mdev, pd, ibmw);
}

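/* Memory windows share the MR destroy path in hardware. */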
int mana_ib_dealloc_mw(struct ib_mw *ibmw)
{
	struct mana_ib_dev *dev = container_of(ibmw->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_mw *mw = container_of(ibmw, struct mana_ib_mw, ibmw);

	return mana_ib_gd_destroy_mr(dev, mw->mw_handle);
}

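/*
 * Deregister an MR: destroy the hardware MR first, then release the backing
 * umem (if any) and free the MR.
 */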
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
	struct ib_device *ibdev = ibmr->device;
	struct mana_ib_dev *dev;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
	if (err)
		return err;

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}

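/* Ask the hardware to allocate a device memory (DM) region. */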
static int mana_ib_gd_alloc_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm,
			       struct ib_dm_alloc_attr *attr)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_alloc_dm_resp resp = {};
	struct gdma_alloc_dm_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOC_DM, sizeof(req), sizeof(resp));
	req.length = attr->length;
	req.alignment = attr->alignment;
	req.flags = attr->flags;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err)
		return err;

	dm->dm_handle = resp.dm_handle;

	return 0;
}

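/* Allocate a device memory object backed by a hardware DM allocation. */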
struct ib_dm *mana_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs)
{
	struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct mana_ib_dm *dm;
	int err;

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	err = mana_ib_gd_alloc_dm(dev, dm, attr);
	if (err)
		goto err_free;

	return &dm->ibdm;

err_free:
	kfree(dm);
	return ERR_PTR(err);
}

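/* Release a hardware device memory (DM) allocation. */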
static int mana_ib_gd_destroy_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_destroy_dm_resp resp = {};
	struct gdma_destroy_dm_req req = {};

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DM, sizeof(req), sizeof(resp));
	req.dm_handle = dm->dm_handle;

	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
}

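/* Free a device memory object after releasing its hardware DM allocation. */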
int mana_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
{
	struct mana_ib_dev *dev = container_of(ibdm->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_dm *dm = container_of(ibdm, struct mana_ib_dm, ibdm);
	int err;

	err = mana_ib_gd_destroy_dm(dev, dm);
	if (err)
		return err;

	kfree(dm);
	return 0;
}

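/*
 * Register a DM-type MR over a device memory allocation at the given offset
 * and length.
 */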
struct ib_mr *mana_ib_reg_dm_mr(struct ib_pd *ibpd, struct ib_dm *ibdm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mana_ib_dev *dev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_dm *mana_dm = container_of(ibdm, struct mana_ib_dm, ibdm);
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct mana_ib_mr *mr;
	int err;

	attr->access_flags &= ~IB_ACCESS_OPTIONAL;
	if (attr->access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EOPNOTSUPP);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_DM;
	mr_params.da.dm_handle = mana_dm->dm_handle;
	mr_params.da.offset = attr->offset;
	mr_params.da.length = attr->length;
	mr_params.da.access_flags =
		mana_ib_verbs_to_gdma_access_flags(attr->access_flags);

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_free;

	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}