// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
			IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)

#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)

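/*
 * Translate ib_verbs access flags into the GDMA hardware flag set.
 * Local read access is implied for every MR and is therefore always set.
 */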
static enum gdma_mr_access_flags
mana_ib_verbs_to_gdma_access_flags(int access_flags)
{
	enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;

	if (access_flags & IB_ACCESS_LOCAL_WRITE)
		flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		flags |= GDMA_ACCESS_FLAG_REMOTE_READ;

	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;

	return flags;
}

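/*
 * Send a GDMA_CREATE_MR request and, on success, store the returned
 * lkey/rkey and MR handle in @mr. The request body depends on the MR
 * type: GPA MRs take no extra parameters, GVA/ZBVA MRs reference a DMA
 * region, and DM MRs reference a device-memory handle.
 */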
static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
				struct gdma_create_mr_params *mr_params)
{
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(resp));
	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
	req.pd_handle = mr_params->pd_handle;
	req.mr_type = mr_params->mr_type;

	switch (mr_params->mr_type) {
	case GDMA_MR_TYPE_GPA:
		break;
	case GDMA_MR_TYPE_GVA:
		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
		req.gva.virtual_address = mr_params->gva.virtual_address;
		req.gva.access_flags = mr_params->gva.access_flags;
		break;
	case GDMA_MR_TYPE_ZBVA:
		req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
		req.zbva.access_flags = mr_params->zbva.access_flags;
		break;
	case GDMA_MR_TYPE_DM:
		req.da_ext.length = mr_params->da.length;
		req.da.dm_handle = mr_params->da.dm_handle;
		req.da.offset = mr_params->da.offset;
		req.da.access_flags = mr_params->da.access_flags;
		break;
	default:
		ibdev_dbg(&dev->ib_dev,
			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
			  req.mr_type);
		return -EINVAL;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	mr->ibmr.lkey = resp.lkey;
	mr->ibmr.rkey = resp.rkey;
	mr->mr_handle = resp.mr_handle;

	return 0;
}

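/*
 * Send a GDMA_DESTROY_MR request for a previously created MR handle.
 */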
static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
	struct gdma_destroy_mr_response resp = {};
	struct gdma_destroy_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
			     sizeof(resp));

	req.mr_handle = mr_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	return 0;
}

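/*
 * Register a user memory region: pin the user pages with ib_umem_get(),
 * build a DMA region over them, then create a zero-based (ZBVA) or
 * virtual-address (GVA) MR on top, depending on IB_ZERO_BASED.
 */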
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_dmah *dmah,
				  struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	ibdev_dbg(ibdev,
		  "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
		  start, iova, length, access_flags);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(ibdev, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(ibdev,
			  "Failed to get umem for register user-mr, %pe\n",
			  mr->umem);
		goto err_free;
	}

	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed to create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	ibdev_dbg(ibdev,
		  "created dma region for user-mr 0x%llx\n",
		  dma_region_handle);

	mr_params.pd_handle = pd->pd_handle;
	if (access_flags & IB_ZERO_BASED) {
		mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
		mr_params.zbva.dma_region_handle = dma_region_handle;
		mr_params.zbva.access_flags =
			mana_ib_verbs_to_gdma_access_flags(access_flags);
	} else {
		mr_params.mr_type = GDMA_MR_TYPE_GVA;
		mr_params.gva.dma_region_handle = dma_region_handle;
		mr_params.gva.virtual_address = iova;
		mr_params.gva.access_flags =
			mana_ib_verbs_to_gdma_access_flags(access_flags);
	}

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

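/*
 * Register a dma-buf backed memory region. The dma-buf is pinned for the
 * lifetime of the MR; apart from that, this follows the same DMA-region +
 * GVA MR path as mana_ib_reg_user_mr().
 */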
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
					 u64 iova, int fd, int access_flags,
					 struct ib_dmah *dmah,
					 struct uverbs_attr_bundle *attrs)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EOPNOTSUPP);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
	if (IS_ERR(umem_dmabuf)) {
		err = PTR_ERR(umem_dmabuf);
		ibdev_dbg(ibdev, "Failed to get dmabuf umem, %pe\n",
			  umem_dmabuf);
		goto err_free;
	}

	mr->umem = &umem_dmabuf->umem;

	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed to create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GVA;
	mr_params.gva.dma_region_handle = dma_region_handle;
	mr_params.gva.virtual_address = iova;
	mr_params.gva.access_flags =
		mana_ib_verbs_to_gdma_access_flags(access_flags);

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

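/*
 * Create a DMA MR of GPA (guest physical address) type. Only
 * IB_ACCESS_LOCAL_WRITE is a valid access flag for this MR type.
 */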
struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	if (access_flags & ~VALID_DMA_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GPA;

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_free;

	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

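/*
 * Deregister an MR: the hardware MR is destroyed first, and the umem is
 * released only after that succeeds, so the pages stay pinned while the
 * hardware can still reach them.
 */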
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
	struct ib_device *ibdev = ibmr->device;
	struct mana_ib_dev *dev;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
	if (err)
		return err;

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}

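/*
 * Send a GDMA_ALLOC_DM request to allocate device memory and store the
 * returned handle in @dm.
 */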
static int mana_ib_gd_alloc_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm,
			       struct ib_dm_alloc_attr *attr)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_alloc_dm_resp resp = {};
	struct gdma_alloc_dm_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOC_DM, sizeof(req), sizeof(resp));
	req.length = attr->length;
	req.alignment = attr->alignment;
	req.flags = attr->flags;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		if (!err)
			err = -EPROTO;

		return err;
	}

	dm->dm_handle = resp.dm_handle;

	return 0;
}

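/*
 * Allocate a device memory (DM) object for a user context.
 */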
struct ib_dm *mana_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs)
{
	struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct mana_ib_dm *dm;
	int err;

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	err = mana_ib_gd_alloc_dm(dev, dm, attr);
	if (err)
		goto err_free;

	return &dm->ibdm;

err_free:
	kfree(dm);
	return ERR_PTR(err);
}

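/*
 * Send a GDMA_DESTROY_DM request for a previously allocated DM handle.
 */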
static int mana_ib_gd_destroy_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_destroy_dm_resp resp = {};
	struct gdma_destroy_dm_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DM, sizeof(req), sizeof(resp));
	req.dm_handle = dm->dm_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		if (!err)
			err = -EPROTO;

		return err;
	}

	return 0;
}

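/*
 * Deallocate a device memory object: destroy the hardware DM first, then
 * free the driver object.
 */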
int mana_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
{
	struct mana_ib_dev *dev = container_of(ibdm->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_dm *dm = container_of(ibdm, struct mana_ib_dm, ibdm);
	int err;

	err = mana_ib_gd_destroy_dm(dev, dm);
	if (err)
		return err;

	kfree(dm);
	return 0;
}

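/*
 * Create an MR over a slice of previously allocated device memory,
 * described by the DM handle plus @attr->offset and @attr->length.
 */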
struct ib_mr *mana_ib_reg_dm_mr(struct ib_pd *ibpd, struct ib_dm *ibdm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mana_ib_dev *dev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_dm *mana_dm = container_of(ibdm, struct mana_ib_dm, ibdm);
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct mana_ib_mr *mr;
	int err;

	attr->access_flags &= ~IB_ACCESS_OPTIONAL;
	if (attr->access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EOPNOTSUPP);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_DM;
	mr_params.da.dm_handle = mana_dm->dm_handle;
	mr_params.da.offset = attr->offset;
	mr_params.da.length = attr->length;
	mr_params.da.access_flags =
		mana_ib_verbs_to_gdma_access_flags(attr->access_flags);

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_free;

	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}