/linux/drivers/infiniband/hw/mana/mr.c (revision 6093a688a07da07808f0122f9aa2a3eed250d853)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

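/*
 * Access flags this driver accepts: user MRs may request local/remote
 * read/write and remote atomics, optionally zero-based; DMA MRs are
 * limited to local write (local read is always implied).
 */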
#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
			IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)

#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)

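/*
 * Translate IB verbs access flags into GDMA hardware access flags.
 * GDMA_ACCESS_FLAG_LOCAL_READ is always set; the remaining bits are
 * added only when the corresponding IB flag is requested.
 */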
static enum gdma_mr_access_flags
mana_ib_verbs_to_gdma_access_flags(int access_flags)
{
	enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;

	if (access_flags & IB_ACCESS_LOCAL_WRITE)
		flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		flags |= GDMA_ACCESS_FLAG_REMOTE_READ;

	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;

	return flags;
}

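/*
 * Issue a GDMA_CREATE_MR command to the hardware. On success the
 * returned lkey, rkey and MR handle are stored in @mr. A nonzero
 * response status without a transport error is reported as -EPROTO.
 */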
static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
				struct gdma_create_mr_params *mr_params)
{
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(resp));
	req.pd_handle = mr_params->pd_handle;
	req.mr_type = mr_params->mr_type;

	switch (mr_params->mr_type) {
	case GDMA_MR_TYPE_GPA:
		break;
	case GDMA_MR_TYPE_GVA:
		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
		req.gva.virtual_address = mr_params->gva.virtual_address;
		req.gva.access_flags = mr_params->gva.access_flags;
		break;
	case GDMA_MR_TYPE_ZBVA:
		req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
		req.zbva.access_flags = mr_params->zbva.access_flags;
		break;
	default:
		ibdev_dbg(&dev->ib_dev,
			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
			  req.mr_type);
		return -EINVAL;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	mr->ibmr.lkey = resp.lkey;
	mr->ibmr.rkey = resp.rkey;
	mr->mr_handle = resp.mr_handle;

	return 0;
}

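/*
 * Tear down a hardware MR via GDMA_DESTROY_MR. As with creation, a
 * nonzero response status without a transport error maps to -EPROTO.
 */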
static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
	struct gdma_destroy_mr_response resp = {};
	struct gdma_destroy_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
			     sizeof(resp));

	req.mr_handle = mr_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	return 0;
}

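/**
 * mana_ib_reg_user_mr - register a user-space memory region
 * @ibpd: protection domain the MR belongs to
 * @start: starting user virtual address of the region
 * @length: length of the region in bytes
 * @iova: requested I/O virtual address of the mapping
 * @access_flags: IB access flags, validated against VALID_MR_FLAGS
 * @dmah: DMA handle, not supported by this driver
 * @udata: user-space driver data (unused)
 *
 * Pins the user pages, creates a GDMA DMA region covering them, then
 * creates a zero-based (ZBVA) or virtually addressed (GVA) hardware MR
 * depending on whether IB_ZERO_BASED was requested.
 */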
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_dmah *dmah,
				  struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	ibdev_dbg(ibdev,
		  "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x\n",
		  start, iova, length, access_flags);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(ibdev, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(ibdev,
			  "Failed to get umem for register user-mr, %pe\n",
			  mr->umem);
		goto err_free;
	}

	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	ibdev_dbg(ibdev,
		  "created dma region for user-mr 0x%llx\n",
		  dma_region_handle);

	mr_params.pd_handle = pd->pd_handle;
	if (access_flags & IB_ZERO_BASED) {
		mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
		mr_params.zbva.dma_region_handle = dma_region_handle;
		mr_params.zbva.access_flags =
			mana_ib_verbs_to_gdma_access_flags(access_flags);
	} else {
		mr_params.mr_type = GDMA_MR_TYPE_GVA;
		mr_params.gva.dma_region_handle = dma_region_handle;
		mr_params.gva.virtual_address = iova;
		mr_params.gva.access_flags =
			mana_ib_verbs_to_gdma_access_flags(access_flags);
	}

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

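/**
 * mana_ib_reg_user_mr_dmabuf - register a dma-buf backed memory region
 * @ibpd: protection domain the MR belongs to
 * @start: offset of the region within the dma-buf
 * @length: length of the region in bytes
 * @iova: requested I/O virtual address of the mapping
 * @fd: file descriptor of the dma-buf
 * @access_flags: IB access flags, validated against VALID_MR_FLAGS
 * @dmah: DMA handle, not supported by this driver
 * @attrs: uverbs attribute bundle (unused)
 *
 * Same flow as mana_ib_reg_user_mr(), except the pages come from a
 * pinned dma-buf rather than from ib_umem_get(), and the MR is always
 * created as a virtually addressed (GVA) region.
 */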
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
					 u64 iova, int fd, int access_flags,
					 struct ib_dmah *dmah,
					 struct uverbs_attr_bundle *attrs)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EOPNOTSUPP);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
	if (IS_ERR(umem_dmabuf)) {
		err = PTR_ERR(umem_dmabuf);
		ibdev_dbg(ibdev, "Failed to get dmabuf umem, %pe\n",
			  umem_dmabuf);
		goto err_free;
	}

	mr->umem = &umem_dmabuf->umem;

	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GVA;
	mr_params.gva.dma_region_handle = dma_region_handle;
	mr_params.gva.virtual_address = iova;
	mr_params.gva.access_flags =
		mana_ib_verbs_to_gdma_access_flags(access_flags);

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

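/*
 * Create a DMA MR of type GDMA_MR_TYPE_GPA (guest physical addressing).
 * No DMA region is needed; only the flags in VALID_DMA_MR_FLAGS are
 * accepted.
 */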
struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	if (access_flags & ~VALID_DMA_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GPA;

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_free;

	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

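/*
 * Destroy the hardware MR first so the device no longer references the
 * memory, then release the umem (DMA MRs have none) and free the
 * driver object.
 */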
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
	struct ib_device *ibdev = ibmr->device;
	struct mana_ib_dev *dev;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
	if (err)
		return err;

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}
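/*
 * For context: these handlers are exposed to the RDMA core through the
 * driver's ib_device_ops table (in the driver's device setup code, not
 * this file). The sketch below is an illustration of that wiring under
 * that assumption, not the exact upstream registration code:
 *
 *	static const struct ib_device_ops mana_ib_dev_ops = {
 *		...
 *		.get_dma_mr = mana_ib_get_dma_mr,
 *		.reg_user_mr = mana_ib_reg_user_mr,
 *		.reg_user_mr_dmabuf = mana_ib_reg_user_mr_dmabuf,
 *		.dereg_mr = mana_ib_dereg_mr,
 *		...
 *	};
 */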