// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

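/*
 * Access flags accepted when creating MRs. Registration requests carrying
 * any flag outside these masks are rejected.
 */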
#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
			IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)

#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)

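/*
 * Translate IB verbs access flags into the GDMA hardware representation.
 * Local read access is always granted, so it is set unconditionally.
 */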
static enum gdma_mr_access_flags
mana_ib_verbs_to_gdma_access_flags(int access_flags)
{
	enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;

	if (access_flags & IB_ACCESS_LOCAL_WRITE)
		flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		flags |= GDMA_ACCESS_FLAG_REMOTE_READ;

	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;

	return flags;
}

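/*
 * Issue a GDMA_CREATE_MR request for the given PD and MR parameters.
 * On success, the returned lkey, rkey and hardware MR handle are stored
 * in @mr.
 */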
static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
				struct gdma_create_mr_params *mr_params)
{
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(resp));
	req.pd_handle = mr_params->pd_handle;
	req.mr_type = mr_params->mr_type;

	switch (mr_params->mr_type) {
	case GDMA_MR_TYPE_GPA:
		break;
	case GDMA_MR_TYPE_GVA:
		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
		req.gva.virtual_address = mr_params->gva.virtual_address;
		req.gva.access_flags = mr_params->gva.access_flags;
		break;
	case GDMA_MR_TYPE_ZBVA:
		req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
		req.zbva.access_flags = mr_params->zbva.access_flags;
		break;
	default:
		ibdev_dbg(&dev->ib_dev,
			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
			  req.mr_type);
		return -EINVAL;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	mr->ibmr.lkey = resp.lkey;
	mr->ibmr.rkey = resp.rkey;
	mr->mr_handle = resp.mr_handle;

	return 0;
}

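/*
 * Issue a GDMA_DESTROY_MR request to release a hardware MR handle.
 */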
static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
	struct gdma_destroy_mr_response resp = {};
	struct gdma_destroy_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
			     sizeof(resp));

	req.mr_handle = mr_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	return 0;
}

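/*
 * Register a user memory region: pin the user pages, create a GDMA DMA
 * region describing them, and create a hardware MR on top of that region.
 * The MR is zero-based or bound to @iova depending on IB_ZERO_BASED.
 */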
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	ibdev_dbg(ibdev,
		  "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x\n",
		  start, iova, length, access_flags);

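	/*
	 * Optional access flags are advisory; strip them off instead of
	 * rejecting the request.
	 */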
	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(ibdev, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(ibdev,
			  "Failed to get umem for register user-mr, %d\n", err);
		goto err_free;
	}

	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed to create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	ibdev_dbg(ibdev,
		  "created dma region for user-mr 0x%llx\n",
		  dma_region_handle);

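	/*
	 * Zero-based MRs are addressed relative to offset zero, so no
	 * virtual address is supplied; otherwise the MR is bound to @iova.
	 */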
	mr_params.pd_handle = pd->pd_handle;
	if (access_flags & IB_ZERO_BASED) {
		mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
		mr_params.zbva.dma_region_handle = dma_region_handle;
		mr_params.zbva.access_flags =
			mana_ib_verbs_to_gdma_access_flags(access_flags);
	} else {
		mr_params.mr_type = GDMA_MR_TYPE_GVA;
		mr_params.gva.dma_region_handle = dma_region_handle;
		mr_params.gva.virtual_address = iova;
		mr_params.gva.access_flags =
			mana_ib_verbs_to_gdma_access_flags(access_flags);
	}

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

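/*
 * Register a dma-buf backed memory region. The dma-buf is pinned for the
 * lifetime of the MR; otherwise the flow mirrors mana_ib_reg_user_mr().
 */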
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
					 u64 iova, int fd, int access_flags,
					 struct uverbs_attr_bundle *attrs)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EOPNOTSUPP);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
	if (IS_ERR(umem_dmabuf)) {
		err = PTR_ERR(umem_dmabuf);
		ibdev_dbg(ibdev, "Failed to get dmabuf umem, %d\n", err);
		goto err_free;
	}

	mr->umem = &umem_dmabuf->umem;

	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed to create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GVA;
	mr_params.gva.dma_region_handle = dma_region_handle;
	mr_params.gva.virtual_address = iova;
	mr_params.gva.access_flags =
		mana_ib_verbs_to_gdma_access_flags(access_flags);

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

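/*
 * Create the DMA MR for a PD: a GPA-type hardware MR with no user memory
 * backing, used for kernel DMA access.
 */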
struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	if (access_flags & ~VALID_DMA_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GPA;

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_free;

	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

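/*
 * Deregister an MR: destroy the hardware handle first, then release the
 * umem (absent for DMA MRs) and free the MR itself.
 */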
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
	struct ib_device *ibdev = ibmr->device;
	struct mana_ib_dev *dev;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
	if (err)
		return err;

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}