// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/uverbs_std_types.h>
#include "dm.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

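/*
 * Reserve a window for the requested length in the driver's MEMIC page
 * bitmap, then ask firmware to allocate from within that window. Firmware
 * may fail a given range with -EAGAIN (e.g. if part of it is already in
 * use by another function on the device), in which case the search
 * resumes from the next page. On success, *addr holds the allocation's
 * address within the device BAR.
 */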
static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
                                u64 length, u32 alignment)
{
        struct mlx5_core_dev *dev = dm->dev;
        u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
                                 >> PAGE_SHIFT;
        u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
        u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
        u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
        u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
        u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
        u32 mlx5_alignment;
        u64 page_idx = 0;
        int ret = 0;

        if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
                return -EINVAL;

        /* The mlx5 device computes alignment as 64 * 2^driver_value, while
         * the caller passes the log2 of the requested alignment in bytes,
         * so normalizing is needed. For example, a requested log_alignment
         * of 7 (128 bytes) maps to driver value 1, since 64 * 2^1 = 128.
         */
        mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
                         alignment - MLX5_MEMIC_BASE_ALIGN;
        if (mlx5_alignment > max_alignment)
                return -EINVAL;

        MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
        MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
        MLX5_SET(alloc_memic_in, in, memic_size, length);
        MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
                 mlx5_alignment);

        while (page_idx < num_memic_hw_pages) {
                spin_lock(&dm->lock);
                page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
                                                      num_memic_hw_pages,
                                                      page_idx,
                                                      num_pages, 0);

                if (page_idx < num_memic_hw_pages)
                        bitmap_set(dm->memic_alloc_pages,
                                   page_idx, num_pages);

                spin_unlock(&dm->lock);

                if (page_idx >= num_memic_hw_pages)
                        break;

                MLX5_SET64(alloc_memic_in, in, range_start_addr,
                           hw_start_addr + (page_idx * PAGE_SIZE));

                ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
                if (ret) {
                        spin_lock(&dm->lock);
                        bitmap_clear(dm->memic_alloc_pages,
                                     page_idx, num_pages);
                        spin_unlock(&dm->lock);

                        if (ret == -EAGAIN) {
                                page_idx++;
                                continue;
                        }

                        return ret;
                }

                *addr = dev->bar_addr +
                        MLX5_GET64(alloc_memic_out, out, memic_start_addr);

                return 0;
        }

        return -ENOMEM;
}

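/*
 * Free a MEMIC allocation: tell firmware to release the memory, then
 * clear the corresponding pages in the driver's allocation bitmap. The
 * bitmap is left untouched if the firmware command fails.
 */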
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
                            u64 length)
{
        struct mlx5_core_dev *dev = dm->dev;
        u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
        u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
        u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
        u64 start_page_idx;
        int err;

        addr -= dev->bar_addr;
        start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

        MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
        MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
        MLX5_SET(dealloc_memic_in, in, memic_size, length);

        err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
        if (err)
                return;

        spin_lock(&dm->lock);
        bitmap_clear(dm->memic_alloc_pages,
                     start_page_idx, num_pages);
        spin_unlock(&dm->lock);
}

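/*
 * Release the operation-specific address previously obtained through
 * MODIFY_MEMIC for @operation on the MEMIC allocation at @addr.
 */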
void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
                               u8 operation)
{
        u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
        struct mlx5_core_dev *dev = dm->dev;

        MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
        MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
        MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
        MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);

        mlx5_cmd_exec_in(dev, modify_memic, in);
}

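/*
 * Request an operation-specific alias address for the MEMIC allocation
 * at @addr. On success, *op_addr holds the BAR address through which the
 * requested MEMIC operation can be performed.
 */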
static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
                                   u8 operation, phys_addr_t *op_addr)
{
        u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
        u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
        struct mlx5_core_dev *dev = dm->dev;
        int err;

        MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
        MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_ALLOC);
        MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
        MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);

        err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
        if (err)
                return err;

        *op_addr = dev->bar_addr +
                   MLX5_GET64(modify_memic_out, out, memic_operation_addr);
        return 0;
}

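/*
 * Insert an mmap entry whose page offset encodes the
 * MLX5_IB_MMAP_DEVICE_MEM command in the upper 16 bits and a per-entry
 * index in the lower 16 bits. The lower 16 bits are what is reported
 * back to userspace as the PAGE_INDEX response attribute; userspace
 * combines them with the command bits to build the mmap() offset.
 */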
static int add_dm_mmap_entry(struct ib_ucontext *context,
                             struct mlx5_user_mmap_entry *mentry, u8 mmap_flag,
                             size_t size, u64 address)
{
        mentry->mmap_flag = mmap_flag;
        mentry->address = address;

        return rdma_user_mmap_entry_insert_range(
                context, &mentry->rdma_entry, size,
                MLX5_IB_MMAP_DEVICE_MEM << 16,
                (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
}

static void mlx5_ib_dm_memic_free(struct kref *kref)
{
        struct mlx5_ib_dm_memic *dm =
                container_of(kref, struct mlx5_ib_dm_memic, ref);
        struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);

        mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
        kfree(dm);
}

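/*
 * Return the mmap page index and the offset of the operation address
 * within its page to userspace, so it can mmap() the page and locate
 * the address inside it.
 */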
static int copy_op_to_user(struct mlx5_ib_dm_op_entry *op_entry,
                           struct uverbs_attr_bundle *attrs)
{
        u64 start_offset;
        u16 page_idx;
        int err;

        page_idx = op_entry->mentry.rdma_entry.start_pgoff & 0xFFFF;
        start_offset = op_entry->op_addr & ~PAGE_MASK;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
                             &page_idx, sizeof(page_idx));
        if (err)
                return err;

        return uverbs_copy_to(attrs,
                              MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
                              &start_offset, sizeof(start_offset));
}

static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
                           struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_dm_op_entry *op_entry;

        op_entry = xa_load(&dm->ops, op);
        if (!op_entry)
                return -ENOENT;

        return copy_op_to_user(op_entry, attrs);
}

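/*
 * Map an operation-specific address for a MEMIC allocation. If the
 * operation was already mapped for this DM, the existing entry is
 * returned; otherwise a new address is allocated from firmware, an mmap
 * entry is registered for it, and the entry is cached in the DM's ops
 * xarray. Each cached entry holds a reference on the DM.
 */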
static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
        struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj = uverbs_attr_get_uobject(
                attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE);
        struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
        struct ib_dm *ibdm = uobj->object;
        struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
        struct mlx5_ib_dm_op_entry *op_entry;
        int err;
        u8 op;

        err = uverbs_copy_from(&op, attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP);
        if (err)
                return err;

        if (op >= BITS_PER_TYPE(u32))
                return -EOPNOTSUPP;

        if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
                return -EOPNOTSUPP;

        mutex_lock(&dm->ops_xa_lock);
        err = map_existing_op(dm, op, attrs);
        if (!err || err != -ENOENT)
                goto err_unlock;

        op_entry = kzalloc(sizeof(*op_entry), GFP_KERNEL);
        if (!op_entry) {
                err = -ENOMEM;
                goto err_unlock;
        }

        err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
                                      &op_entry->op_addr);
        if (err) {
                kfree(op_entry);
                goto err_unlock;
        }
        op_entry->op = op;
        op_entry->dm = dm;

        err = add_dm_mmap_entry(uobj->context, &op_entry->mentry,
                                MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
                                op_entry->op_addr & PAGE_MASK);
        if (err) {
                mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
                kfree(op_entry);
                goto err_unlock;
        }
        /* From this point, entry will be freed by mmap_free */
        kref_get(&dm->ref);

        err = copy_op_to_user(op_entry, attrs);
        if (err)
                goto err_remove;

        err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
        if (err)
                goto err_remove;
        mutex_unlock(&dm->ops_xa_lock);

        return 0;

err_remove:
        rdma_user_mmap_entry_remove(&op_entry->mentry.rdma_entry);
err_unlock:
        mutex_unlock(&dm->ops_xa_lock);

        return err;
}

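/*
 * Allocate a MEMIC DM: round the requested length up to the MEMIC base
 * granularity, allocate device memory for it, and register an mmap entry
 * so userspace can map it. The returned page index and start offset let
 * userspace locate the allocation within the mapped page.
 */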
static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
                                           struct ib_dm_alloc_attr *attr,
                                           struct uverbs_attr_bundle *attrs)
{
        struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
        struct mlx5_ib_dm_memic *dm;
        u64 start_offset;
        u16 page_idx;
        int err;
        u64 address;

        if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
                return ERR_PTR(-EOPNOTSUPP);

        dm = kzalloc(sizeof(*dm), GFP_KERNEL);
        if (!dm)
                return ERR_PTR(-ENOMEM);

        dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
        dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
        dm->base.ibdm.device = ctx->device;

        kref_init(&dm->ref);
        xa_init(&dm->ops);
        mutex_init(&dm->ops_xa_lock);
        dm->req_length = attr->length;

        err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
                                   dm->base.size, attr->alignment);
        if (err) {
                kfree(dm);
                return ERR_PTR(err);
        }

        address = dm->base.dev_addr & PAGE_MASK;
        err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
                                dm->base.size, address);
        if (err) {
                mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
                kfree(dm);
                return ERR_PTR(err);
        }

        page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
                             &page_idx, sizeof(page_idx));
        if (err)
                goto err_copy;

        start_offset = dm->base.dev_addr & ~PAGE_MASK;
        err = uverbs_copy_to(attrs,
                             MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                             &start_offset, sizeof(start_offset));
        if (err)
                goto err_copy;

        return &dm->base.ibdm;

err_copy:
        /* Entry removal puts the kref; mmap_free() deallocs and frees @dm */
        rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
        return ERR_PTR(err);
}

static enum mlx5_sw_icm_type get_icm_type(int uapi_type)
{
        switch (uapi_type) {
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
                return MLX5_SW_ICM_TYPE_HEADER_MODIFY;
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
                return MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
        case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
                return MLX5_SW_ICM_TYPE_SW_ENCAP;
        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
        default:
                return MLX5_SW_ICM_TYPE_STEERING;
        }
}

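/*
 * Allocate a SW ICM DM. SW ICM is device memory used for software-managed
 * steering, so the caller must be sufficiently privileged and the device
 * must expose the SW-owner flow-table capabilities matching the requested
 * type. The device address is returned directly through the START_OFFSET
 * attribute; SW ICM is not exposed via mmap.
 */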
static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
                                            struct ib_dm_alloc_attr *attr,
                                            struct uverbs_attr_bundle *attrs,
                                            int type)
{
        struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
        enum mlx5_sw_icm_type icm_type;
        struct mlx5_ib_dm_icm *dm;
        u64 act_size;
        int err;

        if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW))
                return ERR_PTR(-EPERM);

        switch (type) {
        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
                if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
                      MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
                      MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
                      MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2)))
                        return ERR_PTR(-EOPNOTSUPP);
                break;
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
                if (!MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
                    !MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))
                        return ERR_PTR(-EOPNOTSUPP);
                break;
        default:
                return ERR_PTR(-EOPNOTSUPP);
        }

        dm = kzalloc(sizeof(*dm), GFP_KERNEL);
        if (!dm)
                return ERR_PTR(-ENOMEM);

        dm->base.type = type;
        dm->base.ibdm.device = ctx->device;

        /* Allocation size must be a multiple of the basic block size
         * and a power of 2.
         */
        act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
        act_size = roundup_pow_of_two(act_size);

        dm->base.size = act_size;
        icm_type = get_icm_type(type);

        err = mlx5_dm_sw_icm_alloc(dev, icm_type, act_size, attr->alignment,
                                   to_mucontext(ctx)->devx_uid,
                                   &dm->base.dev_addr, &dm->obj_id);
        if (err)
                goto free;

        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                             &dm->base.dev_addr, sizeof(dm->base.dev_addr));
        if (err) {
                mlx5_dm_sw_icm_dealloc(dev, icm_type, dm->base.size,
                                       to_mucontext(ctx)->devx_uid,
                                       dm->base.dev_addr, dm->obj_id);
                goto free;
        }
        return &dm->base.ibdm;
free:
        kfree(dm);
        return ERR_PTR(err);
}

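/*
 * Driver .alloc_dm hook: dispatch on the uapi DM type requested by
 * userspace, defaulting to MEMIC when no type attribute is given.
 */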
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
                               struct ib_ucontext *context,
                               struct ib_dm_alloc_attr *attr,
                               struct uverbs_attr_bundle *attrs)
{
        enum mlx5_ib_uapi_dm_type type;
        int err;

        err = uverbs_get_const_default(&type, attrs,
                                       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
                                       MLX5_IB_UAPI_DM_TYPE_MEMIC);
        if (err)
                return ERR_PTR(err);

        mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
                    type, attr->length, attr->alignment);

        switch (type) {
        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
                return handle_alloc_dm_memic(context, attr, attrs);
        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
                return handle_alloc_dm_sw_icm(context, attr, attrs, type);
        default:
                return ERR_PTR(-EOPNOTSUPP);
        }
}

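/*
 * Drop every cached operation entry of a MEMIC DM. Removing an mmap
 * entry ends up in mlx5_ib_dm_mmap_free(), which releases the operation
 * address and the reference it holds on the DM.
 */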
static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
{
        struct mlx5_ib_dm_op_entry *entry;
        unsigned long idx;

        mutex_lock(&dm->ops_xa_lock);
        xa_for_each(&dm->ops, idx, entry) {
                xa_erase(&dm->ops, idx);
                rdma_user_mmap_entry_remove(&entry->mentry.rdma_entry);
        }
        mutex_unlock(&dm->ops_xa_lock);
}

static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
{
        dm_memic_remove_ops(dm);
        rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
}

static int mlx5_dm_icm_dealloc(struct mlx5_ib_ucontext *ctx,
                               struct mlx5_ib_dm_icm *dm)
{
        enum mlx5_sw_icm_type type = get_icm_type(dm->base.type);
        struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
        int err;

        err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
                                     dm->base.dev_addr, dm->obj_id);
        if (!err)
                kfree(dm);
        return err;
}

static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
                              struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
                &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
        struct mlx5_ib_dm *dm = to_mdm(ibdm);

        switch (dm->type) {
        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
                mlx5_dm_memic_dealloc(to_memic(ibdm));
                return 0;
        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
                return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
        default:
                return -EOPNOTSUPP;
        }
}

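/*
 * Report a MEMIC DM's mmap page index, start offset and originally
 * requested length back to userspace.
 */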
static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_QUERY)(
        struct uverbs_attr_bundle *attrs)
{
        struct ib_dm *ibdm =
                uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE);
        struct mlx5_ib_dm *dm = to_mdm(ibdm);
        struct mlx5_ib_dm_memic *memic;
        u64 start_offset;
        u16 page_idx;
        int err;

        if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
                return -EOPNOTSUPP;

        memic = to_memic(ibdm);
        page_idx = memic->mentry.rdma_entry.start_pgoff & 0xFFFF;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
                             &page_idx, sizeof(page_idx));
        if (err)
                return err;

        start_offset = memic->base.dev_addr & ~PAGE_MASK;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
                             &start_offset, sizeof(start_offset));
        if (err)
                return err;

        return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
                              &memic->req_length,
                              sizeof(memic->req_length));
}

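/*
 * Called when a DM-related mmap entry is finally released. MEMIC entries
 * drop the DM reference taken at allocation time; operation entries
 * additionally release the firmware operation address. The MEMIC memory
 * itself is freed only once the last reference is put.
 */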
void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
                          struct mlx5_user_mmap_entry *mentry)
{
        struct mlx5_ib_dm_op_entry *op_entry;
        struct mlx5_ib_dm_memic *mdm;

        switch (mentry->mmap_flag) {
        case MLX5_IB_MMAP_TYPE_MEMIC:
                mdm = container_of(mentry, struct mlx5_ib_dm_memic, mentry);
                kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
                break;
        case MLX5_IB_MMAP_TYPE_MEMIC_OP:
                op_entry = container_of(mentry, struct mlx5_ib_dm_op_entry,
                                        mentry);
                mdm = op_entry->dm;
                mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
                                          op_entry->op);
                kfree(op_entry);
                kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
                break;
        default:
                WARN_ON(true);
        }
}

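/*
 * Ioctl schema: the declarations below describe the mlx5-specific
 * DM_QUERY and DM_MAP_OP_ADDR methods and extend the common DM_ALLOC
 * method with mlx5-specific request/response attributes.
 */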
DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DM_QUERY,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE, UVERBS_OBJECT_DM,
                        UVERBS_ACCESS_READ, UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
                            UVERBS_ATTR_TYPE(u16), UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY));

ADD_UVERBS_ATTRIBUTES_SIMPLE(
        mlx5_ib_dm, UVERBS_OBJECT_DM, UVERBS_METHOD_DM_ALLOC,
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
                            UVERBS_ATTR_TYPE(u16), UA_OPTIONAL),
        UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
                             enum mlx5_ib_uapi_dm_type, UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DM_MAP_OP_ADDR,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE,
                        UVERBS_OBJECT_DM,
                        UVERBS_ACCESS_READ,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
                           UVERBS_ATTR_TYPE(u8),
                           UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
                            UVERBS_ATTR_TYPE(u64),
                            UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
                            UVERBS_ATTR_TYPE(u16),
                            UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM,
                              &UVERBS_METHOD(MLX5_IB_METHOD_DM_MAP_OP_ADDR),
                              &UVERBS_METHOD(MLX5_IB_METHOD_DM_QUERY));

const struct uapi_definition mlx5_ib_dm_defs[] = {
        UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
        UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM),
        {},
};

const struct ib_device_ops mlx5_ib_dev_dm_ops = {
        .alloc_dm = mlx5_ib_alloc_dm,
        .dealloc_dm = mlx5_ib_dealloc_dm,
        .reg_dm_mr = mlx5_ib_reg_dm_mr,
};