xref: /linux/drivers/infiniband/hw/mlx5/mr.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  * Copyright (c) 2020, Intel Corporation. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/kref.h>
35 #include <linux/random.h>
36 #include <linux/debugfs.h>
37 #include <linux/export.h>
38 #include <linux/delay.h>
39 #include <linux/dma-buf.h>
40 #include <linux/dma-resv.h>
41 #include <rdma/frmr_pools.h>
42 #include <rdma/ib_umem_odp.h>
43 #include "dm.h"
44 #include "mlx5_ib.h"
45 #include "umr.h"
46 #include "data_direct.h"
47 #include "dmah.h"
48 
49 static int mkey_max_umr_order(struct mlx5_ib_dev *dev)
50 {
51 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
52 		return MLX5_MAX_UMR_EXTENDED_SHIFT;
53 	return MLX5_MAX_UMR_SHIFT;
54 }
55 
56 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
57 				     u64 iova, int access_flags,
58 				     unsigned long page_size, bool populate,
59 				     int access_mode, u16 st_index, u8 ph);
60 static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
61 
62 static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
63 					  struct ib_pd *pd)
64 {
65 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
66 
67 	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
68 	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
69 	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
70 	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
71 	MLX5_SET(mkc, mkc, lr, 1);
72 
73 	if (acc & IB_ACCESS_RELAXED_ORDERING) {
74 		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
75 			MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
76 
77 		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
78 		    (MLX5_CAP_GEN(dev->mdev,
79 				  relaxed_ordering_read_pci_enabled) &&
80 		     pcie_relaxed_ordering_enabled(dev->mdev->pdev)))
81 			MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
82 	}
83 
84 	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
85 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
86 	MLX5_SET64(mkc, mkc, start_addr, start_addr);
87 }
88 
89 static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in)
90 {
91 	u8 key = atomic_inc_return(&dev->mkey_var);
92 	void *mkc;
93 
94 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
95 	MLX5_SET(mkc, mkc, mkey_7_0, key);
96 	*mkey = key;
97 }
98 
99 static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
100 			       struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
101 {
102 	int ret;
103 
104 	assign_mkey_variant(dev, &mkey->key, in);
105 	ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
106 	if (!ret)
107 		init_waitqueue_head(&mkey->wait);
108 
109 	return ret;
110 }
111 
112 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
113 {
114 	WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
115 
116 	return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
117 }
118 
119 static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
120 {
121 	int ret = 0;
122 
123 	switch (access_mode) {
124 	case MLX5_MKC_ACCESS_MODE_MTT:
125 		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
126 						   sizeof(struct mlx5_mtt));
127 		break;
128 	case MLX5_MKC_ACCESS_MODE_KSM:
129 		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
130 						   sizeof(struct mlx5_klm));
131 		break;
132 	default:
133 		WARN_ON(1);
134 	}
135 	return ret;
136 }
137 
138 static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev,
139 					 int access_flags)
140 {
141 	int ret = 0;
142 
143 	if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
144 	    MLX5_CAP_GEN(dev->mdev, atomic) &&
145 	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
146 		ret |= IB_ACCESS_REMOTE_ATOMIC;
147 
148 	if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
149 	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
150 	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
151 		ret |= IB_ACCESS_RELAXED_ORDERING;
152 
153 	if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
154 	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
155 	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
156 	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
157 		ret |= IB_ACCESS_RELAXED_ORDERING;
158 
159 	return ret;
160 }
161 
162 #define MLX5_FRMR_POOLS_KEY_ACCESS_MODE_KSM_MASK 1ULL
163 #define MLX5_FRMR_POOLS_KEY_VENDOR_KEY_SUPPORTED \
164 	MLX5_FRMR_POOLS_KEY_ACCESS_MODE_KSM_MASK
165 
166 #define MLX5_FRMR_POOLS_KERNEL_KEY_PH_SHIFT 16
167 #define MLX5_FRMR_POOLS_KERNEL_KEY_PH_MASK 0xFF0000
168 #define MLX5_FRMR_POOLS_KERNEL_KEY_ST_INDEX_MASK 0xFFFF
169 
170 static struct mlx5_ib_mr *
171 _mlx5_frmr_pool_alloc(struct mlx5_ib_dev *dev, struct ib_umem *umem,
172 		      int access_flags, int access_mode,
173 		      unsigned long page_size, u16 st_index, u8 ph)
174 {
175 	struct mlx5_ib_mr *mr;
176 	int err;
177 
178 	mr = kzalloc_obj(*mr);
179 	if (!mr)
180 		return ERR_PTR(-ENOMEM);
181 
182 	mr->ibmr.frmr.key.ats = mlx5_umem_needs_ats(dev, umem, access_flags);
183 	mr->ibmr.frmr.key.access_flags =
184 		get_unchangeable_access_flags(dev, access_flags);
185 	mr->ibmr.frmr.key.num_dma_blocks =
186 		ib_umem_num_dma_blocks(umem, page_size);
187 	mr->ibmr.frmr.key.vendor_key =
188 		access_mode == MLX5_MKC_ACCESS_MODE_KSM ?
189 			MLX5_FRMR_POOLS_KEY_ACCESS_MODE_KSM_MASK :
190 			0;
191 
192 	/* Normalize ph: swap 0 and MLX5_IB_NO_PH */
193 	if (ph == MLX5_IB_NO_PH || ph == 0)
194 		ph ^= MLX5_IB_NO_PH;
195 
196 	mr->ibmr.frmr.key.kernel_vendor_key =
197 		st_index | (ph << MLX5_FRMR_POOLS_KERNEL_KEY_PH_SHIFT);
198 	err = ib_frmr_pool_pop(&dev->ib_dev, &mr->ibmr);
199 	if (err) {
200 		kfree(mr);
201 		return ERR_PTR(err);
202 	}
203 	mr->mmkey.key = mr->ibmr.frmr.handle;
204 	init_waitqueue_head(&mr->mmkey.wait);
205 
206 	return mr;
207 }
208 
209 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
210 				       int access_flags, int access_mode,
211 				       int ndescs)
212 {
213 	struct ib_frmr_key key = {
214 		.access_flags =
215 			get_unchangeable_access_flags(dev, access_flags),
216 		.vendor_key = access_mode == MLX5_MKC_ACCESS_MODE_MTT ?
217 				      0 :
218 				      MLX5_FRMR_POOLS_KEY_ACCESS_MODE_KSM_MASK,
219 		.num_dma_blocks = ndescs,
220 		.kernel_vendor_key = 0, /* no PH and no ST index */
221 	};
222 	struct mlx5_ib_mr *mr;
223 	int ret;
224 
225 	mr = kzalloc_obj(*mr);
226 	if (!mr)
227 		return ERR_PTR(-ENOMEM);
228 
229 	init_waitqueue_head(&mr->mmkey.wait);
230 
231 	mr->ibmr.frmr.key = key;
232 	ret = ib_frmr_pool_pop(&dev->ib_dev, &mr->ibmr);
233 	if (ret) {
234 		kfree(mr);
235 		return ERR_PTR(ret);
236 	}
237 	mr->mmkey.key = mr->ibmr.frmr.handle;
238 	mr->mmkey.type = MLX5_MKEY_MR;
239 
240 	return mr;
241 }
242 
243 static int mlx5r_create_mkeys(struct ib_device *device, struct ib_frmr_key *key,
244 			      u32 *handles, unsigned int count)
245 {
246 	int access_mode =
247 		key->vendor_key & MLX5_FRMR_POOLS_KEY_ACCESS_MODE_KSM_MASK ?
248 			MLX5_MKC_ACCESS_MODE_KSM :
249 			MLX5_MKC_ACCESS_MODE_MTT;
250 
251 	struct mlx5_ib_dev *dev = to_mdev(device);
252 	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
253 	u16 st_index;
254 	void *mkc;
255 	u32 *in;
256 	int err, i;
257 	u8 ph;
258 
259 	in = kzalloc(inlen, GFP_KERNEL);
260 	if (!in)
261 		return -ENOMEM;
262 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
263 
264 	set_mkc_access_pd_addr_fields(mkc, key->access_flags, 0, dev->umrc.pd);
265 	MLX5_SET(mkc, mkc, free, 1);
266 	MLX5_SET(mkc, mkc, umr_en, 1);
267 	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
268 	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
269 	MLX5_SET(mkc, mkc, ma_translation_mode, !!key->ats);
270 	MLX5_SET(mkc, mkc, translations_octword_size,
271 		 get_mkc_octo_size(access_mode, key->num_dma_blocks));
272 	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
273 
274 	st_index = key->kernel_vendor_key &
275 		   MLX5_FRMR_POOLS_KERNEL_KEY_ST_INDEX_MASK;
276 	ph = key->kernel_vendor_key & MLX5_FRMR_POOLS_KERNEL_KEY_PH_MASK;
277 	if (ph) {
278 		/* Normalize ph: swap MLX5_IB_NO_PH for 0 */
279 		if (ph == MLX5_IB_NO_PH)
280 			ph = 0;
281 		MLX5_SET(mkc, mkc, pcie_tph_en, 1);
282 		MLX5_SET(mkc, mkc, pcie_tph_ph, ph);
283 		if (st_index != MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX)
284 			MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index,
285 				 st_index);
286 	}
287 
288 	for (i = 0; i < count; i++) {
289 		assign_mkey_variant(dev, handles + i, in);
290 		err = mlx5_core_create_mkey(dev->mdev, handles + i, in, inlen);
291 		if (err)
292 			goto free_in;
293 	}
294 free_in:
295 	kfree(in);
296 	if (err)
297 		for (; i > 0; i--)
298 			mlx5_core_destroy_mkey(dev->mdev, handles[i]);
299 	return err;
300 }
301 
302 static void mlx5r_destroy_mkeys(struct ib_device *device, u32 *handles,
303 				unsigned int count)
304 {
305 	struct mlx5_ib_dev *dev = to_mdev(device);
306 	int i, err;
307 
308 	for (i = 0; i < count; i++) {
309 		err = mlx5_core_destroy_mkey(dev->mdev, handles[i]);
310 		if (err)
311 			pr_warn_ratelimited(
312 				"mlx5_ib: failed to destroy mkey %d: %d",
313 				handles[i], err);
314 	}
315 }
316 
317 static int mlx5r_build_frmr_key(struct ib_device *device,
318 				const struct ib_frmr_key *in,
319 				struct ib_frmr_key *out)
320 {
321 	struct mlx5_ib_dev *dev = to_mdev(device);
322 
323 	/* check HW capabilities of users requested frmr key */
324 	if ((in->ats && !MLX5_CAP_GEN(dev->mdev, ats)) ||
325 	    ilog2(in->num_dma_blocks) > mkey_max_umr_order(dev))
326 		return -EOPNOTSUPP;
327 
328 	if (in->vendor_key & ~MLX5_FRMR_POOLS_KEY_VENDOR_KEY_SUPPORTED)
329 		return -EOPNOTSUPP;
330 
331 	out->ats = in->ats;
332 	out->access_flags =
333 		get_unchangeable_access_flags(dev, in->access_flags);
334 	out->vendor_key = in->vendor_key;
335 	out->num_dma_blocks = in->num_dma_blocks;
336 
337 	return 0;
338 }
339 
340 static struct ib_frmr_pool_ops mlx5r_frmr_pool_ops = {
341 	.create_frmrs = mlx5r_create_mkeys,
342 	.destroy_frmrs = mlx5r_destroy_mkeys,
343 	.build_key = mlx5r_build_frmr_key,
344 };
345 
346 int mlx5r_frmr_pools_init(struct ib_device *device)
347 {
348 	struct mlx5_ib_dev *dev = to_mdev(device);
349 
350 	mutex_init(&dev->slow_path_mutex);
351 	return ib_frmr_pools_init(device, &mlx5r_frmr_pool_ops);
352 }
353 
354 void mlx5r_frmr_pools_cleanup(struct ib_device *device)
355 {
356 	ib_frmr_pools_cleanup(device);
357 }
358 
359 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
360 {
361 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
362 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
363 	struct mlx5_ib_mr *mr;
364 	void *mkc;
365 	u32 *in;
366 	int err;
367 
368 	mr = kzalloc_obj(*mr);
369 	if (!mr)
370 		return ERR_PTR(-ENOMEM);
371 
372 	in = kzalloc(inlen, GFP_KERNEL);
373 	if (!in) {
374 		err = -ENOMEM;
375 		goto err_free;
376 	}
377 
378 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
379 
380 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
381 	MLX5_SET(mkc, mkc, length64, 1);
382 	set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0,
383 				      pd);
384 	MLX5_SET(mkc, mkc, ma_translation_mode, MLX5_CAP_GEN(dev->mdev, ats));
385 
386 	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
387 	if (err)
388 		goto err_in;
389 
390 	kfree(in);
391 	mr->mmkey.type = MLX5_MKEY_MR;
392 	mr->ibmr.lkey = mr->mmkey.key;
393 	mr->ibmr.rkey = mr->mmkey.key;
394 	mr->umem = NULL;
395 
396 	return &mr->ibmr;
397 
398 err_in:
399 	kfree(in);
400 
401 err_free:
402 	kfree(mr);
403 
404 	return ERR_PTR(err);
405 }
406 
407 static int get_octo_len(u64 addr, u64 len, int page_shift)
408 {
409 	u64 page_size = 1ULL << page_shift;
410 	u64 offset;
411 	int npages;
412 
413 	offset = addr & (page_size - 1);
414 	npages = ALIGN(len + offset, page_size) >> page_shift;
415 	return (npages + 1) / 2;
416 }
417 
418 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
419 			  u64 length, int access_flags, u64 iova)
420 {
421 	mr->ibmr.lkey = mr->mmkey.key;
422 	mr->ibmr.rkey = mr->mmkey.key;
423 	mr->ibmr.length = length;
424 	mr->ibmr.device = &dev->ib_dev;
425 	mr->ibmr.iova = iova;
426 	mr->access_flags = access_flags;
427 }
428 
429 static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
430 						  u64 iova)
431 {
432 	/*
433 	 * The alignment of iova has already been checked upon entering
434 	 * UVERBS_METHOD_REG_DMABUF_MR
435 	 */
436 	umem->iova = iova;
437 	return PAGE_SIZE;
438 }
439 
440 static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
441 					     struct ib_umem *umem, u64 iova,
442 					     int access_flags, int access_mode,
443 					     u16 st_index, u8 ph)
444 {
445 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
446 	struct mlx5_ib_mr *mr;
447 	unsigned long page_size;
448 
449 	if (umem->is_dmabuf)
450 		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
451 	else
452 		page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova,
453 							 access_mode);
454 	if (WARN_ON(!page_size))
455 		return ERR_PTR(-EINVAL);
456 
457 	mr = _mlx5_frmr_pool_alloc(dev, umem, access_flags, access_mode,
458 				   page_size, st_index, ph);
459 	if (IS_ERR(mr))
460 		return mr;
461 
462 	mr->mmkey.type = MLX5_MKEY_MR;
463 	mr->ibmr.pd = pd;
464 	mr->umem = umem;
465 	mr->page_shift = order_base_2(page_size);
466 	set_mr_fields(dev, mr, umem->length, access_flags, iova);
467 
468 	return mr;
469 }
470 
471 static struct ib_mr *
472 reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_flags,
473 			    u32 crossed_lkey)
474 {
475 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
476 	int access_mode = MLX5_MKC_ACCESS_MODE_CROSSING;
477 	struct mlx5_ib_mr *mr;
478 	void *mkc;
479 	int inlen;
480 	u32 *in;
481 	int err;
482 
483 	if (!MLX5_CAP_GEN(dev->mdev, crossing_vhca_mkey))
484 		return ERR_PTR(-EOPNOTSUPP);
485 
486 	mr = kzalloc_obj(*mr);
487 	if (!mr)
488 		return ERR_PTR(-ENOMEM);
489 
490 	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
491 	in = kvzalloc(inlen, GFP_KERNEL);
492 	if (!in) {
493 		err = -ENOMEM;
494 		goto err_1;
495 	}
496 
497 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
498 	MLX5_SET(mkc, mkc, crossing_target_vhca_id,
499 		 MLX5_CAP_GEN(dev->mdev, vhca_id));
500 	MLX5_SET(mkc, mkc, translations_octword_size, crossed_lkey);
501 	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
502 	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
503 
504 	/* for this crossing mkey IOVA should be 0 and len should be IOVA + len */
505 	set_mkc_access_pd_addr_fields(mkc, access_flags, 0, pd);
506 	MLX5_SET64(mkc, mkc, len, iova + length);
507 
508 	MLX5_SET(mkc, mkc, free, 0);
509 	MLX5_SET(mkc, mkc, umr_en, 0);
510 	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
511 	if (err)
512 		goto err_2;
513 
514 	mr->mmkey.type = MLX5_MKEY_MR;
515 	set_mr_fields(dev, mr, length, access_flags, iova);
516 	mr->ibmr.pd = pd;
517 	kvfree(in);
518 	mlx5_ib_dbg(dev, "crossing mkey = 0x%x\n", mr->mmkey.key);
519 
520 	return &mr->ibmr;
521 err_2:
522 	kvfree(in);
523 err_1:
524 	kfree(mr);
525 	return ERR_PTR(err);
526 }
527 
528 /*
529  * If ibmr is NULL it will be allocated by reg_create.
530  * Else, the given ibmr will be used.
531  */
532 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
533 				     u64 iova, int access_flags,
534 				     unsigned long page_size, bool populate,
535 				     int access_mode, u16 st_index, u8 ph)
536 {
537 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
538 	struct mlx5_ib_mr *mr;
539 	__be64 *pas;
540 	void *mkc;
541 	int inlen;
542 	u32 *in;
543 	int err;
544 	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)) &&
545 		(access_mode == MLX5_MKC_ACCESS_MODE_MTT) &&
546 		(ph == MLX5_IB_NO_PH);
547 	bool ksm_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
548 
549 	if (!page_size)
550 		return ERR_PTR(-EINVAL);
551 	mr = kzalloc_obj(*mr);
552 	if (!mr)
553 		return ERR_PTR(-ENOMEM);
554 
555 	mr->ibmr.pd = pd;
556 	mr->access_flags = access_flags;
557 	mr->page_shift = order_base_2(page_size);
558 
559 	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
560 	if (populate)
561 		inlen += sizeof(*pas) *
562 			 roundup(ib_umem_num_dma_blocks(umem, page_size), 2);
563 	in = kvzalloc(inlen, GFP_KERNEL);
564 	if (!in) {
565 		err = -ENOMEM;
566 		goto err_1;
567 	}
568 	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
569 	if (populate) {
570 		if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND || ksm_mode)) {
571 			err = -EINVAL;
572 			goto err_2;
573 		}
574 		mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
575 				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
576 	}
577 
578 	/* The pg_access bit allows setting the access flags
579 	 * in the page list submitted with the command.
580 	 */
581 	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
582 
583 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
584 	set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
585 				      populate ? pd : dev->umrc.pd);
586 	/* In case a data direct flow, overwrite the pdn field by its internal kernel PD */
587 	if (umem->is_dmabuf && ksm_mode)
588 		MLX5_SET(mkc, mkc, pd, dev->ddr.pdn);
589 
590 	MLX5_SET(mkc, mkc, free, !populate);
591 	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode);
592 	MLX5_SET(mkc, mkc, umr_en, 1);
593 
594 	MLX5_SET64(mkc, mkc, len, umem->length);
595 	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
596 	if (ksm_mode)
597 		MLX5_SET(mkc, mkc, translations_octword_size,
598 			 get_octo_len(iova, umem->length, mr->page_shift) * 2);
599 	else
600 		MLX5_SET(mkc, mkc, translations_octword_size,
601 			 get_octo_len(iova, umem->length, mr->page_shift));
602 	MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
603 	if (mlx5_umem_needs_ats(dev, umem, access_flags))
604 		MLX5_SET(mkc, mkc, ma_translation_mode, 1);
605 	if (populate) {
606 		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
607 			 get_octo_len(iova, umem->length, mr->page_shift));
608 	}
609 
610 	if (ph != MLX5_IB_NO_PH) {
611 		MLX5_SET(mkc, mkc, pcie_tph_en, 1);
612 		MLX5_SET(mkc, mkc, pcie_tph_ph, ph);
613 		if (st_index != MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX)
614 			MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index, st_index);
615 	}
616 
617 	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
618 	if (err) {
619 		mlx5_ib_warn(dev, "create mkey failed\n");
620 		goto err_2;
621 	}
622 	mr->mmkey.type = MLX5_MKEY_MR;
623 	mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift);
624 	mr->umem = umem;
625 	set_mr_fields(dev, mr, umem->length, access_flags, iova);
626 	kvfree(in);
627 
628 	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
629 
630 	return mr;
631 
632 err_2:
633 	kvfree(in);
634 err_1:
635 	kfree(mr);
636 	return ERR_PTR(err);
637 }
638 
639 static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
640 				       u64 length, int acc, int mode)
641 {
642 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
643 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
644 	struct mlx5_ib_mr *mr;
645 	void *mkc;
646 	u32 *in;
647 	int err;
648 
649 	mr = kzalloc_obj(*mr);
650 	if (!mr)
651 		return ERR_PTR(-ENOMEM);
652 
653 	in = kzalloc(inlen, GFP_KERNEL);
654 	if (!in) {
655 		err = -ENOMEM;
656 		goto err_free;
657 	}
658 
659 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
660 
661 	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
662 	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
663 	MLX5_SET64(mkc, mkc, len, length);
664 	set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
665 
666 	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
667 	if (err)
668 		goto err_in;
669 
670 	kfree(in);
671 
672 	set_mr_fields(dev, mr, length, acc, start_addr);
673 
674 	return &mr->ibmr;
675 
676 err_in:
677 	kfree(in);
678 
679 err_free:
680 	kfree(mr);
681 
682 	return ERR_PTR(err);
683 }
684 
685 int mlx5_ib_advise_mr(struct ib_pd *pd,
686 		      enum ib_uverbs_advise_mr_advice advice,
687 		      u32 flags,
688 		      struct ib_sge *sg_list,
689 		      u32 num_sge,
690 		      struct uverbs_attr_bundle *attrs)
691 {
692 	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
693 	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
694 	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
695 		return -EOPNOTSUPP;
696 
697 	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
698 					 sg_list, num_sge);
699 }
700 
701 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
702 				struct ib_dm_mr_attr *attr,
703 				struct uverbs_attr_bundle *attrs)
704 {
705 	struct mlx5_ib_dm *mdm = to_mdm(dm);
706 	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
707 	u64 start_addr = mdm->dev_addr + attr->offset;
708 	int mode;
709 
710 	switch (mdm->type) {
711 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
712 		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
713 			return ERR_PTR(-EINVAL);
714 
715 		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
716 		start_addr -= pci_resource_start(dev->pdev, 0);
717 		break;
718 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
719 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
720 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
721 	case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
722 		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
723 			return ERR_PTR(-EINVAL);
724 
725 		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
726 		break;
727 	default:
728 		return ERR_PTR(-EINVAL);
729 	}
730 
731 	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
732 				 attr->access_flags, mode);
733 }
734 
735 static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
736 				    u64 iova, int access_flags,
737 				    struct ib_dmah *dmah)
738 {
739 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
740 	struct mlx5_ib_mr *mr = NULL;
741 	bool xlt_with_umr;
742 	u16 st_index = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX;
743 	u8 ph = MLX5_IB_NO_PH;
744 	int err;
745 
746 	if (dmah) {
747 		struct mlx5_ib_dmah *mdmah = to_mdmah(dmah);
748 
749 		ph = dmah->ph;
750 		if (dmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
751 			st_index = mdmah->st_index;
752 	}
753 
754 	xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length);
755 	if (xlt_with_umr) {
756 		mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
757 					MLX5_MKC_ACCESS_MODE_MTT,
758 					st_index, ph);
759 	} else {
760 		unsigned long page_size = mlx5_umem_mkc_find_best_pgsz(
761 				dev, umem, iova, MLX5_MKC_ACCESS_MODE_MTT);
762 
763 		mutex_lock(&dev->slow_path_mutex);
764 		mr = reg_create(pd, umem, iova, access_flags, page_size,
765 				true, MLX5_MKC_ACCESS_MODE_MTT,
766 				st_index, ph);
767 		mutex_unlock(&dev->slow_path_mutex);
768 	}
769 	if (IS_ERR(mr)) {
770 		ib_umem_release(umem);
771 		return ERR_CAST(mr);
772 	}
773 
774 	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
775 
776 	atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
777 
778 	if (xlt_with_umr) {
779 		/*
780 		 * If the MR was created with reg_create then it will be
781 		 * configured properly but left disabled. It is safe to go ahead
782 		 * and configure it again via UMR while enabling it.
783 		 */
784 		err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
785 		if (err) {
786 			mlx5_ib_dereg_mr(&mr->ibmr, NULL);
787 			return ERR_PTR(err);
788 		}
789 	}
790 	return &mr->ibmr;
791 }
792 
793 static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
794 					u64 iova, int access_flags,
795 					struct ib_udata *udata)
796 {
797 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
798 	struct ib_umem_odp *odp;
799 	struct mlx5_ib_mr *mr;
800 	int err;
801 
802 	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
803 		return ERR_PTR(-EOPNOTSUPP);
804 
805 	err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
806 	if (err)
807 		return ERR_PTR(err);
808 	if (!start && length == U64_MAX) {
809 		if (iova != 0)
810 			return ERR_PTR(-EINVAL);
811 		if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
812 			return ERR_PTR(-EINVAL);
813 
814 		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
815 		if (IS_ERR(mr))
816 			return ERR_CAST(mr);
817 		return &mr->ibmr;
818 	}
819 
820 	/* ODP requires xlt update via umr to work. */
821 	if (!mlx5r_umr_can_load_pas(dev, length))
822 		return ERR_PTR(-EINVAL);
823 
824 	odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
825 			      &mlx5_mn_ops);
826 	if (IS_ERR(odp))
827 		return ERR_CAST(odp);
828 
829 	mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags,
830 				MLX5_MKC_ACCESS_MODE_MTT,
831 				MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX,
832 				MLX5_IB_NO_PH);
833 	if (IS_ERR(mr)) {
834 		ib_umem_release(&odp->umem);
835 		return ERR_CAST(mr);
836 	}
837 	xa_init(&mr->implicit_children);
838 
839 	odp->private = mr;
840 	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
841 	if (err)
842 		goto err_dereg_mr;
843 
844 	err = mlx5_ib_init_odp_mr(mr);
845 	if (err)
846 		goto err_dereg_mr;
847 	return &mr->ibmr;
848 
849 err_dereg_mr:
850 	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
851 	return ERR_PTR(err);
852 }
853 
854 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
855 				  u64 iova, int access_flags,
856 				  struct ib_dmah *dmah,
857 				  struct ib_udata *udata)
858 {
859 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
860 	struct ib_umem *umem;
861 	int err;
862 
863 	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
864 	    ((access_flags & IB_ACCESS_ON_DEMAND) && dmah))
865 		return ERR_PTR(-EOPNOTSUPP);
866 
867 	mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
868 		    start, iova, length, access_flags);
869 
870 	err = mlx5r_umr_resource_init(dev);
871 	if (err)
872 		return ERR_PTR(err);
873 
874 	if (access_flags & IB_ACCESS_ON_DEMAND)
875 		return create_user_odp_mr(pd, start, length, iova, access_flags,
876 					  udata);
877 	umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
878 	if (IS_ERR(umem))
879 		return ERR_CAST(umem);
880 	return create_real_mr(pd, umem, iova, access_flags, dmah);
881 }
882 
883 static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
884 {
885 	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
886 	struct mlx5_ib_mr *mr = umem_dmabuf->private;
887 
888 	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
889 
890 	if (!umem_dmabuf->sgt || !mr)
891 		return;
892 
893 	mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
894 	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
895 }
896 
897 static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
898 	.allow_peer2peer = 1,
899 	.invalidate_mappings = mlx5_ib_dmabuf_invalidate_cb,
900 };
901 
902 static struct ib_mr *
903 reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
904 		   u64 offset, u64 length, u64 virt_addr,
905 		   int fd, int access_flags, int access_mode,
906 		   struct ib_dmah *dmah)
907 {
908 	bool pinned_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
909 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
910 	struct mlx5_ib_mr *mr = NULL;
911 	struct ib_umem_dmabuf *umem_dmabuf;
912 	u16 st_index = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX;
913 	u8 ph = MLX5_IB_NO_PH;
914 	int err;
915 
916 	err = mlx5r_umr_resource_init(dev);
917 	if (err)
918 		return ERR_PTR(err);
919 
920 	if (!pinned_mode)
921 		umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev,
922 						 offset, length, fd,
923 						 access_flags,
924 						 &mlx5_ib_dmabuf_attach_ops);
925 	else if (dma_device)
926 		umem_dmabuf = ib_umem_dmabuf_get_pinned_with_dma_device(&dev->ib_dev,
927 				dma_device, offset, length,
928 				fd, access_flags);
929 	else
930 		umem_dmabuf = ib_umem_dmabuf_get_pinned(
931 			&dev->ib_dev, offset, length, fd, access_flags);
932 
933 	if (IS_ERR(umem_dmabuf)) {
934 		mlx5_ib_dbg(dev, "umem_dmabuf get failed (%pe)\n", umem_dmabuf);
935 		return ERR_CAST(umem_dmabuf);
936 	}
937 
938 	if (dmah) {
939 		struct mlx5_ib_dmah *mdmah = to_mdmah(dmah);
940 
941 		ph = dmah->ph;
942 		if (dmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
943 			st_index = mdmah->st_index;
944 	}
945 
946 	mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
947 				access_flags, access_mode,
948 				st_index, ph);
949 	if (IS_ERR(mr)) {
950 		ib_umem_release(&umem_dmabuf->umem);
951 		return ERR_CAST(mr);
952 	}
953 
954 	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
955 
956 	atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
957 	umem_dmabuf->private = mr;
958 	if (!pinned_mode) {
959 		err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
960 		if (err)
961 			goto err_dereg_mr;
962 	} else {
963 		mr->data_direct = true;
964 	}
965 
966 	err = mlx5_ib_init_dmabuf_mr(mr);
967 	if (err)
968 		goto err_dereg_mr;
969 	return &mr->ibmr;
970 
971 err_dereg_mr:
972 	__mlx5_ib_dereg_mr(&mr->ibmr);
973 	return ERR_PTR(err);
974 }
975 
976 static struct ib_mr *
977 reg_user_mr_dmabuf_by_data_direct(struct ib_pd *pd, u64 offset,
978 				  u64 length, u64 virt_addr,
979 				  int fd, int access_flags)
980 {
981 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
982 	struct mlx5_data_direct_dev *data_direct_dev;
983 	struct ib_mr *crossing_mr;
984 	struct ib_mr *crossed_mr;
985 	int ret = 0;
986 
987 	/* As of HW behaviour the IOVA must be page aligned in KSM mode */
988 	if (!PAGE_ALIGNED(virt_addr) || (access_flags & IB_ACCESS_ON_DEMAND))
989 		return ERR_PTR(-EOPNOTSUPP);
990 
991 	mutex_lock(&dev->data_direct_lock);
992 	data_direct_dev = dev->data_direct_dev;
993 	if (!data_direct_dev) {
994 		ret = -EINVAL;
995 		goto end;
996 	}
997 
998 	/* If no device's 'data direct mkey' with RO flags exists
999 	 * mask it out accordingly.
1000 	 */
1001 	if (!dev->ddr.mkey_ro_valid)
1002 		access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
1003 	crossed_mr = reg_user_mr_dmabuf(pd, &data_direct_dev->pdev->dev,
1004 					offset, length, virt_addr, fd,
1005 					access_flags, MLX5_MKC_ACCESS_MODE_KSM,
1006 					NULL);
1007 	if (IS_ERR(crossed_mr)) {
1008 		ret = PTR_ERR(crossed_mr);
1009 		goto end;
1010 	}
1011 
1012 	mutex_lock(&dev->slow_path_mutex);
1013 	crossing_mr = reg_create_crossing_vhca_mr(pd, virt_addr, length, access_flags,
1014 						  crossed_mr->lkey);
1015 	mutex_unlock(&dev->slow_path_mutex);
1016 	if (IS_ERR(crossing_mr)) {
1017 		__mlx5_ib_dereg_mr(crossed_mr);
1018 		ret = PTR_ERR(crossing_mr);
1019 		goto end;
1020 	}
1021 
1022 	list_add_tail(&to_mmr(crossed_mr)->dd_node, &dev->data_direct_mr_list);
1023 	to_mmr(crossing_mr)->dd_crossed_mr = to_mmr(crossed_mr);
1024 	to_mmr(crossing_mr)->data_direct = true;
1025 end:
1026 	mutex_unlock(&dev->data_direct_lock);
1027 	return ret ? ERR_PTR(ret) : crossing_mr;
1028 }
1029 
1030 struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
1031 					 u64 length, u64 virt_addr,
1032 					 int fd, int access_flags,
1033 					 struct ib_dmah *dmah,
1034 					 struct uverbs_attr_bundle *attrs)
1035 {
1036 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1037 	int mlx5_access_flags = 0;
1038 	int err;
1039 
1040 	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
1041 	    !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1042 		return ERR_PTR(-EOPNOTSUPP);
1043 
1044 	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS)) {
1045 		err = uverbs_get_flags32(&mlx5_access_flags, attrs,
1046 					 MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
1047 					 MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT);
1048 		if (err)
1049 			return ERR_PTR(err);
1050 	}
1051 
1052 	mlx5_ib_dbg(dev,
1053 		    "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x, mlx5_access_flags 0x%x\n",
1054 		    offset, virt_addr, length, fd, access_flags, mlx5_access_flags);
1055 
1056 	/* dmabuf requires xlt update via umr to work. */
1057 	if (!mlx5r_umr_can_load_pas(dev, length))
1058 		return ERR_PTR(-EINVAL);
1059 
1060 	if (mlx5_access_flags & MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT)
1061 		return reg_user_mr_dmabuf_by_data_direct(pd, offset, length, virt_addr,
1062 							 fd, access_flags);
1063 
1064 	return reg_user_mr_dmabuf(pd, NULL, offset, length, virt_addr, fd,
1065 				  access_flags, MLX5_MKC_ACCESS_MODE_MTT, dmah);
1066 }
1067 
1068 /*
1069  * True if the change in access flags can be done via UMR, only some access
1070  * flags can be updated.
1071  */
1072 static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
1073 				     unsigned int current_access_flags,
1074 				     unsigned int target_access_flags)
1075 {
1076 	unsigned int diffs = current_access_flags ^ target_access_flags;
1077 
1078 	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
1079 		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
1080 		      IB_ACCESS_REMOTE_ATOMIC))
1081 		return false;
1082 	return mlx5r_umr_can_reconfig(dev, current_access_flags,
1083 				      target_access_flags);
1084 }
1085 
1086 static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
1087 				  struct ib_umem *new_umem,
1088 				  int new_access_flags, u64 iova,
1089 				  unsigned long *page_size)
1090 {
1091 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1092 	u8 access_mode;
1093 
1094 	/* We only track the allocated sizes of MRs from the frmr pools */
1095 	if (!mr->ibmr.frmr.pool)
1096 		return false;
1097 	if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
1098 		return false;
1099 
1100 	access_mode = mr->ibmr.frmr.key.vendor_key &
1101 				      MLX5_FRMR_POOLS_KEY_ACCESS_MODE_KSM_MASK ?
1102 			      MLX5_MKC_ACCESS_MODE_KSM :
1103 			      MLX5_MKC_ACCESS_MODE_MTT;
1104 
1105 	*page_size =
1106 		mlx5_umem_mkc_find_best_pgsz(dev, new_umem, iova, access_mode);
1107 	if (WARN_ON(!*page_size))
1108 		return false;
1109 	return (mr->ibmr.frmr.key.num_dma_blocks) >=
1110 	       ib_umem_num_dma_blocks(new_umem, *page_size);
1111 }
1112 
1113 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
1114 			 int access_flags, int flags, struct ib_umem *new_umem,
1115 			 u64 iova, unsigned long page_size)
1116 {
1117 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1118 	int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE;
1119 	struct ib_umem *old_umem = mr->umem;
1120 	int err;
1121 
1122 	/*
1123 	 * To keep everything simple the MR is revoked before we start to mess
1124 	 * with it. This ensure the change is atomic relative to any use of the
1125 	 * MR.
1126 	 */
1127 	err = mlx5r_umr_revoke_mr(mr);
1128 	if (err)
1129 		return err;
1130 
1131 	if (flags & IB_MR_REREG_PD) {
1132 		mr->ibmr.pd = pd;
1133 		upd_flags |= MLX5_IB_UPD_XLT_PD;
1134 	}
1135 	if (flags & IB_MR_REREG_ACCESS) {
1136 		mr->access_flags = access_flags;
1137 		upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
1138 	}
1139 
1140 	mr->ibmr.iova = iova;
1141 	mr->ibmr.length = new_umem->length;
1142 	mr->page_shift = order_base_2(page_size);
1143 	mr->umem = new_umem;
1144 	err = mlx5r_umr_update_mr_pas(mr, upd_flags);
1145 	if (err) {
1146 		/*
1147 		 * The MR is revoked at this point so there is no issue to free
1148 		 * new_umem.
1149 		 */
1150 		mr->umem = old_umem;
1151 		return err;
1152 	}
1153 
1154 	atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages);
1155 	ib_umem_release(old_umem);
1156 	atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages);
1157 	return 0;
1158 }
1159 
1160 struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1161 				    u64 length, u64 iova, int new_access_flags,
1162 				    struct ib_pd *new_pd,
1163 				    struct ib_udata *udata)
1164 {
1165 	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
1166 	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1167 	int err;
1168 
1169 	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || mr->data_direct ||
1170 	    (mr->ibmr.frmr.key.kernel_vendor_key &
1171 	     MLX5_FRMR_POOLS_KERNEL_KEY_PH_MASK) != 0)
1172 		return ERR_PTR(-EOPNOTSUPP);
1173 
1174 	mlx5_ib_dbg(
1175 		dev,
1176 		"start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
1177 		start, iova, length, new_access_flags);
1178 
1179 	if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
1180 		return ERR_PTR(-EOPNOTSUPP);
1181 
1182 	if (!(flags & IB_MR_REREG_ACCESS))
1183 		new_access_flags = mr->access_flags;
1184 	if (!(flags & IB_MR_REREG_PD))
1185 		new_pd = ib_mr->pd;
1186 
1187 	if (!(flags & IB_MR_REREG_TRANS)) {
1188 		struct ib_umem *umem;
1189 
1190 		/* Fast path for PD/access change */
1191 		if (can_use_umr_rereg_access(dev, mr->access_flags,
1192 					     new_access_flags)) {
1193 			err = mlx5r_umr_rereg_pd_access(mr, new_pd,
1194 							new_access_flags);
1195 			if (err)
1196 				return ERR_PTR(err);
1197 			return NULL;
1198 		}
1199 		/* DM or ODP MR's don't have a normal umem so we can't re-use it */
1200 		if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
1201 			goto recreate;
1202 
1203 		/*
1204 		 * Only one active MR can refer to a umem at one time, revoke
1205 		 * the old MR before assigning the umem to the new one.
1206 		 */
1207 		err = mlx5r_umr_revoke_mr(mr);
1208 		if (err)
1209 			return ERR_PTR(err);
1210 		umem = mr->umem;
1211 		mr->umem = NULL;
1212 		atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
1213 
1214 		return create_real_mr(new_pd, umem, mr->ibmr.iova,
1215 				      new_access_flags, NULL);
1216 	}
1217 
1218 	/*
1219 	 * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does
1220 	 * but the logic around releasing the umem is different
1221 	 */
1222 	if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
1223 		goto recreate;
1224 
1225 	if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
1226 	    can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) {
1227 		struct ib_umem *new_umem;
1228 		unsigned long page_size;
1229 
1230 		new_umem = ib_umem_get(&dev->ib_dev, start, length,
1231 				       new_access_flags);
1232 		if (IS_ERR(new_umem))
1233 			return ERR_CAST(new_umem);
1234 
1235 		/* Fast path for PAS change */
1236 		if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova,
1237 					  &page_size)) {
1238 			err = umr_rereg_pas(mr, new_pd, new_access_flags, flags,
1239 					    new_umem, iova, page_size);
1240 			if (err) {
1241 				ib_umem_release(new_umem);
1242 				return ERR_PTR(err);
1243 			}
1244 			return NULL;
1245 		}
1246 		return create_real_mr(new_pd, new_umem, iova, new_access_flags, NULL);
1247 	}
1248 
1249 	/*
1250 	 * Everything else has no state we can preserve, just create a new MR
1251 	 * from scratch
1252 	 */
1253 recreate:
1254 	return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
1255 				   new_access_flags, NULL, udata);
1256 }
1257 
1258 static int
1259 mlx5_alloc_priv_descs(struct ib_device *device,
1260 		      struct mlx5_ib_mr *mr,
1261 		      int ndescs,
1262 		      int desc_size)
1263 {
1264 	struct mlx5_ib_dev *dev = to_mdev(device);
1265 	struct device *ddev = &dev->mdev->pdev->dev;
1266 	int size = ndescs * desc_size;
1267 	int add_size;
1268 	int ret;
1269 
1270 	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1271 	if (is_power_of_2(MLX5_UMR_ALIGN) && add_size) {
1272 		int end = max_t(int, MLX5_UMR_ALIGN, roundup_pow_of_two(size));
1273 
1274 		add_size = min_t(int, end - size, add_size);
1275 	}
1276 
1277 	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1278 	if (!mr->descs_alloc)
1279 		return -ENOMEM;
1280 
1281 	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1282 
1283 	mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
1284 	if (dma_mapping_error(ddev, mr->desc_map)) {
1285 		ret = -ENOMEM;
1286 		goto err;
1287 	}
1288 
1289 	return 0;
1290 err:
1291 	kfree(mr->descs_alloc);
1292 
1293 	return ret;
1294 }
1295 
1296 static void
1297 mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1298 {
1299 	if (!mr->umem && !mr->data_direct &&
1300 	    mr->ibmr.type != IB_MR_TYPE_DM && mr->descs) {
1301 		struct ib_device *device = mr->ibmr.device;
1302 		int size = mr->max_descs * mr->desc_size;
1303 		struct mlx5_ib_dev *dev = to_mdev(device);
1304 
1305 		dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
1306 				 DMA_TO_DEVICE);
1307 		kfree(mr->descs_alloc);
1308 		mr->descs = NULL;
1309 	}
1310 }
1311 
1312 static int mlx5_ib_revoke_data_direct_mr(struct mlx5_ib_mr *mr)
1313 {
1314 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1315 	struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
1316 	int err;
1317 
1318 	lockdep_assert_held(&dev->data_direct_lock);
1319 	mr->revoked = true;
1320 	err = mlx5r_umr_revoke_mr(mr);
1321 	if (WARN_ON(err))
1322 		return err;
1323 
1324 	ib_umem_dmabuf_revoke(umem_dmabuf);
1325 	return 0;
1326 }
1327 
1328 void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev)
1329 {
1330 	struct mlx5_ib_mr *mr, *next;
1331 
1332 	lockdep_assert_held(&dev->data_direct_lock);
1333 
1334 	list_for_each_entry_safe(mr, next, &dev->data_direct_mr_list, dd_node) {
1335 		list_del(&mr->dd_node);
1336 		mlx5_ib_revoke_data_direct_mr(mr);
1337 	}
1338 }
1339 
1340 static int mlx5_umr_revoke_mr_with_lock(struct mlx5_ib_mr *mr)
1341 {
1342 	bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
1343 			      !to_ib_umem_dmabuf(mr->umem)->pinned;
1344 	bool is_odp = is_odp_mr(mr);
1345 	int ret;
1346 
1347 	if (is_odp)
1348 		mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
1349 
1350 	if (is_odp_dma_buf)
1351 		dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv,
1352 			      NULL);
1353 
1354 	ret = mlx5r_umr_revoke_mr(mr);
1355 
1356 	if (is_odp) {
1357 		if (!ret)
1358 			to_ib_umem_odp(mr->umem)->private = NULL;
1359 		mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
1360 	}
1361 
1362 	if (is_odp_dma_buf) {
1363 		if (!ret)
1364 			to_ib_umem_dmabuf(mr->umem)->private = NULL;
1365 		dma_resv_unlock(
1366 			to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
1367 	}
1368 
1369 	return ret;
1370 }
1371 
1372 static int mlx5r_handle_mkey_cleanup(struct mlx5_ib_mr *mr)
1373 {
1374 	bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
1375 			      !to_ib_umem_dmabuf(mr->umem)->pinned;
1376 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1377 	bool is_odp = is_odp_mr(mr);
1378 	int ret;
1379 
1380 	if (mr->ibmr.frmr.pool && !mlx5_umr_revoke_mr_with_lock(mr) &&
1381 	    !ib_frmr_pool_push(mr->ibmr.device, &mr->ibmr))
1382 		return 0;
1383 
1384 	if (is_odp)
1385 		mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
1386 
1387 	if (is_odp_dma_buf)
1388 		dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv,
1389 			      NULL);
1390 	ret = destroy_mkey(dev, mr);
1391 	if (is_odp) {
1392 		if (!ret)
1393 			to_ib_umem_odp(mr->umem)->private = NULL;
1394 		mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
1395 	}
1396 
1397 	if (is_odp_dma_buf) {
1398 		if (!ret)
1399 			to_ib_umem_dmabuf(mr->umem)->private = NULL;
1400 		dma_resv_unlock(
1401 			to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
1402 	}
1403 	return ret;
1404 }
1405 
1406 static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1407 {
1408 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
1409 	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1410 	int rc;
1411 
1412 	/*
1413 	 * Any async use of the mr must hold the refcount, once the refcount
1414 	 * goes to zero no other thread, such as ODP page faults, prefetch, any
1415 	 * UMR activity, etc can touch the mkey. Thus it is safe to destroy it.
1416 	 */
1417 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
1418 	    refcount_read(&mr->mmkey.usecount) != 0 &&
1419 	    xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
1420 		mlx5r_deref_wait_odp_mkey(&mr->mmkey);
1421 
1422 	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
1423 		xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
1424 			   mr->sig, NULL, GFP_KERNEL);
1425 
1426 		if (mr->mtt_mr) {
1427 			rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
1428 			if (rc)
1429 				return rc;
1430 			mr->mtt_mr = NULL;
1431 		}
1432 		if (mr->klm_mr) {
1433 			rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
1434 			if (rc)
1435 				return rc;
1436 			mr->klm_mr = NULL;
1437 		}
1438 
1439 		if (mlx5_core_destroy_psv(dev->mdev,
1440 					  mr->sig->psv_memory.psv_idx))
1441 			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1442 				     mr->sig->psv_memory.psv_idx);
1443 		if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
1444 			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1445 				     mr->sig->psv_wire.psv_idx);
1446 		kfree(mr->sig);
1447 		mr->sig = NULL;
1448 	}
1449 
1450 	/* Stop DMA */
1451 	rc = mlx5r_handle_mkey_cleanup(mr);
1452 	if (rc)
1453 		return rc;
1454 
1455 	if (mr->umem) {
1456 		bool is_odp = is_odp_mr(mr);
1457 
1458 		if (!is_odp)
1459 			atomic_sub(ib_umem_num_pages(mr->umem),
1460 				   &dev->mdev->priv.reg_pages);
1461 		ib_umem_release(mr->umem);
1462 		if (is_odp)
1463 			mlx5_ib_free_odp_mr(mr);
1464 	}
1465 
1466 	if (!mr->ibmr.frmr.pool)
1467 		mlx5_free_priv_descs(mr);
1468 
1469 	kfree(mr);
1470 	return 0;
1471 }
1472 
1473 static int dereg_crossing_data_direct_mr(struct mlx5_ib_dev *dev,
1474 					struct mlx5_ib_mr *mr)
1475 {
1476 	struct mlx5_ib_mr *dd_crossed_mr = mr->dd_crossed_mr;
1477 	int ret;
1478 
1479 	ret = __mlx5_ib_dereg_mr(&mr->ibmr);
1480 	if (ret)
1481 		return ret;
1482 
1483 	mutex_lock(&dev->data_direct_lock);
1484 	if (!dd_crossed_mr->revoked)
1485 		list_del(&dd_crossed_mr->dd_node);
1486 
1487 	ret = __mlx5_ib_dereg_mr(&dd_crossed_mr->ibmr);
1488 	mutex_unlock(&dev->data_direct_lock);
1489 	return ret;
1490 }
1491 
1492 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1493 {
1494 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
1495 	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1496 
1497 	if (mr->data_direct)
1498 		return dereg_crossing_data_direct_mr(dev, mr);
1499 
1500 	return __mlx5_ib_dereg_mr(ibmr);
1501 }
1502 
1503 static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
1504 				   int access_mode, int page_shift)
1505 {
1506 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1507 	void *mkc;
1508 
1509 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1510 
1511 	/* This is only used from the kernel, so setting the PD is OK. */
1512 	set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd);
1513 	MLX5_SET(mkc, mkc, free, 1);
1514 	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1515 	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
1516 	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
1517 	MLX5_SET(mkc, mkc, umr_en, 1);
1518 	MLX5_SET(mkc, mkc, log_page_size, page_shift);
1519 	if (access_mode == MLX5_MKC_ACCESS_MODE_PA ||
1520 	    access_mode == MLX5_MKC_ACCESS_MODE_MTT)
1521 		MLX5_SET(mkc, mkc, ma_translation_mode, MLX5_CAP_GEN(dev->mdev, ats));
1522 }
1523 
1524 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1525 				  int ndescs, int desc_size, int page_shift,
1526 				  int access_mode, u32 *in, int inlen)
1527 {
1528 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1529 	int err;
1530 
1531 	mr->access_mode = access_mode;
1532 	mr->desc_size = desc_size;
1533 	mr->max_descs = ndescs;
1534 
1535 	err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
1536 	if (err)
1537 		return err;
1538 
1539 	mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
1540 
1541 	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1542 	if (err)
1543 		goto err_free_descs;
1544 
1545 	mr->mmkey.type = MLX5_MKEY_MR;
1546 	mr->ibmr.lkey = mr->mmkey.key;
1547 	mr->ibmr.rkey = mr->mmkey.key;
1548 
1549 	return 0;
1550 
1551 err_free_descs:
1552 	mlx5_free_priv_descs(mr);
1553 	return err;
1554 }
1555 
1556 static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
1557 				u32 max_num_sg, u32 max_num_meta_sg,
1558 				int desc_size, int access_mode)
1559 {
1560 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1561 	int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
1562 	int page_shift = 0;
1563 	struct mlx5_ib_mr *mr;
1564 	u32 *in;
1565 	int err;
1566 
1567 	mr = kzalloc_obj(*mr);
1568 	if (!mr)
1569 		return ERR_PTR(-ENOMEM);
1570 
1571 	mr->ibmr.pd = pd;
1572 	mr->ibmr.device = pd->device;
1573 
1574 	in = kzalloc(inlen, GFP_KERNEL);
1575 	if (!in) {
1576 		err = -ENOMEM;
1577 		goto err_free;
1578 	}
1579 
1580 	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
1581 		page_shift = PAGE_SHIFT;
1582 
1583 	err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
1584 				     access_mode, in, inlen);
1585 	if (err)
1586 		goto err_free_in;
1587 
1588 	mr->umem = NULL;
1589 	kfree(in);
1590 
1591 	return mr;
1592 
1593 err_free_in:
1594 	kfree(in);
1595 err_free:
1596 	kfree(mr);
1597 	return ERR_PTR(err);
1598 }
1599 
1600 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1601 				    int ndescs, u32 *in, int inlen)
1602 {
1603 	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
1604 				      PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
1605 				      inlen);
1606 }
1607 
1608 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1609 				    int ndescs, u32 *in, int inlen)
1610 {
1611 	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
1612 				      0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
1613 }
1614 
1615 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1616 				      int max_num_sg, int max_num_meta_sg,
1617 				      u32 *in, int inlen)
1618 {
1619 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1620 	u32 psv_index[2];
1621 	void *mkc;
1622 	int err;
1623 
1624 	mr->sig = kzalloc_obj(*mr->sig);
1625 	if (!mr->sig)
1626 		return -ENOMEM;
1627 
1628 	/* create mem & wire PSVs */
1629 	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
1630 	if (err)
1631 		goto err_free_sig;
1632 
1633 	mr->sig->psv_memory.psv_idx = psv_index[0];
1634 	mr->sig->psv_wire.psv_idx = psv_index[1];
1635 
1636 	mr->sig->sig_status_checked = true;
1637 	mr->sig->sig_err_exists = false;
1638 	/* Next UMR, Arm SIGERR */
1639 	++mr->sig->sigerr_count;
1640 	mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1641 					 sizeof(struct mlx5_klm),
1642 					 MLX5_MKC_ACCESS_MODE_KLMS);
1643 	if (IS_ERR(mr->klm_mr)) {
1644 		err = PTR_ERR(mr->klm_mr);
1645 		goto err_destroy_psv;
1646 	}
1647 	mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1648 					 sizeof(struct mlx5_mtt),
1649 					 MLX5_MKC_ACCESS_MODE_MTT);
1650 	if (IS_ERR(mr->mtt_mr)) {
1651 		err = PTR_ERR(mr->mtt_mr);
1652 		goto err_free_klm_mr;
1653 	}
1654 
1655 	/* Set bsf descriptors for mkey */
1656 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1657 	MLX5_SET(mkc, mkc, bsf_en, 1);
1658 	MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
1659 
1660 	err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
1661 				     MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
1662 	if (err)
1663 		goto err_free_mtt_mr;
1664 
1665 	err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
1666 			      mr->sig, GFP_KERNEL));
1667 	if (err)
1668 		goto err_free_descs;
1669 	return 0;
1670 
1671 err_free_descs:
1672 	destroy_mkey(dev, mr);
1673 	mlx5_free_priv_descs(mr);
1674 err_free_mtt_mr:
1675 	mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
1676 	mr->mtt_mr = NULL;
1677 err_free_klm_mr:
1678 	mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
1679 	mr->klm_mr = NULL;
1680 err_destroy_psv:
1681 	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
1682 		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1683 			     mr->sig->psv_memory.psv_idx);
1684 	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
1685 		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1686 			     mr->sig->psv_wire.psv_idx);
1687 err_free_sig:
1688 	kfree(mr->sig);
1689 
1690 	return err;
1691 }
1692 
1693 static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
1694 					enum ib_mr_type mr_type, u32 max_num_sg,
1695 					u32 max_num_meta_sg)
1696 {
1697 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1698 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1699 	int ndescs = ALIGN(max_num_sg, 4);
1700 	struct mlx5_ib_mr *mr;
1701 	u32 *in;
1702 	int err;
1703 
1704 	mr = kzalloc_obj(*mr);
1705 	if (!mr)
1706 		return ERR_PTR(-ENOMEM);
1707 
1708 	in = kzalloc(inlen, GFP_KERNEL);
1709 	if (!in) {
1710 		err = -ENOMEM;
1711 		goto err_free;
1712 	}
1713 
1714 	mr->ibmr.device = pd->device;
1715 	mr->umem = NULL;
1716 
1717 	switch (mr_type) {
1718 	case IB_MR_TYPE_MEM_REG:
1719 		err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
1720 		break;
1721 	case IB_MR_TYPE_SG_GAPS:
1722 		err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
1723 		break;
1724 	case IB_MR_TYPE_INTEGRITY:
1725 		err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
1726 						 max_num_meta_sg, in, inlen);
1727 		break;
1728 	default:
1729 		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1730 		err = -EINVAL;
1731 	}
1732 
1733 	if (err)
1734 		goto err_free_in;
1735 
1736 	kfree(in);
1737 
1738 	return &mr->ibmr;
1739 
1740 err_free_in:
1741 	kfree(in);
1742 err_free:
1743 	kfree(mr);
1744 	return ERR_PTR(err);
1745 }
1746 
1747 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1748 			       u32 max_num_sg)
1749 {
1750 	return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
1751 }
1752 
1753 struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
1754 					 u32 max_num_sg, u32 max_num_meta_sg)
1755 {
1756 	return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
1757 				  max_num_meta_sg);
1758 }
1759 
1760 int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
1761 {
1762 	struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
1763 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1764 	struct mlx5_ib_mw *mw = to_mmw(ibmw);
1765 	unsigned int ndescs;
1766 	u32 *in = NULL;
1767 	void *mkc;
1768 	int err;
1769 	struct mlx5_ib_alloc_mw req = {};
1770 	struct {
1771 		__u32	comp_mask;
1772 		__u32	response_length;
1773 	} resp = {};
1774 
1775 	if (udata->inlen) {
1776 		err = ib_copy_validate_udata_in_cm(udata, req, reserved2, 0);
1777 		if (err)
1778 			return err;
1779 	}
1780 
1781 	if (req.reserved1 || req.reserved2)
1782 		return -EOPNOTSUPP;
1783 
1784 	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
1785 
1786 	in = kzalloc(inlen, GFP_KERNEL);
1787 	if (!in)
1788 		return -ENOMEM;
1789 
1790 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1791 
1792 	MLX5_SET(mkc, mkc, free, 1);
1793 	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1794 	MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
1795 	MLX5_SET(mkc, mkc, umr_en, 1);
1796 	MLX5_SET(mkc, mkc, lr, 1);
1797 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
1798 	MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
1799 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
1800 
1801 	err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
1802 	if (err)
1803 		goto free;
1804 
1805 	mw->mmkey.type = MLX5_MKEY_MW;
1806 	ibmw->rkey = mw->mmkey.key;
1807 	mw->mmkey.ndescs = ndescs;
1808 
1809 	resp.response_length =
1810 		min(offsetofend(typeof(resp), response_length), udata->outlen);
1811 	if (resp.response_length) {
1812 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
1813 		if (err)
1814 			goto free_mkey;
1815 	}
1816 
1817 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1818 		err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
1819 		if (err)
1820 			goto free_mkey;
1821 	}
1822 
1823 	kfree(in);
1824 	return 0;
1825 
1826 free_mkey:
1827 	mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
1828 free:
1829 	kfree(in);
1830 	return err;
1831 }
1832 
1833 int mlx5_ib_dealloc_mw(struct ib_mw *mw)
1834 {
1835 	struct mlx5_ib_dev *dev = to_mdev(mw->device);
1836 	struct mlx5_ib_mw *mmw = to_mmw(mw);
1837 
1838 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
1839 	    xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
1840 		/*
1841 		 * pagefault_single_data_segment() may be accessing mmw
1842 		 * if the user bound an ODP MR to this MW.
1843 		 */
1844 		mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
1845 
1846 	return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
1847 }
1848 
1849 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1850 			    struct ib_mr_status *mr_status)
1851 {
1852 	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1853 	int ret = 0;
1854 
1855 	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
1856 		pr_err("Invalid status check mask\n");
1857 		ret = -EINVAL;
1858 		goto done;
1859 	}
1860 
1861 	mr_status->fail_status = 0;
1862 	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
1863 		if (!mmr->sig) {
1864 			ret = -EINVAL;
1865 			pr_err("signature status check requested on a non-signature enabled MR\n");
1866 			goto done;
1867 		}
1868 
1869 		mmr->sig->sig_status_checked = true;
1870 		if (!mmr->sig->sig_err_exists)
1871 			goto done;
1872 
1873 		if (ibmr->lkey == mmr->sig->err_item.key)
1874 			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
1875 			       sizeof(mr_status->sig_err));
1876 		else {
1877 			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
1878 			mr_status->sig_err.sig_err_offset = 0;
1879 			mr_status->sig_err.key = mmr->sig->err_item.key;
1880 		}
1881 
1882 		mmr->sig->sig_err_exists = false;
1883 		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
1884 	}
1885 
1886 done:
1887 	return ret;
1888 }
1889 
1890 static int
1891 mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
1892 			int data_sg_nents, unsigned int *data_sg_offset,
1893 			struct scatterlist *meta_sg, int meta_sg_nents,
1894 			unsigned int *meta_sg_offset)
1895 {
1896 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
1897 	unsigned int sg_offset = 0;
1898 	int n = 0;
1899 
1900 	mr->meta_length = 0;
1901 	if (data_sg_nents == 1) {
1902 		n++;
1903 		mr->mmkey.ndescs = 1;
1904 		if (data_sg_offset)
1905 			sg_offset = *data_sg_offset;
1906 		mr->data_length = sg_dma_len(data_sg) - sg_offset;
1907 		mr->data_iova = sg_dma_address(data_sg) + sg_offset;
1908 		if (meta_sg_nents == 1) {
1909 			n++;
1910 			mr->meta_ndescs = 1;
1911 			if (meta_sg_offset)
1912 				sg_offset = *meta_sg_offset;
1913 			else
1914 				sg_offset = 0;
1915 			mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
1916 			mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
1917 		}
1918 		ibmr->length = mr->data_length + mr->meta_length;
1919 	}
1920 
1921 	return n;
1922 }
1923 
1924 static int
1925 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1926 		   struct scatterlist *sgl,
1927 		   unsigned short sg_nents,
1928 		   unsigned int *sg_offset_p,
1929 		   struct scatterlist *meta_sgl,
1930 		   unsigned short meta_sg_nents,
1931 		   unsigned int *meta_sg_offset_p)
1932 {
1933 	struct scatterlist *sg = sgl;
1934 	struct mlx5_klm *klms = mr->descs;
1935 	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1936 	u32 lkey = mr->ibmr.pd->local_dma_lkey;
1937 	int i, j = 0;
1938 
1939 	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
1940 	mr->ibmr.length = 0;
1941 
1942 	for_each_sg(sgl, sg, sg_nents, i) {
1943 		if (unlikely(i >= mr->max_descs))
1944 			break;
1945 		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
1946 		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
1947 		klms[i].key = cpu_to_be32(lkey);
1948 		mr->ibmr.length += sg_dma_len(sg) - sg_offset;
1949 
1950 		sg_offset = 0;
1951 	}
1952 
1953 	if (sg_offset_p)
1954 		*sg_offset_p = sg_offset;
1955 
1956 	mr->mmkey.ndescs = i;
1957 	mr->data_length = mr->ibmr.length;
1958 
1959 	if (meta_sg_nents) {
1960 		sg = meta_sgl;
1961 		sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
1962 		for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
1963 			if (unlikely(i + j >= mr->max_descs))
1964 				break;
1965 			klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
1966 						     sg_offset);
1967 			klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
1968 							 sg_offset);
1969 			klms[i + j].key = cpu_to_be32(lkey);
1970 			mr->ibmr.length += sg_dma_len(sg) - sg_offset;
1971 
1972 			sg_offset = 0;
1973 		}
1974 		if (meta_sg_offset_p)
1975 			*meta_sg_offset_p = sg_offset;
1976 
1977 		mr->meta_ndescs = j;
1978 		mr->meta_length = mr->ibmr.length - mr->data_length;
1979 	}
1980 
1981 	return i + j;
1982 }
1983 
1984 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
1985 {
1986 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
1987 	__be64 *descs;
1988 
1989 	if (unlikely(mr->mmkey.ndescs == mr->max_descs))
1990 		return -ENOMEM;
1991 
1992 	descs = mr->descs;
1993 	descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
1994 
1995 	return 0;
1996 }
1997 
1998 static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
1999 {
2000 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2001 	__be64 *descs;
2002 
2003 	if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
2004 		return -ENOMEM;
2005 
2006 	descs = mr->descs;
2007 	descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
2008 		cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2009 
2010 	return 0;
2011 }
2012 
2013 static int
2014 mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2015 			 int data_sg_nents, unsigned int *data_sg_offset,
2016 			 struct scatterlist *meta_sg, int meta_sg_nents,
2017 			 unsigned int *meta_sg_offset)
2018 {
2019 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2020 	struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
2021 	int n;
2022 
2023 	pi_mr->mmkey.ndescs = 0;
2024 	pi_mr->meta_ndescs = 0;
2025 	pi_mr->meta_length = 0;
2026 
2027 	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2028 				   pi_mr->desc_size * pi_mr->max_descs,
2029 				   DMA_TO_DEVICE);
2030 
2031 	pi_mr->ibmr.page_size = ibmr->page_size;
2032 	n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
2033 			   mlx5_set_page);
2034 	if (n != data_sg_nents)
2035 		return n;
2036 
2037 	pi_mr->data_iova = pi_mr->ibmr.iova;
2038 	pi_mr->data_length = pi_mr->ibmr.length;
2039 	pi_mr->ibmr.length = pi_mr->data_length;
2040 	ibmr->length = pi_mr->data_length;
2041 
2042 	if (meta_sg_nents) {
2043 		u64 page_mask = ~((u64)ibmr->page_size - 1);
2044 		u64 iova = pi_mr->data_iova;
2045 
2046 		n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
2047 				    meta_sg_offset, mlx5_set_page_pi);
2048 
2049 		pi_mr->meta_length = pi_mr->ibmr.length;
2050 		/*
2051 		 * PI address for the HW is the offset of the metadata address
2052 		 * relative to the first data page address.
2053 		 * It equals to first data page address + size of data pages +
2054 		 * metadata offset at the first metadata page
2055 		 */
2056 		pi_mr->pi_iova = (iova & page_mask) +
2057 				 pi_mr->mmkey.ndescs * ibmr->page_size +
2058 				 (pi_mr->ibmr.iova & ~page_mask);
2059 		/*
2060 		 * In order to use one MTT MR for data and metadata, we register
2061 		 * also the gaps between the end of the data and the start of
2062 		 * the metadata (the sig MR will verify that the HW will access
2063 		 * to right addresses). This mapping is safe because we use
2064 		 * internal mkey for the registration.
2065 		 */
2066 		pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
2067 		pi_mr->ibmr.iova = iova;
2068 		ibmr->length += pi_mr->meta_length;
2069 	}
2070 
2071 	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2072 				      pi_mr->desc_size * pi_mr->max_descs,
2073 				      DMA_TO_DEVICE);
2074 
2075 	return n;
2076 }
2077 
2078 static int
2079 mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2080 			 int data_sg_nents, unsigned int *data_sg_offset,
2081 			 struct scatterlist *meta_sg, int meta_sg_nents,
2082 			 unsigned int *meta_sg_offset)
2083 {
2084 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2085 	struct mlx5_ib_mr *pi_mr = mr->klm_mr;
2086 	int n;
2087 
2088 	pi_mr->mmkey.ndescs = 0;
2089 	pi_mr->meta_ndescs = 0;
2090 	pi_mr->meta_length = 0;
2091 
2092 	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2093 				   pi_mr->desc_size * pi_mr->max_descs,
2094 				   DMA_TO_DEVICE);
2095 
2096 	n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
2097 			       meta_sg, meta_sg_nents, meta_sg_offset);
2098 
2099 	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2100 				      pi_mr->desc_size * pi_mr->max_descs,
2101 				      DMA_TO_DEVICE);
2102 
2103 	/* This is zero-based memory region */
2104 	pi_mr->data_iova = 0;
2105 	pi_mr->ibmr.iova = 0;
2106 	pi_mr->pi_iova = pi_mr->data_length;
2107 	ibmr->length = pi_mr->ibmr.length;
2108 
2109 	return n;
2110 }
2111 
2112 int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2113 			 int data_sg_nents, unsigned int *data_sg_offset,
2114 			 struct scatterlist *meta_sg, int meta_sg_nents,
2115 			 unsigned int *meta_sg_offset)
2116 {
2117 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2118 	struct mlx5_ib_mr *pi_mr = NULL;
2119 	int n;
2120 
2121 	WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
2122 
2123 	mr->mmkey.ndescs = 0;
2124 	mr->data_length = 0;
2125 	mr->data_iova = 0;
2126 	mr->meta_ndescs = 0;
2127 	mr->pi_iova = 0;
2128 	/*
2129 	 * As a performance optimization, if possible, there is no need to
2130 	 * perform UMR operation to register the data/metadata buffers.
2131 	 * First try to map the sg lists to PA descriptors with local_dma_lkey.
2132 	 * Fallback to UMR only in case of a failure.
2133 	 */
2134 	n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2135 				    data_sg_offset, meta_sg, meta_sg_nents,
2136 				    meta_sg_offset);
2137 	if (n == data_sg_nents + meta_sg_nents)
2138 		goto out;
2139 	/*
2140 	 * As a performance optimization, if possible, there is no need to map
2141 	 * the sg lists to KLM descriptors. First try to map the sg lists to MTT
2142 	 * descriptors and fallback to KLM only in case of a failure.
2143 	 * It's more efficient for the HW to work with MTT descriptors
2144 	 * (especially in high load).
2145 	 * Use KLM (indirect access) only if it's mandatory.
2146 	 */
2147 	pi_mr = mr->mtt_mr;
2148 	n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2149 				     data_sg_offset, meta_sg, meta_sg_nents,
2150 				     meta_sg_offset);
2151 	if (n == data_sg_nents + meta_sg_nents)
2152 		goto out;
2153 
2154 	pi_mr = mr->klm_mr;
2155 	n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2156 				     data_sg_offset, meta_sg, meta_sg_nents,
2157 				     meta_sg_offset);
2158 	if (unlikely(n != data_sg_nents + meta_sg_nents))
2159 		return -ENOMEM;
2160 
2161 out:
2162 	/* This is zero-based memory region */
2163 	ibmr->iova = 0;
2164 	mr->pi_mr = pi_mr;
2165 	if (pi_mr)
2166 		ibmr->sig_attrs->meta_length = pi_mr->meta_length;
2167 	else
2168 		ibmr->sig_attrs->meta_length = mr->meta_length;
2169 
2170 	return 0;
2171 }
2172 
2173 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
2174 		      unsigned int *sg_offset)
2175 {
2176 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2177 	int n;
2178 
2179 	mr->mmkey.ndescs = 0;
2180 
2181 	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
2182 				   mr->desc_size * mr->max_descs,
2183 				   DMA_TO_DEVICE);
2184 
2185 	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
2186 		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
2187 				       NULL);
2188 	else
2189 		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
2190 				mlx5_set_page);
2191 
2192 	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
2193 				      mr->desc_size * mr->max_descs,
2194 				      DMA_TO_DEVICE);
2195 
2196 	return n;
2197 }
2198