/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */

#ifndef _MLX5_IB_UMR_H
#define _MLX5_IB_UMR_H

#include "mlx5_ib.h"


#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

#define MLX5_IB_UMR_OCTOWORD 16
#define MLX5_IB_UMR_XLT_ALIGNMENT 64

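/* Allocate and free the device resources (QP, CQ, PD) used to post UMR WQEs. */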
int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev);
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev);

int mlx5r_umr_init(struct mlx5_ib_dev *dev);
void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev);

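/*
 * Returns true if UMR can be used to load the PAS (page address) list of an
 * MR of the given length.
 */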
static inline bool mlx5r_umr_can_load_pas(struct mlx5_ib_dev *dev,
					  size_t length)
{
	/*
	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a
	 * mkey can never be enabled without this capability. Simplify this
	 * quirky hardware by just saying it can't use PAS lists with UMR at
	 * all.
	 */
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	/*
	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
	 * used.
	 */
	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
		return false;
	return true;
}

/*
 * Returns true if an existing MR can be reconfigured to new access_flags
 * using UMR. Older HW cannot use UMR to update certain elements of the MKC.
 * See get_umr_update_access_mask() and umr_check_mkey_mask().
 */
static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev,
					  unsigned int current_access_flags,
					  unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return false;

	return true;
}

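/*
 * Convert a translation-table size in bytes into the number of 16-byte
 * octowords it occupies, after rounding up to the 64-byte XLT alignment.
 */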
static inline u64 mlx5r_umr_get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}

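/*
 * Completion context for a synchronous UMR post: @cqe hooks the CQ handler,
 * @status records the work completion status and @done is signalled when
 * the UMR WQE has completed.
 */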
struct mlx5r_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};

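/*
 * Layout of a UMR work request: the UMR control segment followed by the
 * mkey context segment and a data segment describing the translation list.
 */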
struct mlx5r_umr_wqe {
	struct mlx5_wqe_umr_ctrl_seg ctrl_seg;
	struct mlx5_mkey_seg mkey_seg;
	struct mlx5_wqe_data_seg data_seg;
};

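/* UMR operations on existing mkeys, implemented in umr.c. */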
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			      int access_flags);
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags);
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
			 int page_shift, int flags);

#endif /* _MLX5_IB_UMR_H */