// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

#include "mlx5_core.h"
#include "lib/mlx5.h"

struct mlx5_st_idx_data {
	refcount_t usecount;
	u16 tag;
};

struct mlx5_st {
	/* serialize access upon alloc/free flows */
	struct mutex lock;
	struct xa_limit index_limit;
	struct xarray idx_xa; /* key == index, value == struct mlx5_st_idx_data */
	u8 direct_mode : 1;
};

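/*
 * Create the per-device steering tag (ST) context: enable TPH and, unless
 * the device works in direct mode, prepare the index range of the ST table
 * (entry 0 stays reserved for non-TPH traffic). SFs reuse the context of
 * their parent device.
 */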
struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	struct mlx5_st *st;
	u8 direct_mode = 0;
	u16 num_entries;
	u32 tbl_loc;
	int ret;

	if (!MLX5_CAP_GEN(dev, mkey_pcie_tph))
		return NULL;

#ifdef CONFIG_MLX5_SF
	if (mlx5_core_is_sf(dev))
		return dev->priv.parent_mdev->st;
#endif

	/* Check whether the device exposes the PCIe TPH capability */
	if (!pdev->tph_cap)
		return NULL;

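	/*
	 * If the device exposes no ST table (PCI_TPH_LOC_NONE), it works in
	 * direct mode: steering tags are used as-is rather than referenced
	 * through an index into the ST table.
	 */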
	tbl_loc = pcie_tph_get_st_table_loc(pdev);
	if (tbl_loc == PCI_TPH_LOC_NONE)
		direct_mode = 1;

	if (!direct_mode) {
		num_entries = pcie_tph_get_st_table_size(pdev);
		/* We need a reserved entry for non-TPH cases */
		if (num_entries < 2)
			return NULL;
	}

	/* If enabling TPH fails, the OS doesn't support ST */
	ret = pcie_enable_tph(pdev, PCI_TPH_ST_DS_MODE);
	if (ret)
		return NULL;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto end;

	mutex_init(&st->lock);
	xa_init_flags(&st->idx_xa, XA_FLAGS_ALLOC);
	st->direct_mode = direct_mode;
	if (st->direct_mode)
		return st;

	/* entry 0 is reserved for non-TPH cases */
	st->index_limit.min = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX + 1;
	st->index_limit.max = num_entries - 1;

	return st;

end:
	pcie_disable_tph(dev->pdev);
	return NULL;
}

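/*
 * Destroy the steering tag context and disable TPH. SFs share the parent
 * device's context, so only the parent tears it down.
 */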
void mlx5_st_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_st *st = dev->st;

	if (mlx5_core_is_sf(dev) || !st)
		return;

	pcie_disable_tph(dev->pdev);
	WARN_ON_ONCE(!xa_empty(&st->idx_xa));
	kfree(st);
}

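/*
 * Resolve the steering tag for @mem_type/@cpu_uid and return an ST index
 * for it: the raw tag in direct mode, otherwise a refcounted ST table
 * index shared by all users of the same tag.
 */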
int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
			unsigned int cpu_uid, u16 *st_index)
{
	struct mlx5_st_idx_data *idx_data;
	struct mlx5_st *st = dev->st;
	unsigned long index;
	u32 xa_id;
	u16 tag;
	int ret;

	if (!st)
		return -EOPNOTSUPP;

	ret = pcie_tph_get_cpu_st(dev->pdev, mem_type, cpu_uid, &tag);
	if (ret)
		return ret;

	if (st->direct_mode) {
		*st_index = tag;
		return 0;
	}

	mutex_lock(&st->lock);

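	/* Reuse an existing entry if this tag is already programmed */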
	xa_for_each(&st->idx_xa, index, idx_data) {
		if (tag == idx_data->tag) {
			refcount_inc(&idx_data->usecount);
			*st_index = index;
			goto end;
		}
	}

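	/* First user of this tag: allocate an index and program the entry */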
	idx_data = kzalloc(sizeof(*idx_data), GFP_KERNEL);
	if (!idx_data) {
		ret = -ENOMEM;
		goto end;
	}

	refcount_set(&idx_data->usecount, 1);
	idx_data->tag = tag;

	ret = xa_alloc(&st->idx_xa, &xa_id, idx_data, st->index_limit, GFP_KERNEL);
	if (ret)
		goto clean_idx_data;

	ret = pcie_tph_set_st_entry(dev->pdev, xa_id, tag);
	if (ret)
		goto clean_idx_xa;

	*st_index = xa_id;
	goto end;

clean_idx_xa:
	xa_erase(&st->idx_xa, xa_id);
clean_idx_data:
	kfree(idx_data);
end:
	mutex_unlock(&st->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_st_alloc_index);

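/*
 * Drop a reference on @st_index and free the xarray entry once the last
 * user is gone. A no-op in direct mode, where no table entry was taken.
 */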
int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
{
	struct mlx5_st_idx_data *idx_data;
	struct mlx5_st *st = dev->st;
	int ret = 0;

	if (!st)
		return -EOPNOTSUPP;

	if (st->direct_mode)
		return 0;

	mutex_lock(&st->lock);
	idx_data = xa_load(&st->idx_xa, st_index);
	if (WARN_ON_ONCE(!idx_data)) {
		ret = -EINVAL;
		goto end;
	}

	if (refcount_dec_and_test(&idx_data->usecount)) {
		xa_erase(&st->idx_xa, st_index);
		/* The PCI config space is left as it was; no mkey refers to this entry anymore */
		kfree(idx_data);
	}

end:
	mutex_unlock(&st->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_st_dealloc_index);

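/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a consumer steering DMA writes toward a given CPU would do roughly the
 * following, assuming TPH_MEM_TYPE_VM from <linux/pci-tph.h>:
 *
 *	u16 st_index;
 *	int err;
 *
 *	err = mlx5_st_alloc_index(mdev, TPH_MEM_TYPE_VM, cpu_uid, &st_index);
 *	if (err)
 *		return err;
 *
 *	... program st_index into the mkey context ...
 *
 *	mlx5_st_dealloc_index(mdev, st_index);
 */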