// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

#include "mlx5_core.h"
#include "lib/mlx5.h"

struct mlx5_st_idx_data {
	refcount_t usecount;
	u16 tag;
};

struct mlx5_st {
	/* Serializes the steering tag index alloc/free flows */
	struct mutex lock;
	struct xa_limit index_limit;
	struct xarray idx_xa; /* key == index, value == struct mlx5_st_idx_data */
};

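/**
 * mlx5_st_create - Initialize steering tag (ST) support for a device
 * @dev: mlx5 core device
 *
 * Enables TPH in device-specific mode and sets up the index map used to
 * track steering tag table entries. Entry 0 is reserved for the non-TPH
 * case, so the device must expose at least two table entries. SFs reuse
 * the context of their parent device.
 *
 * Return: the new context, the parent's context for an SF, or NULL when
 * steering tags are unsupported or TPH could not be enabled.
 */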
struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	struct mlx5_st *st;
	u16 num_entries;
	int ret;

	if (!MLX5_CAP_GEN(dev, mkey_pcie_tph))
		return NULL;

#ifdef CONFIG_MLX5_SF
	if (mlx5_core_is_sf(dev))
		return dev->priv.parent_mdev->st;
#endif

	/* Check whether the device exposes the PCIe TPH capability */
	if (!pdev->tph_cap)
		return NULL;

	num_entries = pcie_tph_get_st_table_size(pdev);
	/* We need a reserved entry for non-TPH cases */
	if (num_entries < 2)
		return NULL;

	/* If this fails, the OS doesn't support ST */
	ret = pcie_enable_tph(pdev, PCI_TPH_ST_DS_MODE);
	if (ret)
		return NULL;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto end;

	mutex_init(&st->lock);
	xa_init_flags(&st->idx_xa, XA_FLAGS_ALLOC);
	/* Entry 0 is reserved for non-TPH cases */
	st->index_limit.min = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX + 1;
	st->index_limit.max = num_entries - 1;

	return st;

end:
	pcie_disable_tph(dev->pdev);
	return NULL;
}

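/**
 * mlx5_st_destroy - Tear down steering tag support for a device
 * @dev: mlx5 core device
 *
 * Disables TPH and frees the context. No-op for SFs, which share their
 * parent's context, and for devices without steering tag support. All
 * indexes are expected to have been deallocated by this point.
 */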
void mlx5_st_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_st *st = dev->st;

	if (mlx5_core_is_sf(dev) || !st)
		return;

	pcie_disable_tph(dev->pdev);
	WARN_ON_ONCE(!xa_empty(&st->idx_xa));
	kfree(st);
}

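/**
 * mlx5_st_alloc_index - Get a steering tag table index for a CPU/memory pair
 * @dev: mlx5 core device
 * @mem_type: target memory type
 * @cpu_uid: unique ID of the CPU that DMA traffic should be steered towards
 * @st_index: on success, the allocated steering tag table index
 *
 * Resolves the steering tag for @cpu_uid/@mem_type, then either takes a
 * reference on an existing table entry that already carries that tag or
 * allocates a new entry and programs the tag into the device's ST table.
 *
 * Return: 0 on success, -EOPNOTSUPP when steering tags are unsupported,
 * or a negative errno from tag lookup or table programming.
 */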
int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
			unsigned int cpu_uid, u16 *st_index)
{
	struct mlx5_st_idx_data *idx_data;
	struct mlx5_st *st = dev->st;
	unsigned long index;
	u32 xa_id;
	u16 tag;
	int ret;

	if (!st)
		return -EOPNOTSUPP;

	ret = pcie_tph_get_cpu_st(dev->pdev, mem_type, cpu_uid, &tag);
	if (ret)
		return ret;

	mutex_lock(&st->lock);

	/* Reuse an existing entry that already carries this tag */
	xa_for_each(&st->idx_xa, index, idx_data) {
		if (tag == idx_data->tag) {
			refcount_inc(&idx_data->usecount);
			*st_index = index;
			goto end;
		}
	}

	idx_data = kzalloc(sizeof(*idx_data), GFP_KERNEL);
	if (!idx_data) {
		ret = -ENOMEM;
		goto end;
	}

	refcount_set(&idx_data->usecount, 1);
	idx_data->tag = tag;

	ret = xa_alloc(&st->idx_xa, &xa_id, idx_data, st->index_limit, GFP_KERNEL);
	if (ret)
		goto clean_idx_data;

	/* Program the tag into the device's steering tag table */
	ret = pcie_tph_set_st_entry(dev->pdev, xa_id, tag);
	if (ret)
		goto clean_idx_xa;

	*st_index = xa_id;
	goto end;

clean_idx_xa:
	xa_erase(&st->idx_xa, xa_id);
clean_idx_data:
	kfree(idx_data);
end:
	mutex_unlock(&st->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_st_alloc_index);

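/**
 * mlx5_st_dealloc_index - Release a steering tag table index
 * @dev: mlx5 core device
 * @st_index: index previously returned by mlx5_st_alloc_index()
 *
 * Drops one reference on the entry; dropping the last reference removes
 * it from the index map. The table entry in PCI config space is left as
 * is, since no mkey will refer to it anymore.
 *
 * Return: 0 on success, -EOPNOTSUPP when steering tags are unsupported,
 * or -EINVAL if @st_index is unknown.
 */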
int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
{
	struct mlx5_st_idx_data *idx_data;
	struct mlx5_st *st = dev->st;
	int ret = 0;

	if (!st)
		return -EOPNOTSUPP;

	mutex_lock(&st->lock);
	idx_data = xa_load(&st->idx_xa, st_index);
	if (WARN_ON_ONCE(!idx_data)) {
		ret = -EINVAL;
		goto end;
	}

	if (refcount_dec_and_test(&idx_data->usecount)) {
		xa_erase(&st->idx_xa, st_index);
		/* We leave PCI config space as it was; no mkey will refer to it */
		kfree(idx_data);
	}

end:
	mutex_unlock(&st->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_st_dealloc_index);
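
/*
 * Usage sketch (hypothetical caller, not part of this file): a consumer
 * that wants DMA writes steered towards a given CPU would pair the two
 * exported calls as below, keeping st_index alive for the lifetime of
 * the mkey that references it. The names mdev and cpu_uid are assumed
 * to come from the caller's context.
 *
 *	u16 st_index;
 *	int err;
 *
 *	err = mlx5_st_alloc_index(mdev, TPH_MEM_TYPE_VM, cpu_uid, &st_index);
 *	if (err)
 *		return err;	// fall back to the non-TPH path
 *
 *	// ... create an mkey that carries st_index ...
 *
 *	mlx5_st_dealloc_index(mdev, st_index);
 */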