// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

#include "mlx5_core.h"
#include "lib/mlx5.h"

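/*
 * Per-device SW ICM state: one allocation bitmap per ICM region exposed by
 * the device (steering, header modify, header modify pattern, indirect
 * encap). Each bit tracks one MLX5_SW_ICM_BLOCK_SIZE(dev) block.
 */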
struct mlx5_dm {
	/* protect access to icm bitmask */
	spinlock_t lock;
	unsigned long *steering_sw_icm_alloc_blocks;
	unsigned long *header_modify_sw_icm_alloc_blocks;
	unsigned long *header_modify_pattern_sw_icm_alloc_blocks;
	unsigned long *header_encap_sw_icm_alloc_blocks;
};

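/*
 * Allocate the SW ICM bookkeeping for @dev. A bitmap is allocated only for
 * the ICM regions the device actually advertises. Returns NULL when SW ICM
 * is not supported at all, or ERR_PTR(-ENOMEM) on allocation failure.
 */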
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
{
	u64 header_modify_pattern_icm_blocks = 0;
	u64 header_sw_encap_icm_blocks = 0;
	u64 header_modify_icm_blocks = 0;
	u64 steering_icm_blocks = 0;
	struct mlx5_dm *dm;
	bool support_v2;

	if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM))
		return NULL;

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&dm->lock);

	if (MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address)) {
		steering_icm_blocks =
			BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
			    MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));

		dm->steering_sw_icm_alloc_blocks =
			bitmap_zalloc(steering_icm_blocks, GFP_KERNEL);
		if (!dm->steering_sw_icm_alloc_blocks)
			goto err_steering;
	}

	if (MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address)) {
		header_modify_icm_blocks =
			BIT(MLX5_CAP_DEV_MEM(dev, log_header_modify_sw_icm_size) -
			    MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));

		dm->header_modify_sw_icm_alloc_blocks =
			bitmap_zalloc(header_modify_icm_blocks, GFP_KERNEL);
		if (!dm->header_modify_sw_icm_alloc_blocks)
			goto err_modify_hdr;
	}

	if (MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size)) {
		header_sw_encap_icm_blocks =
			BIT(MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size) -
			    MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));

		dm->header_encap_sw_icm_alloc_blocks =
			bitmap_zalloc(header_sw_encap_icm_blocks, GFP_KERNEL);
		if (!dm->header_encap_sw_icm_alloc_blocks)
			goto err_pattern;
	}

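	/*
	 * The header modify pattern bitmap is only set up when both NIC RX
	 * and NIC TX flow tables report sw_owner_v2 and the device exposes
	 * a pattern ICM start address.
	 */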
	support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) &&
		     MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) &&
		     MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address);

	if (support_v2) {
		header_modify_pattern_icm_blocks =
			BIT(MLX5_CAP_DEV_MEM(dev, log_header_modify_pattern_sw_icm_size) -
			    MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));

		dm->header_modify_pattern_sw_icm_alloc_blocks =
			bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL);
		if (!dm->header_modify_pattern_sw_icm_alloc_blocks)
			goto err_sw_encap;
	}

	return dm;

err_sw_encap:
	bitmap_free(dm->header_encap_sw_icm_alloc_blocks);

err_pattern:
	bitmap_free(dm->header_modify_sw_icm_alloc_blocks);

err_modify_hdr:
	bitmap_free(dm->steering_sw_icm_alloc_blocks);

err_steering:
	kfree(dm);

	return ERR_PTR(-ENOMEM);
}

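/*
 * Free the SW ICM bookkeeping for @dev. Any bits still set in a bitmap mean
 * a caller leaked an ICM allocation, so warn before freeing.
 */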
void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_dm *dm = dev->dm;

	if (!dev->dm)
		return;

	if (dm->steering_sw_icm_alloc_blocks) {
		WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks,
				      BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
					  MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
		bitmap_free(dm->steering_sw_icm_alloc_blocks);
	}

	if (dm->header_modify_sw_icm_alloc_blocks) {
		WARN_ON(!bitmap_empty(dm->header_modify_sw_icm_alloc_blocks,
				      BIT(MLX5_CAP_DEV_MEM(dev,
							   log_header_modify_sw_icm_size) -
				      MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
		bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
	}

	if (dm->header_encap_sw_icm_alloc_blocks) {
		WARN_ON(!bitmap_empty(dm->header_encap_sw_icm_alloc_blocks,
				      BIT(MLX5_CAP_DEV_MEM(dev,
							   log_indirect_encap_sw_icm_size) -
				      MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
		bitmap_free(dm->header_encap_sw_icm_alloc_blocks);
	}

	if (dm->header_modify_pattern_sw_icm_alloc_blocks) {
		WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks,
				      BIT(MLX5_CAP_DEV_MEM(dev,
							   log_header_modify_pattern_sw_icm_size) -
					  MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
		bitmap_free(dm->header_modify_pattern_sw_icm_alloc_blocks);
	}

	kfree(dm);
}

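/*
 * Allocate @length bytes of device SW ICM of the given @type on behalf of
 * @uid. @length must be a power of two and a multiple of the device's SW ICM
 * block size; @log_alignment below the block size is rounded up to it. On
 * success the ICM start address of the allocation is returned in @addr and
 * the SW_ICM object id in @obj_id. Returns -EOPNOTSUPP when the device (or
 * the requested region) has no SW ICM, -EINVAL for a bad length or type,
 * -ENOMEM when no suitably aligned free range is available, or the firmware
 * command error.
 */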
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			 u64 length, u32 log_alignment, u16 uid,
			 phys_addr_t *addr, u32 *obj_id)
{
	u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
	struct mlx5_dm *dm = dev->dm;
	unsigned long *block_map;
	u64 icm_start_addr;
	u32 log_icm_size;
	u64 align_mask;
	u32 max_blocks;
	u64 block_idx;
	void *sw_icm;
	int ret;

	if (!dev->dm)
		return -EOPNOTSUPP;

	if (!length || (length & (length - 1)) ||
	    length & (MLX5_SW_ICM_BLOCK_SIZE(dev) - 1))
		return -EINVAL;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
	MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);

	switch (type) {
	case MLX5_SW_ICM_TYPE_STEERING:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address);
		log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size);
		block_map = dm->steering_sw_icm_alloc_blocks;
		break;
	case MLX5_SW_ICM_TYPE_HEADER_MODIFY:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
		log_icm_size = MLX5_CAP_DEV_MEM(dev,
						log_header_modify_sw_icm_size);
		block_map = dm->header_modify_sw_icm_alloc_blocks;
		break;
	case MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
						    header_modify_pattern_sw_icm_start_address);
		log_icm_size = MLX5_CAP_DEV_MEM(dev,
						log_header_modify_pattern_sw_icm_size);
		block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
		break;
	case MLX5_SW_ICM_TYPE_SW_ENCAP:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
						    indirect_encap_sw_icm_start_address);
		log_icm_size = MLX5_CAP_DEV_MEM(dev,
						log_indirect_encap_sw_icm_size);
		block_map = dm->header_encap_sw_icm_alloc_blocks;
		break;
	default:
		return -EINVAL;
	}

	if (!block_map)
		return -EOPNOTSUPP;

	max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));

	if (log_alignment < MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
		log_alignment = MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
	align_mask = BIT(log_alignment - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) - 1;

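	/* Reserve a free, suitably aligned run of blocks under the lock. */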
	spin_lock(&dm->lock);
	block_idx = bitmap_find_next_zero_area(block_map, max_blocks, 0,
					       num_blocks, align_mask);

	if (block_idx < max_blocks)
		bitmap_set(block_map,
			   block_idx, num_blocks);

	spin_unlock(&dm->lock);

	if (block_idx >= max_blocks)
		return -ENOMEM;

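	/*
	 * Create the SW_ICM general object covering the reserved range; on
	 * failure, release the blocks so they can be reused.
	 */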
224 	sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm);
225 	icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
226 	MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr,
227 		   icm_start_addr);
228 	MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length));
229 
230 	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
231 	if (ret) {
232 		spin_lock(&dm->lock);
233 		bitmap_clear(block_map,
234 			     block_idx, num_blocks);
235 		spin_unlock(&dm->lock);
236 
237 		return ret;
238 	}
239 
240 	*addr = icm_start_addr;
241 	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
242 
243 	return 0;
244 }
245 EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_alloc);
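
/*
 * A minimal usage sketch (not from this file): a consumer such as an RDMA or
 * steering driver would pair the two exported calls roughly like this, with
 * "dev", "len" and "log_align" supplied by the caller.
 *
 *	phys_addr_t icm_addr;
 *	u32 obj_id;
 *	int err;
 *
 *	err = mlx5_dm_sw_icm_alloc(dev, MLX5_SW_ICM_TYPE_STEERING, len,
 *				   log_align, 0, &icm_addr, &obj_id);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING, len, 0,
 *			       icm_addr, obj_id);
 */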
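/*
 * Destroy the SW_ICM object identified by @obj_id and return its blocks
 * (computed from @addr and @length) to the per-type allocation bitmap.
 */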
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			   u64 length, u16 uid, phys_addr_t addr, u32 obj_id)
{
	u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	struct mlx5_dm *dm = dev->dm;
	unsigned long *block_map;
	u64 icm_start_addr;
	u64 start_idx;
	int err;

	if (!dev->dm)
		return -EOPNOTSUPP;

	switch (type) {
	case MLX5_SW_ICM_TYPE_STEERING:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address);
		block_map = dm->steering_sw_icm_alloc_blocks;
		break;
	case MLX5_SW_ICM_TYPE_HEADER_MODIFY:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
		block_map = dm->header_modify_sw_icm_alloc_blocks;
		break;
	case MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
						    header_modify_pattern_sw_icm_start_address);
		block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
		break;
	case MLX5_SW_ICM_TYPE_SW_ENCAP:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
						    indirect_encap_sw_icm_start_address);
		block_map = dm->header_encap_sw_icm_alloc_blocks;
		break;
	default:
		return -EINVAL;
	}

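	/* Destroy the object in firmware first, then release its blocks. */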
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	start_idx = (addr - icm_start_addr) >> MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
	spin_lock(&dm->lock);
	bitmap_clear(block_map,
		     start_idx, num_blocks);
	spin_unlock(&dm->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_dealloc);