xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"

#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
#define DR_ICM_POOL_STE_HOT_MEM_PERCENT 25
#define DR_ICM_POOL_MODIFY_HDR_PTRN_HOT_MEM_PERCENT 50
#define DR_ICM_POOL_MODIFY_ACTION_HOT_MEM_PERCENT 90

struct mlx5dr_icm_hot_chunk {
	struct mlx5dr_icm_buddy_mem *buddy_mem;
	unsigned int seg;
	enum mlx5dr_icm_chunk_size size;
};

struct mlx5dr_icm_pool {
	enum mlx5dr_icm_type icm_type;
	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
	struct mlx5dr_domain *dmn;
	struct kmem_cache *chunks_kmem_cache;

	/* memory management */
	struct mutex mutex; /* protect the ICM pool and ICM buddy */
	struct list_head buddy_mem_list;

	/* Hardware may still be accessing this memory, but at some future,
	 * undetermined time, it will cease to do so.
	 * The sync_ste command sets these chunks free.
	 */
	struct mlx5dr_icm_hot_chunk *hot_chunks_arr;
	u32 hot_chunks_num;
	u64 hot_memory_size;
	/* hot memory size threshold for triggering sync */
	u64 th;
};

struct mlx5dr_icm_dm {
	u32 obj_id;
	enum mlx5_sw_icm_type type;
	phys_addr_t addr;
	size_t length;
};

struct mlx5dr_icm_mr {
	u32 mkey;
	struct mlx5dr_icm_dm dm;
	struct mlx5dr_domain *dmn;
	size_t length;
	u64 icm_start_addr;
};

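/* Create an mkey that covers the allocated SW ICM device memory range
 * [start_addr, start_addr + length) so the memory can be accessed
 * through lkey/rkey based operations.
 */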
static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
				 u32 pd, u64 length, u64 start_addr, int mode,
				 u32 *mkey)
{
	u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	if (mode == MLX5_MKC_ACCESS_MODE_SW_ICM) {
		MLX5_SET(mkc, mkc, rw, 1);
		MLX5_SET(mkc, mkc, rr, 1);
	}

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, pd);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);

	return mlx5_core_create_mkey(mdev, mkey, in, inlen);
}

u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
{
	u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

	return (u64)offset * chunk->seg;
}

u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
{
	return chunk->buddy_mem->icm_mr->mkey;
}

u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
{
	u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

	return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
}

u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
{
	return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
			chunk->buddy_mem->pool->icm_type);
}

u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
{
	return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
}

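/* Allocate SW ICM device memory of the pool's maximal chunk size and
 * register it with an mkey, so that a buddy allocator can later carve
 * chunks out of it.
 */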
static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5_core_dev *mdev = pool->dmn->mdev;
	enum mlx5_sw_icm_type dm_type = 0;
	struct mlx5dr_icm_mr *icm_mr;
	size_t log_align_base = 0;
	int err;

	icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
	if (!icm_mr)
		return NULL;

	icm_mr->dmn = pool->dmn;

	icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
							       pool->icm_type);

	switch (pool->icm_type) {
	case DR_ICM_TYPE_STE:
		dm_type = MLX5_SW_ICM_TYPE_STEERING;
		log_align_base = ilog2(icm_mr->dm.length);
		break;
	case DR_ICM_TYPE_MODIFY_ACTION:
		dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
		/* Align base is 64B */
		log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
		break;
	case DR_ICM_TYPE_MODIFY_HDR_PTRN:
		dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
		/* Align base is 64B */
		log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
		break;
	default:
		WARN_ON(pool->icm_type);
	}

	icm_mr->dm.type = dm_type;

	err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
				   log_align_base, 0, &icm_mr->dm.addr,
				   &icm_mr->dm.obj_id);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
		goto free_icm_mr;
	}

	/* Register device memory */
	err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
				    icm_mr->dm.length,
				    icm_mr->dm.addr,
				    MLX5_MKC_ACCESS_MODE_SW_ICM,
				    &icm_mr->mkey);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
		goto free_dm;
	}

	icm_mr->icm_start_addr = icm_mr->dm.addr;

	if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) {
		mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n",
			   log_align_base);
		goto free_mkey;
	}

	return icm_mr;

free_mkey:
	mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
free_dm:
	mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
			       icm_mr->dm.addr, icm_mr->dm.obj_id);
free_icm_mr:
	kvfree(icm_mr);
	return NULL;
}

static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
{
	struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;
	struct mlx5dr_icm_dm *dm = &icm_mr->dm;

	mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
	mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
			       dm->addr, dm->obj_id);
	kvfree(icm_mr);
}

static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
{
	/* We support only one type of STE size, both for ConnectX-5 and later
	 * devices. Once support for match STEs with a larger tag (32B instead
	 * of 16B) is added, the STE size for devices newer than ConnectX-5
	 * will need to account for that.
	 */
	return DR_STE_SIZE_REDUCED;
}

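/* Point the chunk at its slice of the buddy's preallocated STE, HW STE
 * and miss-list caches, and clear that slice for reuse.
 */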
static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
{
	int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
	int ste_size = dr_icm_buddy_get_ste_size(buddy);
	int index = offset / DR_STE_SIZE;

	chunk->ste_arr = &buddy->ste_arr[index];
	chunk->miss_list = &buddy->miss_list[index];
	chunk->hw_ste_arr = buddy->hw_ste_arr + index * ste_size;

	memset(chunk->hw_ste_arr, 0, num_of_entries * ste_size);
	memset(chunk->ste_arr, 0,
	       num_of_entries * sizeof(chunk->ste_arr[0]));
}

static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
{
	int num_of_entries =
		mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);

	buddy->ste_arr = kvcalloc(num_of_entries,
				  sizeof(struct mlx5dr_ste), GFP_KERNEL);
	if (!buddy->ste_arr)
		return -ENOMEM;

	/* Preallocate full STE size on non-ConnectX-5 devices since
	 * we need to support both full and reduced with the same cache.
	 */
	buddy->hw_ste_arr = kvcalloc(num_of_entries,
				     dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
	if (!buddy->hw_ste_arr)
		goto free_ste_arr;

	buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
	if (!buddy->miss_list)
		goto free_hw_ste_arr;

	return 0;

free_hw_ste_arr:
	kvfree(buddy->hw_ste_arr);
free_ste_arr:
	kvfree(buddy->ste_arr);
	return -ENOMEM;
}

static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
{
	kvfree(buddy->ste_arr);
	kvfree(buddy->hw_ste_arr);
	kvfree(buddy->miss_list);
}

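/* Allocate a new ICM memory region, wrap it with a buddy allocator and
 * add it to the head of the pool's buddy list, so it is searched first.
 */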
static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy;
	struct mlx5dr_icm_mr *icm_mr;

	icm_mr = dr_icm_pool_mr_create(pool);
	if (!icm_mr)
		return -ENOMEM;

	buddy = kvzalloc(sizeof(*buddy), GFP_KERNEL);
	if (!buddy)
		goto free_mr;

	if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz))
		goto err_free_buddy;

	buddy->icm_mr = icm_mr;
	buddy->pool = pool;

	if (pool->icm_type == DR_ICM_TYPE_STE) {
		/* Reduce allocations by preallocating and reusing the STE structures */
		if (dr_icm_buddy_init_ste_cache(buddy))
			goto err_cleanup_buddy;
	}

	/* add it to the -start- of the list in order to search in it first */
	list_add(&buddy->list_node, &pool->buddy_mem_list);

	pool->dmn->num_buddies[pool->icm_type]++;

	return 0;

err_cleanup_buddy:
	mlx5dr_buddy_cleanup(buddy);
err_free_buddy:
	kvfree(buddy);
free_mr:
	dr_icm_pool_mr_destroy(icm_mr);
	return -ENOMEM;
}

static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
{
	enum mlx5dr_icm_type icm_type = buddy->pool->icm_type;

	dr_icm_pool_mr_destroy(buddy->icm_mr);

	mlx5dr_buddy_cleanup(buddy);

	if (icm_type == DR_ICM_TYPE_STE)
		dr_icm_buddy_cleanup_ste_cache(buddy);

	buddy->pool->dmn->num_buddies[icm_type]--;

	kvfree(buddy);
}

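/* Initialize a freshly allocated chunk: record its buddy, segment and
 * size, attach the STE cache slices for STE chunks, and account the
 * chunk's bytes as used memory of the buddy.
 */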
static void
dr_icm_chunk_init(struct mlx5dr_icm_chunk *chunk,
		  struct mlx5dr_icm_pool *pool,
		  enum mlx5dr_icm_chunk_size chunk_size,
		  struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
		  unsigned int seg)
{
	int offset;

	chunk->seg = seg;
	chunk->size = chunk_size;
	chunk->buddy_mem = buddy_mem_pool;

	if (pool->icm_type == DR_ICM_TYPE_STE) {
		offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
		dr_icm_chunk_ste_init(chunk, offset);
	}

	buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
}

static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
	return pool->hot_memory_size > pool->th;
}

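/* Return all hot chunks to their buddy allocators. Called once the
 * steering sync has completed (or the pool is being destroyed), i.e.
 * when the hardware no longer references this memory.
 */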
static void dr_icm_pool_clear_hot_chunks_arr(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_hot_chunk *hot_chunk;
	u32 i, num_entries;

	for (i = 0; i < pool->hot_chunks_num; i++) {
		hot_chunk = &pool->hot_chunks_arr[i];
		num_entries = mlx5dr_icm_pool_chunk_size_to_entries(hot_chunk->size);
		mlx5dr_buddy_free_mem(hot_chunk->buddy_mem,
				      hot_chunk->seg, ilog2(num_entries));
		hot_chunk->buddy_mem->used_memory -=
			mlx5dr_icm_pool_chunk_size_to_byte(hot_chunk->size,
							   pool->icm_type);
	}

	pool->hot_chunks_num = 0;
	pool->hot_memory_size = 0;
}

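/* Sync the device steering, release all hot chunks back to their buddy
 * allocators, and destroy STE buddies that are left completely unused.
 */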
static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
	int err;

	err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err);
		return err;
	}

	dr_icm_pool_clear_hot_chunks_arr(pool);

	list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
		if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE)
			dr_icm_buddy_destroy(buddy);
	}

	return 0;
}

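/* Find a free segment of the requested order in one of the pool's buddy
 * allocators; if none has room, create a new buddy (with fresh ICM
 * memory) and retry the allocation from it.
 */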
static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool,
					 enum mlx5dr_icm_chunk_size chunk_size,
					 struct mlx5dr_icm_buddy_mem **buddy,
					 unsigned int *seg)
{
	struct mlx5dr_icm_buddy_mem *buddy_mem_pool;
	bool new_mem = false;
	int err;

alloc_buddy_mem:
	/* find the next free place from the buddy list */
	list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) {
		err = mlx5dr_buddy_alloc_mem(buddy_mem_pool,
					     chunk_size, seg);
		if (!err)
			goto found;

		if (WARN_ON(new_mem)) {
			/* A new memory pool was just added at the head of
			 * the list, yet the allocation still failed.
			 */
			mlx5dr_err(pool->dmn,
				   "No memory for order: %d\n",
				   chunk_size);
			goto out;
		}
	}

	/* no more available allocators in this pool, create a new one */
	err = dr_icm_buddy_create(pool);
	if (err) {
		mlx5dr_err(pool->dmn,
			   "Failed creating buddy for order %d\n",
			   chunk_size);
		goto out;
	}

	/* mark that we have new memory, first in the list */
	new_mem = true;
	goto alloc_buddy_mem;

found:
	*buddy = buddy_mem_pool;
out:
	return err;
}

/* Allocate an ICM chunk; each chunk holds a piece of ICM memory and
 * also memory used for HW STE management, for optimizations.
 */
struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
		       enum mlx5dr_icm_chunk_size chunk_size)
{
	struct mlx5dr_icm_chunk *chunk = NULL;
	struct mlx5dr_icm_buddy_mem *buddy;
	unsigned int seg;
	int ret;

	if (chunk_size > pool->max_log_chunk_sz)
		return NULL;

	mutex_lock(&pool->mutex);
	/* find mem, get back the relevant buddy pool and seg in that mem */
	ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg);
	if (ret)
		goto out;

	chunk = kmem_cache_alloc(pool->chunks_kmem_cache, GFP_KERNEL);
	if (!chunk)
		goto out_err;

	dr_icm_chunk_init(chunk, pool, chunk_size, buddy, seg);

	goto out;

out_err:
	mlx5dr_buddy_free_mem(buddy, seg, chunk_size);
out:
	mutex_unlock(&pool->mutex);
	return chunk;
}

void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
{
	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
	struct mlx5dr_icm_pool *pool = buddy->pool;
	struct mlx5dr_icm_hot_chunk *hot_chunk;
	struct kmem_cache *chunks_cache;

	chunks_cache = pool->chunks_kmem_cache;

	/* move the chunk to the waiting chunks array, AKA "hot" memory */
	mutex_lock(&pool->mutex);

	pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);

	hot_chunk = &pool->hot_chunks_arr[pool->hot_chunks_num++];
	hot_chunk->buddy_mem = chunk->buddy_mem;
	hot_chunk->seg = chunk->seg;
	hot_chunk->size = chunk->size;

	kmem_cache_free(chunks_cache, chunk);

	/* Check if we have chunks that are waiting for sync-ste */
	if (dr_icm_pool_is_sync_required(pool))
		dr_icm_pool_sync_all_buddy_pools(pool);

	mutex_unlock(&pool->mutex);
}

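/* Illustrative usage of mlx5dr_icm_alloc_chunk()/mlx5dr_icm_free_chunk()
 * above (a sketch, not part of the original file): a caller typically
 * allocates a chunk, uses its ICM address and rkey to write steering
 * entries, then returns it to the pool:
 *
 *	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
 *	if (!chunk)
 *		return -ENOMEM;
 *	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
 *	rkey = mlx5dr_icm_pool_get_chunk_rkey(chunk);
 *	...
 *	mlx5dr_icm_free_chunk(chunk);
 *
 * Freed chunks become "hot" and are only returned to their buddy
 * allocator after the pool crosses its hot-memory threshold and a
 * sync_ste completes.
 */
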
struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool)
{
	return kmem_cache_alloc(pool->dmn->htbls_kmem_cache, GFP_KERNEL);
}

void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl)
{
	kmem_cache_free(pool->dmn->htbls_kmem_cache, htbl);
}

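/* Create an ICM pool of the given type. The hot-memory threshold (th)
 * is a percentage of the maximal chunk size, and hot_chunks_arr is
 * sized for the worst case of that many bytes held in single-entry
 * chunks, plus one.
 */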
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type)
{
	u32 num_of_chunks, entry_size;
	struct mlx5dr_icm_pool *pool;
	u32 max_hot_size = 0;

	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->dmn = dmn;
	pool->icm_type = icm_type;
	pool->chunks_kmem_cache = dmn->chunks_kmem_cache;

	INIT_LIST_HEAD(&pool->buddy_mem_list);
	mutex_init(&pool->mutex);

	switch (icm_type) {
	case DR_ICM_TYPE_STE:
		pool->max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
		max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
								  pool->icm_type) *
			       DR_ICM_POOL_STE_HOT_MEM_PERCENT / 100;
		break;
	case DR_ICM_TYPE_MODIFY_ACTION:
		pool->max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
		max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
								  pool->icm_type) *
			       DR_ICM_POOL_MODIFY_ACTION_HOT_MEM_PERCENT / 100;
		break;
	case DR_ICM_TYPE_MODIFY_HDR_PTRN:
		pool->max_log_chunk_sz = dmn->info.max_log_modify_hdr_pattern_icm_sz;
		max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
								  pool->icm_type) *
			       DR_ICM_POOL_MODIFY_HDR_PTRN_HOT_MEM_PERCENT / 100;
		break;
	default:
		WARN_ON(icm_type);
	}

	entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type);

	num_of_chunks = DIV_ROUND_UP(max_hot_size, entry_size) + 1;
	pool->th = max_hot_size;

	pool->hot_chunks_arr = kvcalloc(num_of_chunks,
					sizeof(struct mlx5dr_icm_hot_chunk),
					GFP_KERNEL);
	if (!pool->hot_chunks_arr)
		goto free_pool;

	return pool;

free_pool:
	kvfree(pool);
	return NULL;
}

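/* Destroy the pool: release any chunks still waiting for sync and tear
 * down all remaining buddy allocators and their ICM memory.
 */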
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;

	dr_icm_pool_clear_hot_chunks_arr(pool);

	list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node)
		dr_icm_buddy_destroy(buddy);

	kvfree(pool->hot_chunks_arr);
	mutex_destroy(&pool->mutex);
	kvfree(pool);
}
577