1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include "dr_types.h"
5
6 #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
7 #define DR_ICM_POOL_STE_HOT_MEM_PERCENT 25
8 #define DR_ICM_POOL_MODIFY_HDR_PTRN_HOT_MEM_PERCENT 50
9 #define DR_ICM_POOL_MODIFY_ACTION_HOT_MEM_PERCENT 90
10
/* A chunk that was freed by SW but may still be read by HW.
 * It is parked as "hot" until the next sync-steering command,
 * after which its buddy memory can safely be reclaimed.
 */
struct mlx5dr_icm_hot_chunk {
	struct mlx5dr_icm_buddy_mem *buddy_mem; /* buddy this chunk came from */
	unsigned int seg; /* segment index within the buddy */
	enum mlx5dr_icm_chunk_size size; /* log-size of the chunk */
};
16
/* A pool of ICM memory of a single type (STE / modify-header action /
 * modify-header pattern), backed by a list of buddy allocators that
 * are created on demand.
 */
struct mlx5dr_icm_pool {
	enum mlx5dr_icm_type icm_type;
	enum mlx5dr_icm_chunk_size max_log_chunk_sz; /* largest chunk a buddy serves */
	struct mlx5dr_domain *dmn;
	struct kmem_cache *chunks_kmem_cache; /* slab cache for chunk structs */

	/* memory management */
	struct mutex mutex; /* protect the ICM pool and ICM buddy */
	struct list_head buddy_mem_list;

	/* Hardware may be accessing this memory but at some future,
	 * undetermined time, it might cease to do so.
	 * sync_ste command sets them free.
	 */
	struct mlx5dr_icm_hot_chunk *hot_chunks_arr;
	u32 hot_chunks_num;
	u64 hot_memory_size;
	/* hot memory size threshold for triggering sync */
	u64 th;
};
37
/* Handle for a SW ICM device-memory allocation */
struct mlx5dr_icm_dm {
	u32 obj_id;
	enum mlx5_sw_icm_type type;
	phys_addr_t addr;
	size_t length;
};
44
/* An MR registered over one buddy's SW ICM device memory */
struct mlx5dr_icm_mr {
	u32 mkey;
	struct mlx5dr_icm_dm dm;
	struct mlx5dr_domain *dmn;
	size_t length;
	u64 icm_start_addr; /* base ICM address covered by this MR */
};
52
/* Register an mkey over a device-memory region so that it can be
 * accessed through the given PD. Returns 0 on success or an errno
 * from mlx5_core_create_mkey().
 */
static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
				 u32 pd, u64 length, u64 start_addr, int mode,
				 u32 *mkey)
{
	u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
	void *mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	bool sw_icm = (mode == MLX5_MKC_ACCESS_MODE_SW_ICM);

	/* The access mode is split across two fields in the mkey context */
	MLX5_SET(mkc, mkc, access_mode_1_0, mode);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);

	/* Local read/write always; remote read/write only for SW ICM */
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	if (sw_icm) {
		MLX5_SET(mkc, mkc, rw, 1);
		MLX5_SET(mkc, mkc, rr, 1);
	}

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);
	MLX5_SET(mkc, mkc, pd, pd);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	return mlx5_core_create_mkey(mdev, mkey, in,
				     MLX5_ST_SZ_BYTES(create_mkey_in));
}
79
mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk * chunk)80 u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
81 {
82 u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
83
84 return (u64)offset * chunk->seg;
85 }
86
mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk * chunk)87 u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
88 {
89 return chunk->buddy_mem->icm_mr->mkey;
90 }
91
mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk * chunk)92 u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
93 {
94 u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
95
96 return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
97 }
98
mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk * chunk)99 u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
100 {
101 return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
102 chunk->buddy_mem->pool->icm_type);
103 }
104
mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk * chunk)105 u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
106 {
107 return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
108 }
109
/* Allocate one buddy-sized region of SW ICM device memory and register
 * an MR over it. Returns the new MR on success, NULL on failure.
 */
static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5_core_dev *mdev = pool->dmn->mdev;
	enum mlx5_sw_icm_type dm_type = 0;
	struct mlx5dr_icm_mr *icm_mr;
	size_t log_align_base = 0;
	int err;

	icm_mr = kvzalloc_obj(*icm_mr);
	if (!icm_mr)
		return NULL;

	icm_mr->dmn = pool->dmn;

	/* Each MR always covers a full max-log-size chunk */
	icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
							       pool->icm_type);

	/* Pick the SW ICM type and the required alignment per pool type */
	switch (pool->icm_type) {
	case DR_ICM_TYPE_STE:
		dm_type = MLX5_SW_ICM_TYPE_STEERING;
		/* STE memory is aligned to its own size */
		log_align_base = ilog2(icm_mr->dm.length);
		break;
	case DR_ICM_TYPE_MODIFY_ACTION:
		dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
		/* Align base is 64B */
		log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
		break;
	case DR_ICM_TYPE_MODIFY_HDR_PTRN:
		dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
		/* Align base is 64B */
		log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
		break;
	default:
		WARN_ON(pool->icm_type);
	}

	icm_mr->dm.type = dm_type;

	err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
				   log_align_base, 0, &icm_mr->dm.addr,
				   &icm_mr->dm.obj_id);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
		goto free_icm_mr;
	}

	/* Register device memory */
	err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
				    icm_mr->dm.length,
				    icm_mr->dm.addr,
				    MLX5_MKC_ACCESS_MODE_SW_ICM,
				    &icm_mr->mkey);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
		goto free_dm;
	}

	icm_mr->icm_start_addr = icm_mr->dm.addr;

	/* Verify the allocator honored the requested alignment */
	if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) {
		mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n",
			   log_align_base);
		goto free_mkey;
	}

	return icm_mr;

free_mkey:
	mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
free_dm:
	mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
			       icm_mr->dm.addr, icm_mr->dm.obj_id);
free_icm_mr:
	kvfree(icm_mr);
	return NULL;
}
187
dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr * icm_mr)188 static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
189 {
190 struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;
191 struct mlx5dr_icm_dm *dm = &icm_mr->dm;
192
193 mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
194 mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
195 dm->addr, dm->obj_id);
196 kvfree(icm_mr);
197 }
198
dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem * buddy)199 static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
200 {
201 /* We support only one type of STE size, both for ConnectX-5 and later
202 * devices. Once the support for match STE which has a larger tag is
203 * added (32B instead of 16B), the STE size for devices later than
204 * ConnectX-5 needs to account for that.
205 */
206 return DR_STE_SIZE_REDUCED;
207 }
208
dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk * chunk,int offset)209 static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
210 {
211 int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
212 struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
213 int ste_size = dr_icm_buddy_get_ste_size(buddy);
214 int index = offset / DR_STE_SIZE;
215
216 chunk->ste_arr = &buddy->ste_arr[index];
217 chunk->miss_list = &buddy->miss_list[index];
218 chunk->hw_ste_arr = buddy->hw_ste_arr + index * ste_size;
219
220 memset(chunk->hw_ste_arr, 0, num_of_entries * ste_size);
221 memset(chunk->ste_arr, 0,
222 num_of_entries * sizeof(chunk->ste_arr[0]));
223 }
224
dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem * buddy)225 static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
226 {
227 int num_of_entries =
228 mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
229
230 buddy->ste_arr = kvzalloc_objs(struct mlx5dr_ste, num_of_entries);
231 if (!buddy->ste_arr)
232 return -ENOMEM;
233
234 /* Preallocate full STE size on non-ConnectX-5 devices since
235 * we need to support both full and reduced with the same cache.
236 */
237 buddy->hw_ste_arr = kvcalloc(num_of_entries,
238 dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
239 if (!buddy->hw_ste_arr)
240 goto free_ste_arr;
241
242 buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
243 if (!buddy->miss_list)
244 goto free_hw_ste_arr;
245
246 return 0;
247
248 free_hw_ste_arr:
249 kvfree(buddy->hw_ste_arr);
250 free_ste_arr:
251 kvfree(buddy->ste_arr);
252 return -ENOMEM;
253 }
254
dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem * buddy)255 static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
256 {
257 kvfree(buddy->ste_arr);
258 kvfree(buddy->hw_ste_arr);
259 kvfree(buddy->miss_list);
260 }
261
/* Create a new buddy allocator backed by a freshly allocated ICM MR
 * and link it at the head of the pool's buddy list.
 * Returns 0 on success, -ENOMEM on any failure.
 */
static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy;
	struct mlx5dr_icm_mr *icm_mr;

	icm_mr = dr_icm_pool_mr_create(pool);
	if (!icm_mr)
		return -ENOMEM;

	buddy = kvzalloc_obj(*buddy);
	if (!buddy)
		goto free_mr;

	if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz))
		goto err_free_buddy;

	buddy->icm_mr = icm_mr;
	buddy->pool = pool;

	if (pool->icm_type == DR_ICM_TYPE_STE) {
		/* Reduce allocations by preallocating and reusing the STE structures */
		if (dr_icm_buddy_init_ste_cache(buddy))
			goto err_cleanup_buddy;
	}

	/* add it to the -start- of the list in order to search in it first */
	list_add(&buddy->list_node, &pool->buddy_mem_list);

	pool->dmn->num_buddies[pool->icm_type]++;

	return 0;

err_cleanup_buddy:
	mlx5dr_buddy_cleanup(buddy);
err_free_buddy:
	kvfree(buddy);
free_mr:
	dr_icm_pool_mr_destroy(icm_mr);
	return -ENOMEM;
}
302
dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem * buddy)303 static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
304 {
305 enum mlx5dr_icm_type icm_type = buddy->pool->icm_type;
306
307 dr_icm_pool_mr_destroy(buddy->icm_mr);
308
309 mlx5dr_buddy_cleanup(buddy);
310
311 if (icm_type == DR_ICM_TYPE_STE)
312 dr_icm_buddy_cleanup_ste_cache(buddy);
313
314 buddy->pool->dmn->num_buddies[icm_type]--;
315
316 kvfree(buddy);
317 }
318
319 static void
dr_icm_chunk_init(struct mlx5dr_icm_chunk * chunk,struct mlx5dr_icm_pool * pool,enum mlx5dr_icm_chunk_size chunk_size,struct mlx5dr_icm_buddy_mem * buddy_mem_pool,unsigned int seg)320 dr_icm_chunk_init(struct mlx5dr_icm_chunk *chunk,
321 struct mlx5dr_icm_pool *pool,
322 enum mlx5dr_icm_chunk_size chunk_size,
323 struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
324 unsigned int seg)
325 {
326 int offset;
327
328 chunk->seg = seg;
329 chunk->size = chunk_size;
330 chunk->buddy_mem = buddy_mem_pool;
331
332 if (pool->icm_type == DR_ICM_TYPE_STE) {
333 offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
334 dr_icm_chunk_ste_init(chunk, offset);
335 }
336
337 buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
338 }
339
dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool * pool)340 static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
341 {
342 return pool->hot_memory_size > pool->th;
343 }
344
dr_icm_pool_clear_hot_chunks_arr(struct mlx5dr_icm_pool * pool)345 static void dr_icm_pool_clear_hot_chunks_arr(struct mlx5dr_icm_pool *pool)
346 {
347 struct mlx5dr_icm_hot_chunk *hot_chunk;
348 u32 i, num_entries;
349
350 for (i = 0; i < pool->hot_chunks_num; i++) {
351 hot_chunk = &pool->hot_chunks_arr[i];
352 num_entries = mlx5dr_icm_pool_chunk_size_to_entries(hot_chunk->size);
353 mlx5dr_buddy_free_mem(hot_chunk->buddy_mem,
354 hot_chunk->seg, ilog2(num_entries));
355 hot_chunk->buddy_mem->used_memory -=
356 mlx5dr_icm_pool_chunk_size_to_byte(hot_chunk->size,
357 pool->icm_type);
358 }
359
360 pool->hot_chunks_num = 0;
361 pool->hot_memory_size = 0;
362 }
363
dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool * pool)364 static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
365 {
366 struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
367 int err;
368
369 err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
370 if (err) {
371 mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err);
372 return err;
373 }
374
375 dr_icm_pool_clear_hot_chunks_arr(pool);
376
377 list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
378 if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE)
379 dr_icm_buddy_destroy(buddy);
380 }
381
382 return 0;
383 }
384
/* Find a free segment of size chunk_size in one of the pool's buddies,
 * creating a new buddy if none has room.
 * On success returns 0 and fills *buddy and *seg.
 */
static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool,
					 enum mlx5dr_icm_chunk_size chunk_size,
					 struct mlx5dr_icm_buddy_mem **buddy,
					 unsigned int *seg)
{
	struct mlx5dr_icm_buddy_mem *buddy_mem_pool;
	bool new_mem = false;
	int err;

alloc_buddy_mem:
	/* find the next free place from the buddy list */
	list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) {
		err = mlx5dr_buddy_alloc_mem(buddy_mem_pool,
					     chunk_size, seg);
		if (!err)
			goto found;

		/* A just-created buddy sits at the head of the list; if
		 * even it cannot satisfy the request, give up.
		 */
		if (WARN_ON(new_mem)) {
			/* We have new memory pool, first in the list */
			mlx5dr_err(pool->dmn,
				   "No memory for order: %d\n",
				   chunk_size);
			goto out;
		}
	}

	/* no more available allocators in that pool, create new */
	err = dr_icm_buddy_create(pool);
	if (err) {
		mlx5dr_err(pool->dmn,
			   "Failed creating buddy for order %d\n",
			   chunk_size);
		goto out;
	}

	/* mark we have new memory, first in list */
	new_mem = true;
	goto alloc_buddy_mem;

found:
	*buddy = buddy_mem_pool;
out:
	return err;
}
429
/* Allocate an ICM chunk, each chunk holds a piece of ICM memory and
 * also memory used for HW STE management for optimizations.
 * Returns NULL if chunk_size exceeds the pool's max log size or on
 * allocation failure.
 */
struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
		       enum mlx5dr_icm_chunk_size chunk_size)
{
	struct mlx5dr_icm_chunk *chunk = NULL;
	struct mlx5dr_icm_buddy_mem *buddy;
	unsigned int seg;
	int ret;

	if (chunk_size > pool->max_log_chunk_sz)
		return NULL;

	mutex_lock(&pool->mutex);
	/* find mem, get back the relevant buddy pool and seg in that mem */
	ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg);
	if (ret)
		goto out;

	chunk = kmem_cache_alloc(pool->chunks_kmem_cache, GFP_KERNEL);
	if (!chunk)
		goto out_err;

	dr_icm_chunk_init(chunk, pool, chunk_size, buddy, seg);

	goto out;

out_err:
	/* return the buddy segment we reserved but could not use */
	mlx5dr_buddy_free_mem(buddy, seg, chunk_size);
out:
	mutex_unlock(&pool->mutex);
	return chunk;
}
465
/* Free an ICM chunk. The buddy memory is not reclaimed immediately -
 * HW may still be reading it - so it is parked in the pool's "hot"
 * array until the next sync-steering.
 */
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
{
	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
	struct mlx5dr_icm_pool *pool = buddy->pool;
	struct mlx5dr_icm_hot_chunk *hot_chunk;
	struct kmem_cache *chunks_cache;

	chunks_cache = pool->chunks_kmem_cache;

	/* move the chunk to the waiting chunks array, AKA "hot" memory */
	mutex_lock(&pool->mutex);

	pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);

	/* hot_chunks_arr is sized in mlx5dr_icm_pool_create() to cover
	 * the sync threshold, so this index stays in bounds.
	 */
	hot_chunk = &pool->hot_chunks_arr[pool->hot_chunks_num++];
	hot_chunk->buddy_mem = chunk->buddy_mem;
	hot_chunk->seg = chunk->seg;
	hot_chunk->size = chunk->size;

	kmem_cache_free(chunks_cache, chunk);

	/* Check if we have chunks that are waiting for sync-ste */
	if (dr_icm_pool_is_sync_required(pool))
		dr_icm_pool_sync_all_buddy_pools(pool);

	mutex_unlock(&pool->mutex);
}
493
mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool * pool)494 struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool)
495 {
496 return kmem_cache_alloc(pool->dmn->htbls_kmem_cache, GFP_KERNEL);
497 }
498
mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool * pool,struct mlx5dr_ste_htbl * htbl)499 void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl)
500 {
501 kmem_cache_free(pool->dmn->htbls_kmem_cache, htbl);
502 }
503
/* Create an ICM pool for the given type. The pool starts with no
 * buddies; they are created on demand by mlx5dr_icm_alloc_chunk().
 * Returns the new pool or NULL on allocation failure.
 */
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type)
{
	u32 num_of_chunks, entry_size;
	struct mlx5dr_icm_pool *pool;
	u32 max_hot_size = 0;

	pool = kvzalloc_obj(*pool);
	if (!pool)
		return NULL;

	pool->dmn = dmn;
	pool->icm_type = icm_type;
	pool->chunks_kmem_cache = dmn->chunks_kmem_cache;

	INIT_LIST_HEAD(&pool->buddy_mem_list);
	mutex_init(&pool->mutex);

	/* Per-type maximum buddy size, and the percentage of it that may
	 * accumulate as hot memory before a sync is triggered.
	 */
	switch (icm_type) {
	case DR_ICM_TYPE_STE:
		pool->max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
		max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
								  pool->icm_type) *
			       DR_ICM_POOL_STE_HOT_MEM_PERCENT / 100;
		break;
	case DR_ICM_TYPE_MODIFY_ACTION:
		pool->max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
		max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
								  pool->icm_type) *
			       DR_ICM_POOL_MODIFY_ACTION_HOT_MEM_PERCENT / 100;
		break;
	case DR_ICM_TYPE_MODIFY_HDR_PTRN:
		pool->max_log_chunk_sz = dmn->info.max_log_modify_hdr_pattern_icm_sz;
		max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
								  pool->icm_type) *
			       DR_ICM_POOL_MODIFY_HDR_PTRN_HOT_MEM_PERCENT / 100;
		break;
	default:
		WARN_ON(icm_type);
	}

	entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type);

	/* Worst case: every hot chunk holds a single entry; +1 slot so
	 * the chunk that crosses the threshold still fits.
	 */
	num_of_chunks = DIV_ROUND_UP(max_hot_size, entry_size) + 1;
	pool->th = max_hot_size;

	pool->hot_chunks_arr = kvzalloc_objs(struct mlx5dr_icm_hot_chunk,
					     num_of_chunks);
	if (!pool->hot_chunks_arr)
		goto free_pool;

	return pool;

free_pool:
	kvfree(pool);
	return NULL;
}
561
mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool * pool)562 void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
563 {
564 struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
565
566 dr_icm_pool_clear_hot_chunks_arr(pool);
567
568 list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node)
569 dr_icm_buddy_destroy(buddy);
570
571 kvfree(pool->hot_chunks_arr);
572 mutex_destroy(&pool->mutex);
573 kvfree(pool);
574 }
575