xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h (revision 9410645520e9b820069761f3450ef6661418e279)
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#ifndef MLX5HWS_POOL_H_
#define MLX5HWS_POOL_H_

#define MLX5HWS_POOL_STC_LOG_SZ 15

#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100

enum mlx5hws_pool_type {
	MLX5HWS_POOL_TYPE_STE,
	MLX5HWS_POOL_TYPE_STC,
};

struct mlx5hws_pool_chunk {
	u32 resource_idx;
	/* Internal offset, relative to base index */
	int offset;
	int order;
};
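/*
 * Illustrative helper, not part of the driver API: given the buddy-style
 * "order" field, a chunk is assumed to span 2^order contiguous objects,
 * starting at the backing resource's base_id plus the chunk's offset (see
 * the getters at the bottom of this header).
 */
static inline u32
mlx5hws_pool_chunk_example_num_objects(struct mlx5hws_pool_chunk *chunk)
{
	return 1U << chunk->order;
}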

struct mlx5hws_pool_resource {
	struct mlx5hws_pool *pool;
	u32 base_id;
	u32 range;
};

enum mlx5hws_pool_flags {
	/* Only one resource in the pool */
	MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
	MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
	/* No sharing of resources between chunks */
	MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
	/* All objects are the same size */
	MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
	/* Managed by the buddy allocator */
	MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
	/* Allocate pool_type memory on pool creation */
	MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,

	/* These values should be used by the caller */
	MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
		MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
		MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
	MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
		MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
		MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
	MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
		MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
		MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
		MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
};

enum mlx5hws_pool_optimize {
	MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
	MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
	MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
};

struct mlx5hws_pool_attr {
	enum mlx5hws_pool_type pool_type;
	enum mlx5hws_table_type table_type;
	enum mlx5hws_pool_flags flags;
	enum mlx5hws_pool_optimize opt_type;
	/* Allocation size (log2) used once memory is depleted */
	size_t alloc_log_sz;
};
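/*
 * Sketch only, not taken from the driver: how a caller might fill the pool
 * attributes for an STC pool.  MLX5HWS_TABLE_TYPE_FDB is assumed to be a
 * valid enum mlx5hws_table_type value; the authoritative values are set by
 * the callers in the .c files.
 */
static inline void
mlx5hws_pool_example_stc_attr_init(struct mlx5hws_pool_attr *pool_attr)
{
	pool_attr->pool_type = MLX5HWS_POOL_TYPE_STC;
	pool_attr->table_type = MLX5HWS_TABLE_TYPE_FDB;
	pool_attr->flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
	pool_attr->opt_type = MLX5HWS_POOL_OPTIMIZE_NONE;
	pool_attr->alloc_log_sz = MLX5HWS_POOL_STC_LOG_SZ;
}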

enum mlx5hws_db_type {
	/* Used for allocating chunks of big memory; each element has its own resource in the FW */
	MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
	/* One resource only; all elements have the same size */
	MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
	/* Many resources; memory is allocated via the buddy mechanism */
	MLX5HWS_POOL_DB_TYPE_BUDDY,
};
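/*
 * Sketch (an assumption, not the driver's code): how the caller-facing flag
 * combinations above are expected to map onto a DB type.  The authoritative
 * mapping is done by the pool implementation in the .c file.
 */
static inline enum mlx5hws_db_type
mlx5hws_pool_example_db_type(enum mlx5hws_pool_flags flags)
{
	if (flags & MLX5HWS_POOL_FLAGS_BUDDY_MANAGED)
		return MLX5HWS_POOL_DB_TYPE_BUDDY;
	if (flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS)
		return MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
	return MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
}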

struct mlx5hws_buddy_manager {
	struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
};

struct mlx5hws_pool_elements {
	u32 num_of_elements;
	unsigned long *bitmap;
	u32 log_size;
	bool is_full;
};
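/*
 * Sketch (an assumption about the bitmap semantics, which belong to the .c
 * file): looking up a free element in a fixed-size pool, treating set bits
 * as allocated.  find_first_zero_bit() is the standard kernel bitmap helper
 * and, like the errno constant, is assumed to be available through the
 * driver's usual includes.
 */
static inline int
mlx5hws_pool_example_find_free_element(struct mlx5hws_pool_elements *elem)
{
	unsigned long idx;

	idx = find_first_zero_bit(elem->bitmap, elem->num_of_elements);
	if (idx >= elem->num_of_elements)
		return -ENOSPC; /* all elements are in use */

	return idx;
}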

struct mlx5hws_element_manager {
	struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
};

struct mlx5hws_pool_db {
	enum mlx5hws_db_type type;
	union {
		struct mlx5hws_element_manager *element_manager;
		struct mlx5hws_buddy_manager *buddy_manager;
	};
};

typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
					 struct mlx5hws_pool_chunk *chunk);
typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
					  struct mlx5hws_pool_chunk *chunk);
typedef void (*mlx5hws_pool_unint_db)(struct mlx5hws_pool *pool);

struct mlx5hws_pool {
	struct mlx5hws_context *ctx;
	enum mlx5hws_pool_type type;
	enum mlx5hws_pool_flags flags;
	struct mutex lock; /* protect the pool */
	size_t alloc_log_sz;
	enum mlx5hws_table_type tbl_type;
	enum mlx5hws_pool_optimize opt_type;
	struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
	struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
	/* DB */
	struct mlx5hws_pool_db db;
	/* Functions */
	mlx5hws_pool_unint_db p_db_uninit;
	mlx5hws_pool_db_get_chunk p_get_chunk;
	mlx5hws_pool_db_put_chunk p_put_chunk;
};

struct mlx5hws_pool *
mlx5hws_pool_create(struct mlx5hws_context *ctx,
		    struct mlx5hws_pool_attr *pool_attr);

int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);

int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
			     struct mlx5hws_pool_chunk *chunk);

void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
			     struct mlx5hws_pool_chunk *chunk);

static inline u32
mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool,
			       struct mlx5hws_pool_chunk *chunk)
{
	return pool->resource[chunk->resource_idx]->base_id;
}

static inline u32
mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool,
				      struct mlx5hws_pool_chunk *chunk)
{
	return pool->mirror_resource[chunk->resource_idx]->base_id;
}
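
/*
 * End-to-end sketch of the pool API above (illustrative only; the real
 * callers live in the .c files).  It creates a matcher-STE pool, allocates
 * one chunk, derives the absolute object ID of the chunk's first element,
 * then releases everything.  MLX5HWS_TABLE_TYPE_FDB, the log size 10 and the
 * NULL-on-failure return of mlx5hws_pool_create() are assumed/example values.
 */
static inline int
mlx5hws_pool_example_lifecycle(struct mlx5hws_context *ctx)
{
	struct mlx5hws_pool_attr attr = {
		.pool_type = MLX5HWS_POOL_TYPE_STE,
		.table_type = MLX5HWS_TABLE_TYPE_FDB,
		.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL,
		.alloc_log_sz = 10,
	};
	struct mlx5hws_pool_chunk chunk = { .order = 2 }; /* 2^2 = 4 objects */
	struct mlx5hws_pool *pool;
	u32 base_id;
	int ret;

	pool = mlx5hws_pool_create(ctx, &attr);
	if (!pool)
		return -ENOMEM;

	ret = mlx5hws_pool_chunk_alloc(pool, &chunk);
	if (ret)
		goto out_destroy;

	/* Absolute object ID of the chunk's first element */
	base_id = mlx5hws_pool_chunk_get_base_id(pool, &chunk) + chunk.offset;
	(void)base_id;

	mlx5hws_pool_chunk_free(pool, &chunk);
out_destroy:
	mlx5hws_pool_destroy(pool);
	return ret;
}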
#endif /* MLX5HWS_POOL_H_ */