1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "en/params.h"
5 #include "en/txrx.h"
6 #include "en/port.h"
7 #include "en_accel/en_accel.h"
8 #include "en_accel/ipsec.h"
9 #include "en_accel/psp.h"
10 #include <linux/dim.h>
11 #include <net/page_pool/types.h>
12 #include <net/xdp_sock_drv.h>
13 
14 #define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
15 #define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17
16 
17 static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
18 {
19 	u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
20 
21 	return min_page_shift ? : 12;
22 }
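
/* Editor's note (not from the original source): a log_min_mkey_entity_size of
 * 0 means the capability is not reported, so the "?:" above falls back to a
 * 4 KB (2^12) minimum UMR page.
 */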
23 
24 u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
25 {
26 	u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
27 	u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
28 
29 	/* Regular RQ uses order-0 pages, the NIC must be able to map them. */
30 	if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
31 		min_page_shift = req_page_shift;
32 
33 	return max(req_page_shift, min_page_shift);
34 }
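
/* Illustrative example (hypothetical values, not from the original source):
 * a 2 KB XSK chunk gives req_page_shift = 11; if the device reports
 * min_page_shift = 12, the result is 12, and mlx5e_mpwrq_umr_mode() below
 * selects an oversized UMR mode because the 4 KB UMR page is larger than the
 * 2 KB frame.
 */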
35 
36 enum mlx5e_mpwrq_umr_mode
37 mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
38 {
39 	/* Different memory management schemes use different mechanisms to map
40 	 * user-mode memory. The stricter the guarantees we have, the faster
41 	 * the mechanism we can use:
42 	 * 1. MTT - direct mapping in page granularity.
43 	 * 2. KSM - indirect mapping to another MKey to arbitrary addresses, but
44 	 *    all mappings have the same size.
45 	 * 3. KLM - indirect mapping to another MKey to arbitrary addresses, and
46 	 *    mappings can have different sizes.
47 	 */
48 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
49 	bool unaligned = xsk ? xsk->unaligned : false;
50 	bool oversized = false;
51 
52 	if (xsk) {
53 		oversized = xsk->chunk_size < (1 << page_shift);
54 		WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift));
55 	}
56 
57 	/* XSK frame size doesn't match the UMR page size, either because the
58 	 * frame size is not a power of two, or it's smaller than the minimal
59 	 * page size supported by the firmware.
60 	 * It's possible to receive packets bigger than MTU in certain setups.
61 	 * To avoid writing over the XSK frame boundary, the top region of each
62 	 * stride is mapped to a garbage page, resulting in two mappings of
63 	 * different sizes per frame.
64 	 */
65 	if (oversized) {
66 		/* An optimization for frame sizes equal to 3 * power_of_two.
67 		 * 3 KSMs point to the frame, and one KSM points to the garbage
68 		 * page, which works faster than KLM.
69 		 */
70 		if (xsk->chunk_size % 3 == 0 && is_power_of_2(xsk->chunk_size / 3))
71 			return MLX5E_MPWRQ_UMR_MODE_TRIPLE;
72 
73 		return MLX5E_MPWRQ_UMR_MODE_OVERSIZED;
74 	}
75 
76 	/* XSK frames can start at arbitrary unaligned locations, but they all
77 	 * have the same size, which is a power of two. This allows optimizing to
78 	 * one KSM per frame.
79 	 */
80 	if (unaligned)
81 		return MLX5E_MPWRQ_UMR_MODE_UNALIGNED;
82 
83 	/* XSK: frames are naturally aligned, MTT can be used.
84 	 * Non-XSK: Allocations happen in units of CPU pages, therefore, the
85 	 * mappings are naturally aligned.
86 	 */
87 	return MLX5E_MPWRQ_UMR_MODE_ALIGNED;
88 }
89 
90 u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
91 {
92 	switch (mode) {
93 	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
94 		return sizeof(struct mlx5_mtt);
95 	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
96 		return sizeof(struct mlx5_ksm);
97 	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
98 		return sizeof(struct mlx5_klm) * 2;
99 	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
100 		return sizeof(struct mlx5_ksm) * 4;
101 	}
102 	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
103 	return 1;
104 }
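
/* Sizing note (editor's assumption, not taken from this file): with the usual
 * mlx5 descriptor sizes of 8 bytes for struct mlx5_mtt and 16 bytes for
 * struct mlx5_ksm/mlx5_klm, the four modes cost 8, 16, 32 and 64 bytes of UMR
 * payload per mapped frame, respectively.
 */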
105 
106 u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
107 			  enum mlx5e_mpwrq_umr_mode umr_mode)
108 {
109 	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
110 	u8 max_pages_per_wqe, max_log_wqe_size_calc;
111 	u8 max_log_wqe_size_cap;
112 	u16 max_wqe_size;
113 
114 	/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
115 	max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
116 	max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
117 				       MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
118 	max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift;
119 
120 	WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU);
121 
122 	max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ?
123 			   MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ;
124 
125 	return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap);
126 }
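
/* Reading aid (editor's summary): 2^log_wqe_sz is the MPWQE byte size, i.e.
 * pages_per_wqe << page_shift, additionally capped at 2^18 = 256 KB
 * (MLX5_MPWRQ_MAX_LOG_WQE_SZ), or 2^17 = 128 KB on an ECPF.
 */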
127 
128 u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
129 			     enum mlx5e_mpwrq_umr_mode umr_mode)
130 {
131 	u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
132 	u8 pages_per_wqe;
133 
134 	pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;
135 
136 	/* Two MTTs are needed to form an octword. The number of MTTs is encoded
137 	 * in octwords in a UMR WQE, so we need at least two to avoid mapping
138 	 * garbage addresses.
139 	 */
140 	if (WARN_ON_ONCE(pages_per_wqe < 2 && umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
141 		pages_per_wqe = 2;
142 
143 	/* Sanity check for further calculations to succeed. */
144 	BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
145 	if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
146 		return MLX5_MPWRQ_MAX_PAGES_PER_WQE;
147 
148 	return pages_per_wqe;
149 }
150 
151 u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
152 			   enum mlx5e_mpwrq_umr_mode umr_mode)
153 {
154 	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
155 	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
156 	u16 umr_wqe_sz;
157 
158 	umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
159 		ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
160 
161 	WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
162 
163 	return umr_wqe_sz;
164 }
165 
166 u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
167 			  enum mlx5e_mpwrq_umr_mode umr_mode)
168 {
169 	return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode),
170 			    MLX5_SEND_WQE_BB);
171 }
172 
173 u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
174 			    enum mlx5e_mpwrq_umr_mode umr_mode)
175 {
176 	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
177 
178 	/* Add another page as a buffer between WQEs. This page will absorb
179 	 * write overflow by the hardware, when receiving packets larger than
180 	 * MTU. These oversize packets are dropped by the driver at a later
181 	 * stage.
182 	 */
183 	return ALIGN(pages_per_wqe + 1,
184 		     MLX5_SEND_WQE_BB / mlx5e_mpwrq_umr_entry_size(umr_mode));
185 }
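
/* Illustrative example (assuming 8-byte MTT entries and 64-byte WQEBBs): the
 * ALIGN() above rounds the entry count up to a multiple of 8, so the UMR
 * entry list always fills whole WQE basic blocks.
 */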
186 
187 u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
188 				enum mlx5e_mpwrq_umr_mode umr_mode)
189 {
190 	/* Same limits apply to KSMs and KLMs. */
191 	u32 klm_limit = min(MLX5E_MAX_RQ_NUM_KSMS,
192 			    1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));
193 
194 	switch (umr_mode) {
195 	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
196 		return MLX5E_MAX_RQ_NUM_MTTS;
197 	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
198 		return klm_limit;
199 	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
200 		/* Each entry is two KLMs. */
201 		return klm_limit / 2;
202 	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
203 		/* Each entry is four KSMs. */
204 		return klm_limit / 4;
205 	}
206 	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
207 	return 0;
208 }
209 
210 static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
211 				      enum mlx5e_mpwrq_umr_mode umr_mode)
212 {
213 	u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode);
214 	u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode);
215 
216 	return ilog2(max_entries / mtts_per_wqe);
217 }
218 
219 u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
220 			       enum mlx5e_mpwrq_umr_mode umr_mode)
221 {
222 	return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) +
223 		mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
224 		MLX5E_ORDER2_MAX_PACKET_MTU;
225 }
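
/* Editor's summary: this is log2 of the RQ capacity in packets in the worst
 * case of max-MTU packets, i.e. log2(max WQEs) + log2(WQE size) -
 * log2(max packet size).
 */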
226 
227 u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
228 				 struct mlx5e_xsk_param *xsk)
229 {
230 	u16 headroom;
231 
232 	if (xsk)
233 		return xsk->headroom;
234 
235 	headroom = NET_IP_ALIGN;
236 	if (params->xdp_prog)
237 		headroom += XDP_PACKET_HEADROOM;
238 	else
239 		headroom += MLX5_RX_HEADROOM;
240 
241 	return headroom;
242 }
243 
244 static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
245 				      struct mlx5e_xsk_param *xsk)
246 {
247 	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
248 
249 	return xsk->headroom + hw_mtu;
250 }
251 
252 static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
253 {
254 	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
255 	u16 headroom;
256 
257 	if (no_head_tail_room)
258 		return SKB_DATA_ALIGN(hw_mtu);
259 	headroom = mlx5e_get_linear_rq_headroom(params, NULL);
260 
261 	return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
262 }
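
/* Illustrative example (hypothetical numbers): with a standard ~1500-byte MTU,
 * MLX5_SKB_FRAG_SZ() adds the headroom and the skb_shared_info tailroom on top
 * of hw_mtu, and the resulting linear buffer still fits in a single 4 KB page.
 */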
263 
264 static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
265 					 struct mlx5e_params *params,
266 					 struct mlx5e_xsk_param *xsk,
267 					 bool mpwqe)
268 {
269 	bool no_head_tail_room;
270 	u32 sz;
271 
272 	/* XSK frames are mapped as individual pages, because frames may come in
273 	 * an arbitrary order from random locations in the UMEM.
274 	 */
275 	if (xsk)
276 		return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
277 
278 	no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
279 
280 	/* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
281 	 * no_head_tail_room should be set in the case of XDP with Striding RQ
282 	 * when SKB is not linear. This is because another page is allocated for the linear part.
283 	 */
284 	sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room));
285 
286 	/* XDP in mlx5e doesn't support multiple packets per page.
287 	 * Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
288 	 */
289 	return params->xdp_prog && sz < PAGE_SIZE ? PAGE_SIZE : sz;
290 }
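
/* Illustrative example (hypothetical numbers): a 1500-byte MTU without XDP
 * typically rounds up to a 2048-byte stride; with an XDP program attached the
 * stride is forced up to PAGE_SIZE so a page never holds more than one packet.
 */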
291 
292 static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
293 				       struct mlx5e_params *params,
294 				       struct mlx5e_xsk_param *xsk)
295 {
296 	u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
297 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
298 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
299 
300 	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
301 		order_base_2(linear_stride_sz);
302 }
303 
304 bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
305 			    struct mlx5e_params *params,
306 			    struct mlx5e_xsk_param *xsk)
307 {
308 	if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
309 		return false;
310 
311 	/* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set
312 	 * to exclude headroom and tailroom from calculations.
313 	 * no_head_tail_room is true when SKB is built on XDP_PASS on XSK RQs
314 	 * since packet data buffers don't have headroom and tailroom reserved for the SKB.
315 	 * Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
316 	 * must fit into a CPU page.
317 	 */
318 	if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
319 		return false;
320 
321 	/* XSK frames must be big enough to hold the packet data. */
322 	if (xsk && mlx5e_rx_get_linear_sz_xsk(params, xsk) > xsk->chunk_size)
323 		return false;
324 
325 	return true;
326 }
327 
328 static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
329 					  u8 log_stride_sz, u8 log_num_strides,
330 					  u8 page_shift,
331 					  enum mlx5e_mpwrq_umr_mode umr_mode)
332 {
333 	if (log_stride_sz + log_num_strides !=
334 	    mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode))
335 		return false;
336 
337 	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
338 	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
339 		return false;
340 
341 	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
342 		return false;
343 
344 	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
345 		return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;
346 
347 	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
348 }
349 
350 bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
351 					  struct mlx5e_params *params,
352 					  struct mlx5e_xsk_param *xsk)
353 {
354 	u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
355 	u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
356 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
357 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
358 
359 	return mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
360 					     log_wqe_num_of_strides,
361 					     page_shift, umr_mode);
362 }
363 
364 bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
365 				  struct mlx5e_params *params,
366 				  struct mlx5e_xsk_param *xsk)
367 {
368 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
369 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
370 	u8 log_num_strides;
371 	u8 log_stride_sz;
372 	u8 log_wqe_sz;
373 
374 	if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
375 		return false;
376 
377 	log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
378 	log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
379 
380 	if (log_wqe_sz < log_stride_sz)
381 		return false;
382 
383 	log_num_strides = log_wqe_sz - log_stride_sz;
384 
385 	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
386 					     log_num_strides, page_shift,
387 					     umr_mode);
388 }
389 
390 u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
391 			       struct mlx5e_params *params,
392 			       struct mlx5e_xsk_param *xsk)
393 {
394 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
395 	u8 log_pkts_per_wqe, page_shift, max_log_rq_size;
396 
397 	log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
398 	page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
399 	max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);
400 
401 	/* Numbers are unsigned, don't subtract to avoid underflow. */
402 	if (params->log_rq_mtu_frames <
403 	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
404 		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
405 
406 	/* Ethtool's rx_max_pending is calculated for regular RQ, that uses
407 	 * pages of PAGE_SIZE. Max length of an XSK RQ might differ if it uses a
408 	 * frame size not equal to PAGE_SIZE.
409 	 * A stricter condition is checked in mlx5e_mpwrq_validate_xsk, WARN on
410 	 * unexpected failure.
411 	 */
412 	if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size))
413 		return max_log_rq_size;
414 
415 	return params->log_rq_mtu_frames - log_pkts_per_wqe;
416 }
417 
418 static u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5e_params *params)
419 {
420 	return order_base_2(DIV_ROUND_UP(MLX5E_SHAMPO_WQ_RESRV_SIZE,
421 					 params->sw_mtu));
422 }
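
/* Worked example (editor's illustration, assuming a 64 KB
 * MLX5E_SHAMPO_WQ_RESRV_SIZE): with a 1500-byte MTU, DIV_ROUND_UP gives 44
 * packets, which order_base_2() rounds up to 64 packets per reservation
 * (log value 6).
 */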
423 
424 u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
425 				   struct mlx5e_params *params,
426 				   struct mlx5e_xsk_param *xsk)
427 {
428 	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
429 		return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
430 
431 	/* XDP in mlx5e doesn't support multiple packets per page. */
432 	if (params->xdp_prog)
433 		return PAGE_SHIFT;
434 
435 	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
436 }
437 
438 u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
439 				   struct mlx5e_params *params,
440 				   struct mlx5e_xsk_param *xsk)
441 {
442 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
443 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
444 	u8 log_wqe_size, log_stride_size;
445 
446 	log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
447 	log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
448 	WARN(log_wqe_size < log_stride_size,
449 	     "Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n",
450 	     log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk);
451 	return log_wqe_size - log_stride_size;
452 }
453 
454 u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
455 {
456 #define UMR_WQE_BULK (2)
457 	return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
458 }
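
/* Editor's note: UMR WQEs are normally posted in bulks of 2; the min_t() only
 * matters for very small RQs, e.g. wq_sz = 4 limits the bulk to 1, so a bulk
 * can never consume more than half of the ring.
 */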
459 
460 u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
461 			  struct mlx5e_params *params,
462 			  struct mlx5e_xsk_param *xsk)
463 {
464 	u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
465 
466 	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
467 		return linear_headroom;
468 
469 	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
470 		return linear_headroom;
471 
472 	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
473 		return linear_headroom;
474 
475 	return 0;
476 }
477 
478 u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
479 {
480 	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
481 	u16 stop_room;
482 
483 	stop_room  = mlx5e_ktls_get_stop_room(mdev, params);
484 	stop_room += mlx5e_stop_room_for_max_wqe(mdev);
485 	if (is_mpwqe)
486 		/* An MPWQE can take up to the maximum cacheline-aligned WQE size;
487 		 * on top of that, all the normal stop room can be taken if a new
488 		 * packet breaks the active MPWQE session and allocates its WQEs
489 		 * right away.
489 		 */
490 		stop_room += mlx5e_stop_room_for_mpwqe(mdev);
491 
492 	return stop_room;
493 }
494 
495 int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
496 {
497 	size_t sq_size = 1 << params->log_sq_size;
498 	u16 stop_room;
499 
500 	stop_room = mlx5e_calc_sq_stop_room(mdev, params);
501 	if (stop_room >= sq_size) {
502 		mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
503 			      stop_room, sq_size);
504 		return -EINVAL;
505 	}
506 
507 	return 0;
508 }
509 
510 bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
511 {
512 	u32 link_speed = 0;
513 	u32 pci_bw = 0;
514 
515 	mlx5_port_max_linkspeed(mdev, &link_speed);
516 	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
517 	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
518 			   link_speed, pci_bw);
519 
520 #define MLX5E_SLOW_PCI_RATIO (2)
521 
522 	return link_speed && pci_bw &&
523 		link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
524 }
525 
526 int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
527 {
528 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL);
529 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);
530 
531 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
532 		return -EOPNOTSUPP;
533 
534 	return 0;
535 }
536 
537 int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
538 			     struct mlx5e_xsk_param *xsk)
539 {
540 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
541 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
542 	u16 max_mtu_pkts;
543 
544 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
545 		mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n",
546 			      page_shift, umr_mode);
547 		return -EOPNOTSUPP;
548 	}
549 
550 	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
551 		mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
552 		return -EINVAL;
553 	}
554 
555 	/* Current RQ length is too big for the given frame size, the
556 	 * needed number of WQEs exceeds the maximum.
557 	 */
558 	max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
559 			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, umr_mode));
560 	if (params->log_rq_mtu_frames > max_mtu_pkts) {
561 		mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
562 			      1 << params->log_rq_mtu_frames, xsk->chunk_size);
563 		return -EINVAL;
564 	}
565 
566 	return 0;
567 }
568 
569 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
570 			       struct mlx5e_params *params)
571 {
572 	params->log_rq_mtu_frames = is_kdump_kernel() ?
573 		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
574 		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
575 }
576 
577 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
578 {
579 	params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
580 		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
581 		MLX5_WQ_TYPE_CYCLIC;
582 }
583 
584 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
585 			   struct mlx5e_params *params)
586 {
587 	/* Prefer Striding RQ, unless any of the following holds:
588 	 * - Striding RQ configuration is not possible/supported.
589 	 * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
590 	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
591 	 *
592 	 * No XSK params: checking the availability of striding RQ in general.
593 	 */
594 	if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
595 	     MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
596 	    !mlx5e_mpwrq_validate_regular(mdev, params) &&
597 	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
598 	     !mlx5e_rx_is_linear_skb(mdev, params, NULL)))
599 		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
600 	mlx5e_set_rq_type(mdev, params);
601 	mlx5e_init_rq_type_params(mdev, params);
602 }
603 
604 /* Build queue parameters */
605 
606 void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
607 {
608 	*ccp = (struct mlx5e_create_cq_param) {
609 		.netdev = c->netdev,
610 		.wq = c->priv->wq,
611 		.napi = &c->napi,
612 		.ch_stats = c->stats,
613 		.node = cpu_to_node(c->cpu),
614 		.ix = c->vec_ix,
615 		.uar = c->bfreg->up,
616 	};
617 }
618 
619 static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
620 {
621 	if (xdp)
622 		/* XDP requires all fragments to be of the same size. */
623 		return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;
624 
625 	/* Optimization for small packets: the last fragment is bigger than the others. */
626 	return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
627 }
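
/* Worked example (assuming MLX5E_MAX_RX_FRAGS == 4 and 4 KB pages): without
 * XDP the budget is first_frag_size + 2 * frag_size + PAGE_SIZE, since the
 * last fragment may grow to a full page; with XDP it is
 * first_frag_size + 3 * frag_size, as all fragments must share one size.
 */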
628 
629 static void mlx5e_rx_compute_wqe_bulk_params(struct mlx5e_params *params,
630 					     struct mlx5e_rq_frags_info *info)
631 {
632 	u16 bulk_bound_rq_size = (1 << params->log_rq_mtu_frames) / 4;
633 	u32 bulk_bound_rq_size_in_bytes;
634 	u32 sum_frag_strides = 0;
635 	u32 wqe_bulk_in_bytes;
636 	u16 split_factor;
637 	u32 wqe_bulk;
638 	int i;
639 
640 	for (i = 0; i < info->num_frags; i++)
641 		sum_frag_strides += info->arr[i].frag_stride;
642 
643 	/* For MTUs larger than PAGE_SIZE, align up to PAGE_SIZE so the value
644 	 * reflects the number of pages consumed per WQE, in bytes.
645 	 */
646 	if (sum_frag_strides > PAGE_SIZE)
647 		sum_frag_strides = ALIGN(sum_frag_strides, PAGE_SIZE);
648 
649 	bulk_bound_rq_size_in_bytes = bulk_bound_rq_size * sum_frag_strides;
650 
651 #define MAX_WQE_BULK_BYTES(xdp) ((xdp ? 256 : 512) * 1024)
652 
653 	/* A WQE bulk should not exceed min(512KB, 1/4 of rq size). For XDP
654 	 * keep bulk size smaller to avoid filling the page_pool cache on
655 	 * every bulk refill.
656 	 */
657 	wqe_bulk_in_bytes = min_t(u32, MAX_WQE_BULK_BYTES(params->xdp_prog),
658 				  bulk_bound_rq_size_in_bytes);
659 	wqe_bulk = DIV_ROUND_UP(wqe_bulk_in_bytes, sum_frag_strides);
660 
661 	/* Make sure that allocations don't start when the page is still used
662 	 * by older WQEs.
663 	 */
664 	info->wqe_bulk = max_t(u16, info->wqe_index_mask + 1, wqe_bulk);
665 
666 	split_factor = DIV_ROUND_UP(MAX_WQE_BULK_BYTES(params->xdp_prog),
667 				    PP_ALLOC_CACHE_REFILL * PAGE_SIZE);
668 	info->refill_unit = DIV_ROUND_UP(info->wqe_bulk, split_factor);
669 }
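
/* Worked example (hypothetical numbers): a legacy RQ of 1024 WQEs with a
 * single 2 KB fragment per WQE gives bulk_bound_rq_size_in_bytes = 512 KB;
 * wqe_bulk is then bounded by MAX_WQE_BULK_BYTES (512 KB, or 256 KB with XDP)
 * and raised to at least wqe_index_mask + 1.
 */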
670 
671 #define DEFAULT_FRAG_SIZE (2048)
672 
673 static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
674 				     struct mlx5e_params *params,
675 				     struct mlx5e_xsk_param *xsk,
676 				     struct mlx5e_rq_frags_info *info,
677 				     u32 *xdp_frag_size)
678 {
679 	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
680 	int frag_size_max = DEFAULT_FRAG_SIZE;
681 	int first_frag_size_max;
682 	u32 buf_size = 0;
683 	u16 headroom;
684 	int max_mtu;
685 	int i;
686 
687 	if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
688 		int frag_stride;
689 
690 		frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);
691 
692 		info->arr[0].frag_size = byte_count;
693 		info->arr[0].frag_stride = frag_stride;
694 		info->num_frags = 1;
695 
696 		/* N WQEs share the same page, N = PAGE_SIZE / frag_stride. The
697 		 * first WQE in the page is responsible for allocation of this
698 		 * page, this WQE's index is k*N. If WQEs [k*N+1; k*N+N-1] are
699 		 * still not completed, the allocation must stop before k*N.
700 		 */
701 		info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1;
702 
703 		goto out;
704 	}
705 
706 	headroom = mlx5e_get_linear_rq_headroom(params, xsk);
707 	first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
708 
709 	max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
710 					  params->xdp_prog);
711 	if (byte_count > max_mtu || params->xdp_prog) {
712 		frag_size_max = PAGE_SIZE;
713 		first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
714 
715 		max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
716 						  params->xdp_prog);
717 		if (byte_count > max_mtu) {
718 			mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
719 				      params->sw_mtu, max_mtu);
720 			return -EINVAL;
721 		}
722 	}
723 
724 	i = 0;
725 	while (buf_size < byte_count) {
726 		int frag_size = byte_count - buf_size;
727 
728 		if (i == 0)
729 			frag_size = min(frag_size, first_frag_size_max);
730 		else if (i < MLX5E_MAX_RX_FRAGS - 1)
731 			frag_size = min(frag_size, frag_size_max);
732 
733 		info->arr[i].frag_size = frag_size;
734 		buf_size += frag_size;
735 
736 		if (params->xdp_prog) {
737 			/* XDP multi buffer expects fragments of the same size. */
738 			info->arr[i].frag_stride = frag_size_max;
739 		} else {
740 			if (i == 0) {
741 				/* Ensure that headroom and tailroom are included. */
742 				frag_size += headroom;
743 				frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
744 			}
745 			info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
746 		}
747 
748 		i++;
749 	}
750 	info->num_frags = i;
751 
752 	/* The last fragment of WQE with index 2*N may share the page with the
753 	 * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
754 	 * is not completed yet, WQE 2*N must not be allocated, as it's
755 	 * responsible for allocating a new page.
756 	 */
757 	if (frag_size_max == PAGE_SIZE) {
758 		/* No WQE can start in the middle of a page. */
759 		info->wqe_index_mask = 0;
760 	} else {
761 		/* PAGE_SIZEs starting from 8192 don't use 2K-sized fragments,
762 		 * because there would be more than MLX5E_MAX_RX_FRAGS of them.
763 		 */
764 		WARN_ON(PAGE_SIZE != 2 * DEFAULT_FRAG_SIZE);
765 
766 		/* Odd number of fragments allows to pack the last fragment of
767 		 * the previous WQE and the first fragment of the next WQE into
768 		 * the same page.
769 		 * As long as DEFAULT_FRAG_SIZE is 2048, and MLX5E_MAX_RX_FRAGS
770 		 * is 4, the last fragment can be bigger than the rest only if
771 		 * it's the fourth one, so WQEs consisting of 3 fragments will
772 		 * always share a page.
773 		 * When a page is shared, WQE bulk size is 2, otherwise just 1.
774 		 */
775 		info->wqe_index_mask = info->num_frags % 2;
776 	}
777 
778 out:
779 	/* Bulking optimization to skip allocation until a large enough number
780 	 * of WQEs can be allocated in a row. Bulking also influences how well
781 	 * deferred page release works.
782 	 */
783 	mlx5e_rx_compute_wqe_bulk_params(params, info);
784 
785 	mlx5_core_dbg(mdev, "%s: wqe_bulk = %u, wqe_bulk_refill_unit = %u\n",
786 		      __func__, info->wqe_bulk, info->refill_unit);
787 
788 	info->log_num_frags = order_base_2(info->num_frags);
789 
790 	*xdp_frag_size = info->num_frags > 1 && params->xdp_prog ? PAGE_SIZE : 0;
791 
792 	return 0;
793 }
794 
795 static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
796 {
797 	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
798 
799 	switch (wq_type) {
800 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
801 		sz += sizeof(struct mlx5e_rx_wqe_ll);
802 		break;
803 	default: /* MLX5_WQ_TYPE_CYCLIC */
804 		sz += sizeof(struct mlx5e_rx_wqe_cyc);
805 	}
806 
807 	return order_base_2(sz);
808 }
809 
810 static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
811 					struct mlx5e_cq_param *param)
812 {
813 	void *cqc = param->cqc;
814 
815 	MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
816 	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
817 		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
818 }
819 
820 static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
821 					struct mlx5e_params *params,
822 					struct mlx5e_xsk_param *xsk)
823 {
824 	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
825 	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
826 	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
827 	int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
828 	int wqe_size = BIT(log_stride_sz) * num_strides;
829 	int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
830 
831 	/* The +1 covers the case where the pkt_per_rsrv packets don't consume the
832 	 * whole reservation, so we get a filler cqe for the rest of the reservation.
833 	 */
834 	return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
835 }
836 
837 static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
838 				    struct mlx5e_params *params,
839 				    struct mlx5e_xsk_param *xsk,
840 				    struct mlx5e_cq_param *param)
841 {
842 	bool hw_stridx = false;
843 	void *cqc = param->cqc;
844 	u8 log_cq_size;
845 
846 	switch (params->rq_wq_type) {
847 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
848 		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
849 		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
850 			log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
851 		else
852 			log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
853 				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
854 		break;
855 	default: /* MLX5_WQ_TYPE_CYCLIC */
856 		log_cq_size = params->log_rq_mtu_frames;
857 	}
858 
859 	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
860 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
861 		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
862 			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
863 		MLX5_SET(cqc, cqc, cqe_compression_layout,
864 			 MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
865 			 MLX5_CQE_COMPRESS_LAYOUT_ENHANCED :
866 			 MLX5_CQE_COMPRESS_LAYOUT_BASIC);
867 		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
868 	}
869 
870 	mlx5e_build_common_cq_param(mdev, param);
871 	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
872 }
873 
874 static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
875 {
876 	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
877 	bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
878 
879 	return ro && lro_en ?
880 		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
881 }
882 
883 int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
884 			 struct mlx5e_params *params,
885 			 struct mlx5e_xsk_param *xsk,
886 			 struct mlx5e_rq_param *param)
887 {
888 	void *rqc = param->rqc;
889 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
890 	u32 lro_timeout;
891 	int ndsegs = 1;
892 	int err;
893 
894 	switch (params->rq_wq_type) {
895 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
896 		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
897 		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
898 		enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
899 		u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
900 
901 		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
902 						   log_wqe_num_of_strides,
903 						   page_shift, umr_mode)) {
904 			mlx5_core_err(mdev,
905 				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
906 				      log_wqe_stride_size, log_wqe_num_of_strides,
907 				      umr_mode);
908 			return -EINVAL;
909 		}
910 
911 		MLX5_SET(wq, wq, log_wqe_num_of_strides,
912 			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
913 		MLX5_SET(wq, wq, log_wqe_stride_size,
914 			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
915 		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
916 		if (params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO)
917 			break;
918 
919 		MLX5_SET(wq, wq, shampo_enable, true);
920 		MLX5_SET(wq, wq, log_reservation_size,
921 			 MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE -
922 			 MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT);
923 		MLX5_SET(wq, wq,
924 			 log_max_num_of_packets_per_reservation,
925 			 mlx5e_shampo_get_log_pkt_per_rsrv(params));
926 		MLX5_SET(wq, wq, log_headers_entry_size,
927 			 MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE -
928 			 MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT);
929 		lro_timeout =
930 			mlx5e_choose_lro_timeout(mdev,
931 						 MLX5E_DEFAULT_SHAMPO_TIMEOUT);
932 		MLX5_SET(rqc, rqc, reservation_timeout, lro_timeout);
933 		MLX5_SET(rqc, rqc, shampo_match_criteria_type,
934 			 MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED);
935 		MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
936 			 MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE);
937 		break;
938 	}
939 	default: /* MLX5_WQ_TYPE_CYCLIC */
940 		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
941 		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info,
942 						&param->xdp_frag_size);
943 		if (err)
944 			return err;
945 		ndsegs = param->frags_info.num_frags;
946 	}
947 
948 	MLX5_SET(wq, wq, wq_type,          params->rq_wq_type);
949 	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
950 	MLX5_SET(wq, wq, log_wq_stride,
951 		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
952 	MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.hw_objs.pdn);
953 	MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
954 	MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);
955 
956 	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
957 	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);
958 
959 	return 0;
960 }
961 
962 void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
963 			       struct mlx5e_rq_param *param)
964 {
965 	void *rqc = param->rqc;
966 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
967 
968 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
969 	MLX5_SET(wq, wq, log_wq_stride,
970 		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
971 
972 	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
973 }
974 
975 void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
976 			     struct mlx5e_params *params,
977 			     struct mlx5e_cq_param *param)
978 {
979 	void *cqc = param->cqc;
980 
981 	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
982 
983 	mlx5e_build_common_cq_param(mdev, param);
984 	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
985 }
986 
987 void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
988 				 struct mlx5e_sq_param *param)
989 {
990 	void *sqc = param->sqc;
991 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
992 
993 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
994 	MLX5_SET(wq, wq, pd,            mdev->mlx5e_res.hw_objs.pdn);
995 
996 	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
997 }
998 
999 void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
1000 			  struct mlx5e_params *params,
1001 			  struct mlx5e_sq_param *param)
1002 {
1003 	void *sqc = param->sqc;
1004 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1005 	bool allow_swp;
1006 
1007 	allow_swp = mlx5_geneve_tx_allowed(mdev) ||
1008 		    (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO) ||
1009 		    mlx5_is_psp_device(mdev);
1010 	mlx5e_build_sq_param_common(mdev, param);
1011 	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
1012 	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
1013 	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
1014 	param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
1015 	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
1016 }
1017 
1018 static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
1019 				     u8 log_wq_size,
1020 				     struct mlx5e_cq_param *param)
1021 {
1022 	void *cqc = param->cqc;
1023 
1024 	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
1025 
1026 	mlx5e_build_common_cq_param(mdev, param);
1027 
1028 	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1029 }
1030 
1031 /* This function calculates the maximum number of header entries needed per
1032  * WQE. The formula is based on the size of the reservations and on the
1033  * restriction that the maximum number of packets per reservation equals the
1034  * maximum number of headers per reservation.
1035  */
1036 u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
1037 			    struct mlx5e_params *params,
1038 			    struct mlx5e_rq_param *rq_param)
1039 {
1040 	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
1041 	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
1042 	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
1043 	int wqe_size = BIT(log_stride_sz) * num_strides;
1044 	int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
1045 	u32 hd_per_wqe;
1046 
1047 	/* Assumption: hd_per_wqe % 8 == 0. */
1048 	hd_per_wqe = (wqe_size / rsrv_size) * pkt_per_rsrv;
1049 	mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_rsrv = %d\n",
1050 		      __func__, hd_per_wqe, rsrv_size, wqe_size, pkt_per_rsrv);
1051 	return hd_per_wqe;
1052 }
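
/* Worked example (hypothetical numbers): a 256 KB MPWQE with 64 KB
 * reservations and 64 packets per reservation yields
 * hd_per_wqe = (262144 / 65536) * 64 = 256 header entries, satisfying the
 * "hd_per_wqe % 8 == 0" assumption above.
 */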
1053 
1054 /* This function calculates the maximum number of header entries needed for
1055  * the WQ. This value is used to allocate the header buffer in HW, thus it
1056  * must be a power of 2.
1057  */
1058 u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
1059 			   struct mlx5e_params *params,
1060 			   struct mlx5e_rq_param *rq_param)
1061 {
1062 	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
1063 	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
1064 	u32 hd_per_wqe, hd_per_wq;
1065 
1066 	hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
1067 	hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
1068 	return hd_per_wq;
1069 }
1070 
1071 static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
1072 				 struct mlx5e_params *params,
1073 				 struct mlx5e_rq_param *rq_param)
1074 {
1075 	int max_num_of_umr_per_wqe, max_hd_per_wqe, max_ksm_per_umr, rest;
1076 	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
1077 	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
1078 	u32 wqebbs;
1079 
1080 	max_ksm_per_umr = MLX5E_MAX_KSM_PER_WQE(mdev);
1081 	max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
1082 	max_num_of_umr_per_wqe = max_hd_per_wqe / max_ksm_per_umr;
1083 	rest = max_hd_per_wqe % max_ksm_per_umr;
1084 	wqebbs = MLX5E_KSM_UMR_WQEBBS(max_ksm_per_umr) * max_num_of_umr_per_wqe;
1085 	if (rest)
1086 		wqebbs += MLX5E_KSM_UMR_WQEBBS(rest);
1087 	wqebbs *= wq_size;
1088 	return wqebbs;
1089 }
1090 
1091 #define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4
1092 
1093 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
1094 {
1095 	int i;
1096 
1097 	/* The supported periods are organized in ascending order */
1098 	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
1099 		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
1100 			break;
1101 
1102 	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
1103 }
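
/* Editor's note: lro_timer_supported_periods[] is scanned in ascending order,
 * so this returns the smallest supported period that is >= wanted_timeout, or
 * the largest supported period if none is big enough.
 */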
1104 
1105 static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
1106 					struct mlx5e_params *params,
1107 					struct mlx5e_xsk_param *xsk)
1108 {
1109 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
1110 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
1111 	u8 umr_wqebbs;
1112 
1113 	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
1114 
1115 	return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
1116 }
1117 
1118 static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
1119 				      struct mlx5e_params *params,
1120 				      struct mlx5e_rq_param *rqp)
1121 {
1122 	u32 wqebbs, total_pages, useful_space;
1123 
1124 	/* MLX5_WQ_TYPE_CYCLIC */
1125 	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
1126 		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
1127 
1128 	/* UMR WQEs for the regular RQ. */
1129 	wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);
1130 
1131 	/* If XDP program is attached, XSK may be turned on at any time without
1132 	 * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
1133 	 * both regular RQ and XSK RQ.
1134 	 *
1135 	 * XSK uses different values of page_shift, and the total number of UMR
1136 	 * WQEBBs depends on it. This dependency is complex and not monotonic,
1137 	 * especially taking into consideration that some of the parameters come
1138 	 * from capabilities. Hence, we have to try all valid values of XSK
1139 	 * frame size (and page_shift) to find the maximum.
1140 	 */
1141 	if (params->xdp_prog) {
1142 		u32 max_xsk_wqebbs = 0;
1143 		u8 frame_shift;
1144 
1145 		for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
1146 		     frame_shift <= PAGE_SHIFT; frame_shift++) {
1147 			/* The headroom doesn't affect the calculation. */
1148 			struct mlx5e_xsk_param xsk = {
1149 				.chunk_size = 1 << frame_shift,
1150 				.unaligned = false,
1151 			};
1152 
1153 			/* XSK aligned mode. */
1154 			max_xsk_wqebbs = max(max_xsk_wqebbs,
1155 				mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1156 
1157 			/* XSK unaligned mode, frame size is a power of two. */
1158 			xsk.unaligned = true;
1159 			max_xsk_wqebbs = max(max_xsk_wqebbs,
1160 				mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1161 
1162 			/* XSK unaligned mode, frame size is not equal to stride size. */
1163 			xsk.chunk_size -= 1;
1164 			max_xsk_wqebbs = max(max_xsk_wqebbs,
1165 				mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1166 
1167 			/* XSK unaligned mode, frame size is a triple power of two. */
1168 			xsk.chunk_size = (1 << frame_shift) / 4 * 3;
1169 			max_xsk_wqebbs = max(max_xsk_wqebbs,
1170 				mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1171 		}
1172 
1173 		wqebbs += max_xsk_wqebbs;
1174 	}
1175 
1176 	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
1177 		wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
1178 
1179 	/* UMR WQEs don't cross the page boundary, they are padded with NOPs.
1180 	 * This padding is always smaller than the max WQE size. That gives us
1181 	 * at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
1182 	 * per page. The number of pages is estimated as the total size of WQEs
1183 	 * divided by the useful space in page, rounding up. If some WQEs don't
1184 	 * fully fit into the useful space, they can occupy part of the padding,
1185 	 * which proves this estimation to be correct (reserve enough space).
1186 	 */
1187 	useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) + MLX5_SEND_WQE_BB;
1188 	total_pages = DIV_ROUND_UP(wqebbs * MLX5_SEND_WQE_BB, useful_space);
1189 	wqebbs = total_pages * (PAGE_SIZE / MLX5_SEND_WQE_BB);
1190 
1191 	return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
1192 }
1193 
1194 static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
1195 {
1196 	if (mlx5e_is_ktls_rx(mdev))
1197 		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
1198 
1199 	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
1200 }
1201 
1202 static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
1203 				    u8 log_wq_size,
1204 				    struct mlx5e_sq_param *param)
1205 {
1206 	void *sqc = param->sqc;
1207 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1208 
1209 	mlx5e_build_sq_param_common(mdev, param);
1210 
1211 	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
1212 	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
1213 	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
1214 }
1215 
1216 static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
1217 					  u8 log_wq_size,
1218 					  struct mlx5e_sq_param *param)
1219 {
1220 	void *sqc = param->sqc;
1221 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1222 
1223 	mlx5e_build_sq_param_common(mdev, param);
1224 	param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
1225 	param->is_tls = mlx5e_is_ktls_rx(mdev);
1226 	if (param->is_tls)
1227 		param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
1228 	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
1229 	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
1230 	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
1231 }
1232 
1233 void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
1234 			     struct mlx5e_params *params,
1235 			     struct mlx5e_sq_param *param)
1236 {
1237 	void *sqc = param->sqc;
1238 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1239 
1240 	mlx5e_build_sq_param_common(mdev, param);
1241 	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
1242 	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
1243 	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
1244 }
1245 
1246 int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
1247 			      struct mlx5e_params *params,
1248 			      struct mlx5e_channel_param *cparam)
1249 {
1250 	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
1251 	int err;
1252 
1253 	err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
1254 	if (err)
1255 		return err;
1256 
1257 	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
1258 	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
1259 
1260 	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
1261 	mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
1262 	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
1263 	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
1264 
1265 	return 0;
1266 }
1267