// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
#include <linux/dim.h>
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>

#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17

static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
	u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);

	return min_page_shift ? : 12;
}

u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
{
	u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
	u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);

	/* Regular RQ uses order-0 pages, the NIC must be able to map them. */
	if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
		min_page_shift = req_page_shift;

	return max(req_page_shift, min_page_shift);
}
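
/* Illustrative example, assuming PAGE_SHIFT == 12 and a device minimum of
 * 4 KB pages (log_min_mkey_entity_size == 0): a regular RQ gets page_shift
 * 12; an XSK pool with 2 KB frames requests page_shift 11 but is bumped up
 * to the device minimum of 12, which makes the frames "oversized" relative
 * to the UMR page (see mlx5e_mpwrq_umr_mode() below).
 */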

enum mlx5e_mpwrq_umr_mode
mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
{
	/* Different memory management schemes use different mechanisms to map
	 * user-mode memory. The stricter the guarantees we have, the faster
	 * the mechanisms we can use:
	 * 1. MTT - direct mapping in page granularity.
	 * 2. KSM - indirect mapping to another MKey to arbitrary addresses, but
	 *    all mappings have the same size.
	 * 3. KLM - indirect mapping to another MKey to arbitrary addresses, and
	 *    mappings can have different sizes.
	 */
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	bool unaligned = xsk ? xsk->unaligned : false;
	bool oversized = false;

	if (xsk) {
		oversized = xsk->chunk_size < (1 << page_shift);
		WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift));
	}

	/* XSK frame size doesn't match the UMR page size, either because the
	 * frame size is not a power of two, or it's smaller than the minimal
	 * page size supported by the firmware.
	 * It's possible to receive packets bigger than MTU in certain setups.
	 * To avoid writing over the XSK frame boundary, the top region of each
	 * stride is mapped to a garbage page, resulting in two mappings of
	 * different sizes per frame.
	 */
	if (oversized) {
		/* An optimization for frame sizes equal to 3 * power_of_two.
		 * 3 KSMs point to the frame, and one KSM points to the garbage
		 * page, which works faster than KLM.
		 */
		if (xsk->chunk_size % 3 == 0 && is_power_of_2(xsk->chunk_size / 3))
			return MLX5E_MPWRQ_UMR_MODE_TRIPLE;

		return MLX5E_MPWRQ_UMR_MODE_OVERSIZED;
	}

	/* XSK frames can start at arbitrary unaligned locations, but they all
	 * have the same size, which is a power of two. This allows optimizing
	 * to one KSM per frame.
	 */
	if (unaligned)
		return MLX5E_MPWRQ_UMR_MODE_UNALIGNED;

	/* XSK: frames are naturally aligned, MTT can be used.
	 * Non-XSK: Allocations happen in units of CPU pages, therefore, the
	 * mappings are naturally aligned.
	 */
	return MLX5E_MPWRQ_UMR_MODE_ALIGNED;
}
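
/* Examples of the mode selection above (illustrative, 4 KB UMR pages):
 * - regular RQ, or XSK with aligned 4 KB frames -> ALIGNED (MTT);
 * - XSK with unaligned 4 KB frames              -> UNALIGNED (one KSM per frame);
 * - XSK with 3 KB frames (3 * 1024)             -> TRIPLE (4 KSMs per frame);
 * - XSK with 2 KB frames, when the device can't
 *   map pages smaller than 4 KB                 -> OVERSIZED (2 KLMs per frame).
 */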

u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
{
	switch (mode) {
	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
		return sizeof(struct mlx5_mtt);
	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
		return sizeof(struct mlx5_ksm);
	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
		return sizeof(struct mlx5_klm) * 2;
	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
		return sizeof(struct mlx5_ksm) * 4;
	}
	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
	return 0;
}
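
/* For reference (entry sizes per current mlx5 definitions, worth
 * re-checking against the headers): an MTT entry is 8 bytes and KSM/KLM
 * entries are 16 bytes each, so one frame costs 8 bytes of UMR WQE space in
 * ALIGNED mode, 16 in UNALIGNED, 32 in OVERSIZED (2 KLMs) and 64 in TRIPLE
 * (4 KSMs).
 */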

u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
			  enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
	u8 max_pages_per_wqe, max_log_wqe_size_calc;
	u8 max_log_wqe_size_cap;
	u16 max_wqe_size;

	/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
	max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
	max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
				       MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
	max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift;

	WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU);

	max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ?
			       MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ;

	return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap);
}
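
/* A worked example with made-up sizes: assume a 1024-byte max aligned WQE,
 * a 96-byte fixed UMR header (sizeof(struct mlx5e_umr_wqe)), 64-byte flex
 * alignment and 8-byte MTT entries. Then ALIGN_DOWN(1024 - 96, 64) / 8 =
 * 112 entries, ilog2(112) = 6, and with page_shift == 12 the calculated
 * log WQE size is 18, which equals the MLX5_MPWRQ_MAX_LOG_WQE_SZ cap:
 * each MPWQE then covers 256 KB, i.e. 64 order-0 pages.
 */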

u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
			     enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
	u8 pages_per_wqe;

	pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;

	/* Two MTTs are needed to form an octword. The number of MTTs is encoded
	 * in octwords in a UMR WQE, so we need at least two to avoid mapping
	 * garbage addresses.
	 */
	if (WARN_ON_ONCE(pages_per_wqe < 2 && umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
		pages_per_wqe = 2;

	/* Sanity check for further calculations to succeed. */
	BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
	if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
		return MLX5_MPWRQ_MAX_PAGES_PER_WQE;

	return pages_per_wqe;
}

u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
			   enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
	u16 umr_wqe_sz;

	umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
		ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);

	WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);

	return umr_wqe_sz;
}

u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
			  enum mlx5e_mpwrq_umr_mode umr_mode)
{
	return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode),
			    MLX5_SEND_WQE_BB);
}

u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
			    enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);

	/* Add another page as a buffer between WQEs. This page will absorb
	 * write overflow by the hardware, when receiving packets larger than
	 * MTU. These oversize packets are dropped by the driver at a later
	 * stage.
	 */
	return ALIGN(pages_per_wqe + 1,
		     MLX5_SEND_WQE_BB / mlx5e_mpwrq_umr_entry_size(umr_mode));
}
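
/* E.g., in ALIGNED mode with the illustrative sizes above: 64 pages plus 1
 * garbage page = 65 entries, aligned up to MLX5_SEND_WQE_BB / 8 = 8 entries
 * per WQEBB, reserves 72 MTTs per WQE.
 */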

u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
				enum mlx5e_mpwrq_umr_mode umr_mode)
{
	/* Same limits apply to KSMs and KLMs. */
	u32 klm_limit = min(MLX5E_MAX_RQ_NUM_KSMS,
			    1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));

	switch (umr_mode) {
	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
		return MLX5E_MAX_RQ_NUM_MTTS;
	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
		return klm_limit;
	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
		/* Each entry is two KLMs. */
		return klm_limit / 2;
	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
		/* Each entry is four KSMs. */
		return klm_limit / 4;
	}
	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
	return 0;
}

static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
				      enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode);
	u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode);

	return ilog2(max_entries / mtts_per_wqe);
}

u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
			       enum mlx5e_mpwrq_umr_mode umr_mode)
{
	return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) +
		mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
		MLX5E_ORDER2_MAX_PACKET_MTU;
}
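
/* The RQ capacity in packets is (number of WQEs) * (packets per WQE). In
 * the worst case every packet is MTU-sized, so a WQE of 2^log_wqe_sz bytes
 * holds 2^(log_wqe_sz - MLX5E_ORDER2_MAX_PACKET_MTU) packets, hence the sum
 * of logarithms above.
 */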

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{
	u16 headroom;

	if (xsk)
		return xsk->headroom;

	headroom = NET_IP_ALIGN;
	if (params->xdp_prog)
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;

	return headroom;
}

static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
				      struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);

	return xsk->headroom + hw_mtu;
}

static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 headroom;

	if (no_head_tail_room)
		return SKB_DATA_ALIGN(hw_mtu);
	headroom = mlx5e_get_linear_rq_headroom(params, NULL);

	return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}

static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
					 struct mlx5e_params *params,
					 struct mlx5e_xsk_param *xsk,
					 bool mpwqe)
{
	bool no_head_tail_room;
	u32 sz;

	/* XSK frames are mapped as individual pages, because frames may come in
	 * an arbitrary order from random locations in the UMEM.
	 */
	if (xsk)
		return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;

	no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);

	/* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
	 * no_head_tail_room should be set in the case of XDP with Striding RQ
	 * when SKB is not linear. This is because another page is allocated for the linear part.
	 */
	sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room));

	/* XDP in mlx5e doesn't support multiple packets per page.
	 * Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
	 */
	return params->xdp_prog && sz < PAGE_SIZE ? PAGE_SIZE : sz;
}
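
/* Rough example, assuming 4 KB pages and a 1500-byte MTU: headroom plus
 * hw_mtu plus the skb_shared_info overhead lands under 2 KB, so the stride
 * rounds up to 2048 and two packets share a page; attaching an XDP program
 * raises the stride to PAGE_SIZE so each packet owns its page.
 */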

static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
				       struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
{
	u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
		order_base_2(linear_stride_sz);
}

bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
		return false;

	/* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set
	 * to exclude headroom and tailroom from the calculations.
	 * no_head_tail_room is true when the SKB is built on XDP_PASS on XSK RQs,
	 * since packet data buffers don't have headroom and tailroom reserved for the SKB.
	 * Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
	 * must fit into a CPU page.
	 */
	if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
		return false;

	/* XSK frames must be big enough to hold the packet data. */
	if (xsk && mlx5e_rx_get_linear_sz_xsk(params, xsk) > xsk->chunk_size)
		return false;

	return true;
}
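
/* E.g., with 4 KB pages a 1500-byte MTU easily fits into one page together
 * with the headroom and struct skb_shared_info, so the SKB is linear; MTUs
 * that push that sum past PAGE_SIZE (roughly above 3.5 KB) force the
 * non-linear (fragmented) layout instead.
 */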

static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
					  u8 log_stride_sz, u8 log_num_strides,
					  u8 page_shift,
					  enum mlx5e_mpwrq_umr_mode umr_mode)
{
	if (log_stride_sz + log_num_strides !=
	    mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode))
		return false;

	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
		return false;

	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}

bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
					  struct mlx5e_params *params,
					  struct mlx5e_xsk_param *xsk)
{
	u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
	u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
					     log_wqe_num_of_strides,
					     page_shift, umr_mode);
}

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	u8 log_num_strides;
	u8 log_stride_sz;
	u8 log_wqe_sz;

	if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
		return false;

	log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
	log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);

	if (log_wqe_sz < log_stride_sz)
		return false;

	log_num_strides = log_wqe_sz - log_stride_sz;

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
					     log_num_strides, page_shift,
					     umr_mode);
}

u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 log_pkts_per_wqe, page_shift, max_log_rq_size;

	log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
	page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);

	/* Numbers are unsigned, don't subtract to avoid underflow. */
	if (params->log_rq_mtu_frames <
	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	/* Ethtool's rx_max_pending is calculated for regular RQ, that uses
	 * pages of PAGE_SIZE. Max length of an XSK RQ might differ if it uses a
	 * frame size not equal to PAGE_SIZE.
	 * A stricter condition is checked in mlx5e_mpwrq_validate_xsk, WARN on
	 * unexpected failure.
	 */
	if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size))
		return max_log_rq_size;

	return params->log_rq_mtu_frames - log_pkts_per_wqe;
}

static u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5e_params *params)
{
	return order_base_2(DIV_ROUND_UP(MLX5E_SHAMPO_WQ_RESRV_SIZE,
					 params->sw_mtu));
}

u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));

	/* XDP in mlx5e doesn't support multiple packets per page. */
	if (params->xdp_prog)
		return PAGE_SHIFT;

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	u8 log_wqe_size, log_stride_size;

	log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
	log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
	WARN(log_wqe_size < log_stride_size,
	     "Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n",
	     log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk);
	return log_wqe_size - log_stride_size;
}

u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
{
#define UMR_WQE_BULK (2)
	return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
}
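
/* E.g., a 16-WQE striding RQ gets the full bulk of 2 UMR WQEs (min(2, 7)),
 * while a minimal 4-WQE RQ is limited to 4 / 2 - 1 = 1 so that some WQEs
 * always remain posted.
 */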

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{
	u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
		return linear_headroom;

	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return linear_headroom;

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		return linear_headroom;

	return 0;
}

u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	u16 stop_room;

	stop_room = mlx5e_ktls_get_stop_room(mdev, params);
	stop_room += mlx5e_stop_room_for_max_wqe(mdev);
	if (is_mpwqe)
		/* An MPWQE can take up to the maximum cacheline-aligned WQE,
		 * plus all the normal stop room, if a new packet breaks the
		 * active MPWQE session and allocates its WQEs right away.
		 */
		stop_room += mlx5e_stop_room_for_mpwqe(mdev);

	return stop_room;
}

int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	size_t sq_size = 1 << params->log_sq_size;
	u16 stop_room;

	stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	if (stop_room >= sq_size) {
		mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
			      stop_room, sq_size);
		return -EINVAL;
	}

	return 0;
}

bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
		link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
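
/* Example: a 100 Gb/s port behind a PCIe Gen3 x4 link (~31.5 Gb/s usable,
 * as reported by pcie_bandwidth_available() in Mb/s) gives 100000 >
 * 2 * 31500, so the heuristic reports a slow PCI setup; the same port on a
 * x8 link (~63 Gb/s) does not.
 */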

int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);

	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
		return -EOPNOTSUPP;

	return 0;
}

int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	u16 max_mtu_pkts;

	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
		mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n",
			      page_shift, umr_mode);
		return -EOPNOTSUPP;
	}

	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
		mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
		return -EINVAL;
	}

	/* Current RQ length is too big for the given frame size: the needed
	 * number of WQEs exceeds the maximum.
	 */
	max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, umr_mode));
	if (params->log_rq_mtu_frames > max_mtu_pkts) {
		mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
			      1 << params->log_rq_mtu_frames, xsk->chunk_size);
		return -EINVAL;
	}

	return 0;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 *
	 * No XSK params: checking the availability of striding RQ in general.
	 */
	if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
	     MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
	    !mlx5e_mpwrq_validate_regular(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(mdev, params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
	*ccp = (struct mlx5e_create_cq_param) {
		.netdev = c->netdev,
		.wq = c->priv->wq,
		.napi = &c->napi,
		.ch_stats = c->stats,
		.node = cpu_to_node(c->cpu),
		.ix = c->vec_ix,
	};
}

static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
{
	if (xdp)
		/* XDP requires all fragments to be of the same size. */
		return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;

	/* Optimization for small packets: the last fragment is bigger than the others. */
	return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
}
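
/* Rough numbers, assuming MLX5E_MAX_RX_FRAGS == 4, 4 KB pages and 2 KB
 * fragments: with XDP the limit is first_frag_size + 3 * 2048; without XDP
 * the last fragment may span a whole page, giving first_frag_size +
 * 2 * 2048 + 4096, i.e. close to 10 KB of non-linear MTU.
 */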

static void mlx5e_rx_compute_wqe_bulk_params(struct mlx5e_params *params,
					     struct mlx5e_rq_frags_info *info)
{
	u16 bulk_bound_rq_size = (1 << params->log_rq_mtu_frames) / 4;
	u32 bulk_bound_rq_size_in_bytes;
	u32 sum_frag_strides = 0;
	u32 wqe_bulk_in_bytes;
	u16 split_factor;
	u32 wqe_bulk;
	int i;

	for (i = 0; i < info->num_frags; i++)
		sum_frag_strides += info->arr[i].frag_stride;

	/* For MTUs larger than PAGE_SIZE, align up to PAGE_SIZE so the value
	 * reflects the actual number of pages consumed per WQE, in bytes.
	 */
	if (sum_frag_strides > PAGE_SIZE)
		sum_frag_strides = ALIGN(sum_frag_strides, PAGE_SIZE);

	bulk_bound_rq_size_in_bytes = bulk_bound_rq_size * sum_frag_strides;

#define MAX_WQE_BULK_BYTES(xdp) ((xdp ? 256 : 512) * 1024)

	/* A WQE bulk should not exceed min(512KB, 1/4 of rq size). For XDP
	 * keep bulk size smaller to avoid filling the page_pool cache on
	 * every bulk refill.
	 */
	wqe_bulk_in_bytes = min_t(u32, MAX_WQE_BULK_BYTES(params->xdp_prog),
				  bulk_bound_rq_size_in_bytes);
	wqe_bulk = DIV_ROUND_UP(wqe_bulk_in_bytes, sum_frag_strides);

	/* Make sure that allocations don't start when the page is still used
	 * by older WQEs.
	 */
	info->wqe_bulk = max_t(u16, info->wqe_index_mask + 1, wqe_bulk);

	split_factor = DIV_ROUND_UP(MAX_WQE_BULK_BYTES(params->xdp_prog),
				    PP_ALLOC_CACHE_REFILL * PAGE_SIZE);
	info->refill_unit = DIV_ROUND_UP(info->wqe_bulk, split_factor);
}
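
/* Worked example, assuming 4 KB pages and PP_ALLOC_CACHE_REFILL == 64: a
 * 1024-WQE legacy RQ with one 2 KB fragment per WQE bounds the bulk to
 * 256 WQEs (512 KB), which also equals the 512 KB cap, so wqe_bulk = 256;
 * split_factor = DIV_ROUND_UP(512K, 64 * 4K) = 2, so pages are refilled in
 * units of 128 WQEs.
 */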

#define DEFAULT_FRAG_SIZE (2048)

static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_xsk_param *xsk,
				     struct mlx5e_rq_frags_info *info,
				     u32 *xdp_frag_size)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	int first_frag_size_max;
	u32 buf_size = 0;
	u16 headroom;
	int max_mtu;
	int i;

	if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;

		/* N WQEs share the same page, N = PAGE_SIZE / frag_stride. The
		 * first WQE in the page is responsible for allocation of this
		 * page, this WQE's index is k*N. If WQEs [k*N+1; k*N+N-1] are
		 * still not completed, the allocation must stop before k*N.
		 */
		info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1;

		goto out;
	}

	headroom = mlx5e_get_linear_rq_headroom(params, xsk);
	first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

	max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
					  params->xdp_prog);
	if (byte_count > max_mtu || params->xdp_prog) {
		frag_size_max = PAGE_SIZE;
		first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

		max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
						  params->xdp_prog);
		if (byte_count > max_mtu) {
			mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
				      params->sw_mtu, max_mtu);
			return -EINVAL;
		}
	}

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i == 0)
			frag_size = min(frag_size, first_frag_size_max);
		else if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		buf_size += frag_size;

		if (params->xdp_prog) {
			/* XDP multi buffer expects fragments of the same size. */
			info->arr[i].frag_stride = frag_size_max;
		} else {
			if (i == 0) {
				/* Ensure that headroom and tailroom are included. */
				frag_size += headroom;
				frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			}
			info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
		}

		i++;
	}
	info->num_frags = i;

	/* The last fragment of WQE with index 2*N may share the page with the
	 * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
	 * is not completed yet, WQE 2*N must not be allocated, as it's
	 * responsible for allocating a new page.
	 */
	if (frag_size_max == PAGE_SIZE) {
		/* No WQE can start in the middle of a page. */
		info->wqe_index_mask = 0;
	} else {
		/* PAGE_SIZEs starting from 8192 don't use 2K-sized fragments,
		 * because there would be more than MLX5E_MAX_RX_FRAGS of them.
		 */
		WARN_ON(PAGE_SIZE != 2 * DEFAULT_FRAG_SIZE);

		/* Odd number of fragments allows to pack the last fragment of
		 * the previous WQE and the first fragment of the next WQE into
		 * the same page.
		 * As long as DEFAULT_FRAG_SIZE is 2048, and MLX5E_MAX_RX_FRAGS
		 * is 4, the last fragment can be bigger than the rest only if
		 * it's the fourth one, so WQEs consisting of 3 fragments will
		 * always share a page.
		 * When a page is shared, WQE bulk size is 2, otherwise just 1.
		 */
		info->wqe_index_mask = info->num_frags % 2;
	}

out:
	/* Bulking optimization to skip allocation until a large enough number
	 * of WQEs can be allocated in a row. Bulking also influences how well
	 * deferred page release works.
	 */
	mlx5e_rx_compute_wqe_bulk_params(params, info);

	mlx5_core_dbg(mdev, "%s: wqe_bulk = %u, wqe_bulk_refill_unit = %u\n",
		      __func__, info->wqe_bulk, info->refill_unit);

	info->log_num_frags = order_base_2(info->num_frags);

	*xdp_frag_size = info->num_frags > 1 && params->xdp_prog ? PAGE_SIZE : 0;

	return 0;
}
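
/* Sketch of a resulting layout (illustrative, 4 KB pages, no XDP): a ~9 KB
 * MTU splits into a first fragment of first_frag_size_max bytes (2 KB minus
 * headroom and skb_shared_info overhead), two full 2 KB fragments, and a
 * final fragment carrying the remainder, whose stride may round up to a
 * whole page.
 */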

static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
	int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
	int wqe_size = BIT(log_stride_sz) * num_strides;
	int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;

	/* +1 is for the case where the packets don't consume the whole
	 * reservation, so we get a filler CQE for the rest of the reservation.
	 */
	return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
}
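
/* E.g. (all values illustrative): with 256 KB WQEs and 64 KB reservations
 * there are 4 reservations per WQE; an RQ of 16 WQEs with pkt_per_rsrv ==
 * 64 then needs room for 4 * 16 * (64 + 1) CQEs, rounded up to the next
 * power of two.
 */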

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
				    struct mlx5e_params *params,
				    struct mlx5e_xsk_param *xsk,
				    struct mlx5e_cq_param *param)
{
	bool hw_stridx = false;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
			log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
		else
			log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_compression_layout,
			 MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
			 MLX5_CQE_COMPRESS_LAYOUT_ENHANCED :
			 MLX5_CQE_COMPRESS_LAYOUT_BASIC);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
	bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);

	return ro && lro_en ?
		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 lro_timeout;
	int ndsegs = 1;
	int err;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
		enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
		u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
						   log_wqe_num_of_strides,
						   page_shift, umr_mode)) {
			mlx5_core_err(mdev,
				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
				      log_wqe_stride_size, log_wqe_num_of_strides,
				      umr_mode);
			return -EINVAL;
		}

		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
		if (params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO)
			break;

		MLX5_SET(wq, wq, shampo_enable, true);
		MLX5_SET(wq, wq, log_reservation_size,
			 MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE -
			 MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT);
		MLX5_SET(wq, wq,
			 log_max_num_of_packets_per_reservation,
			 mlx5e_shampo_get_log_pkt_per_rsrv(params));
		MLX5_SET(wq, wq, log_headers_entry_size,
			 MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE -
			 MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT);
		lro_timeout =
			mlx5e_choose_lro_timeout(mdev,
						 MLX5E_DEFAULT_SHAMPO_TIMEOUT);
		MLX5_SET(rqc, rqc, reservation_timeout, lro_timeout);
		MLX5_SET(rqc, rqc, shampo_match_criteria_type,
			 MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED);
		MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
			 MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE);
		break;
	}
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info,
						&param->xdp_frag_size);
		if (err)
			return err;
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

	return 0;
}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	bool allow_swp;

	allow_swp = mlx5_geneve_tx_allowed(mdev) ||
		    (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(mdev, param);

	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

/* This function calculates the maximum number of header entries that are
 * needed per WQE; the formula is based on the size of the reservations and
 * the restriction that the max number of packets per reservation is equal
 * to the max number of headers per reservation.
 */
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    struct mlx5e_rq_param *rq_param)
{
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
	int wqe_size = BIT(log_stride_sz) * num_strides;
	int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
	u32 hd_per_wqe;

	/* Assumption: hd_per_wqe % 8 == 0. */
	hd_per_wqe = (wqe_size / rsrv_size) * pkt_per_rsrv;
	mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_rsrv = %d\n",
		      __func__, hd_per_wqe, rsrv_size, wqe_size, pkt_per_rsrv);
	return hd_per_wqe;
}
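
/* Continuing the illustration above: 4 reservations per 256 KB WQE with 64
 * packets (headers) per reservation yields hd_per_wqe == 256.
 */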

/* This function calculates the maximum number of header entries that are
 * needed for the WQ; this value is used to allocate the header buffer in
 * HW, and thus must be a power of 2.
 */
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params,
			   struct mlx5e_rq_param *rq_param)
{
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 hd_per_wqe, hd_per_wq;

	hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
	return hd_per_wq;
}

static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *rq_param)
{
	int max_num_of_umr_per_wqe, max_hd_per_wqe, max_ksm_per_umr, rest;
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 wqebbs;

	max_ksm_per_umr = MLX5E_MAX_KSM_PER_WQE(mdev);
	max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	max_num_of_umr_per_wqe = max_hd_per_wqe / max_ksm_per_umr;
	rest = max_hd_per_wqe % max_ksm_per_umr;
	wqebbs = MLX5E_KSM_UMR_WQEBBS(max_ksm_per_umr) * max_num_of_umr_per_wqe;
	if (rest)
		wqebbs += MLX5E_KSM_UMR_WQEBBS(rest);
	wqebbs *= wq_size;
	return wqebbs;
}

#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4

u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
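
/* E.g., if the device reports supported periods {8, 16, 32, 1024} (values
 * here are illustrative), a wanted timeout of 600 returns 1024; anything
 * above 1024 also returns the last (largest) entry, since the loop stops at
 * the final index.
 */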

static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	u8 umr_wqebbs;

	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);

	return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	u32 wqebbs, total_pages, useful_space;

	/* MLX5_WQ_TYPE_CYCLIC */
	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	/* UMR WQEs for the regular RQ. */
	wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);

	/* If XDP program is attached, XSK may be turned on at any time without
	 * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
	 * both regular RQ and XSK RQ.
	 *
	 * XSK uses different values of page_shift, and the total number of UMR
	 * WQEBBs depends on it. This dependency is complex and not monotonic,
	 * especially taking into consideration that some of the parameters come
	 * from capabilities. Hence, we have to try all valid values of XSK
	 * frame size (and page_shift) to find the maximum.
	 */
	if (params->xdp_prog) {
		u32 max_xsk_wqebbs = 0;
		u8 frame_shift;

		for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
		     frame_shift <= PAGE_SHIFT; frame_shift++) {
			/* The headroom doesn't affect the calculation. */
			struct mlx5e_xsk_param xsk = {
				.chunk_size = 1 << frame_shift,
				.unaligned = false,
			};

			/* XSK aligned mode. */
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));

			/* XSK unaligned mode, frame size is a power of two. */
			xsk.unaligned = true;
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));

			/* XSK unaligned mode, frame size is not equal to stride size. */
			xsk.chunk_size -= 1;
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));

			/* XSK unaligned mode, frame size is a triple power of two. */
			xsk.chunk_size = (1 << frame_shift) / 4 * 3;
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
		}

		wqebbs += max_xsk_wqebbs;
	}

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);

	/* UMR WQEs don't cross the page boundary, they are padded with NOPs.
	 * This padding is always smaller than the max WQE size. That gives us
	 * at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
	 * per page. The number of pages is estimated as the total size of WQEs
	 * divided by the useful space in page, rounding up. If some WQEs don't
	 * fully fit into the useful space, they can occupy part of the padding,
	 * which proves this estimation to be correct (reserve enough space).
	 */
	useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) + MLX5_SEND_WQE_BB;
	total_pages = DIV_ROUND_UP(wqebbs * MLX5_SEND_WQE_BB, useful_space);
	wqebbs = total_pages * (PAGE_SIZE / MLX5_SEND_WQE_BB);

	return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}
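
/* E.g., with 4 KB pages and a 16-WQEBB (1 KB) max WQE, the estimate in the
 * comment above guarantees at least 4096 - (1024 - 64) = 3136 useful bytes
 * (49 WQEBBs) per ICOSQ page.
 */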

static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
	if (mlx5e_is_ktls_rx(mdev))
		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
					  u8 log_wq_size,
					  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
	param->is_tls = mlx5e_is_ktls_rx(mdev);
	if (param->is_tls)
		param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
	int err;

	err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
	if (err)
		return err;

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
	mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

	return 0;
}