xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c (revision c2c2ccfd4ba72718266a56f3ecc34c989cb5b7a0)
1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/ip.h>
34 #include <linux/ipv6.h>
35 #include <linux/tcp.h>
36 #include <linux/bitmap.h>
37 #include <linux/filter.h>
38 #include <net/ip6_checksum.h>
39 #include <net/page_pool/helpers.h>
40 #include <net/inet_ecn.h>
41 #include <net/gro.h>
42 #include <net/udp.h>
43 #include <net/tcp.h>
44 #include <net/xdp_sock_drv.h>
45 #include "en.h"
46 #include "en/txrx.h"
47 #include "en_tc.h"
48 #include "eswitch.h"
49 #include "en_rep.h"
50 #include "en/rep/tc.h"
51 #include "ipoib/ipoib.h"
52 #include "en_accel/ipsec.h"
53 #include "en_accel/macsec.h"
54 #include "en_accel/psp_rxtx.h"
55 #include "en_accel/ipsec_rxtx.h"
56 #include "en_accel/ktls_txrx.h"
57 #include "en/xdp.h"
58 #include "en/xsk/rx.h"
59 #include "en/health.h"
60 #include "en/params.h"
61 #include "devlink.h"
62 #include "en/devlink.h"
63 
64 static struct sk_buff *
65 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
66 				struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
67 				u32 page_idx);
68 static struct sk_buff *
69 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
70 				   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
71 				   u32 page_idx);
72 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
74 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
75 
76 const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
77 	.handle_rx_cqe       = mlx5e_handle_rx_cqe,
78 	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
79 	.handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
80 };
81 
82 static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
83 				       u32 cqcc, void *data)
84 {
85 	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
86 
87 	memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
88 }
89 
90 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
91 					   struct mlx5_cqe64 *cqe)
92 {
93 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
94 	struct mlx5_cqe64 *title = &cqd->title;
95 
96 	memcpy(title, cqe, sizeof(struct mlx5_cqe64));
97 
98 	if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
99 		return;
100 
101 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
102 		cqd->wqe_counter = mpwrq_get_cqe_stride_index(title) +
103 			mpwrq_get_cqe_consumed_strides(title);
104 	else
105 		cqd->wqe_counter =
106 			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
107 }
108 
109 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
110 					 struct mlx5_cqwq *wq,
111 					 u32 cqcc)
112 {
113 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
114 	struct mlx5_cqe64 *title = &cqd->title;
115 
116 	mlx5e_read_cqe_slot(wq, cqcc, title);
117 	cqd->left        = be32_to_cpu(title->byte_cnt);
118 	cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
119 	rq->stats->cqe_compress_blks++;
120 }
121 
122 static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
123 					    struct mlx5e_cq_decomp *cqd,
124 					    u32 cqcc)
125 {
126 	mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
127 	cqd->mini_arr_idx = 0;
128 }
129 
130 static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
131 {
132 	u32 cqcc   = wq->cc;
133 	u8  op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
134 	u32 ci     = mlx5_cqwq_ctr2ix(wq, cqcc);
135 	u32 wq_sz  = mlx5_cqwq_get_size(wq);
136 	u32 ci_top = min_t(u32, wq_sz, ci + n);
137 
138 	for (; ci < ci_top; ci++, n--) {
139 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
140 
141 		cqe->op_own = op_own;
142 	}
143 
144 	if (unlikely(ci == wq_sz)) {
145 		op_own = !op_own;
146 		for (ci = 0; ci < n; ci++) {
147 			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
148 
149 			cqe->op_own = op_own;
150 		}
151 	}
152 }
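
/* Worked example (illustrative numbers): with wq_sz == 8, cc == 6 and
 * n == 4, the first loop above rewrites op_own in slots 6 and 7 (n drops
 * to 2), the wrap is hit (ci == wq_sz), op_own is inverted, and slots 0
 * and 1 get the flipped ownership value expected on the next lap around
 * the ring.
 */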
153 
154 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
155 					struct mlx5_cqwq *wq,
156 					u32 cqcc)
157 {
158 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
159 	struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
160 	struct mlx5_cqe64 *title = &cqd->title;
161 
162 	title->byte_cnt     = mini_cqe->byte_cnt;
163 	title->check_sum    = mini_cqe->checksum;
164 	title->op_own      &= 0xf0;
165 	title->op_own      |= 0x01 & (cqcc >> wq->fbc.log_sz);
166 
167 	/* state bit set implies linked-list striding RQ wq type and
168 	 * HW stride index capability supported
169 	 */
170 	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
171 		title->wqe_counter = mini_cqe->stridx;
172 		return;
173 	}
174 
175 	/* HW stride index capability not supported */
176 	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
177 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
178 		cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
179 	else
180 		cqd->wqe_counter =
181 			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
182 }
183 
184 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
185 						struct mlx5_cqwq *wq,
186 						u32 cqcc)
187 {
188 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
189 
190 	mlx5e_decompress_cqe(rq, wq, cqcc);
191 	cqd->title.rss_hash_type   = 0;
192 	cqd->title.rss_hash_result = 0;
193 }
194 
195 static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
196 					 struct mlx5_cqwq *wq,
197 					 struct mlx5_cqe64 *cqe,
198 					 int budget_rem)
199 {
200 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
201 	u32 cqcc, left;
202 	u32 i;
203 
204 	left = get_cqe_enhanced_num_mini_cqes(cqe);
205 	/* Avoid breaking the CQE compression session in the middle if the
206 	 * budget is not sufficient to handle all of it. In that case, return
207 	 * work_done == budget_rem to give NAPI a 'busy' indication.
208 	 */
209 	if (unlikely(left > budget_rem))
210 		return budget_rem;
211 
212 	cqcc = wq->cc;
213 	cqd->mini_arr_idx = 0;
214 	memcpy(cqd->mini_arr, cqe, sizeof(struct mlx5_cqe64));
215 	for (i = 0; i < left; i++, cqd->mini_arr_idx++, cqcc++) {
216 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
217 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
218 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
219 				rq, &cqd->title);
220 	}
221 	wq->cc = cqcc;
222 	rq->stats->cqe_compress_pkts += left;
223 
224 	return left;
225 }
226 
227 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
228 					     struct mlx5_cqwq *wq,
229 					     int update_owner_only,
230 					     int budget_rem)
231 {
232 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
233 	u32 cqcc = wq->cc + update_owner_only;
234 	u32 cqe_count;
235 	u32 i;
236 
237 	cqe_count = min_t(u32, cqd->left, budget_rem);
238 
239 	for (i = update_owner_only; i < cqe_count;
240 	     i++, cqd->mini_arr_idx++, cqcc++) {
241 		if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
242 			mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
243 
244 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
245 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
246 				mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
247 				rq, &cqd->title);
248 	}
249 	mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
250 	wq->cc = cqcc;
251 	cqd->left -= cqe_count;
252 	rq->stats->cqe_compress_pkts += cqe_count;
253 
254 	return cqe_count;
255 }
256 
257 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
258 					      struct mlx5_cqwq *wq,
259 					      int budget_rem)
260 {
261 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
262 	u32 cc = wq->cc;
263 
264 	mlx5e_read_title_slot(rq, wq, cc);
265 	mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
266 	mlx5e_decompress_cqe(rq, wq, cc);
267 	INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
268 			mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
269 			rq, &cqd->title);
270 	cqd->mini_arr_idx++;
271 
272 	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
273 }
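
/* Legacy CQE compression in short: the title CQE carries the session's
 * shared fields and its byte_cnt holds the number of mini CQEs that
 * follow, packed MLX5_MINI_CQE_ARRAY_SIZE to a CQE slot (cqd->left tracks
 * how many are still pending). _start() expands and handles the first
 * mini CQE; _cont() walks the rest, refilling cqd->mini_arr from the next
 * slot whenever mini_arr_idx wraps, and finally hands the consumed slots
 * back to HW by restoring their ownership bits in
 * mlx5e_cqes_update_owner().
 */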
274 
275 #define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
276 
277 static int mlx5e_page_alloc_fragmented(struct page_pool *pp,
278 				       struct mlx5e_frag_page *frag_page)
279 {
280 	netmem_ref netmem = page_pool_dev_alloc_netmems(pp);
281 
282 	if (unlikely(!netmem))
283 		return -ENOMEM;
284 
285 	page_pool_fragment_netmem(netmem, MLX5E_PAGECNT_BIAS_MAX);
286 
287 	*frag_page = (struct mlx5e_frag_page) {
288 		.netmem	= netmem,
289 		.frags	= 0,
290 	};
291 
292 	return 0;
293 }
294 
295 static void mlx5e_page_release_fragmented(struct page_pool *pp,
296 					  struct mlx5e_frag_page *frag_page)
297 {
298 	u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
299 	netmem_ref netmem = frag_page->netmem;
300 
301 	if (page_pool_unref_netmem(netmem, drain_count) == 0)
302 		page_pool_put_unrefed_netmem(pp, netmem, -1, true);
303 }
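
/* Reference-accounting sketch for the two helpers above: alloc seeds the
 * netmem with MLX5E_PAGECNT_BIAS_MAX page-pool fragment references (64
 * with 4K pages), and frag_page->frags counts how many of them the RX
 * path actually hands out. Release drains the unused bias, so the netmem
 * is recycled exactly when its last real user drops its reference, e.g.:
 *
 *	mlx5e_page_alloc_fragmented(pp, fp);	// bias == BIAS_MAX, frags == 0
 *	fp->frags++;				// one fragment handed to an SKB
 *	mlx5e_page_release_fragmented(pp, fp);	// drops BIAS_MAX - 1 refs;
 *						// the SKB frag holds the last one
 */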
304 
305 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
306 				    struct mlx5e_wqe_frag_info *frag)
307 {
308 	int err = 0;
309 
310 	if (!frag->offset)
311 		/* On the first frag (offset == 0), replenish the page.
312 		 * Subsequent frags that point into the same page (at a
313 		 * different offset) reuse it without replenishing it again
314 		 * themselves.
315 		 */
316 		err = mlx5e_page_alloc_fragmented(rq->page_pool,
317 						  frag->frag_page);
318 
319 	return err;
320 }
321 
322 static bool mlx5e_frag_can_release(struct mlx5e_wqe_frag_info *frag)
323 {
324 #define CAN_RELEASE_MASK \
325 	(BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) | BIT(MLX5E_WQE_FRAG_SKIP_RELEASE))
326 
327 #define CAN_RELEASE_VALUE BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE)
328 
329 	return (frag->flags & CAN_RELEASE_MASK) == CAN_RELEASE_VALUE;
330 }
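
/* I.e. release only a frag that is the last one in its page and is not
 * marked to be skipped:
 *
 *	LAST_IN_PAGE	SKIP_RELEASE	can release?
 *	     0		     x		no (another frag owns the release)
 *	     1		     0		yes
 *	     1		     1		no (release handled elsewhere)
 */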
331 
332 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
333 				     struct mlx5e_wqe_frag_info *frag)
334 {
335 	if (mlx5e_frag_can_release(frag))
336 		mlx5e_page_release_fragmented(rq->page_pool, frag->frag_page);
337 }
338 
339 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
340 {
341 	return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
342 }
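
/* All frags of WQE @ix sit contiguously in the frags array; e.g. with
 * 4 frags per WQE (log_num_frags == 2), WQE 3 owns frags[12..15].
 */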
343 
344 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
345 			      u16 ix)
346 {
347 	struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
348 	int err;
349 	int i;
350 
351 	for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
352 		dma_addr_t addr;
353 		u16 headroom;
354 
355 		err = mlx5e_get_rx_frag(rq, frag);
356 		if (unlikely(err))
357 			goto free_frags;
358 
359 		frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
360 
361 		headroom = i == 0 ? rq->buff.headroom : 0;
362 		addr = page_pool_get_dma_addr_netmem(frag->frag_page->netmem);
363 		wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
364 	}
365 
366 	return 0;
367 
368 free_frags:
369 	while (--i >= 0)
370 		mlx5e_put_rx_frag(rq, --frag);
371 
372 	return err;
373 }
374 
375 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
376 				     struct mlx5e_wqe_frag_info *wi)
377 {
378 	int i;
379 
380 	for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
381 		mlx5e_put_rx_frag(rq, wi);
382 }
383 
384 static void mlx5e_xsk_free_rx_wqe(struct mlx5e_wqe_frag_info *wi)
385 {
386 	if (!(wi->flags & BIT(MLX5E_WQE_FRAG_SKIP_RELEASE)))
387 		xsk_buff_free(*wi->xskp);
388 }
389 
390 static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
391 {
392 	struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
393 
394 	if (rq->xsk_pool) {
395 		mlx5e_xsk_free_rx_wqe(wi);
396 	} else {
397 		mlx5e_free_rx_wqe(rq, wi);
398 
399 		/* Avoid a second release of the WQE pages: dealloc is called
400 		 * for the same missing WQEs both on regular RQ flush and on
401 		 * regular RQ close, which happens when XSK RQs come into play.
402 		 */
403 		for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++)
404 			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
405 	}
406 }
407 
408 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
409 {
410 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
411 	int i;
412 
413 	for (i = 0; i < wqe_bulk; i++) {
414 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
415 		struct mlx5e_wqe_frag_info *wi;
416 
417 		wi = get_frag(rq, j);
418 		/* The page is always put into the Reuse Ring, because there
419 		 * is no way to return the page to the userspace when the
420 		 * interface goes down.
421 		 */
422 		mlx5e_xsk_free_rx_wqe(wi);
423 	}
424 }
425 
426 static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
427 {
428 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
429 	int i;
430 
431 	for (i = 0; i < wqe_bulk; i++) {
432 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
433 		struct mlx5e_wqe_frag_info *wi;
434 
435 		wi = get_frag(rq, j);
436 		mlx5e_free_rx_wqe(rq, wi);
437 	}
438 }
439 
440 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
441 {
442 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
443 	int i;
444 
445 	for (i = 0; i < wqe_bulk; i++) {
446 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
447 		struct mlx5e_rx_wqe_cyc *wqe;
448 
449 		wqe = mlx5_wq_cyc_get_wqe(wq, j);
450 
451 		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
452 			break;
453 	}
454 
455 	return i;
456 }
457 
458 static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
459 {
460 	int remaining = wqe_bulk;
461 	int total_alloc = 0;
462 	int refill_alloc;
463 	int refill;
464 
465 	/* The WQE bulk is split into smaller bulks that are sized
466 	 * according to the page pool cache refill size to avoid overflowing
467 	 * the page pool cache due to too many page releases at once.
468 	 */
469 	do {
470 		refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
471 
472 		mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
473 		refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
474 		if (unlikely(refill_alloc != refill))
475 			goto err_free;
476 
477 		total_alloc += refill_alloc;
478 		remaining -= refill;
479 	} while (remaining);
480 
481 	return total_alloc;
482 
483 err_free:
484 	mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
485 
486 	for (int i = 0; i < total_alloc + refill; i++) {
487 		int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
488 		struct mlx5e_wqe_frag_info *frag;
489 
490 		frag = get_frag(rq, j);
491 		for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
492 			frag->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
493 	}
494 
495 	return 0;
496 }
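
/* E.g. with wqe_bulk == 64 and refill_unit == 16, the loop above runs four
 * free-then-alloc rounds of 16 WQEs each. On a partial allocation failure,
 * the WQEs allocated so far are freed again and the whole attempted range
 * is flagged SKIP_RELEASE, so returning 0 cannot cause a double release
 * later.
 */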
497 
498 static void
499 mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo,
500 			       struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
501 			       u32 frag_offset, u32 len)
502 {
503 	netmem_ref netmem = frag_page->netmem;
504 	skb_frag_t *frag;
505 
506 	dma_addr_t addr = page_pool_get_dma_addr_netmem(netmem);
507 
508 	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
509 	if (!xdp_buff_has_frags(xdp)) {
510 		/* Init on the first fragment to avoid cold cache access
511 		 * when possible.
512 		 */
513 		sinfo->nr_frags = 0;
514 		sinfo->xdp_frags_size = 0;
515 		xdp_buff_set_frags_flag(xdp);
516 	}
517 
518 	frag = &sinfo->frags[sinfo->nr_frags++];
519 	skb_frag_fill_netmem_desc(frag, netmem, frag_offset, len);
520 
521 	if (netmem_is_pfmemalloc(netmem))
522 		xdp_buff_set_frag_pfmemalloc(xdp);
523 	sinfo->xdp_frags_size += len;
524 }
525 
526 static inline void
527 mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
528 		   struct mlx5e_frag_page *frag_page,
529 		   u32 frag_offset, u32 len,
530 		   unsigned int truesize)
531 {
532 	dma_addr_t addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
533 	u8 next_frag = skb_shinfo(skb)->nr_frags;
534 	netmem_ref netmem = frag_page->netmem;
535 
536 	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
537 				rq->buff.map_dir);
538 
539 	if (skb_can_coalesce_netmem(skb, next_frag, netmem, frag_offset)) {
540 		skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
541 		return;
542 	}
543 
544 	frag_page->frags++;
545 	skb_add_rx_frag_netmem(skb, next_frag, netmem,
546 			       frag_offset, len, truesize);
547 }
548 
549 static inline void
550 mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
551 		      netmem_ref netmem, dma_addr_t addr,
552 		      int offset_from, int dma_offset, u32 headlen)
553 {
554 	const void *from = netmem_address(netmem) + offset_from;
555 	/* Aligning len to sizeof(long) optimizes memcpy performance */
556 	unsigned int len = ALIGN(headlen, sizeof(long));
557 
558 	dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
559 				rq->buff.map_dir);
560 	skb_copy_to_linear_data(skb, from, len);
561 }
562 
563 static void
564 mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
565 {
566 	bool no_xdp_xmit;
567 	int i;
568 
569 	/* A common case for AF_XDP. */
570 	if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe))
571 		return;
572 
573 	no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
574 
575 	if (rq->xsk_pool) {
576 		struct xdp_buff **xsk_buffs = wi->alloc_units.xsk_buffs;
577 
578 		/* The page is always put into the Reuse Ring, because there
579 		 * is no way to return the page to userspace when the interface
580 		 * goes down.
581 		 */
582 		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
583 			if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap))
584 				xsk_buff_free(xsk_buffs[i]);
585 	} else {
586 		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) {
587 			if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap)) {
588 				struct mlx5e_frag_page *frag_page;
589 
590 				frag_page = &wi->alloc_units.frag_pages[i];
591 				mlx5e_page_release_fragmented(rq->page_pool,
592 							      frag_page);
593 			}
594 		}
595 	}
596 }
597 
598 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
599 {
600 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
601 
602 	do {
603 		u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
604 
605 		mlx5_wq_ll_push(wq, next_wqe_index);
606 	} while (--n);
607 
608 	/* ensure wqes are visible to device before updating doorbell record */
609 	dma_wmb();
610 
611 	mlx5_wq_ll_update_db_record(wq);
612 }
613 
614 /* Return the size of the contiguous free region in the bitmap that starts
615  * at @first, capped at @len; the search wraps around the end of the bitmap.
616  */
617 static int bitmap_find_window(unsigned long *bitmap, int len,
618 			      int bitmap_size, int first)
619 {
620 	int next_one, count;
621 
622 	next_one = find_next_bit(bitmap, bitmap_size, first);
623 	if (next_one == bitmap_size) {
624 		if (bitmap_size - first >= len)
625 			return len;
626 		next_one = find_next_bit(bitmap, bitmap_size, 0);
627 		count = next_one + bitmap_size - first;
628 	} else {
629 		count = next_one - first;
630 	}
631 
632 	return min(len, count);
633 }
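
/* Worked example (illustrative): bitmap_size == 8, first == 6, len == 4,
 * only bit 1 set. find_next_bit() from bit 6 finds nothing (returns 8);
 * the tail 8 - 6 == 2 is smaller than len, so the search wraps:
 * next_one == 1, count == 1 + 8 - 6 == 3, and min(4, 3) == 3 free slots
 * {6, 7, 0} are reported.
 */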
634 
635 static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
636 			  __be32 key, u16 offset, u16 ksm_len)
637 {
638 	memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
639 	umr_wqe->hdr.ctrl.opmod_idx_opcode =
640 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
641 			     MLX5_OPCODE_UMR);
642 	umr_wqe->hdr.ctrl.umr_mkey = key;
643 	umr_wqe->hdr.ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
644 					    | MLX5E_KSM_UMR_DS_CNT(ksm_len));
645 	umr_wqe->hdr.uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
646 	umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
647 	umr_wqe->hdr.uctrl.xlt_octowords = cpu_to_be16(ksm_len);
648 	umr_wqe->hdr.uctrl.mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
649 }
650 
651 static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq,
652 							    int header_index)
653 {
654 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
655 
656 	return &shampo->pages[header_index >> shampo->log_hd_per_page];
657 }
658 
659 static u64 mlx5e_shampo_hd_offset(struct mlx5e_rq *rq, int header_index)
660 {
661 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
662 	u32 hd_per_page = shampo->hd_per_page;
663 
664 	return (header_index & (hd_per_page - 1)) << shampo->log_hd_entry_size;
665 }
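
/* Header entries are packed hd_per_page to a page, so the two helpers
 * above split header_index into a page index (high bits) and a byte
 * offset within the page (low bits). E.g., assuming 64-byte entries
 * (log_hd_entry_size == 6) and a 4K page (hd_per_page == 64),
 * header_index 70 maps to shampo->pages[1] at offset (70 & 63) << 6 == 384.
 */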
666 
667 static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index);
668 
669 static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
670 				     struct mlx5e_icosq *sq,
671 				     u16 ksm_entries, u16 index)
672 {
673 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
674 	u16 pi, header_offset, wqe_bbs;
675 	u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
676 	struct mlx5e_umr_wqe *umr_wqe;
677 	int headroom, err, i;
678 
679 	headroom = rq->buff.headroom;
680 	wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
681 	pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
682 	umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
683 	build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries);
684 
685 	for (i = 0; i < ksm_entries; i++, index++) {
686 		struct mlx5e_frag_page *frag_page;
687 		u64 addr;
688 
689 		frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
690 		header_offset = mlx5e_shampo_hd_offset(rq, index);
691 		if (!header_offset) {
692 			err = mlx5e_page_alloc_fragmented(rq->hd_page_pool,
693 							  frag_page);
694 			if (err)
695 				goto err_unmap;
696 		}
697 
698 		addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
699 		umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
700 			.key = cpu_to_be32(lkey),
701 			.va  = cpu_to_be64(addr + header_offset + headroom),
702 		};
703 	}
704 
705 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
706 		.wqe_type	= MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
707 		.num_wqebbs	= wqe_bbs,
708 		.shampo.len	= ksm_entries,
709 	};
710 
711 	shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
712 	sq->pc += wqe_bbs;
713 	sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
714 
715 	return 0;
716 
717 err_unmap:
718 	while (--i >= 0) {
719 		--index;
720 		header_offset = mlx5e_shampo_hd_offset(rq, index);
721 		if (!header_offset) {
722 			struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
723 
724 			mlx5e_page_release_fragmented(rq->hd_page_pool,
725 						      frag_page);
726 		}
727 	}
728 
729 	rq->stats->buff_alloc_err++;
730 	return err;
731 }
732 
733 static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
734 {
735 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
736 	u16 ksm_entries, num_wqe, index, entries_before;
737 	struct mlx5e_icosq *sq = rq->icosq;
738 	int i, err, max_ksm_entries, len;
739 
740 	max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
741 	ksm_entries = bitmap_find_window(shampo->bitmap,
742 					 shampo->hd_per_wqe,
743 					 shampo->hd_per_wq, shampo->pi);
744 	ksm_entries = ALIGN_DOWN(ksm_entries, shampo->hd_per_page);
745 	if (!ksm_entries)
746 		return 0;
747 
748 	/* pi is aligned to shampo->hd_per_page */
749 	index = shampo->pi;
750 	entries_before = shampo->hd_per_wq - index;
751 
752 	if (unlikely(entries_before < ksm_entries))
753 		num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) +
754 			  DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries);
755 	else
756 		num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries);
757 
758 	for (i = 0; i < num_wqe; i++) {
759 		len = (ksm_entries > max_ksm_entries) ? max_ksm_entries :
760 							ksm_entries;
761 		if (unlikely(index + len > shampo->hd_per_wq))
762 			len = shampo->hd_per_wq - index;
763 		err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
764 		if (unlikely(err))
765 			return err;
766 		index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
767 		ksm_entries -= len;
768 	}
769 
770 	return 0;
771 }
772 
773 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
774 {
775 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
776 	struct mlx5e_icosq *sq = rq->icosq;
777 	struct mlx5e_frag_page *frag_page;
778 	struct mlx5_wq_cyc *wq = &sq->wq;
779 	struct mlx5e_umr_wqe *umr_wqe;
780 	u32 offset; /* 17-bit value with MTT. */
781 	u16 pi;
782 	int err;
783 	int i;
784 
785 	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
786 		err = mlx5e_alloc_rx_hd_mpwqe(rq);
787 		if (unlikely(err))
788 			goto err;
789 	}
790 
791 	pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
792 	umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
793 	memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
794 
795 	frag_page = &wi->alloc_units.frag_pages[0];
796 
797 	for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
798 		dma_addr_t addr;
799 
800 		err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);
801 		if (unlikely(err))
802 			goto err_unmap;
803 
804 		addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
805 		umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
806 			.ptag = cpu_to_be64(addr | MLX5_EN_WR),
807 		};
808 	}
809 
810 	/* Zero-pad the MTT array if the value set to ucseg->xlt_octowords
811 	 * in mlx5e_build_umr_wqe() was rounded up for alignment.
812 	 */
813 	if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
814 		int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
815 			rq->mpwqe.pages_per_wqe;
816 
817 		memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
818 		       sizeof(*umr_wqe->inline_mtts) * pad);
819 	}
820 
821 	bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
822 	wi->consumed_strides = 0;
823 
824 	umr_wqe->hdr.ctrl.opmod_idx_opcode =
825 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
826 			    MLX5_OPCODE_UMR);
827 
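	/* xlt_offset is in octword (16 byte) units; with 8-byte MTT entries
	 * (a single __be64 ptag each) this works out to octword
	 * ix * mtts_per_wqe / 2 within the UMR mkey's translation table.
	 */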
828 	offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
829 	umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
830 
831 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
832 		.wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
833 		.num_wqebbs = rq->mpwqe.umr_wqebbs,
834 		.umr.rq     = rq,
835 	};
836 
837 	sq->pc += rq->mpwqe.umr_wqebbs;
838 
839 	sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
840 
841 	return 0;
842 
843 err_unmap:
844 	while (--i >= 0) {
845 		frag_page--;
846 		mlx5e_page_release_fragmented(rq->page_pool, frag_page);
847 	}
848 
849 	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
850 
851 err:
852 	rq->stats->buff_alloc_err++;
853 
854 	return err;
855 }
856 
857 static void
858 mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
859 {
860 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
861 
862 	if (((header_index + 1) & (shampo->hd_per_page - 1)) == 0) {
863 		struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
864 
865 		mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
866 	}
867 	clear_bit(header_index, shampo->bitmap);
868 }
869 
870 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq)
871 {
872 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
873 	int i;
874 
875 	for_each_set_bit(i, shampo->bitmap, rq->mpwqe.shampo->hd_per_wq)
876 		mlx5e_free_rx_shampo_hd_entry(rq, i);
877 }
878 
879 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
880 {
881 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
882 	/* This function is called on rq/netdev close. */
883 	mlx5e_free_rx_mpwqe(rq, wi);
884 
885 	/* Avoid a second release of the wqe pages: dealloc is called also
886 	 * for missing wqes on an already flushed RQ.
887 	 */
888 	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
889 }
890 
891 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
892 {
893 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
894 	int wqe_bulk, count;
895 	bool busy = false;
896 	u16 head;
897 
898 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
899 		return false;
900 
901 	if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
902 		return false;
903 
904 	if (rq->page_pool)
905 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
906 
907 	wqe_bulk = mlx5_wq_cyc_missing(wq);
908 	head = mlx5_wq_cyc_get_head(wq);
909 
910 	/* Don't allow any newly allocated WQEs to share the same page with old
911 	 * WQEs that aren't completed yet. Stop earlier.
912 	 */
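	/* E.g. with wqe_index_mask == 3 (4 WQEs sharing a page), head == 5
	 * and wqe_bulk == 10: (5 + 10) & 3 == 3 entries are trimmed, so the
	 * bulk ends at index 12, a page-aligned WQE boundary.
	 */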
913 	wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
914 
915 	if (!rq->xsk_pool) {
916 		count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
917 	} else if (likely(!dma_dev_need_sync(rq->pdev))) {
918 		mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
919 		count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
920 	} else {
921 		mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
922 		/* If dma_need_sync is true, it's more efficient to call
923 		 * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
924 		 * because the latter does the same check and returns only one
925 		 * frame.
926 		 */
927 		count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
928 	}
929 
930 	mlx5_wq_cyc_push_n(wq, count);
931 	if (unlikely(count != wqe_bulk)) {
932 		rq->stats->buff_alloc_err++;
933 		busy = true;
934 	}
935 
936 	/* ensure wqes are visible to device before updating doorbell record */
937 	dma_wmb();
938 
939 	mlx5_wq_cyc_update_db_record(wq);
940 
941 	return busy;
942 }
943 
944 void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
945 {
946 	u16 sqcc;
947 
948 	sqcc = sq->cc;
949 
950 	while (sqcc != sq->pc) {
951 		struct mlx5e_icosq_wqe_info *wi;
952 		u16 ci;
953 
954 		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
955 		wi = &sq->db.wqe_info[ci];
956 		sqcc += wi->num_wqebbs;
957 #ifdef CONFIG_MLX5_EN_TLS
958 		switch (wi->wqe_type) {
959 		case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
960 			mlx5e_ktls_handle_ctx_completion(wi);
961 			break;
962 		case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
963 			mlx5e_ktls_handle_get_psv_completion(wi, sq);
964 			break;
965 		}
966 #endif
967 	}
968 	sq->cc = sqcc;
969 }
970 
971 void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len)
972 {
973 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
974 	int end, from, full_len = len;
975 
976 	end = shampo->hd_per_wq;
977 	from = shampo->ci;
978 	if (from + len > end) {
979 		len -= end - from;
980 		bitmap_set(shampo->bitmap, from, end - from);
981 		from = 0;
982 	}
983 
984 	bitmap_set(shampo->bitmap, from, len);
985 	shampo->ci = (shampo->ci + full_len) & (shampo->hd_per_wq - 1);
986 }
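
/* Wrap-around example: with hd_per_wq == 16, ci == 12 and len == 8, bits
 * 12..15 are set first, then bits 0..3, and ci advances to
 * (12 + 8) & 15 == 4.
 */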
987 
988 static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
989 				       struct mlx5e_icosq *sq)
990 {
991 	struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
992 	/* assume 1:1 relationship between RQ and icosq */
993 	struct mlx5e_rq *rq = &c->rq;
994 
995 	mlx5e_shampo_fill_umr(rq, umr.len);
996 }
997 
998 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
999 {
1000 	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
1001 	struct mlx5_cqe64 *cqe;
1002 	u16 sqcc;
1003 	int i;
1004 
1005 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
1006 		return 0;
1007 
1008 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
1009 	if (likely(!cqe))
1010 		return 0;
1011 
1012 	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
1013 	 * otherwise a cq overrun may occur
1014 	 */
1015 	sqcc = sq->cc;
1016 
1017 	i = 0;
1018 	do {
1019 		u16 wqe_counter;
1020 		bool last_wqe;
1021 
1022 		mlx5_cqwq_pop(&cq->wq);
1023 
1024 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
1025 
1026 		do {
1027 			struct mlx5e_icosq_wqe_info *wi;
1028 			u16 ci;
1029 
1030 			last_wqe = (sqcc == wqe_counter);
1031 
1032 			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
1033 			wi = &sq->db.wqe_info[ci];
1034 			sqcc += wi->num_wqebbs;
1035 
1036 			if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
1037 				netdev_WARN_ONCE(cq->netdev,
1038 						 "Bad OP in ICOSQ CQE: 0x%x\n",
1039 						 get_cqe_opcode(cqe));
1040 #ifdef CONFIG_MLX5_EN_TLS
1041 				if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS)
1042 					mlx5e_ktls_rx_resync_async_request_cancel(wi);
1043 #endif
1044 				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
1045 						     (struct mlx5_err_cqe *)cqe);
1046 				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
1047 				if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
1048 					queue_work(cq->workqueue, &sq->recover_work);
1049 				break;
1050 			}
1051 
1052 			switch (wi->wqe_type) {
1053 			case MLX5E_ICOSQ_WQE_UMR_RX:
1054 				wi->umr.rq->mpwqe.umr_completed++;
1055 				break;
1056 			case MLX5E_ICOSQ_WQE_NOP:
1057 				break;
1058 			case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR:
1059 				mlx5e_handle_shampo_hd_umr(wi->shampo, sq);
1060 				break;
1061 #ifdef CONFIG_MLX5_EN_TLS
1062 			case MLX5E_ICOSQ_WQE_UMR_TLS:
1063 				break;
1064 			case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
1065 				mlx5e_ktls_handle_ctx_completion(wi);
1066 				break;
1067 			case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
1068 				mlx5e_ktls_handle_get_psv_completion(wi, sq);
1069 				break;
1070 #endif
1071 			default:
1072 				netdev_WARN_ONCE(cq->netdev,
1073 						 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
1074 						 wi->wqe_type);
1075 			}
1076 		} while (!last_wqe);
1077 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
1078 
1079 	sq->cc = sqcc;
1080 
1081 	mlx5_cqwq_update_db_record(&cq->wq);
1082 
1083 	return i;
1084 }
1085 
1086 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
1087 {
1088 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
1089 	u8  umr_completed = rq->mpwqe.umr_completed;
1090 	struct mlx5e_icosq *sq = rq->icosq;
1091 	int alloc_err = 0;
1092 	u8  missing, i;
1093 	u16 head;
1094 
1095 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
1096 		return false;
1097 
1098 	if (umr_completed) {
1099 		mlx5e_post_rx_mpwqe(rq, umr_completed);
1100 		rq->mpwqe.umr_in_progress -= umr_completed;
1101 		rq->mpwqe.umr_completed = 0;
1102 	}
1103 
1104 	missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
1105 
1106 	if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
1107 		rq->stats->congst_umr++;
1108 
1109 	if (likely(missing < rq->mpwqe.min_wqe_bulk))
1110 		return false;
1111 
1112 	if (rq->page_pool)
1113 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
1114 	if (rq->hd_page_pool)
1115 		page_pool_nid_changed(rq->hd_page_pool, numa_mem_id());
1116 
1117 	head = rq->mpwqe.actual_wq_head;
1118 	i = missing;
1119 	do {
1120 		struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);
1121 
1122 		/* Deferred free for better page pool cache usage. */
1123 		mlx5e_free_rx_mpwqe(rq, wi);
1124 
1125 		alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
1126 					   mlx5e_alloc_rx_mpwqe(rq, head);
1127 
1128 		if (unlikely(alloc_err))
1129 			break;
1130 		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
1131 	} while (--i);
1132 
1133 	rq->mpwqe.umr_last_bulk    = missing - i;
1134 	if (sq->doorbell_cseg) {
1135 		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
1136 		sq->doorbell_cseg = NULL;
1137 	}
1138 
1139 	rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
1140 	rq->mpwqe.actual_wq_head   = head;
1141 
1142 	/* If XSK Fill Ring doesn't have enough frames, report the error, so
1143 	 * that one of the actions can be performed:
1144 	 * 1. If need_wakeup is used, signal that the application has to kick
1145 	 * the driver when it refills the Fill Ring.
1146 	 * 2. Otherwise, busy poll by rescheduling the NAPI poll.
1147 	 */
1148 	if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
1149 		return true;
1150 
1151 	return false;
1152 }
1153 
1154 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
1155 {
1156 	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
1157 	u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
1158 			 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
1159 
1160 	tcp->check                      = 0;
1161 	tcp->psh                        = get_cqe_lro_tcppsh(cqe);
1162 
1163 	if (tcp_ack) {
1164 		tcp->ack                = 1;
1165 		tcp->ack_seq            = cqe->lro.ack_seq_num;
1166 		tcp->window             = cqe->lro.tcp_win;
1167 	}
1168 }
1169 
1170 static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
1171 					 struct mlx5_cqe64 *cqe,
1172 					 u32 cqe_bcnt)
1173 {
1174 	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
1175 	struct tcphdr	*tcp;
1176 	int network_depth = 0;
1177 	__wsum check;
1178 	__be16 proto;
1179 	u16 tot_len;
1180 	void *ip_p;
1181 
1182 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
1183 
1184 	tot_len = cqe_bcnt - network_depth;
1185 	ip_p = skb->data + network_depth;
1186 
1187 	if (proto == htons(ETH_P_IP)) {
1188 		struct iphdr *ipv4 = ip_p;
1189 
1190 		tcp = ip_p + sizeof(struct iphdr);
1191 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1192 
1193 		ipv4->ttl               = cqe->lro.min_ttl;
1194 		ipv4->tot_len           = cpu_to_be16(tot_len);
1195 		ipv4->check             = 0;
1196 		ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
1197 						       ipv4->ihl);
1198 
1199 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
1200 		check = csum_partial(tcp, tcp->doff * 4,
1201 				     csum_unfold((__force __sum16)cqe->check_sum));
1202 		/* Almost done, don't forget the pseudo header */
1203 		tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr),
1204 					  ipv4->saddr, ipv4->daddr, check);
1205 	} else {
1206 		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
1207 		struct ipv6hdr *ipv6 = ip_p;
1208 
1209 		tcp = ip_p + sizeof(struct ipv6hdr);
1210 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1211 
1212 		ipv6->hop_limit         = cqe->lro.min_ttl;
1213 		ipv6->payload_len       = cpu_to_be16(payload_len);
1214 
1215 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
1216 		check = csum_partial(tcp, tcp->doff * 4,
1217 				     csum_unfold((__force __sum16)cqe->check_sum));
1218 		/* Almost done, don't forget the pseudo header */
1219 		tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
1220 					  &ipv6->daddr, check);
1221 	}
1222 
1223 	return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
1224 }
1225 
1226 static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
1227 {
1228 	struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
1229 	u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
1230 	void *addr = netmem_address(frag_page->netmem);
1231 
1232 	return addr + head_offset + rq->buff.headroom;
1233 }
1234 
1235 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
1236 {
1237 	int udp_off = rq->hw_gro_data->fk.control.thoff;
1238 	struct sk_buff *skb = rq->hw_gro_data->skb;
1239 	struct udphdr *uh;
1240 
1241 	uh = (struct udphdr *)(skb->data + udp_off);
1242 	uh->len = htons(skb->len - udp_off);
1243 
1244 	if (uh->check)
1245 		uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr,
1246 					  ipv4->daddr, 0);
1247 
1248 	skb->csum_start = (unsigned char *)uh - skb->head;
1249 	skb->csum_offset = offsetof(struct udphdr, check);
1250 
1251 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1252 }
1253 
1254 static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
1255 {
1256 	int udp_off = rq->hw_gro_data->fk.control.thoff;
1257 	struct sk_buff *skb = rq->hw_gro_data->skb;
1258 	struct udphdr *uh;
1259 
1260 	uh = (struct udphdr *)(skb->data + udp_off);
1261 	uh->len = htons(skb->len - udp_off);
1262 
1263 	if (uh->check)
1264 		uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr,
1265 					  &ipv6->daddr, 0);
1266 
1267 	skb->csum_start = (unsigned char *)uh - skb->head;
1268 	skb->csum_offset = offsetof(struct udphdr, check);
1269 
1270 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1271 }
1272 
1273 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1274 					      struct tcphdr *skb_tcp_hd)
1275 {
1276 	u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
1277 	struct tcphdr *last_tcp_hd;
1278 	void *last_hd_addr;
1279 
1280 	last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
1281 	last_tcp_hd =  last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
1282 	tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
1283 }
1284 
1285 static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
1286 					     struct mlx5_cqe64 *cqe, bool match)
1287 {
1288 	int tcp_off = rq->hw_gro_data->fk.control.thoff;
1289 	struct sk_buff *skb = rq->hw_gro_data->skb;
1290 	struct tcphdr *tcp;
1291 
1292 	tcp = (struct tcphdr *)(skb->data + tcp_off);
1293 	if (match)
1294 		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1295 
1296 	tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
1297 				   ipv4->daddr, 0);
1298 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
1299 	if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) {
1300 		bool encap = rq->hw_gro_data->fk.control.flags & FLOW_DIS_ENCAPSULATION;
1301 
1302 		skb_shinfo(skb)->gso_type |= encap ? SKB_GSO_TCP_FIXEDID_INNER :
1303 						     SKB_GSO_TCP_FIXEDID;
1304 	}
1305 
1306 	skb->csum_start = (unsigned char *)tcp - skb->head;
1307 	skb->csum_offset = offsetof(struct tcphdr, check);
1308 
1309 	if (tcp->cwr)
1310 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1311 }
1312 
1313 static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
1314 					     struct mlx5_cqe64 *cqe, bool match)
1315 {
1316 	int tcp_off = rq->hw_gro_data->fk.control.thoff;
1317 	struct sk_buff *skb = rq->hw_gro_data->skb;
1318 	struct tcphdr *tcp;
1319 
1320 	tcp = (struct tcphdr *)(skb->data + tcp_off);
1321 	if (match)
1322 		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1323 
1324 	tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr,
1325 				   &ipv6->daddr, 0);
1326 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
1327 	skb->csum_start = (unsigned char *)tcp - skb->head;
1328 	skb->csum_offset = offsetof(struct tcphdr, check);
1329 
1330 	if (tcp->cwr)
1331 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1332 }
1333 
1334 static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
1335 {
1336 	bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
1337 	struct sk_buff *skb = rq->hw_gro_data->skb;
1338 
1339 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
1340 	skb->ip_summed = CHECKSUM_PARTIAL;
1341 
1342 	if (is_ipv4) {
1343 		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
1344 		struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff);
1345 		__be16 newlen = htons(skb->len - nhoff);
1346 
1347 		csum_replace2(&ipv4->check, ipv4->tot_len, newlen);
1348 		ipv4->tot_len = newlen;
1349 
1350 		if (ipv4->protocol == IPPROTO_TCP)
1351 			mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
1352 		else
1353 			mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
1354 	} else {
1355 		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
1356 		struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff);
1357 
1358 		ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6));
1359 
1360 		if (ipv6->nexthdr == IPPROTO_TCP)
1361 			mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
1362 		else
1363 			mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
1364 	}
1365 }
1366 
1367 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
1368 				      struct sk_buff *skb)
1369 {
1370 	u8 cht = cqe->rss_hash_type;
1371 	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
1372 		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
1373 					    PKT_HASH_TYPE_NONE;
1374 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
1375 }
1376 
1377 static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
1378 					__be16 *proto)
1379 {
1380 	*proto = ((struct ethhdr *)skb->data)->h_proto;
1381 	*proto = __vlan_get_protocol(skb, *proto, network_depth);
1382 
1383 	if (*proto == htons(ETH_P_IP))
1384 		return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
1385 
1386 	if (*proto == htons(ETH_P_IPV6))
1387 		return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
1388 
1389 	return false;
1390 }
1391 
1392 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
1393 {
1394 	int network_depth = 0;
1395 	__be16 proto;
1396 	void *ip;
1397 	int rc;
1398 
1399 	if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
1400 		return;
1401 
1402 	ip = skb->data + network_depth;
1403 	rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
1404 					 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
1405 
1406 	rq->stats->ecn_mark += !!rc;
1407 }
1408 
1409 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
1410 {
1411 	void *ip_p = skb->data + network_depth;
1412 
1413 	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
1414 					    ((struct ipv6hdr *)ip_p)->nexthdr;
1415 }
1416 
1417 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
1418 
1419 #define MAX_PADDING 8
1420 
1421 static void
1422 tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
1423 		       struct mlx5e_rq_stats *stats)
1424 {
1425 	stats->csum_complete_tail_slow++;
1426 	skb->csum = csum_block_add(skb->csum,
1427 				   skb_checksum(skb, offset, len, 0),
1428 				   offset);
1429 }
1430 
1431 static void
1432 tail_padding_csum(struct sk_buff *skb, int offset,
1433 		  struct mlx5e_rq_stats *stats)
1434 {
1435 	u8 tail_padding[MAX_PADDING];
1436 	int len = skb->len - offset;
1437 	void *tail;
1438 
1439 	if (unlikely(len > MAX_PADDING)) {
1440 		tail_padding_csum_slow(skb, offset, len, stats);
1441 		return;
1442 	}
1443 
1444 	tail = skb_header_pointer(skb, offset, len, tail_padding);
1445 	if (unlikely(!tail)) {
1446 		tail_padding_csum_slow(skb, offset, len, stats);
1447 		return;
1448 	}
1449 
1450 	stats->csum_complete_tail++;
1451 	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
1452 }
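
/* Fast path above: up to MAX_PADDING (8) bytes of tail padding are pulled
 * into a stack buffer and folded into skb->csum with csum_partial();
 * longer padding runs take the skb_checksum() fallback and are counted
 * separately in csum_complete_tail_slow.
 */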
1453 
1454 static void
1455 mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
1456 		     struct mlx5e_rq_stats *stats)
1457 {
1458 	struct ipv6hdr *ip6;
1459 	struct iphdr   *ip4;
1460 	int pkt_len;
1461 
1462 	/* Fixup vlan headers, if any */
1463 	if (network_depth > ETH_HLEN)
1464 		/* CQE csum is calculated from the IP header and does
1465 		 * not cover VLAN headers (if present). This will add
1466 		 * the checksum manually.
1467 		 */
1468 		skb->csum = csum_partial(skb->data + ETH_HLEN,
1469 					 network_depth - ETH_HLEN,
1470 					 skb->csum);
1471 
1472 	/* Fixup tail padding, if any */
1473 	switch (proto) {
1474 	case htons(ETH_P_IP):
1475 		ip4 = (struct iphdr *)(skb->data + network_depth);
1476 		pkt_len = network_depth + ntohs(ip4->tot_len);
1477 		break;
1478 	case htons(ETH_P_IPV6):
1479 		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
1480 		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
1481 		break;
1482 	default:
1483 		return;
1484 	}
1485 
1486 	if (likely(pkt_len >= skb->len))
1487 		return;
1488 
1489 	tail_padding_csum(skb, pkt_len, stats);
1490 }
1491 
1492 static inline void mlx5e_handle_csum(struct net_device *netdev,
1493 				     struct mlx5_cqe64 *cqe,
1494 				     struct mlx5e_rq *rq,
1495 				     struct sk_buff *skb,
1496 				     bool   lro)
1497 {
1498 	struct mlx5e_rq_stats *stats = rq->stats;
1499 	int network_depth = 0;
1500 	__be16 proto;
1501 
1502 	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
1503 		goto csum_none;
1504 
1505 	if (lro) {
1506 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1507 		stats->csum_unnecessary++;
1508 		return;
1509 	}
1510 
1511 	/* True when explicitly set via priv flag, or XDP prog is loaded */
1512 	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
1513 	    get_cqe_tls_offload(cqe))
1514 		goto csum_unnecessary;
1515 
1516 	/* CQE csum doesn't cover padding octets in short ethernet
1517 	 * frames. And the pad field is appended prior to calculating
1518 	 * and appending the FCS field.
1519 	 *
1520 	 * Detecting these padded frames requires verifying and parsing
1521 	 * IP headers, so we simply force all those small frames to be
1522 	 * CHECKSUM_UNNECESSARY even if they are not padded.
1523 	 */
1524 	if (short_frame(skb->len))
1525 		goto csum_unnecessary;
1526 
1527 	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
1528 		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
1529 			goto csum_unnecessary;
1530 
1531 		stats->csum_complete++;
1532 		skb->ip_summed = CHECKSUM_COMPLETE;
1533 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1534 
1535 		if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
1536 			/* TBD: PSP csum complete corrections for now chose csum_unnecessary path */
1537 			goto csum_unnecessary;
1538 		}
1539 
1540 		if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1541 			return; /* CQE csum covers all received bytes */
1542 
1543 		/* csum might need some fixups ... */
1544 		mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
1545 		return;
1546 	}
1547 
1548 csum_unnecessary:
1549 	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
1550 		   (cqe->hds_ip_ext & CQE_L4_OK))) {
1551 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1552 		if (cqe_is_tunneled(cqe)) {
1553 			skb->csum_level = 1;
1554 			skb->encapsulation = 1;
1555 			stats->csum_unnecessary_inner++;
1556 			return;
1557 		}
1558 		stats->csum_unnecessary++;
1559 		return;
1560 	}
1561 csum_none:
1562 	skb->ip_summed = CHECKSUM_NONE;
1563 	stats->csum_none++;
1564 }
1565 
1566 #define MLX5E_CE_BIT_MASK 0x80
1567 
1568 static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
1569 				      u32 cqe_bcnt,
1570 				      struct mlx5e_rq *rq,
1571 				      struct sk_buff *skb)
1572 {
1573 	u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
1574 	struct mlx5e_rq_stats *stats = rq->stats;
1575 	struct net_device *netdev = rq->netdev;
1576 
1577 	skb->mac_len = ETH_HLEN;
1578 
1579 	if (unlikely(get_cqe_tls_offload(cqe)))
1580 		mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
1581 
1582 	if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
1583 		if (mlx5e_psp_offload_handle_rx_skb(netdev, skb, cqe))
1584 			return true;
1585 	}
1586 
1587 	if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1588 		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
1589 						  be32_to_cpu(cqe->ft_metadata));
1590 
1591 	if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
1592 		mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
1593 
1594 	if (lro_num_seg > 1) {
1595 		unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
1596 
1597 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
1598 		skb_shinfo(skb)->gso_segs = lro_num_seg;
1599 		/* Subtract one since we already counted this as one
1600 		 * "regular" packet in mlx5e_complete_rx_cqe()
1601 		 */
1602 		stats->packets += lro_num_seg - 1;
1603 		stats->lro_packets++;
1604 		stats->lro_bytes += cqe_bcnt;
1605 	}
1606 
1607 	if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
1608 		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1609 								  rq->clock, get_cqe_ts(cqe));
1610 	skb_record_rx_queue(skb, rq->ix);
1611 
1612 	if (likely(netdev->features & NETIF_F_RXHASH))
1613 		mlx5e_skb_set_hash(cqe, skb);
1614 
1615 	if (cqe_has_vlan(cqe)) {
1616 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1617 				       be16_to_cpu(cqe->vlan_info));
1618 		stats->removed_vlan_packets++;
1619 	}
1620 
1621 	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
1622 
1623 	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1624 	/* checking CE bit in cqe - MSB in ml_path field */
1625 	if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1626 		mlx5e_enable_ecn(rq, skb);
1627 
1628 	skb->protocol = eth_type_trans(skb, netdev);
1629 
1630 	if (unlikely(mlx5e_skb_is_multicast(skb)))
1631 		stats->mcast_packets++;
1632 
1633 	return false;
1634 }
1635 
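/* HW-GRO (SHAMPO) completion: statistics are bumped for every CQE, but the
 * skb is only populated from the CQE for the first segment of a GRO session
 * (NAPI_GRO_CB()->count == 1). If the flow keys cannot be dissected, the
 * skb is handed to GRO immediately and the per-RQ session slot is cleared.
 * Returns true when the skb was consumed by an offload handler.
 */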
1636 static bool mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
1637 					 struct mlx5_cqe64 *cqe,
1638 					 u32 cqe_bcnt,
1639 					 struct sk_buff *skb)
1640 {
1641 	struct mlx5e_rq_stats *stats = rq->stats;
1642 
1643 	stats->packets++;
1644 	stats->bytes += cqe_bcnt;
1645 	if (NAPI_GRO_CB(skb)->count != 1)
1646 		return false;
1647 
1648 	if (mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb))
1649 		return true;
1650 
1651 	skb_reset_network_header(skb);
1652 	if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
1653 		napi_gro_receive(rq->cq.napi, skb);
1654 		rq->hw_gro_data->skb = NULL;
1655 	}
1656 	return false;
1657 }
1658 
1659 static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1660 					 struct mlx5_cqe64 *cqe,
1661 					 u32 cqe_bcnt,
1662 					 struct sk_buff *skb)
1663 {
1664 	struct mlx5e_rq_stats *stats = rq->stats;
1665 
1666 	stats->packets++;
1667 	stats->bytes += cqe_bcnt;
1668 	return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1669 }
1670 
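/* Build an skb around an existing, already-synced buffer without copying:
 * reserve @headroom bytes, expose @cqe_bcnt bytes of packet data, and keep
 * @metasize bytes of XDP metadata in front of skb->data. @frag_size must
 * already account for the skb_shared_info tail.
 */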
1671 static inline
1672 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1673 				       u32 frag_size, u16 headroom,
1674 				       u32 cqe_bcnt, u32 metasize)
1675 {
1676 	struct sk_buff *skb = napi_build_skb(va, frag_size);
1677 
1678 	if (unlikely(!skb)) {
1679 		rq->stats->buff_alloc_err++;
1680 		return NULL;
1681 	}
1682 
1683 	skb_reserve(skb, headroom);
1684 	skb_put(skb, cqe_bcnt);
1685 
1686 	if (metasize)
1687 		skb_metadata_set(skb, metasize);
1688 
1689 	return skb;
1690 }
1691 
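/* Prepare the driver-private xdp_buff for one received frame. Besides the
 * standard xdp_buff fields, the CQE and RQ pointers are stashed so that
 * later XDP handling can reach hardware metadata from the completion.
 */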
1692 static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1693 			     void *va, u16 headroom, u32 frame_sz, u32 len,
1694 			     struct mlx5e_xdp_buff *mxbuf)
1695 {
1696 	xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq);
1697 	xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
1698 	mxbuf->cqe = cqe;
1699 	mxbuf->rq = rq;
1700 }
1701 
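/* Legacy-RQ linear RX: the whole packet fits in a single page fragment.
 * Fragment layout (the shared_info tail is accounted for by
 * MLX5_SKB_FRAG_SZ):
 *
 *   va            data = va + rx_headroom
 *   |<-rx_headroom->|<----- cqe_bcnt ----->|<- skb_shared_info ->|
 *
 * An optional XDP program may adjust head/tail/meta before the skb is
 * built in place and the page is marked for page_pool recycling.
 */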
1702 static struct sk_buff *
1703 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1704 			  struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1705 {
1706 	struct mlx5e_frag_page *frag_page = wi->frag_page;
1707 	u16 rx_headroom = rq->buff.headroom;
1708 	struct bpf_prog *prog;
1709 	struct sk_buff *skb;
1710 	u32 metasize = 0;
1711 	void *va, *data;
1712 	dma_addr_t addr;
1713 	u32 frag_size;
1714 
1715 	va             = netmem_address(frag_page->netmem) + wi->offset;
1716 	data           = va + rx_headroom;
1717 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1718 
1719 	addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
1720 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1721 				      frag_size, rq->buff.map_dir);
1722 	net_prefetch(data);
1723 
1724 	prog = rcu_dereference(rq->xdp_prog);
1725 	if (prog) {
1726 		struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
1727 
1728 		net_prefetchw(va); /* xdp_frame data area */
1729 		mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1730 				 cqe_bcnt, mxbuf);
1731 		if (mlx5e_xdp_handle(rq, prog, mxbuf))
1732 			return NULL; /* page/packet was consumed by XDP */
1733 
1734 		rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
1735 		metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
1736 		cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
1737 	}
1738 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1739 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
1740 	if (unlikely(!skb))
1741 		return NULL;
1742 
1743 	/* queue up for recycling/reuse */
1744 	skb_mark_for_recycle(skb);
1745 	frag_page->frags++;
1746 
1747 	return skb;
1748 }
1749 
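/* Legacy-RQ non-linear RX: the packet spans multiple WQE fragments. The
 * first fragment backs the xdp_buff/skb head; the rest are attached as
 * shared-info frags. A multi-buffer XDP program may consume the packet or
 * shrink the frag list (e.g. via bpf_xdp_adjust_tail()), in which case the
 * per-page refcounts and truesize are adjusted to match.
 */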
1750 static struct sk_buff *
1751 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1752 			     struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1753 {
1754 	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1755 	struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
1756 	struct mlx5e_wqe_frag_info *head_wi = wi;
1757 	u16 rx_headroom = rq->buff.headroom;
1758 	struct mlx5e_frag_page *frag_page;
1759 	struct skb_shared_info *sinfo;
1760 	u32 frag_consumed_bytes;
1761 	struct bpf_prog *prog;
1762 	struct sk_buff *skb;
1763 	dma_addr_t addr;
1764 	u32 truesize;
1765 	void *va;
1766 
1767 	frag_page = wi->frag_page;
1768 
1769 	va = netmem_address(frag_page->netmem) + wi->offset;
1770 	frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1771 
1772 	addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
1773 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1774 				      rq->buff.frame0_sz, rq->buff.map_dir);
1775 	net_prefetchw(va); /* xdp_frame data area */
1776 	net_prefetch(va + rx_headroom);
1777 
1778 	mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1779 			 frag_consumed_bytes, mxbuf);
1780 	sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
1781 	truesize = 0;
1782 
1783 	cqe_bcnt -= frag_consumed_bytes;
1784 	frag_info++;
1785 	wi++;
1786 
1787 	while (cqe_bcnt) {
1788 		frag_page = wi->frag_page;
1789 
1790 		frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1791 
1792 		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
1793 					       frag_page, wi->offset,
1794 					       frag_consumed_bytes);
1795 		truesize += frag_info->frag_stride;
1796 
1797 		cqe_bcnt -= frag_consumed_bytes;
1798 		frag_info++;
1799 		wi++;
1800 	}
1801 
1802 	prog = rcu_dereference(rq->xdp_prog);
1803 	if (prog) {
1804 		u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
1805 
1806 		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
1807 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
1808 						 rq->flags)) {
1809 				struct mlx5e_wqe_frag_info *pwi;
1810 
1811 				wi -= old_nr_frags - sinfo->nr_frags;
1812 
1813 				for (pwi = head_wi; pwi < wi; pwi++)
1814 					pwi->frag_page->frags++;
1815 			}
1816 			return NULL; /* page/packet was consumed by XDP */
1817 		}
1818 
1819 		nr_frags_free = old_nr_frags - sinfo->nr_frags;
1820 		if (unlikely(nr_frags_free)) {
1821 			wi -= nr_frags_free;
1822 			truesize -= nr_frags_free * frag_info->frag_stride;
1823 		}
1824 	}
1825 
1826 	skb = mlx5e_build_linear_skb(
1827 		rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
1828 		mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
1829 		mxbuf->xdp.data_end - mxbuf->xdp.data,
1830 		mxbuf->xdp.data - mxbuf->xdp.data_meta);
1831 	if (unlikely(!skb))
1832 		return NULL;
1833 
1834 	skb_mark_for_recycle(skb);
1835 	head_wi->frag_page->frags++;
1836 
1837 	if (xdp_buff_has_frags(&mxbuf->xdp)) {
1838 		/* sinfo->nr_frags is reset by build_skb, calculate again. */
1839 		xdp_update_skb_frags_info(skb, wi - head_wi - 1,
1840 					  sinfo->xdp_frags_size, truesize,
1841 					  xdp_buff_get_skb_flags(&mxbuf->xdp));
1842 
1843 		for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
1844 			pwi->frag_page->frags++;
1845 	}
1846 
1847 	return skb;
1848 }
1849 
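/* For error CQEs whose syndrome indicates the RQ needs recovery, dump the
 * CQE once and queue the recovery worker; the RECOVERING bit guarantees a
 * single outstanding recovery per RQ.
 */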
1850 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1851 {
1852 	struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1853 	struct mlx5e_priv *priv = rq->priv;
1854 
1855 	if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
1856 	    !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1857 		mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
1858 		queue_work(priv->wq, &rq->recover_work);
1859 	}
1860 }
1861 
1862 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1863 {
1864 	trigger_report(rq, cqe);
1865 	rq->stats->wqe_err++;
1866 }
1867 
1868 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1869 {
1870 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1871 	struct mlx5e_wqe_frag_info *wi;
1872 	struct sk_buff *skb;
1873 	u32 cqe_bcnt;
1874 	u16 ci;
1875 
1876 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1877 	wi       = get_frag(rq, ci);
1878 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1879 
1880 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1881 		mlx5e_handle_rx_err_cqe(rq, cqe);
1882 		goto wq_cyc_pop;
1883 	}
1884 
1885 	skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
1886 			      mlx5e_skb_from_cqe_linear,
1887 			      mlx5e_skb_from_cqe_nonlinear,
1888 			      mlx5e_xsk_skb_from_cqe_linear,
1889 			      rq, wi, cqe, cqe_bcnt);
1890 	if (!skb) {
1891 		/* probably for XDP */
1892 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1893 			wi->frag_page->frags++;
1894 		goto wq_cyc_pop;
1895 	}
1896 
1897 	if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
1898 		goto wq_cyc_pop;
1899 
1900 	if (mlx5e_cqe_regb_chain(cqe))
1901 		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
1902 			dev_kfree_skb_any(skb);
1903 			goto wq_cyc_pop;
1904 		}
1905 
1906 	napi_gro_receive(rq->cq.napi, skb);
1907 
1908 wq_cyc_pop:
1909 	mlx5_wq_cyc_pop(wq);
1910 }
1911 
1912 #ifdef CONFIG_MLX5_ESWITCH
1913 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1914 {
1915 	struct net_device *netdev = rq->netdev;
1916 	struct mlx5e_priv *priv = netdev_priv(netdev);
1917 	struct mlx5e_rep_priv *rpriv  = priv->ppriv;
1918 	struct mlx5_eswitch_rep *rep = rpriv->rep;
1919 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1920 	struct mlx5e_wqe_frag_info *wi;
1921 	struct sk_buff *skb;
1922 	u32 cqe_bcnt;
1923 	u16 ci;
1924 
1925 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1926 	wi       = get_frag(rq, ci);
1927 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1928 
1929 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1930 		mlx5e_handle_rx_err_cqe(rq, cqe);
1931 		goto wq_cyc_pop;
1932 	}
1933 
1934 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1935 			      mlx5e_skb_from_cqe_linear,
1936 			      mlx5e_skb_from_cqe_nonlinear,
1937 			      rq, wi, cqe, cqe_bcnt);
1938 	if (!skb) {
1939 		/* probably for XDP */
1940 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1941 			wi->frag_page->frags++;
1942 		goto wq_cyc_pop;
1943 	}
1944 
1945 	if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
1946 		goto wq_cyc_pop;
1947 
1948 	if (rep->vlan && skb_vlan_tag_present(skb))
1949 		skb_vlan_pop(skb);
1950 
1951 	mlx5e_rep_tc_receive(cqe, rq, skb);
1952 
1953 wq_cyc_pop:
1954 	mlx5_wq_cyc_pop(wq);
1955 }
1956 
1957 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1958 {
1959 	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
1960 	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
1961 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
1962 	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
1963 	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
1964 	u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
1965 	u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
1966 	struct mlx5e_rx_wqe_ll *wqe;
1967 	struct mlx5_wq_ll *wq;
1968 	struct sk_buff *skb;
1969 	u16 cqe_bcnt;
1970 
1971 	wi->consumed_strides += cstrides;
1972 
1973 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1974 		mlx5e_handle_rx_err_cqe(rq, cqe);
1975 		goto mpwrq_cqe_out;
1976 	}
1977 
1978 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1979 		struct mlx5e_rq_stats *stats = rq->stats;
1980 
1981 		stats->mpwqe_filler_cqes++;
1982 		stats->mpwqe_filler_strides += cstrides;
1983 		goto mpwrq_cqe_out;
1984 	}
1985 
1986 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1987 
1988 	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1989 			      mlx5e_skb_from_cqe_mpwrq_linear,
1990 			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
1991 			      rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
1992 	if (!skb)
1993 		goto mpwrq_cqe_out;
1994 
1995 	if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
1996 		goto mpwrq_cqe_out;
1997 
1998 	mlx5e_rep_tc_receive(cqe, rq, skb);
1999 
2000 mpwrq_cqe_out:
2001 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2002 		return;
2003 
2004 	wq  = &rq->mpwqe.wq;
2005 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2006 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2007 }
2008 
2009 const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
2010 	.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
2011 	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
2012 };
2013 #endif
2014 
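/* Attach the payload of an HDS (header/data split) packet to the skb as
 * page frags, walking frag_page entries one PAGE_SIZE chunk at a time,
 * starting at @data_offset within the first page.
 */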
2015 static void
2016 mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
2017 			   struct mlx5e_frag_page *frag_page,
2018 			   u32 data_bcnt, u32 data_offset)
2019 {
2020 	net_prefetchw(skb->data);
2021 
2022 	do {
2023 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
2024 		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
2025 		unsigned int truesize = pg_consumed_bytes;
2026 
2027 		mlx5e_add_skb_frag(rq, skb, frag_page, data_offset,
2028 				   pg_consumed_bytes, truesize);
2029 
2030 		data_bcnt -= pg_consumed_bytes;
2031 		data_offset = 0;
2032 		frag_page++;
2033 	} while (data_bcnt);
2034 }
2035 
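/* Striding-RQ non-linear RX. Two strategies for the skb head:
 *  - with XDP: a separate "linear page" provides headroom/scratch space
 *    for bpf_xdp_[store|load]_bytes while all packet bytes stay in frags;
 *  - without XDP: a small skb is allocated and up to MLX5E_RX_MAX_HEAD
 *    header bytes are copied into it, the rest attached as frags.
 * truesize is accumulated per fragment: exact consumed bytes under SHAMPO,
 * stride-aligned sizes otherwise.
 */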
2036 static struct sk_buff *
2037 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2038 				   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
2039 				   u32 page_idx)
2040 {
2041 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
2042 	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
2043 	struct mlx5e_frag_page *head_page = frag_page;
2044 	struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
2045 	u32 frag_offset    = head_offset;
2046 	u32 byte_cnt       = cqe_bcnt;
2047 	struct skb_shared_info *sinfo;
2048 	unsigned int truesize = 0;
2049 	u32 pg_consumed_bytes;
2050 	struct bpf_prog *prog;
2051 	struct sk_buff *skb;
2052 	u32 linear_frame_sz;
2053 	u16 linear_data_len;
2054 	u16 linear_hr;
2055 	void *va;
2056 
2057 	prog = rcu_dereference(rq->xdp_prog);
2058 
2059 	if (prog) {
2060 		/* area for bpf_xdp_[store|load]_bytes */
2061 		net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
2062 		if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
2063 							 &wi->linear_page))) {
2064 			rq->stats->buff_alloc_err++;
2065 			return NULL;
2066 		}
2067 
2068 		va = netmem_address(wi->linear_page.netmem);
2069 		net_prefetchw(va); /* xdp_frame data area */
2070 		linear_hr = XDP_PACKET_HEADROOM;
2071 		linear_data_len = 0;
2072 		linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
2073 	} else {
2074 		skb = napi_alloc_skb(rq->cq.napi,
2075 				     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
2076 		if (unlikely(!skb)) {
2077 			rq->stats->buff_alloc_err++;
2078 			return NULL;
2079 		}
2080 		skb_mark_for_recycle(skb);
2081 		va = skb->head;
2082 		net_prefetchw(va); /* xdp_frame data area */
2083 		net_prefetchw(skb->data);
2084 
2085 		frag_offset += headlen;
2086 		byte_cnt -= headlen;
2087 		linear_hr = skb_headroom(skb);
2088 		linear_data_len = headlen;
2089 		linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
2090 		if (unlikely(frag_offset >= PAGE_SIZE)) {
2091 			frag_page++;
2092 			frag_offset -= PAGE_SIZE;
2093 		}
2094 	}
2095 
2096 	mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz,
2097 			 linear_data_len, mxbuf);
2098 
2099 	sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
2100 
2101 	while (byte_cnt) {
2102 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
2103 		pg_consumed_bytes =
2104 			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
2105 
2106 		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
2107 			truesize += pg_consumed_bytes;
2108 		else
2109 			truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
2110 
2111 		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
2112 					       frag_page, frag_offset,
2113 					       pg_consumed_bytes);
2114 		byte_cnt -= pg_consumed_bytes;
2115 		frag_offset = 0;
2116 		frag_page++;
2117 	}
2118 
2119 	if (prog) {
2120 		u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
2121 		u32 len;
2122 
2123 		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
2124 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
2125 				struct mlx5e_frag_page *pfp;
2126 
2127 				frag_page -= old_nr_frags - sinfo->nr_frags;
2128 
2129 				for (pfp = head_page; pfp < frag_page; pfp++)
2130 					pfp->frags++;
2131 
2132 				wi->linear_page.frags++;
2133 			}
2134 			mlx5e_page_release_fragmented(rq->page_pool,
2135 						      &wi->linear_page);
2136 			return NULL; /* page/packet was consumed by XDP */
2137 		}
2138 
2139 		nr_frags_free = old_nr_frags - sinfo->nr_frags;
2140 		if (unlikely(nr_frags_free)) {
2141 			frag_page -= nr_frags_free;
2142 			truesize -= (nr_frags_free - 1) * PAGE_SIZE +
2143 				ALIGN(pg_consumed_bytes,
2144 				      BIT(rq->mpwqe.log_stride_sz));
2145 		}
2146 
2147 		len = mxbuf->xdp.data_end - mxbuf->xdp.data;
2148 
2149 		skb = mlx5e_build_linear_skb(
2150 			rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
2151 			mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
2152 			mxbuf->xdp.data - mxbuf->xdp.data_meta);
2153 		if (unlikely(!skb)) {
2154 			mlx5e_page_release_fragmented(rq->page_pool,
2155 						      &wi->linear_page);
2156 			return NULL;
2157 		}
2158 
2159 		skb_mark_for_recycle(skb);
2160 		wi->linear_page.frags++;
2161 		mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
2162 
2163 		if (xdp_buff_has_frags(&mxbuf->xdp)) {
2164 			struct mlx5e_frag_page *pagep;
2165 
2166 			/* sinfo->nr_frags is reset by build_skb, calculate again. */
2167 			xdp_update_skb_frags_info(skb, frag_page - head_page,
2168 						  sinfo->xdp_frags_size,
2169 						  truesize,
2170 						  xdp_buff_get_skb_flags(&mxbuf->xdp));
2171 
2172 			pagep = head_page;
2173 			do
2174 				pagep->frags++;
2175 			while (++pagep < frag_page);
2176 
2177 			headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len,
2178 					skb->data_len);
2179 			__pskb_pull_tail(skb, headlen);
2180 		}
2181 	} else {
2182 		dma_addr_t addr;
2183 
2184 		if (xdp_buff_has_frags(&mxbuf->xdp)) {
2185 			struct mlx5e_frag_page *pagep;
2186 
2187 			xdp_update_skb_frags_info(skb, sinfo->nr_frags,
2188 						  sinfo->xdp_frags_size,
2189 						  truesize,
2190 						  xdp_buff_get_skb_flags(&mxbuf->xdp));
2191 
2192 			pagep = frag_page - sinfo->nr_frags;
2193 			do
2194 				pagep->frags++;
2195 			while (++pagep < frag_page);
2196 		}
2197 		/* copy header */
2198 		addr = page_pool_get_dma_addr_netmem(head_page->netmem);
2199 		mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
2200 				      head_offset, head_offset, headlen);
2201 		/* skb linear part was allocated with headlen and aligned to long */
2202 		skb->tail += headlen;
2203 		skb->len  += headlen;
2204 	}
2205 
2206 	return skb;
2207 }
2208 
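/* Striding-RQ linear RX: the packet occupies consecutive strides within
 * one page, so the skb can be built in place as in the legacy linear path.
 * Packets larger than the HW MTU are counted and dropped here; LRO never
 * takes this path since an aggregated packet doesn't fit a linear buffer.
 */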
2209 static struct sk_buff *
2210 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2211 				struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
2212 				u32 page_idx)
2213 {
2214 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
2215 	u16 rx_headroom = rq->buff.headroom;
2216 	struct bpf_prog *prog;
2217 	struct sk_buff *skb;
2218 	u32 metasize = 0;
2219 	void *va, *data;
2220 	dma_addr_t addr;
2221 	u32 frag_size;
2222 
2223 	/* Check packet size. Note that LRO doesn't use a linear SKB */
2224 	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
2225 		rq->stats->oversize_pkts_sw_drop++;
2226 		return NULL;
2227 	}
2228 
2229 	va             = netmem_address(frag_page->netmem) + head_offset;
2230 	data           = va + rx_headroom;
2231 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2232 
2233 	addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
2234 	dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
2235 				      frag_size, rq->buff.map_dir);
2236 	net_prefetch(data);
2237 
2238 	prog = rcu_dereference(rq->xdp_prog);
2239 	if (prog) {
2240 		struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
2241 
2242 		net_prefetchw(va); /* xdp_frame data area */
2243 		mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
2244 				 cqe_bcnt, mxbuf);
2245 		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
2246 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
2247 				frag_page->frags++;
2248 			return NULL; /* page/packet was consumed by XDP */
2249 		}
2250 
2251 		rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
2252 		metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
2253 		cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
2254 	}
2255 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2256 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
2257 	if (unlikely(!skb))
2258 		return NULL;
2259 
2260 	/* queue up for recycling/reuse */
2261 	skb_mark_for_recycle(skb);
2262 	frag_page->frags++;
2263 
2264 	return skb;
2265 }
2266 
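/* Build an skb for the header part of a SHAMPO (HDS) packet. Small headers
 * build the skb in place around the header-buffer entry; headers larger
 * than the header-entry size are copied into a freshly allocated skb
 * (accounted in gro_large_hds).
 */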
2267 static struct sk_buff *
2268 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2269 			  struct mlx5_cqe64 *cqe, u16 header_index)
2270 {
2271 	struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
2272 	u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
2273 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
2274 	u16 head_size = cqe->shampo.header_size;
2275 	u16 rx_headroom = rq->buff.headroom;
2276 	struct sk_buff *skb = NULL;
2277 	dma_addr_t page_dma_addr;
2278 	dma_addr_t dma_addr;
2279 	void *hdr, *data;
2280 	u32 frag_size;
2281 
2282 	page_dma_addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
2283 	dma_addr = page_dma_addr + head_offset;
2284 
2285 	hdr		= netmem_address(frag_page->netmem) + head_offset;
2286 	data		= hdr + rx_headroom;
2287 	frag_size	= MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
2288 
2289 	if (likely(frag_size <= BIT(shampo->log_hd_entry_size))) {
2290 		/* build SKB around header */
2291 		dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
2292 		net_prefetchw(hdr);
2293 		net_prefetch(data);
2294 		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
2295 		if (unlikely(!skb))
2296 			return NULL;
2297 
2298 		frag_page->frags++;
2299 	} else {
2300 		/* allocate SKB and copy header for large header */
2301 		rq->stats->gro_large_hds++;
2302 		skb = napi_alloc_skb(rq->cq.napi,
2303 				     ALIGN(head_size, sizeof(long)));
2304 		if (unlikely(!skb)) {
2305 			rq->stats->buff_alloc_err++;
2306 			return NULL;
2307 		}
2308 
2309 		net_prefetchw(skb->data);
2310 		mlx5e_copy_skb_header(rq, skb, frag_page->netmem, dma_addr,
2311 				      head_offset + rx_headroom,
2312 				      rx_headroom, head_size);
2313 		/* skb linear part was allocated with headlen and aligned to long */
2314 		skb->tail += head_size;
2315 		skb->len  += head_size;
2316 	}
2317 
2318 	/* queue up for recycling/reuse */
2319 	skb_mark_for_recycle(skb);
2320 
2321 	return skb;
2322 }
2323 
2324 static void
2325 mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
2326 {
2327 	skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
2328 	unsigned int frag_size = skb_frag_size(last_frag);
2329 	unsigned int frag_truesize;
2330 
2331 	frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
2332 	skb->truesize += frag_truesize - frag_size;
2333 }
2334 
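/* Terminate the current HW GRO session and hand its skb to the GRO engine.
 * The last fragment's truesize is aligned to the stride size, and for
 * multi-segment sessions the transport headers are fixed up via
 * mlx5e_shampo_update_hdr() before delivery.
 */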
2335 static void
2336 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
2337 {
2338 	struct sk_buff *skb = rq->hw_gro_data->skb;
2339 	struct mlx5e_rq_stats *stats = rq->stats;
2340 	u16 gro_count = NAPI_GRO_CB(skb)->count;
2341 
2342 	if (likely(skb_shinfo(skb)->nr_frags))
2343 		mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
2344 	if (gro_count > 1) {
2345 		stats->gro_skbs++;
2346 		stats->gro_packets += gro_count;
2347 		stats->gro_bytes += skb->data_len + skb_headlen(skb) * gro_count;
2348 
2349 		mlx5e_shampo_update_hdr(rq, cqe, match);
2350 	} else {
2351 		skb_shinfo(skb)->gso_size = 0;
2352 	}
2353 	napi_gro_receive(rq->cq.napi, skb);
2354 	rq->hw_gro_data->skb = NULL;
2355 }
2356 
2357 static bool
2358 mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
2359 {
2360 	int nr_frags = skb_shinfo(skb)->nr_frags;
2361 
2362 	if (PAGE_SIZE >= GRO_LEGACY_MAX_SIZE)
2363 		return skb->len + data_bcnt <= GRO_LEGACY_MAX_SIZE;
2364 	else
2365 		return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
2366 }
2367 
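/* SHAMPO CQE handler. cqe->shampo.data_offset locates the payload within
 * the MPWQE buffer; e.g. with 4K pages, wqe_offset = 0x2340 gives
 * page_idx = 0x2340 >> 12 = 2 and data_offset = 0x2340 & 0xfff = 0x340.
 * A matching CQE extends the current GRO session skb; a mismatch (or lack
 * of space) flushes it and starts a new session. Header bytes come from
 * the SHAMPO header buffer, payload pages are attached as frags.
 */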
2368 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2369 {
2370 	u16 data_bcnt		= mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
2371 	u16 header_index	= mlx5e_shampo_get_cqe_header_index(rq, cqe);
2372 	u32 wqe_offset		= be32_to_cpu(cqe->shampo.data_offset);
2373 	u16 cstrides		= mpwrq_get_cqe_consumed_strides(cqe);
2374 	u32 data_offset		= wqe_offset & (PAGE_SIZE - 1);
2375 	u32 cqe_bcnt		= mpwrq_get_cqe_byte_cnt(cqe);
2376 	u16 wqe_id		= be16_to_cpu(cqe->wqe_id);
2377 	u32 page_idx		= wqe_offset >> PAGE_SHIFT;
2378 	u16 head_size		= cqe->shampo.header_size;
2379 	struct sk_buff **skb	= &rq->hw_gro_data->skb;
2380 	bool flush		= cqe->shampo.flush;
2381 	bool match		= cqe->shampo.match;
2382 	struct mlx5e_rq_stats *stats = rq->stats;
2383 	struct mlx5e_rx_wqe_ll *wqe;
2384 	struct mlx5e_mpw_info *wi;
2385 	struct mlx5_wq_ll *wq;
2386 
2387 	wi = mlx5e_get_mpw_info(rq, wqe_id);
2388 	wi->consumed_strides += cstrides;
2389 
2390 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2391 		mlx5e_handle_rx_err_cqe(rq, cqe);
2392 		goto mpwrq_cqe_out;
2393 	}
2394 
2395 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2396 		stats->mpwqe_filler_cqes++;
2397 		stats->mpwqe_filler_strides += cstrides;
2398 		goto mpwrq_cqe_out;
2399 	}
2400 
2401 	if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
2402 		match = false;
2403 		mlx5e_shampo_flush_skb(rq, cqe, match);
2404 	}
2405 
2406 	if (!*skb) {
2407 		if (likely(head_size)) {
2408 			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
2409 		} else {
2410 			struct mlx5e_frag_page *frag_page;
2411 
2412 			frag_page = &wi->alloc_units.frag_pages[page_idx];
2413 			/* Drop packets whose header lies in an unreadable data
2414 			 * area to prevent the kernel from touching it.
2415 			 */
2416 			if (unlikely(netmem_is_net_iov(frag_page->netmem)))
2417 				goto free_hd_entry;
2418 			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
2419 								  cqe_bcnt,
2420 								  data_offset,
2421 								  page_idx);
2422 		}
2423 
2424 		if (unlikely(!*skb))
2425 			goto free_hd_entry;
2426 
2427 		NAPI_GRO_CB(*skb)->count = 1;
2428 		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
2429 	} else {
2430 		NAPI_GRO_CB(*skb)->count++;
2431 		if (NAPI_GRO_CB(*skb)->count == 2 &&
2432 		    rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
2433 			void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
2434 			int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
2435 				    sizeof(struct iphdr);
2436 			struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);
2437 
2438 			rq->hw_gro_data->second_ip_id = ntohs(iph->id);
2439 		}
2440 	}
2441 
2442 	if (likely(head_size)) {
2443 		if (data_bcnt) {
2444 			struct mlx5e_frag_page *frag_page;
2445 
2446 			frag_page = &wi->alloc_units.frag_pages[page_idx];
2447 			mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
2448 		} else {
2449 			stats->hds_nodata_packets++;
2450 			stats->hds_nodata_bytes += head_size;
2451 		}
2452 	} else {
2453 		stats->hds_nosplit_packets++;
2454 		stats->hds_nosplit_bytes += data_bcnt;
2455 	}
2456 
2457 	if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
2458 		*skb = NULL;
2459 		goto free_hd_entry;
2460 	}
2461 	if (flush && rq->hw_gro_data->skb)
2462 		mlx5e_shampo_flush_skb(rq, cqe, match);
2463 free_hd_entry:
2464 	if (likely(head_size))
2465 		mlx5e_free_rx_shampo_hd_entry(rq, header_index);
2466 mpwrq_cqe_out:
2467 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2468 		return;
2469 
2470 	if (unlikely(!cstrides))
2471 		return;
2472 
2473 	wq  = &rq->mpwqe.wq;
2474 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2475 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2476 }
2477 
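/* MPWQE (striding RQ) CQE handler. Each multi-packet WQE is carved into
 * equal strides; the CQE reports the first stride and the stride count.
 * E.g. with log_stride_sz = 8 and page_shift = 12, stride_ix = 20 yields
 * wqe_offset = 20 << 8 = 0x1400, page_idx = 1 and head_offset = 0x400.
 * The WQE is popped only once all of its strides have been consumed.
 */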
2478 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2479 {
2480 	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
2481 	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
2482 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
2483 	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
2484 	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
2485 	u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
2486 	u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
2487 	struct mlx5e_rx_wqe_ll *wqe;
2488 	struct mlx5_wq_ll *wq;
2489 	struct sk_buff *skb;
2490 	u16 cqe_bcnt;
2491 
2492 	wi->consumed_strides += cstrides;
2493 
2494 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2495 		mlx5e_handle_rx_err_cqe(rq, cqe);
2496 		goto mpwrq_cqe_out;
2497 	}
2498 
2499 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2500 		struct mlx5e_rq_stats *stats = rq->stats;
2501 
2502 		stats->mpwqe_filler_cqes++;
2503 		stats->mpwqe_filler_strides += cstrides;
2504 		goto mpwrq_cqe_out;
2505 	}
2506 
2507 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
2508 
2509 	skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
2510 			      mlx5e_skb_from_cqe_mpwrq_linear,
2511 			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
2512 			      mlx5e_xsk_skb_from_cqe_mpwrq_linear,
2513 			      rq, wi, cqe, cqe_bcnt, head_offset,
2514 			      page_idx);
2515 	if (!skb)
2516 		goto mpwrq_cqe_out;
2517 
2518 	if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
2519 		goto mpwrq_cqe_out;
2520 
2521 	if (mlx5e_cqe_regb_chain(cqe))
2522 		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
2523 			dev_kfree_skb_any(skb);
2524 			goto mpwrq_cqe_out;
2525 		}
2526 
2527 	napi_gro_receive(rq->cq.napi, skb);
2528 
2529 mpwrq_cqe_out:
2530 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2531 		return;
2532 
2533 	wq  = &rq->mpwqe.wq;
2534 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2535 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2536 }
2537 
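/* Poll loop for enhanced CQE compression: a full-size "title" CQE carries
 * the shared fields, followed by blocks of compressed mini-CQEs. The title
 * slot is read lazily, since the last title of one poll bulk may own the
 * compressed block that arrives in the next one.
 */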
2538 static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
2539 						 struct mlx5_cqwq *cqwq,
2540 						 int budget_rem)
2541 {
2542 	struct mlx5_cqe64 *cqe, *title_cqe = NULL;
2543 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
2544 	int work_done = 0;
2545 
2546 	cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq);
2547 	if (!cqe)
2548 		return work_done;
2549 
2550 	if (cqd->last_cqe_title &&
2551 	    (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
2552 		rq->stats->cqe_compress_blks++;
2553 		cqd->last_cqe_title = false;
2554 	}
2555 
2556 	do {
2557 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2558 			if (title_cqe) {
2559 				mlx5e_read_enhanced_title_slot(rq, title_cqe);
2560 				title_cqe = NULL;
2561 				rq->stats->cqe_compress_blks++;
2562 			}
2563 			work_done +=
2564 				mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
2565 							      budget_rem - work_done);
2566 			continue;
2567 		}
2568 		title_cqe = cqe;
2569 		mlx5_cqwq_pop(cqwq);
2570 
2571 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2572 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2573 				rq, cqe);
2574 		work_done++;
2575 	} while (work_done < budget_rem &&
2576 		 (cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq)));
2577 
2578 	/* the last CQE might be the title for the next poll bulk */
2579 	if (title_cqe) {
2580 		mlx5e_read_enhanced_title_slot(rq, title_cqe);
2581 		cqd->last_cqe_title = true;
2582 	}
2583 
2584 	return work_done;
2585 }
2586 
2587 static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
2588 					      struct mlx5_cqwq *cqwq,
2589 					      int budget_rem)
2590 {
2591 	struct mlx5_cqe64 *cqe;
2592 	int work_done = 0;
2593 
2594 	if (rq->cqd.left)
2595 		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
2596 
2597 	while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
2598 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2599 			work_done +=
2600 				mlx5e_decompress_cqes_start(rq, cqwq,
2601 							    budget_rem - work_done);
2602 			continue;
2603 		}
2604 
2605 		mlx5_cqwq_pop(cqwq);
2606 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2607 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2608 				rq, cqe);
2609 		work_done++;
2610 	}
2611 
2612 	return work_done;
2613 }
2614 
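/* NAPI RX poll: dispatch to the basic or enhanced CQE-decompression loop,
 * then flush any open HW GRO session, flush deferred XDP work, and update
 * the CQ doorbell record. The wmb() orders the CQ space release before
 * more CQEs are enabled.
 */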
2615 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
2616 {
2617 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
2618 	struct mlx5_cqwq *cqwq = &cq->wq;
2619 	int work_done;
2620 
2621 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
2622 		return 0;
2623 
2624 	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
2625 		work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
2626 								  budget);
2627 	else
2628 		work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
2629 							       budget);
2630 
2631 	if (work_done == 0)
2632 		return 0;
2633 
2634 	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
2635 		mlx5e_shampo_flush_skb(rq, NULL, false);
2636 
2637 	if (rcu_access_pointer(rq->xdp_prog))
2638 		mlx5e_xdp_rx_poll_complete(rq);
2639 
2640 	mlx5_cqwq_update_db_record(cqwq);
2641 
2642 	/* ensure cq space is freed before enabling more cqes */
2643 	wmb();
2644 
2645 	return work_done;
2646 }
2647 
2648 #ifdef CONFIG_MLX5_CORE_IPOIB
2649 
2650 #define MLX5_IB_GRH_SGID_OFFSET 8
2651 #define MLX5_IB_GRH_DGID_OFFSET 24
2652 #define MLX5_GID_SIZE           16
2653 
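/* IPoIB completion: resolve the child netdev from the QPN, classify the
 * packet type from the GRH DGID, drop self-sent (HCA-replicated) multicast,
 * then strip the GRH and prepend the zeroed IPoIB pseudo header expected
 * by the stack.
 */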
2654 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
2655 					 struct mlx5_cqe64 *cqe,
2656 					 u32 cqe_bcnt,
2657 					 struct sk_buff *skb)
2658 {
2659 	struct hwtstamp_config *tstamp;
2660 	struct mlx5e_rq_stats *stats;
2661 	struct net_device *netdev;
2662 	struct mlx5e_priv *priv;
2663 	char *pseudo_header;
2664 	u32 flags_rqpn;
2665 	u32 qpn;
2666 	u8 *dgid;
2667 	u8 g;
2668 
2669 	qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
2670 	netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
2671 
2672 	/* No mapping present, cannot process SKB. This might happen if a child
2673 	 * interface is going down while unprocessed CQEs remain on the parent RQ.
2674 	 */
2675 	if (unlikely(!netdev)) {
2676 		/* TODO: add drop counters support */
2677 		skb->dev = NULL;
2678 		pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
2679 		return;
2680 	}
2681 
2682 	priv = mlx5i_epriv(netdev);
2683 	tstamp = &priv->tstamp;
2684 	stats = &priv->channel_stats[rq->ix]->rq;
2685 
2686 	flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
2687 	g = (flags_rqpn >> 28) & 3;
2688 	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
2689 	if ((!g) || dgid[0] != 0xff)
2690 		skb->pkt_type = PACKET_HOST;
2691 	else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
2692 		skb->pkt_type = PACKET_BROADCAST;
2693 	else
2694 		skb->pkt_type = PACKET_MULTICAST;
2695 
2696 	/* Drop packets that this interface sent, i.e. multicast packets
2697 	 * that the HCA has replicated.
2698 	 */
2699 	if (g && (qpn == (flags_rqpn & 0xffffff)) &&
2700 	    (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
2701 		    MLX5_GID_SIZE) == 0)) {
2702 		skb->dev = NULL;
2703 		return;
2704 	}
2705 
2706 	skb_pull(skb, MLX5_IB_GRH_BYTES);
2707 
2708 	skb->protocol = *((__be16 *)(skb->data));
2709 
2710 	if (netdev->features & NETIF_F_RXCSUM) {
2711 		skb->ip_summed = CHECKSUM_COMPLETE;
2712 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
2713 		stats->csum_complete++;
2714 	} else {
2715 		skb->ip_summed = CHECKSUM_NONE;
2716 		stats->csum_none++;
2717 	}
2718 
2719 	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
2720 		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
2721 								  rq->clock, get_cqe_ts(cqe));
2722 	skb_record_rx_queue(skb, rq->ix);
2723 
2724 	if (likely(netdev->features & NETIF_F_RXHASH))
2725 		mlx5e_skb_set_hash(cqe, skb);
2726 
2727 	/* 20 bytes of IPoIB header and 4 bytes for the existing encap */
2728 	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
2729 	memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
2730 	skb_reset_mac_header(skb);
2731 	skb_pull(skb, MLX5_IPOIB_HARD_LEN);
2732 
2733 	skb->dev = netdev;
2734 
2735 	stats->packets++;
2736 	stats->bytes += cqe_bcnt;
2737 }
2738 
2739 static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2740 {
2741 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2742 	struct mlx5e_wqe_frag_info *wi;
2743 	struct sk_buff *skb;
2744 	u32 cqe_bcnt;
2745 	u16 ci;
2746 
2747 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2748 	wi       = get_frag(rq, ci);
2749 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2750 
2751 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2752 		rq->stats->wqe_err++;
2753 		goto wq_cyc_pop;
2754 	}
2755 
2756 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
2757 			      mlx5e_skb_from_cqe_linear,
2758 			      mlx5e_skb_from_cqe_nonlinear,
2759 			      rq, wi, cqe, cqe_bcnt);
2760 	if (!skb)
2761 		goto wq_cyc_pop;
2762 
2763 	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2764 	if (unlikely(!skb->dev)) {
2765 		dev_kfree_skb_any(skb);
2766 		goto wq_cyc_pop;
2767 	}
2768 	napi_gro_receive(rq->cq.napi, skb);
2769 
2770 wq_cyc_pop:
2771 	mlx5_wq_cyc_pop(wq);
2772 }
2773 
2774 const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
2775 	.handle_rx_cqe       = mlx5i_handle_rx_cqe,
2776 	.handle_rx_cqe_mpwqe = NULL, /* Not supported */
2777 };
2778 #endif /* CONFIG_MLX5_CORE_IPOIB */
2779 
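/* Wire up the per-RQ function pointers based on the WQ type (striding vs.
 * cyclic), XSK mode and the packet-merge (SHAMPO) configuration; fails if
 * the profile doesn't provide the required CQE handler.
 */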
2780 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
2781 {
2782 	struct net_device *netdev = rq->netdev;
2783 	struct mlx5_core_dev *mdev = rq->mdev;
2784 	struct mlx5e_priv *priv = rq->priv;
2785 
2786 	switch (rq->wq_type) {
2787 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2788 		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
2789 			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
2790 			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
2791 				mlx5e_skb_from_cqe_mpwrq_linear :
2792 				mlx5e_skb_from_cqe_mpwrq_nonlinear;
2793 		rq->post_wqes = mlx5e_post_rx_mpwqes;
2794 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
2795 
2796 		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
2797 			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
2798 			if (!rq->handle_rx_cqe) {
2799 				netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
2800 				return -EINVAL;
2801 			}
2802 		} else {
2803 			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
2804 			if (!rq->handle_rx_cqe) {
2805 				netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
2806 				return -EINVAL;
2807 			}
2808 		}
2809 
2810 		break;
2811 	default: /* MLX5_WQ_TYPE_CYCLIC */
2812 		rq->wqe.skb_from_cqe = xsk ?
2813 			mlx5e_xsk_skb_from_cqe_linear :
2814 			mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
2815 				mlx5e_skb_from_cqe_linear :
2816 				mlx5e_skb_from_cqe_nonlinear;
2817 		rq->post_wqes = mlx5e_post_rx_wqes;
2818 		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2819 		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
2820 		if (!rq->handle_rx_cqe) {
2821 			netdev_err(netdev, "RX handler of RQ is not set\n");
2822 			return -EINVAL;
2823 		}
2824 	}
2825 
2826 	return 0;
2827 }
2828 
2829 static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2830 {
2831 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2832 	struct mlx5e_wqe_frag_info *wi;
2833 	struct sk_buff *skb;
2834 	u32 cqe_bcnt;
2835 	u16 trap_id;
2836 	u16 ci;
2837 
2838 	trap_id  = get_cqe_flow_tag(cqe);
2839 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2840 	wi       = get_frag(rq, ci);
2841 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2842 
2843 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2844 		rq->stats->wqe_err++;
2845 		goto wq_cyc_pop;
2846 	}
2847 
2848 	skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
2849 	if (!skb)
2850 		goto wq_cyc_pop;
2851 
2852 	if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
2853 		goto wq_cyc_pop;
2854 	skb_push(skb, ETH_HLEN);
2855 
2856 	mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
2857 				 rq->netdev->devlink_port);
2858 	dev_kfree_skb_any(skb);
2859 
2860 wq_cyc_pop:
2861 	mlx5_wq_cyc_pop(wq);
2862 }
2863 
2864 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
2865 {
2866 	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
2867 			       mlx5e_skb_from_cqe_linear :
2868 			       mlx5e_skb_from_cqe_nonlinear;
2869 	rq->post_wqes = mlx5e_post_rx_wqes;
2870 	rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2871 	rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
2872 }
2873