xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c (revision 2c7e63d702f6c4209c5af833308e7fcbc7d4ab17)
1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/ip.h>
34 #include <linux/ipv6.h>
35 #include <linux/tcp.h>
36 #include <linux/bitmap.h>
37 #include <linux/filter.h>
38 #include <net/ip6_checksum.h>
39 #include <net/page_pool/helpers.h>
40 #include <net/inet_ecn.h>
41 #include <net/gro.h>
42 #include <net/udp.h>
43 #include <net/tcp.h>
44 #include <net/xdp_sock_drv.h>
45 #include "en.h"
46 #include "en/txrx.h"
47 #include "en_tc.h"
48 #include "eswitch.h"
49 #include "en_rep.h"
50 #include "en/rep/tc.h"
51 #include "ipoib/ipoib.h"
52 #include "en_accel/ipsec.h"
53 #include "en_accel/macsec.h"
54 #include "en_accel/psp_rxtx.h"
55 #include "en_accel/ipsec_rxtx.h"
56 #include "en_accel/ktls_txrx.h"
57 #include "en/xdp.h"
58 #include "en/xsk/rx.h"
59 #include "en/health.h"
60 #include "en/params.h"
61 #include "devlink.h"
62 #include "en/devlink.h"
63 
64 static struct sk_buff *
65 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
66 				struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
67 				u32 page_idx);
68 static struct sk_buff *
69 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
70 				   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
71 				   u32 page_idx);
72 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
74 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
75 
76 const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
77 	.handle_rx_cqe       = mlx5e_handle_rx_cqe,
78 	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
79 	.handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
80 };
81 
82 static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
83 				       u32 cqcc, void *data)
84 {
85 	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
86 
87 	memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
88 }
89 
90 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
91 					   struct mlx5_cqe64 *cqe)
92 {
93 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
94 	struct mlx5_cqe64 *title = &cqd->title;
95 
96 	memcpy(title, cqe, sizeof(struct mlx5_cqe64));
97 
98 	if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
99 		return;
100 
101 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
102 		cqd->wqe_counter = mpwrq_get_cqe_stride_index(title) +
103 			mpwrq_get_cqe_consumed_strides(title);
104 	else
105 		cqd->wqe_counter =
106 			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
107 }
108 
109 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
110 					 struct mlx5_cqwq *wq,
111 					 u32 cqcc)
112 {
113 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
114 	struct mlx5_cqe64 *title = &cqd->title;
115 
116 	mlx5e_read_cqe_slot(wq, cqcc, title);
117 	cqd->left        = be32_to_cpu(title->byte_cnt);
118 	cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
119 	rq->stats->cqe_compress_blks++;
120 }
121 
122 static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
123 					    struct mlx5e_cq_decomp *cqd,
124 					    u32 cqcc)
125 {
126 	mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
127 	cqd->mini_arr_idx = 0;
128 }
129 
130 static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
131 {
132 	u32 cqcc   = wq->cc;
133 	u8  op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
134 	u32 ci     = mlx5_cqwq_ctr2ix(wq, cqcc);
135 	u32 wq_sz  = mlx5_cqwq_get_size(wq);
136 	u32 ci_top = min_t(u32, wq_sz, ci + n);
137 
138 	for (; ci < ci_top; ci++, n--) {
139 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
140 
141 		cqe->op_own = op_own;
142 	}
143 
144 	if (unlikely(ci == wq_sz)) {
145 		op_own = !op_own;
146 		for (ci = 0; ci < n; ci++) {
147 			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
148 
149 			cqe->op_own = op_own;
150 		}
151 	}
152 }
153 
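/* Editor's note -- illustrative sketch, not part of the upstream source:
 * once a compression session has been expanded in place, the consumed CQ
 * slots must be handed back to hardware with the correct ownership bit.
 * That bit simply alternates on every wrap of the consumer counter:
 *
 *	u8 expected_owner_bit(u32 cqcc, u32 log_cq_size)
 *	{
 *		return (cqcc >> log_cq_size) & 1;	/* flips once per wrap */
 *	}
 *
 * mlx5e_cqes_update_owner() above applies this in two runs, before and after
 * the wrap point, so the bit is negated exactly once when the ring wraps.
 */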
154 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
155 					struct mlx5_cqwq *wq,
156 					u32 cqcc)
157 {
158 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
159 	struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
160 	struct mlx5_cqe64 *title = &cqd->title;
161 
162 	title->byte_cnt     = mini_cqe->byte_cnt;
163 	title->check_sum    = mini_cqe->checksum;
164 	title->op_own      &= 0xf0;
165 	title->op_own      |= 0x01 & (cqcc >> wq->fbc.log_sz);
166 
167 	/* state bit set implies linked-list striding RQ wq type and
168 	 * HW stride index capability supported
169 	 */
170 	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
171 		title->wqe_counter = mini_cqe->stridx;
172 		return;
173 	}
174 
175 	/* HW stride index capability not supported */
176 	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
177 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
178 		cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
179 	else
180 		cqd->wqe_counter =
181 			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
182 }
183 
184 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
185 						struct mlx5_cqwq *wq,
186 						u32 cqcc)
187 {
188 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
189 
190 	mlx5e_decompress_cqe(rq, wq, cqcc);
191 	cqd->title.rss_hash_type   = 0;
192 	cqd->title.rss_hash_result = 0;
193 }
194 
195 static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
196 					 struct mlx5_cqwq *wq,
197 					 struct mlx5_cqe64 *cqe,
198 					 int budget_rem)
199 {
200 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
201 	u32 cqcc, left;
202 	u32 i;
203 
204 	left = get_cqe_enhanced_num_mini_cqes(cqe);
205 	/* Avoid breaking the CQE compression session in the middle if the
206 	 * budget is not sufficient to handle all of it. In that case, return
207 	 * work_done == budget_rem to give NAPI a 'busy' indication.
208 	 */
209 	if (unlikely(left > budget_rem))
210 		return budget_rem;
211 
212 	cqcc = wq->cc;
213 	cqd->mini_arr_idx = 0;
214 	memcpy(cqd->mini_arr, cqe, sizeof(struct mlx5_cqe64));
215 	for (i = 0; i < left; i++, cqd->mini_arr_idx++, cqcc++) {
216 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
217 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
218 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
219 				rq, &cqd->title);
220 	}
221 	wq->cc = cqcc;
222 	rq->stats->cqe_compress_pkts += left;
223 
224 	return left;
225 }
226 
227 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
228 					     struct mlx5_cqwq *wq,
229 					     int update_owner_only,
230 					     int budget_rem)
231 {
232 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
233 	u32 cqcc = wq->cc + update_owner_only;
234 	u32 cqe_count;
235 	u32 i;
236 
237 	cqe_count = min_t(u32, cqd->left, budget_rem);
238 
239 	for (i = update_owner_only; i < cqe_count;
240 	     i++, cqd->mini_arr_idx++, cqcc++) {
241 		if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
242 			mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
243 
244 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
245 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
246 				mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
247 				rq, &cqd->title);
248 	}
249 	mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
250 	wq->cc = cqcc;
251 	cqd->left -= cqe_count;
252 	rq->stats->cqe_compress_pkts += cqe_count;
253 
254 	return cqe_count;
255 }
256 
257 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
258 					      struct mlx5_cqwq *wq,
259 					      int budget_rem)
260 {
261 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
262 	u32 cc = wq->cc;
263 
264 	mlx5e_read_title_slot(rq, wq, cc);
265 	mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
266 	mlx5e_decompress_cqe(rq, wq, cc);
267 	INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
268 			mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
269 			rq, &cqd->title);
270 	cqd->mini_arr_idx++;
271 
272 	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
273 }
274 
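/* Editor's note -- illustrative sketch, not part of the upstream source:
 * with CQE compression one "title" CQE carries the fields shared by a burst
 * of packets, and each 8-byte mini CQE carries only the per-packet fields
 * (byte count, checksum, stride/WQE index). Decompression is conceptually:
 *
 *	for each mini_cqe in session {
 *		title.byte_cnt  = mini_cqe.byte_cnt;
 *		title.check_sum = mini_cqe.checksum;
 *		advance title.wqe_counter (or take mini_cqe.stridx);
 *		rq->handle_rx_cqe(rq, &title);	/* regular per-packet path */
 *	}
 *
 * The "enhanced" variant reads the whole mini array from the title slot and
 * refuses to start a session it cannot finish within the NAPI budget.
 */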
275 #define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
276 
277 static int mlx5e_page_alloc_fragmented(struct page_pool *pp,
278 				       struct mlx5e_frag_page *frag_page)
279 {
280 	netmem_ref netmem = page_pool_dev_alloc_netmems(pp);
281 
282 	if (unlikely(!netmem))
283 		return -ENOMEM;
284 
285 	page_pool_fragment_netmem(netmem, MLX5E_PAGECNT_BIAS_MAX);
286 
287 	*frag_page = (struct mlx5e_frag_page) {
288 		.netmem	= netmem,
289 		.frags	= 0,
290 	};
291 
292 	return 0;
293 }
294 
295 static void mlx5e_page_release_fragmented(struct page_pool *pp,
296 					  struct mlx5e_frag_page *frag_page)
297 {
298 	u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
299 	netmem_ref netmem = frag_page->netmem;
300 
301 	if (page_pool_unref_netmem(netmem, drain_count) == 0)
302 		page_pool_put_unrefed_netmem(pp, netmem, -1, true);
303 }
304 
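/* Editor's note -- illustrative sketch, not part of the upstream source:
 * instead of taking one page_pool reference per SKB fragment, each page is
 * pre-fragmented with a large bias (MLX5E_PAGECNT_BIAS_MAX). Handing a
 * fragment to the stack is then a plain frag_page->frags++, and release
 * drains whatever part of the bias was never consumed:
 *
 *	drain = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
 *	page_pool_unref_netmem(netmem, drain);	/* recycled at refcount 0 */
 *
 * so the page returns to the pool only after every fragment owner (stack,
 * XDP, driver) has dropped its reference.
 */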
305 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
306 				    struct mlx5e_wqe_frag_info *frag)
307 {
308 	int err = 0;
309 
310 	if (!frag->offset)
311 		/* On first frag (offset == 0), replenish page.
312 		 * Other frags that point to the same page (at a different
313 		 * offset) reuse the freshly allocated page and must not
314 		 * replenish it again themselves.
315 		 */
316 		err = mlx5e_page_alloc_fragmented(rq->page_pool,
317 						  frag->frag_page);
318 
319 	return err;
320 }
321 
322 static bool mlx5e_frag_can_release(struct mlx5e_wqe_frag_info *frag)
323 {
324 #define CAN_RELEASE_MASK \
325 	(BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) | BIT(MLX5E_WQE_FRAG_SKIP_RELEASE))
326 
327 #define CAN_RELEASE_VALUE BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE)
328 
329 	return (frag->flags & CAN_RELEASE_MASK) == CAN_RELEASE_VALUE;
330 }
331 
332 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
333 				     struct mlx5e_wqe_frag_info *frag)
334 {
335 	if (mlx5e_frag_can_release(frag))
336 		mlx5e_page_release_fragmented(rq->page_pool, frag->frag_page);
337 }
338 
339 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
340 {
341 	return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
342 }
343 
344 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
345 			      u16 ix)
346 {
347 	struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
348 	int err;
349 	int i;
350 
351 	for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
352 		dma_addr_t addr;
353 		u16 headroom;
354 
355 		err = mlx5e_get_rx_frag(rq, frag);
356 		if (unlikely(err))
357 			goto free_frags;
358 
359 		frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
360 
361 		headroom = i == 0 ? rq->buff.headroom : 0;
362 		addr = page_pool_get_dma_addr_netmem(frag->frag_page->netmem);
363 		wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
364 	}
365 
366 	return 0;
367 
368 free_frags:
369 	while (--i >= 0)
370 		mlx5e_put_rx_frag(rq, --frag);
371 
372 	return err;
373 }
374 
375 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
376 				     struct mlx5e_wqe_frag_info *wi)
377 {
378 	int i;
379 
380 	for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
381 		mlx5e_put_rx_frag(rq, wi);
382 }
383 
384 static void mlx5e_xsk_free_rx_wqe(struct mlx5e_wqe_frag_info *wi)
385 {
386 	if (!(wi->flags & BIT(MLX5E_WQE_FRAG_SKIP_RELEASE)))
387 		xsk_buff_free(*wi->xskp);
388 }
389 
390 static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
391 {
392 	struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
393 
394 	if (rq->xsk_pool) {
395 		mlx5e_xsk_free_rx_wqe(wi);
396 	} else {
397 		mlx5e_free_rx_wqe(rq, wi);
398 
399 		/* Avoid a second release of the wqe pages: dealloc is called
400 		 * for the same missing wqes on regular RQ flush and on regular
401 		 * RQ close. This happens when XSK RQs come into play.
402 		 */
403 		for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++)
404 			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
405 	}
406 }
407 
408 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
409 {
410 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
411 	int i;
412 
413 	for (i = 0; i < wqe_bulk; i++) {
414 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
415 		struct mlx5e_wqe_frag_info *wi;
416 
417 		wi = get_frag(rq, j);
418 		/* The page is always put into the Reuse Ring, because there
419 		 * is no way to return the page to userspace when the
420 		 * interface goes down.
421 		 */
422 		mlx5e_xsk_free_rx_wqe(wi);
423 	}
424 }
425 
426 static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
427 {
428 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
429 	int i;
430 
431 	for (i = 0; i < wqe_bulk; i++) {
432 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
433 		struct mlx5e_wqe_frag_info *wi;
434 
435 		wi = get_frag(rq, j);
436 		mlx5e_free_rx_wqe(rq, wi);
437 	}
438 }
439 
440 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
441 {
442 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
443 	int i;
444 
445 	for (i = 0; i < wqe_bulk; i++) {
446 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
447 		struct mlx5e_rx_wqe_cyc *wqe;
448 
449 		wqe = mlx5_wq_cyc_get_wqe(wq, j);
450 
451 		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
452 			break;
453 	}
454 
455 	return i;
456 }
457 
458 static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
459 {
460 	int remaining = wqe_bulk;
461 	int total_alloc = 0;
462 	int refill_alloc;
463 	int refill;
464 
465 	/* The WQE bulk is split into smaller bulks that are sized
466 	 * according to the page pool cache refill size to avoid overflowing
467 	 * the page pool cache due to too many page releases at once.
468 	 */
469 	do {
470 		refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
471 
472 		mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
473 		refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
474 		if (unlikely(refill_alloc != refill))
475 			goto err_free;
476 
477 		total_alloc += refill_alloc;
478 		remaining -= refill;
479 	} while (remaining);
480 
481 	return total_alloc;
482 
483 err_free:
484 	mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
485 
486 	for (int i = 0; i < total_alloc + refill; i++) {
487 		int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
488 		struct mlx5e_wqe_frag_info *frag;
489 
490 		frag = get_frag(rq, j);
491 		for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
492 			frag->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
493 	}
494 
495 	return 0;
496 }
497 
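/* Editor's note -- illustrative sketch, not part of the upstream source:
 * mlx5e_refill_rx_wqes() frees and re-posts WQEs in slices of
 * rq->wqe.info.refill_unit so that no more than roughly one page-pool-cache
 * worth of pages is released before being allocated again:
 *
 *	while (remaining) {
 *		n = min(refill_unit, remaining);
 *		free n WQEs, then allocate n WQEs;	/* roll back on failure */
 *		remaining -= n;
 *	}
 *
 * On failure every frag touched so far is flagged SKIP_RELEASE so the normal
 * teardown path cannot free the same pages a second time.
 */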
498 static void
499 mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo,
500 			       struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
501 			       u32 frag_offset, u32 len)
502 {
503 	netmem_ref netmem = frag_page->netmem;
504 	skb_frag_t *frag;
505 
506 	dma_addr_t addr = page_pool_get_dma_addr_netmem(netmem);
507 
508 	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
509 	if (!xdp_buff_has_frags(xdp)) {
510 		/* Init on the first fragment to avoid cold cache access
511 		 * when possible.
512 		 */
513 		sinfo->nr_frags = 0;
514 		sinfo->xdp_frags_size = 0;
515 		xdp_buff_set_frags_flag(xdp);
516 	}
517 
518 	frag = &sinfo->frags[sinfo->nr_frags++];
519 	skb_frag_fill_netmem_desc(frag, netmem, frag_offset, len);
520 
521 	if (netmem_is_pfmemalloc(netmem))
522 		xdp_buff_set_frag_pfmemalloc(xdp);
523 	sinfo->xdp_frags_size += len;
524 }
525 
526 static inline void
527 mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
528 		   struct mlx5e_frag_page *frag_page,
529 		   u32 frag_offset, u32 len,
530 		   unsigned int truesize)
531 {
532 	dma_addr_t addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
533 	u8 next_frag = skb_shinfo(skb)->nr_frags;
534 	netmem_ref netmem = frag_page->netmem;
535 
536 	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
537 				rq->buff.map_dir);
538 
539 	if (skb_can_coalesce_netmem(skb, next_frag, netmem, frag_offset)) {
540 		skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
541 		return;
542 	}
543 
544 	frag_page->frags++;
545 	skb_add_rx_frag_netmem(skb, next_frag, netmem,
546 			       frag_offset, len, truesize);
547 }
548 
549 static inline void
550 mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
551 		      netmem_ref netmem, dma_addr_t addr,
552 		      int offset_from, int dma_offset, u32 headlen)
553 {
554 	const void *from = netmem_address(netmem) + offset_from;
555 	/* Aligning len to sizeof(long) optimizes memcpy performance */
556 	unsigned int len = ALIGN(headlen, sizeof(long));
557 
558 	dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
559 				rq->buff.map_dir);
560 	skb_copy_to_linear_data(skb, from, len);
561 }
562 
563 static void
564 mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
565 {
566 	bool no_xdp_xmit;
567 	int i;
568 
569 	/* A common case for AF_XDP. */
570 	if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe))
571 		return;
572 
573 	no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
574 
575 	if (rq->xsk_pool) {
576 		struct xdp_buff **xsk_buffs = wi->alloc_units.xsk_buffs;
577 
578 		/* The page is always put into the Reuse Ring, because there
579 		 * is no way to return the page to userspace when the interface
580 		 * goes down.
581 		 */
582 		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
583 			if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap))
584 				xsk_buff_free(xsk_buffs[i]);
585 	} else {
586 		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) {
587 			if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap)) {
588 				struct mlx5e_frag_page *frag_page;
589 
590 				frag_page = &wi->alloc_units.frag_pages[i];
591 				mlx5e_page_release_fragmented(rq->page_pool,
592 							      frag_page);
593 			}
594 		}
595 	}
596 }
597 
598 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
599 {
600 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
601 
602 	do {
603 		u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
604 
605 		mlx5_wq_ll_push(wq, next_wqe_index);
606 	} while (--n);
607 
608 	/* ensure wqes are visible to device before updating doorbell record */
609 	dma_wmb();
610 
611 	mlx5_wq_ll_update_db_record(wq);
612 }
613 
614 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
615 {
616 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
617 	struct mlx5e_icosq *sq = rq->icosq;
618 	struct mlx5e_frag_page *frag_page;
619 	struct mlx5_wq_cyc *wq = &sq->wq;
620 	struct mlx5e_umr_wqe *umr_wqe;
621 	u32 offset; /* 17-bit value with MTT. */
622 	bool sync_locked;
623 	u16 pi;
624 	int err;
625 	int i;
626 
627 	sync_locked = mlx5e_icosq_sync_lock(sq);
628 	pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
629 	umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
630 	memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
631 
632 	frag_page = &wi->alloc_units.frag_pages[0];
633 
634 	for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
635 		dma_addr_t addr;
636 
637 		err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);
638 		if (unlikely(err))
639 			goto err_unmap;
640 
641 		addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
642 		umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
643 			.ptag = cpu_to_be64(addr | MLX5_EN_WR),
644 		};
645 	}
646 
647 	/* Pad if needed, in case the value set to ucseg->xlt_octowords
648 	 * in mlx5e_build_umr_wqe() needed alignment.
649 	 */
650 	if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
651 		int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
652 			rq->mpwqe.pages_per_wqe;
653 
654 		memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
655 		       sizeof(*umr_wqe->inline_mtts) * pad);
656 	}
657 
658 	bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
659 	wi->consumed_strides = 0;
660 
661 	umr_wqe->hdr.ctrl.opmod_idx_opcode =
662 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
663 			    MLX5_OPCODE_UMR);
664 
665 	offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
666 	umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
667 
668 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
669 		.wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
670 		.num_wqebbs = rq->mpwqe.umr_wqebbs,
671 		.umr.rq     = rq,
672 	};
673 
674 	sq->pc += rq->mpwqe.umr_wqebbs;
675 	mlx5e_icosq_sync_unlock(sq, sync_locked);
676 
677 	sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
678 
679 	return 0;
680 
681 err_unmap:
682 	mlx5e_icosq_sync_unlock(sq, sync_locked);
683 	while (--i >= 0) {
684 		frag_page--;
685 		mlx5e_page_release_fragmented(rq->page_pool, frag_page);
686 	}
687 
688 	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
689 
690 	rq->stats->buff_alloc_err++;
691 
692 	return err;
693 }
694 
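/* Editor's note -- illustrative sketch, not part of the upstream source:
 * a multi-packet WQE is not usable right after its pages are allocated: the
 * pages must first be mapped into the device MTT through a UMR WQE posted on
 * the companion ICOSQ. The flow above is essentially:
 *
 *	allocate pages_per_wqe pages from the page pool;
 *	write their DMA addresses into umr_wqe->inline_mtts[];
 *	record the UMR in sq->db.wqe_info[] and advance sq->pc;
 *
 * Only when the UMR completion is seen in mlx5e_poll_ico_cq() does
 * umr_completed advance, letting mlx5e_post_rx_mpwqes() push the WQE to HW.
 */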
695 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
696 {
697 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
698 	/* This function is called on rq/netdev close. */
699 	mlx5e_free_rx_mpwqe(rq, wi);
700 
701 	/* Avoid a second release of the wqe pages: dealloc is also called
702 	 * for missing wqes on an already flushed RQ.
703 	 */
704 	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
705 }
706 
707 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
708 {
709 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
710 	int wqe_bulk, count;
711 	bool busy = false;
712 	u16 head;
713 
714 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
715 		return false;
716 
717 	if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
718 		return false;
719 
720 	if (rq->page_pool)
721 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
722 
723 	wqe_bulk = mlx5_wq_cyc_missing(wq);
724 	head = mlx5_wq_cyc_get_head(wq);
725 
726 	/* Don't allow any newly allocated WQEs to share the same page with old
727 	 * WQEs that aren't completed yet. Stop earlier.
728 	 */
729 	wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
730 
731 	if (!rq->xsk_pool) {
732 		count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
733 	} else if (likely(!dma_dev_need_sync(rq->pdev))) {
734 		mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
735 		count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
736 	} else {
737 		mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
738 		/* If dma_need_sync is true, it's more efficient to call
739 		 * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
740 		 * because the latter does the same check and returns only one
741 		 * frame.
742 		 */
743 		count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
744 	}
745 
746 	mlx5_wq_cyc_push_n(wq, count);
747 	if (unlikely(count != wqe_bulk)) {
748 		rq->stats->buff_alloc_err++;
749 		busy = true;
750 	}
751 
752 	/* ensure wqes are visible to device before updating doorbell record */
753 	dma_wmb();
754 
755 	mlx5_wq_cyc_update_db_record(wq);
756 
757 	return busy;
758 }
759 
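/* Editor's note -- illustrative sketch, not part of the upstream source:
 * the bulk trim in mlx5e_post_rx_wqes(),
 *
 *	wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
 *
 * stops the refill exactly at a page boundary so freshly allocated WQEs never
 * share a page with WQEs the hardware has not completed yet. For example,
 * with 4 WQEs per page (mask 0x3), head == 6 and 11 missing WQEs, the bulk is
 * trimmed to 10 and the refill ends just before index 16.
 */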
760 void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
761 {
762 	u16 sqcc;
763 
764 	sqcc = sq->cc;
765 
766 	while (sqcc != sq->pc) {
767 		struct mlx5e_icosq_wqe_info *wi;
768 		u16 ci;
769 
770 		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
771 		wi = &sq->db.wqe_info[ci];
772 		sqcc += wi->num_wqebbs;
773 #ifdef CONFIG_MLX5_EN_TLS
774 		switch (wi->wqe_type) {
775 		case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
776 			mlx5e_ktls_handle_ctx_completion(wi);
777 			break;
778 		case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
779 			mlx5e_ktls_handle_get_psv_completion(wi, sq);
780 			break;
781 		}
782 #endif
783 	}
784 	sq->cc = sqcc;
785 }
786 
787 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
788 {
789 	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
790 	struct mlx5_cqe64 *cqe;
791 	u16 sqcc;
792 	int i;
793 
794 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
795 		return 0;
796 
797 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
798 	if (likely(!cqe))
799 		return 0;
800 
801 	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
802 	 * otherwise a cq overrun may occur
803 	 */
804 	sqcc = sq->cc;
805 
806 	i = 0;
807 	do {
808 		u16 wqe_counter;
809 		bool last_wqe;
810 
811 		mlx5_cqwq_pop(&cq->wq);
812 
813 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
814 
815 		do {
816 			struct mlx5e_icosq_wqe_info *wi;
817 			u16 ci;
818 
819 			last_wqe = (sqcc == wqe_counter);
820 
821 			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
822 			wi = &sq->db.wqe_info[ci];
823 			sqcc += wi->num_wqebbs;
824 
825 			if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
826 				netdev_WARN_ONCE(cq->netdev,
827 						 "Bad OP in ICOSQ CQE: 0x%x\n",
828 						 get_cqe_opcode(cqe));
829 #ifdef CONFIG_MLX5_EN_TLS
830 				if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS)
831 					mlx5e_ktls_rx_resync_async_request_cancel(wi);
832 #endif
833 				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
834 						     (struct mlx5_err_cqe *)cqe);
835 				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
836 				if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
837 					queue_work(cq->workqueue, &sq->recover_work);
838 				break;
839 			}
840 
841 			switch (wi->wqe_type) {
842 			case MLX5E_ICOSQ_WQE_UMR_RX:
843 				wi->umr.rq->mpwqe.umr_completed++;
844 				break;
845 			case MLX5E_ICOSQ_WQE_NOP:
846 				break;
847 #ifdef CONFIG_MLX5_EN_TLS
848 			case MLX5E_ICOSQ_WQE_UMR_TLS:
849 				break;
850 			case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
851 				mlx5e_ktls_handle_ctx_completion(wi);
852 				break;
853 			case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
854 				mlx5e_ktls_handle_get_psv_completion(wi, sq);
855 				break;
856 #endif
857 			default:
858 				netdev_WARN_ONCE(cq->netdev,
859 						 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
860 						 wi->wqe_type);
861 			}
862 		} while (!last_wqe);
863 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
864 
865 	sq->cc = sqcc;
866 
867 	mlx5_cqwq_update_db_record(&cq->wq);
868 
869 	return i;
870 }
871 
872 static void mlx5e_reclaim_mpwqe_pages(struct mlx5e_rq *rq, int head,
873 				      int reclaim)
874 {
875 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
876 
877 	for (int i = 0; i < reclaim; i++) {
878 		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
879 
880 		mlx5e_dealloc_rx_mpwqe(rq, head);
881 	}
882 }
883 
884 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
885 {
886 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
887 	u8  umr_completed = rq->mpwqe.umr_completed;
888 	struct mlx5e_icosq *sq = rq->icosq;
889 	bool reclaimed = false;
890 	int alloc_err = 0;
891 	u8  missing, i;
892 	u16 head;
893 
894 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
895 		return false;
896 
897 	if (umr_completed) {
898 		mlx5e_post_rx_mpwqe(rq, umr_completed);
899 		rq->mpwqe.umr_in_progress -= umr_completed;
900 		rq->mpwqe.umr_completed = 0;
901 	}
902 
903 	missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
904 
905 	if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
906 		rq->stats->congst_umr++;
907 
908 	if (likely(missing < rq->mpwqe.min_wqe_bulk))
909 		return false;
910 
911 	if (rq->page_pool)
912 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
913 	if (rq->hd_page_pool)
914 		page_pool_nid_changed(rq->hd_page_pool, numa_mem_id());
915 
916 	head = rq->mpwqe.actual_wq_head;
917 	i = missing;
918 	do {
919 		struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);
920 
921 		/* Deferred free for better page pool cache usage. */
922 		mlx5e_free_rx_mpwqe(rq, wi);
923 
924 retry:
925 		alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
926 					   mlx5e_alloc_rx_mpwqe(rq, head);
927 		if (unlikely(alloc_err)) {
928 			int reclaim = i - 1;
929 
930 			if (reclaimed || !reclaim)
931 				break;
932 
933 			mlx5e_reclaim_mpwqe_pages(rq, head, reclaim);
934 			reclaimed = true;
935 
936 			goto retry;
937 		}
938 		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
939 	} while (--i);
940 
941 	rq->mpwqe.umr_last_bulk    = missing - i;
942 	if (sq->doorbell_cseg) {
943 		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
944 		sq->doorbell_cseg = NULL;
945 	}
946 
947 	rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
948 	rq->mpwqe.actual_wq_head   = head;
949 
950 	/* If XSK Fill Ring doesn't have enough frames, report the error, so
951 	 * that one of the actions can be performed:
952 	 * 1. If need_wakeup is used, signal that the application has to kick
953 	 * the driver when it refills the Fill Ring.
954 	 * 2. Otherwise, busy poll by rescheduling the NAPI poll.
955 	 */
956 	if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
957 		return true;
958 
959 	return false;
960 }
961 
962 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
963 {
964 	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
965 	u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
966 			 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
967 
968 	tcp->check                      = 0;
969 	tcp->psh                        = get_cqe_lro_tcppsh(cqe);
970 
971 	if (tcp_ack) {
972 		tcp->ack                = 1;
973 		tcp->ack_seq            = cqe->lro.ack_seq_num;
974 		tcp->window             = cqe->lro.tcp_win;
975 	}
976 }
977 
978 static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
979 					 struct mlx5_cqe64 *cqe,
980 					 u32 cqe_bcnt)
981 {
982 	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
983 	struct tcphdr	*tcp;
984 	int network_depth = 0;
985 	__wsum check;
986 	__be16 proto;
987 	u16 tot_len;
988 	void *ip_p;
989 
990 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
991 
992 	tot_len = cqe_bcnt - network_depth;
993 	ip_p = skb->data + network_depth;
994 
995 	if (proto == htons(ETH_P_IP)) {
996 		struct iphdr *ipv4 = ip_p;
997 
998 		tcp = ip_p + sizeof(struct iphdr);
999 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1000 
1001 		ipv4->ttl               = cqe->lro.min_ttl;
1002 		ipv4->tot_len           = cpu_to_be16(tot_len);
1003 		ipv4->check             = 0;
1004 		ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
1005 						       ipv4->ihl);
1006 
1007 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
1008 		check = csum_partial(tcp, tcp->doff * 4,
1009 				     csum_unfold((__force __sum16)cqe->check_sum));
1010 		/* Almost done, don't forget the pseudo header */
1011 		tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr),
1012 					  ipv4->saddr, ipv4->daddr, check);
1013 	} else {
1014 		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
1015 		struct ipv6hdr *ipv6 = ip_p;
1016 
1017 		tcp = ip_p + sizeof(struct ipv6hdr);
1018 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1019 
1020 		ipv6->hop_limit         = cqe->lro.min_ttl;
1021 		ipv6->payload_len       = cpu_to_be16(payload_len);
1022 
1023 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
1024 		check = csum_partial(tcp, tcp->doff * 4,
1025 				     csum_unfold((__force __sum16)cqe->check_sum));
1026 		/* Almost done, don't forget the pseudo header */
1027 		tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
1028 					  &ipv6->daddr, check);
1029 	}
1030 
1031 	return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
1032 }
1033 
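/* Editor's note -- illustrative sketch, not part of the upstream source:
 * for an LRO CQE the headers of the aggregated super-frame are rewritten in
 * software: total/payload length and TTL come from the CQE, and the TCP
 * checksum is rebuilt from the HW checksum over the TCP header plus a fresh
 * pseudo-header, e.g. for IPv4:
 *
 *	check      = csum_partial(tcp, tcp->doff * 4,
 *				  csum_unfold((__force __sum16)cqe->check_sum));
 *	tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr),
 *				  ipv4->saddr, ipv4->daddr, check);
 *
 * The returned header length is later used to derive gso_size for the skb.
 */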
1034 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
1035 {
1036 	int udp_off = rq->hw_gro_data->fk.control.thoff;
1037 	struct sk_buff *skb = rq->hw_gro_data->skb;
1038 	struct udphdr *uh;
1039 
1040 	uh = (struct udphdr *)(skb->data + udp_off);
1041 	uh->len = htons(skb->len - udp_off);
1042 
1043 	if (uh->check)
1044 		uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr,
1045 					  ipv4->daddr, 0);
1046 
1047 	skb->csum_start = (unsigned char *)uh - skb->head;
1048 	skb->csum_offset = offsetof(struct udphdr, check);
1049 
1050 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1051 }
1052 
1053 static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
1054 {
1055 	int udp_off = rq->hw_gro_data->fk.control.thoff;
1056 	struct sk_buff *skb = rq->hw_gro_data->skb;
1057 	struct udphdr *uh;
1058 
1059 	uh = (struct udphdr *)(skb->data + udp_off);
1060 	uh->len = htons(skb->len - udp_off);
1061 
1062 	if (uh->check)
1063 		uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr,
1064 					  &ipv6->daddr, 0);
1065 
1066 	skb->csum_start = (unsigned char *)uh - skb->head;
1067 	skb->csum_offset = offsetof(struct udphdr, check);
1068 
1069 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1070 }
1071 
1072 static void mlx5e_shampo_get_hd_buf_info(struct mlx5e_rq *rq,
1073 					 struct mlx5_cqe64 *cqe,
1074 					 struct mlx5e_dma_info **di,
1075 					 u32 *head_offset)
1076 {
1077 	u32 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
1078 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
1079 	u32 di_index;
1080 
1081 	di_index = header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
1082 	*di = &shampo->hd_buf_pages[di_index];
1083 	*head_offset = (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) *
1084 		       BIT(MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE);
1085 }
1086 
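/* Editor's note -- illustrative sketch, not part of the upstream source:
 * SHAMPO packs many fixed-size header entries into each header page, so a
 * CQE header index is split into a page index and an offset within the page:
 *
 *	page   = header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
 *	offset = (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) *
 *		 BIT(MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE);
 */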
1087 static void *mlx5e_shampo_get_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1088 				  int len)
1089 {
1090 	struct mlx5e_dma_info *di;
1091 	u32 head_offset;
1092 
1093 	mlx5e_shampo_get_hd_buf_info(rq, cqe, &di, &head_offset);
1094 
1095 	dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
1096 				      len, rq->buff.map_dir);
1097 
1098 	return page_address(di->page) + head_offset;
1099 }
1100 
1101 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1102 					      struct tcphdr *skb_tcp_hd)
1103 {
1104 	int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
1105 	int len = nhoff + sizeof(struct tcphdr);
1106 	struct tcphdr *last_tcp_hd;
1107 	void *last_hd_addr;
1108 
1109 	last_hd_addr = mlx5e_shampo_get_hdr(rq, cqe, len);
1110 	last_tcp_hd = (struct tcphdr *)(last_hd_addr + nhoff);
1111 
1112 	tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
1113 }
1114 
1115 static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
1116 					     struct mlx5_cqe64 *cqe, bool match)
1117 {
1118 	int tcp_off = rq->hw_gro_data->fk.control.thoff;
1119 	struct sk_buff *skb = rq->hw_gro_data->skb;
1120 	struct tcphdr *tcp;
1121 
1122 	tcp = (struct tcphdr *)(skb->data + tcp_off);
1123 	if (match)
1124 		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1125 
1126 	tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
1127 				   ipv4->daddr, 0);
1128 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
1129 	if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) {
1130 		bool encap = rq->hw_gro_data->fk.control.flags & FLOW_DIS_ENCAPSULATION;
1131 
1132 		skb_shinfo(skb)->gso_type |= encap ? SKB_GSO_TCP_FIXEDID_INNER :
1133 						     SKB_GSO_TCP_FIXEDID;
1134 	}
1135 
1136 	skb->csum_start = (unsigned char *)tcp - skb->head;
1137 	skb->csum_offset = offsetof(struct tcphdr, check);
1138 
1139 	if (tcp->cwr)
1140 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1141 }
1142 
1143 static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
1144 					     struct mlx5_cqe64 *cqe, bool match)
1145 {
1146 	int tcp_off = rq->hw_gro_data->fk.control.thoff;
1147 	struct sk_buff *skb = rq->hw_gro_data->skb;
1148 	struct tcphdr *tcp;
1149 
1150 	tcp = (struct tcphdr *)(skb->data + tcp_off);
1151 	if (match)
1152 		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1153 
1154 	tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr,
1155 				   &ipv6->daddr, 0);
1156 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
1157 	skb->csum_start = (unsigned char *)tcp - skb->head;
1158 	skb->csum_offset = offsetof(struct tcphdr, check);
1159 
1160 	if (tcp->cwr)
1161 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1162 }
1163 
1164 static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
1165 {
1166 	bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
1167 	struct sk_buff *skb = rq->hw_gro_data->skb;
1168 
1169 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
1170 	skb->ip_summed = CHECKSUM_PARTIAL;
1171 
1172 	if (is_ipv4) {
1173 		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
1174 		struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff);
1175 		__be16 newlen = htons(skb->len - nhoff);
1176 
1177 		csum_replace2(&ipv4->check, ipv4->tot_len, newlen);
1178 		ipv4->tot_len = newlen;
1179 
1180 		if (ipv4->protocol == IPPROTO_TCP)
1181 			mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
1182 		else
1183 			mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
1184 	} else {
1185 		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
1186 		struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff);
1187 
1188 		ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6));
1189 
1190 		if (ipv6->nexthdr == IPPROTO_TCP)
1191 			mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
1192 		else
1193 			mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
1194 	}
1195 }
1196 
1197 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
1198 				      struct sk_buff *skb)
1199 {
1200 	u8 cht = cqe->rss_hash_type;
1201 	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
1202 		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
1203 					    PKT_HASH_TYPE_NONE;
1204 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
1205 }
1206 
1207 static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
1208 					__be16 *proto)
1209 {
1210 	*proto = ((struct ethhdr *)skb->data)->h_proto;
1211 	*proto = __vlan_get_protocol(skb, *proto, network_depth);
1212 
1213 	if (*proto == htons(ETH_P_IP))
1214 		return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
1215 
1216 	if (*proto == htons(ETH_P_IPV6))
1217 		return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
1218 
1219 	return false;
1220 }
1221 
1222 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
1223 {
1224 	int network_depth = 0;
1225 	__be16 proto;
1226 	void *ip;
1227 	int rc;
1228 
1229 	if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
1230 		return;
1231 
1232 	ip = skb->data + network_depth;
1233 	rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
1234 					 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
1235 
1236 	rq->stats->ecn_mark += !!rc;
1237 }
1238 
1239 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
1240 {
1241 	void *ip_p = skb->data + network_depth;
1242 
1243 	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
1244 					    ((struct ipv6hdr *)ip_p)->nexthdr;
1245 }
1246 
1247 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
1248 
1249 #define MAX_PADDING 8
1250 
1251 static void
1252 tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
1253 		       struct mlx5e_rq_stats *stats)
1254 {
1255 	stats->csum_complete_tail_slow++;
1256 	skb->csum = csum_block_add(skb->csum,
1257 				   skb_checksum(skb, offset, len, 0),
1258 				   offset);
1259 }
1260 
1261 static void
1262 tail_padding_csum(struct sk_buff *skb, int offset,
1263 		  struct mlx5e_rq_stats *stats)
1264 {
1265 	u8 tail_padding[MAX_PADDING];
1266 	int len = skb->len - offset;
1267 	void *tail;
1268 
1269 	if (unlikely(len > MAX_PADDING)) {
1270 		tail_padding_csum_slow(skb, offset, len, stats);
1271 		return;
1272 	}
1273 
1274 	tail = skb_header_pointer(skb, offset, len, tail_padding);
1275 	if (unlikely(!tail)) {
1276 		tail_padding_csum_slow(skb, offset, len, stats);
1277 		return;
1278 	}
1279 
1280 	stats->csum_complete_tail++;
1281 	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
1282 }
1283 
1284 static void
1285 mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
1286 		     struct mlx5e_rq_stats *stats)
1287 {
1288 	struct ipv6hdr *ip6;
1289 	struct iphdr   *ip4;
1290 	int pkt_len;
1291 
1292 	/* Fixup vlan headers, if any */
1293 	if (network_depth > ETH_HLEN)
1294 		/* CQE csum is calculated from the IP header and does
1295 		 * not cover VLAN headers (if present), so add their
1296 		 * checksum manually here.
1297 		 */
1298 		skb->csum = csum_partial(skb->data + ETH_HLEN,
1299 					 network_depth - ETH_HLEN,
1300 					 skb->csum);
1301 
1302 	/* Fixup tail padding, if any */
1303 	switch (proto) {
1304 	case htons(ETH_P_IP):
1305 		ip4 = (struct iphdr *)(skb->data + network_depth);
1306 		pkt_len = network_depth + ntohs(ip4->tot_len);
1307 		break;
1308 	case htons(ETH_P_IPV6):
1309 		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
1310 		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
1311 		break;
1312 	default:
1313 		return;
1314 	}
1315 
1316 	if (likely(pkt_len >= skb->len))
1317 		return;
1318 
1319 	tail_padding_csum(skb, pkt_len, stats);
1320 }
1321 
1322 static inline void mlx5e_handle_csum(struct net_device *netdev,
1323 				     struct mlx5_cqe64 *cqe,
1324 				     struct mlx5e_rq *rq,
1325 				     struct sk_buff *skb,
1326 				     bool   lro)
1327 {
1328 	struct mlx5e_rq_stats *stats = rq->stats;
1329 	int network_depth = 0;
1330 	__be16 proto;
1331 
1332 	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
1333 		goto csum_none;
1334 
1335 	if (lro) {
1336 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1337 		stats->csum_unnecessary++;
1338 		return;
1339 	}
1340 
1341 	/* True when explicitly set via priv flag, or XDP prog is loaded */
1342 	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
1343 	    get_cqe_tls_offload(cqe))
1344 		goto csum_unnecessary;
1345 
1346 	/* CQE csum doesn't cover padding octets in short ethernet
1347 	 * frames. And the pad field is appended prior to calculating
1348 	 * and appending the FCS field.
1349 	 *
1350 	 * Detecting these padded frames requires verifying and parsing
1351 	 * IP headers, so we simply force all those small frames to be
1352 	 * CHECKSUM_UNNECESSARY even if they are not padded.
1353 	 */
1354 	if (short_frame(skb->len))
1355 		goto csum_unnecessary;
1356 
1357 	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
1358 		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
1359 			goto csum_unnecessary;
1360 
1361 		stats->csum_complete++;
1362 		skb->ip_summed = CHECKSUM_COMPLETE;
1363 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1364 
1365 		if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
1366 			/* TBD: PSP csum complete corrections; for now take the csum_unnecessary path */
1367 			goto csum_unnecessary;
1368 		}
1369 
1370 		if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1371 			return; /* CQE csum covers all received bytes */
1372 
1373 		/* csum might need some fixups ...*/
1374 		mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
1375 		return;
1376 	}
1377 
1378 csum_unnecessary:
1379 	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
1380 		   (cqe->hds_ip_ext & CQE_L4_OK))) {
1381 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1382 		if (cqe_is_tunneled(cqe)) {
1383 			skb->csum_level = 1;
1384 			skb->encapsulation = 1;
1385 			stats->csum_unnecessary_inner++;
1386 			return;
1387 		}
1388 		stats->csum_unnecessary++;
1389 		return;
1390 	}
1391 csum_none:
1392 	skb->ip_summed = CHECKSUM_NONE;
1393 	stats->csum_none++;
1394 }
1395 
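/* Editor's note -- illustrative sketch, not part of the upstream source:
 * the checksum decision in mlx5e_handle_csum() boils down to:
 *
 *	LRO                                   -> CHECKSUM_UNNECESSARY
 *	full csum usable (IP, not SCTP, not   -> CHECKSUM_COMPLETE from
 *	  TLS / short frame / priv-flag bypass)  cqe->check_sum, plus VLAN and
 *	                                         tail-padding fixups
 *	CQE reports L3_OK && L4_OK            -> CHECKSUM_UNNECESSARY
 *	otherwise                             -> CHECKSUM_NONE
 *
 * CHECKSUM_COMPLETE is preferred because the stack can then validate inner
 * headers of tunnelled traffic without touching the payload again.
 */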
1396 #define MLX5E_CE_BIT_MASK 0x80
1397 
1398 static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
1399 				      u32 cqe_bcnt,
1400 				      struct mlx5e_rq *rq,
1401 				      struct sk_buff *skb)
1402 {
1403 	u8 lro_num_seg = get_cqe_lro_num_seg(cqe);
1404 	struct mlx5e_rq_stats *stats = rq->stats;
1405 	struct net_device *netdev = rq->netdev;
1406 
1407 	skb->mac_len = ETH_HLEN;
1408 
1409 	if (unlikely(get_cqe_tls_offload(cqe)))
1410 		mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
1411 
1412 	if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
1413 		if (mlx5e_psp_offload_handle_rx_skb(netdev, skb, cqe))
1414 			return true;
1415 	}
1416 
1417 	if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1418 		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
1419 						  be32_to_cpu(cqe->ft_metadata));
1420 
1421 	if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
1422 		mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
1423 
1424 	if (lro_num_seg > 1) {
1425 		unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
1426 
1427 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
1428 		skb_shinfo(skb)->gso_segs = lro_num_seg;
1429 		/* Subtract one since we already counted this as one
1430 		 * "regular" packet in mlx5e_complete_rx_cqe()
1431 		 */
1432 		stats->packets += lro_num_seg - 1;
1433 		stats->lro_packets++;
1434 		stats->lro_bytes += cqe_bcnt;
1435 	}
1436 
1437 	if (unlikely(mlx5e_rx_hw_stamp(rq->hwtstamp_config)))
1438 		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1439 								  rq->clock, get_cqe_ts(cqe));
1440 	skb_record_rx_queue(skb, rq->ix);
1441 
1442 	if (likely(netdev->features & NETIF_F_RXHASH))
1443 		mlx5e_skb_set_hash(cqe, skb);
1444 
1445 	if (cqe_has_vlan(cqe)) {
1446 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1447 				       be16_to_cpu(cqe->vlan_info));
1448 		stats->removed_vlan_packets++;
1449 	}
1450 
1451 	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
1452 
1453 	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1454 	/* checking CE bit in cqe - MSB in ml_path field */
1455 	if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1456 		mlx5e_enable_ecn(rq, skb);
1457 
1458 	skb->protocol = eth_type_trans(skb, netdev);
1459 
1460 	if (unlikely(mlx5e_skb_is_multicast(skb)))
1461 		stats->mcast_packets++;
1462 
1463 	return false;
1464 }
1465 
1466 static bool mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
1467 					 struct mlx5_cqe64 *cqe,
1468 					 u32 cqe_bcnt,
1469 					 struct sk_buff *skb)
1470 {
1471 	struct mlx5e_rq_stats *stats = rq->stats;
1472 
1473 	stats->packets++;
1474 	stats->bytes += cqe_bcnt;
1475 	if (NAPI_GRO_CB(skb)->count != 1)
1476 		return false;
1477 
1478 	if (mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb))
1479 		return true;
1480 
1481 	skb_reset_network_header(skb);
1482 	if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
1483 		napi_gro_receive(rq->cq.napi, skb);
1484 		rq->hw_gro_data->skb = NULL;
1485 	}
1486 	return false;
1487 }
1488 
1489 static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1490 					 struct mlx5_cqe64 *cqe,
1491 					 u32 cqe_bcnt,
1492 					 struct sk_buff *skb)
1493 {
1494 	struct mlx5e_rq_stats *stats = rq->stats;
1495 
1496 	stats->packets++;
1497 	stats->bytes += cqe_bcnt;
1498 	return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1499 }
1500 
1501 static inline
1502 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1503 				       u32 frag_size, u16 headroom,
1504 				       u32 cqe_bcnt, u32 metasize)
1505 {
1506 	struct sk_buff *skb = napi_build_skb(va, frag_size);
1507 
1508 	if (unlikely(!skb)) {
1509 		rq->stats->buff_alloc_err++;
1510 		return NULL;
1511 	}
1512 
1513 	skb_reserve(skb, headroom);
1514 	skb_put(skb, cqe_bcnt);
1515 
1516 	if (metasize)
1517 		skb_metadata_set(skb, metasize);
1518 
1519 	return skb;
1520 }
1521 
1522 static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1523 			     void *va, u16 headroom, u32 frame_sz, u32 len,
1524 			     struct mlx5e_xdp_buff *mxbuf)
1525 {
1526 	xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq);
1527 	xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
1528 	mxbuf->cqe = cqe;
1529 	mxbuf->rq = rq;
1530 }
1531 
1532 static struct sk_buff *
1533 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1534 			  struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1535 {
1536 	struct mlx5e_frag_page *frag_page = wi->frag_page;
1537 	u16 rx_headroom = rq->buff.headroom;
1538 	struct bpf_prog *prog;
1539 	struct sk_buff *skb;
1540 	u32 metasize = 0;
1541 	void *va, *data;
1542 	dma_addr_t addr;
1543 	u32 frag_size;
1544 
1545 	va             = netmem_address(frag_page->netmem) + wi->offset;
1546 	data           = va + rx_headroom;
1547 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1548 
1549 	addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
1550 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1551 				      frag_size, rq->buff.map_dir);
1552 	net_prefetch(data);
1553 
1554 	prog = rcu_dereference(rq->xdp_prog);
1555 	if (prog) {
1556 		struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
1557 
1558 		net_prefetchw(va); /* xdp_frame data area */
1559 		mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1560 				 cqe_bcnt, mxbuf);
1561 		if (mlx5e_xdp_handle(rq, prog, mxbuf))
1562 			return NULL; /* page/packet was consumed by XDP */
1563 
1564 		rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
1565 		metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
1566 		cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
1567 	}
1568 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1569 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
1570 	if (unlikely(!skb))
1571 		return NULL;
1572 
1573 	/* queue up for recycling/reuse */
1574 	skb_mark_for_recycle(skb);
1575 	frag_page->frags++;
1576 
1577 	return skb;
1578 }
1579 
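/* Editor's note -- illustrative sketch, not part of the upstream source:
 * in the linear path the whole packet fits in one fragment, so the skb is
 * built directly on top of the page with no copy:
 *
 *	va -> | headroom | data (cqe_bcnt) | skb_shared_info |
 *	skb = napi_build_skb(va, frag_size);
 *	skb_reserve(skb, headroom); skb_put(skb, cqe_bcnt);
 *
 * When an XDP program runs first, headroom, data length and metadata size are
 * re-read from the xdp_buff, since the program may have adjusted them.
 */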
1580 static struct sk_buff *
1581 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1582 			     struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1583 {
1584 	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1585 	struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
1586 	struct mlx5e_wqe_frag_info *head_wi = wi;
1587 	u16 rx_headroom = rq->buff.headroom;
1588 	struct mlx5e_frag_page *frag_page;
1589 	struct skb_shared_info *sinfo;
1590 	u32 frag_consumed_bytes;
1591 	struct bpf_prog *prog;
1592 	u8 nr_frags_free = 0;
1593 	struct sk_buff *skb;
1594 	dma_addr_t addr;
1595 	u32 truesize;
1596 	void *va;
1597 
1598 	frag_page = wi->frag_page;
1599 
1600 	va = netmem_address(frag_page->netmem) + wi->offset;
1601 	frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1602 
1603 	addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
1604 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1605 				      rq->buff.frame0_sz, rq->buff.map_dir);
1606 	net_prefetchw(va); /* xdp_frame data area */
1607 	net_prefetch(va + rx_headroom);
1608 
1609 	mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1610 			 frag_consumed_bytes, mxbuf);
1611 	sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
1612 	truesize = 0;
1613 
1614 	cqe_bcnt -= frag_consumed_bytes;
1615 	frag_info++;
1616 	wi++;
1617 
1618 	while (cqe_bcnt) {
1619 		frag_page = wi->frag_page;
1620 
1621 		frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1622 
1623 		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
1624 					       frag_page, wi->offset,
1625 					       frag_consumed_bytes);
1626 		truesize += frag_info->frag_stride;
1627 
1628 		cqe_bcnt -= frag_consumed_bytes;
1629 		frag_info++;
1630 		wi++;
1631 	}
1632 
1633 	prog = rcu_dereference(rq->xdp_prog);
1634 	if (prog) {
1635 		u8 old_nr_frags = sinfo->nr_frags;
1636 
1637 		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
1638 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
1639 						 rq->flags)) {
1640 				struct mlx5e_wqe_frag_info *pwi;
1641 
1642 				for (pwi = head_wi; pwi < wi; pwi++)
1643 					pwi->frag_page->frags++;
1644 			}
1645 			return NULL; /* page/packet was consumed by XDP */
1646 		}
1647 
1648 		nr_frags_free = old_nr_frags - sinfo->nr_frags;
1649 		if (unlikely(nr_frags_free))
1650 			truesize -= nr_frags_free * frag_info->frag_stride;
1651 	}
1652 
1653 	skb = mlx5e_build_linear_skb(
1654 		rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
1655 		mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
1656 		mxbuf->xdp.data_end - mxbuf->xdp.data,
1657 		mxbuf->xdp.data - mxbuf->xdp.data_meta);
1658 	if (unlikely(!skb))
1659 		return NULL;
1660 
1661 	skb_mark_for_recycle(skb);
1662 	head_wi->frag_page->frags++;
1663 
1664 	if (xdp_buff_has_frags(&mxbuf->xdp)) {
1665 		/* sinfo->nr_frags is reset by build_skb, calculate again. */
1666 		xdp_update_skb_frags_info(skb, wi - head_wi - nr_frags_free - 1,
1667 					  sinfo->xdp_frags_size, truesize,
1668 					  xdp_buff_get_skb_flags(&mxbuf->xdp));
1669 
1670 		for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
1671 			pwi->frag_page->frags++;
1672 	}
1673 
1674 	return skb;
1675 }
1676 
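/* On an error CQE whose syndrome is recoverable, dump the CQE and schedule the
 * RQ recovery work, at most once per recovery cycle.
 */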
1677 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1678 {
1679 	struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1680 	struct mlx5e_priv *priv = rq->priv;
1681 
1682 	if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
1683 	    !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1684 		mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
1685 		queue_work(priv->wq, &rq->recover_work);
1686 	}
1687 }
1688 
1689 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1690 {
1691 	trigger_report(rq, cqe);
1692 	rq->stats->wqe_err++;
1693 }
1694 
1695 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1696 {
1697 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1698 	struct mlx5e_wqe_frag_info *wi;
1699 	struct sk_buff *skb;
1700 	u32 cqe_bcnt;
1701 	u16 ci;
1702 
1703 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1704 	wi       = get_frag(rq, ci);
1705 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1706 
1707 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1708 		mlx5e_handle_rx_err_cqe(rq, cqe);
1709 		goto wq_cyc_pop;
1710 	}
1711 
1712 	skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
1713 			      mlx5e_skb_from_cqe_linear,
1714 			      mlx5e_skb_from_cqe_nonlinear,
1715 			      mlx5e_xsk_skb_from_cqe_linear,
1716 			      rq, wi, cqe, cqe_bcnt);
1717 	if (!skb) {
1718 		/* probably for XDP */
1719 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1720 			wi->frag_page->frags++;
1721 		goto wq_cyc_pop;
1722 	}
1723 
1724 	if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
1725 		goto wq_cyc_pop;
1726 
1727 	if (mlx5e_cqe_regb_chain(cqe))
1728 		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
1729 			dev_kfree_skb_any(skb);
1730 			goto wq_cyc_pop;
1731 		}
1732 
1733 	napi_gro_receive(rq->cq.napi, skb);
1734 
1735 wq_cyc_pop:
1736 	mlx5_wq_cyc_pop(wq);
1737 }
1738 
1739 #ifdef CONFIG_MLX5_ESWITCH
1740 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1741 {
1742 	struct net_device *netdev = rq->netdev;
1743 	struct mlx5e_priv *priv = netdev_priv(netdev);
1744 	struct mlx5e_rep_priv *rpriv  = priv->ppriv;
1745 	struct mlx5_eswitch_rep *rep = rpriv->rep;
1746 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1747 	struct mlx5e_wqe_frag_info *wi;
1748 	struct sk_buff *skb;
1749 	u32 cqe_bcnt;
1750 	u16 ci;
1751 
1752 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1753 	wi       = get_frag(rq, ci);
1754 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1755 
1756 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1757 		mlx5e_handle_rx_err_cqe(rq, cqe);
1758 		goto wq_cyc_pop;
1759 	}
1760 
1761 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1762 			      mlx5e_skb_from_cqe_linear,
1763 			      mlx5e_skb_from_cqe_nonlinear,
1764 			      rq, wi, cqe, cqe_bcnt);
1765 	if (!skb) {
1766 		/* probably for XDP */
1767 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1768 			wi->frag_page->frags++;
1769 		goto wq_cyc_pop;
1770 	}
1771 
1772 	if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
1773 		goto wq_cyc_pop;
1774 
1775 	if (rep->vlan && skb_vlan_tag_present(skb))
1776 		skb_vlan_pop(skb);
1777 
1778 	mlx5e_rep_tc_receive(cqe, rq, skb);
1779 
1780 wq_cyc_pop:
1781 	mlx5_wq_cyc_pop(wq);
1782 }
1783 
1784 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1785 {
1786 	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
1787 	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
1788 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
1789 	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
1790 	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
1791 	u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
1792 	u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
1793 	struct mlx5e_rx_wqe_ll *wqe;
1794 	struct mlx5_wq_ll *wq;
1795 	struct sk_buff *skb;
1796 	u16 cqe_bcnt;
1797 
1798 	wi->consumed_strides += cstrides;
1799 
1800 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1801 		mlx5e_handle_rx_err_cqe(rq, cqe);
1802 		goto mpwrq_cqe_out;
1803 	}
1804 
1805 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1806 		struct mlx5e_rq_stats *stats = rq->stats;
1807 
1808 		stats->mpwqe_filler_cqes++;
1809 		stats->mpwqe_filler_strides += cstrides;
1810 		goto mpwrq_cqe_out;
1811 	}
1812 
1813 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1814 
1815 	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1816 			      mlx5e_skb_from_cqe_mpwrq_linear,
1817 			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
1818 			      rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
1819 	if (!skb)
1820 		goto mpwrq_cqe_out;
1821 
1822 	if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
1823 		goto mpwrq_cqe_out;
1824 
1825 	mlx5e_rep_tc_receive(cqe, rq, skb);
1826 
1827 mpwrq_cqe_out:
1828 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1829 		return;
1830 
1831 	wq  = &rq->mpwqe.wq;
1832 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1833 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1834 }
1835 
1836 const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
1837 	.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
1838 	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1839 };
1840 #endif
1841 
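/* Attach the SHAMPO data part to the skb: walk the frag pages starting at
 * data_offset and add page fragments until data_bcnt bytes are consumed.
 */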
1842 static void
1843 mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
1844 			   struct mlx5e_frag_page *frag_page,
1845 			   u32 data_bcnt, u32 data_offset)
1846 {
1847 	net_prefetchw(skb->data);
1848 
1849 	do {
1850 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
1851 		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
1852 		unsigned int truesize = pg_consumed_bytes;
1853 
1854 		mlx5e_add_skb_frag(rq, skb, frag_page, data_offset,
1855 				   pg_consumed_bytes, truesize);
1856 
1857 		data_bcnt -= pg_consumed_bytes;
1858 		data_offset = 0;
1859 		frag_page++;
1860 	} while (data_bcnt);
1861 }
1862 
1863 static struct sk_buff *
1864 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1865 				   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
1866 				   u32 page_idx)
1867 {
1868 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
1869 	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1870 	struct mlx5e_frag_page *head_page = frag_page;
1871 	struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
1872 	u32 frag_offset    = head_offset;
1873 	u32 byte_cnt       = cqe_bcnt;
1874 	struct skb_shared_info *sinfo;
1875 	unsigned int truesize = 0;
1876 	u32 pg_consumed_bytes;
1877 	struct bpf_prog *prog;
1878 	struct sk_buff *skb;
1879 	u32 linear_frame_sz;
1880 	u16 linear_data_len;
1881 	u16 linear_hr;
1882 	void *va;
1883 
1884 	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
1885 		u8 lro_num_seg = get_cqe_lro_num_seg(cqe);
1886 
1887 		if (lro_num_seg <= 1) {
1888 			rq->stats->oversize_pkts_sw_drop++;
1889 			return NULL;
1890 		}
1891 	}
1892 
1893 	prog = rcu_dereference(rq->xdp_prog);
1894 
1895 	if (prog) {
1896 		/* area for bpf_xdp_[store|load]_bytes */
1897 		net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
1898 		if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
1899 							 &wi->linear_page))) {
1900 			rq->stats->buff_alloc_err++;
1901 			return NULL;
1902 		}
1903 
1904 		va = netmem_address(wi->linear_page.netmem);
1905 		net_prefetchw(va); /* xdp_frame data area */
1906 		linear_hr = XDP_PACKET_HEADROOM;
1907 		linear_data_len = 0;
1908 		linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
1909 	} else {
1910 		skb = napi_alloc_skb(rq->cq.napi,
1911 				     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
1912 		if (unlikely(!skb)) {
1913 			rq->stats->buff_alloc_err++;
1914 			return NULL;
1915 		}
1916 		skb_mark_for_recycle(skb);
1917 		va = skb->head;
1918 		net_prefetchw(va); /* xdp_frame data area */
1919 		net_prefetchw(skb->data);
1920 
1921 		frag_offset += headlen;
1922 		byte_cnt -= headlen;
1923 		linear_hr = skb_headroom(skb);
1924 		linear_data_len = headlen;
1925 		linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
1926 		if (unlikely(frag_offset >= PAGE_SIZE)) {
1927 			frag_page++;
1928 			frag_offset -= PAGE_SIZE;
1929 		}
1930 	}
1931 
1932 	mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz,
1933 			 linear_data_len, mxbuf);
1934 
1935 	sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
1936 
1937 	while (byte_cnt) {
1938 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
1939 		pg_consumed_bytes =
1940 			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
1941 
1942 		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
1943 			truesize += pg_consumed_bytes;
1944 		else
1945 			truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
1946 
1947 		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
1948 					       frag_page, frag_offset,
1949 					       pg_consumed_bytes);
1950 		byte_cnt -= pg_consumed_bytes;
1951 		frag_offset = 0;
1952 		frag_page++;
1953 	}
1954 
1955 	if (prog) {
1956 		u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
1957 		u8 new_nr_frags;
1958 		u32 len;
1959 
1960 		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
1961 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1962 				struct mlx5e_frag_page *pfp;
1963 
1964 				for (pfp = head_page; pfp < frag_page; pfp++)
1965 					pfp->frags++;
1966 
1967 				wi->linear_page.frags++;
1968 			}
1969 			mlx5e_page_release_fragmented(rq->page_pool,
1970 						      &wi->linear_page);
1971 			return NULL; /* page/packet was consumed by XDP */
1972 		}
1973 
1974 		new_nr_frags = sinfo->nr_frags;
1975 		nr_frags_free = old_nr_frags - new_nr_frags;
1976 		if (unlikely(nr_frags_free))
1977 			truesize -= (nr_frags_free - 1) * PAGE_SIZE +
1978 				ALIGN(pg_consumed_bytes,
1979 				      BIT(rq->mpwqe.log_stride_sz));
1980 
1981 		len = mxbuf->xdp.data_end - mxbuf->xdp.data;
1982 
1983 		skb = mlx5e_build_linear_skb(
1984 			rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
1985 			mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
1986 			mxbuf->xdp.data - mxbuf->xdp.data_meta);
1987 		if (unlikely(!skb)) {
1988 			mlx5e_page_release_fragmented(rq->page_pool,
1989 						      &wi->linear_page);
1990 			return NULL;
1991 		}
1992 
1993 		skb_mark_for_recycle(skb);
1994 		wi->linear_page.frags++;
1995 		mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
1996 
1997 		if (xdp_buff_has_frags(&mxbuf->xdp)) {
1998 			struct mlx5e_frag_page *pagep;
1999 
2000 			/* sinfo->nr_frags is reset by build_skb, calculate again. */
2001 			xdp_update_skb_frags_info(skb, new_nr_frags,
2002 						  sinfo->xdp_frags_size,
2003 						  truesize,
2004 						  xdp_buff_get_skb_flags(&mxbuf->xdp));
2005 
2006 			pagep = head_page;
2007 			do
2008 				pagep->frags++;
2009 			while (++pagep < frag_page);
2010 
2011 			headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len,
2012 					skb->data_len);
2013 			__pskb_pull_tail(skb, headlen);
2014 		}
2015 	} else {
2016 		dma_addr_t addr;
2017 
2018 		if (xdp_buff_has_frags(&mxbuf->xdp)) {
2019 			struct mlx5e_frag_page *pagep;
2020 
2021 			xdp_update_skb_frags_info(skb, sinfo->nr_frags,
2022 						  sinfo->xdp_frags_size,
2023 						  truesize,
2024 						  xdp_buff_get_skb_flags(&mxbuf->xdp));
2025 
2026 			pagep = frag_page - sinfo->nr_frags;
2027 			do
2028 				pagep->frags++;
2029 			while (++pagep < frag_page);
2030 		}
2031 		/* copy header */
2032 		addr = page_pool_get_dma_addr_netmem(head_page->netmem);
2033 		mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
2034 				      head_offset, head_offset, headlen);
2035 		/* skb linear part was allocated with headlen and aligned to long */
2036 		skb->tail += headlen;
2037 		skb->len  += headlen;
2038 	}
2039 
2040 	return skb;
2041 }
2042 
2043 static struct sk_buff *
2044 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2045 				struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
2046 				u32 page_idx)
2047 {
2048 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
2049 	u16 rx_headroom = rq->buff.headroom;
2050 	struct bpf_prog *prog;
2051 	struct sk_buff *skb;
2052 	u32 metasize = 0;
2053 	void *va, *data;
2054 	dma_addr_t addr;
2055 	u32 frag_size;
2056 
2057 	/* Check packet size. Note LRO doesn't use linear SKB */
2058 	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
2059 		rq->stats->oversize_pkts_sw_drop++;
2060 		return NULL;
2061 	}
2062 
2063 	va             = netmem_address(frag_page->netmem) + head_offset;
2064 	data           = va + rx_headroom;
2065 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2066 
2067 	addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
2068 	dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
2069 				      frag_size, rq->buff.map_dir);
2070 	net_prefetch(data);
2071 
2072 	prog = rcu_dereference(rq->xdp_prog);
2073 	if (prog) {
2074 		struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
2075 
2076 		net_prefetchw(va); /* xdp_frame data area */
2077 		mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
2078 				 cqe_bcnt, mxbuf);
2079 		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
2080 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
2081 				frag_page->frags++;
2082 			return NULL; /* page/packet was consumed by XDP */
2083 		}
2084 
2085 		rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
2086 		metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
2087 		cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
2088 	}
2089 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2090 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
2091 	if (unlikely(!skb))
2092 		return NULL;
2093 
2094 	/* queue up for recycling/reuse */
2095 	skb_mark_for_recycle(skb);
2096 	frag_page->frags++;
2097 
2098 	return skb;
2099 }
2100 
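/* Build the header part of a SHAMPO packet: allocate a small skb and copy
 * header_size bytes from the header buffer indicated by the CQE.
 */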
2101 static struct sk_buff *
2102 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2103 			  struct mlx5_cqe64 *cqe, u16 header_index)
2104 {
2105 	u16 head_size = cqe->shampo.header_size;
2106 	struct mlx5e_dma_info *di;
2107 	struct sk_buff *skb;
2108 	u32 head_offset;
2109 	int len;
2110 
2111 	len = ALIGN(head_size, sizeof(long));
2112 	skb = napi_alloc_skb(rq->cq.napi, len);
2113 	if (unlikely(!skb)) {
2114 		rq->stats->buff_alloc_err++;
2115 		return NULL;
2116 	}
2117 
2118 	net_prefetchw(skb->data);
2119 
2120 	mlx5e_shampo_get_hd_buf_info(rq, cqe, &di, &head_offset);
2121 	mlx5e_copy_skb_header(rq, skb, page_to_netmem(di->page), di->addr,
2122 			      head_offset, head_offset, len);
2123 	__skb_put(skb, head_size);
2124 
2125 	/* queue up for recycling/reuse */
2126 	skb_mark_for_recycle(skb);
2127 
2128 	return skb;
2129 }
2130 
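/* Round the last fragment's truesize up to a whole stride so skb->truesize
 * reflects the buffer space actually consumed.
 */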
2131 static void
2132 mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
2133 {
2134 	skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
2135 	unsigned int frag_size = skb_frag_size(last_frag);
2136 	unsigned int frag_truesize;
2137 
2138 	frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
2139 	skb->truesize += frag_truesize - frag_size;
2140 }
2141 
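/* Finish the HW-GRO aggregated skb and hand it to the stack: fix up the last
 * fragment's truesize, update GRO stats and headers when more than one
 * segment was merged, then clear the per-RQ GRO state.
 */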
2142 static void
2143 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
2144 {
2145 	struct sk_buff *skb = rq->hw_gro_data->skb;
2146 	struct mlx5e_rq_stats *stats = rq->stats;
2147 	u16 gro_count = NAPI_GRO_CB(skb)->count;
2148 
2149 	if (likely(skb_shinfo(skb)->nr_frags))
2150 		mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
2151 	if (gro_count > 1) {
2152 		stats->gro_skbs++;
2153 		stats->gro_packets += gro_count;
2154 		stats->gro_bytes += skb->data_len + skb_headlen(skb) * gro_count;
2155 
2156 		mlx5e_shampo_update_hdr(rq, cqe, match);
2157 	} else {
2158 		skb_shinfo(skb)->gso_size = 0;
2159 	}
2160 	napi_gro_receive(rq->cq.napi, skb);
2161 	rq->hw_gro_data->skb = NULL;
2162 }
2163 
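/* Check whether data_bcnt more bytes can be merged into the aggregated skb
 * without exceeding GRO_LEGACY_MAX_SIZE.
 */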
2164 static bool
2165 mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
2166 {
2167 	int nr_frags = skb_shinfo(skb)->nr_frags;
2168 
2169 	if (PAGE_SIZE >= GRO_LEGACY_MAX_SIZE)
2170 		return skb->len + data_bcnt <= GRO_LEGACY_MAX_SIZE;
2171 	else
2172 		return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
2173 }
2174 
2175 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2176 {
2177 	u16 data_bcnt		= mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
2178 	u16 header_index	= mlx5e_shampo_get_cqe_header_index(rq, cqe);
2179 	u32 wqe_offset		= be32_to_cpu(cqe->shampo.data_offset);
2180 	u16 cstrides		= mpwrq_get_cqe_consumed_strides(cqe);
2181 	u32 data_offset		= wqe_offset & (PAGE_SIZE - 1);
2182 	u32 cqe_bcnt		= mpwrq_get_cqe_byte_cnt(cqe);
2183 	u16 wqe_id		= be16_to_cpu(cqe->wqe_id);
2184 	u32 page_idx		= wqe_offset >> PAGE_SHIFT;
2185 	u16 head_size		= cqe->shampo.header_size;
2186 	struct sk_buff **skb	= &rq->hw_gro_data->skb;
2187 	bool flush		= cqe->shampo.flush;
2188 	bool match		= cqe->shampo.match;
2189 	struct mlx5e_rq_stats *stats = rq->stats;
2190 	struct mlx5e_rx_wqe_ll *wqe;
2191 	struct mlx5e_mpw_info *wi;
2192 	struct mlx5_wq_ll *wq;
2193 
2194 	wi = mlx5e_get_mpw_info(rq, wqe_id);
2195 	wi->consumed_strides += cstrides;
2196 
2197 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2198 		mlx5e_handle_rx_err_cqe(rq, cqe);
2199 		goto mpwrq_cqe_out;
2200 	}
2201 
2202 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2203 		stats->mpwqe_filler_cqes++;
2204 		stats->mpwqe_filler_strides += cstrides;
2205 		goto mpwrq_cqe_out;
2206 	}
2207 
2208 	if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
2209 		match = false;
2210 		mlx5e_shampo_flush_skb(rq, cqe, match);
2211 	}
2212 
2213 	if (!*skb) {
2214 		if (likely(head_size)) {
2215 			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
2216 		} else {
2217 			struct mlx5e_frag_page *frag_page;
2218 
2219 			frag_page = &wi->alloc_units.frag_pages[page_idx];
2220 			/* Drop packets with header in unreadable data area to
2221 			 * prevent the kernel from touching it.
2222 			 */
2223 			if (unlikely(netmem_is_net_iov(frag_page->netmem)))
2224 				goto mpwrq_cqe_out;
2225 			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
2226 								  cqe_bcnt,
2227 								  data_offset,
2228 								  page_idx);
2229 		}
2230 
2231 		if (unlikely(!*skb))
2232 			goto mpwrq_cqe_out;
2233 
2234 		NAPI_GRO_CB(*skb)->count = 1;
2235 		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
2236 	} else {
2237 		NAPI_GRO_CB(*skb)->count++;
2238 
2239 		if (NAPI_GRO_CB(*skb)->count == 2 &&
2240 		    rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
2241 			int len = ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
2242 			int nhoff = len - sizeof(struct iphdr);
2243 			void *last_hd_addr;
2244 			struct iphdr *iph;
2245 
2246 			last_hd_addr = mlx5e_shampo_get_hdr(rq, cqe, len);
2247 			iph = (struct iphdr *)(last_hd_addr + nhoff);
2248 			rq->hw_gro_data->second_ip_id = ntohs(iph->id);
2249 		}
2250 	}
2251 
2252 	if (likely(head_size)) {
2253 		if (data_bcnt) {
2254 			struct mlx5e_frag_page *frag_page;
2255 
2256 			frag_page = &wi->alloc_units.frag_pages[page_idx];
2257 			mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
2258 		} else {
2259 			stats->hds_nodata_packets++;
2260 			stats->hds_nodata_bytes += head_size;
2261 		}
2262 	} else {
2263 		stats->hds_nosplit_packets++;
2264 		stats->hds_nosplit_bytes += data_bcnt;
2265 	}
2266 
2267 	if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
2268 		*skb = NULL;
2269 		goto mpwrq_cqe_out;
2270 	}
2271 	if (flush && rq->hw_gro_data->skb)
2272 		mlx5e_shampo_flush_skb(rq, cqe, match);
2273 mpwrq_cqe_out:
2274 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2275 		return;
2276 
2277 	if (unlikely(!cstrides))
2278 		return;
2279 
2280 	wq  = &rq->mpwqe.wq;
2281 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2282 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2283 }
2284 
2285 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2286 {
2287 	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
2288 	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
2289 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
2290 	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
2291 	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
2292 	u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
2293 	u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
2294 	struct mlx5e_rx_wqe_ll *wqe;
2295 	struct mlx5_wq_ll *wq;
2296 	struct sk_buff *skb;
2297 	u16 cqe_bcnt;
2298 
2299 	wi->consumed_strides += cstrides;
2300 
2301 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2302 		mlx5e_handle_rx_err_cqe(rq, cqe);
2303 		goto mpwrq_cqe_out;
2304 	}
2305 
2306 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2307 		struct mlx5e_rq_stats *stats = rq->stats;
2308 
2309 		stats->mpwqe_filler_cqes++;
2310 		stats->mpwqe_filler_strides += cstrides;
2311 		goto mpwrq_cqe_out;
2312 	}
2313 
2314 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
2315 
2316 	skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
2317 			      mlx5e_skb_from_cqe_mpwrq_linear,
2318 			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
2319 			      mlx5e_xsk_skb_from_cqe_mpwrq_linear,
2320 			      rq, wi, cqe, cqe_bcnt, head_offset,
2321 			      page_idx);
2322 	if (!skb)
2323 		goto mpwrq_cqe_out;
2324 
2325 	if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
2326 		goto mpwrq_cqe_out;
2327 
2328 	if (mlx5e_cqe_regb_chain(cqe))
2329 		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
2330 			dev_kfree_skb_any(skb);
2331 			goto mpwrq_cqe_out;
2332 		}
2333 
2334 	napi_gro_receive(rq->cq.napi, skb);
2335 
2336 mpwrq_cqe_out:
2337 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2338 		return;
2339 
2340 	wq  = &rq->mpwqe.wq;
2341 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2342 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2343 }
2344 
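/* Poll the RX CQ when enhanced CQE compression is enabled: compressed mini-CQE
 * blocks are expanded against the most recent title CQE, regular CQEs are
 * handled directly, and a trailing title CQE is kept for the next poll cycle.
 */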
2345 static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
2346 						 struct mlx5_cqwq *cqwq,
2347 						 int budget_rem)
2348 {
2349 	struct mlx5_cqe64 *cqe, *title_cqe = NULL;
2350 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
2351 	int work_done = 0;
2352 
2353 	cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq);
2354 	if (!cqe)
2355 		return work_done;
2356 
2357 	if (cqd->last_cqe_title &&
2358 	    (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
2359 		rq->stats->cqe_compress_blks++;
2360 		cqd->last_cqe_title = false;
2361 	}
2362 
2363 	do {
2364 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2365 			if (title_cqe) {
2366 				mlx5e_read_enhanced_title_slot(rq, title_cqe);
2367 				title_cqe = NULL;
2368 				rq->stats->cqe_compress_blks++;
2369 			}
2370 			work_done +=
2371 				mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
2372 							      budget_rem - work_done);
2373 			continue;
2374 		}
2375 		title_cqe = cqe;
2376 		mlx5_cqwq_pop(cqwq);
2377 
2378 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2379 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2380 				rq, cqe);
2381 		work_done++;
2382 	} while (work_done < budget_rem &&
2383 		 (cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq)));
2384 
2385 	/* the last CQE might be the title CQE for the next poll cycle */
2386 	if (title_cqe) {
2387 		mlx5e_read_enhanced_title_slot(rq, title_cqe);
2388 		cqd->last_cqe_title = true;
2389 	}
2390 
2391 	return work_done;
2392 }
2393 
2394 static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
2395 					      struct mlx5_cqwq *cqwq,
2396 					      int budget_rem)
2397 {
2398 	struct mlx5_cqe64 *cqe;
2399 	int work_done = 0;
2400 
2401 	if (rq->cqd.left)
2402 		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
2403 
2404 	while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
2405 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2406 			work_done +=
2407 				mlx5e_decompress_cqes_start(rq, cqwq,
2408 							    budget_rem - work_done);
2409 			continue;
2410 		}
2411 
2412 		mlx5_cqwq_pop(cqwq);
2413 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2414 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2415 				rq, cqe);
2416 		work_done++;
2417 	}
2418 
2419 	return work_done;
2420 }
2421 
2422 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
2423 {
2424 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
2425 	struct mlx5_cqwq *cqwq = &cq->wq;
2426 	int work_done;
2427 
2428 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
2429 		return 0;
2430 
2431 	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
2432 		work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
2433 								  budget);
2434 	else
2435 		work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
2436 							       budget);
2437 
2438 	if (work_done == 0)
2439 		return 0;
2440 
2441 	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
2442 		mlx5e_shampo_flush_skb(rq, NULL, false);
2443 
2444 	if (rcu_access_pointer(rq->xdp_prog))
2445 		mlx5e_xdp_rx_poll_complete(rq);
2446 
2447 	mlx5_cqwq_update_db_record(cqwq);
2448 
2449 	/* ensure cq space is freed before enabling more cqes */
2450 	wmb();
2451 
2452 	return work_done;
2453 }
2454 
2455 #ifdef CONFIG_MLX5_CORE_IPOIB
2456 
2457 #define MLX5_IB_GRH_SGID_OFFSET 8
2458 #define MLX5_IB_GRH_DGID_OFFSET 24
2459 #define MLX5_GID_SIZE           16
2460 
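/* IPoIB RX completion: resolve the child netdev from the QPN, derive pkt_type
 * from the GRH DGID, drop multicast echoes of locally sent packets, strip the
 * GRH and prepend the IPoIB pseudo header before delivery.
 */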
2461 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
2462 					 struct mlx5_cqe64 *cqe,
2463 					 u32 cqe_bcnt,
2464 					 struct sk_buff *skb)
2465 {
2466 	struct mlx5e_rq_stats *stats;
2467 	struct net_device *netdev;
2468 	struct mlx5e_priv *priv;
2469 	char *pseudo_header;
2470 	u32 flags_rqpn;
2471 	u32 qpn;
2472 	u8 *dgid;
2473 	u8 g;
2474 
2475 	qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
2476 	netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
2477 
2478 	/* No mapping present, cannot process SKB. This might happen if a child
2479 	 * interface is going down while having unprocessed CQEs on parent RQ
2480 	 */
2481 	if (unlikely(!netdev)) {
2482 		/* TODO: add drop counters support */
2483 		skb->dev = NULL;
2484 		pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
2485 		return;
2486 	}
2487 
2488 	priv = mlx5i_epriv(netdev);
2489 	stats = &priv->channel_stats[rq->ix]->rq;
2490 
2491 	flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
2492 	g = (flags_rqpn >> 28) & 3;
2493 	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
2494 	if ((!g) || dgid[0] != 0xff)
2495 		skb->pkt_type = PACKET_HOST;
2496 	else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
2497 		skb->pkt_type = PACKET_BROADCAST;
2498 	else
2499 		skb->pkt_type = PACKET_MULTICAST;
2500 
2501 	/* Drop packets that this interface sent, i.e. multicast packets
2502 	 * that the HCA has replicated.
2503 	 */
2504 	if (g && (qpn == (flags_rqpn & 0xffffff)) &&
2505 	    (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
2506 		    MLX5_GID_SIZE) == 0)) {
2507 		skb->dev = NULL;
2508 		return;
2509 	}
2510 
2511 	skb_pull(skb, MLX5_IB_GRH_BYTES);
2512 
2513 	skb->protocol = *((__be16 *)(skb->data));
2514 
2515 	if (netdev->features & NETIF_F_RXCSUM) {
2516 		skb->ip_summed = CHECKSUM_COMPLETE;
2517 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
2518 		stats->csum_complete++;
2519 	} else {
2520 		skb->ip_summed = CHECKSUM_NONE;
2521 		stats->csum_none++;
2522 	}
2523 
2524 	if (unlikely(mlx5e_rx_hw_stamp(&priv->hwtstamp_config)))
2525 		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
2526 								  rq->clock, get_cqe_ts(cqe));
2527 	skb_record_rx_queue(skb, rq->ix);
2528 
2529 	if (likely(netdev->features & NETIF_F_RXHASH))
2530 		mlx5e_skb_set_hash(cqe, skb);
2531 
2532 	/* push the 20-byte IPoIB pseudo header; the 4-byte encap header already exists */
2533 	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
2534 	memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
2535 	skb_reset_mac_header(skb);
2536 	skb_pull(skb, MLX5_IPOIB_HARD_LEN);
2537 
2538 	skb->dev = netdev;
2539 
2540 	stats->packets++;
2541 	stats->bytes += cqe_bcnt;
2542 }
2543 
2544 static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2545 {
2546 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2547 	struct mlx5e_wqe_frag_info *wi;
2548 	struct sk_buff *skb;
2549 	u32 cqe_bcnt;
2550 	u16 ci;
2551 
2552 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2553 	wi       = get_frag(rq, ci);
2554 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2555 
2556 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2557 		rq->stats->wqe_err++;
2558 		goto wq_cyc_pop;
2559 	}
2560 
2561 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
2562 			      mlx5e_skb_from_cqe_linear,
2563 			      mlx5e_skb_from_cqe_nonlinear,
2564 			      rq, wi, cqe, cqe_bcnt);
2565 	if (!skb)
2566 		goto wq_cyc_pop;
2567 
2568 	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2569 	if (unlikely(!skb->dev)) {
2570 		dev_kfree_skb_any(skb);
2571 		goto wq_cyc_pop;
2572 	}
2573 	napi_gro_receive(rq->cq.napi, skb);
2574 
2575 wq_cyc_pop:
2576 	mlx5_wq_cyc_pop(wq);
2577 }
2578 
2579 const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
2580 	.handle_rx_cqe       = mlx5i_handle_rx_cqe,
2581 	.handle_rx_cqe_mpwqe = NULL, /* Not supported */
2582 };
2583 #endif /* CONFIG_MLX5_CORE_IPOIB */
2584 
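/* Select the RX SKB-build and CQE handlers for this RQ based on WQ type
 * (striding vs. cyclic), XSK, linear vs. non-linear layout and the packet
 * merge (SHAMPO) mode.
 */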
2585 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
2586 {
2587 	struct net_device *netdev = rq->netdev;
2588 	struct mlx5_core_dev *mdev = rq->mdev;
2589 	struct mlx5e_priv *priv = rq->priv;
2590 
2591 	switch (rq->wq_type) {
2592 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2593 		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
2594 			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
2595 			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
2596 				mlx5e_skb_from_cqe_mpwrq_linear :
2597 				mlx5e_skb_from_cqe_mpwrq_nonlinear;
2598 		rq->post_wqes = mlx5e_post_rx_mpwqes;
2599 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
2600 
2601 		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
2602 			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
2603 			if (!rq->handle_rx_cqe) {
2604 				netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
2605 				return -EINVAL;
2606 			}
2607 		} else {
2608 			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
2609 			if (!rq->handle_rx_cqe) {
2610 				netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
2611 				return -EINVAL;
2612 			}
2613 		}
2614 
2615 		break;
2616 	default: /* MLX5_WQ_TYPE_CYCLIC */
2617 		rq->wqe.skb_from_cqe = xsk ?
2618 			mlx5e_xsk_skb_from_cqe_linear :
2619 			mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
2620 				mlx5e_skb_from_cqe_linear :
2621 				mlx5e_skb_from_cqe_nonlinear;
2622 		rq->post_wqes = mlx5e_post_rx_wqes;
2623 		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2624 		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
2625 		if (!rq->handle_rx_cqe) {
2626 			netdev_err(netdev, "RX handler of RQ is not set\n");
2627 			return -EINVAL;
2628 		}
2629 	}
2630 
2631 	return 0;
2632 }
2633 
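/* Trap RQ handler: build the skb, restore the Ethernet header and report the
 * packet to devlink with its trap ID instead of delivering it to the stack.
 */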
2634 static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2635 {
2636 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2637 	struct mlx5e_wqe_frag_info *wi;
2638 	struct sk_buff *skb;
2639 	u32 cqe_bcnt;
2640 	u16 trap_id;
2641 	u16 ci;
2642 
2643 	trap_id  = get_cqe_flow_tag(cqe);
2644 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2645 	wi       = get_frag(rq, ci);
2646 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2647 
2648 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2649 		rq->stats->wqe_err++;
2650 		goto wq_cyc_pop;
2651 	}
2652 
2653 	skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
2654 	if (!skb)
2655 		goto wq_cyc_pop;
2656 
2657 	if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
2658 		goto wq_cyc_pop;
2659 	skb_push(skb, ETH_HLEN);
2660 
2661 	mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
2662 				 rq->netdev->devlink_port);
2663 	dev_kfree_skb_any(skb);
2664 
2665 wq_cyc_pop:
2666 	mlx5_wq_cyc_pop(wq);
2667 }
2668 
2669 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
2670 {
2671 	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
2672 			       mlx5e_skb_from_cqe_linear :
2673 			       mlx5e_skb_from_cqe_nonlinear;
2674 	rq->post_wqes = mlx5e_post_rx_wqes;
2675 	rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2676 	rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
2677 }
2678