/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"
#include <linux/indirect_call_wrapper.h>
#include <net/ip6_checksum.h>
#include <net/tcp.h>

#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

/* IPSEC inline data includes:
 * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
 *    next header.
 * 2. ESP authentication data: 16 bytes for ICV.
 */
#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
					   255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)

/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
 * encapsulations.
 */
#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
					    MLX5_SEND_WQE_DS)

/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
					 MLX5E_MAX_TX_INLINE_DS + \
					 MLX5E_MAX_TX_IPSEC_DS + \
					 MAX_SKB_FRAGS + 1, \
					 MLX5_SEND_WQEBB_NUM_DS)

#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)

#define MLX5E_KSM_UMR_WQE_SZ(sgl_len)\
	(sizeof(struct mlx5e_umr_wqe) +\
	(sizeof(struct mlx5_ksm) * (sgl_len)))

#define MLX5E_KSM_UMR_WQEBBS(ksm_entries) \
	(DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_BB))

#define MLX5E_KSM_UMR_DS_CNT(ksm_entries)\
	(DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_DS))

#define MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size)\
	(((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_ksm))

#define MLX5E_KSM_ENTRIES_PER_WQE(wqe_size)\
	ALIGN_DOWN(MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)

#define MLX5E_MAX_KSM_PER_WQE(mdev) \
	MLX5E_KSM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))

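/* Convert a raw CQE timestamp to nanoseconds using the clock's conversion
 * callback. INDIRECT_CALL_2() lets the compiler branch directly to the two
 * known implementations (real-time and free-running timecounter) instead of
 * taking an indirect call.
 */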
static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
	return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
			       clock, cqe_ts);
}

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
	MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
#ifdef CONFIG_MLX5_EN_TLS
	MLX5E_ICOSQ_WQE_UMR_TLS,
	MLX5E_ICOSQ_WQE_SET_PSV_TLS,
	MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};

/* General */
static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
{
	return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
}

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);

/* RX */
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq);

static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
	return config->rx_filter == HWTSTAMP_FILTER_ALL;
}

/* TX */
struct mlx5e_xmit_data {
	dma_addr_t dma_addr;
	void *data;
	u32 len : 31;
	u32 has_frags : 1;
};

struct mlx5e_xmit_data_frags {
	struct mlx5e_xmit_data xd;
	struct skb_shared_info *sinfo;
	dma_addr_t *dma_arr;
};

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

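/* True while at least one slot is free in the SKB fifo: the number of
 * outstanding entries (pc - cc, computed on free-running u16 counters) may
 * not exceed the fifo mask.
 */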
static inline bool
mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
{
	return (u16)(*fifo->pc - *fifo->cc) <= fifo->mask;
}

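/* True if at least @n WQEBBs are free in the cyclic work queue. Free space is
 * (cc - pc) modulo the ring size; an empty ring (cc == pc) maps to 0 under the
 * modulo, so it is handled explicitly.
 */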
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

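/* Return the WQE at producer index @pi, zeroed so the caller can fill it in. */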
static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

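/* Post a single-WQEBB NOP at the current producer position: zero the control
 * segment, encode the NOP opcode with a DS count of 1, and advance *pc.
 * mlx5e_post_nop_fence() additionally sets the small initiator fence.
 */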
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
	u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

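/* Return a producer index at which @size contiguous WQEBBs are available. If
 * fewer WQEBBs remain before the SQ fragment edge, pad them with NOPs (counted
 * in the nop statistic) so that no WQE wraps across two pages.
 */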
static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

static inline u16 mlx5e_txqsq_get_next_pi_anysize(struct mlx5e_txqsq *sq,
						  u16 *size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	*size = min_t(u16, contig_wqebbs, sq->max_sq_mpw_wqebbs);

	return pi;
}

void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);

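/* Extract the SHAMPO header entry index from the CQE. The header queue size
 * (hd_per_wq) is expected to be a power of two, so masking with
 * (hd_per_wq - 1) wraps the index into the valid range.
 */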
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
}

struct mlx5e_shampo_umr {
	u16 len;
};

struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
		struct mlx5e_shampo_umr shampo;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params;
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params;
#endif
	};
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);

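/* ICOSQ counterpart of mlx5e_txqsq_get_next_pi(): pad up to the fragment edge
 * with NOP wqe_info entries so a WQE never wraps across two pages.
 */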
static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

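/* Ring the SQ doorbell for the WQE described by @ctrl: request a CQE for it,
 * publish the new producer counter in the doorbell record, then write the
 * control segment to the device UAR. The two barriers keep the WQE, the
 * doorbell record and the UAR write ordered as seen by the device.
 */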
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

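/* Per-SQ fifo of TX DMA mappings. Each mapped buffer is recorded at xmit time
 * via one of the push helpers below and released again via
 * mlx5e_tx_dma_unmap().
 */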
static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push_single(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = MLX5E_DMA_MAP_SINGLE;
}

static inline void
mlx5e_dma_push_netmem(struct mlx5e_txqsq *sq, netmem_ref netmem,
		      dma_addr_t addr, u32 size)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	netmem_dma_unmap_addr_set(netmem, dma, addr, addr);
	dma->size = size;
	dma->type = MLX5E_DMA_MAP_PAGE;
}

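/* Simple power-of-two ring of SKB pointers, indexed by free-running
 * producer/consumer counters: pushed on the TX path and popped on completion.
 */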
static inline
struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{
	return &fifo->fifo[i & fifo->mask];
}

static inline
void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{
	struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);

	*skb_item = skb;
}

static inline
struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{
	WARN_ON_ONCE(*fifo->pc == *fifo->cc);

	return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
}

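/* Release one DMA mapping previously recorded by mlx5e_dma_push_single() or
 * mlx5e_dma_push_netmem().
 */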
static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		netmem_dma_unmap_page_attrs(pdev, dma->addr, dma->size,
					    DMA_TO_DEVICE, 0);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);

static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
	return session->ds_count == session->ds_count_max;
}

static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

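/* RQ work-queue accessors that dispatch on the WQ type: the linked-list
 * striding WQ for MPWQE RQs, the cyclic WQ otherwise.
 */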
static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_head(&rq->wqe.wq);
	}
}

static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
	}
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
	eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}

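/* Program the software parser (SWP) offsets and flags in the eth segment from
 * the SKB header offsets described by @swp_spec. For non-tunnel packets the
 * inner L3 offset mirrors the outer one.
 */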
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

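/* For GSO packets sent through the software parser, rewrite the inner TCP/UDP
 * checksum field with the pseudo-header checksum over one segment
 * (gso_size + L4 header length), provided the device advertises
 * swp_csum_l4_partial. Non-GSO packets are left untouched.
 */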
static inline void
mlx5e_swp_encap_csum_partial(struct mlx5_core_dev *mdev, struct sk_buff *skb, bool tunnel)
{
	const struct iphdr *ip = tunnel ? inner_ip_hdr(skb) : ip_hdr(skb);
	const struct ipv6hdr *ip6;
	struct tcphdr *th;
	struct udphdr *uh;
	int len;

	if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial) || !skb_is_gso(skb))
		return;

	if (skb_is_gso_tcp(skb)) {
		th = inner_tcp_hdr(skb);
		len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb);

		if (ip->version == 4) {
			th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
		} else {
			ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
			th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
		}
	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		uh = (struct udphdr *)skb_inner_transport_header(skb);
		len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);

		if (ip->version == 4) {
			uh->check = ~udp_v4_check(len, ip->saddr, ip->daddr, 0);
		} else {
			ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
			uh->check = ~udp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
		}
	}
}

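/* Worst-case stop room for a WQE of @wqebbs WQEBBs: the WQE itself plus up to
 * (wqebbs - 1) NOP WQEBBs of padding at a fragment edge (see
 * mlx5e_stop_room_for_wqe() below).
 */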
#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)

static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < (u16)mlx5e_get_max_sq_wqebbs(mdev));

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X-1 WQEBBs of padding, which makes the
	 *    stop room of X-1 + X.
	 * WQE size is also limited by the hardware limit.
	 */
	WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev),
		  "wqe_size %u is greater than max SQ WQEBBs %u",
		  wqe_size, mlx5e_get_max_sq_wqebbs(mdev));

	return MLX5E_STOP_ROOM(wqe_size);
}

static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
{
	return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}

static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
{
	u8 mpwqe_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);

	return mlx5e_stop_room_for_wqe(mdev, mpwqe_wqebbs);
}

static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
{
	u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);

	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}

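/* mpwqe.info is an array of variable-sized entries: each struct mlx5e_mpw_info
 * embeds pages_per_wqe alloc_units.frag_pages elements, so the stride is only
 * known at runtime. Index by byte offset rather than by pointer arithmetic.
 */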
static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
{
	size_t isz = struct_size(rq->mpwqe.info, alloc_units.frag_pages, rq->mpwqe.pages_per_wqe);

	return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
}
#endif