xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c (revision 1cac38910ecb881b09f61f57545a771bbe57ba68)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/psp/types.h>

#include "en.h"
#include "psp.h"
#include "en_accel/psp_rxtx.h"
#include "en_accel/psp.h"

enum {
	MLX5E_PSP_OFFLOAD_RX_SYNDROME_DECRYPTED,
	MLX5E_PSP_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
	MLX5E_PSP_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
};

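/* Program the software parser (SWP) offsets and flags in the TX WQE ethernet
 * segment so the HW can locate the L3/L4 headers of a PSP (IP|UDP|PSP)
 * encapsulated packet. SWP offsets are expressed in 16-bit words, hence the
 * division by 2 below.
 */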
static void mlx5e_psp_set_swp(struct sk_buff *skb,
			      struct mlx5e_accel_tx_psp_state *psp_st,
			      struct mlx5_wqe_eth_seg *eseg)
{
	/* Tunnel Mode:
	 * SWP:      OutL3       InL3  InL4
	 * Pkt: MAC  IP     ESP  IP    L4
	 *
	 * Transport Mode:
	 * SWP:      OutL3       OutL4
	 * Pkt: MAC  IP     ESP  L4
	 *
	 * Tunnel(VXLAN TCP/UDP) over Transport Mode
	 * SWP:      OutL3                   InL3  InL4
	 * Pkt: MAC  IP     ESP  UDP  VXLAN  IP    L4
	 */
	u8 inner_ipproto = 0;
	struct ethhdr *eth;

	/* Shared settings */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (skb->protocol == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;

	if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
		inner_ipproto = skb->inner_ipproto;
		/* Set SWP additional flags for packet of type IP|UDP|PSP|[ TCP | UDP ] */
		switch (inner_ipproto) {
		case IPPROTO_UDP:
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
			fallthrough;
		case IPPROTO_TCP:
			eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
			break;
		default:
			break;
		}
	} else {
		/* IP in IP tunneling like vxlan */
		if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
			return;

		eth = (struct ethhdr *)skb_inner_mac_header(skb);
		switch (ntohs(eth->h_proto)) {
		case ETH_P_IP:
			inner_ipproto = ((struct iphdr *)((char *)skb->data +
					 skb_inner_network_offset(skb)))->protocol;
			break;
		case ETH_P_IPV6:
			inner_ipproto = ((struct ipv6hdr *)((char *)skb->data +
					 skb_inner_network_offset(skb)))->nexthdr;
			break;
		default:
			break;
		}

		/* Tunnel(VXLAN TCP/UDP) over Transport Mode PSP, i.e. PSP payload is a vxlan tunnel */
		switch (inner_ipproto) {
		case IPPROTO_UDP:
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
			fallthrough;
		case IPPROTO_TCP:
			eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
			eseg->swp_inner_l4_offset =
				(skb->csum_start + skb->head - skb->data) / 2;
			if (skb->protocol == htons(ETH_P_IPV6))
				eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
			break;
		default:
			break;
		}

		psp_st->inner_ipproto = inner_ipproto;
	}
}

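/* Look up the PSP association for this skb under RCU and cache the per-packet
 * TX state: trailer length, SPI, PSP version and the device key id taken from
 * the association's driver data. Returns false when the skb has no PSP
 * association, i.e. no offload is required.
 */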
static bool mlx5e_psp_set_state(struct mlx5e_priv *priv,
				struct sk_buff *skb,
				struct mlx5e_accel_tx_psp_state *psp_st)
{
	struct psp_assoc *pas;
	bool ret = false;

	rcu_read_lock();
	pas = psp_skb_get_assoc_rcu(skb);
	if (!pas)
		goto out;

	ret = true;
	psp_st->tailen = PSP_TRL_SIZE;
	psp_st->spi = pas->tx.spi;
	psp_st->ver = pas->version;
	psp_st->keyid = *(u32 *)pas->drv_data;

out:
	rcu_read_unlock();
	return ret;
}

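/* RX completion handler for PSP offload. Drops the packet unless the CQE
 * syndrome reports a successful decryption; on success the skb is handed to
 * the PSP core via psp_dev_rcv() and marked as decrypted. Returns true when
 * the skb was consumed (dropped), false to let RX processing continue.
 */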
bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
				     struct mlx5_cqe64 *cqe)
{
	u32 psp_meta_data = be32_to_cpu(cqe->ft_metadata);
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u16 dev_id = priv->psp->psp->id;
	bool strip_icv = true;
	u8 generation = 0;

	/* TBD: report errors as SW counters to ethtool, any further handling? */
	if (MLX5_PSP_METADATA_SYNDROME(psp_meta_data) != MLX5E_PSP_OFFLOAD_RX_SYNDROME_DECRYPTED)
		goto drop;

	if (psp_dev_rcv(skb, dev_id, generation, strip_icv))
		goto drop;

	skb->decrypted = 1;
	return false;

drop:
	kfree_skb(skb);
	return true;
}

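/* Build the PSP specific fields of the TX WQE ethernet segment: SWP hints,
 * the key id in the flow-table metadata, and the trailer insertion flags.
 */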
void mlx5e_psp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
			     struct mlx5e_accel_tx_psp_state *psp_st,
			     struct mlx5_wqe_eth_seg *eseg)
{
	if (!mlx5_is_psp_device(priv->mdev))
		return;

	if (unlikely(skb->protocol != htons(ETH_P_IP) &&
		     skb->protocol != htons(ETH_P_IPV6)))
		return;

	mlx5e_psp_set_swp(skb, psp_st, eseg);
	/* Special WA for PSP LSO in ConnectX7 */
	eseg->swp_outer_l3_offset = 0;
	eseg->swp_inner_l3_offset = 0;

	eseg->flow_table_metadata |= cpu_to_be32(psp_st->keyid);
	eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER) |
			 cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
}

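/* Set the WQE inline segment byte count to the PSP trailer length cached in
 * psp_st, so the trailer space is accounted for in the descriptor.
 */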
void mlx5e_psp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
			     struct mlx5e_accel_tx_psp_state *psp_st,
			     struct mlx5_wqe_inline_seg *inlseg)
{
	inlseg->byte_count = cpu_to_be32(psp_st->tailen | MLX5_INLINE_SEG);
}

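/* Software part of PSP TX offload: fetch the association state for the skb,
 * encapsulate it with a PSP header via psp_dev_encapsulate(), and for GSO
 * packets recompute the inner TCP pseudo-header checksum. Returns false if
 * the skb was dropped, true otherwise (including skbs with no PSP state).
 */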
bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
			     struct sk_buff *skb,
			     struct mlx5e_accel_tx_psp_state *psp_st)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct net *net = sock_net(skb->sk);

	if (!mlx5e_psp_set_state(priv, skb, psp_st))
		return true;

	/* psp_encap of the packet */
	if (!psp_dev_encapsulate(net, skb, psp_st->spi, psp_st->ver, 0)) {
		kfree_skb_reason(skb, SKB_DROP_REASON_PSP_OUTPUT);
		atomic_inc(&priv->psp->tx_drop);
		return false;
	}
	if (skb_is_gso(skb)) {
		int len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb);
		struct tcphdr *th = inner_tcp_hdr(skb);

		if (skb->protocol == htons(ETH_P_IP)) {
			const struct iphdr *ip = ip_hdr(skb);

			th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
		} else {
			const struct ipv6hdr *ip6 = ipv6_hdr(skb);

			th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
		}
	}

	return true;
}