/*-
 * Copyright (c) 2023 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef __MLX5_ACCEL_IPSEC_H__
#define __MLX5_ACCEL_IPSEC_H__

#include <sys/mbuf.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/mlx5_lib/aso.h>

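/*
 * MLX5E_IPSEC_SADB_RX_BITS presumably sizes the RX SADB hash (log2 of
 * buckets).  MLX5_IPSEC_METADATA_MARKER() tests bit 31 of the CQE
 * flow-table metadata, which flags packets handled by the IPsec RX
 * offload (see mlx5e_accel_ipsec_handle_rx() below).
 */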
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5_IPSEC_METADATA_MARKER(ipsec_metadata) (((ipsec_metadata) >> 31) & 0x1)

struct mlx5e_priv;
struct mlx5e_tx_wqe;
struct mlx5e_ipsec_tx;
struct mlx5e_ipsec_rx;

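/*
 * AES-GCM key material programmed into the device for one SA: IV seed,
 * salt, ICV length and the AES key (up to 256 bits).
 */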
struct aes_gcm_keymat {
	u64   seq_iv;

	u32   salt;
	u32   icv_len;

	u32   key_len;
	u32   aes_key[256 / 32];
};

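/* Inbound and outbound driver SA entries that belong together. */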
struct mlx5e_ipsec_priv_bothdir {
	struct mlx5e_ipsec_sa_entry *priv_in;
	struct mlx5e_ipsec_sa_entry *priv_out;
};

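/* Work item queued on the IPsec workqueue on behalf of an SA entry. */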
struct mlx5e_ipsec_work {
	struct work_struct work;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	void *data;
};

struct mlx5e_ipsec_dwork {
	struct delayed_work dwork;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5e_ipsec_priv_bothdir *pb;
};

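/*
 * ASO (Advanced Steering Operation) context, global to the whole IPsec
 * instance; presumably used to query and update per-SA hardware state
 * (e.g. ESN and replay/lifetime events) through the ASO WQ.
 */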
struct mlx5e_ipsec_aso {
	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
	dma_addr_t dma_addr;
	struct mlx5_aso *aso;
	/* Protect ASO WQ access, as it is global to whole IPsec */
	spinlock_t lock;
};

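/* Anti-replay window and extended sequence number (ESN) state. */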
struct mlx5_replay_esn {
	u32 replay_window;
	u32 esn;
	u32 esn_msb;
	u8 overlap : 1;
	u8 trigger : 1;
};

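/*
 * Flattened description of an ESP SA used to create the hardware offload
 * object: SPI, AES-GCM key material, addresses, direction, ESN/replay
 * state and the UDP encapsulation ports (for ESP over UDP).
 */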
struct mlx5_accel_esp_xfrm_attrs {
	u32   spi;
	struct aes_gcm_keymat aes_gcm;

	union {
		__be32 a4;
		__be32 a6[4];
	} saddr;

	union {
		__be32 a4;
		__be32 a6[4];
	} daddr;

	u8 dir : 2;
	u8 encap : 1;
	u8 drop : 1;
	u8 family;
	struct mlx5_replay_esn replay_esn;
	u32 authsize;
	u32 reqid;
	u16 sport;
	u16 dport;
};

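/* Capability bits returned by mlx5_ipsec_device_caps(). */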
enum mlx5_ipsec_cap {
	MLX5_IPSEC_CAP_CRYPTO		= 1 << 0,
	MLX5_IPSEC_CAP_ESN		= 1 << 1,
	MLX5_IPSEC_CAP_PACKET_OFFLOAD	= 1 << 2,
	MLX5_IPSEC_CAP_ROCE		= 1 << 3,
	MLX5_IPSEC_CAP_PRIO		= 1 << 4,
	MLX5_IPSEC_CAP_TUNNEL		= 1 << 5,
	MLX5_IPSEC_CAP_ESPINUDP		= 1 << 6,
};

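/*
 * Per-device IPsec offload context: the owning core device, a private
 * workqueue, TX and RX steering state, the shared ASO context, and the
 * protection domain and memory key (pdn/mkey) allocated for it.
 */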
struct mlx5e_ipsec {
	struct mlx5_core_dev *mdev;
	struct workqueue_struct *wq;
	struct mlx5e_ipsec_tx *tx;
	struct mlx5e_ipsec_rx *rx_ipv4;
	struct mlx5e_ipsec_rx *rx_ipv6;
	struct mlx5e_ipsec_aso *aso;
	u32 pdn;
	u32 mkey;
};

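/*
 * Steering objects installed for one SA or policy: flow rules, packet
 * modify/reformat contexts and the flow counter.
 */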
struct mlx5e_ipsec_rule {
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_handle *kspi_rule;
	struct mlx5_flow_handle *reqid_rule;
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_pkt_reformat *pkt_reformat;
	struct mlx5_fc *fc;
};

struct mlx5e_ipsec_esn_state {
	u32 esn;
	u32 esn_msb;
	u8 overlap: 1;
};

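/*
 * Driver-private state of one offloaded SA: back pointers to the stack
 * SA and interface, the owning IPsec context, the attributes and rules
 * programmed into the device, deferred work, and the hardware object
 * and encryption key identifiers.
 */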
struct mlx5e_ipsec_sa_entry {
	struct secasvar *savp;
	if_t ifp;
	struct mlx5e_ipsec *ipsec;
	struct mlx5_accel_esp_xfrm_attrs attrs;
	struct mlx5e_ipsec_rule ipsec_rule;
	struct mlx5e_ipsec_dwork *dwork;
	struct mlx5e_ipsec_work *work;
	u32 ipsec_obj_id;
	u32 enc_key_id;
	u16 kspi; /* Unique SA identifier allocated by the IPsec stack */
	struct mlx5e_ipsec_esn_state esn_state;
};

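/* Upper-layer selector (ports and protocol) used for policy matching. */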
struct upspec {
	u16 dport;
	u16 sport;
	u8 proto;
};

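/*
 * Attributes of one security policy as programmed into the device:
 * address selectors, upper-layer selector, family, action, direction,
 * request id and priority.
 */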
struct mlx5_accel_pol_xfrm_attrs {
	union {
		__be32 a4;
		__be32 a6[4];
	} saddr;

	union {
		__be32 a4;
		__be32 a6[4];
	} daddr;

	struct upspec upspec;

	u8 family;
	u8 action;
	u8 dir : 2;
	u32 reqid;
	u32 prio;
};

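/*
 * Driver-private state of one offloaded security policy: the stack
 * policy, the owning IPsec context, and the rules and attributes
 * installed for it.
 */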
struct mlx5e_ipsec_pol_entry {
	struct secpolicy *sp;
	struct mlx5e_ipsec *ipsec;
	struct mlx5e_ipsec_rule ipsec_rule;
	struct mlx5_accel_pol_xfrm_attrs attrs;
};

/* This function doesn't really belong here, but let's put it here for now */
void mlx5_object_change_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);

int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);

int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);

int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);

u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);

static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	return sa_entry->ipsec->mdev;
}

static inline struct mlx5_core_dev *
mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	return pol_entry->ipsec->mdev;
}

void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5_accel_esp_xfrm_attrs *attrs,
					u8 dir);

int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
struct ipsec_accel_out_tag;
void mlx5e_accel_ipsec_handle_tx_wqe(struct mbuf *mb, struct mlx5e_tx_wqe *wqe,
    struct ipsec_accel_out_tag *tag);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
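
/*
 * Compose the flow-table metadata word carried in the TX WQE: the IPsec
 * marker shifted into the upper bits, OR'ed with the identifier.
 */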
static inline int mlx5e_accel_ipsec_get_metadata(unsigned int id)
{
	return MLX5_ETH_WQE_FT_META_IPSEC << 23 | id;
}
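
/*
 * If the mbuf carries an IPsec accel output tag, pass it together with
 * the WQE to mlx5e_accel_ipsec_handle_tx_wqe().
 */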
static inline void
mlx5e_accel_ipsec_handle_tx(struct mbuf *mb, struct mlx5e_tx_wqe *wqe)
{
	struct ipsec_accel_out_tag *tag;

	tag = (struct ipsec_accel_out_tag *)m_tag_find(mb,
	    PACKET_TAG_IPSEC_ACCEL_OUT, NULL);
	if (tag != NULL)
		mlx5e_accel_ipsec_handle_tx_wqe(mb, wqe, tag);
}
void mlx5e_accel_ipsec_fs_rx_tables_destroy(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_rx_tables_create(struct mlx5e_priv *priv);
void mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv);
int mlx5_accel_ipsec_rx_tag_add(if_t ifp, struct mbuf *mb);
int mlx5e_accel_ipsec_handle_rx_cqe(struct mbuf *mb, struct mlx5_cqe64 *cqe);
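
/*
 * Non-zero when the CQE flow-table metadata marks the packet as handled
 * by the IPsec RX offload.
 */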
static inline int mlx5e_accel_ipsec_flow(struct mlx5_cqe64 *cqe)
{
	return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}

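/*
 * RX post-processing: if the CQE is not marked as IPsec-offloaded,
 * remove any PACKET_TAG_IPSEC_ACCEL_IN tag left on the mbuf; otherwise
 * hand the mbuf and CQE to mlx5e_accel_ipsec_handle_rx_cqe().
 */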
static inline void mlx5e_accel_ipsec_handle_rx(struct mbuf *mb, struct mlx5_cqe64 *cqe)
{
	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);

	if (!MLX5_IPSEC_METADATA_MARKER(ipsec_meta_data)) {
		struct m_tag *mtag;

		mtag = m_tag_find(mb, PACKET_TAG_IPSEC_ACCEL_IN, NULL);
		if (mtag != NULL)
			m_tag_delete(mb, mtag);

		return;
	}

	mlx5e_accel_ipsec_handle_rx_cqe(mb, cqe);
}
#endif	/* __MLX5_ACCEL_IPSEC_H__ */