xref: /freebsd/sys/dev/mlx5/mlx5_accel/ipsec.h (revision 205263ac250aadd84931d2b77475bc931c3afeff)
1 /*-
2  * Copyright (c) 2023 NVIDIA corporation & affiliates.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  */
26 
27 #ifndef __MLX5_ACCEL_IPSEC_H__
28 #define __MLX5_ACCEL_IPSEC_H__
29 
30 #include <sys/mbuf.h>
31 #include <dev/mlx5/driver.h>
32 #include <dev/mlx5/qp.h>
33 #include <dev/mlx5/mlx5_core/mlx5_core.h>
34 #include <dev/mlx5/mlx5_en/en.h>
35 #include <dev/mlx5/mlx5_lib/aso.h>
36 
#define MLX5E_IPSEC_SADB_RX_BITS 10
/*
 * Bit 31 of the flow-table metadata marks a packet as handled by the
 * IPsec offload (see mlx5e_accel_ipsec_flow()/..._handle_rx()).
 * The argument is fully parenthesized so the macro stays correct when
 * given a compound expression (CERT PRE01-C).
 */
#define MLX5_IPSEC_METADATA_MARKER(ipsec_metadata) ((((ipsec_metadata)) >> 31) & 0x1)

/* 0xfff is not a valid 802.1Q VLAN ID; used as the "no VLAN" sentinel. */
#define VLAN_NONE 0xfff
41 
42 struct mlx5e_priv;
43 struct mlx5e_tx_wqe;
44 struct mlx5e_ipsec_tx;
45 struct mlx5e_ipsec_rx;
46 
/* AES-GCM key material for one offloaded SA. */
struct aes_gcm_keymat {
	u64   seq_iv;			/* implicit IV / sequence part */

	u32   salt;			/* GCM salt (nonce prefix) */
	u32   icv_len;			/* ICV length in bits -- TODO confirm units */

	u32   key_len;			/* key length actually used, in bits -- TODO confirm units */
	u32   aes_key[256 / 32];	/* room for up to a 256-bit key */
};
56 
/* Pair of SA entries covering both directions of one security association. */
struct mlx5e_ipsec_priv_bothdir {
	struct mlx5e_ipsec_sa_entry *priv_in;	/* inbound SA */
	struct mlx5e_ipsec_sa_entry *priv_out;	/* outbound SA */
};
61 
/* Context for work queued on mlx5e_ipsec->wq on behalf of an SA entry. */
struct mlx5e_ipsec_work {
        struct work_struct work;
        struct mlx5e_ipsec_sa_entry *sa_entry;
        void *data;	/* handler-specific payload */
};
67 
/* Context for delayed (timer-driven) work associated with an SA entry. */
struct mlx5e_ipsec_dwork {
	struct delayed_work dwork;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5e_ipsec_priv_bothdir *pb;	/* both-direction SA pair this work acts on */
};
73 
/*
 * ASO (Advanced Steering Operation) context used to query/modify IPsec
 * hardware objects; see mlx5e_ipsec_aso_init()/mlx5e_ipsec_aso_cleanup().
 */
struct mlx5e_ipsec_aso {
        u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];	/* DMA-able ASO context buffer */
        dma_addr_t dma_addr;	/* bus address of ctx */
        struct mlx5_aso *aso;
        /* Protect ASO WQ access, as it is global to whole IPsec */
        spinlock_t lock;
};
81 
/* Anti-replay / extended sequence number (ESN) state for an SA. */
struct mlx5_replay_esn {
	u32 replay_window;	/* anti-replay window size */
	u32 esn;		/* low 32 bits of the ESN */
	u32 esn_msb;		/* high 32 bits of the ESN */
	u8 overlap : 1;		/* ESN window overlap indicator -- TODO confirm semantics */
	u8 trigger : 1;		/* ESN tracking enabled -- TODO confirm semantics */
};
89 
/*
 * Attributes describing one offloaded ESP transform (SA); built from the
 * stack's SA by mlx5e_ipsec_build_accel_xfrm_attrs().
 */
struct mlx5_accel_esp_xfrm_attrs {
	u32   spi;			/* ESP Security Parameters Index */
	struct aes_gcm_keymat aes_gcm;	/* AES-GCM key material */

	/* Source address: IPv4 or IPv6, selected by 'family'. */
	union {
		__be32 a4;
		__be32 a6[4];
	} saddr;

	/* Destination address: IPv4 or IPv6, selected by 'family'. */
	union {
		__be32 a4;
		__be32 a6[4];
	} daddr;

	u8 dir : 2;			/* traffic direction (in/out) */
	u8 encap : 1;			/* UDP encapsulation (NAT-T) -- TODO confirm */
	u8 drop : 1;			/* drop traffic matching this SA */
	u8 family;			/* address family of saddr/daddr */
	struct mlx5_replay_esn replay_esn;
	u32 authsize;			/* ICV size -- TODO confirm units (bytes) */
	u32 reqid;			/* policy request ID linking SA to policy */
	u16 sport;			/* encap source port -- presumably UDP, verify */
	u16 dport;			/* encap destination port -- presumably UDP, verify */
};
114 
/*
 * IPsec offload capability flags; combined into the bit mask returned by
 * mlx5_ipsec_device_caps().
 */
enum mlx5_ipsec_cap {
	MLX5_IPSEC_CAP_CRYPTO		= 1 << 0,	/* crypto-only offload */
	MLX5_IPSEC_CAP_ESN		= 1 << 1,	/* extended sequence numbers */
	MLX5_IPSEC_CAP_PACKET_OFFLOAD	= 1 << 2,	/* full packet offload */
	MLX5_IPSEC_CAP_ROCE             = 1 << 3,	/* IPsec over RoCE */
	MLX5_IPSEC_CAP_PRIO             = 1 << 4,	/* priority support -- TODO confirm */
	MLX5_IPSEC_CAP_TUNNEL           = 1 << 5,	/* tunnel mode */
	MLX5_IPSEC_CAP_ESPINUDP         = 1 << 6,	/* ESP-in-UDP encapsulation */
};
124 
/* Per-device IPsec offload state; created by mlx5e_ipsec_init(). */
struct mlx5e_ipsec {
	struct mlx5_core_dev *mdev;		/* owning core device */
	struct workqueue_struct *wq;		/* queue for mlx5e_ipsec_work items */
	struct mlx5e_ipsec_tx *tx;		/* TX flow-steering state (opaque here) */
	struct mlx5e_ipsec_rx *rx_ipv4;		/* RX flow-steering state, IPv4 */
	struct mlx5e_ipsec_rx *rx_ipv6;		/* RX flow-steering state, IPv6 */
	struct mlx5e_ipsec_aso *aso;		/* ASO context for SA object access */
	u32 pdn;				/* protection domain number -- TODO confirm */
	u32 mkey;				/* memory key for ASO DMA -- TODO confirm */
};
135 
/* Flow-steering objects installed for one SA or policy entry. */
struct mlx5e_ipsec_rule {
	struct mlx5_flow_handle *rule;		/* main steering rule */
	struct mlx5_flow_handle *kspi_rule;	/* rule matching on the stack's kspi */
	struct mlx5_flow_handle *reqid_rule;	/* rule matching on reqid */
	struct mlx5_flow_handle *vid_zero_rule;	/* rule for untagged/VID-0 traffic */
	struct mlx5_modify_hdr *modify_hdr;	/* header-modify action */
	struct mlx5_pkt_reformat *pkt_reformat;	/* encap/decap reformat action */
	struct mlx5_fc *fc;			/* flow counter */
};
145 
/* Software-tracked ESN state cached per SA entry. */
struct mlx5e_ipsec_esn_state {
	u32 esn;	/* low 32 bits of the ESN */
	u32 esn_msb;	/* high 32 bits of the ESN */
	u8 overlap: 1;	/* window overlap indicator -- TODO confirm semantics */
};
151 
/* Driver-side representation of one offloaded security association. */
struct mlx5e_ipsec_sa_entry {
	struct secasvar *savp;		/* stack's SA variable this entry mirrors */
	if_t ifp;			/* interface the SA is attached to */
	if_t ifpo;			/* secondary interface -- TODO confirm (e.g. vlan parent) */
	struct mlx5e_ipsec *ipsec;	/* back-pointer to per-device IPsec state */
	struct mlx5_accel_esp_xfrm_attrs attrs;	/* HW transform attributes */
	struct mlx5e_ipsec_rule ipsec_rule;	/* installed steering objects */
	struct mlx5e_ipsec_dwork *dwork;	/* delayed maintenance work */
	struct mlx5e_ipsec_work *work;		/* immediate work context */
	u32 ipsec_obj_id;		/* firmware IPsec object ID */
	u32 enc_key_id;			/* firmware encryption key ID */
	u16 kspi; /* Stack allocated unique SA identifier */
	struct mlx5e_ipsec_esn_state esn_state;	/* cached ESN tracking state */
	u16 vid;			/* VLAN ID, or VLAN_NONE */
};
167 
/* Upper-layer protocol selector (ports + protocol) for policy matching. */
struct upspec {
        u16 dport;	/* destination port */
        u16 sport;	/* source port */
        u8 proto;	/* IP protocol number */
};
173 
/* Attributes describing one offloaded security policy. */
struct mlx5_accel_pol_xfrm_attrs {
        /* Source address: IPv4 or IPv6, selected by 'family'. */
        union {
                __be32 a4;
                __be32 a6[4];
        } saddr;

        /* Destination address: IPv4 or IPv6, selected by 'family'. */
        union {
                __be32 a4;
                __be32 a6[4];
        } daddr;

	struct upspec upspec;	/* upper-layer selector (ports/proto) */

        u8 family;	/* address family of saddr/daddr */
        u8 action;	/* policy action -- TODO confirm value set */
        u8 dir : 2;	/* traffic direction (in/out) */
        u32 reqid;	/* request ID linking policy to its SA */
        u32 prio;	/* policy priority */
        u16 vid;	/* VLAN ID, or VLAN_NONE */
};
194 
/* Driver-side representation of one offloaded security policy. */
struct mlx5e_ipsec_pol_entry {
	struct secpolicy *sp;		/* stack's policy this entry mirrors */
	struct mlx5e_ipsec *ipsec;	/* back-pointer to per-device IPsec state */
	struct mlx5e_ipsec_rule ipsec_rule;	/* installed steering objects */
	struct mlx5_accel_pol_xfrm_attrs attrs;	/* HW policy attributes */
};
201 
/* This function doesn't really belong here, but let's put it here for now */
void mlx5_object_change_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);

/* Per-device IPsec state lifecycle. */
int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);

/* ASO context lifecycle. */
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);

/* Firmware SA context (IPsec object + key) lifecycle. */
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);

/* Returns a mask of enum mlx5_ipsec_cap bits supported by the device. */
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
215 
/* Return the mlx5 core device that owns the given SA entry. */
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	return sa_entry->ipsec->mdev;
}
221 
/* Return the mlx5 core device that owns the given policy entry. */
static inline struct mlx5_core_dev *
mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	return pol_entry->ipsec->mdev;
}
227 
/* Translate the stack's SA into HW transform attributes for 'dir'. */
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5_accel_esp_xfrm_attrs *attrs,
					u8 dir);

/* Flow-steering setup/teardown and per-SA rule management. */
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
struct ipsec_accel_out_tag;
/* Fill the TX WQE for an mbuf carrying an IPsec accel output tag. */
void mlx5e_accel_ipsec_handle_tx_wqe(struct mbuf *mb, struct mlx5e_tx_wqe *wqe,
    struct ipsec_accel_out_tag *tag);
/* Per-policy rule management. */
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
/*
 * Build the TX flow-table metadata word for an IPsec packet: the IPsec
 * marker in the high bits (shifted by 23) combined with the given id.
 */
static inline int mlx5e_accel_ipsec_get_metadata(unsigned int id)
{
	return MLX5_ETH_WQE_FT_META_IPSEC << 23 |  id;
}
246 static inline void
mlx5e_accel_ipsec_handle_tx(struct mbuf * mb,struct mlx5e_tx_wqe * wqe)247 mlx5e_accel_ipsec_handle_tx(struct mbuf *mb, struct mlx5e_tx_wqe *wqe)
248 {
249 	struct ipsec_accel_out_tag *tag;
250 
251 	tag = (struct ipsec_accel_out_tag *)m_tag_find(mb,
252 	    PACKET_TAG_IPSEC_ACCEL_OUT, NULL);
253 	if (tag != NULL)
254 		mlx5e_accel_ipsec_handle_tx_wqe(mb, wqe, tag);
255 }
/* RX flow-table and catch-all rule setup/teardown. */
void mlx5e_accel_ipsec_fs_rx_tables_destroy(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_rx_tables_create(struct mlx5e_priv *priv);
void mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv);
/* Attach an IPsec accel tag to a received mbuf. */
int mlx5_accel_ipsec_rx_tag_add(if_t ifp, struct mlx5e_rq_mbuf *mr);
/* Process a completed RX CQE for an IPsec-offloaded packet. */
void mlx5e_accel_ipsec_handle_rx_cqe(struct mbuf *mb, struct mlx5_cqe64 *cqe,
    struct mlx5e_rq_mbuf *mr);
263 
/* Return nonzero when the CQE's flow-table metadata carries the IPsec marker. */
static inline int mlx5e_accel_ipsec_flow(struct mlx5_cqe64 *cqe)
{
	return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
268 
269 static inline void
mlx5e_accel_ipsec_handle_rx(struct mbuf * mb,struct mlx5_cqe64 * cqe,struct mlx5e_rq_mbuf * mr)270 mlx5e_accel_ipsec_handle_rx(struct mbuf *mb, struct mlx5_cqe64 *cqe,
271     struct mlx5e_rq_mbuf *mr)
272 {
273 	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
274 
275 	if (MLX5_IPSEC_METADATA_MARKER(ipsec_meta_data))
276 		mlx5e_accel_ipsec_handle_rx_cqe(mb, cqe, mr);
277 }
278 #endif	/* __MLX5_ACCEL_IPSEC_H__ */
279