/*-
 * Copyright (c) 2023 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef __MLX5_ACCEL_IPSEC_H__
#define __MLX5_ACCEL_IPSEC_H__

#include <sys/mbuf.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/mlx5_lib/aso.h>

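/*
 * MLX5_IPSEC_METADATA_MARKER() extracts bit 31 of the CQE flow-table
 * metadata; a set bit marks a packet that was handled by the IPsec
 * offload (see mlx5e_accel_ipsec_handle_rx() below).
 */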
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5_IPSEC_METADATA_MARKER(ipsec_metadata) (((ipsec_metadata) >> 31) & 0x1)

#define VLAN_NONE 0xfff

struct mlx5e_priv;
struct mlx5e_tx_wqe;
struct mlx5e_ipsec_tx;
struct mlx5e_ipsec_rx;
struct mlx5e_ipsec_rx_ip_type;

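/* AES-GCM key material (key, salt, implicit IV sequence, ICV length) for one SA. */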
struct aes_gcm_keymat {
	u64 seq_iv;

	u32 salt;
	u32 icv_len;

	u32 key_len;
	u32 aes_key[256 / 32];
};

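/* Pair of private SA entries covering the inbound and outbound directions. */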
struct mlx5e_ipsec_priv_bothdir {
	struct mlx5e_ipsec_sa_entry *priv_in;
	struct mlx5e_ipsec_sa_entry *priv_out;
};

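/* Work item queued on the IPsec workqueue, operating on a single SA entry. */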
struct mlx5e_ipsec_work {
	struct work_struct work;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	void *data;
};

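/* Delayed work item for an SA entry; pb links the paired in/out SA entries. */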
struct mlx5e_ipsec_dwork {
	struct delayed_work dwork;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5e_ipsec_priv_bothdir *pb;
};

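/*
 * ASO (Advanced Steering Operation) context shared by the whole IPsec
 * instance; the ctx buffer is DMA-mapped and accessed through the ASO WQ,
 * hence the lock below.
 */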
struct mlx5e_ipsec_aso {
	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
	dma_addr_t dma_addr;
	struct mlx5_aso *aso;
	/* Protect ASO WQ access, as it is global to whole IPsec */
	spinlock_t lock;
};

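/* Extended sequence number (ESN) and anti-replay window state for an SA. */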
struct mlx5_replay_esn {
	u32 replay_window;
	u32 esn;
	u32 esn_msb;
	u8 overlap : 1;
	u8 trigger : 1;
};

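/*
 * ESP transform attributes describing one SA as installed in the hardware:
 * SPI, key material, addresses, direction, encapsulation and ESN/replay state.
 */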
struct mlx5_accel_esp_xfrm_attrs {
	u32 spi;
	struct aes_gcm_keymat aes_gcm;

	union {
		__be32 a4;
		__be32 a6[4];
	} saddr;

	union {
		__be32 a4;
		__be32 a6[4];
	} daddr;

	u8 dir : 2;
	u8 encap : 1;
	u8 drop : 1;
	u8 family;
	struct mlx5_replay_esn replay_esn;
	u32 authsize;
	u32 reqid;
	u16 sport;
	u16 dport;
};

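/* Device IPsec offload capability flags, as returned by mlx5_ipsec_device_caps(). */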
enum mlx5_ipsec_cap {
	MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
	MLX5_IPSEC_CAP_ESN = 1 << 1,
	MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
	MLX5_IPSEC_CAP_ROCE = 1 << 3,
	MLX5_IPSEC_CAP_PRIO = 1 << 4,
	MLX5_IPSEC_CAP_TUNNEL = 1 << 5,
	MLX5_IPSEC_CAP_ESPINUDP = 1 << 6,
};

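/* Per-device IPsec offload context: TX/RX flow steering state, ASO and workqueue. */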
struct mlx5e_ipsec {
	struct mlx5_core_dev *mdev;
	struct workqueue_struct *wq;
	struct mlx5e_ipsec_tx *tx;
	struct mlx5e_ipsec_rx *rx_ipv4;
	struct mlx5e_ipsec_rx *rx_ipv6;
	struct mlx5e_ipsec_rx_ip_type *rx_ip_type;
	struct mlx5e_ipsec_aso *aso;
	u32 pdn;
	u32 mkey;
};

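/* Flow steering objects (rules, modify/reformat contexts, counter) created for an SA or policy. */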
struct mlx5e_ipsec_rule {
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_handle *kspi_rule;
	struct mlx5_flow_handle *reqid_rule;
	struct mlx5_flow_handle *vid_zero_rule;
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_pkt_reformat *pkt_reformat;
	struct mlx5_fc *fc;
};

struct mlx5e_ipsec_esn_state {
	u32 esn;
	u32 esn_msb;
	u8 overlap: 1;
};

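/* Driver state for one offloaded SA, linked to the stack's secasvar. */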
struct mlx5e_ipsec_sa_entry {
	struct secasvar *savp;
	if_t ifp;
	if_t ifpo;
	struct mlx5e_ipsec *ipsec;
	struct mlx5_accel_esp_xfrm_attrs attrs;
	struct mlx5e_ipsec_rule ipsec_rule;
	struct mlx5e_ipsec_dwork *dwork;
	struct mlx5e_ipsec_work *work;
	u32 ipsec_obj_id;
	u32 enc_key_id;
	u16 kspi; /* Stack allocated unique SA identifier */
	struct mlx5e_ipsec_esn_state esn_state;
	u16 vid;
};

struct upspec {
	u16 dport;
	u16 sport;
	u8 proto;
};

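/* Attributes of one security policy as installed in the hardware flow tables. */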
struct mlx5_accel_pol_xfrm_attrs {
	union {
		__be32 a4;
		__be32 a6[4];
	} saddr;

	union {
		__be32 a4;
		__be32 a6[4];
	} daddr;

	struct upspec upspec;

	u8 family;
	u8 action;
	u8 dir : 2;
	u32 reqid;
	u32 prio;
	u16 vid;
};

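/* Driver state for one offloaded security policy, linked to the stack's secpolicy. */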
struct mlx5e_ipsec_pol_entry {
	struct secpolicy *sp;
	struct mlx5e_ipsec *ipsec;
	struct mlx5e_ipsec_rule ipsec_rule;
	struct mlx5_accel_pol_xfrm_attrs attrs;
};

/* This function doesn't really belong here, but let's put it here for now */
void mlx5_object_change_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);

int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);

int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);

int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);

u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);

static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	return sa_entry->ipsec->mdev;
}

static inline struct mlx5_core_dev *
mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	return pol_entry->ipsec->mdev;
}

void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
    struct mlx5_accel_esp_xfrm_attrs *attrs, u8 dir);

int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
struct ipsec_accel_out_tag;
void mlx5e_accel_ipsec_handle_tx_wqe(struct mbuf *mb, struct mlx5e_tx_wqe *wqe,
    struct ipsec_accel_out_tag *tag);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
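/*
 * Encode the TX flow-table metadata carried in the send WQE: the
 * MLX5_ETH_WQE_FT_META_IPSEC flag shifted into the upper bits, combined
 * with the per-packet identifier in the lower bits.
 */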
static inline int mlx5e_accel_ipsec_get_metadata(unsigned int id)
{
	return MLX5_ETH_WQE_FT_META_IPSEC << 23 | id;
}
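/*
 * TX datapath hook: if the mbuf carries an IPsec accel output tag, let
 * mlx5e_accel_ipsec_handle_tx_wqe() fill the offload fields of the WQE.
 */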
static inline void
mlx5e_accel_ipsec_handle_tx(struct mbuf *mb, struct mlx5e_tx_wqe *wqe)
{
	struct ipsec_accel_out_tag *tag;

	tag = (struct ipsec_accel_out_tag *)m_tag_find(mb,
	    PACKET_TAG_IPSEC_ACCEL_OUT, NULL);
	if (tag != NULL)
		mlx5e_accel_ipsec_handle_tx_wqe(mb, wqe, tag);
}
void mlx5e_accel_ipsec_fs_rx_tables_destroy(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_rx_tables_create(struct mlx5e_priv *priv);
void mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv);
int mlx5_accel_ipsec_rx_tag_add(if_t ifp, struct mlx5e_rq_mbuf *mr);
void mlx5e_accel_ipsec_handle_rx_cqe(struct mbuf *mb, struct mlx5_cqe64 *cqe,
    struct mlx5e_rq_mbuf *mr);

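/* Return the IPsec marker bit extracted from the CQE flow-table metadata. */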
static inline int mlx5e_accel_ipsec_flow(struct mlx5_cqe64 *cqe)
{
	return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}

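/*
 * RX datapath hook: packets marked by the IPsec offload (marker bit set in
 * the CQE metadata) are passed on to mlx5e_accel_ipsec_handle_rx_cqe().
 */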
static inline void
mlx5e_accel_ipsec_handle_rx(struct mbuf *mb, struct mlx5_cqe64 *cqe,
    struct mlx5e_rq_mbuf *mr)
{
	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);

	if (MLX5_IPSEC_METADATA_MARKER(ipsec_meta_data))
		mlx5e_accel_ipsec_handle_rx_cqe(mb, cqe, mr);
}
#endif /* __MLX5_ACCEL_IPSEC_H__ */