/*-
 * Copyright (c) 2021,2022 NVIDIA CORPORATION & AFFILIATES. ALL RIGHTS RESERVED.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ck.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/pctrie.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/protosw.h>
#include <sys/stdarg.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>

#include <netipsec/key.h>
#include <netipsec/keydb.h>
#include <netipsec/key_debug.h>
#include <netipsec/xform.h>
#include <netipsec/ipsec.h>
#include <netipsec/ipsec_offload.h>
#include <netipsec/ah_var.h>
#include <netipsec/esp.h>
#include <netipsec/esp_var.h>
#include <netipsec/ipcomp_var.h>

#ifdef IPSEC_OFFLOAD

static struct mtx ipsec_accel_sav_tmp;
static struct unrhdr *drv_spi_unr;
static struct mtx ipsec_accel_cnt_lock;
static struct taskqueue *ipsec_accel_tq;

struct ipsec_accel_install_newkey_tq {
        struct secasvar *sav;
        struct vnet *install_vnet;
        struct task install_task;
};

struct ipsec_accel_forget_tq {
        struct vnet *forget_vnet;
        struct task forget_task;
        struct secasvar *sav;
};

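/*
 * Per-(SA, interface) offload handle.  One is allocated for every
 * interface that was asked to offload a given SA, whether the driver
 * accepted (IFP_HS_HANDLED) or refused (IFP_HS_REJECTED) the request.
 * Handles are linked both into the owning SA (sav_link) and into the
 * global list of all handles (sav_allh_link), and are indexed by the
 * locally allocated drv_spi in drv_spi_pctrie.
 */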
struct ifp_handle_sav {
        CK_LIST_ENTRY(ifp_handle_sav) sav_link;
        CK_LIST_ENTRY(ifp_handle_sav) sav_allh_link;
        struct secasvar *sav;
        struct ifnet *ifp;
        void *ifdata;
        uint64_t drv_spi;
        uint32_t flags;
        size_t hdr_ext_size;
        uint64_t cnt_octets;
        uint64_t cnt_allocs;
        struct xform_history xfh;
};

#define IFP_HS_HANDLED  0x00000001
#define IFP_HS_REJECTED 0x00000002
#define IFP_HS_MARKER   0x00000010

static CK_LIST_HEAD(, ifp_handle_sav) ipsec_accel_all_sav_handles;

struct ifp_handle_sp {
        CK_LIST_ENTRY(ifp_handle_sp) sp_link;
        CK_LIST_ENTRY(ifp_handle_sp) sp_allh_link;
        struct secpolicy *sp;
        struct ifnet *ifp;
        void *ifdata;
        uint32_t flags;
};

#define IFP_HP_HANDLED  0x00000001
#define IFP_HP_REJECTED 0x00000002
#define IFP_HP_MARKER   0x00000004

static CK_LIST_HEAD(, ifp_handle_sp) ipsec_accel_all_sp_handles;

static void *
drvspi_sa_trie_alloc(struct pctrie *ptree)
{
        void *res;

        res = malloc(pctrie_node_size(), M_IPSEC_MISC, M_ZERO | M_NOWAIT);
        if (res != NULL)
                pctrie_zone_init(res, 0, 0);
        return (res);
}

static void
drvspi_sa_trie_free(struct pctrie *ptree, void *node)
{
        free(node, M_IPSEC_MISC);
}

PCTRIE_DEFINE(DRVSPI_SA, ifp_handle_sav, drv_spi,
    drvspi_sa_trie_alloc, drvspi_sa_trie_free);
static struct pctrie drv_spi_pctrie;

static eventhandler_tag ipsec_accel_ifdetach_event_tag;

static void ipsec_accel_sa_newkey_impl(struct secasvar *sav);
static int ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
    u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires);
static void ipsec_accel_forget_sav_clear(struct secasvar *sav);
static struct ifp_handle_sav *ipsec_accel_is_accel_sav_ptr(struct secasvar *sav,
    struct ifnet *ifp);
static int ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
    struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
    struct rm_priotracker *sahtree_trackerp);
static void ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m);
static void ipsec_accel_sync_imp(void);
static bool ipsec_accel_is_accel_sav_impl(struct secasvar *sav);
static struct mbuf *ipsec_accel_key_setaccelif_impl(struct secasvar *sav);
static void ipsec_accel_on_ifdown_impl(struct ifnet *ifp);
static void ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t octets, uint64_t allocs);
static int ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs);
static void ipsec_accel_ifdetach_event(void *arg, struct ifnet *ifp);
static bool ipsec_accel_fill_xh_impl(if_t ifp, uint32_t drv_spi,
    struct xform_history *xh);

static void
ipsec_accel_init(void *arg)
{
        mtx_init(&ipsec_accel_sav_tmp, "ipasat", MTX_DEF, 0);
        mtx_init(&ipsec_accel_cnt_lock, "ipascn", MTX_DEF, 0);
        drv_spi_unr = new_unrhdr(IPSEC_ACCEL_DRV_SPI_MIN,
            IPSEC_ACCEL_DRV_SPI_MAX, &ipsec_accel_sav_tmp);
        ipsec_accel_tq = taskqueue_create("ipsec_offload", M_WAITOK,
            taskqueue_thread_enqueue, &ipsec_accel_tq);
        (void)taskqueue_start_threads(&ipsec_accel_tq,
            1 /* Must be single-threaded */, PWAIT,
            "ipsec_offload");
        ipsec_accel_sa_newkey_p = ipsec_accel_sa_newkey_impl;
        ipsec_accel_forget_sav_p = ipsec_accel_forget_sav_impl;
        ipsec_accel_spdadd_p = ipsec_accel_spdadd_impl;
        ipsec_accel_spddel_p = ipsec_accel_spddel_impl;
        ipsec_accel_sa_lifetime_op_p = ipsec_accel_sa_lifetime_op_impl;
        ipsec_accel_sync_p = ipsec_accel_sync_imp;
        ipsec_accel_is_accel_sav_p = ipsec_accel_is_accel_sav_impl;
        ipsec_accel_key_setaccelif_p = ipsec_accel_key_setaccelif_impl;
        ipsec_accel_on_ifdown_p = ipsec_accel_on_ifdown_impl;
        ipsec_accel_drv_sa_lifetime_update_p =
            ipsec_accel_drv_sa_lifetime_update_impl;
        ipsec_accel_drv_sa_lifetime_fetch_p =
            ipsec_accel_drv_sa_lifetime_fetch_impl;
        ipsec_accel_fill_xh_p = ipsec_accel_fill_xh_impl;
        pctrie_init(&drv_spi_pctrie);
        ipsec_accel_ifdetach_event_tag = EVENTHANDLER_REGISTER(
            ifnet_departure_event, ipsec_accel_ifdetach_event, NULL,
            EVENTHANDLER_PRI_ANY);
}
SYSINIT(ipsec_accel_init, SI_SUB_VNET_DONE, SI_ORDER_ANY,
    ipsec_accel_init, NULL);

static void
ipsec_accel_fini(void *arg)
{
        EVENTHANDLER_DEREGISTER(ifnet_departure_event,
            ipsec_accel_ifdetach_event_tag);
        ipsec_accel_sa_newkey_p = NULL;
        ipsec_accel_forget_sav_p = NULL;
        ipsec_accel_spdadd_p = NULL;
        ipsec_accel_spddel_p = NULL;
        ipsec_accel_sa_lifetime_op_p = NULL;
        ipsec_accel_sync_p = NULL;
        ipsec_accel_is_accel_sav_p = NULL;
        ipsec_accel_key_setaccelif_p = NULL;
        ipsec_accel_on_ifdown_p = NULL;
        ipsec_accel_drv_sa_lifetime_update_p = NULL;
        ipsec_accel_drv_sa_lifetime_fetch_p = NULL;
        ipsec_accel_fill_xh_p = NULL;
        ipsec_accel_sync_imp();
        clean_unrhdr(drv_spi_unr);      /* avoid panic, should go later */
        clear_unrhdr(drv_spi_unr);
        delete_unrhdr(drv_spi_unr);
        taskqueue_drain_all(ipsec_accel_tq);
        taskqueue_free(ipsec_accel_tq);
        mtx_destroy(&ipsec_accel_sav_tmp);
        mtx_destroy(&ipsec_accel_cnt_lock);
}
SYSUNINIT(ipsec_accel_fini, SI_SUB_VNET_DONE, SI_ORDER_ANY,
    ipsec_accel_fini, NULL);

SYSCTL_NODE(_net_inet_ipsec, OID_AUTO, offload, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");

static bool ipsec_offload_verbose = false;
SYSCTL_BOOL(_net_inet_ipsec_offload, OID_AUTO, verbose, CTLFLAG_RW,
    &ipsec_offload_verbose, 0,
    "Verbose SA/SP offload install and deinstall");

static void
dprintf(const char *fmt, ...)
{
        va_list ap;

        if (!ipsec_offload_verbose)
                return;

        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
}

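/*
 * Pre-allocate the "forget" task context while it is safe to sleep, so
 * that ipsec_accel_forget_sav_impl() can later schedule SA removal
 * without having to allocate memory itself.
 */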
static void
ipsec_accel_alloc_forget_tq(struct secasvar *sav)
{
        void *ftq;

        if (sav->accel_forget_tq != 0)
                return;

        ftq = malloc(sizeof(struct ipsec_accel_forget_tq), M_TEMP, M_WAITOK);
        if (!atomic_cmpset_ptr(&sav->accel_forget_tq, 0, (uintptr_t)ftq))
                free(ftq, M_TEMP);
}

static bool
ipsec_accel_sa_install_match(if_t ifp, void *arg)
{
        if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
                return (false);
        if (ifp->if_ipsec_accel_m->if_sa_newkey == NULL) {
                dprintf("driver bug ifp %s if_sa_newkey NULL\n",
                    if_name(ifp));
                return (false);
        }
        return (true);
}

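/*
 * Offer the SA to one interface.  A driver-local SPI is allocated and
 * the result of if_sa_newkey() is recorded as an ifp_handle_sav, marked
 * either handled or rejected.  Called from if_foreach_sleep(), so the
 * return value of 0 means "continue iterating over interfaces".
 */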
static int
ipsec_accel_sa_newkey_cb(if_t ifp, void *arg)
{
        struct ipsec_accel_install_newkey_tq *tq;
        void *priv;
        u_int drv_spi;
        int error;

        tq = arg;

        dprintf("ipsec_accel_sa_newkey_act: ifp %s h %p spi %#x "
            "flags %#x seq %d\n",
            if_name(ifp), ifp->if_ipsec_accel_m->if_sa_newkey,
            be32toh(tq->sav->spi), tq->sav->flags, tq->sav->seq);
        priv = NULL;
        drv_spi = alloc_unr(drv_spi_unr);
        if (drv_spi == -1) {
                dprintf("ipsec_accel_sa_install_newkey: cannot alloc "
                    "drv_spi if %s spi %#x\n", if_name(ifp),
                    be32toh(tq->sav->spi));
                return (0);
        }
        if (tq->sav->accel_ifname != NULL &&
            strcmp(tq->sav->accel_ifname, if_name(ifp)) != 0) {
                error = ipsec_accel_handle_sav(tq->sav,
                    ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
                goto out;
        }
        error = ifp->if_ipsec_accel_m->if_sa_newkey(ifp, tq->sav,
            drv_spi, &priv);
        if (error != 0) {
                if (error == EOPNOTSUPP) {
                        dprintf("ipsec_accel_sa_newkey: driver "
                            "refused sa if %s spi %#x\n",
                            if_name(ifp), be32toh(tq->sav->spi));
                        error = ipsec_accel_handle_sav(tq->sav,
                            ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
                        /* XXXKIB */
                } else {
                        dprintf("ipsec_accel_sa_newkey: driver "
                            "error %d if %s spi %#x\n",
                            error, if_name(ifp), be32toh(tq->sav->spi));
                        /* XXXKIB */
                }
        } else {
                error = ipsec_accel_handle_sav(tq->sav, ifp,
                    drv_spi, priv, IFP_HS_HANDLED, NULL);
                if (error != 0) {
                        /* XXXKIB */
                        dprintf("ipsec_accel_sa_newkey: handle_sav "
                            "err %d if %s spi %#x\n", error,
                            if_name(ifp), be32toh(tq->sav->spi));
                }
        }
out:
        return (0);
}

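/*
 * Taskqueue handler for SA installation.  Runs with the install vnet
 * set, walks all capable interfaces, and then resolves a possible race
 * with ipsec_accel_forget_sav(): if deinstall was requested while the
 * install was in progress, the handles are torn down right here.
 */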
static void
ipsec_accel_sa_newkey_act(void *context, int pending)
{
        struct ipsec_accel_install_newkey_tq *tq;
        void *tqf;
        struct secasvar *sav;

        tq = context;
        tqf = NULL;
        sav = tq->sav;
        CURVNET_SET(tq->install_vnet);
        mtx_lock(&ipsec_accel_sav_tmp);
        if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
            SADB_KEY_ACCEL_DEINST)) == 0 &&
            sav->state == SADB_SASTATE_MATURE) {
                sav->accel_flags |= SADB_KEY_ACCEL_INST;
                mtx_unlock(&ipsec_accel_sav_tmp);
                if_foreach_sleep(ipsec_accel_sa_install_match, context,
                    ipsec_accel_sa_newkey_cb, context);
                ipsec_accel_alloc_forget_tq(sav);
                mtx_lock(&ipsec_accel_sav_tmp);

                /*
                 * If ipsec_accel_forget_sav() raced with us and set
                 * the flag, do its work.  Its task cannot execute in
                 * parallel since the ipsec_accel taskqueue is
                 * single-threaded.
                 */
                if ((sav->accel_flags & SADB_KEY_ACCEL_DEINST) != 0) {
                        tqf = (void *)sav->accel_forget_tq;
                        sav->accel_forget_tq = 0;
                        ipsec_accel_forget_sav_clear(sav);
                }
        }
        mtx_unlock(&ipsec_accel_sav_tmp);
        key_freesav(&tq->sav);
        CURVNET_RESTORE();
        free(tq, M_TEMP);
        free(tqf, M_TEMP);
}

static void
ipsec_accel_sa_newkey_impl(struct secasvar *sav)
{
        struct ipsec_accel_install_newkey_tq *tq;

        if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
            SADB_KEY_ACCEL_DEINST)) != 0)
                return;

        dprintf(
            "ipsec_accel_sa_install_newkey: spi %#x flags %#x seq %d\n",
            be32toh(sav->spi), sav->flags, sav->seq);

        tq = malloc(sizeof(*tq), M_TEMP, M_NOWAIT);
        if (tq == NULL) {
                dprintf("ipsec_accel_sa_install_newkey: no memory for tq, "
                    "spi %#x\n", be32toh(sav->spi));
                /* XXXKIB */
                return;
        }

        refcount_acquire(&sav->refcnt);

        TASK_INIT(&tq->install_task, 0, ipsec_accel_sa_newkey_act, tq);
        tq->sav = sav;
        tq->install_vnet = curthread->td_vnet;
        taskqueue_enqueue(ipsec_accel_tq, &tq->install_task);
}

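/*
 * Record the outcome of offloading an SA to an interface: allocate the
 * handle, link it into the SA and the global list, and insert it into
 * the drv_spi trie.  Fails with EALREADY if this interface already has
 * a handle for the SA.
 */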
static int
ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
    u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires)
{
        struct ifp_handle_sav *ihs, *i;
        int error;

        MPASS(__bitcount(flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) == 1);

        ihs = malloc(sizeof(*ihs), M_IPSEC_MISC, M_WAITOK | M_ZERO);
        ihs->ifp = ifp;
        ihs->sav = sav;
        ihs->drv_spi = drv_spi;
        ihs->ifdata = priv;
        ihs->flags = flags;
        ihs->hdr_ext_size = esp_hdrsiz(sav);
        memcpy(&ihs->xfh.dst, &sav->sah->saidx.dst, sizeof(ihs->xfh.dst));
        ihs->xfh.spi = sav->spi;
        ihs->xfh.proto = sav->sah->saidx.proto;
        ihs->xfh.mode = sav->sah->saidx.mode;
        mtx_lock(&ipsec_accel_sav_tmp);
        CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
                if (i->ifp == ifp) {
                        error = EALREADY;
                        goto errout;
                }
        }
        error = DRVSPI_SA_PCTRIE_INSERT(&drv_spi_pctrie, ihs);
        if (error != 0)
                goto errout;
        if_ref(ihs->ifp);
        CK_LIST_INSERT_HEAD(&sav->accel_ifps, ihs, sav_link);
        CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, ihs, sav_allh_link);
        mtx_unlock(&ipsec_accel_sav_tmp);
        if (ires != NULL)
                *ires = ihs;
        return (0);
errout:
        mtx_unlock(&ipsec_accel_sav_tmp);
        free(ihs, M_IPSEC_MISC);
        if (ires != NULL)
                *ires = NULL;
        return (error);
}

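/*
 * Unlink one handle and undo the offload.  The ipsec_accel_sav_tmp
 * mutex is dropped around the epoch wait and the driver deinstall
 * call, and re-acquired before returning to the caller's loop.
 */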
static void
ipsec_accel_forget_handle_sav(struct ifp_handle_sav *i, bool freesav)
{
        struct ifnet *ifp;
        struct secasvar *sav;

        mtx_assert(&ipsec_accel_sav_tmp, MA_OWNED);

        CK_LIST_REMOVE(i, sav_link);
        CK_LIST_REMOVE(i, sav_allh_link);
        DRVSPI_SA_PCTRIE_REMOVE(&drv_spi_pctrie, i->drv_spi);
        mtx_unlock(&ipsec_accel_sav_tmp);
        NET_EPOCH_WAIT();
        ifp = i->ifp;
        sav = i->sav;
        if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
            IFP_HS_HANDLED) {
                dprintf("sa deinstall %s %p spi %#x ifl %#x\n",
                    if_name(ifp), sav, be32toh(sav->spi), i->flags);
                ifp->if_ipsec_accel_m->if_sa_deinstall(ifp,
                    i->drv_spi, i->ifdata);
        }
        if_rele(ifp);
        free_unr(drv_spi_unr, i->drv_spi);
        free(i, M_IPSEC_MISC);
        if (freesav)
                key_freesav(&sav);
        mtx_lock(&ipsec_accel_sav_tmp);
}

static void
ipsec_accel_forget_sav_clear(struct secasvar *sav)
{
        struct ifp_handle_sav *i;

        for (;;) {
                i = CK_LIST_FIRST(&sav->accel_ifps);
                if (i == NULL)
                        break;
                ipsec_accel_forget_handle_sav(i, false);
        }
}

static void
ipsec_accel_forget_sav_act(void *arg, int pending)
{
        struct ipsec_accel_forget_tq *tq;
        struct secasvar *sav;

        tq = arg;
        sav = tq->sav;
        CURVNET_SET(tq->forget_vnet);
        mtx_lock(&ipsec_accel_sav_tmp);
        ipsec_accel_forget_sav_clear(sav);
        mtx_unlock(&ipsec_accel_sav_tmp);
        key_freesav(&sav);
        CURVNET_RESTORE();
        free(tq, M_TEMP);
}

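/*
 * Request removal of all hardware state for an SA.  The work is done
 * from the single-threaded ipsec_accel taskqueue; if the pre-allocated
 * forget context was already consumed (or never allocated), the install
 * task that observes SADB_KEY_ACCEL_DEINST performs the cleanup instead.
 */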
void
ipsec_accel_forget_sav_impl(struct secasvar *sav)
{
        struct ipsec_accel_forget_tq *tq;

        mtx_lock(&ipsec_accel_sav_tmp);
        sav->accel_flags |= SADB_KEY_ACCEL_DEINST;
        tq = (void *)atomic_load_ptr(&sav->accel_forget_tq);
        if (tq == NULL || !atomic_cmpset_ptr(&sav->accel_forget_tq,
            (uintptr_t)tq, 0)) {
                mtx_unlock(&ipsec_accel_sav_tmp);
                return;
        }
        mtx_unlock(&ipsec_accel_sav_tmp);

        refcount_acquire(&sav->refcnt);
        TASK_INIT(&tq->forget_task, 0, ipsec_accel_forget_sav_act, tq);
        tq->forget_vnet = curthread->td_vnet;
        tq->sav = sav;
        taskqueue_enqueue(ipsec_accel_tq, &tq->forget_task);
}

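/*
 * Drop all SA offload state bound to a departing interface.  A marker
 * entry keeps the position in the global handle list while the lock is
 * dropped inside ipsec_accel_forget_handle_sav().
 */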
static void
ipsec_accel_on_ifdown_sav(struct ifnet *ifp)
{
        struct ifp_handle_sav *i, *marker;

        marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
        marker->flags = IFP_HS_MARKER;

        mtx_lock(&ipsec_accel_sav_tmp);
        CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, marker,
            sav_allh_link);
        for (;;) {
                i = CK_LIST_NEXT(marker, sav_allh_link);
                if (i == NULL)
                        break;
                CK_LIST_REMOVE(marker, sav_allh_link);
                CK_LIST_INSERT_AFTER(i, marker, sav_allh_link);
                if (i->ifp == ifp) {
                        refcount_acquire(&i->sav->refcnt); /* XXXKIB wrap ? */
                        ipsec_accel_forget_handle_sav(i, true);
                }
        }
        CK_LIST_REMOVE(marker, sav_allh_link);
        mtx_unlock(&ipsec_accel_sav_tmp);
        free(marker, M_IPSEC_MISC);
}

static struct ifp_handle_sav *
ipsec_accel_is_accel_sav_ptr_raw(struct secasvar *sav, struct ifnet *ifp)
{
        struct ifp_handle_sav *i;

        if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
                return (NULL);
        CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
                if (i->ifp == ifp)
                        return (i);
        }
        return (NULL);
}

static struct ifp_handle_sav *
ipsec_accel_is_accel_sav_ptr(struct secasvar *sav, struct ifnet *ifp)
{
        NET_EPOCH_ASSERT();
        return (ipsec_accel_is_accel_sav_ptr_raw(sav, ifp));
}

static bool
ipsec_accel_is_accel_sav_impl(struct secasvar *sav)
{
        return (!CK_LIST_EMPTY(&sav->accel_ifps));
}

static struct secasvar *
ipsec_accel_drvspi_to_sa(u_int drv_spi)
{
        struct ifp_handle_sav *i;

        i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
        if (i == NULL)
                return (NULL);
        return (i->sav);
}

static struct ifp_handle_sp *
ipsec_accel_find_accel_sp(struct secpolicy *sp, if_t ifp)
{
        struct ifp_handle_sp *i;

        CK_LIST_FOREACH(i, &sp->accel_ifps, sp_link) {
                if (i->ifp == ifp)
                        return (i);
        }
        return (NULL);
}

static bool
ipsec_accel_is_accel_sp(struct secpolicy *sp, if_t ifp)
{
        return (ipsec_accel_find_accel_sp(sp, ifp) != NULL);
}

static int
ipsec_accel_remember_sp(struct secpolicy *sp, if_t ifp,
    struct ifp_handle_sp **ip)
{
        struct ifp_handle_sp *i;

        i = malloc(sizeof(*i), M_IPSEC_MISC, M_WAITOK | M_ZERO);
        i->sp = sp;
        i->ifp = ifp;
        if_ref(ifp);
        i->flags = IFP_HP_HANDLED;
        mtx_lock(&ipsec_accel_sav_tmp);
        CK_LIST_INSERT_HEAD(&sp->accel_ifps, i, sp_link);
        CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, i, sp_allh_link);
        mtx_unlock(&ipsec_accel_sav_tmp);
        *ip = i;
        return (0);
}

static bool
ipsec_accel_spdadd_match(if_t ifp, void *arg)
{
        struct secpolicy *sp;

        if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0 ||
            ifp->if_ipsec_accel_m->if_spdadd == NULL)
                return (false);
        sp = arg;
        if (sp->accel_ifname != NULL &&
            strcmp(sp->accel_ifname, if_name(ifp)) != 0)
                return (false);
        if (ipsec_accel_is_accel_sp(sp, ifp))
                return (false);
        return (true);
}

static int
ipsec_accel_spdadd_cb(if_t ifp, void *arg)
{
        struct secpolicy *sp;
        struct inpcb *inp;
        struct ifp_handle_sp *i;
        int error;

        sp = arg;
        inp = sp->ipsec_accel_add_sp_inp;
        dprintf("ipsec_accel_spdadd_cb: ifp %s m %p sp %p inp %p\n",
            if_name(ifp), ifp->if_ipsec_accel_m->if_spdadd, sp, inp);
        error = ipsec_accel_remember_sp(sp, ifp, &i);
        if (error != 0) {
                dprintf("ipsec_accel_spdadd: %s if_spdadd %p remember res %d\n",
                    if_name(ifp), sp, error);
                return (0);
        }
        error = ifp->if_ipsec_accel_m->if_spdadd(ifp, sp, inp, &i->ifdata);
        if (error != 0) {
                i->flags |= IFP_HP_REJECTED;
                dprintf("ipsec_accel_spdadd: %s if_spdadd %p res %d\n",
                    if_name(ifp), sp, error);
        }
        return (0);
}

static void
ipsec_accel_spdadd_act(void *arg, int pending)
{
        struct secpolicy *sp;
        struct inpcb *inp;

        sp = arg;
        CURVNET_SET(sp->accel_add_tq.adddel_vnet);
        if_foreach_sleep(ipsec_accel_spdadd_match, arg,
            ipsec_accel_spdadd_cb, arg);
        inp = sp->ipsec_accel_add_sp_inp;
        if (inp != NULL) {
                INP_WLOCK(inp);
                if (!in_pcbrele_wlocked(inp))
                        INP_WUNLOCK(inp);
                sp->ipsec_accel_add_sp_inp = NULL;
        }
        CURVNET_RESTORE();
        key_freesp(&sp);
}

void
ipsec_accel_spdadd_impl(struct secpolicy *sp, struct inpcb *inp)
{
        struct ipsec_accel_adddel_sp_tq *tq;

        if (sp == NULL)
                return;
        if (sp->tcount == 0 && inp == NULL)
                return;
        tq = &sp->accel_add_tq;
        if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
                return;
        tq->adddel_vnet = curthread->td_vnet;
        sp->ipsec_accel_add_sp_inp = inp;
        if (inp != NULL)
                in_pcbref(inp);
        TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spdadd_act, sp);
        key_addref(sp);
        taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
}

static void
ipsec_accel_spddel_act(void *arg, int pending)
{
        struct ifp_handle_sp *i;
        struct secpolicy *sp;
        int error;

        sp = arg;
        CURVNET_SET(sp->accel_del_tq.adddel_vnet);
        mtx_lock(&ipsec_accel_sav_tmp);
        for (;;) {
                i = CK_LIST_FIRST(&sp->accel_ifps);
                if (i == NULL)
                        break;
                CK_LIST_REMOVE(i, sp_link);
                CK_LIST_REMOVE(i, sp_allh_link);
                mtx_unlock(&ipsec_accel_sav_tmp);
                NET_EPOCH_WAIT();
                if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
                    IFP_HP_HANDLED) {
                        dprintf("spd deinstall %s %p\n", if_name(i->ifp), sp);
                        error = i->ifp->if_ipsec_accel_m->if_spddel(i->ifp,
                            sp, i->ifdata);
                        if (error != 0) {
                                dprintf(
                    "ipsec_accel_spddel: %s if_spddel %p res %d\n",
                                    if_name(i->ifp), sp, error);
                        }
                }
                if_rele(i->ifp);
                free(i, M_IPSEC_MISC);
                mtx_lock(&ipsec_accel_sav_tmp);
        }
        mtx_unlock(&ipsec_accel_sav_tmp);
        key_freesp(&sp);
        CURVNET_RESTORE();
}

void
ipsec_accel_spddel_impl(struct secpolicy *sp)
{
        struct ipsec_accel_adddel_sp_tq *tq;

        if (sp == NULL)
                return;

        tq = &sp->accel_del_tq;
        if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
                return;
        tq->adddel_vnet = curthread->td_vnet;
        TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spddel_act, sp);
        key_addref(sp);
        taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
}

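/*
 * Drop all security-policy offload state bound to a departing
 * interface, using the same marker technique as
 * ipsec_accel_on_ifdown_sav().
 */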
static void
ipsec_accel_on_ifdown_sp(struct ifnet *ifp)
{
        struct ifp_handle_sp *i, *marker;
        struct secpolicy *sp;
        int error;

        marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
        marker->flags = IFP_HP_MARKER;

        mtx_lock(&ipsec_accel_sav_tmp);
        CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, marker,
            sp_allh_link);
        for (;;) {
                i = CK_LIST_NEXT(marker, sp_allh_link);
                if (i == NULL)
                        break;
                CK_LIST_REMOVE(marker, sp_allh_link);
                CK_LIST_INSERT_AFTER(i, marker, sp_allh_link);
                if (i->ifp != ifp)
                        continue;

                sp = i->sp;
                key_addref(sp);
                CK_LIST_REMOVE(i, sp_link);
                CK_LIST_REMOVE(i, sp_allh_link);
                mtx_unlock(&ipsec_accel_sav_tmp);
                NET_EPOCH_WAIT();
                if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
                    IFP_HP_HANDLED) {
                        dprintf("spd deinstall %s %p\n", if_name(ifp), sp);
                        error = ifp->if_ipsec_accel_m->if_spddel(ifp,
                            sp, i->ifdata);
                        if (error != 0) {
                                dprintf(
                    "ipsec_accel_on_ifdown_sp: %s if_spddel %p res %d\n",
                                    if_name(ifp), sp, error);
                        }
                }
                key_freesp(&sp);
                if_rele(ifp);
                free(i, M_IPSEC_MISC);
                mtx_lock(&ipsec_accel_sav_tmp);
        }
        CK_LIST_REMOVE(marker, sp_allh_link);
        mtx_unlock(&ipsec_accel_sav_tmp);
        free(marker, M_IPSEC_MISC);
}

static void
ipsec_accel_on_ifdown_impl(struct ifnet *ifp)
{
        ipsec_accel_on_ifdown_sp(ifp);
        ipsec_accel_on_ifdown_sav(ifp);
}

static void
ipsec_accel_ifdetach_event(void *arg __unused, struct ifnet *ifp)
{
        if ((ifp->if_flags & IFF_RENAMING) != 0)
                return;
        ipsec_accel_on_ifdown_impl(ifp);
}

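/*
 * Estimate whether the packet still fits the MTU once the ESP header,
 * IV, padding, and ICV are added, mirroring the length computation of
 * the software ESP output path.
 */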
static bool
ipsec_accel_output_pad(struct mbuf *m, struct secasvar *sav, int skip, int mtu)
{
        int alen, blks, hlen, padding, rlen;

        rlen = m->m_pkthdr.len - skip;
        hlen = ((sav->flags & SADB_X_EXT_OLD) != 0 ? sizeof(struct esp) :
            sizeof(struct newesp)) + sav->ivlen;
        blks = MAX(4, SAV_ISCTR(sav) && VNET(esp_ctr_compatibility) ?
            sav->tdb_encalgxform->native_blocksize :
            sav->tdb_encalgxform->blocksize);
        padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;
        alen = xform_ah_authsize(sav->tdb_authalgxform);

        return (skip + hlen + rlen + padding + alen <= mtu);
}

static bool
ipsec_accel_output_tag(struct mbuf *m, u_int drv_spi)
{
        struct ipsec_accel_out_tag *tag;

        tag = (struct ipsec_accel_out_tag *)m_tag_get(
            PACKET_TAG_IPSEC_ACCEL_OUT, sizeof(*tag), M_NOWAIT);
        if (tag == NULL)
                return (false);
        tag->drv_spi = drv_spi;
        m_tag_prepend(m, &tag->tag);
        return (true);
}

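/*
 * Decide whether a packet can be sent through the interface's inline
 * IPsec path.  On success the mbuf is tagged with the driver SPI, the
 * transfer is accounted against the SA software counters, and *hwassist
 * is set from the driver so that TCP can keep using TSO over the
 * offloaded SA.
 */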
bool
ipsec_accel_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp,
    struct secpolicy *sp, struct secasvar *sav, int af, int mtu, int *hwassist)
{
        struct ifp_handle_sav *i;
        struct ip *ip;
        struct tcpcb *tp;
        u_long ip_len, skip;
        bool res;

        *hwassist = 0;
        res = false;
        if (ifp == NULL)
                return (res);

        M_ASSERTPKTHDR(m);
        NET_EPOCH_ASSERT();

        if (sav == NULL) {
                res = ipsec_accel_output_tag(m, IPSEC_ACCEL_DRV_SPI_BYPASS);
                goto out;
        }

        i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
        if (i == NULL || (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
            IFP_HS_HANDLED)
                goto out;

        if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
                ip_len = m->m_pkthdr.len;
                if (ip_len + i->hdr_ext_size > mtu)
                        goto out;
                switch (af) {
                case AF_INET:
                        ip = mtod(m, struct ip *);
                        skip = ip->ip_hl << 2;
                        break;
                case AF_INET6:
                        skip = sizeof(struct ip6_hdr);
                        break;
                default:
                        __unreachable();
                }
                if (!ipsec_accel_output_pad(m, sav, skip, mtu))
                        goto out;
        }

        if (!ipsec_accel_output_tag(m, i->drv_spi))
                goto out;

        ipsec_accel_sa_recordxfer(sav, m);
        key_freesav(&sav);
        if (sp != NULL)
                key_freesp(&sp);

        *hwassist = ifp->if_ipsec_accel_m->if_hwassist(ifp, sav,
            i->drv_spi, i->ifdata);
        res = true;
out:
        if (inp != NULL && inp->inp_pcbinfo == &V_tcbinfo) {
                INP_WLOCK_ASSERT(inp);
                tp = (struct tcpcb *)inp;
                if (res && (*hwassist & (CSUM_TSO | CSUM_IP6_TSO)) != 0) {
                        tp->t_flags2 |= TF2_IPSEC_TSO;
                } else {
                        tp->t_flags2 &= ~TF2_IPSEC_TSO;
                }
        }
        return (res);
}

struct ipsec_accel_in_tag *
ipsec_accel_input_tag_lookup(const struct mbuf *m)
{
        struct ipsec_accel_in_tag *tag;
        struct m_tag *xtag;

        xtag = m_tag_find(__DECONST(struct mbuf *, m),
            PACKET_TAG_IPSEC_ACCEL_IN, NULL);
        if (xtag == NULL)
                return (NULL);
        tag = __containerof(xtag, struct ipsec_accel_in_tag, tag);
        return (tag);
}

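/*
 * Input counterpart: packets already decrypted by the NIC carry a
 * PACKET_TAG_IPSEC_ACCEL_IN tag with the driver SPI.  Returns ENXIO
 * when no tag is present, so the caller falls back to the software
 * path, and EINPROGRESS after dropping a packet with an out-of-range
 * SPI.
 */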
int
ipsec_accel_input(struct mbuf *m, int offset, int proto)
{
        struct secasvar *sav;
        struct ipsec_accel_in_tag *tag;

        tag = ipsec_accel_input_tag_lookup(m);
        if (tag == NULL)
                return (ENXIO);

        if (tag->drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
            tag->drv_spi > IPSEC_ACCEL_DRV_SPI_MAX) {
                dprintf("if %s mbuf %p drv_spi %d invalid, packet dropped\n",
                    (m->m_flags & M_PKTHDR) != 0 ? if_name(m->m_pkthdr.rcvif) :
                    "<unknown>", m, tag->drv_spi);
                m_freem(m);
                return (EINPROGRESS);
        }

        sav = ipsec_accel_drvspi_to_sa(tag->drv_spi);
        if (sav != NULL)
                ipsec_accel_sa_recordxfer(sav, m);
        return (0);
}

static void
ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m)
{
        counter_u64_add(sav->accel_lft_sw, 1);
        counter_u64_add(sav->accel_lft_sw + 1, m->m_pkthdr.len);
        if (sav->accel_firstused == 0)
                sav->accel_firstused = time_second;
}

static void
ipsec_accel_sa_lifetime_update(struct seclifetime *lft_c,
    const struct seclifetime *lft_l)
{
        lft_c->allocations += lft_l->allocations;
        lft_c->bytes += lft_l->bytes;
        lft_c->usetime = min(lft_c->usetime, lft_l->usetime);
}

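/*
 * Called by drivers to push hardware byte/packet counters for one
 * (interface, drv_spi) handle.  Deltas against the previously reported
 * values are folded into the SA lifetime counters and the accumulated
 * hardware totals.
 */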
static void
ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav, if_t ifp,
    u_int drv_spi, uint64_t octets, uint64_t allocs)
{
        struct epoch_tracker et;
        struct ifp_handle_sav *i;
        uint64_t odiff, adiff;

        NET_EPOCH_ENTER(et);
        mtx_lock(&ipsec_accel_cnt_lock);

        if (allocs != 0) {
                if (sav->firstused == 0)
                        sav->firstused = time_second;
                if (sav->accel_firstused == 0)
                        sav->accel_firstused = time_second;
        }

        CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
                if (i->ifp == ifp && i->drv_spi == drv_spi)
                        break;
        }
        if (i == NULL)
                goto out;

        odiff = octets - i->cnt_octets;
        adiff = allocs - i->cnt_allocs;

        if (sav->lft_c != NULL) {
                counter_u64_add(sav->lft_c_bytes, odiff);
                counter_u64_add(sav->lft_c_allocations, adiff);
        }

        i->cnt_octets = octets;
        i->cnt_allocs = allocs;
        sav->accel_hw_octets += odiff;
        sav->accel_hw_allocs += adiff;

out:
        mtx_unlock(&ipsec_accel_cnt_lock);
        NET_EPOCH_EXIT(et);
}

static int
ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs)
{
        struct ifp_handle_sav *i;
        int error;

        NET_EPOCH_ASSERT();
        error = 0;

        mtx_lock(&ipsec_accel_cnt_lock);
        CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
                if (i->ifp == ifp && i->drv_spi == drv_spi) {
                        *octets = i->cnt_octets;
                        *allocs = i->cnt_allocs;
                        break;
                }
        }
        if (i == NULL)
                error = ENOENT;
        mtx_unlock(&ipsec_accel_cnt_lock);
        return (error);
}

static void
ipsec_accel_sa_lifetime_hw(struct secasvar *sav, if_t ifp,
    struct seclifetime *lft)
{
        struct ifp_handle_sav *i;
        if_sa_cnt_fn_t p;

        IFNET_RLOCK_ASSERT();

        i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
        if (i != NULL && (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
            IFP_HS_HANDLED) {
                p = ifp->if_ipsec_accel_m->if_sa_cnt;
                if (p != NULL)
                        p(ifp, sav, i->drv_spi, i->ifdata, lft);
        }
}

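/*
 * Lifetime query dispatcher.  Depending on op it reports the hardware
 * counters of a single interface, the software-maintained totals, or
 * the sum over all interfaces that accepted the SA; with IF_SA_CNT_UPD
 * the result is added to *lft_c instead of overwriting it.
 */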
static int
ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
    struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
    struct rm_priotracker *sahtree_trackerp)
{
        struct seclifetime lft_l, lft_s;
        struct ifp_handle_sav *i;
        if_t ifp1;
        if_sa_cnt_fn_t p;
        int error;

        error = 0;
        memset(&lft_l, 0, sizeof(lft_l));
        memset(&lft_s, 0, sizeof(lft_s));

        switch (op & ~IF_SA_CNT_UPD) {
        case IF_SA_CNT_IFP_HW_VAL:
                ipsec_accel_sa_lifetime_hw(sav, ifp, &lft_l);
                ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
                break;

        case IF_SA_CNT_TOTAL_SW_VAL:
                lft_l.allocations = (uint32_t)counter_u64_fetch(
                    sav->accel_lft_sw);
                lft_l.bytes = counter_u64_fetch(sav->accel_lft_sw + 1);
                lft_l.usetime = sav->accel_firstused;
                break;

        case IF_SA_CNT_TOTAL_HW_VAL:
                IFNET_RLOCK_ASSERT();
                CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
                        if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
                            IFP_HS_HANDLED)
                                continue;
                        ifp1 = i->ifp;
                        p = ifp1->if_ipsec_accel_m->if_sa_cnt;
                        if (p == NULL)
                                continue;
                        memset(&lft_s, 0, sizeof(lft_s));
                        if (sahtree_trackerp != NULL)
                                ipsec_sahtree_runlock(sahtree_trackerp);
                        error = p(ifp1, sav, i->drv_spi, i->ifdata, &lft_s);
                        if (sahtree_trackerp != NULL)
                                ipsec_sahtree_rlock(sahtree_trackerp);
                        if (error == 0)
                                ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
                }
                break;
        }

        if (error == 0) {
                if ((op & IF_SA_CNT_UPD) == 0)
                        memset(lft_c, 0, sizeof(*lft_c));
                ipsec_accel_sa_lifetime_update(lft_c, &lft_l);
        }

        return (error);
}

static void
ipsec_accel_sync_imp(void)
{
        taskqueue_drain_all(ipsec_accel_tq);
}

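/*
 * Build the PF_KEY extension(s) naming the interface(s) an SA is
 * offloaded to: either the explicitly requested accel_ifname, or one
 * extension per interface that actually accepted the SA.
 */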
static struct mbuf *
ipsec_accel_key_setaccelif_impl(struct secasvar *sav)
{
        struct mbuf *m, *m1;
        struct ifp_handle_sav *i;
        struct epoch_tracker et;

        if (sav->accel_ifname != NULL)
                return (key_setaccelif(sav->accel_ifname));

        m = m1 = NULL;

        NET_EPOCH_ENTER(et);
        CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
                if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
                    IFP_HS_HANDLED) {
                        m1 = key_setaccelif(if_name(i->ifp));
                        if (m == NULL)
                                m = m1;
                        else if (m1 != NULL)
                                m_cat(m, m1);
                }
        }
        NET_EPOCH_EXIT(et);
        return (m);
}

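/*
 * Reconstruct the xform_history recorded for a driver SPI from the
 * corresponding SA handle in the drv_spi trie.
 */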
static bool
ipsec_accel_fill_xh_impl(if_t ifp, uint32_t drv_spi, struct xform_history *xh)
{
        struct ifp_handle_sav *i;

        if (drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
            drv_spi > IPSEC_ACCEL_DRV_SPI_MAX)
                return (false);

        i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
        if (i == NULL)
                return (false);
        memcpy(xh, &i->xfh, sizeof(*xh));
        return (true);
}

#endif /* IPSEC_OFFLOAD */