/*-
 * Copyright (c) 2021,2022 NVIDIA CORPORATION & AFFILIATES. ALL RIGHTS RESERVED.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ck.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/pctrie.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/protosw.h>
#include <sys/stdarg.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>

#include <netipsec/key.h>
#include <netipsec/keydb.h>
#include <netipsec/key_debug.h>
#include <netipsec/xform.h>
#include <netipsec/ipsec.h>
#include <netipsec/ipsec_offload.h>
#include <netipsec/ah_var.h>
#include <netipsec/esp.h>
#include <netipsec/esp_var.h>
#include <netipsec/ipcomp_var.h>

#ifdef IPSEC_OFFLOAD

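/*
 * Module-global offload state: ipsec_accel_sav_tmp serializes updates to
 * the per-SA and per-SP handle lists, drv_spi_unr hands out driver-side
 * SPI values from the IPSEC_ACCEL_DRV_SPI_MIN..MAX range, and
 * ipsec_accel_tq is the single-threaded taskqueue on which all install
 * and forget work runs.
 */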
static struct mtx ipsec_accel_sav_tmp;
static struct unrhdr *drv_spi_unr;
static struct mtx ipsec_accel_cnt_lock;
static struct taskqueue *ipsec_accel_tq;

struct ipsec_accel_install_newkey_tq {
	struct secasvar *sav;
	struct vnet *install_vnet;
	struct task install_task;
};

struct ipsec_accel_forget_tq {
	struct vnet *forget_vnet;
	struct task forget_task;
	struct secasvar *sav;
};

struct ifp_handle_sav {
	CK_LIST_ENTRY(ifp_handle_sav) sav_link;
	CK_LIST_ENTRY(ifp_handle_sav) sav_allh_link;
	struct secasvar *sav;
	struct ifnet *ifp;
	void *ifdata;
	uint64_t drv_spi;
	uint32_t flags;
	size_t hdr_ext_size;
	uint64_t cnt_octets;
	uint64_t cnt_allocs;
};

#define	IFP_HS_HANDLED	0x00000001
#define	IFP_HS_REJECTED	0x00000002
#define	IFP_HS_MARKER	0x00000010

static CK_LIST_HEAD(, ifp_handle_sav) ipsec_accel_all_sav_handles;

struct ifp_handle_sp {
	CK_LIST_ENTRY(ifp_handle_sp) sp_link;
	CK_LIST_ENTRY(ifp_handle_sp) sp_allh_link;
	struct secpolicy *sp;
	struct ifnet *ifp;
	void *ifdata;
	uint32_t flags;
};

#define	IFP_HP_HANDLED	0x00000001
#define	IFP_HP_REJECTED	0x00000002
#define	IFP_HP_MARKER	0x00000004

static CK_LIST_HEAD(, ifp_handle_sp) ipsec_accel_all_sp_handles;

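/*
 * Radix trie keyed by drv_spi.  It lets the input path map the driver SPI
 * carried in an mbuf tag back to the corresponding ifp_handle_sav (and from
 * there to the SA) without walking the per-SA lists.
 */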
static void *
drvspi_sa_trie_alloc(struct pctrie *ptree)
{
	void *res;

	res = malloc(pctrie_node_size(), M_IPSEC_MISC, M_ZERO | M_NOWAIT);
	if (res != NULL)
		pctrie_zone_init(res, 0, 0);
	return (res);
}

static void
drvspi_sa_trie_free(struct pctrie *ptree, void *node)
{
	free(node, M_IPSEC_MISC);
}

PCTRIE_DEFINE(DRVSPI_SA, ifp_handle_sav, drv_spi,
    drvspi_sa_trie_alloc, drvspi_sa_trie_free);
static struct pctrie drv_spi_pctrie;

static eventhandler_tag ipsec_accel_ifdetach_event_tag;

static void ipsec_accel_sa_newkey_impl(struct secasvar *sav);
static int ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
    u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires);
static void ipsec_accel_forget_sav_clear(struct secasvar *sav);
static struct ifp_handle_sav *ipsec_accel_is_accel_sav_ptr(struct secasvar *sav,
    struct ifnet *ifp);
static int ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
    struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
    struct rm_priotracker *sahtree_trackerp);
static void ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m);
static void ipsec_accel_sync_imp(void);
static bool ipsec_accel_is_accel_sav_impl(struct secasvar *sav);
static struct mbuf *ipsec_accel_key_setaccelif_impl(struct secasvar *sav);
static void ipsec_accel_on_ifdown_impl(struct ifnet *ifp);
static void ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t octets, uint64_t allocs);
static int ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs);
static void ipsec_accel_ifdetach_event(void *arg, struct ifnet *ifp);

static void
ipsec_accel_init(void *arg)
{
	mtx_init(&ipsec_accel_sav_tmp, "ipasat", MTX_DEF, 0);
	mtx_init(&ipsec_accel_cnt_lock, "ipascn", MTX_DEF, 0);
	drv_spi_unr = new_unrhdr(IPSEC_ACCEL_DRV_SPI_MIN,
	    IPSEC_ACCEL_DRV_SPI_MAX, &ipsec_accel_sav_tmp);
	ipsec_accel_tq = taskqueue_create("ipsec_offload", M_WAITOK,
	    taskqueue_thread_enqueue, &ipsec_accel_tq);
	(void)taskqueue_start_threads(&ipsec_accel_tq,
	    1 /* Must be single-threaded */, PWAIT,
	    "ipsec_offload");
	ipsec_accel_sa_newkey_p = ipsec_accel_sa_newkey_impl;
	ipsec_accel_forget_sav_p = ipsec_accel_forget_sav_impl;
	ipsec_accel_spdadd_p = ipsec_accel_spdadd_impl;
	ipsec_accel_spddel_p = ipsec_accel_spddel_impl;
	ipsec_accel_sa_lifetime_op_p = ipsec_accel_sa_lifetime_op_impl;
	ipsec_accel_sync_p = ipsec_accel_sync_imp;
	ipsec_accel_is_accel_sav_p = ipsec_accel_is_accel_sav_impl;
	ipsec_accel_key_setaccelif_p = ipsec_accel_key_setaccelif_impl;
	ipsec_accel_on_ifdown_p = ipsec_accel_on_ifdown_impl;
	ipsec_accel_drv_sa_lifetime_update_p =
	    ipsec_accel_drv_sa_lifetime_update_impl;
	ipsec_accel_drv_sa_lifetime_fetch_p =
	    ipsec_accel_drv_sa_lifetime_fetch_impl;
	pctrie_init(&drv_spi_pctrie);
	ipsec_accel_ifdetach_event_tag = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, ipsec_accel_ifdetach_event, NULL,
	    EVENTHANDLER_PRI_ANY);
}
SYSINIT(ipsec_accel_init, SI_SUB_VNET_DONE, SI_ORDER_ANY,
    ipsec_accel_init, NULL);

static void
ipsec_accel_fini(void *arg)
{
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    ipsec_accel_ifdetach_event_tag);
	ipsec_accel_sa_newkey_p = NULL;
	ipsec_accel_forget_sav_p = NULL;
	ipsec_accel_spdadd_p = NULL;
	ipsec_accel_spddel_p = NULL;
	ipsec_accel_sa_lifetime_op_p = NULL;
	ipsec_accel_sync_p = NULL;
	ipsec_accel_is_accel_sav_p = NULL;
	ipsec_accel_key_setaccelif_p = NULL;
	ipsec_accel_on_ifdown_p = NULL;
	ipsec_accel_drv_sa_lifetime_update_p = NULL;
	ipsec_accel_drv_sa_lifetime_fetch_p = NULL;
	ipsec_accel_sync_imp();
	clean_unrhdr(drv_spi_unr);	/* avoid panic, should go later */
	clear_unrhdr(drv_spi_unr);
	delete_unrhdr(drv_spi_unr);
	taskqueue_drain_all(ipsec_accel_tq);
	taskqueue_free(ipsec_accel_tq);
	mtx_destroy(&ipsec_accel_sav_tmp);
	mtx_destroy(&ipsec_accel_cnt_lock);
}
SYSUNINIT(ipsec_accel_fini, SI_SUB_VNET_DONE, SI_ORDER_ANY,
    ipsec_accel_fini, NULL);

SYSCTL_NODE(_net_inet_ipsec, OID_AUTO, offload, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");

static bool ipsec_offload_verbose = false;
SYSCTL_BOOL(_net_inet_ipsec_offload, OID_AUTO, verbose, CTLFLAG_RW,
    &ipsec_offload_verbose, 0,
    "Verbose SA/SP offload install and deinstall");

static void
dprintf(const char *fmt, ...)
{
	va_list ap;

	if (!ipsec_offload_verbose)
		return;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

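/*
 * Pre-allocate the context used by ipsec_accel_forget_sav_impl().  It is
 * attached to the SA with an atomic cmpset so that a concurrent allocation
 * attempt simply frees its copy; having the context ready up front means
 * the forget path never has to sleep for memory.
 */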
static void
ipsec_accel_alloc_forget_tq(struct secasvar *sav)
{
	void *ftq;

	if (sav->accel_forget_tq != 0)
		return;

	ftq = malloc(sizeof(struct ipsec_accel_forget_tq), M_TEMP, M_WAITOK);
	if (!atomic_cmpset_ptr(&sav->accel_forget_tq, 0, (uintptr_t)ftq))
		free(ftq, M_TEMP);
}

static bool
ipsec_accel_sa_install_match(if_t ifp, void *arg)
{
	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
		return (false);
	if (ifp->if_ipsec_accel_m->if_sa_newkey == NULL) {
		dprintf("driver bug ifp %s if_sa_newkey NULL\n",
		    if_name(ifp));
		return (false);
	}
	return (true);
}

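/*
 * Per-interface callback run by if_foreach_sleep() from the install task.
 * It allocates a driver SPI, offers the SA to the driver via if_sa_newkey,
 * and records the outcome as an ifp_handle_sav marked either HANDLED or
 * REJECTED.
 */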
static int
ipsec_accel_sa_newkey_cb(if_t ifp, void *arg)
{
	struct ipsec_accel_install_newkey_tq *tq;
	void *priv;
	u_int drv_spi;
	int error;

	tq = arg;

	dprintf("ipsec_accel_sa_newkey_act: ifp %s h %p spi %#x "
	    "flags %#x seq %d\n",
	    if_name(ifp), ifp->if_ipsec_accel_m->if_sa_newkey,
	    be32toh(tq->sav->spi), tq->sav->flags, tq->sav->seq);
	priv = NULL;
	drv_spi = alloc_unr(drv_spi_unr);
	if (tq->sav->accel_ifname != NULL &&
	    strcmp(tq->sav->accel_ifname, if_name(ifp)) != 0) {
		error = ipsec_accel_handle_sav(tq->sav,
		    ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
		goto out;
	}
	if (drv_spi == -1) {
		/* XXXKIB */
		dprintf("ipsec_accel_sa_install_newkey: cannot alloc "
		    "drv_spi if %s spi %#x\n", if_name(ifp),
		    be32toh(tq->sav->spi));
		return (ENOMEM);
	}
	error = ifp->if_ipsec_accel_m->if_sa_newkey(ifp, tq->sav,
	    drv_spi, &priv);
	if (error != 0) {
		if (error == EOPNOTSUPP) {
			dprintf("ipsec_accel_sa_newkey: driver "
			    "refused sa if %s spi %#x\n",
			    if_name(ifp), be32toh(tq->sav->spi));
			error = ipsec_accel_handle_sav(tq->sav,
			    ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
			/* XXXKIB */
		} else {
			dprintf("ipsec_accel_sa_newkey: driver "
			    "error %d if %s spi %#x\n",
			    error, if_name(ifp), be32toh(tq->sav->spi));
			/* XXXKIB */
		}
	} else {
		error = ipsec_accel_handle_sav(tq->sav, ifp,
		    drv_spi, priv, IFP_HS_HANDLED, NULL);
		if (error != 0) {
			/* XXXKIB */
			dprintf("ipsec_accel_sa_newkey: handle_sav "
			    "err %d if %s spi %#x\n", error,
			    if_name(ifp), be32toh(tq->sav->spi));
		}
	}
out:
	return (error);
}

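/*
 * Taskqueue handler for SA installation.  It only proceeds when the SA is
 * mature and neither SADB_KEY_ACCEL_INST nor SADB_KEY_ACCEL_DEINST is set,
 * walks all candidate interfaces, and then finishes any forget request
 * that raced with the install.
 */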
static void
ipsec_accel_sa_newkey_act(void *context, int pending)
{
	struct ipsec_accel_install_newkey_tq *tq;
	void *tqf;
	struct secasvar *sav;

	tq = context;
	tqf = NULL;
	sav = tq->sav;
	CURVNET_SET(tq->install_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
	    SADB_KEY_ACCEL_DEINST)) == 0 &&
	    sav->state == SADB_SASTATE_MATURE) {
		sav->accel_flags |= SADB_KEY_ACCEL_INST;
		mtx_unlock(&ipsec_accel_sav_tmp);
		if_foreach_sleep(ipsec_accel_sa_install_match, context,
		    ipsec_accel_sa_newkey_cb, context);
		ipsec_accel_alloc_forget_tq(sav);
		mtx_lock(&ipsec_accel_sav_tmp);

		/*
		 * If ipsec_accel_forget_sav() raced with us and set
		 * the flag, do its work.  Its task cannot execute in
		 * parallel since ipsec_accel taskqueue is single-threaded.
		 */
		if ((sav->accel_flags & SADB_KEY_ACCEL_DEINST) != 0) {
			tqf = (void *)sav->accel_forget_tq;
			sav->accel_forget_tq = 0;
			ipsec_accel_forget_sav_clear(sav);
		}
	}
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesav(&tq->sav);
	CURVNET_RESTORE();
	free(tq, M_TEMP);
	free(tqf, M_TEMP);
}

static void
ipsec_accel_sa_newkey_impl(struct secasvar *sav)
{
	struct ipsec_accel_install_newkey_tq *tq;

	if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
	    SADB_KEY_ACCEL_DEINST)) != 0)
		return;

	dprintf(
	    "ipsec_accel_sa_install_newkey: spi %#x flags %#x seq %d\n",
	    be32toh(sav->spi), sav->flags, sav->seq);

	tq = malloc(sizeof(*tq), M_TEMP, M_NOWAIT);
	if (tq == NULL) {
		dprintf("ipsec_accel_sa_install_newkey: no memory for tq, "
		    "spi %#x\n", be32toh(sav->spi));
		/* XXXKIB */
		return;
	}

	refcount_acquire(&sav->refcnt);

	TASK_INIT(&tq->install_task, 0, ipsec_accel_sa_newkey_act, tq);
	tq->sav = sav;
	tq->install_vnet = curthread->td_vnet;
	taskqueue_enqueue(ipsec_accel_tq, &tq->install_task);
}

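/*
 * Record the driver's answer for one interface: allocate an ifp_handle_sav
 * and link it into the SA's accel_ifps list, the global handle list, and
 * the drv_spi trie.  Exactly one of IFP_HS_HANDLED or IFP_HS_REJECTED must
 * be passed in flags; a second handle for the same interface yields
 * EALREADY.
 */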
static int
ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
    u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires)
{
	struct ifp_handle_sav *ihs, *i;
	int error;

	MPASS(__bitcount(flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) == 1);

	ihs = malloc(sizeof(*ihs), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	ihs->ifp = ifp;
	ihs->sav = sav;
	ihs->drv_spi = drv_spi;
	ihs->ifdata = priv;
	ihs->flags = flags;
	ihs->hdr_ext_size = esp_hdrsiz(sav);
	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp) {
			error = EALREADY;
			goto errout;
		}
	}
	error = DRVSPI_SA_PCTRIE_INSERT(&drv_spi_pctrie, ihs);
	if (error != 0)
		goto errout;
	if_ref(ihs->ifp);
	CK_LIST_INSERT_HEAD(&sav->accel_ifps, ihs, sav_link);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, ihs, sav_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	if (ires != NULL)
		*ires = ihs;
	return (0);
errout:
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(ihs, M_IPSEC_MISC);
	if (ires != NULL)
		*ires = NULL;
	return (error);
}

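/*
 * Tear down a single ifp_handle_sav.  The handle is unlinked under the
 * mutex, then the mutex is dropped and an epoch wait ensures no reader
 * still dereferences it before if_sa_deinstall is called and the handle,
 * driver SPI, and interface reference are released.  The mutex is
 * reacquired before returning so callers can keep iterating.
 */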
static void
ipsec_accel_forget_handle_sav(struct ifp_handle_sav *i, bool freesav)
{
	struct ifnet *ifp;
	struct secasvar *sav;

	mtx_assert(&ipsec_accel_sav_tmp, MA_OWNED);

	CK_LIST_REMOVE(i, sav_link);
	CK_LIST_REMOVE(i, sav_allh_link);
	DRVSPI_SA_PCTRIE_REMOVE(&drv_spi_pctrie, i->drv_spi);
	mtx_unlock(&ipsec_accel_sav_tmp);
	NET_EPOCH_WAIT();
	ifp = i->ifp;
	sav = i->sav;
	if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
	    IFP_HS_HANDLED) {
		dprintf("sa deinstall %s %p spi %#x ifl %#x\n",
		    if_name(ifp), sav, be32toh(sav->spi), i->flags);
		ifp->if_ipsec_accel_m->if_sa_deinstall(ifp,
		    i->drv_spi, i->ifdata);
	}
	if_rele(ifp);
	free_unr(drv_spi_unr, i->drv_spi);
	free(i, M_IPSEC_MISC);
	if (freesav)
		key_freesav(&sav);
	mtx_lock(&ipsec_accel_sav_tmp);
}

static void
ipsec_accel_forget_sav_clear(struct secasvar *sav)
{
	struct ifp_handle_sav *i;

	for (;;) {
		i = CK_LIST_FIRST(&sav->accel_ifps);
		if (i == NULL)
			break;
		ipsec_accel_forget_handle_sav(i, false);
	}
}

static void
ipsec_accel_forget_sav_act(void *arg, int pending)
{
	struct ipsec_accel_forget_tq *tq;
	struct secasvar *sav;

	tq = arg;
	sav = tq->sav;
	CURVNET_SET(tq->forget_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	ipsec_accel_forget_sav_clear(sav);
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesav(&sav);
	CURVNET_RESTORE();
	free(tq, M_TEMP);
}

void
ipsec_accel_forget_sav_impl(struct secasvar *sav)
{
	struct ipsec_accel_forget_tq *tq;

	mtx_lock(&ipsec_accel_sav_tmp);
	sav->accel_flags |= SADB_KEY_ACCEL_DEINST;
	tq = (void *)atomic_load_ptr(&sav->accel_forget_tq);
	if (tq == NULL || !atomic_cmpset_ptr(&sav->accel_forget_tq,
	    (uintptr_t)tq, 0)) {
		mtx_unlock(&ipsec_accel_sav_tmp);
		return;
	}
	mtx_unlock(&ipsec_accel_sav_tmp);

	refcount_acquire(&sav->refcnt);
	TASK_INIT(&tq->forget_task, 0, ipsec_accel_forget_sav_act, tq);
	tq->forget_vnet = curthread->td_vnet;
	tq->sav = sav;
	taskqueue_enqueue(ipsec_accel_tq, &tq->forget_task);
}

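/*
 * Drop every SA handle that points at a departing interface.  A marker
 * element is threaded through the global handle list so iteration can
 * continue after ipsec_accel_forget_handle_sav() temporarily releases
 * the mutex.
 */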
static void
ipsec_accel_on_ifdown_sav(struct ifnet *ifp)
{
	struct ifp_handle_sav *i, *marker;

	marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	marker->flags = IFP_HS_MARKER;

	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, marker,
	    sav_allh_link);
	for (;;) {
		i = CK_LIST_NEXT(marker, sav_allh_link);
		if (i == NULL)
			break;
		CK_LIST_REMOVE(marker, sav_allh_link);
		CK_LIST_INSERT_AFTER(i, marker, sav_allh_link);
		if (i->ifp == ifp) {
			refcount_acquire(&i->sav->refcnt); /* XXXKIB wrap ? */
			ipsec_accel_forget_handle_sav(i, true);
		}
	}
	CK_LIST_REMOVE(marker, sav_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(marker, M_IPSEC_MISC);
}

static struct ifp_handle_sav *
ipsec_accel_is_accel_sav_ptr_raw(struct secasvar *sav, struct ifnet *ifp)
{
	struct ifp_handle_sav *i;

	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
		return (NULL);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp)
			return (i);
	}
	return (NULL);
}

static struct ifp_handle_sav *
ipsec_accel_is_accel_sav_ptr(struct secasvar *sav, struct ifnet *ifp)
{
	NET_EPOCH_ASSERT();
	return (ipsec_accel_is_accel_sav_ptr_raw(sav, ifp));
}

static bool
ipsec_accel_is_accel_sav_impl(struct secasvar *sav)
{
	return (!CK_LIST_EMPTY(&sav->accel_ifps));
}

static struct secasvar *
ipsec_accel_drvspi_to_sa(u_int drv_spi)
{
	struct ifp_handle_sav *i;

	i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
	if (i == NULL)
		return (NULL);
	return (i->sav);
}

static struct ifp_handle_sp *
ipsec_accel_find_accel_sp(struct secpolicy *sp, if_t ifp)
{
	struct ifp_handle_sp *i;

	CK_LIST_FOREACH(i, &sp->accel_ifps, sp_link) {
		if (i->ifp == ifp)
			return (i);
	}
	return (NULL);
}

static bool
ipsec_accel_is_accel_sp(struct secpolicy *sp, if_t ifp)
{
	return (ipsec_accel_find_accel_sp(sp, ifp) != NULL);
}

static int
ipsec_accel_remember_sp(struct secpolicy *sp, if_t ifp,
    struct ifp_handle_sp **ip)
{
	struct ifp_handle_sp *i;

	i = malloc(sizeof(*i), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	i->sp = sp;
	i->ifp = ifp;
	if_ref(ifp);
	i->flags = IFP_HP_HANDLED;
	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_INSERT_HEAD(&sp->accel_ifps, i, sp_link);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, i, sp_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	*ip = i;
	return (0);
}

static bool
ipsec_accel_spdadd_match(if_t ifp, void *arg)
{
	struct secpolicy *sp;

	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0 ||
	    ifp->if_ipsec_accel_m->if_spdadd == NULL)
		return (false);
	sp = arg;
	if (sp->accel_ifname != NULL &&
	    strcmp(sp->accel_ifname, if_name(ifp)) != 0)
		return (false);
	if (ipsec_accel_is_accel_sp(sp, ifp))
		return (false);
	return (true);
}

static int
ipsec_accel_spdadd_cb(if_t ifp, void *arg)
{
	struct secpolicy *sp;
	struct inpcb *inp;
	struct ifp_handle_sp *i;
	int error;

	sp = arg;
	inp = sp->ipsec_accel_add_sp_inp;
	dprintf("ipsec_accel_spdadd_cb: ifp %s m %p sp %p inp %p\n",
	    if_name(ifp), ifp->if_ipsec_accel_m->if_spdadd, sp, inp);
	error = ipsec_accel_remember_sp(sp, ifp, &i);
	if (error != 0) {
		dprintf("ipsec_accel_spdadd: %s if_spdadd %p remember res %d\n",
		    if_name(ifp), sp, error);
		return (error);
	}
	error = ifp->if_ipsec_accel_m->if_spdadd(ifp, sp, inp, &i->ifdata);
	if (error != 0) {
		i->flags |= IFP_HP_REJECTED;
		dprintf("ipsec_accel_spdadd: %s if_spdadd %p res %d\n",
		    if_name(ifp), sp, error);
	}
	return (error);
}

static void
ipsec_accel_spdadd_act(void *arg, int pending)
{
	struct secpolicy *sp;
	struct inpcb *inp;

	sp = arg;
	CURVNET_SET(sp->accel_add_tq.adddel_vnet);
	if_foreach_sleep(ipsec_accel_spdadd_match, arg,
	    ipsec_accel_spdadd_cb, arg);
	inp = sp->ipsec_accel_add_sp_inp;
	if (inp != NULL) {
		INP_WLOCK(inp);
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);
		sp->ipsec_accel_add_sp_inp = NULL;
	}
	CURVNET_RESTORE();
	key_freesp(&sp);
}

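/*
 * Schedule offload of a security policy on the single-threaded taskqueue.
 * adddel_scheduled guards against double enqueue; references on the SP and,
 * when present, the inpcb are held until ipsec_accel_spdadd_act() runs.
 */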
void
ipsec_accel_spdadd_impl(struct secpolicy *sp, struct inpcb *inp)
{
	struct ipsec_accel_adddel_sp_tq *tq;

	if (sp == NULL)
		return;
	if (sp->tcount == 0 && inp == NULL)
		return;
	tq = &sp->accel_add_tq;
	if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
		return;
	tq->adddel_vnet = curthread->td_vnet;
	sp->ipsec_accel_add_sp_inp = inp;
	if (inp != NULL)
		in_pcbref(inp);
	TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spdadd_act, sp);
	key_addref(sp);
	taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
}

static void
ipsec_accel_spddel_act(void *arg, int pending)
{
	struct ifp_handle_sp *i;
	struct secpolicy *sp;
	int error;

	sp = arg;
	CURVNET_SET(sp->accel_del_tq.adddel_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	for (;;) {
		i = CK_LIST_FIRST(&sp->accel_ifps);
		if (i == NULL)
			break;
		CK_LIST_REMOVE(i, sp_link);
		CK_LIST_REMOVE(i, sp_allh_link);
		mtx_unlock(&ipsec_accel_sav_tmp);
		NET_EPOCH_WAIT();
		if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
		    IFP_HP_HANDLED) {
			dprintf("spd deinstall %s %p\n", if_name(i->ifp), sp);
			error = i->ifp->if_ipsec_accel_m->if_spddel(i->ifp,
			    sp, i->ifdata);
			if (error != 0) {
				dprintf(
				    "ipsec_accel_spddel: %s if_spddel %p res %d\n",
				    if_name(i->ifp), sp, error);
			}
		}
		if_rele(i->ifp);
		free(i, M_IPSEC_MISC);
		mtx_lock(&ipsec_accel_sav_tmp);
	}
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesp(&sp);
	CURVNET_RESTORE();
}

void
ipsec_accel_spddel_impl(struct secpolicy *sp)
{
	struct ipsec_accel_adddel_sp_tq *tq;

	if (sp == NULL)
		return;

	tq = &sp->accel_del_tq;
	if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
		return;
	tq->adddel_vnet = curthread->td_vnet;
	TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spddel_act, sp);
	key_addref(sp);
	taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
}

static void
ipsec_accel_on_ifdown_sp(struct ifnet *ifp)
{
	struct ifp_handle_sp *i, *marker;
	struct secpolicy *sp;
	int error;

	marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	marker->flags = IFP_HS_MARKER;

	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, marker,
	    sp_allh_link);
	for (;;) {
		i = CK_LIST_NEXT(marker, sp_allh_link);
		if (i == NULL)
			break;
		CK_LIST_REMOVE(marker, sp_allh_link);
		CK_LIST_INSERT_AFTER(i, marker, sp_allh_link);
		if (i->ifp != ifp)
			continue;

		sp = i->sp;
		key_addref(sp);
		CK_LIST_REMOVE(i, sp_link);
		CK_LIST_REMOVE(i, sp_allh_link);
		mtx_unlock(&ipsec_accel_sav_tmp);
		NET_EPOCH_WAIT();
		if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
		    IFP_HP_HANDLED) {
			dprintf("spd deinstall %s %p\n", if_name(ifp), sp);
			error = ifp->if_ipsec_accel_m->if_spddel(ifp,
			    sp, i->ifdata);
			if (error != 0) {
				dprintf(
				    "ipsec_accel_on_ifdown_sp: %s if_spddel %p res %d\n",
				    if_name(ifp), sp, error);
			}
		}
		key_freesp(&sp);
		if_rele(ifp);
		free(i, M_IPSEC_MISC);
		mtx_lock(&ipsec_accel_sav_tmp);
	}
	CK_LIST_REMOVE(marker, sp_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(marker, M_IPSEC_MISC);
}

static void
ipsec_accel_on_ifdown_impl(struct ifnet *ifp)
{
	ipsec_accel_on_ifdown_sp(ifp);
	ipsec_accel_on_ifdown_sav(ifp);
}

static void
ipsec_accel_ifdetach_event(void *arg __unused, struct ifnet *ifp)
{
	if ((ifp->if_flags & IFF_RENAMING) != 0)
		return;
	ipsec_accel_on_ifdown_impl(ifp);
}

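/*
 * Predict whether the packet still fits the interface MTU once ESP
 * encapsulation is applied: the (new or old) ESP header plus IV, block-size
 * padding of the payload plus the two pad-length/next-header bytes, and the
 * ICV.  Returns true when skip + hlen + rlen + padding + alen <= mtu.
 */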
static bool
ipsec_accel_output_pad(struct mbuf *m, struct secasvar *sav, int skip, int mtu)
{
	int alen, blks, hlen, padding, rlen;

	rlen = m->m_pkthdr.len - skip;
	hlen = ((sav->flags & SADB_X_EXT_OLD) != 0 ? sizeof(struct esp) :
	    sizeof(struct newesp)) + sav->ivlen;
	blks = MAX(4, SAV_ISCTR(sav) && VNET(esp_ctr_compatibility) ?
	    sav->tdb_encalgxform->native_blocksize :
	    sav->tdb_encalgxform->blocksize);
	padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;
	alen = xform_ah_authsize(sav->tdb_authalgxform);

	return (skip + hlen + rlen + padding + alen <= mtu);
}

static bool
ipsec_accel_output_tag(struct mbuf *m, u_int drv_spi)
{
	struct ipsec_accel_out_tag *tag;

	tag = (struct ipsec_accel_out_tag *)m_tag_get(
	    PACKET_TAG_IPSEC_ACCEL_OUT, sizeof(*tag), M_NOWAIT);
	if (tag == NULL)
		return (false);
	tag->drv_spi = drv_spi;
	m_tag_prepend(m, &tag->tag);
	return (true);
}

bool
ipsec_accel_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp,
    struct secpolicy *sp, struct secasvar *sav, int af, int mtu, int *hwassist)
{
	struct ifp_handle_sav *i;
	struct ip *ip;
	struct tcpcb *tp;
	u_long ip_len, skip;
	bool res;

	*hwassist = 0;
	res = false;
	if (ifp == NULL)
		return (res);

	M_ASSERTPKTHDR(m);
	NET_EPOCH_ASSERT();

	if (sav == NULL) {
		res = ipsec_accel_output_tag(m, IPSEC_ACCEL_DRV_SPI_BYPASS);
		goto out;
	}

	i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
	if (i == NULL || (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
	    IFP_HS_HANDLED)
		goto out;

	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
		ip_len = m->m_pkthdr.len;
		if (ip_len + i->hdr_ext_size > mtu)
			goto out;
		switch (af) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			skip = ip->ip_hl << 2;
			break;
		case AF_INET6:
			skip = sizeof(struct ip6_hdr);
			break;
		default:
			__unreachable();
		}
		if (!ipsec_accel_output_pad(m, sav, skip, mtu))
			goto out;
	}

	if (!ipsec_accel_output_tag(m, i->drv_spi))
		goto out;

	ipsec_accel_sa_recordxfer(sav, m);
	key_freesav(&sav);
	if (sp != NULL)
		key_freesp(&sp);

	*hwassist = ifp->if_ipsec_accel_m->if_hwassist(ifp, sav,
	    i->drv_spi, i->ifdata);
	res = true;
out:
	if (inp != NULL && inp->inp_pcbinfo == &V_tcbinfo) {
		INP_WLOCK_ASSERT(inp);
		tp = (struct tcpcb *)inp;
		if (res && (*hwassist & (CSUM_TSO | CSUM_IP6_TSO)) != 0) {
			tp->t_flags2 |= TF2_IPSEC_TSO;
		} else {
			tp->t_flags2 &= ~TF2_IPSEC_TSO;
		}
	}
	return (res);
}

struct ipsec_accel_in_tag *
ipsec_accel_input_tag_lookup(const struct mbuf *m)
{
	struct ipsec_accel_in_tag *tag;
	struct m_tag *xtag;

	xtag = m_tag_find(__DECONST(struct mbuf *, m),
	    PACKET_TAG_IPSEC_ACCEL_IN, NULL);
	if (xtag == NULL)
		return (NULL);
	tag = __containerof(xtag, struct ipsec_accel_in_tag, tag);
	return (tag);
}

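/*
 * Input-side hook for packets carrying a PACKET_TAG_IPSEC_ACCEL_IN tag,
 * presumably attached by the offloading driver, with the drv_spi inside.
 * ENXIO tells the caller the packet was not offloaded, EINPROGRESS means
 * it was consumed (dropped) here, and 0 means the packet is accepted, with
 * SA usage recorded when the drv_spi resolves to an SA.
 */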
int
ipsec_accel_input(struct mbuf *m, int offset, int proto)
{
	struct secasvar *sav;
	struct ipsec_accel_in_tag *tag;

	tag = ipsec_accel_input_tag_lookup(m);
	if (tag == NULL)
		return (ENXIO);

	if (tag->drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
	    tag->drv_spi > IPSEC_ACCEL_DRV_SPI_MAX) {
		dprintf("if %s mbuf %p drv_spi %d invalid, packet dropped\n",
		    (m->m_flags & M_PKTHDR) != 0 ? if_name(m->m_pkthdr.rcvif) :
		    "<unknwn>", m, tag->drv_spi);
		m_freem(m);
		return (EINPROGRESS);
	}

	sav = ipsec_accel_drvspi_to_sa(tag->drv_spi);
	if (sav != NULL)
		ipsec_accel_sa_recordxfer(sav, m);
	return (0);
}

static void
ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m)
{
	counter_u64_add(sav->accel_lft_sw, 1);
	counter_u64_add(sav->accel_lft_sw + 1, m->m_pkthdr.len);
	if (sav->accel_firstused == 0)
		sav->accel_firstused = time_second;
}

static void
ipsec_accel_sa_lifetime_update(struct seclifetime *lft_c,
    const struct seclifetime *lft_l)
{
	lft_c->allocations += lft_l->allocations;
	lft_c->bytes += lft_l->bytes;
	lft_c->usetime = min(lft_c->usetime, lft_l->usetime);
}

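/*
 * Fold hardware counters reported by the driver into the SA.  The driver
 * passes absolute octet/allocation totals for one (ifp, drv_spi) handle;
 * the difference against the values cached in the handle is added to the
 * SA's lifetime counters and to the accumulated accel_hw_* totals.
 */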
static void
ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav, if_t ifp,
    u_int drv_spi, uint64_t octets, uint64_t allocs)
{
	struct epoch_tracker et;
	struct ifp_handle_sav *i;
	uint64_t odiff, adiff;

	NET_EPOCH_ENTER(et);
	mtx_lock(&ipsec_accel_cnt_lock);

	if (allocs != 0) {
		if (sav->firstused == 0)
			sav->firstused = time_second;
		if (sav->accel_firstused == 0)
			sav->accel_firstused = time_second;
	}

	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp && i->drv_spi == drv_spi)
			break;
	}
	if (i == NULL)
		goto out;

	odiff = octets - i->cnt_octets;
	adiff = allocs - i->cnt_allocs;

	if (sav->lft_c != NULL) {
		counter_u64_add(sav->lft_c_bytes, odiff);
		counter_u64_add(sav->lft_c_allocations, adiff);
	}

	i->cnt_octets = octets;
	i->cnt_allocs = allocs;
	sav->accel_hw_octets += odiff;
	sav->accel_hw_allocs += adiff;

out:
	mtx_unlock(&ipsec_accel_cnt_lock);
	NET_EPOCH_EXIT(et);
}

static int
ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs)
{
	struct ifp_handle_sav *i;
	int error;

	NET_EPOCH_ASSERT();
	error = 0;

	mtx_lock(&ipsec_accel_cnt_lock);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp && i->drv_spi == drv_spi) {
			*octets = i->cnt_octets;
			*allocs = i->cnt_allocs;
			break;
		}
	}
	if (i == NULL)
		error = ENOENT;
	mtx_unlock(&ipsec_accel_cnt_lock);
	return (error);
}

static void
ipsec_accel_sa_lifetime_hw(struct secasvar *sav, if_t ifp,
    struct seclifetime *lft)
{
	struct ifp_handle_sav *i;
	if_sa_cnt_fn_t p;

	IFNET_RLOCK_ASSERT();

	i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
	if (i != NULL && (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
	    IFP_HS_HANDLED) {
		p = ifp->if_ipsec_accel_m->if_sa_cnt;
		if (p != NULL)
			p(ifp, sav, i->drv_spi, i->ifdata, lft);
	}
}

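/*
 * Merge lifetime statistics into lft_c according to op:
 * IF_SA_CNT_IFP_HW_VAL queries a single interface, IF_SA_CNT_TOTAL_SW_VAL
 * reads the software counters kept by ipsec_accel_sa_recordxfer(), and
 * IF_SA_CNT_TOTAL_HW_VAL polls if_sa_cnt on every interface holding the SA.
 * Without IF_SA_CNT_UPD the destination is zeroed before merging.
 */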
static int
ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
    struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
    struct rm_priotracker *sahtree_trackerp)
{
	struct seclifetime lft_l, lft_s;
	struct ifp_handle_sav *i;
	if_t ifp1;
	if_sa_cnt_fn_t p;
	int error;

	error = 0;
	memset(&lft_l, 0, sizeof(lft_l));
	memset(&lft_s, 0, sizeof(lft_s));

	switch (op & ~IF_SA_CNT_UPD) {
	case IF_SA_CNT_IFP_HW_VAL:
		ipsec_accel_sa_lifetime_hw(sav, ifp, &lft_l);
		ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
		break;

	case IF_SA_CNT_TOTAL_SW_VAL:
		lft_l.allocations = (uint32_t)counter_u64_fetch(
		    sav->accel_lft_sw);
		lft_l.bytes = counter_u64_fetch(sav->accel_lft_sw + 1);
		lft_l.usetime = sav->accel_firstused;
		break;

	case IF_SA_CNT_TOTAL_HW_VAL:
		IFNET_RLOCK_ASSERT();
		CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
			if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
			    IFP_HS_HANDLED)
				continue;
			ifp1 = i->ifp;
			p = ifp1->if_ipsec_accel_m->if_sa_cnt;
			if (p == NULL)
				continue;
			memset(&lft_s, 0, sizeof(lft_s));
			if (sahtree_trackerp != NULL)
				ipsec_sahtree_runlock(sahtree_trackerp);
			error = p(ifp1, sav, i->drv_spi, i->ifdata, &lft_s);
			if (sahtree_trackerp != NULL)
				ipsec_sahtree_rlock(sahtree_trackerp);
			if (error == 0)
				ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
		}
		break;
	}

	if (error == 0) {
		if ((op & IF_SA_CNT_UPD) == 0)
			memset(lft_c, 0, sizeof(*lft_c));
		ipsec_accel_sa_lifetime_update(lft_c, &lft_l);
	}

	return (error);
}

static void
ipsec_accel_sync_imp(void)
{
	taskqueue_drain_all(ipsec_accel_tq);
}

static struct mbuf *
ipsec_accel_key_setaccelif_impl(struct secasvar *sav)
{
	struct mbuf *m, *m1;
	struct ifp_handle_sav *i;
	struct epoch_tracker et;

	if (sav->accel_ifname != NULL)
		return (key_setaccelif(sav->accel_ifname));

	m = m1 = NULL;

	NET_EPOCH_ENTER(et);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
		    IFP_HS_HANDLED) {
			m1 = key_setaccelif(if_name(i->ifp));
			if (m == NULL)
				m = m1;
			else if (m1 != NULL)
				m_cat(m, m1);
		}
	}
	NET_EPOCH_EXIT(et);
	return (m);
}

#endif	/* IPSEC_OFFLOAD */