1 /*-
2 * Copyright (c) 2021,2022 NVIDIA CORPORATION & AFFILIATES. ALL RIGHTS RESERVED.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26 #include "opt_inet.h"
27 #include "opt_inet6.h"
28 #include "opt_ipsec.h"
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/ck.h>
33 #include <sys/eventhandler.h>
34 #include <sys/kernel.h>
35 #include <sys/mbuf.h>
36 #include <sys/pctrie.h>
37 #include <sys/proc.h>
38 #include <sys/socket.h>
39 #include <sys/sysctl.h>
40 #include <sys/protosw.h>
41 #include <sys/taskqueue.h>
42
43 #include <machine/stdarg.h>
44
45 #include <net/if.h>
46 #include <net/if_var.h>
47 #include <net/if_private.h>
48 #include <net/vnet.h>
49 #include <netinet/in.h>
50 #include <netinet/ip.h>
51 #include <netinet/ip_var.h>
52 #include <netinet/ip6.h>
53 #include <netinet6/ip6_var.h>
54 #include <netinet/in_pcb.h>
55 #include <netinet/tcp_var.h>
56
57 #include <netipsec/key.h>
58 #include <netipsec/keydb.h>
59 #include <netipsec/key_debug.h>
60 #include <netipsec/xform.h>
61 #include <netipsec/ipsec.h>
62 #include <netipsec/ipsec_offload.h>
63 #include <netipsec/ah_var.h>
64 #include <netipsec/esp.h>
65 #include <netipsec/esp_var.h>
66 #include <netipsec/ipcomp_var.h>
67
68 #ifdef IPSEC_OFFLOAD
69
70 static struct mtx ipsec_accel_sav_tmp;
71 static struct unrhdr *drv_spi_unr;
72 static struct mtx ipsec_accel_cnt_lock;
73 static struct taskqueue *ipsec_accel_tq;
74
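/*
 * Contexts for the taskqueue-deferred SA install and forget operations.
 * Both kinds of tasks run on ipsec_accel_tq, which is intentionally
 * single-threaded (see ipsec_accel_init()), so an install task and the
 * forget task for the same SA can never execute in parallel.
 */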
75 struct ipsec_accel_install_newkey_tq {
76 struct secasvar *sav;
77 struct vnet *install_vnet;
78 struct task install_task;
79 };
80
81 struct ipsec_accel_forget_tq {
82 struct vnet *forget_vnet;
83 struct task forget_task;
84 struct secasvar *sav;
85 };
86
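/*
 * Binding of one SA to one offload-capable interface.  Each handle is
 * linked into the per-SA sav->accel_ifps list and into the global
 * ipsec_accel_all_sav_handles list, and is indexed by drv_spi in
 * drv_spi_pctrie for input-path lookup.
 */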
87 struct ifp_handle_sav {
88 CK_LIST_ENTRY(ifp_handle_sav) sav_link;
89 CK_LIST_ENTRY(ifp_handle_sav) sav_allh_link;
90 struct secasvar *sav;
91 struct ifnet *ifp;
92 void *ifdata;
93 uint64_t drv_spi;
94 uint32_t flags;
95 size_t hdr_ext_size;
96 uint64_t cnt_octets;
97 uint64_t cnt_allocs;
98 };
99
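/*
 * IFP_HS_HANDLED and IFP_HS_REJECTED record the driver's answer to
 * if_sa_newkey(); IFP_HS_MARKER tags the temporary list marker used by
 * ipsec_accel_on_ifdown_sav() while it drops the list lock.
 */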
100 #define IFP_HS_HANDLED 0x00000001
101 #define IFP_HS_REJECTED 0x00000002
102 #define IFP_HS_MARKER 0x00000010
103
104 static CK_LIST_HEAD(, ifp_handle_sav) ipsec_accel_all_sav_handles;
105
106 struct ifp_handle_sp {
107 CK_LIST_ENTRY(ifp_handle_sp) sp_link;
108 CK_LIST_ENTRY(ifp_handle_sp) sp_allh_link;
109 struct secpolicy *sp;
110 struct ifnet *ifp;
111 void *ifdata;
112 uint32_t flags;
113 };
114
115 #define IFP_HP_HANDLED 0x00000001
116 #define IFP_HP_REJECTED 0x00000002
117 #define IFP_HP_MARKER 0x00000004
118
119 static CK_LIST_HEAD(, ifp_handle_sp) ipsec_accel_all_sp_handles;
120
121 static void *
122 drvspi_sa_trie_alloc(struct pctrie *ptree)
123 {
124 void *res;
125
126 res = malloc(pctrie_node_size(), M_IPSEC_MISC, M_ZERO | M_NOWAIT);
127 if (res != NULL)
128 pctrie_zone_init(res, 0, 0);
129 return (res);
130 }
131
132 static void
133 drvspi_sa_trie_free(struct pctrie *ptree, void *node)
134 {
135 free(node, M_IPSEC_MISC);
136 }
137
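/*
 * drv_spi_pctrie maps a driver-visible SPI (allocated from drv_spi_unr)
 * back to its ifp_handle_sav, letting ipsec_accel_drvspi_to_sa() resolve
 * the SA for a packet tagged by the driver on input.
 */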
138 PCTRIE_DEFINE(DRVSPI_SA, ifp_handle_sav, drv_spi,
139 drvspi_sa_trie_alloc, drvspi_sa_trie_free);
140 static struct pctrie drv_spi_pctrie;
141
142 static eventhandler_tag ipsec_accel_ifdetach_event_tag;
143
144 static void ipsec_accel_sa_newkey_impl(struct secasvar *sav);
145 static int ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
146 u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires);
147 static void ipsec_accel_forget_sav_clear(struct secasvar *sav);
148 static struct ifp_handle_sav *ipsec_accel_is_accel_sav_ptr(struct secasvar *sav,
149 struct ifnet *ifp);
150 static int ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
151 struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
152 struct rm_priotracker *sahtree_trackerp);
153 static void ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m);
154 static void ipsec_accel_sync_imp(void);
155 static bool ipsec_accel_is_accel_sav_impl(struct secasvar *sav);
156 static struct mbuf *ipsec_accel_key_setaccelif_impl(struct secasvar *sav);
157 static void ipsec_accel_on_ifdown_impl(struct ifnet *ifp);
158 static void ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav,
159 if_t ifp, u_int drv_spi, uint64_t octets, uint64_t allocs);
160 static int ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
161 if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs);
162 static void ipsec_accel_ifdetach_event(void *arg, struct ifnet *ifp);
163
164 static void
165 ipsec_accel_init(void *arg)
166 {
167 mtx_init(&ipsec_accel_sav_tmp, "ipasat", MTX_DEF, 0);
168 mtx_init(&ipsec_accel_cnt_lock, "ipascn", MTX_DEF, 0);
169 drv_spi_unr = new_unrhdr(IPSEC_ACCEL_DRV_SPI_MIN,
170 IPSEC_ACCEL_DRV_SPI_MAX, &ipsec_accel_sav_tmp);
171 ipsec_accel_tq = taskqueue_create("ipsec_offload", M_WAITOK,
172 taskqueue_thread_enqueue, &ipsec_accel_tq);
173 (void)taskqueue_start_threads(&ipsec_accel_tq,
174 1 /* Must be single-threaded */, PWAIT,
175 "ipsec_offload");
176 ipsec_accel_sa_newkey_p = ipsec_accel_sa_newkey_impl;
177 ipsec_accel_forget_sav_p = ipsec_accel_forget_sav_impl;
178 ipsec_accel_spdadd_p = ipsec_accel_spdadd_impl;
179 ipsec_accel_spddel_p = ipsec_accel_spddel_impl;
180 ipsec_accel_sa_lifetime_op_p = ipsec_accel_sa_lifetime_op_impl;
181 ipsec_accel_sync_p = ipsec_accel_sync_imp;
182 ipsec_accel_is_accel_sav_p = ipsec_accel_is_accel_sav_impl;
183 ipsec_accel_key_setaccelif_p = ipsec_accel_key_setaccelif_impl;
184 ipsec_accel_on_ifdown_p = ipsec_accel_on_ifdown_impl;
185 ipsec_accel_drv_sa_lifetime_update_p =
186 ipsec_accel_drv_sa_lifetime_update_impl;
187 ipsec_accel_drv_sa_lifetime_fetch_p =
188 ipsec_accel_drv_sa_lifetime_fetch_impl;
189 pctrie_init(&drv_spi_pctrie);
190 ipsec_accel_ifdetach_event_tag = EVENTHANDLER_REGISTER(
191 ifnet_departure_event, ipsec_accel_ifdetach_event, NULL,
192 EVENTHANDLER_PRI_ANY);
193 }
194 SYSINIT(ipsec_accel_init, SI_SUB_VNET_DONE, SI_ORDER_ANY,
195 ipsec_accel_init, NULL);
196
197 static void
198 ipsec_accel_fini(void *arg)
199 {
200 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
201 ipsec_accel_ifdetach_event_tag);
202 ipsec_accel_sa_newkey_p = NULL;
203 ipsec_accel_forget_sav_p = NULL;
204 ipsec_accel_spdadd_p = NULL;
205 ipsec_accel_spddel_p = NULL;
206 ipsec_accel_sa_lifetime_op_p = NULL;
207 ipsec_accel_sync_p = NULL;
208 ipsec_accel_is_accel_sav_p = NULL;
209 ipsec_accel_key_setaccelif_p = NULL;
210 ipsec_accel_on_ifdown_p = NULL;
211 ipsec_accel_drv_sa_lifetime_update_p = NULL;
212 ipsec_accel_drv_sa_lifetime_fetch_p = NULL;
213 ipsec_accel_sync_imp();
214 clean_unrhdr(drv_spi_unr); /* avoid panic, should go later */
215 clear_unrhdr(drv_spi_unr);
216 delete_unrhdr(drv_spi_unr);
217 taskqueue_drain_all(ipsec_accel_tq);
218 taskqueue_free(ipsec_accel_tq);
219 mtx_destroy(&ipsec_accel_sav_tmp);
220 mtx_destroy(&ipsec_accel_cnt_lock);
221 }
222 SYSUNINIT(ipsec_accel_fini, SI_SUB_VNET_DONE, SI_ORDER_ANY,
223 ipsec_accel_fini, NULL);
224
225 SYSCTL_NODE(_net_inet_ipsec, OID_AUTO, offload, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
226 "");
227
228 static bool ipsec_offload_verbose = false;
229 SYSCTL_BOOL(_net_inet_ipsec_offload, OID_AUTO, verbose, CTLFLAG_RW,
230 &ipsec_offload_verbose, 0,
231 "Verbose SA/SP offload install and deinstall");
232
233 static void
234 dprintf(const char *fmt, ...)
235 {
236 va_list ap;
237
238 if (!ipsec_offload_verbose)
239 return;
240
241 va_start(ap, fmt);
242 vprintf(fmt, ap);
243 va_end(ap);
244 }
245
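/*
 * Pre-allocate the forget task context and attach it to the SA with an
 * atomic compare-and-set; presumably this is done on the install path so
 * that ipsec_accel_forget_sav_impl() never has to allocate memory itself
 * and can only consume (or skip) the prepared context.
 */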
246 static void
247 ipsec_accel_alloc_forget_tq(struct secasvar *sav)
248 {
249 void *ftq;
250
251 if (sav->accel_forget_tq != 0)
252 return;
253
254 ftq = malloc(sizeof(struct ipsec_accel_forget_tq), M_TEMP, M_WAITOK);
255 if (!atomic_cmpset_ptr(&sav->accel_forget_tq, 0, (uintptr_t)ftq))
256 free(ftq, M_TEMP);
257 }
258
259 static bool
260 ipsec_accel_sa_install_match(if_t ifp, void *arg)
261 {
262 if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
263 return (false);
264 if (ifp->if_ipsec_accel_m->if_sa_newkey == NULL) {
265 dprintf("driver bug ifp %s if_sa_newkey NULL\n",
266 if_name(ifp));
267 return (false);
268 }
269 return (true);
270 }
271
272 static int
273 ipsec_accel_sa_newkey_cb(if_t ifp, void *arg)
274 {
275 struct ipsec_accel_install_newkey_tq *tq;
276 void *priv;
277 u_int drv_spi;
278 int error;
279
280 tq = arg;
281
282 dprintf("ipsec_accel_sa_newkey_act: ifp %s h %p spi %#x "
283 "flags %#x seq %d\n",
284 if_name(ifp), ifp->if_ipsec_accel_m->if_sa_newkey,
285 be32toh(tq->sav->spi), tq->sav->flags, tq->sav->seq);
286 priv = NULL;
287 drv_spi = alloc_unr(drv_spi_unr);
288 if (tq->sav->accel_ifname != NULL &&
289 strcmp(tq->sav->accel_ifname, if_name(ifp)) != 0) {
290 error = ipsec_accel_handle_sav(tq->sav,
291 ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
292 goto out;
293 }
294 if (drv_spi == -1) {
295 /* XXXKIB */
296 dprintf("ipsec_accel_sa_install_newkey: cannot alloc "
297 "drv_spi if %s spi %#x\n", if_name(ifp),
298 be32toh(tq->sav->spi));
299 return (ENOMEM);
300 }
301 error = ifp->if_ipsec_accel_m->if_sa_newkey(ifp, tq->sav,
302 drv_spi, &priv);
303 if (error != 0) {
304 if (error == EOPNOTSUPP) {
305 dprintf("ipsec_accel_sa_newkey: driver "
306 "refused sa if %s spi %#x\n",
307 if_name(ifp), be32toh(tq->sav->spi));
308 error = ipsec_accel_handle_sav(tq->sav,
309 ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
310 /* XXXKIB */
311 } else {
312 dprintf("ipsec_accel_sa_newkey: driver "
313 "error %d if %s spi %#x\n",
314 error, if_name(ifp), be32toh(tq->sav->spi));
315 /* XXXKIB */
316 }
317 } else {
318 error = ipsec_accel_handle_sav(tq->sav, ifp,
319 drv_spi, priv, IFP_HS_HANDLED, NULL);
320 if (error != 0) {
321 /* XXXKIB */
322 dprintf("ipsec_accel_sa_newkey: handle_sav "
323 "err %d if %s spi %#x\n", error,
324 if_name(ifp), be32toh(tq->sav->spi));
325 }
326 }
327 out:
328 return (error);
329 }
330
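/*
 * Taskqueue handler for SA installation: if the SA is still MATURE and is
 * neither installed nor being forgotten, mark it SADB_KEY_ACCEL_INST and
 * offer it to every capable interface via ipsec_accel_sa_newkey_cb().
 */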
331 static void
332 ipsec_accel_sa_newkey_act(void *context, int pending)
333 {
334 struct ipsec_accel_install_newkey_tq *tq;
335 void *tqf;
336 struct secasvar *sav;
337
338 tq = context;
339 tqf = NULL;
340 sav = tq->sav;
341 CURVNET_SET(tq->install_vnet);
342 mtx_lock(&ipsec_accel_sav_tmp);
343 if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
344 SADB_KEY_ACCEL_DEINST)) == 0 &&
345 sav->state == SADB_SASTATE_MATURE) {
346 sav->accel_flags |= SADB_KEY_ACCEL_INST;
347 mtx_unlock(&ipsec_accel_sav_tmp);
348 if_foreach_sleep(ipsec_accel_sa_install_match, context,
349 ipsec_accel_sa_newkey_cb, context);
350 ipsec_accel_alloc_forget_tq(sav);
351 mtx_lock(&ipsec_accel_sav_tmp);
352
353 /*
354 * If ipsec_accel_forget_sav() raced with us and set
355 * the flag, do its work. Its task cannot execute in
356 * parallel since ipsec_accel taskqueue is single-threaded.
357 */
358 if ((sav->accel_flags & SADB_KEY_ACCEL_DEINST) != 0) {
359 tqf = (void *)sav->accel_forget_tq;
360 sav->accel_forget_tq = 0;
361 ipsec_accel_forget_sav_clear(sav);
362 }
363 }
364 mtx_unlock(&ipsec_accel_sav_tmp);
365 key_freesav(&tq->sav);
366 CURVNET_RESTORE();
367 free(tq, M_TEMP);
368 free(tqf, M_TEMP);
369 }
370
371 static void
372 ipsec_accel_sa_newkey_impl(struct secasvar *sav)
373 {
374 struct ipsec_accel_install_newkey_tq *tq;
375
376 if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
377 SADB_KEY_ACCEL_DEINST)) != 0)
378 return;
379
380 dprintf(
381 "ipsec_accel_sa_install_newkey: spi %#x flags %#x seq %d\n",
382 be32toh(sav->spi), sav->flags, sav->seq);
383
384 tq = malloc(sizeof(*tq), M_TEMP, M_NOWAIT);
385 if (tq == NULL) {
386 dprintf("ipsec_accel_sa_install_newkey: no memory for tq, "
387 "spi %#x\n", be32toh(sav->spi));
388 /* XXXKIB */
389 return;
390 }
391
392 refcount_acquire(&sav->refcnt);
393
394 TASK_INIT(&tq->install_task, 0, ipsec_accel_sa_newkey_act, tq);
395 tq->sav = sav;
396 tq->install_vnet = curthread->td_vnet;
397 taskqueue_enqueue(ipsec_accel_tq, &tq->install_task);
398 }
399
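/*
 * Record the driver's answer for (sav, ifp): allocate an ifp_handle_sav,
 * link it into the per-SA and global lists, and insert it into the
 * drv_spi trie.  Fails with EALREADY if this interface already has a
 * handle for the SA.
 */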
400 static int
401 ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
402 u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires)
403 {
404 struct ifp_handle_sav *ihs, *i;
405 int error;
406
407 MPASS(__bitcount(flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) == 1);
408
409 ihs = malloc(sizeof(*ihs), M_IPSEC_MISC, M_WAITOK | M_ZERO);
410 ihs->ifp = ifp;
411 ihs->sav = sav;
412 ihs->drv_spi = drv_spi;
413 ihs->ifdata = priv;
414 ihs->flags = flags;
415 ihs->hdr_ext_size = esp_hdrsiz(sav);
416 mtx_lock(&ipsec_accel_sav_tmp);
417 CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
418 if (i->ifp == ifp) {
419 error = EALREADY;
420 goto errout;
421 }
422 }
423 error = DRVSPI_SA_PCTRIE_INSERT(&drv_spi_pctrie, ihs);
424 if (error != 0)
425 goto errout;
426 if_ref(ihs->ifp);
427 CK_LIST_INSERT_HEAD(&sav->accel_ifps, ihs, sav_link);
428 CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, ihs, sav_allh_link);
429 mtx_unlock(&ipsec_accel_sav_tmp);
430 if (ires != NULL)
431 *ires = ihs;
432 return (0);
433 errout:
434 mtx_unlock(&ipsec_accel_sav_tmp);
435 free(ihs, M_IPSEC_MISC);
436 if (ires != NULL)
437 *ires = NULL;
438 return (error);
439 }
440
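/*
 * Unlink one handle and tell the driver to deinstall the SA.  The list
 * lock is dropped and NET_EPOCH_WAIT() is used so that lockless readers
 * (e.g. the output path walking sav->accel_ifps) are guaranteed to have
 * left before the handle memory and the drv_spi number are released.
 */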
441 static void
442 ipsec_accel_forget_handle_sav(struct ifp_handle_sav *i, bool freesav)
443 {
444 struct ifnet *ifp;
445 struct secasvar *sav;
446
447 mtx_assert(&ipsec_accel_sav_tmp, MA_OWNED);
448
449 CK_LIST_REMOVE(i, sav_link);
450 CK_LIST_REMOVE(i, sav_allh_link);
451 DRVSPI_SA_PCTRIE_REMOVE(&drv_spi_pctrie, i->drv_spi);
452 mtx_unlock(&ipsec_accel_sav_tmp);
453 NET_EPOCH_WAIT();
454 ifp = i->ifp;
455 sav = i->sav;
456 if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
457 IFP_HS_HANDLED) {
458 dprintf("sa deinstall %s %p spi %#x ifl %#x\n",
459 if_name(ifp), sav, be32toh(sav->spi), i->flags);
460 ifp->if_ipsec_accel_m->if_sa_deinstall(ifp,
461 i->drv_spi, i->ifdata);
462 }
463 if_rele(ifp);
464 free_unr(drv_spi_unr, i->drv_spi);
465 free(i, M_IPSEC_MISC);
466 if (freesav)
467 key_freesav(&sav);
468 mtx_lock(&ipsec_accel_sav_tmp);
469 }
470
471 static void
472 ipsec_accel_forget_sav_clear(struct secasvar *sav)
473 {
474 struct ifp_handle_sav *i;
475
476 for (;;) {
477 i = CK_LIST_FIRST(&sav->accel_ifps);
478 if (i == NULL)
479 break;
480 ipsec_accel_forget_handle_sav(i, false);
481 }
482 }
483
484 static void
485 ipsec_accel_forget_sav_act(void *arg, int pending)
486 {
487 struct ipsec_accel_forget_tq *tq;
488 struct secasvar *sav;
489
490 tq = arg;
491 sav = tq->sav;
492 CURVNET_SET(tq->forget_vnet);
493 mtx_lock(&ipsec_accel_sav_tmp);
494 ipsec_accel_forget_sav_clear(sav);
495 mtx_unlock(&ipsec_accel_sav_tmp);
496 key_freesav(&sav);
497 CURVNET_RESTORE();
498 free(tq, M_TEMP);
499 }
500
501 void
502 ipsec_accel_forget_sav_impl(struct secasvar *sav)
503 {
504 struct ipsec_accel_forget_tq *tq;
505
506 mtx_lock(&ipsec_accel_sav_tmp);
507 sav->accel_flags |= SADB_KEY_ACCEL_DEINST;
508 tq = (void *)atomic_load_ptr(&sav->accel_forget_tq);
509 if (tq == NULL || !atomic_cmpset_ptr(&sav->accel_forget_tq,
510 (uintptr_t)tq, 0)) {
511 mtx_unlock(&ipsec_accel_sav_tmp);
512 return;
513 }
514 mtx_unlock(&ipsec_accel_sav_tmp);
515
516 refcount_acquire(&sav->refcnt);
517 TASK_INIT(&tq->forget_task, 0, ipsec_accel_forget_sav_act, tq);
518 tq->forget_vnet = curthread->td_vnet;
519 tq->sav = sav;
520 taskqueue_enqueue(ipsec_accel_tq, &tq->forget_task);
521 }
522
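/*
 * Interface-down/detach handling for SAs.  A marker entry is inserted
 * into the global handle list so the walk can continue safely after
 * ipsec_accel_forget_handle_sav() drops and re-takes the list lock.
 */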
523 static void
524 ipsec_accel_on_ifdown_sav(struct ifnet *ifp)
525 {
526 struct ifp_handle_sav *i, *marker;
527
528 marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
529 marker->flags = IFP_HS_MARKER;
530
531 mtx_lock(&ipsec_accel_sav_tmp);
532 CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, marker,
533 sav_allh_link);
534 for (;;) {
535 i = CK_LIST_NEXT(marker, sav_allh_link);
536 if (i == NULL)
537 break;
538 CK_LIST_REMOVE(marker, sav_allh_link);
539 CK_LIST_INSERT_AFTER(i, marker, sav_allh_link);
540 if (i->ifp == ifp) {
541 refcount_acquire(&i->sav->refcnt); /* XXXKIB wrap ? */
542 ipsec_accel_forget_handle_sav(i, true);
543 }
544 }
545 CK_LIST_REMOVE(marker, sav_allh_link);
546 mtx_unlock(&ipsec_accel_sav_tmp);
547 free(marker, M_IPSEC_MISC);
548 }
549
550 static struct ifp_handle_sav *
551 ipsec_accel_is_accel_sav_ptr_raw(struct secasvar *sav, struct ifnet *ifp)
552 {
553 struct ifp_handle_sav *i;
554
555 if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
556 return (NULL);
557 CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
558 if (i->ifp == ifp)
559 return (i);
560 }
561 return (NULL);
562 }
563
564 static struct ifp_handle_sav *
565 ipsec_accel_is_accel_sav_ptr(struct secasvar *sav, struct ifnet *ifp)
566 {
567 NET_EPOCH_ASSERT();
568 return (ipsec_accel_is_accel_sav_ptr_raw(sav, ifp));
569 }
570
571 static bool
572 ipsec_accel_is_accel_sav_impl(struct secasvar *sav)
573 {
574 return (!CK_LIST_EMPTY(&sav->accel_ifps));
575 }
576
577 static struct secasvar *
578 ipsec_accel_drvspi_to_sa(u_int drv_spi)
579 {
580 struct ifp_handle_sav *i;
581
582 i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
583 if (i == NULL)
584 return (NULL);
585 return (i->sav);
586 }
587
588 static struct ifp_handle_sp *
589 ipsec_accel_find_accel_sp(struct secpolicy *sp, if_t ifp)
590 {
591 struct ifp_handle_sp *i;
592
593 CK_LIST_FOREACH(i, &sp->accel_ifps, sp_link) {
594 if (i->ifp == ifp)
595 return (i);
596 }
597 return (NULL);
598 }
599
600 static bool
601 ipsec_accel_is_accel_sp(struct secpolicy *sp, if_t ifp)
602 {
603 return (ipsec_accel_find_accel_sp(sp, ifp) != NULL);
604 }
605
606 static int
607 ipsec_accel_remember_sp(struct secpolicy *sp, if_t ifp,
608 struct ifp_handle_sp **ip)
609 {
610 struct ifp_handle_sp *i;
611
612 i = malloc(sizeof(*i), M_IPSEC_MISC, M_WAITOK | M_ZERO);
613 i->sp = sp;
614 i->ifp = ifp;
615 if_ref(ifp);
616 i->flags = IFP_HP_HANDLED;
617 mtx_lock(&ipsec_accel_sav_tmp);
618 CK_LIST_INSERT_HEAD(&sp->accel_ifps, i, sp_link);
619 CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, i, sp_allh_link);
620 mtx_unlock(&ipsec_accel_sav_tmp);
621 *ip = i;
622 return (0);
623 }
624
625 static bool
626 ipsec_accel_spdadd_match(if_t ifp, void *arg)
627 {
628 struct secpolicy *sp;
629
630 if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0 ||
631 ifp->if_ipsec_accel_m->if_spdadd == NULL)
632 return (false);
633 sp = arg;
634 if (sp->accel_ifname != NULL &&
635 strcmp(sp->accel_ifname, if_name(ifp)) != 0)
636 return (false);
637 if (ipsec_accel_is_accel_sp(sp, ifp))
638 return (false);
639 return (true);
640 }
641
642 static int
643 ipsec_accel_spdadd_cb(if_t ifp, void *arg)
644 {
645 struct secpolicy *sp;
646 struct inpcb *inp;
647 struct ifp_handle_sp *i;
648 int error;
649
650 sp = arg;
651 inp = sp->ipsec_accel_add_sp_inp;
652 dprintf("ipsec_accel_spdadd_cb: ifp %s m %p sp %p inp %p\n",
653 if_name(ifp), ifp->if_ipsec_accel_m->if_spdadd, sp, inp);
654 error = ipsec_accel_remember_sp(sp, ifp, &i);
655 if (error != 0) {
656 dprintf("ipsec_accel_spdadd: %s if_spdadd %p remember res %d\n",
657 if_name(ifp), sp, error);
658 return (error);
659 }
660 error = ifp->if_ipsec_accel_m->if_spdadd(ifp, sp, inp, &i->ifdata);
661 if (error != 0) {
662 i->flags |= IFP_HP_REJECTED;
663 dprintf("ipsec_accel_spdadd: %s if_spdadd %p res %d\n",
664 if_name(ifp), sp, error);
665 }
666 return (error);
667 }
668
669 static void
670 ipsec_accel_spdadd_act(void *arg, int pending)
671 {
672 struct secpolicy *sp;
673 struct inpcb *inp;
674
675 sp = arg;
676 CURVNET_SET(sp->accel_add_tq.adddel_vnet);
677 if_foreach_sleep(ipsec_accel_spdadd_match, arg,
678 ipsec_accel_spdadd_cb, arg);
679 inp = sp->ipsec_accel_add_sp_inp;
680 if (inp != NULL) {
681 INP_WLOCK(inp);
682 if (!in_pcbrele_wlocked(inp))
683 INP_WUNLOCK(inp);
684 sp->ipsec_accel_add_sp_inp = NULL;
685 }
686 CURVNET_RESTORE();
687 key_freesp(&sp);
688 }
689
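/*
 * Schedule offload of a security policy.  The work is deferred to the
 * taskqueue; adddel_scheduled guards against double-enqueue, and the
 * inpcb (for per-socket policies) is referenced until the task runs.
 */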
690 void
691 ipsec_accel_spdadd_impl(struct secpolicy *sp, struct inpcb *inp)
692 {
693 struct ipsec_accel_adddel_sp_tq *tq;
694
695 if (sp == NULL)
696 return;
697 if (sp->tcount == 0 && inp == NULL)
698 return;
699 tq = &sp->accel_add_tq;
700 if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
701 return;
702 tq->adddel_vnet = curthread->td_vnet;
703 sp->ipsec_accel_add_sp_inp = inp;
704 if (inp != NULL)
705 in_pcbref(inp);
706 TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spdadd_act, sp);
707 key_addref(sp);
708 taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
709 }
710
711 static void
712 ipsec_accel_spddel_act(void *arg, int pending)
713 {
714 struct ifp_handle_sp *i;
715 struct secpolicy *sp;
716 int error;
717
718 sp = arg;
719 CURVNET_SET(sp->accel_del_tq.adddel_vnet);
720 mtx_lock(&ipsec_accel_sav_tmp);
721 for (;;) {
722 i = CK_LIST_FIRST(&sp->accel_ifps);
723 if (i == NULL)
724 break;
725 CK_LIST_REMOVE(i, sp_link);
726 CK_LIST_REMOVE(i, sp_allh_link);
727 mtx_unlock(&ipsec_accel_sav_tmp);
728 NET_EPOCH_WAIT();
729 if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
730 IFP_HP_HANDLED) {
731 dprintf("spd deinstall %s %p\n", if_name(i->ifp), sp);
732 error = i->ifp->if_ipsec_accel_m->if_spddel(i->ifp,
733 sp, i->ifdata);
734 if (error != 0) {
735 dprintf(
736 "ipsec_accel_spddel: %s if_spddel %p res %d\n",
737 if_name(i->ifp), sp, error);
738 }
739 }
740 if_rele(i->ifp);
741 free(i, M_IPSEC_MISC);
742 mtx_lock(&ipsec_accel_sav_tmp);
743 }
744 mtx_unlock(&ipsec_accel_sav_tmp);
745 key_freesp(&sp);
746 CURVNET_RESTORE();
747 }
748
749 void
750 ipsec_accel_spddel_impl(struct secpolicy *sp)
751 {
752 struct ipsec_accel_adddel_sp_tq *tq;
753
754 if (sp == NULL)
755 return;
756
757 tq = &sp->accel_del_tq;
758 if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
759 return;
760 tq->adddel_vnet = curthread->td_vnet;
761 TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spddel_act, sp);
762 key_addref(sp);
763 taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
764 }
765
766 static void
767 ipsec_accel_on_ifdown_sp(struct ifnet *ifp)
768 {
769 struct ifp_handle_sp *i, *marker;
770 struct secpolicy *sp;
771 int error;
772
773 marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
774 marker->flags = IFP_HP_MARKER;
775
776 mtx_lock(&ipsec_accel_sav_tmp);
777 CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, marker,
778 sp_allh_link);
779 for (;;) {
780 i = CK_LIST_NEXT(marker, sp_allh_link);
781 if (i == NULL)
782 break;
783 CK_LIST_REMOVE(marker, sp_allh_link);
784 CK_LIST_INSERT_AFTER(i, marker, sp_allh_link);
785 if (i->ifp != ifp)
786 continue;
787
788 sp = i->sp;
789 key_addref(sp);
790 CK_LIST_REMOVE(i, sp_link);
791 CK_LIST_REMOVE(i, sp_allh_link);
792 mtx_unlock(&ipsec_accel_sav_tmp);
793 NET_EPOCH_WAIT();
794 if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
795 IFP_HP_HANDLED) {
796 dprintf("spd deinstall %s %p\n", if_name(ifp), sp);
797 error = ifp->if_ipsec_accel_m->if_spddel(ifp,
798 sp, i->ifdata);
799 if (error != 0) {
800 dprintf(
801 "ipsec_accel_on_ifdown_sp: %s if_spddel %p res %d\n",
802 if_name(ifp), sp, error);
803 }
804 }
805 key_freesp(&sp);
806 if_rele(ifp);
807 free(i, M_IPSEC_MISC);
808 mtx_lock(&ipsec_accel_sav_tmp);
809 }
810 CK_LIST_REMOVE(marker, sp_allh_link);
811 mtx_unlock(&ipsec_accel_sav_tmp);
812 free(marker, M_IPSEC_MISC);
813 }
814
815 static void
816 ipsec_accel_on_ifdown_impl(struct ifnet *ifp)
817 {
818 ipsec_accel_on_ifdown_sp(ifp);
819 ipsec_accel_on_ifdown_sav(ifp);
820 }
821
822 static void
823 ipsec_accel_ifdetach_event(void *arg __unused, struct ifnet *ifp)
824 {
825 if ((ifp->if_flags & IFF_RENAMING) != 0)
826 return;
827 ipsec_accel_on_ifdown_impl(ifp);
828 }
829
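/*
 * Conservatively estimate the packet length after ESP encapsulation
 * (ESP header, IV, padding plus the pad-length/next-header trailer
 * bytes, and ICV) and check that it still fits within the MTU, similar
 * to the sizing done by the software ESP output path.
 */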
830 static bool
831 ipsec_accel_output_pad(struct mbuf *m, struct secasvar *sav, int skip, int mtu)
832 {
833 int alen, blks, hlen, padding, rlen;
834
835 rlen = m->m_pkthdr.len - skip;
836 hlen = ((sav->flags & SADB_X_EXT_OLD) != 0 ? sizeof(struct esp) :
837 sizeof(struct newesp)) + sav->ivlen;
838 blks = MAX(4, SAV_ISCTR(sav) && VNET(esp_ctr_compatibility) ?
839 sav->tdb_encalgxform->native_blocksize :
840 sav->tdb_encalgxform->blocksize);
841 padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;
842 alen = xform_ah_authsize(sav->tdb_authalgxform);
843
844 return (skip + hlen + rlen + padding + alen <= mtu);
845 }
846
847 static bool
848 ipsec_accel_output_tag(struct mbuf *m, u_int drv_spi)
849 {
850 struct ipsec_accel_out_tag *tag;
851
852 tag = (struct ipsec_accel_out_tag *)m_tag_get(
853 PACKET_TAG_IPSEC_ACCEL_OUT, sizeof(*tag), M_NOWAIT);
854 if (tag == NULL)
855 return (false);
856 tag->drv_spi = drv_spi;
857 m_tag_prepend(m, &tag->tag);
858 return (true);
859 }
860
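/*
 * Decide whether this outbound packet can be handed to the NIC for
 * inline IPsec processing.  On success the mbuf is tagged with the
 * drv_spi of the matching handle, the SA/SP references passed in are
 * released, *hwassist is set from the driver, and the TCP PCB, if any,
 * has its TF2_IPSEC_TSO flag updated accordingly.
 */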
861 bool
862 ipsec_accel_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp,
863 struct secpolicy *sp, struct secasvar *sav, int af, int mtu, int *hwassist)
864 {
865 struct ifp_handle_sav *i;
866 struct ip *ip;
867 struct tcpcb *tp;
868 u_long ip_len, skip;
869 bool res;
870
871 *hwassist = 0;
872 res = false;
873 if (ifp == NULL)
874 return (res);
875
876 M_ASSERTPKTHDR(m);
877 NET_EPOCH_ASSERT();
878
879 if (sav == NULL) {
880 res = ipsec_accel_output_tag(m, IPSEC_ACCEL_DRV_SPI_BYPASS);
881 goto out;
882 }
883
884 i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
885 if (i == NULL || (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
886 IFP_HS_HANDLED)
887 goto out;
888
889 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
890 ip_len = m->m_pkthdr.len;
891 if (ip_len + i->hdr_ext_size > mtu)
892 goto out;
893 switch (af) {
894 case AF_INET:
895 ip = mtod(m, struct ip *);
896 skip = ip->ip_hl << 2;
897 break;
898 case AF_INET6:
899 skip = sizeof(struct ip6_hdr);
900 break;
901 default:
902 __unreachable();
903 }
904 if (!ipsec_accel_output_pad(m, sav, skip, mtu))
905 goto out;
906 }
907
908 if (!ipsec_accel_output_tag(m, i->drv_spi))
909 goto out;
910
911 ipsec_accel_sa_recordxfer(sav, m);
912 key_freesav(&sav);
913 if (sp != NULL)
914 key_freesp(&sp);
915
916 *hwassist = ifp->if_ipsec_accel_m->if_hwassist(ifp, sav,
917 i->drv_spi, i->ifdata);
918 res = true;
919 out:
920 if (inp != NULL && inp->inp_pcbinfo == &V_tcbinfo) {
921 INP_WLOCK_ASSERT(inp);
922 tp = (struct tcpcb *)inp;
923 if (res && (*hwassist & (CSUM_TSO | CSUM_IP6_TSO)) != 0) {
924 tp->t_flags2 |= TF2_IPSEC_TSO;
925 } else {
926 tp->t_flags2 &= ~TF2_IPSEC_TSO;
927 }
928 }
929 return (res);
930 }
931
932 struct ipsec_accel_in_tag *
933 ipsec_accel_input_tag_lookup(const struct mbuf *m)
934 {
935 struct ipsec_accel_in_tag *tag;
936 struct m_tag *xtag;
937
938 xtag = m_tag_find(__DECONST(struct mbuf *, m),
939 PACKET_TAG_IPSEC_ACCEL_IN, NULL);
940 if (xtag == NULL)
941 return (NULL);
942 tag = __containerof(xtag, struct ipsec_accel_in_tag, tag);
943 return (tag);
944 }
945
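/*
 * Input-path hook: a packet carrying a PACKET_TAG_IPSEC_ACCEL_IN tag,
 * attached by the driver, was already processed by the NIC's inline
 * IPsec engine.  Validate the drv_spi from the tag and charge the packet
 * to the matching SA's software counters.
 */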
946 int
947 ipsec_accel_input(struct mbuf *m, int offset, int proto)
948 {
949 struct secasvar *sav;
950 struct ipsec_accel_in_tag *tag;
951
952 tag = ipsec_accel_input_tag_lookup(m);
953 if (tag == NULL)
954 return (ENXIO);
955
956 if (tag->drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
957 tag->drv_spi > IPSEC_ACCEL_DRV_SPI_MAX) {
958 dprintf("if %s mbuf %p drv_spi %d invalid, packet dropped\n",
959 (m->m_flags & M_PKTHDR) != 0 ? if_name(m->m_pkthdr.rcvif) :
960 "<unknwn>", m, tag->drv_spi);
961 m_freem(m);
962 return (EINPROGRESS);
963 }
964
965 sav = ipsec_accel_drvspi_to_sa(tag->drv_spi);
966 if (sav != NULL)
967 ipsec_accel_sa_recordxfer(sav, m);
968 return (0);
969 }
970
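/*
 * Software-visible accounting for offloaded traffic: accel_lft_sw[0]
 * counts packets (allocations), accel_lft_sw[1] counts bytes.
 */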
971 static void
972 ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m)
973 {
974 counter_u64_add(sav->accel_lft_sw, 1);
975 counter_u64_add(sav->accel_lft_sw + 1, m->m_pkthdr.len);
976 if (sav->accel_firstused == 0)
977 sav->accel_firstused = time_second;
978 }
979
980 static void
981 ipsec_accel_sa_lifetime_update(struct seclifetime *lft_c,
982 const struct seclifetime *lft_l)
983 {
984 lft_c->allocations += lft_l->allocations;
985 lft_c->bytes += lft_l->bytes;
986 lft_c->usetime = min(lft_c->usetime, lft_l->usetime);
987 }
988
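/*
 * Fold absolute octet/allocation counters reported by the driver into
 * the SA lifetime accounting: remember the last reported values per
 * handle and add only the deltas to the SA-wide counters.
 */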
989 static void
990 ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav, if_t ifp,
991 u_int drv_spi, uint64_t octets, uint64_t allocs)
992 {
993 struct epoch_tracker et;
994 struct ifp_handle_sav *i;
995 uint64_t odiff, adiff;
996
997 NET_EPOCH_ENTER(et);
998 mtx_lock(&ipsec_accel_cnt_lock);
999
1000 if (allocs != 0) {
1001 if (sav->firstused == 0)
1002 sav->firstused = time_second;
1003 if (sav->accel_firstused == 0)
1004 sav->accel_firstused = time_second;
1005 }
1006
1007 CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
1008 if (i->ifp == ifp && i->drv_spi == drv_spi)
1009 break;
1010 }
1011 if (i == NULL)
1012 goto out;
1013
1014 odiff = octets - i->cnt_octets;
1015 adiff = allocs - i->cnt_allocs;
1016
1017 if (sav->lft_c != NULL) {
1018 counter_u64_add(sav->lft_c_bytes, odiff);
1019 counter_u64_add(sav->lft_c_allocations, adiff);
1020 }
1021
1022 i->cnt_octets = octets;
1023 i->cnt_allocs = allocs;
1024 sav->accel_hw_octets += odiff;
1025 sav->accel_hw_allocs += adiff;
1026
1027 out:
1028 mtx_unlock(&ipsec_accel_cnt_lock);
1029 NET_EPOCH_EXIT(et);
1030 }
1031
1032 static int
1033 ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
1034 if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs)
1035 {
1036 struct ifp_handle_sav *i;
1037 int error;
1038
1039 NET_EPOCH_ASSERT();
1040 error = 0;
1041
1042 mtx_lock(&ipsec_accel_cnt_lock);
1043 CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
1044 if (i->ifp == ifp && i->drv_spi == drv_spi) {
1045 *octets = i->cnt_octets;
1046 *allocs = i->cnt_allocs;
1047 break;
1048 }
1049 }
1050 if (i == NULL)
1051 error = ENOENT;
1052 mtx_unlock(&ipsec_accel_cnt_lock);
1053 return (error);
1054 }
1055
1056 static void
1057 ipsec_accel_sa_lifetime_hw(struct secasvar *sav, if_t ifp,
1058 struct seclifetime *lft)
1059 {
1060 struct ifp_handle_sav *i;
1061 if_sa_cnt_fn_t p;
1062
1063 IFNET_RLOCK_ASSERT();
1064
1065 i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
1066 if (i != NULL && (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
1067 IFP_HS_HANDLED) {
1068 p = ifp->if_ipsec_accel_m->if_sa_cnt;
1069 if (p != NULL)
1070 p(ifp, sav, i->drv_spi, i->ifdata, lft);
1071 }
1072 }
1073
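/*
 * Lifetime query dispatcher: IF_SA_CNT_IFP_HW_VAL reads one interface,
 * IF_SA_CNT_TOTAL_SW_VAL reports the software-side counters, and
 * IF_SA_CNT_TOTAL_HW_VAL sums the hardware counters over all handles
 * (dropping the SAH tree lock around the driver calls).  With
 * IF_SA_CNT_UPD the result is accumulated into *lft_c instead of
 * overwriting it.
 */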
1074 static int
1075 ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
1076 struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
1077 struct rm_priotracker *sahtree_trackerp)
1078 {
1079 struct seclifetime lft_l, lft_s;
1080 struct ifp_handle_sav *i;
1081 if_t ifp1;
1082 if_sa_cnt_fn_t p;
1083 int error;
1084
1085 error = 0;
1086 memset(&lft_l, 0, sizeof(lft_l));
1087 memset(&lft_s, 0, sizeof(lft_s));
1088
1089 switch (op & ~IF_SA_CNT_UPD) {
1090 case IF_SA_CNT_IFP_HW_VAL:
1091 ipsec_accel_sa_lifetime_hw(sav, ifp, &lft_l);
1092 ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
1093 break;
1094
1095 case IF_SA_CNT_TOTAL_SW_VAL:
1096 lft_l.allocations = (uint32_t)counter_u64_fetch(
1097 sav->accel_lft_sw);
1098 lft_l.bytes = counter_u64_fetch(sav->accel_lft_sw + 1);
1099 lft_l.usetime = sav->accel_firstused;
1100 break;
1101
1102 case IF_SA_CNT_TOTAL_HW_VAL:
1103 IFNET_RLOCK_ASSERT();
1104 CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
1105 if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
1106 IFP_HS_HANDLED)
1107 continue;
1108 ifp1 = i->ifp;
1109 p = ifp1->if_ipsec_accel_m->if_sa_cnt;
1110 if (p == NULL)
1111 continue;
1112 memset(&lft_s, 0, sizeof(lft_s));
1113 if (sahtree_trackerp != NULL)
1114 ipsec_sahtree_runlock(sahtree_trackerp);
1115 error = p(ifp1, sav, i->drv_spi, i->ifdata, &lft_s);
1116 if (sahtree_trackerp != NULL)
1117 ipsec_sahtree_rlock(sahtree_trackerp);
1118 if (error == 0)
1119 ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
1120 }
1121 break;
1122 }
1123
1124 if (error == 0) {
1125 if ((op & IF_SA_CNT_UPD) == 0)
1126 memset(lft_c, 0, sizeof(*lft_c));
1127 ipsec_accel_sa_lifetime_update(lft_c, &lft_l);
1128 }
1129
1130 return (error);
1131 }
1132
1133 static void
1134 ipsec_accel_sync_imp(void)
1135 {
1136 taskqueue_drain_all(ipsec_accel_tq);
1137 }
1138
1139 static struct mbuf *
1140 ipsec_accel_key_setaccelif_impl(struct secasvar *sav)
1141 {
1142 struct mbuf *m, *m1;
1143 struct ifp_handle_sav *i;
1144 struct epoch_tracker et;
1145
1146 if (sav->accel_ifname != NULL)
1147 return (key_setaccelif(sav->accel_ifname));
1148
1149 m = m1 = NULL;
1150
1151 NET_EPOCH_ENTER(et);
1152 CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
1153 if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
1154 IFP_HS_HANDLED) {
1155 m1 = key_setaccelif(if_name(i->ifp));
1156 if (m == NULL)
1157 m = m1;
1158 else if (m1 != NULL)
1159 m_cat(m, m1);
1160 }
1161 }
1162 NET_EPOCH_EXIT(et);
1163 return (m);
1164 }
1165
1166 #endif /* IPSEC_OFFLOAD */
1167