xref: /freebsd/sys/netipsec/ipsec_offload.c (revision 6cf4e30252fe48b230b9d76cac20576d5b3d2ffa)
1 /*-
2  * Copyright (c) 2021,2022 NVIDIA CORPORATION & AFFILIATES. ALL RIGHTS RESERVED.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include "opt_inet.h"
27 #include "opt_inet6.h"
28 #include "opt_ipsec.h"
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/ck.h>
33 #include <sys/eventhandler.h>
34 #include <sys/kernel.h>
35 #include <sys/mbuf.h>
36 #include <sys/pctrie.h>
37 #include <sys/proc.h>
38 #include <sys/socket.h>
39 #include <sys/sysctl.h>
40 #include <sys/protosw.h>
41 #include <sys/stdarg.h>
42 #include <sys/taskqueue.h>
43 
44 #include <net/if.h>
45 #include <net/if_var.h>
46 #include <net/if_private.h>
47 #include <net/vnet.h>
48 #include <netinet/in.h>
49 #include <netinet/ip.h>
50 #include <netinet/ip_var.h>
51 #include <netinet/ip6.h>
52 #include <netinet6/ip6_var.h>
53 #include <netinet/in_pcb.h>
54 #include <netinet/tcp_var.h>
55 
56 #include <netipsec/key.h>
57 #include <netipsec/keydb.h>
58 #include <netipsec/key_debug.h>
59 #include <netipsec/xform.h>
60 #include <netipsec/ipsec.h>
61 #include <netipsec/ipsec_offload.h>
62 #include <netipsec/ah_var.h>
63 #include <netipsec/esp.h>
64 #include <netipsec/esp_var.h>
65 #include <netipsec/ipcomp_var.h>
66 
67 #ifdef IPSEC_OFFLOAD
68 
69 static struct mtx ipsec_accel_sav_tmp;
70 static struct unrhdr *drv_spi_unr;
71 static struct mtx ipsec_accel_cnt_lock;
72 static struct taskqueue *ipsec_accel_tq;
73 
/*
 * Deferred SA-install request.  Allocated by ipsec_accel_sa_newkey_impl()
 * and serviced by ipsec_accel_sa_newkey_act() on the single-threaded
 * ipsec_accel taskqueue.
 */
struct ipsec_accel_install_newkey_tq {
	struct secasvar *sav;		/* referenced SA to offload */
	struct vnet *install_vnet;	/* vnet of the enqueueing thread */
	struct task install_task;
};
79 
/*
 * Deferred SA-forget (deinstall) request, also serviced on the
 * single-threaded ipsec_accel taskqueue by ipsec_accel_forget_sav_act().
 */
struct ipsec_accel_forget_tq {
	struct vnet *forget_vnet;	/* vnet of the enqueueing thread */
	struct task forget_task;
	struct secasvar *sav;		/* referenced SA to deinstall */
};
85 
/*
 * Per-(SA, interface) offload handle.  One is created for every
 * interface an SA was offered to; flags record whether the driver
 * accepted (IFP_HS_HANDLED) or refused (IFP_HS_REJECTED) the SA.
 * Lists are protected by ipsec_accel_sav_tmp; readers walk them
 * under the net epoch.
 */
struct ifp_handle_sav {
	CK_LIST_ENTRY(ifp_handle_sav) sav_link;		/* sav->accel_ifps */
	CK_LIST_ENTRY(ifp_handle_sav) sav_allh_link;	/* global list below */
	struct secasvar *sav;
	struct ifnet *ifp;		/* holds an if_ref() while alive */
	void *ifdata;			/* opaque driver state (if_sa_newkey) */
	uint64_t drv_spi;		/* pctrie key, from drv_spi_unr */
	uint32_t flags;			/* IFP_HS_* */
	size_t hdr_ext_size;		/* esp_hdrsiz() expansion estimate */
	uint64_t cnt_octets;		/* last byte count reported by driver */
	uint64_t cnt_allocs;		/* last packet count reported by driver */
	struct xform_history xfh;	/* snapshot for accel input path */
};

#define	IFP_HS_HANDLED	0x00000001
#define	IFP_HS_REJECTED	0x00000002
#define	IFP_HS_MARKER	0x00000010

/* All live SA handles; walked with a marker on interface departure. */
static CK_LIST_HEAD(, ifp_handle_sav) ipsec_accel_all_sav_handles;
105 
/*
 * Per-(security policy, interface) offload handle, the SP analog of
 * struct ifp_handle_sav.  Same locking: lists under ipsec_accel_sav_tmp,
 * lookups under the net epoch.
 */
struct ifp_handle_sp {
	CK_LIST_ENTRY(ifp_handle_sp) sp_link;		/* sp->accel_ifps */
	CK_LIST_ENTRY(ifp_handle_sp) sp_allh_link;	/* global list below */
	struct secpolicy *sp;
	struct ifnet *ifp;	/* holds an if_ref() while alive */
	void *ifdata;		/* opaque driver state (if_spdadd) */
	uint32_t flags;		/* IFP_HP_* */
};

#define	IFP_HP_HANDLED	0x00000001
#define	IFP_HP_REJECTED	0x00000002
#define	IFP_HP_MARKER	0x00000004

/* All live SP handles; walked with a marker on interface departure. */
static CK_LIST_HEAD(, ifp_handle_sp) ipsec_accel_all_sp_handles;
120 
121 static void *
122 drvspi_sa_trie_alloc(struct pctrie *ptree)
123 {
124 	void *res;
125 
126 	res = malloc(pctrie_node_size(), M_IPSEC_MISC, M_ZERO | M_NOWAIT);
127 	if (res != NULL)
128 		pctrie_zone_init(res, 0, 0);
129 	return (res);
130 }
131 
/* Node deallocator paired with drvspi_sa_trie_alloc(). */
static void
drvspi_sa_trie_free(struct pctrie *ptree, void *node)
{
	free(node, M_IPSEC_MISC);
}
137 
138 PCTRIE_DEFINE(DRVSPI_SA, ifp_handle_sav, drv_spi,
139     drvspi_sa_trie_alloc, drvspi_sa_trie_free);
140 static struct pctrie drv_spi_pctrie;
141 
142 static eventhandler_tag ipsec_accel_ifdetach_event_tag;
143 
144 static void ipsec_accel_sa_newkey_impl(struct secasvar *sav);
145 static int ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
146     u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires);
147 static void ipsec_accel_forget_sav_clear(struct secasvar *sav);
148 static struct ifp_handle_sav *ipsec_accel_is_accel_sav_ptr(struct secasvar *sav,
149     struct ifnet *ifp);
150 static int ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
151     struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
152     struct rm_priotracker *sahtree_trackerp);
153 static void ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m);
154 static void ipsec_accel_sync_imp(void);
155 static bool ipsec_accel_is_accel_sav_impl(struct secasvar *sav);
156 static struct mbuf *ipsec_accel_key_setaccelif_impl(struct secasvar *sav);
157 static void ipsec_accel_on_ifdown_impl(struct ifnet *ifp);
158 static void ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav,
159     if_t ifp, u_int drv_spi, uint64_t octets, uint64_t allocs);
160 static int ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
161     if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs);
162 static void ipsec_accel_ifdetach_event(void *arg, struct ifnet *ifp);
163 static bool ipsec_accel_fill_xh_impl(if_t ifp, uint32_t drv_spi,
164     struct xform_history *xh);
165 
/*
 * One-time (SI_SUB_VNET_DONE) initialization of the offload machinery:
 * locks, the drv_spi unit-number allocator, the single-threaded
 * taskqueue, and publication of the hook pointers consumed by the rest
 * of netipsec.
 */
static void
ipsec_accel_init(void *arg)
{
	mtx_init(&ipsec_accel_sav_tmp, "ipasat", MTX_DEF, 0);
	mtx_init(&ipsec_accel_cnt_lock, "ipascn", MTX_DEF, 0);
	/* drv_spi ids key offloaded SAs both here and in drivers. */
	drv_spi_unr = new_unrhdr(IPSEC_ACCEL_DRV_SPI_MIN,
	    IPSEC_ACCEL_DRV_SPI_MAX, &ipsec_accel_sav_tmp);
	ipsec_accel_tq = taskqueue_create("ipsec_offload", M_WAITOK,
	    taskqueue_thread_enqueue, &ipsec_accel_tq);
	(void)taskqueue_start_threads(&ipsec_accel_tq,
	    1 /* Must be single-threaded */, PWAIT,
	    "ipsec_offload");
	/* Publish the accel entry points; key.c calls through these. */
	ipsec_accel_sa_newkey_p = ipsec_accel_sa_newkey_impl;
	ipsec_accel_forget_sav_p = ipsec_accel_forget_sav_impl;
	ipsec_accel_spdadd_p = ipsec_accel_spdadd_impl;
	ipsec_accel_spddel_p = ipsec_accel_spddel_impl;
	ipsec_accel_sa_lifetime_op_p = ipsec_accel_sa_lifetime_op_impl;
	ipsec_accel_sync_p = ipsec_accel_sync_imp;
	ipsec_accel_is_accel_sav_p = ipsec_accel_is_accel_sav_impl;
	ipsec_accel_key_setaccelif_p = ipsec_accel_key_setaccelif_impl;
	ipsec_accel_on_ifdown_p = ipsec_accel_on_ifdown_impl;
	ipsec_accel_drv_sa_lifetime_update_p =
	    ipsec_accel_drv_sa_lifetime_update_impl;
	ipsec_accel_drv_sa_lifetime_fetch_p =
	    ipsec_accel_drv_sa_lifetime_fetch_impl;
	ipsec_accel_fill_xh_p = ipsec_accel_fill_xh_impl;
	pctrie_init(&drv_spi_pctrie);
	/* Tear down offload state when an interface goes away. */
	ipsec_accel_ifdetach_event_tag = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, ipsec_accel_ifdetach_event, NULL,
	    EVENTHANDLER_PRI_ANY);
}
197 SYSINIT(ipsec_accel_init, SI_SUB_VNET_DONE, SI_ORDER_ANY,
198     ipsec_accel_init, NULL);
199 
/*
 * Module teardown: unhook from netipsec first so no new work arrives,
 * then drain the already-queued work and free resources.
 */
static void
ipsec_accel_fini(void *arg)
{
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    ipsec_accel_ifdetach_event_tag);
	/* Unpublish hooks before draining so no new tasks are enqueued. */
	ipsec_accel_sa_newkey_p = NULL;
	ipsec_accel_forget_sav_p = NULL;
	ipsec_accel_spdadd_p = NULL;
	ipsec_accel_spddel_p = NULL;
	ipsec_accel_sa_lifetime_op_p = NULL;
	ipsec_accel_sync_p = NULL;
	ipsec_accel_is_accel_sav_p = NULL;
	ipsec_accel_key_setaccelif_p = NULL;
	ipsec_accel_on_ifdown_p = NULL;
	ipsec_accel_drv_sa_lifetime_update_p = NULL;
	ipsec_accel_drv_sa_lifetime_fetch_p = NULL;
	ipsec_accel_fill_xh_p = NULL;
	ipsec_accel_sync_imp();		/* wait for queued tasks */
	clean_unrhdr(drv_spi_unr);	/* avoid panic, should go later */
	clear_unrhdr(drv_spi_unr);
	delete_unrhdr(drv_spi_unr);
	taskqueue_drain_all(ipsec_accel_tq);
	taskqueue_free(ipsec_accel_tq);
	mtx_destroy(&ipsec_accel_sav_tmp);
	mtx_destroy(&ipsec_accel_cnt_lock);
}
226 SYSUNINIT(ipsec_accel_fini, SI_SUB_VNET_DONE, SI_ORDER_ANY,
227     ipsec_accel_fini, NULL);
228 
229 SYSCTL_NODE(_net_inet_ipsec, OID_AUTO, offload, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
230     "");
231 
232 static bool ipsec_offload_verbose = false;
233 SYSCTL_BOOL(_net_inet_ipsec_offload, OID_AUTO, verbose, CTLFLAG_RW,
234     &ipsec_offload_verbose, 0,
235     "Verbose SA/SP offload install and deinstall");
236 
/*
 * Debug printf, gated by the net.inet.ipsec.offload.verbose sysctl.
 * Kept as a function (not a macro) so callers always type-check their
 * arguments.
 */
static void
dprintf(const char *fmt, ...)
{
	va_list ap;

	if (!ipsec_offload_verbose)
		return;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
249 
/*
 * Pre-allocate the forget-task context and attach it to the SA, so
 * that ipsec_accel_forget_sav_impl() never has to allocate (it can be
 * invoked in contexts where sleeping for memory is undesirable).
 * Lock-free: cmpset publishes the buffer, the loser frees its copy.
 */
static void
ipsec_accel_alloc_forget_tq(struct secasvar *sav)
{
	void *ftq;

	if (sav->accel_forget_tq != 0)
		return;

	ftq = malloc(sizeof(struct ipsec_accel_forget_tq), M_TEMP, M_WAITOK);
	if (!atomic_cmpset_ptr(&sav->accel_forget_tq, 0, (uintptr_t)ftq))
		free(ftq, M_TEMP);	/* somebody else installed one */
}
262 
263 static bool
264 ipsec_accel_sa_install_match(if_t ifp, void *arg)
265 {
266 	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
267 		return (false);
268 	if (ifp->if_ipsec_accel_m->if_sa_newkey == NULL) {
269 		dprintf("driver bug ifp %s if_sa_newkey NULL\n",
270 		    if_name(ifp));
271 		return (false);
272 	}
273 	return (true);
274 }
275 
276 static int
277 ipsec_accel_sa_newkey_cb(if_t ifp, void *arg)
278 {
279 	struct ipsec_accel_install_newkey_tq *tq;
280 	void *priv;
281 	u_int drv_spi;
282 	int error;
283 
284 	tq = arg;
285 
286 	dprintf("ipsec_accel_sa_newkey_act: ifp %s h %p spi %#x "
287 	    "flags %#x seq %d\n",
288 	    if_name(ifp), ifp->if_ipsec_accel_m->if_sa_newkey,
289 	    be32toh(tq->sav->spi), tq->sav->flags, tq->sav->seq);
290 	priv = NULL;
291 	drv_spi = alloc_unr(drv_spi_unr);
292 	if (drv_spi == -1) {
293 		dprintf("ipsec_accel_sa_install_newkey: cannot alloc "
294 		    "drv_spi if %s spi %#x\n", if_name(ifp),
295 		    be32toh(tq->sav->spi));
296 		return (0);
297 	}
298 	if (tq->sav->accel_ifname != NULL &&
299 	    strcmp(tq->sav->accel_ifname, if_name(ifp)) != 0) {
300 		error = ipsec_accel_handle_sav(tq->sav,
301 		    ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
302 		goto out;
303 	}
304 	error = ifp->if_ipsec_accel_m->if_sa_newkey(ifp, tq->sav,
305 	    drv_spi, &priv);
306 	if (error != 0) {
307 		if (error == EOPNOTSUPP) {
308 			dprintf("ipsec_accel_sa_newkey: driver "
309 			    "refused sa if %s spi %#x\n",
310 			    if_name(ifp), be32toh(tq->sav->spi));
311 			error = ipsec_accel_handle_sav(tq->sav,
312 			    ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
313 			/* XXXKIB */
314 		} else {
315 			dprintf("ipsec_accel_sa_newkey: driver "
316 			    "error %d if %s spi %#x\n",
317 			    error, if_name(ifp), be32toh(tq->sav->spi));
318 			/* XXXKIB */
319 		}
320 	} else {
321 		error = ipsec_accel_handle_sav(tq->sav, ifp,
322 		    drv_spi, priv, IFP_HS_HANDLED, NULL);
323 		if (error != 0) {
324 			/* XXXKIB */
325 			dprintf("ipsec_accel_sa_newkey: handle_sav "
326 			    "err %d if %s spi %#x\n", error,
327 			    if_name(ifp), be32toh(tq->sav->spi));
328 		}
329 	}
330 out:
331 	return (0);
332 }
333 
/*
 * Taskqueue body for SA installation.  Marks the SA as being
 * installed, offers it to every matching interface, and then resolves
 * the race with a concurrent ipsec_accel_forget_sav() by performing
 * the deinstall itself if the DEINST flag appeared meanwhile.
 */
static void
ipsec_accel_sa_newkey_act(void *context, int pending)
{
	struct ipsec_accel_install_newkey_tq *tq;
	void *tqf;
	struct secasvar *sav;

	tq = context;
	tqf = NULL;
	sav = tq->sav;
	CURVNET_SET(tq->install_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	/* Skip if install/deinstall already in progress or SA not mature. */
	if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
	    SADB_KEY_ACCEL_DEINST)) == 0 &&
	    sav->state == SADB_SASTATE_MATURE) {
		sav->accel_flags |= SADB_KEY_ACCEL_INST;
		mtx_unlock(&ipsec_accel_sav_tmp);
		if_foreach_sleep(ipsec_accel_sa_install_match, context,
		    ipsec_accel_sa_newkey_cb, context);
		/* Pre-arm the forget context for the eventual deinstall. */
		ipsec_accel_alloc_forget_tq(sav);
		mtx_lock(&ipsec_accel_sav_tmp);

		/*
		 * If ipsec_accel_forget_sav() raced with us and set
		 * the flag, do its work.  Its task cannot execute in
		 * parallel since ipsec_accel taskqueue is single-threaded.
		 */
		if ((sav->accel_flags & SADB_KEY_ACCEL_DEINST) != 0) {
			tqf = (void *)sav->accel_forget_tq;
			sav->accel_forget_tq = 0;
			ipsec_accel_forget_sav_clear(sav);
		}
	}
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesav(&tq->sav);	/* drop the ref taken at enqueue time */
	CURVNET_RESTORE();
	free(tq, M_TEMP);
	free(tqf, M_TEMP);	/* free(NULL) is a no-op */
}
373 
/*
 * Hook entry: schedule offload installation of a newly-keyed SA onto
 * the ipsec_accel taskqueue.  The unlocked accel_flags test here is an
 * optimization; the authoritative check is repeated under the lock in
 * ipsec_accel_sa_newkey_act().
 */
static void
ipsec_accel_sa_newkey_impl(struct secasvar *sav)
{
	struct ipsec_accel_install_newkey_tq *tq;

	if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
	    SADB_KEY_ACCEL_DEINST)) != 0)
		return;

	dprintf(
	    "ipsec_accel_sa_install_newkey: spi %#x flags %#x seq %d\n",
	    be32toh(sav->spi), sav->flags, sav->seq);

	tq = malloc(sizeof(*tq), M_TEMP, M_NOWAIT);
	if (tq == NULL) {
		/* Best-effort: the SA simply stays un-offloaded. */
		dprintf("ipsec_accel_sa_install_newkey: no memory for tq, "
		    "spi %#x\n", be32toh(sav->spi));
		/* XXXKIB */
		return;
	}

	/* Hold the SA until the task runs; released in the task body. */
	refcount_acquire(&sav->refcnt);

	TASK_INIT(&tq->install_task, 0, ipsec_accel_sa_newkey_act, tq);
	tq->sav = sav;
	tq->install_vnet = curthread->td_vnet;
	taskqueue_enqueue(ipsec_accel_tq, &tq->install_task);
}
402 
/*
 * Record the outcome of offering SA 'sav' to interface 'ifp' as an
 * ifp_handle_sav: linked into the SA's list, the global list, and the
 * drv_spi pctrie.  'flags' must be exactly one of IFP_HS_HANDLED or
 * IFP_HS_REJECTED.  On success the handle takes ownership of drv_spi
 * and an interface reference; on failure (EALREADY, or pctrie
 * insertion failure) nothing is recorded and the caller keeps
 * ownership of drv_spi.
 */
static int
ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
    u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires)
{
	struct ifp_handle_sav *ihs, *i;
	int error;

	MPASS(__bitcount(flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) == 1);

	ihs = malloc(sizeof(*ihs), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	ihs->ifp = ifp;
	ihs->sav = sav;
	ihs->drv_spi = drv_spi;
	ihs->ifdata = priv;
	ihs->flags = flags;
	ihs->hdr_ext_size = esp_hdrsiz(sav);
	/* Snapshot the xform history for the accelerated input path. */
	memcpy(&ihs->xfh.dst, &sav->sah->saidx.dst, sizeof(ihs->xfh.dst));
	ihs->xfh.spi = sav->spi;
	ihs->xfh.proto = sav->sah->saidx.proto;
	ihs->xfh.mode = sav->sah->saidx.mode;
	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp) {
			error = EALREADY;
			goto errout;
		}
	}
	error = DRVSPI_SA_PCTRIE_INSERT(&drv_spi_pctrie, ihs);
	if (error != 0)
		goto errout;
	if_ref(ihs->ifp);
	CK_LIST_INSERT_HEAD(&sav->accel_ifps, ihs, sav_link);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, ihs, sav_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	if (ires != NULL)
		*ires = ihs;
	return (0);
errout:
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(ihs, M_IPSEC_MISC);
	if (ires != NULL)
		*ires = NULL;
	return (error);
}
447 
/*
 * Unlink and destroy one SA offload handle.  Entered and exited with
 * ipsec_accel_sav_tmp held, but the lock is DROPPED in the middle to
 * epoch-wait (flushing concurrent lock-free readers) and to call the
 * driver, which may sleep.  Callers iterating lists must therefore
 * restart/re-read after each call.  If 'freesav' the handle's SA
 * reference is dropped too.
 */
static void
ipsec_accel_forget_handle_sav(struct ifp_handle_sav *i, bool freesav)
{
	struct ifnet *ifp;
	struct secasvar *sav;

	mtx_assert(&ipsec_accel_sav_tmp, MA_OWNED);

	CK_LIST_REMOVE(i, sav_link);
	CK_LIST_REMOVE(i, sav_allh_link);
	DRVSPI_SA_PCTRIE_REMOVE(&drv_spi_pctrie, i->drv_spi);
	mtx_unlock(&ipsec_accel_sav_tmp);
	/* Wait out epoch readers that may still see the handle. */
	NET_EPOCH_WAIT();
	ifp = i->ifp;
	sav = i->sav;
	/* Only accepted SAs have driver state to tear down. */
	if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
	    IFP_HS_HANDLED) {
		dprintf("sa deinstall %s %p spi %#x ifl %#x\n",
		    if_name(ifp), sav, be32toh(sav->spi), i->flags);
		ifp->if_ipsec_accel_m->if_sa_deinstall(ifp,
		    i->drv_spi, i->ifdata);
	}
	if_rele(ifp);
	free_unr(drv_spi_unr, i->drv_spi);
	free(i, M_IPSEC_MISC);
	if (freesav)
		key_freesav(&sav);
	mtx_lock(&ipsec_accel_sav_tmp);
}
477 
478 static void
479 ipsec_accel_forget_sav_clear(struct secasvar *sav)
480 {
481 	struct ifp_handle_sav *i;
482 
483 	for (;;) {
484 		i = CK_LIST_FIRST(&sav->accel_ifps);
485 		if (i == NULL)
486 			break;
487 		ipsec_accel_forget_handle_sav(i, false);
488 	}
489 }
490 
/*
 * Taskqueue body for SA deinstall: tear down all interface handles and
 * drop the SA reference taken by ipsec_accel_forget_sav_impl().
 */
static void
ipsec_accel_forget_sav_act(void *arg, int pending)
{
	struct ipsec_accel_forget_tq *tq;
	struct secasvar *sav;

	tq = arg;
	sav = tq->sav;
	CURVNET_SET(tq->forget_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	ipsec_accel_forget_sav_clear(sav);
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesav(&sav);
	CURVNET_RESTORE();
	free(tq, M_TEMP);
}
507 
/*
 * Hook entry: schedule deinstall of an offloaded SA.  Claims the
 * pre-allocated forget context (see ipsec_accel_alloc_forget_tq());
 * if it is absent or already claimed, the DEINST flag alone is left
 * set and the racing installer (ipsec_accel_sa_newkey_act()) performs
 * the cleanup instead.
 */
void
ipsec_accel_forget_sav_impl(struct secasvar *sav)
{
	struct ipsec_accel_forget_tq *tq;

	mtx_lock(&ipsec_accel_sav_tmp);
	sav->accel_flags |= SADB_KEY_ACCEL_DEINST;
	tq = (void *)atomic_load_ptr(&sav->accel_forget_tq);
	if (tq == NULL || !atomic_cmpset_ptr(&sav->accel_forget_tq,
	    (uintptr_t)tq, 0)) {
		mtx_unlock(&ipsec_accel_sav_tmp);
		return;
	}
	mtx_unlock(&ipsec_accel_sav_tmp);

	/* Hold the SA until the task runs; released in the task body. */
	refcount_acquire(&sav->refcnt);
	TASK_INIT(&tq->forget_task, 0, ipsec_accel_forget_sav_act, tq);
	tq->forget_vnet = curthread->td_vnet;
	tq->sav = sav;
	taskqueue_enqueue(ipsec_accel_tq, &tq->forget_task);
}
529 
/*
 * Drop all SA offload handles bound to a departing/downed interface.
 * Because ipsec_accel_forget_handle_sav() drops the list lock, a
 * marker entry is threaded through the global list to keep the
 * iterator's position stable across the unlock.
 */
static void
ipsec_accel_on_ifdown_sav(struct ifnet *ifp)
{
	struct ifp_handle_sav *i, *marker;

	marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	marker->flags = IFP_HS_MARKER;

	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, marker,
	    sav_allh_link);
	for (;;) {
		i = CK_LIST_NEXT(marker, sav_allh_link);
		if (i == NULL)
			break;
		/* Advance the marker past the entry we examine. */
		CK_LIST_REMOVE(marker, sav_allh_link);
		CK_LIST_INSERT_AFTER(i, marker, sav_allh_link);
		if (i->ifp == ifp) {
			refcount_acquire(&i->sav->refcnt); /* XXXKIB wrap ? */
			ipsec_accel_forget_handle_sav(i, true);
		}
	}
	CK_LIST_REMOVE(marker, sav_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(marker, M_IPSEC_MISC);
}
556 
557 static struct ifp_handle_sav *
558 ipsec_accel_is_accel_sav_ptr_raw(struct secasvar *sav, struct ifnet *ifp)
559 {
560 	struct ifp_handle_sav *i;
561 
562 	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
563 		return (NULL);
564 	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
565 		if (i->ifp == ifp)
566 			return (i);
567 	}
568 	return (NULL);
569 }
570 
/* Epoch-checked wrapper around ipsec_accel_is_accel_sav_ptr_raw(). */
static struct ifp_handle_sav *
ipsec_accel_is_accel_sav_ptr(struct secasvar *sav, struct ifnet *ifp)
{
	NET_EPOCH_ASSERT();
	return (ipsec_accel_is_accel_sav_ptr_raw(sav, ifp));
}
577 
/*
 * Hook entry: is this SA offloaded to (or at least recorded against)
 * any interface?
 */
static bool
ipsec_accel_is_accel_sav_impl(struct secasvar *sav)
{
	return (!CK_LIST_EMPTY(&sav->accel_ifps));
}
583 
584 static struct secasvar *
585 ipsec_accel_drvspi_to_sa(u_int drv_spi)
586 {
587 	struct ifp_handle_sav *i;
588 
589 	i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
590 	if (i == NULL)
591 		return (NULL);
592 	return (i->sav);
593 }
594 
595 static struct ifp_handle_sp *
596 ipsec_accel_find_accel_sp(struct secpolicy *sp, if_t ifp)
597 {
598 	struct ifp_handle_sp *i;
599 
600 	CK_LIST_FOREACH(i, &sp->accel_ifps, sp_link) {
601 		if (i->ifp == ifp)
602 			return (i);
603 	}
604 	return (NULL);
605 }
606 
/* Was this policy already offered to the interface? */
static bool
ipsec_accel_is_accel_sp(struct secpolicy *sp, if_t ifp)
{
	return (ipsec_accel_find_accel_sp(sp, ifp) != NULL);
}
612 
/*
 * Create and link an SP offload handle for (sp, ifp), taking an
 * interface reference.  Initially marked IFP_HP_HANDLED; the caller
 * downgrades to IFP_HP_REJECTED if the driver refuses the policy.
 * Currently cannot fail (M_WAITOK), but returns int for symmetry.
 */
static int
ipsec_accel_remember_sp(struct secpolicy *sp, if_t ifp,
    struct ifp_handle_sp **ip)
{
	struct ifp_handle_sp *i;

	i = malloc(sizeof(*i), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	i->sp = sp;
	i->ifp = ifp;
	if_ref(ifp);
	i->flags = IFP_HP_HANDLED;
	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_INSERT_HEAD(&sp->accel_ifps, i, sp_link);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, i, sp_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	*ip = i;
	return (0);
}
631 
632 static bool
633 ipsec_accel_spdadd_match(if_t ifp, void *arg)
634 {
635 	struct secpolicy *sp;
636 
637 	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0 ||
638 	    ifp->if_ipsec_accel_m->if_spdadd == NULL)
639 		return (false);
640 	sp = arg;
641 	if (sp->accel_ifname != NULL &&
642 	    strcmp(sp->accel_ifname, if_name(ifp)) != 0)
643 		return (false);
644 	if (ipsec_accel_is_accel_sp(sp, ifp))
645 		return (false);
646 	return (true);
647 }
648 
/*
 * if_foreach_sleep() handler callback: install the policy on one
 * interface.  A handle is recorded first; if the driver then refuses,
 * the handle is kept but flagged IFP_HP_REJECTED so the interface is
 * not retried.  Always returns 0 to continue iteration.
 */
static int
ipsec_accel_spdadd_cb(if_t ifp, void *arg)
{
	struct secpolicy *sp;
	struct inpcb *inp;
	struct ifp_handle_sp *i;
	int error;

	sp = arg;
	inp = sp->ipsec_accel_add_sp_inp;
	dprintf("ipsec_accel_spdadd_cb: ifp %s m %p sp %p inp %p\n",
	    if_name(ifp), ifp->if_ipsec_accel_m->if_spdadd, sp, inp);
	error = ipsec_accel_remember_sp(sp, ifp, &i);
	if (error != 0) {
		dprintf("ipsec_accel_spdadd: %s if_spdadd %p remember res %d\n",
		    if_name(ifp), sp, error);
		return (0);
	}
	error = ifp->if_ipsec_accel_m->if_spdadd(ifp, sp, inp, &i->ifdata);
	if (error != 0) {
		i->flags |= IFP_HP_REJECTED;
		dprintf("ipsec_accel_spdadd: %s if_spdadd %p res %d\n",
		    if_name(ifp), sp, error);
	}
	return (0);
}
675 
/*
 * Taskqueue body for SP installation: walk matching interfaces, then
 * release the inpcb and policy references taken at enqueue time.
 */
static void
ipsec_accel_spdadd_act(void *arg, int pending)
{
	struct secpolicy *sp;
	struct inpcb *inp;

	sp = arg;
	CURVNET_SET(sp->accel_add_tq.adddel_vnet);
	if_foreach_sleep(ipsec_accel_spdadd_match, arg,
	    ipsec_accel_spdadd_cb, arg);
	inp = sp->ipsec_accel_add_sp_inp;
	if (inp != NULL) {
		/* Drop the in_pcbref() from ipsec_accel_spdadd_impl(). */
		INP_WLOCK(inp);
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);
		sp->ipsec_accel_add_sp_inp = NULL;
	}
	CURVNET_RESTORE();
	key_freesp(&sp);
}
696 
/*
 * Hook entry: schedule offload installation of a security policy.
 * adddel_scheduled provides single-shot semantics so the task is
 * enqueued at most once per policy.
 */
void
ipsec_accel_spdadd_impl(struct secpolicy *sp, struct inpcb *inp)
{
	struct ipsec_accel_adddel_sp_tq *tq;

	if (sp == NULL)
		return;
	/* Nothing to offload: no transforms and no socket policy. */
	if (sp->tcount == 0 && inp == NULL)
		return;
	tq = &sp->accel_add_tq;
	if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
		return;
	tq->adddel_vnet = curthread->td_vnet;
	sp->ipsec_accel_add_sp_inp = inp;
	if (inp != NULL)
		in_pcbref(inp);	/* released in ipsec_accel_spdadd_act() */
	TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spdadd_act, sp);
	key_addref(sp);		/* released in ipsec_accel_spdadd_act() */
	taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
}
717 
/*
 * Taskqueue body for SP removal: detach every interface handle of the
 * policy, calling the driver's if_spddel for handles it had accepted.
 * The list lock is dropped per-handle for the epoch wait and the
 * driver call, so the list head is re-read on every iteration.
 */
static void
ipsec_accel_spddel_act(void *arg, int pending)
{
	struct ifp_handle_sp *i;
	struct secpolicy *sp;
	int error;

	sp = arg;
	CURVNET_SET(sp->accel_del_tq.adddel_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	for (;;) {
		i = CK_LIST_FIRST(&sp->accel_ifps);
		if (i == NULL)
			break;
		CK_LIST_REMOVE(i, sp_link);
		CK_LIST_REMOVE(i, sp_allh_link);
		mtx_unlock(&ipsec_accel_sav_tmp);
		/* Flush epoch readers that may still see the handle. */
		NET_EPOCH_WAIT();
		if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
		    IFP_HP_HANDLED) {
			dprintf("spd deinstall %s %p\n", if_name(i->ifp), sp);
			error = i->ifp->if_ipsec_accel_m->if_spddel(i->ifp,
			    sp, i->ifdata);
			if (error != 0) {
				dprintf(
		    "ipsec_accel_spddel: %s if_spddel %p res %d\n",
				    if_name(i->ifp), sp, error);
			}
		}
		if_rele(i->ifp);
		free(i, M_IPSEC_MISC);
		mtx_lock(&ipsec_accel_sav_tmp);
	}
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesp(&sp);
	CURVNET_RESTORE();
}
755 
/*
 * Hook entry: schedule offload removal of a security policy; at most
 * one removal task per policy (adddel_scheduled gate).
 */
void
ipsec_accel_spddel_impl(struct secpolicy *sp)
{
	struct ipsec_accel_adddel_sp_tq *tq;

	if (sp == NULL)
		return;

	tq = &sp->accel_del_tq;
	if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
		return;
	tq->adddel_vnet = curthread->td_vnet;
	TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spddel_act, sp);
	key_addref(sp);		/* released in ipsec_accel_spddel_act() */
	taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
}
772 
773 static void
774 ipsec_accel_on_ifdown_sp(struct ifnet *ifp)
775 {
776 	struct ifp_handle_sp *i, *marker;
777 	struct secpolicy *sp;
778 	int error;
779 
780 	marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
781 	marker->flags = IFP_HS_MARKER;
782 
783 	mtx_lock(&ipsec_accel_sav_tmp);
784 	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, marker,
785 	    sp_allh_link);
786 	for (;;) {
787 		i = CK_LIST_NEXT(marker, sp_allh_link);
788 		if (i == NULL)
789 			break;
790 		CK_LIST_REMOVE(marker, sp_allh_link);
791 		CK_LIST_INSERT_AFTER(i, marker, sp_allh_link);
792 		if (i->ifp != ifp)
793 			continue;
794 
795 		sp = i->sp;
796 		key_addref(sp);
797 		CK_LIST_REMOVE(i, sp_link);
798 		CK_LIST_REMOVE(i, sp_allh_link);
799 		mtx_unlock(&ipsec_accel_sav_tmp);
800 		NET_EPOCH_WAIT();
801 		if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
802 		    IFP_HP_HANDLED) {
803 			dprintf("spd deinstall %s %p\n", if_name(ifp), sp);
804 			error = ifp->if_ipsec_accel_m->if_spddel(ifp,
805 			    sp, i->ifdata);
806 		}
807 		if (error != 0) {
808 			dprintf(
809 		    "ipsec_accel_on_ifdown_sp: %s if_spddel %p res %d\n",
810 			    if_name(ifp), sp, error);
811 		}
812 		key_freesp(&sp);
813 		if_rele(ifp);
814 		free(i, M_IPSEC_MISC);
815 		mtx_lock(&ipsec_accel_sav_tmp);
816 	}
817 	CK_LIST_REMOVE(marker, sp_allh_link);
818 	mtx_unlock(&ipsec_accel_sav_tmp);
819 	free(marker, M_IPSEC_MISC);
820 }
821 
/* Hook entry: flush all SP then SA offload state for the interface. */
static void
ipsec_accel_on_ifdown_impl(struct ifnet *ifp)
{
	ipsec_accel_on_ifdown_sp(ifp);
	ipsec_accel_on_ifdown_sav(ifp);
}
828 
/* ifnet_departure_event handler: treat detach the same as ifdown. */
static void
ipsec_accel_ifdetach_event(void *arg __unused, struct ifnet *ifp)
{
	ipsec_accel_on_ifdown_impl(ifp);
}
834 
835 static bool
836 ipsec_accel_output_pad(struct mbuf *m, struct secasvar *sav, int skip, int mtu)
837 {
838 	int alen, blks, hlen, padding, rlen;
839 
840 	rlen = m->m_pkthdr.len - skip;
841 	hlen = ((sav->flags & SADB_X_EXT_OLD) != 0 ? sizeof(struct esp) :
842 	    sizeof(struct newesp)) + sav->ivlen;
843 	blks = MAX(4, SAV_ISCTR(sav) && VNET(esp_ctr_compatibility) ?
844 	    sav->tdb_encalgxform->native_blocksize :
845 	    sav->tdb_encalgxform->blocksize);
846 	padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;
847 	alen = xform_ah_authsize(sav->tdb_authalgxform);
848 
849 	return (skip + hlen + rlen + padding + alen <= mtu);
850 }
851 
852 static bool
853 ipsec_accel_output_tag(struct mbuf *m, u_int drv_spi)
854 {
855 	struct ipsec_accel_out_tag *tag;
856 
857 	tag = (struct ipsec_accel_out_tag *)m_tag_get(
858 	    PACKET_TAG_IPSEC_ACCEL_OUT, sizeof(*tag), M_NOWAIT);
859 	if (tag == NULL)
860 		return (false);
861 	tag->drv_spi = drv_spi;
862 	m_tag_prepend(m, &tag->tag);
863 	return (true);
864 }
865 
866 bool
867 ipsec_accel_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp,
868     struct secpolicy *sp, struct secasvar *sav, int af, int mtu, int *hwassist)
869 {
870 	struct ifp_handle_sav *i;
871 	struct ip *ip;
872 	struct tcpcb *tp;
873 	u_long ip_len, skip;
874 	bool res;
875 
876 	*hwassist = 0;
877 	res = false;
878 	if (ifp == NULL)
879 		return (res);
880 
881 	M_ASSERTPKTHDR(m);
882 	NET_EPOCH_ASSERT();
883 
884 	if (sav == NULL) {
885 		res = ipsec_accel_output_tag(m, IPSEC_ACCEL_DRV_SPI_BYPASS);
886 		goto out;
887 	}
888 
889 	i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
890 	if (i == NULL || (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
891 	    IFP_HS_HANDLED)
892 		goto out;
893 
894 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
895 		ip_len = m->m_pkthdr.len;
896 		if (ip_len + i->hdr_ext_size > mtu)
897 			goto out;
898 		switch (af) {
899 		case AF_INET:
900 			ip = mtod(m, struct ip *);
901 			skip = ip->ip_hl << 2;
902 			break;
903 		case AF_INET6:
904 			skip = sizeof(struct ip6_hdr);
905 			break;
906 		default:
907 			__unreachable();
908 		}
909 		if (!ipsec_accel_output_pad(m, sav, skip, mtu))
910 			goto out;
911 	}
912 
913 	if (!ipsec_accel_output_tag(m, i->drv_spi))
914 		goto out;
915 
916 	ipsec_accel_sa_recordxfer(sav, m);
917 	key_freesav(&sav);
918 	if (sp != NULL)
919 		key_freesp(&sp);
920 
921 	*hwassist = ifp->if_ipsec_accel_m->if_hwassist(ifp, sav,
922 	    i->drv_spi, i->ifdata);
923 	res = true;
924 out:
925 	if (inp != NULL && inp->inp_pcbinfo == &V_tcbinfo) {
926 		INP_WLOCK_ASSERT(inp);
927 		tp = (struct tcpcb *)inp;
928 		if (res && (*hwassist & (CSUM_TSO | CSUM_IP6_TSO)) != 0) {
929 			tp->t_flags2 |= TF2_IPSEC_TSO;
930 		} else {
931 			tp->t_flags2 &= ~TF2_IPSEC_TSO;
932 		}
933 	}
934 	return (res);
935 }
936 
937 struct ipsec_accel_in_tag *
938 ipsec_accel_input_tag_lookup(const struct mbuf *m)
939 {
940 	struct ipsec_accel_in_tag *tag;
941 	struct m_tag *xtag;
942 
943 	xtag = m_tag_find(__DECONST(struct mbuf *, m),
944 	    PACKET_TAG_IPSEC_ACCEL_IN, NULL);
945 	if (xtag == NULL)
946 		return (NULL);
947 	tag = __containerof(xtag, struct ipsec_accel_in_tag, tag);
948 	return (tag);
949 }
950 
/*
 * Inbound hook: account a hardware-decrypted packet against its SA.
 * Returns ENXIO when the packet carries no accel tag (caller proceeds
 * with software processing), EINPROGRESS when the tag is corrupt and
 * the packet was consumed/dropped, and 0 on success.
 */
int
ipsec_accel_input(struct mbuf *m, int offset, int proto)
{
	struct secasvar *sav;
	struct ipsec_accel_in_tag *tag;

	tag = ipsec_accel_input_tag_lookup(m);
	if (tag == NULL)
		return (ENXIO);

	if (tag->drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
	    tag->drv_spi > IPSEC_ACCEL_DRV_SPI_MAX) {
		dprintf("if %s mbuf %p drv_spi %d invalid, packet dropped\n",
		    (m->m_flags & M_PKTHDR) != 0 ? if_name(m->m_pkthdr.rcvif) :
		    "<unknwn>", m, tag->drv_spi);
		m_freem(m);
		return (EINPROGRESS);
	}

	sav = ipsec_accel_drvspi_to_sa(tag->drv_spi);
	if (sav != NULL)
		ipsec_accel_sa_recordxfer(sav, m);
	return (0);
}
975 
/*
 * Account one accelerated packet on the SA's software counters:
 * accel_lft_sw[0] counts packets (allocations), accel_lft_sw[1]
 * counts bytes; first-use time is latched once.
 */
static void
ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m)
{
	counter_u64_add(sav->accel_lft_sw, 1);
	counter_u64_add(sav->accel_lft_sw + 1, m->m_pkthdr.len);
	if (sav->accel_firstused == 0)
		sav->accel_firstused = time_second;
}
984 
/*
 * Fold lifetime counters from lft_l into the accumulator lft_c:
 * counts add, usetime takes the earlier (smaller) timestamp.
 *
 * NOTE(review): when lft_c was just zeroed, min() pins usetime at 0
 * even if lft_l carries a real timestamp — confirm this is the
 * intended "unset wins" semantics for the callers in
 * ipsec_accel_sa_lifetime_op_impl().
 */
static void
ipsec_accel_sa_lifetime_update(struct seclifetime *lft_c,
    const struct seclifetime *lft_l)
{
	lft_c->allocations += lft_l->allocations;
	lft_c->bytes += lft_l->bytes;
	lft_c->usetime = min(lft_c->usetime, lft_l->usetime);
}
993 
/*
 * Hook entry, called by drivers to push absolute hardware counters
 * for (sav, ifp, drv_spi).  The deltas against the previously
 * reported values are applied to the SA's lifetime counters and the
 * aggregate accel_hw_* totals; the new absolutes are cached in the
 * handle for the next call.
 */
static void
ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav, if_t ifp,
    u_int drv_spi, uint64_t octets, uint64_t allocs)
{
	struct epoch_tracker et;
	struct ifp_handle_sav *i;
	uint64_t odiff, adiff;

	NET_EPOCH_ENTER(et);
	mtx_lock(&ipsec_accel_cnt_lock);

	/* Any hardware traffic marks the SA as used. */
	if (allocs != 0) {
		if (sav->firstused == 0)
			sav->firstused = time_second;
		if (sav->accel_firstused == 0)
			sav->accel_firstused = time_second;
	}

	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp && i->drv_spi == drv_spi)
			break;
	}
	if (i == NULL)
		goto out;

	/* Counters from the driver are absolute; convert to deltas. */
	odiff = octets - i->cnt_octets;
	adiff = allocs - i->cnt_allocs;

	if (sav->lft_c != NULL) {
		counter_u64_add(sav->lft_c_bytes, odiff);
		counter_u64_add(sav->lft_c_allocations, adiff);
	}

	i->cnt_octets = octets;
	i->cnt_allocs = allocs;
	sav->accel_hw_octets += odiff;
	sav->accel_hw_allocs += adiff;

out:
	mtx_unlock(&ipsec_accel_cnt_lock);
	NET_EPOCH_EXIT(et);
}
1036 
/*
 * Hook entry: read back the cached hardware counters for
 * (sav, ifp, drv_spi).  Returns ENOENT if the SA has no handle for
 * that interface/SPI pair.
 */
static int
ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs)
{
	struct ifp_handle_sav *i;
	int error;

	NET_EPOCH_ASSERT();
	error = 0;

	mtx_lock(&ipsec_accel_cnt_lock);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp && i->drv_spi == drv_spi) {
			*octets = i->cnt_octets;
			*allocs = i->cnt_allocs;
			break;
		}
	}
	/* CK_LIST_FOREACH leaves i == NULL when no entry matched. */
	if (i == NULL)
		error = ENOENT;
	mtx_unlock(&ipsec_accel_cnt_lock);
	return (error);
}
1060 
/*
 * Query the driver's per-SA counters for one specific interface into
 * *lft.  Quietly does nothing when the SA is not offloaded there or
 * the driver lacks if_sa_cnt.
 */
static void
ipsec_accel_sa_lifetime_hw(struct secasvar *sav, if_t ifp,
    struct seclifetime *lft)
{
	struct ifp_handle_sav *i;
	if_sa_cnt_fn_t p;

	IFNET_RLOCK_ASSERT();

	i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
	if (i != NULL && (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
	    IFP_HS_HANDLED) {
		p = ifp->if_ipsec_accel_m->if_sa_cnt;
		if (p != NULL)
			p(ifp, sav, i->drv_spi, i->ifdata, lft);
	}
}
1078 
/*
 * Hook entry: gather SA lifetime counters per 'op' and either replace
 * or (IF_SA_CNT_UPD) accumulate into *lft_c.
 *   IF_SA_CNT_IFP_HW_VAL   - hardware counters of one interface
 *   IF_SA_CNT_TOTAL_SW_VAL - software-kept accel counters
 *   IF_SA_CNT_TOTAL_HW_VAL - sum of hardware counters of all handles
 * The sahtree read lock is dropped around driver calls, which may
 * sleep.
 */
static int
ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
    struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
    struct rm_priotracker *sahtree_trackerp)
{
	struct seclifetime lft_l, lft_s;
	struct ifp_handle_sav *i;
	if_t ifp1;
	if_sa_cnt_fn_t p;
	int error;

	error = 0;
	memset(&lft_l, 0, sizeof(lft_l));
	memset(&lft_s, 0, sizeof(lft_s));

	switch (op & ~IF_SA_CNT_UPD) {
	case IF_SA_CNT_IFP_HW_VAL:
		ipsec_accel_sa_lifetime_hw(sav, ifp, &lft_l);
		/*
		 * NOTE(review): folding the all-zero lft_s here zeroes
		 * lft_l.usetime via min() — verify this is intended.
		 */
		ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
		break;

	case IF_SA_CNT_TOTAL_SW_VAL:
		lft_l.allocations = (uint32_t)counter_u64_fetch(
		    sav->accel_lft_sw);
		lft_l.bytes = counter_u64_fetch(sav->accel_lft_sw + 1);
		lft_l.usetime = sav->accel_firstused;
		break;

	case IF_SA_CNT_TOTAL_HW_VAL:
		IFNET_RLOCK_ASSERT();
		CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
			if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
			    IFP_HS_HANDLED)
				continue;
			ifp1 = i->ifp;
			p = ifp1->if_ipsec_accel_m->if_sa_cnt;
			if (p == NULL)
				continue;
			memset(&lft_s, 0, sizeof(lft_s));
			/* Driver call may sleep; drop the sahtree lock. */
			if (sahtree_trackerp != NULL)
				ipsec_sahtree_runlock(sahtree_trackerp);
			error = p(ifp1, sav, i->drv_spi, i->ifdata, &lft_s);
			if (sahtree_trackerp != NULL)
				ipsec_sahtree_rlock(sahtree_trackerp);
			if (error == 0)
				ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
		}
		break;
	}

	if (error == 0) {
		if ((op & IF_SA_CNT_UPD) == 0)
			memset(lft_c, 0, sizeof(*lft_c));
		ipsec_accel_sa_lifetime_update(lft_c, &lft_l);
	}

	return (error);
}
1137 
/* Hook entry: wait until all queued offload install/forget work ran. */
static void
ipsec_accel_sync_imp(void)
{
	taskqueue_drain_all(ipsec_accel_tq);
}
1143 
/*
 * Hook entry: build the PF_KEY extension chain naming the SA's
 * offload interface(s).  If the SA is pinned by name, report that
 * name; otherwise report every interface that accepted the SA,
 * concatenating one extension mbuf chain per interface.
 */
static struct mbuf *
ipsec_accel_key_setaccelif_impl(struct secasvar *sav)
{
	struct mbuf *m, *m1;
	struct ifp_handle_sav *i;
	struct epoch_tracker et;

	if (sav->accel_ifname != NULL)
		return (key_setaccelif(sav->accel_ifname));

	m = m1 = NULL;

	NET_EPOCH_ENTER(et);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
		    IFP_HS_HANDLED) {
			m1 = key_setaccelif(if_name(i->ifp));
			if (m == NULL)
				m = m1;
			else if (m1 != NULL)
				m_cat(m, m1);
		}
	}
	NET_EPOCH_EXIT(et);
	return (m);
}
1170 
1171 static bool
1172 ipsec_accel_fill_xh_impl(if_t ifp, uint32_t drv_spi, struct xform_history *xh)
1173 {
1174 	struct ifp_handle_sav *i;
1175 
1176 	if (drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
1177 	    drv_spi > IPSEC_ACCEL_DRV_SPI_MAX)
1178 		return (false);
1179 
1180 	i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
1181 	if (i == NULL)
1182 		return (false);
1183 	memcpy(xh, &i->xfh, sizeof(*xh));
1184 	return (true);
1185 }
1186 
1187 #endif	/* IPSEC_OFFLOAD */
1188