xref: /freebsd/sys/netipsec/ipsec_offload.c (revision 32cd3ee5901ea33d41ff550e5f40ce743c8d4165)
1 /*-
2  * Copyright (c) 2021,2022 NVIDIA CORPORATION & AFFILIATES. ALL RIGHTS RESERVED.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include "opt_inet.h"
27 #include "opt_inet6.h"
28 #include "opt_ipsec.h"
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/ck.h>
33 #include <sys/eventhandler.h>
34 #include <sys/kernel.h>
35 #include <sys/mbuf.h>
36 #include <sys/pctrie.h>
37 #include <sys/proc.h>
38 #include <sys/socket.h>
39 #include <sys/sysctl.h>
40 #include <sys/protosw.h>
41 #include <sys/stdarg.h>
42 #include <sys/taskqueue.h>
43 
44 #include <net/if.h>
45 #include <net/if_var.h>
46 #include <net/if_private.h>
47 #include <net/vnet.h>
48 #include <netinet/in.h>
49 #include <netinet/ip.h>
50 #include <netinet/ip_var.h>
51 #include <netinet/ip6.h>
52 #include <netinet6/ip6_var.h>
53 #include <netinet/in_pcb.h>
54 #include <netinet/tcp_var.h>
55 
56 #include <netipsec/key.h>
57 #include <netipsec/keydb.h>
58 #include <netipsec/key_debug.h>
59 #include <netipsec/xform.h>
60 #include <netipsec/ipsec.h>
61 #include <netipsec/ipsec_offload.h>
62 #include <netipsec/ah_var.h>
63 #include <netipsec/esp.h>
64 #include <netipsec/esp_var.h>
65 #include <netipsec/ipcomp_var.h>
66 
67 #ifdef IPSEC_OFFLOAD
68 
/* Global offload state. */
static struct mtx ipsec_accel_sav_tmp;		/* protects SA/SP handle lists */
static struct unrhdr *drv_spi_unr;		/* allocator for driver-visible SPIs */
static struct mtx ipsec_accel_cnt_lock;		/* protects lifetime counters */
static struct taskqueue *ipsec_accel_tq;	/* single-threaded work queue */
73 
/*
 * Deferred-work context for installing a freshly keyed SA into capable
 * interfaces from the ipsec_accel taskqueue.
 */
struct ipsec_accel_install_newkey_tq {
	struct secasvar *sav;		/* referenced SA to offload */
	struct vnet *install_vnet;	/* vnet of the enqueueing thread */
	struct task install_task;	/* runs ipsec_accel_sa_newkey_act() */
};
79 
/*
 * Deferred-work context for deinstalling (forgetting) an offloaded SA.
 * Pre-allocated by ipsec_accel_alloc_forget_tq() so the forget path
 * does not need to allocate.
 */
struct ipsec_accel_forget_tq {
	struct vnet *forget_vnet;	/* vnet of the enqueueing thread */
	struct task forget_task;	/* runs ipsec_accel_forget_sav_act() */
	struct secasvar *sav;		/* referenced SA being forgotten */
};
85 
/*
 * Per-(SA, interface) offload handle.  Linked both into the SA's
 * accel_ifps list and the global all-handles list, and indexed by
 * drv_spi in drv_spi_pctrie.
 */
struct ifp_handle_sav {
	CK_LIST_ENTRY(ifp_handle_sav) sav_link;		/* sav->accel_ifps */
	CK_LIST_ENTRY(ifp_handle_sav) sav_allh_link;	/* global list */
	struct secasvar *sav;
	struct ifnet *ifp;		/* referenced via if_ref() */
	void *ifdata;			/* opaque driver cookie from if_sa_newkey */
	uint64_t drv_spi;		/* driver-visible SPI from drv_spi_unr */
	uint32_t flags;			/* IFP_HS_* */
	size_t hdr_ext_size;		/* ESP encapsulation overhead, esp_hdrsiz() */
	uint64_t cnt_octets;		/* last absolute octet count from driver */
	uint64_t cnt_allocs;		/* last absolute packet count from driver */
	struct xform_history xfh;	/* snapshot of the SA's saidx/spi */
};

#define	IFP_HS_HANDLED	0x00000001	/* driver accepted the SA */
#define	IFP_HS_REJECTED	0x00000002	/* driver (or policy) refused the SA */
#define	IFP_HS_MARKER	0x00000010	/* list-iteration marker, not a real handle */

static CK_LIST_HEAD(, ifp_handle_sav) ipsec_accel_all_sav_handles;
105 
/*
 * Per-(security policy, interface) offload handle, analogous to
 * struct ifp_handle_sav.
 */
struct ifp_handle_sp {
	CK_LIST_ENTRY(ifp_handle_sp) sp_link;		/* sp->accel_ifps */
	CK_LIST_ENTRY(ifp_handle_sp) sp_allh_link;	/* global list */
	struct secpolicy *sp;
	struct ifnet *ifp;		/* referenced via if_ref() */
	void *ifdata;			/* opaque driver cookie from if_spdadd */
	uint32_t flags;			/* IFP_HP_* */
};

#define	IFP_HP_HANDLED	0x00000001	/* driver accepted the SP */
#define	IFP_HP_REJECTED	0x00000002	/* driver refused the SP */
#define	IFP_HP_MARKER	0x00000004	/* list-iteration marker */

static CK_LIST_HEAD(, ifp_handle_sp) ipsec_accel_all_sp_handles;
120 
121 static void *
122 drvspi_sa_trie_alloc(struct pctrie *ptree)
123 {
124 	void *res;
125 
126 	res = malloc(pctrie_node_size(), M_IPSEC_MISC, M_ZERO | M_NOWAIT);
127 	if (res != NULL)
128 		pctrie_zone_init(res, 0, 0);
129 	return (res);
130 }
131 
/* Release a pctrie node obtained from drvspi_sa_trie_alloc(). */
static void
drvspi_sa_trie_free(struct pctrie *ptree, void *node)
{
	free(node, M_IPSEC_MISC);
}
137 
/* Radix trie keyed by drv_spi, resolving to the SA's offload handle. */
PCTRIE_DEFINE(DRVSPI_SA, ifp_handle_sav, drv_spi,
    drvspi_sa_trie_alloc, drvspi_sa_trie_free);
static struct pctrie drv_spi_pctrie;
141 
static eventhandler_tag ipsec_accel_ifdetach_event_tag;

/*
 * Forward declarations for the implementations installed into the
 * netipsec ipsec_accel_*_p hooks and for local helpers.
 */
static void ipsec_accel_sa_newkey_impl(struct secasvar *sav);
static int ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
    u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires);
static void ipsec_accel_forget_sav_clear(struct secasvar *sav);
static struct ifp_handle_sav *ipsec_accel_is_accel_sav_ptr(struct secasvar *sav,
    struct ifnet *ifp);
static int ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
    struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
    struct rm_priotracker *sahtree_trackerp);
static void ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m);
static void ipsec_accel_sync_imp(void);
static bool ipsec_accel_is_accel_sav_impl(struct secasvar *sav);
static struct mbuf *ipsec_accel_key_setaccelif_impl(struct secasvar *sav);
static void ipsec_accel_on_ifdown_impl(struct ifnet *ifp);
static void ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t octets, uint64_t allocs);
static int ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs);
static void ipsec_accel_ifdetach_event(void *arg, struct ifnet *ifp);
static bool ipsec_accel_fill_xh_impl(if_t ifp, uint32_t drv_spi,
    struct xform_history *xh);
165 
/*
 * Boot-time initialization: create locks, the drv_spi unr allocator,
 * the single-threaded work queue, publish the ipsec_accel_* entry
 * points and register for interface departure notifications.
 */
static void
ipsec_accel_init(void *arg)
{
	mtx_init(&ipsec_accel_sav_tmp, "ipasat", MTX_DEF, 0);
	mtx_init(&ipsec_accel_cnt_lock, "ipascn", MTX_DEF, 0);
	/* drv_spi identifiers handed to drivers come from this range. */
	drv_spi_unr = new_unrhdr(IPSEC_ACCEL_DRV_SPI_MIN,
	    IPSEC_ACCEL_DRV_SPI_MAX, &ipsec_accel_sav_tmp);
	ipsec_accel_tq = taskqueue_create("ipsec_offload", M_WAITOK,
	    taskqueue_thread_enqueue, &ipsec_accel_tq);
	(void)taskqueue_start_threads(&ipsec_accel_tq,
	    1 /* Must be single-threaded */, PWAIT,
	    "ipsec_offload");
	/* Hook this implementation into netipsec. */
	ipsec_accel_sa_newkey_p = ipsec_accel_sa_newkey_impl;
	ipsec_accel_forget_sav_p = ipsec_accel_forget_sav_impl;
	ipsec_accel_spdadd_p = ipsec_accel_spdadd_impl;
	ipsec_accel_spddel_p = ipsec_accel_spddel_impl;
	ipsec_accel_sa_lifetime_op_p = ipsec_accel_sa_lifetime_op_impl;
	ipsec_accel_sync_p = ipsec_accel_sync_imp;
	ipsec_accel_is_accel_sav_p = ipsec_accel_is_accel_sav_impl;
	ipsec_accel_key_setaccelif_p = ipsec_accel_key_setaccelif_impl;
	ipsec_accel_on_ifdown_p = ipsec_accel_on_ifdown_impl;
	ipsec_accel_drv_sa_lifetime_update_p =
	    ipsec_accel_drv_sa_lifetime_update_impl;
	ipsec_accel_drv_sa_lifetime_fetch_p =
	    ipsec_accel_drv_sa_lifetime_fetch_impl;
	ipsec_accel_fill_xh_p = ipsec_accel_fill_xh_impl;
	pctrie_init(&drv_spi_pctrie);
	ipsec_accel_ifdetach_event_tag = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, ipsec_accel_ifdetach_event, NULL,
	    EVENTHANDLER_PRI_ANY);
}
SYSINIT(ipsec_accel_init, SI_SUB_VNET_DONE, SI_ORDER_ANY,
    ipsec_accel_init, NULL);
199 
/*
 * Module teardown: unhook from netipsec, flush remaining offload state
 * and release all global resources.  Hooks are cleared before the sync
 * so no new work can be scheduled while draining.
 */
static void
ipsec_accel_fini(void *arg)
{
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    ipsec_accel_ifdetach_event_tag);
	ipsec_accel_sa_newkey_p = NULL;
	ipsec_accel_forget_sav_p = NULL;
	ipsec_accel_spdadd_p = NULL;
	ipsec_accel_spddel_p = NULL;
	ipsec_accel_sa_lifetime_op_p = NULL;
	ipsec_accel_sync_p = NULL;
	ipsec_accel_is_accel_sav_p = NULL;
	ipsec_accel_key_setaccelif_p = NULL;
	ipsec_accel_on_ifdown_p = NULL;
	ipsec_accel_drv_sa_lifetime_update_p = NULL;
	ipsec_accel_drv_sa_lifetime_fetch_p = NULL;
	ipsec_accel_fill_xh_p = NULL;
	ipsec_accel_sync_imp();
	clean_unrhdr(drv_spi_unr);	/* avoid panic, should go later */
	clear_unrhdr(drv_spi_unr);
	delete_unrhdr(drv_spi_unr);
	taskqueue_drain_all(ipsec_accel_tq);
	taskqueue_free(ipsec_accel_tq);
	mtx_destroy(&ipsec_accel_sav_tmp);
	mtx_destroy(&ipsec_accel_cnt_lock);
}
SYSUNINIT(ipsec_accel_fini, SI_SUB_VNET_DONE, SI_ORDER_ANY,
    ipsec_accel_fini, NULL);
228 
/* net.inet.ipsec.offload sysctl subtree. */
SYSCTL_NODE(_net_inet_ipsec, OID_AUTO, offload, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");

/* Gate for dprintf() tracing of offload install/deinstall events. */
static bool ipsec_offload_verbose = false;
SYSCTL_BOOL(_net_inet_ipsec_offload, OID_AUTO, verbose, CTLFLAG_RW,
    &ipsec_offload_verbose, 0,
    "Verbose SA/SP offload install and deinstall");
236 
237 static void
238 dprintf(const char *fmt, ...)
239 {
240 	va_list ap;
241 
242 	if (!ipsec_offload_verbose)
243 		return;
244 
245 	va_start(ap, fmt);
246 	vprintf(fmt, ap);
247 	va_end(ap);
248 }
249 
/*
 * Pre-allocate the forget-task context for the SA so that the later
 * forget path does not need to allocate memory.  The unlocked check is
 * only an optimization; losers of the cmpset race free their copy.
 */
static void
ipsec_accel_alloc_forget_tq(struct secasvar *sav)
{
	void *ftq;

	if (sav->accel_forget_tq != 0)
		return;

	ftq = malloc(sizeof(struct ipsec_accel_forget_tq), M_TEMP, M_WAITOK);
	if (!atomic_cmpset_ptr(&sav->accel_forget_tq, 0, (uintptr_t)ftq))
		free(ftq, M_TEMP);
}
262 
263 static bool
264 ipsec_accel_sa_install_match(if_t ifp, void *arg)
265 {
266 	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
267 		return (false);
268 	if (ifp->if_ipsec_accel_m->if_sa_newkey == NULL) {
269 		dprintf("driver bug ifp %s if_sa_newkey NULL\n",
270 		    if_name(ifp));
271 		return (false);
272 	}
273 	return (true);
274 }
275 
/*
 * if_foreach_sleep() callback, run from the taskqueue: offer the new
 * SA to one capable interface.  A driver-local SPI (drv_spi) is
 * allocated first; the outcome is recorded in an ifp_handle_sav marked
 * either IFP_HS_HANDLED or IFP_HS_REJECTED.
 */
static int
ipsec_accel_sa_newkey_cb(if_t ifp, void *arg)
{
	struct ipsec_accel_install_newkey_tq *tq;
	void *priv;
	u_int drv_spi;
	int error;

	tq = arg;

	dprintf("ipsec_accel_sa_newkey_act: ifp %s h %p spi %#x "
	    "flags %#x seq %d\n",
	    if_name(ifp), ifp->if_ipsec_accel_m->if_sa_newkey,
	    be32toh(tq->sav->spi), tq->sav->flags, tq->sav->seq);
	priv = NULL;
	drv_spi = alloc_unr(drv_spi_unr);
	if (drv_spi == -1) {	/* drv_spi space exhausted */
		dprintf("ipsec_accel_sa_install_newkey: cannot alloc "
		    "drv_spi if %s spi %#x\n", if_name(ifp),
		    be32toh(tq->sav->spi));
		return (0);
	}
	/* SA is explicitly bound to a different interface name: reject. */
	if (tq->sav->accel_ifname != NULL &&
	    strcmp(tq->sav->accel_ifname, if_name(ifp)) != 0) {
		error = ipsec_accel_handle_sav(tq->sav,
		    ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
		goto out;
	}
	error = ifp->if_ipsec_accel_m->if_sa_newkey(ifp, tq->sav,
	    drv_spi, &priv);
	if (error != 0) {
		if (error == EOPNOTSUPP) {
			dprintf("ipsec_accel_sa_newkey: driver "
			    "refused sa if %s spi %#x\n",
			    if_name(ifp), be32toh(tq->sav->spi));
		} else {
			dprintf("ipsec_accel_sa_newkey: driver "
			    "error %d if %s spi %#x\n",
			    error, if_name(ifp), be32toh(tq->sav->spi));
		}
		/* Record the rejection so the SA is not re-offered. */
		error = ipsec_accel_handle_sav(tq->sav, ifp, drv_spi, priv,
		    IFP_HS_REJECTED, NULL);
		if (error != 0) {
			dprintf("ipsec_accel_sa_newkey: handle_sav REJECTED "
			    "err %d if %s spi %#x\n", error,
			    if_name(ifp), be32toh(tq->sav->spi));
			free_unr(drv_spi_unr, drv_spi);
		}
	} else {
		error = ipsec_accel_handle_sav(tq->sav, ifp,
		    drv_spi, priv, IFP_HS_HANDLED, NULL);
		if (error != 0) {
			/* Bookkeeping failed: roll back the driver install. */
			dprintf("ipsec_accel_sa_newkey: handle_sav HANDLED "
			    "err %d if %s spi %#x\n", error,
			    if_name(ifp), be32toh(tq->sav->spi));
			error = ifp->if_ipsec_accel_m->if_sa_deinstall(ifp,
			    drv_spi, priv);
			if (error == 0)
				free_unr(drv_spi_unr, drv_spi);
			/*
			 * If driver refused to deinstall the SA, keep
			 * drv_spi leaked so that it is not reused.
			 * The SA is still programmed into the
			 * hardware with the drv_spi ident, so it is
			 * better to leak the drv_spi then reuse for
			 * another SA and have issues due to aliasing.
			 */
		}
	}
out:
	/*
	 * Return 0, ignoring any errors from the SA installation.
	 * This function is a callback for if_foreach_sleep(), which
	 * stops iteration if one of the callbacks returns non-zero.
	 * We need to offer the SA to all interfaces that could
	 * offload it.
	 */
	return (0);
}
355 
/*
 * Taskqueue handler installing a new SA into all matching interfaces.
 * Runs under the SA-install vnet.  The SADB_KEY_ACCEL_INST flag is set
 * under the mutex before iterating; if a concurrent forget marked the
 * SA with SADB_KEY_ACCEL_DEINST meanwhile, the cleanup is performed
 * here on its behalf.
 */
static void
ipsec_accel_sa_newkey_act(void *context, int pending)
{
	struct ipsec_accel_install_newkey_tq *tq;
	void *tqf;
	struct secasvar *sav;

	tq = context;
	tqf = NULL;
	sav = tq->sav;
	CURVNET_SET(tq->install_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	/* Only install SAs that are mature and not already (de)installed. */
	if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
	    SADB_KEY_ACCEL_DEINST)) == 0 &&
	    sav->state == SADB_SASTATE_MATURE) {
		sav->accel_flags |= SADB_KEY_ACCEL_INST;
		mtx_unlock(&ipsec_accel_sav_tmp);
		if_foreach_sleep(ipsec_accel_sa_install_match, context,
		    ipsec_accel_sa_newkey_cb, context);
		ipsec_accel_alloc_forget_tq(sav);
		mtx_lock(&ipsec_accel_sav_tmp);

		/*
		 * If ipsec_accel_forget_sav() raced with us and set
		 * the flag, do its work.  Its task cannot execute in
		 * parallel since ipsec_accel taskqueue is single-threaded.
		 */
		if ((sav->accel_flags & SADB_KEY_ACCEL_DEINST) != 0) {
			tqf = (void *)sav->accel_forget_tq;
			sav->accel_forget_tq = 0;
			ipsec_accel_forget_sav_clear(sav);
		}
	}
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesav(&tq->sav);	/* drop the reference taken at enqueue time */
	CURVNET_RESTORE();
	free(tq, M_TEMP);
	free(tqf, M_TEMP);	/* free(NULL) is a no-op */
}
395 
396 static void
397 ipsec_accel_sa_newkey_impl(struct secasvar *sav)
398 {
399 	struct ipsec_accel_install_newkey_tq *tq;
400 
401 	if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
402 	    SADB_KEY_ACCEL_DEINST)) != 0)
403 		return;
404 
405 	dprintf(
406 	    "ipsec_accel_sa_install_newkey: spi %#x flags %#x seq %d\n",
407 	    be32toh(sav->spi), sav->flags, sav->seq);
408 
409 	tq = malloc(sizeof(*tq), M_TEMP, M_NOWAIT);
410 	if (tq == NULL) {
411 		dprintf("ipsec_accel_sa_install_newkey: no memory for tq, "
412 		    "spi %#x\n", be32toh(sav->spi));
413 		/* XXXKIB */
414 		return;
415 	}
416 
417 	refcount_acquire(&sav->refcnt);
418 
419 	TASK_INIT(&tq->install_task, 0, ipsec_accel_sa_newkey_act, tq);
420 	tq->sav = sav;
421 	tq->install_vnet = curthread->td_vnet;
422 	taskqueue_enqueue(ipsec_accel_tq, &tq->install_task);
423 }
424 
/*
 * Record the outcome of offering sav to ifp: allocate an offload
 * handle, snapshot the SA's xform history, index it by drv_spi and
 * link it into the per-SA and global lists.
 *
 * Returns 0 on success (optionally passing the handle back in *ires),
 * EALREADY if a handle for this (sav, ifp) pair already exists, or the
 * pctrie insertion error.
 */
static int
ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
    u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires)
{
	struct ifp_handle_sav *ihs, *i;
	int error;

	/* Exactly one of HANDLED/REJECTED must be given. */
	MPASS(__bitcount(flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) == 1);

	ihs = malloc(sizeof(*ihs), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	ihs->ifp = ifp;
	ihs->sav = sav;
	ihs->drv_spi = drv_spi;
	ihs->ifdata = priv;
	ihs->flags = flags;
	ihs->hdr_ext_size = esp_hdrsiz(sav);
	/* Snapshot the SA identity for later xh lookups by drv_spi. */
	memcpy(&ihs->xfh.dst, &sav->sah->saidx.dst, sizeof(ihs->xfh.dst));
	ihs->xfh.spi = sav->spi;
	ihs->xfh.proto = sav->sah->saidx.proto;
	ihs->xfh.mode = sav->sah->saidx.mode;
	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp) {
			error = EALREADY;
			goto errout;
		}
	}
	error = DRVSPI_SA_PCTRIE_INSERT(&drv_spi_pctrie, ihs);
	if (error != 0)
		goto errout;
	if_ref(ihs->ifp);	/* handle holds an interface reference */
	CK_LIST_INSERT_HEAD(&sav->accel_ifps, ihs, sav_link);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, ihs, sav_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	if (ires != NULL)
		*ires = ihs;
	return (0);
errout:
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(ihs, M_IPSEC_MISC);
	if (ires != NULL)
		*ires = NULL;
	return (error);
}
469 
/*
 * Unlink and destroy one SA offload handle, deinstalling it from the
 * driver if it was actually HANDLED.
 *
 * Called and returns with ipsec_accel_sav_tmp held, but drops it
 * across NET_EPOCH_WAIT() and the driver call — callers iterating
 * lists must tolerate the list changing underneath them.
 */
static void
ipsec_accel_forget_handle_sav(struct ifp_handle_sav *i, bool freesav)
{
	struct ifnet *ifp;
	struct secasvar *sav;

	mtx_assert(&ipsec_accel_sav_tmp, MA_OWNED);

	CK_LIST_REMOVE(i, sav_link);
	CK_LIST_REMOVE(i, sav_allh_link);
	DRVSPI_SA_PCTRIE_REMOVE(&drv_spi_pctrie, i->drv_spi);
	mtx_unlock(&ipsec_accel_sav_tmp);
	/* Wait out readers that may still see the handle. */
	NET_EPOCH_WAIT();
	ifp = i->ifp;
	sav = i->sav;
	if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
	    IFP_HS_HANDLED) {
		dprintf("sa deinstall %s %p spi %#x ifl %#x\n",
		    if_name(ifp), sav, be32toh(sav->spi), i->flags);
		ifp->if_ipsec_accel_m->if_sa_deinstall(ifp,
		    i->drv_spi, i->ifdata);
	}
	if_rele(ifp);	/* drop the reference taken in ipsec_accel_handle_sav() */
	free_unr(drv_spi_unr, i->drv_spi);
	free(i, M_IPSEC_MISC);
	if (freesav)
		key_freesav(&sav);
	mtx_lock(&ipsec_accel_sav_tmp);
}
499 
500 static void
501 ipsec_accel_forget_sav_clear(struct secasvar *sav)
502 {
503 	struct ifp_handle_sav *i;
504 
505 	for (;;) {
506 		i = CK_LIST_FIRST(&sav->accel_ifps);
507 		if (i == NULL)
508 			break;
509 		ipsec_accel_forget_handle_sav(i, false);
510 	}
511 }
512 
513 static void
514 ipsec_accel_forget_sav_act(void *arg, int pending)
515 {
516 	struct ipsec_accel_forget_tq *tq;
517 	struct secasvar *sav;
518 
519 	tq = arg;
520 	sav = tq->sav;
521 	CURVNET_SET(tq->forget_vnet);
522 	mtx_lock(&ipsec_accel_sav_tmp);
523 	ipsec_accel_forget_sav_clear(sav);
524 	mtx_unlock(&ipsec_accel_sav_tmp);
525 	key_freesav(&sav);
526 	CURVNET_RESTORE();
527 	free(tq, M_TEMP);
528 }
529 
/*
 * Hook entry to deinstall an SA.  Marks the SA with
 * SADB_KEY_ACCEL_DEINST and tries to steal the pre-allocated forget
 * context; if the context is absent or already claimed (install still
 * in flight), the install task observes the flag and performs the
 * cleanup itself — see ipsec_accel_sa_newkey_act().
 */
void
ipsec_accel_forget_sav_impl(struct secasvar *sav)
{
	struct ipsec_accel_forget_tq *tq;

	mtx_lock(&ipsec_accel_sav_tmp);
	sav->accel_flags |= SADB_KEY_ACCEL_DEINST;
	tq = (void *)atomic_load_ptr(&sav->accel_forget_tq);
	if (tq == NULL || !atomic_cmpset_ptr(&sav->accel_forget_tq,
	    (uintptr_t)tq, 0)) {
		mtx_unlock(&ipsec_accel_sav_tmp);
		return;
	}
	mtx_unlock(&ipsec_accel_sav_tmp);

	/* Task owns a reference, dropped in ipsec_accel_forget_sav_act(). */
	refcount_acquire(&sav->refcnt);
	TASK_INIT(&tq->forget_task, 0, ipsec_accel_forget_sav_act, tq);
	tq->forget_vnet = curthread->td_vnet;
	tq->sav = sav;
	taskqueue_enqueue(ipsec_accel_tq, &tq->forget_task);
}
551 
/*
 * Deinstall every SA offloaded to the departing interface.  The global
 * handle list is walked with a marker element because
 * ipsec_accel_forget_handle_sav() drops ipsec_accel_sav_tmp, which
 * invalidates plain iteration.
 */
static void
ipsec_accel_on_ifdown_sav(struct ifnet *ifp)
{
	struct ifp_handle_sav *i, *marker;

	marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	marker->flags = IFP_HS_MARKER;

	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, marker,
	    sav_allh_link);
	for (;;) {
		i = CK_LIST_NEXT(marker, sav_allh_link);
		if (i == NULL)
			break;
		/* Advance the marker past the element being examined. */
		CK_LIST_REMOVE(marker, sav_allh_link);
		CK_LIST_INSERT_AFTER(i, marker, sav_allh_link);
		if (i->ifp == ifp) {
			refcount_acquire(&i->sav->refcnt); /* XXXKIB wrap ? */
			ipsec_accel_forget_handle_sav(i, true);
		}
	}
	CK_LIST_REMOVE(marker, sav_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(marker, M_IPSEC_MISC);
}
578 
579 static struct ifp_handle_sav *
580 ipsec_accel_is_accel_sav_ptr_raw(struct secasvar *sav, struct ifnet *ifp)
581 {
582 	struct ifp_handle_sav *i;
583 
584 	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
585 		return (NULL);
586 	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
587 		if (i->ifp == ifp)
588 			return (i);
589 	}
590 	return (NULL);
591 }
592 
/* Epoch-protected lookup of the handle binding sav to ifp. */
static struct ifp_handle_sav *
ipsec_accel_is_accel_sav_ptr(struct secasvar *sav, struct ifnet *ifp)
{
	NET_EPOCH_ASSERT();
	return (ipsec_accel_is_accel_sav_ptr_raw(sav, ifp));
}
599 
600 static bool
601 ipsec_accel_is_accel_sav_impl(struct secasvar *sav)
602 {
603 	return (!CK_LIST_EMPTY(&sav->accel_ifps));
604 }
605 
606 static struct secasvar *
607 ipsec_accel_drvspi_to_sa(u_int drv_spi)
608 {
609 	struct ifp_handle_sav *i;
610 
611 	i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
612 	if (i == NULL)
613 		return (NULL);
614 	return (i->sav);
615 }
616 
617 static struct ifp_handle_sp *
618 ipsec_accel_find_accel_sp(struct secpolicy *sp, if_t ifp)
619 {
620 	struct ifp_handle_sp *i;
621 
622 	CK_LIST_FOREACH(i, &sp->accel_ifps, sp_link) {
623 		if (i->ifp == ifp)
624 			return (i);
625 	}
626 	return (NULL);
627 }
628 
629 static bool
630 ipsec_accel_is_accel_sp(struct secpolicy *sp, if_t ifp)
631 {
632 	return (ipsec_accel_find_accel_sp(sp, ifp) != NULL);
633 }
634 
635 static int
636 ipsec_accel_remember_sp(struct secpolicy *sp, if_t ifp,
637     struct ifp_handle_sp **ip)
638 {
639 	struct ifp_handle_sp *i;
640 
641 	i = malloc(sizeof(*i), M_IPSEC_MISC, M_WAITOK | M_ZERO);
642 	i->sp = sp;
643 	i->ifp = ifp;
644 	if_ref(ifp);
645 	i->flags = IFP_HP_HANDLED;
646 	mtx_lock(&ipsec_accel_sav_tmp);
647 	CK_LIST_INSERT_HEAD(&sp->accel_ifps, i, sp_link);
648 	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, i, sp_allh_link);
649 	mtx_unlock(&ipsec_accel_sav_tmp);
650 	*ip = i;
651 	return (0);
652 }
653 
654 static bool
655 ipsec_accel_spdadd_match(if_t ifp, void *arg)
656 {
657 	struct secpolicy *sp;
658 
659 	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0 ||
660 	    ifp->if_ipsec_accel_m->if_spdadd == NULL)
661 		return (false);
662 	sp = arg;
663 	if (sp->accel_ifname != NULL &&
664 	    strcmp(sp->accel_ifname, if_name(ifp)) != 0)
665 		return (false);
666 	if (ipsec_accel_is_accel_sp(sp, ifp))
667 		return (false);
668 	return (true);
669 }
670 
/*
 * if_foreach_sleep() callback installing the SP into one interface.
 * A handle is recorded first; if the driver then refuses, the handle
 * is marked IFP_HP_REJECTED so removal skips the driver call.  Always
 * returns 0 so iteration continues over all interfaces.
 */
static int
ipsec_accel_spdadd_cb(if_t ifp, void *arg)
{
	struct secpolicy *sp;
	struct inpcb *inp;
	struct ifp_handle_sp *i;
	int error;

	sp = arg;
	inp = sp->ipsec_accel_add_sp_inp;
	dprintf("ipsec_accel_spdadd_cb: ifp %s m %p sp %p inp %p\n",
	    if_name(ifp), ifp->if_ipsec_accel_m->if_spdadd, sp, inp);
	error = ipsec_accel_remember_sp(sp, ifp, &i);
	if (error != 0) {
		dprintf("ipsec_accel_spdadd: %s if_spdadd %p remember res %d\n",
		    if_name(ifp), sp, error);
		return (0);
	}
	error = ifp->if_ipsec_accel_m->if_spdadd(ifp, sp, inp, &i->ifdata);
	if (error != 0) {
		i->flags |= IFP_HP_REJECTED;
		dprintf("ipsec_accel_spdadd: %s if_spdadd %p res %d\n",
		    if_name(ifp), sp, error);
	}
	return (0);
}
697 
/*
 * Taskqueue handler offering the SP to all matching interfaces, then
 * releasing the inpcb and SP references taken at enqueue time.
 */
static void
ipsec_accel_spdadd_act(void *arg, int pending)
{
	struct secpolicy *sp;
	struct inpcb *inp;

	sp = arg;
	CURVNET_SET(sp->accel_add_tq.adddel_vnet);
	if_foreach_sleep(ipsec_accel_spdadd_match, arg,
	    ipsec_accel_spdadd_cb, arg);
	inp = sp->ipsec_accel_add_sp_inp;
	if (inp != NULL) {
		/* Drop the pcb reference taken in ipsec_accel_spdadd_impl(). */
		INP_WLOCK(inp);
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);
		sp->ipsec_accel_add_sp_inp = NULL;
	}
	CURVNET_RESTORE();
	key_freesp(&sp);
}
718 
/*
 * Hook entry to install a security policy into offload-capable
 * interfaces.  The adddel_scheduled flag ensures at most one add task
 * is in flight per policy; references on the SP and the optional inpcb
 * are held until the task completes.
 */
void
ipsec_accel_spdadd_impl(struct secpolicy *sp, struct inpcb *inp)
{
	struct ipsec_accel_adddel_sp_tq *tq;

	if (sp == NULL)
		return;
	/* Nothing to install: no transforms and no pcb binding. */
	if (sp->tcount == 0 && inp == NULL)
		return;
	tq = &sp->accel_add_tq;
	if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
		return;		/* add already scheduled */
	tq->adddel_vnet = curthread->td_vnet;
	sp->ipsec_accel_add_sp_inp = inp;
	if (inp != NULL)
		in_pcbref(inp);
	TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spdadd_act, sp);
	key_addref(sp);	/* dropped by ipsec_accel_spdadd_act() */
	taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
}
739 
/*
 * Taskqueue handler removing the SP from every interface it was
 * installed on.  The handle-list mutex is dropped around the epoch
 * wait and the driver call, so the list is re-read from the head on
 * every iteration.
 */
static void
ipsec_accel_spddel_act(void *arg, int pending)
{
	struct ifp_handle_sp *i;
	struct secpolicy *sp;
	int error;

	sp = arg;
	CURVNET_SET(sp->accel_del_tq.adddel_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	for (;;) {
		i = CK_LIST_FIRST(&sp->accel_ifps);
		if (i == NULL)
			break;
		CK_LIST_REMOVE(i, sp_link);
		CK_LIST_REMOVE(i, sp_allh_link);
		mtx_unlock(&ipsec_accel_sav_tmp);
		/* Wait out readers that may still see the handle. */
		NET_EPOCH_WAIT();
		if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
		    IFP_HP_HANDLED) {
			dprintf("spd deinstall %s %p\n", if_name(i->ifp), sp);
			error = i->ifp->if_ipsec_accel_m->if_spddel(i->ifp,
			    sp, i->ifdata);
			if (error != 0) {
				dprintf(
		    "ipsec_accel_spddel: %s if_spddel %p res %d\n",
				    if_name(i->ifp), sp, error);
			}
		}
		if_rele(i->ifp);
		free(i, M_IPSEC_MISC);
		mtx_lock(&ipsec_accel_sav_tmp);
	}
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesp(&sp);	/* reference taken at enqueue time */
	CURVNET_RESTORE();
}
777 
778 void
779 ipsec_accel_spddel_impl(struct secpolicy *sp)
780 {
781 	struct ipsec_accel_adddel_sp_tq *tq;
782 
783 	if (sp == NULL)
784 		return;
785 
786 	tq = &sp->accel_del_tq;
787 	if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
788 		return;
789 	tq->adddel_vnet = curthread->td_vnet;
790 	TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spddel_act, sp);
791 	key_addref(sp);
792 	taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
793 }
794 
795 static void
796 ipsec_accel_on_ifdown_sp(struct ifnet *ifp)
797 {
798 	struct ifp_handle_sp *i, *marker;
799 	struct secpolicy *sp;
800 	int error;
801 
802 	marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
803 	marker->flags = IFP_HS_MARKER;
804 
805 	mtx_lock(&ipsec_accel_sav_tmp);
806 	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, marker,
807 	    sp_allh_link);
808 	for (;;) {
809 		i = CK_LIST_NEXT(marker, sp_allh_link);
810 		if (i == NULL)
811 			break;
812 		CK_LIST_REMOVE(marker, sp_allh_link);
813 		CK_LIST_INSERT_AFTER(i, marker, sp_allh_link);
814 		if (i->ifp != ifp)
815 			continue;
816 
817 		sp = i->sp;
818 		key_addref(sp);
819 		CK_LIST_REMOVE(i, sp_link);
820 		CK_LIST_REMOVE(i, sp_allh_link);
821 		mtx_unlock(&ipsec_accel_sav_tmp);
822 		NET_EPOCH_WAIT();
823 		if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
824 		    IFP_HP_HANDLED) {
825 			dprintf("spd deinstall %s %p\n", if_name(ifp), sp);
826 			error = ifp->if_ipsec_accel_m->if_spddel(ifp,
827 			    sp, i->ifdata);
828 		}
829 		if (error != 0) {
830 			dprintf(
831 		    "ipsec_accel_on_ifdown_sp: %s if_spddel %p res %d\n",
832 			    if_name(ifp), sp, error);
833 		}
834 		key_freesp(&sp);
835 		if_rele(ifp);
836 		free(i, M_IPSEC_MISC);
837 		mtx_lock(&ipsec_accel_sav_tmp);
838 	}
839 	CK_LIST_REMOVE(marker, sp_allh_link);
840 	mtx_unlock(&ipsec_accel_sav_tmp);
841 	free(marker, M_IPSEC_MISC);
842 }
843 
/* Flush both SP and SA offload state from a departing interface. */
static void
ipsec_accel_on_ifdown_impl(struct ifnet *ifp)
{
	ipsec_accel_on_ifdown_sp(ifp);
	ipsec_accel_on_ifdown_sav(ifp);
}
850 
/* ifnet_departure_event handler registered in ipsec_accel_init(). */
static void
ipsec_accel_ifdetach_event(void *arg __unused, struct ifnet *ifp)
{
	ipsec_accel_on_ifdown_impl(ifp);
}
856 
857 static bool
858 ipsec_accel_output_pad(struct mbuf *m, struct secasvar *sav, int skip, int mtu)
859 {
860 	int alen, blks, hlen, padding, rlen;
861 
862 	rlen = m->m_pkthdr.len - skip;
863 	hlen = ((sav->flags & SADB_X_EXT_OLD) != 0 ? sizeof(struct esp) :
864 	    sizeof(struct newesp)) + sav->ivlen;
865 	blks = MAX(4, SAV_ISCTR(sav) && VNET(esp_ctr_compatibility) ?
866 	    sav->tdb_encalgxform->native_blocksize :
867 	    sav->tdb_encalgxform->blocksize);
868 	padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;
869 	alen = xform_ah_authsize(sav->tdb_authalgxform);
870 
871 	return (skip + hlen + rlen + padding + alen <= mtu);
872 }
873 
874 static bool
875 ipsec_accel_output_tag(struct mbuf *m, u_int drv_spi)
876 {
877 	struct ipsec_accel_out_tag *tag;
878 
879 	tag = (struct ipsec_accel_out_tag *)m_tag_get(
880 	    PACKET_TAG_IPSEC_ACCEL_OUT, sizeof(*tag), M_NOWAIT);
881 	if (tag == NULL)
882 		return (false);
883 	tag->drv_spi = drv_spi;
884 	m_tag_prepend(m, &tag->tag);
885 	return (true);
886 }
887 
888 bool
889 ipsec_accel_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp,
890     struct secpolicy *sp, struct secasvar *sav, int af, int mtu, int *hwassist)
891 {
892 	struct ifp_handle_sav *i;
893 	struct ip *ip;
894 	struct tcpcb *tp;
895 	u_long ip_len, skip;
896 	bool res;
897 
898 	*hwassist = 0;
899 	res = false;
900 	if (ifp == NULL)
901 		return (res);
902 
903 	M_ASSERTPKTHDR(m);
904 	NET_EPOCH_ASSERT();
905 
906 	if (sav == NULL) {
907 		res = ipsec_accel_output_tag(m, IPSEC_ACCEL_DRV_SPI_BYPASS);
908 		goto out;
909 	}
910 
911 	i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
912 	if (i == NULL || (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
913 	    IFP_HS_HANDLED)
914 		goto out;
915 
916 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
917 		ip_len = m->m_pkthdr.len;
918 		if (ip_len + i->hdr_ext_size > mtu)
919 			goto out;
920 		switch (af) {
921 		case AF_INET:
922 			ip = mtod(m, struct ip *);
923 			skip = ip->ip_hl << 2;
924 			break;
925 		case AF_INET6:
926 			skip = sizeof(struct ip6_hdr);
927 			break;
928 		default:
929 			__unreachable();
930 		}
931 		if (!ipsec_accel_output_pad(m, sav, skip, mtu))
932 			goto out;
933 	}
934 
935 	if (!ipsec_accel_output_tag(m, i->drv_spi))
936 		goto out;
937 
938 	ipsec_accel_sa_recordxfer(sav, m);
939 	key_freesav(&sav);
940 	if (sp != NULL)
941 		key_freesp(&sp);
942 
943 	*hwassist = ifp->if_ipsec_accel_m->if_hwassist(ifp, sav,
944 	    i->drv_spi, i->ifdata);
945 	res = true;
946 out:
947 	if (inp != NULL && inp->inp_pcbinfo == &V_tcbinfo) {
948 		INP_WLOCK_ASSERT(inp);
949 		tp = (struct tcpcb *)inp;
950 		if (res && (*hwassist & (CSUM_TSO | CSUM_IP6_TSO)) != 0) {
951 			tp->t_flags2 |= TF2_IPSEC_TSO;
952 		} else {
953 			tp->t_flags2 &= ~TF2_IPSEC_TSO;
954 		}
955 	}
956 	return (res);
957 }
958 
959 struct ipsec_accel_in_tag *
960 ipsec_accel_input_tag_lookup(const struct mbuf *m)
961 {
962 	struct ipsec_accel_in_tag *tag;
963 	struct m_tag *xtag;
964 
965 	xtag = m_tag_find(__DECONST(struct mbuf *, m),
966 	    PACKET_TAG_IPSEC_ACCEL_IN, NULL);
967 	if (xtag == NULL)
968 		return (NULL);
969 	tag = __containerof(xtag, struct ipsec_accel_in_tag, tag);
970 	return (tag);
971 }
972 
/*
 * Handle an inbound packet already decrypted by hardware.  Returns
 * ENXIO when the packet carries no offload tag (presumably directing
 * the caller to the software path — confirm against callers), 0 when
 * accounted against the offloaded SA, or EINPROGRESS after dropping a
 * packet with an out-of-range drv_spi.
 */
int
ipsec_accel_input(struct mbuf *m, int offset, int proto)
{
	struct secasvar *sav;
	struct ipsec_accel_in_tag *tag;

	tag = ipsec_accel_input_tag_lookup(m);
	if (tag == NULL)
		return (ENXIO);

	if (tag->drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
	    tag->drv_spi > IPSEC_ACCEL_DRV_SPI_MAX) {
		dprintf("if %s mbuf %p drv_spi %d invalid, packet dropped\n",
		    (m->m_flags & M_PKTHDR) != 0 ? if_name(m->m_pkthdr.rcvif) :
		    "<unknwn>", m, tag->drv_spi);
		m_freem(m);
		return (EINPROGRESS);
	}

	sav = ipsec_accel_drvspi_to_sa(tag->drv_spi);
	if (sav != NULL)
		ipsec_accel_sa_recordxfer(sav, m);
	return (0);
}
997 
/*
 * Account one packet against the SA's software-side offload counters:
 * accel_lft_sw[0] counts packets, accel_lft_sw[1] counts octets.
 * Also latches the time of first use.
 */
static void
ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m)
{
	counter_u64_add(sav->accel_lft_sw, 1);
	counter_u64_add(sav->accel_lft_sw + 1, m->m_pkthdr.len);
	if (sav->accel_firstused == 0)
		sav->accel_firstused = time_second;
}
1006 
/*
 * Merge the local lifetime counters lft_l into the cumulative
 * counters lft_c.  Counts accumulate; for usetime the smaller of the
 * two values is kept.
 */
static void
ipsec_accel_sa_lifetime_update(struct seclifetime *lft_c,
    const struct seclifetime *lft_l)
{
	lft_c->allocations += lft_l->allocations;
	lft_c->bytes += lft_l->bytes;
	lft_c->usetime = min(lft_c->usetime, lft_l->usetime);
}
1015 
/*
 * Driver-pushed lifetime update: the driver reports absolute hardware
 * counters (octets, allocs) for (ifp, drv_spi).  Convert them to
 * deltas against the last reported values and fold the deltas into
 * the SA's lifetime counters.
 */
static void
ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav, if_t ifp,
    u_int drv_spi, uint64_t octets, uint64_t allocs)
{
	struct epoch_tracker et;
	struct ifp_handle_sav *i;
	uint64_t odiff, adiff;

	NET_EPOCH_ENTER(et);
	mtx_lock(&ipsec_accel_cnt_lock);

	if (allocs != 0) {
		/* Hardware saw traffic: latch first-use timestamps. */
		if (sav->firstused == 0)
			sav->firstused = time_second;
		if (sav->accel_firstused == 0)
			sav->accel_firstused = time_second;
	}

	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp && i->drv_spi == drv_spi)
			break;
	}
	if (i == NULL)
		goto out;	/* handle already gone */

	odiff = octets - i->cnt_octets;
	adiff = allocs - i->cnt_allocs;

	if (sav->lft_c != NULL) {
		counter_u64_add(sav->lft_c_bytes, odiff);
		counter_u64_add(sav->lft_c_allocations, adiff);
	}

	/* Remember the absolute values for the next delta computation. */
	i->cnt_octets = octets;
	i->cnt_allocs = allocs;
	sav->accel_hw_octets += odiff;
	sav->accel_hw_allocs += adiff;

out:
	mtx_unlock(&ipsec_accel_cnt_lock);
	NET_EPOCH_EXIT(et);
}
1058 
1059 static int
1060 ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
1061     if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs)
1062 {
1063 	struct ifp_handle_sav *i;
1064 	int error;
1065 
1066 	NET_EPOCH_ASSERT();
1067 	error = 0;
1068 
1069 	mtx_lock(&ipsec_accel_cnt_lock);
1070 	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
1071 		if (i->ifp == ifp && i->drv_spi == drv_spi) {
1072 			*octets = i->cnt_octets;
1073 			*allocs = i->cnt_allocs;
1074 			break;
1075 		}
1076 	}
1077 	if (i == NULL)
1078 		error = ENOENT;
1079 	mtx_unlock(&ipsec_accel_cnt_lock);
1080 	return (error);
1081 }
1082 
1083 static void
1084 ipsec_accel_sa_lifetime_hw(struct secasvar *sav, if_t ifp,
1085     struct seclifetime *lft)
1086 {
1087 	struct ifp_handle_sav *i;
1088 	if_sa_cnt_fn_t p;
1089 
1090 	IFNET_RLOCK_ASSERT();
1091 
1092 	i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
1093 	if (i != NULL && (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
1094 	    IFP_HS_HANDLED) {
1095 		p = ifp->if_ipsec_accel_m->if_sa_cnt;
1096 		if (p != NULL)
1097 			p(ifp, sav, i->drv_spi, i->ifdata, lft);
1098 	}
1099 }
1100 
/*
 * Gather lifetime counters for the SA into *lft_c, as selected by op:
 *   IF_SA_CNT_IFP_HW_VAL   - hardware counters from the given ifp only;
 *   IF_SA_CNT_TOTAL_SW_VAL - software counters kept in
 *                            sav->accel_lft_sw ([0] packets, [1] bytes);
 *   IF_SA_CNT_TOTAL_HW_VAL - hardware counters summed over every
 *                            interface that fully handled the SA.
 * When IF_SA_CNT_UPD is set in op the fetched values are merged into
 * the existing *lft_c contents; otherwise *lft_c is cleared first.
 * Returns 0 on success or an error from a driver if_sa_cnt callback.
 */
static int
ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
    struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
    struct rm_priotracker *sahtree_trackerp)
{
	struct seclifetime lft_l, lft_s;
	struct ifp_handle_sav *i;
	if_t ifp1;
	if_sa_cnt_fn_t p;
	int error;

	error = 0;
	memset(&lft_l, 0, sizeof(lft_l));
	memset(&lft_s, 0, sizeof(lft_s));

	switch (op & ~IF_SA_CNT_UPD) {
	case IF_SA_CNT_IFP_HW_VAL:
		ipsec_accel_sa_lifetime_hw(sav, ifp, &lft_l);
		ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
		break;

	case IF_SA_CNT_TOTAL_SW_VAL:
		/* Counter pair: [0] is packets/allocations, [1] is bytes. */
		lft_l.allocations = (uint32_t)counter_u64_fetch(
		    sav->accel_lft_sw);
		lft_l.bytes = counter_u64_fetch(sav->accel_lft_sw + 1);
		lft_l.usetime = sav->accel_firstused;
		break;

	case IF_SA_CNT_TOTAL_HW_VAL:
		IFNET_RLOCK_ASSERT();
		CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
			/* Skip interfaces that did not accept the SA. */
			if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
			    IFP_HS_HANDLED)
				continue;
			ifp1 = i->ifp;
			p = ifp1->if_ipsec_accel_m->if_sa_cnt;
			if (p == NULL)
				continue;
			memset(&lft_s, 0, sizeof(lft_s));
			/*
			 * Drop the SAH tree read lock around the driver
			 * callback when the caller provided a tracker.
			 * NOTE(review): presumably because the callback
			 * may sleep — confirm against driver contracts.
			 */
			if (sahtree_trackerp != NULL)
				ipsec_sahtree_runlock(sahtree_trackerp);
			error = p(ifp1, sav, i->drv_spi, i->ifdata, &lft_s);
			if (sahtree_trackerp != NULL)
				ipsec_sahtree_rlock(sahtree_trackerp);
			if (error == 0)
				ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
		}
		break;
	}

	if (error == 0) {
		/* Without IF_SA_CNT_UPD, report only the fetched values. */
		if ((op & IF_SA_CNT_UPD) == 0)
			memset(lft_c, 0, sizeof(*lft_c));
		ipsec_accel_sa_lifetime_update(lft_c, &lft_l);
	}

	return (error);
}
1159 
/*
 * Synchronize with asynchronous offload processing by draining all
 * work currently queued on the ipsec_accel taskqueue.
 */
static void
ipsec_accel_sync_imp(void)
{
	taskqueue_drain_all(ipsec_accel_tq);
}
1165 
1166 static struct mbuf *
1167 ipsec_accel_key_setaccelif_impl(struct secasvar *sav)
1168 {
1169 	struct mbuf *m, *m1;
1170 	struct ifp_handle_sav *i;
1171 	struct epoch_tracker et;
1172 
1173 	if (sav->accel_ifname != NULL)
1174 		return (key_setaccelif(sav->accel_ifname));
1175 
1176 	m = m1 = NULL;
1177 
1178 	NET_EPOCH_ENTER(et);
1179 	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
1180 		if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
1181 		    IFP_HS_HANDLED) {
1182 			m1 = key_setaccelif(if_name(i->ifp));
1183 			if (m == NULL)
1184 				m = m1;
1185 			else if (m1 != NULL)
1186 				m_cat(m, m1);
1187 		}
1188 	}
1189 	NET_EPOCH_EXIT(et);
1190 	return (m);
1191 }
1192 
1193 static bool
1194 ipsec_accel_fill_xh_impl(if_t ifp, uint32_t drv_spi, struct xform_history *xh)
1195 {
1196 	struct ifp_handle_sav *i;
1197 
1198 	if (drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
1199 	    drv_spi > IPSEC_ACCEL_DRV_SPI_MAX)
1200 		return (false);
1201 
1202 	i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
1203 	if (i == NULL)
1204 		return (false);
1205 	memcpy(xh, &i->xfh, sizeof(*xh));
1206 	return (true);
1207 }
1208 
1209 #endif	/* IPSEC_OFFLOAD */
1210