/*-
 * Copyright (c) 2021,2022 NVIDIA CORPORATION & AFFILIATES. ALL RIGHTS RESERVED.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ck.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/pctrie.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/protosw.h>
#include <sys/stdarg.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>

#include <netipsec/key.h>
#include <netipsec/keydb.h>
#include <netipsec/key_debug.h>
#include <netipsec/xform.h>
#include <netipsec/ipsec.h>
#include <netipsec/ipsec_offload.h>
#include <netipsec/ah_var.h>
#include <netipsec/esp.h>
#include <netipsec/esp_var.h>
#include <netipsec/ipcomp_var.h>

#ifdef IPSEC_OFFLOAD

static struct mtx ipsec_accel_sav_tmp;
static struct unrhdr *drv_spi_unr;
static struct mtx ipsec_accel_cnt_lock;
static struct taskqueue *ipsec_accel_tq;

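/*
 * Arguments for the tasks that defer the driver calls for SA install
 * ("newkey") and SA removal ("forget") to the single-threaded
 * ipsec_offload taskqueue, together with the vnet they must run in.
 */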
struct ipsec_accel_install_newkey_tq {
	struct secasvar *sav;
	struct vnet *install_vnet;
	struct task install_task;
};

struct ipsec_accel_forget_tq {
	struct vnet *forget_vnet;
	struct task forget_task;
	struct secasvar *sav;
};

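/*
 * Per-(SA, interface) offload handle.  It is linked both into the SA's
 * accel_ifps list and into the global ipsec_accel_all_sav_handles list,
 * and is indexed by drv_spi in the drv_spi_pctrie trie.
 */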
struct ifp_handle_sav {
	CK_LIST_ENTRY(ifp_handle_sav) sav_link;
	CK_LIST_ENTRY(ifp_handle_sav) sav_allh_link;
	struct secasvar *sav;
	struct ifnet *ifp;
	void *ifdata;
	uint64_t drv_spi;
	uint32_t flags;
	size_t hdr_ext_size;
	uint64_t cnt_octets;
	uint64_t cnt_allocs;
	struct xform_history xfh;
};

#define	IFP_HS_HANDLED	0x00000001
#define	IFP_HS_REJECTED	0x00000002
#define	IFP_HS_MARKER	0x00000010

static CK_LIST_HEAD(, ifp_handle_sav) ipsec_accel_all_sav_handles;

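/*
 * Per-(security policy, interface) offload handle, kept on the SP's
 * accel_ifps list and on the global ipsec_accel_all_sp_handles list.
 */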
struct ifp_handle_sp {
	CK_LIST_ENTRY(ifp_handle_sp) sp_link;
	CK_LIST_ENTRY(ifp_handle_sp) sp_allh_link;
	struct secpolicy *sp;
	struct ifnet *ifp;
	void *ifdata;
	uint32_t flags;
};

#define	IFP_HP_HANDLED	0x00000001
#define	IFP_HP_REJECTED	0x00000002
#define	IFP_HP_MARKER	0x00000004

static CK_LIST_HEAD(, ifp_handle_sp) ipsec_accel_all_sp_handles;

static void *
drvspi_sa_trie_alloc(struct pctrie *ptree)
{
	void *res;

	res = malloc(pctrie_node_size(), M_IPSEC_MISC, M_ZERO | M_NOWAIT);
	if (res != NULL)
		pctrie_zone_init(res, 0, 0);
	return (res);
}

static void
drvspi_sa_trie_free(struct pctrie *ptree, void *node)
{
	free(node, M_IPSEC_MISC);
}

PCTRIE_DEFINE(DRVSPI_SA, ifp_handle_sav, drv_spi,
    drvspi_sa_trie_alloc, drvspi_sa_trie_free);
static struct pctrie drv_spi_pctrie;

static eventhandler_tag ipsec_accel_ifdetach_event_tag;

static void ipsec_accel_sa_newkey_impl(struct secasvar *sav);
static int ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
    u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires);
static void ipsec_accel_forget_sav_clear(struct secasvar *sav);
static struct ifp_handle_sav *ipsec_accel_is_accel_sav_ptr(struct secasvar *sav,
    struct ifnet *ifp);
static int ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
    struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
    struct rm_priotracker *sahtree_trackerp);
static void ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m);
static void ipsec_accel_sync_imp(void);
static bool ipsec_accel_is_accel_sav_impl(struct secasvar *sav);
static struct mbuf *ipsec_accel_key_setaccelif_impl(struct secasvar *sav);
static void ipsec_accel_on_ifdown_impl(struct ifnet *ifp);
static void ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t octets, uint64_t allocs);
static int ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs);
static void ipsec_accel_ifdetach_event(void *arg, struct ifnet *ifp);
static bool ipsec_accel_fill_xh_impl(if_t ifp, uint32_t drv_spi,
    struct xform_history *xh);

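/*
 * Hook the offload entry points into the IPsec stack and register for
 * interface departure events.  The ipsec_offload taskqueue is created
 * single-threaded so that install and forget tasks for the same SA never
 * run in parallel.
 */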
static void
ipsec_accel_init(void *arg)
{
	mtx_init(&ipsec_accel_sav_tmp, "ipasat", MTX_DEF, 0);
	mtx_init(&ipsec_accel_cnt_lock, "ipascn", MTX_DEF, 0);
	drv_spi_unr = new_unrhdr(IPSEC_ACCEL_DRV_SPI_MIN,
	    IPSEC_ACCEL_DRV_SPI_MAX, &ipsec_accel_sav_tmp);
	ipsec_accel_tq = taskqueue_create("ipsec_offload", M_WAITOK,
	    taskqueue_thread_enqueue, &ipsec_accel_tq);
	(void)taskqueue_start_threads(&ipsec_accel_tq,
	    1 /* Must be single-threaded */, PWAIT,
	    "ipsec_offload");
	ipsec_accel_sa_newkey_p = ipsec_accel_sa_newkey_impl;
	ipsec_accel_forget_sav_p = ipsec_accel_forget_sav_impl;
	ipsec_accel_spdadd_p = ipsec_accel_spdadd_impl;
	ipsec_accel_spddel_p = ipsec_accel_spddel_impl;
	ipsec_accel_sa_lifetime_op_p = ipsec_accel_sa_lifetime_op_impl;
	ipsec_accel_sync_p = ipsec_accel_sync_imp;
	ipsec_accel_is_accel_sav_p = ipsec_accel_is_accel_sav_impl;
	ipsec_accel_key_setaccelif_p = ipsec_accel_key_setaccelif_impl;
	ipsec_accel_on_ifdown_p = ipsec_accel_on_ifdown_impl;
	ipsec_accel_drv_sa_lifetime_update_p =
	    ipsec_accel_drv_sa_lifetime_update_impl;
	ipsec_accel_drv_sa_lifetime_fetch_p =
	    ipsec_accel_drv_sa_lifetime_fetch_impl;
	ipsec_accel_fill_xh_p = ipsec_accel_fill_xh_impl;
	pctrie_init(&drv_spi_pctrie);
	ipsec_accel_ifdetach_event_tag = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, ipsec_accel_ifdetach_event, NULL,
	    EVENTHANDLER_PRI_ANY);
}
SYSINIT(ipsec_accel_init, SI_SUB_VNET_DONE, SI_ORDER_ANY,
    ipsec_accel_init, NULL);

static void
ipsec_accel_fini(void *arg)
{
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    ipsec_accel_ifdetach_event_tag);
	ipsec_accel_sa_newkey_p = NULL;
	ipsec_accel_forget_sav_p = NULL;
	ipsec_accel_spdadd_p = NULL;
	ipsec_accel_spddel_p = NULL;
	ipsec_accel_sa_lifetime_op_p = NULL;
	ipsec_accel_sync_p = NULL;
	ipsec_accel_is_accel_sav_p = NULL;
	ipsec_accel_key_setaccelif_p = NULL;
	ipsec_accel_on_ifdown_p = NULL;
	ipsec_accel_drv_sa_lifetime_update_p = NULL;
	ipsec_accel_drv_sa_lifetime_fetch_p = NULL;
	ipsec_accel_fill_xh_p = NULL;
	ipsec_accel_sync_imp();
	clean_unrhdr(drv_spi_unr);	/* avoid panic, should go later */
	clear_unrhdr(drv_spi_unr);
	delete_unrhdr(drv_spi_unr);
	taskqueue_drain_all(ipsec_accel_tq);
	taskqueue_free(ipsec_accel_tq);
	mtx_destroy(&ipsec_accel_sav_tmp);
	mtx_destroy(&ipsec_accel_cnt_lock);
}
SYSUNINIT(ipsec_accel_fini, SI_SUB_VNET_DONE, SI_ORDER_ANY,
    ipsec_accel_fini, NULL);

SYSCTL_NODE(_net_inet_ipsec, OID_AUTO, offload, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");

static bool ipsec_offload_verbose = false;
SYSCTL_BOOL(_net_inet_ipsec_offload, OID_AUTO, verbose, CTLFLAG_RW,
    &ipsec_offload_verbose, 0,
    "Verbose SA/SP offload install and deinstall");

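/*
 * Diagnostic printf, gated by the net.inet.ipsec.offload.verbose sysctl.
 */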
static void
dprintf(const char *fmt, ...)
{
	va_list ap;

	if (!ipsec_offload_verbose)
		return;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

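/*
 * Pre-allocate the "forget" task argument at install time, so that the
 * later SA teardown path does not have to allocate memory.  The cmpset
 * frees the duplicate if another thread installed its allocation first.
 */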
static void
ipsec_accel_alloc_forget_tq(struct secasvar *sav)
{
	void *ftq;

	if (sav->accel_forget_tq != 0)
		return;

	ftq = malloc(sizeof(struct ipsec_accel_forget_tq), M_TEMP, M_WAITOK);
	if (!atomic_cmpset_ptr(&sav->accel_forget_tq, 0, (uintptr_t)ftq))
		free(ftq, M_TEMP);
}

static bool
ipsec_accel_sa_install_match(if_t ifp, void *arg)
{
	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
		return (false);
	if (ifp->if_ipsec_accel_m->if_sa_newkey == NULL) {
		dprintf("driver bug ifp %s if_sa_newkey NULL\n",
		    if_name(ifp));
		return (false);
	}
	return (true);
}

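/*
 * Per-interface callback of the install task: reserve a driver SPI, offer
 * the SA to the driver via if_sa_newkey(), and record the result as a
 * HANDLED or REJECTED handle.
 */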
static int
ipsec_accel_sa_newkey_cb(if_t ifp, void *arg)
{
	struct ipsec_accel_install_newkey_tq *tq;
	void *priv;
	u_int drv_spi;
	int error;

	tq = arg;

	dprintf("ipsec_accel_sa_newkey_act: ifp %s h %p spi %#x "
	    "flags %#x seq %d\n",
	    if_name(ifp), ifp->if_ipsec_accel_m->if_sa_newkey,
	    be32toh(tq->sav->spi), tq->sav->flags, tq->sav->seq);
	priv = NULL;
	drv_spi = alloc_unr(drv_spi_unr);
	if (tq->sav->accel_ifname != NULL &&
	    strcmp(tq->sav->accel_ifname, if_name(ifp)) != 0) {
		error = ipsec_accel_handle_sav(tq->sav,
		    ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
		goto out;
	}
	if (drv_spi == -1) {
		/* XXXKIB */
		dprintf("ipsec_accel_sa_install_newkey: cannot alloc "
		    "drv_spi if %s spi %#x\n", if_name(ifp),
		    be32toh(tq->sav->spi));
		return (ENOMEM);
	}
	error = ifp->if_ipsec_accel_m->if_sa_newkey(ifp, tq->sav,
	    drv_spi, &priv);
	if (error != 0) {
		if (error == EOPNOTSUPP) {
			dprintf("ipsec_accel_sa_newkey: driver "
			    "refused sa if %s spi %#x\n",
			    if_name(ifp), be32toh(tq->sav->spi));
			error = ipsec_accel_handle_sav(tq->sav,
			    ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
			/* XXXKIB */
		} else {
			dprintf("ipsec_accel_sa_newkey: driver "
			    "error %d if %s spi %#x\n",
			    error, if_name(ifp), be32toh(tq->sav->spi));
			/* XXXKIB */
		}
	} else {
		error = ipsec_accel_handle_sav(tq->sav, ifp,
		    drv_spi, priv, IFP_HS_HANDLED, NULL);
		if (error != 0) {
			/* XXXKIB */
			dprintf("ipsec_accel_sa_newkey: handle_sav "
			    "err %d if %s spi %#x\n", error,
			    if_name(ifp), be32toh(tq->sav->spi));
		}
	}
out:
	return (error);
}

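/*
 * Body of the install task: offer the SA to every capable interface, but
 * only if the SA is still MATURE and has not been installed or
 * de-installed yet.
 */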
static void
ipsec_accel_sa_newkey_act(void *context, int pending)
{
	struct ipsec_accel_install_newkey_tq *tq;
	void *tqf;
	struct secasvar *sav;

	tq = context;
	tqf = NULL;
	sav = tq->sav;
	CURVNET_SET(tq->install_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
	    SADB_KEY_ACCEL_DEINST)) == 0 &&
	    sav->state == SADB_SASTATE_MATURE) {
		sav->accel_flags |= SADB_KEY_ACCEL_INST;
		mtx_unlock(&ipsec_accel_sav_tmp);
		if_foreach_sleep(ipsec_accel_sa_install_match, context,
		    ipsec_accel_sa_newkey_cb, context);
		ipsec_accel_alloc_forget_tq(sav);
		mtx_lock(&ipsec_accel_sav_tmp);

		/*
		 * If ipsec_accel_forget_sav() raced with us and set
		 * the flag, do its work.  Its task cannot execute in
		 * parallel since ipsec_accel taskqueue is single-threaded.
		 */
		if ((sav->accel_flags & SADB_KEY_ACCEL_DEINST) != 0) {
			tqf = (void *)sav->accel_forget_tq;
			sav->accel_forget_tq = 0;
			ipsec_accel_forget_sav_clear(sav);
		}
	}
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesav(&tq->sav);
	CURVNET_RESTORE();
	free(tq, M_TEMP);
	free(tqf, M_TEMP);
}

static void
ipsec_accel_sa_newkey_impl(struct secasvar *sav)
{
	struct ipsec_accel_install_newkey_tq *tq;

	if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
	    SADB_KEY_ACCEL_DEINST)) != 0)
		return;

	dprintf(
	    "ipsec_accel_sa_install_newkey: spi %#x flags %#x seq %d\n",
	    be32toh(sav->spi), sav->flags, sav->seq);

	tq = malloc(sizeof(*tq), M_TEMP, M_NOWAIT);
	if (tq == NULL) {
		dprintf("ipsec_accel_sa_install_newkey: no memory for tq, "
		    "spi %#x\n", be32toh(sav->spi));
		/* XXXKIB */
		return;
	}

	refcount_acquire(&sav->refcnt);

	TASK_INIT(&tq->install_task, 0, ipsec_accel_sa_newkey_act, tq);
	tq->sav = sav;
	tq->install_vnet = curthread->td_vnet;
	taskqueue_enqueue(ipsec_accel_tq, &tq->install_task);
}

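/*
 * Record a driver's answer for (sav, ifp): allocate the handle, cache the
 * xform_history, insert it into the drv_spi trie and onto the SA and
 * global lists.  Fails with EALREADY if the interface already has a
 * handle for this SA.
 */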
static int
ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
    u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires)
{
	struct ifp_handle_sav *ihs, *i;
	int error;

	MPASS(__bitcount(flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) == 1);

	ihs = malloc(sizeof(*ihs), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	ihs->ifp = ifp;
	ihs->sav = sav;
	ihs->drv_spi = drv_spi;
	ihs->ifdata = priv;
	ihs->flags = flags;
	ihs->hdr_ext_size = esp_hdrsiz(sav);
	memcpy(&ihs->xfh.dst, &sav->sah->saidx.dst, sizeof(ihs->xfh.dst));
	ihs->xfh.spi = sav->spi;
	ihs->xfh.proto = sav->sah->saidx.proto;
	ihs->xfh.mode = sav->sah->saidx.mode;
	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp) {
			error = EALREADY;
			goto errout;
		}
	}
	error = DRVSPI_SA_PCTRIE_INSERT(&drv_spi_pctrie, ihs);
	if (error != 0)
		goto errout;
	if_ref(ihs->ifp);
	CK_LIST_INSERT_HEAD(&sav->accel_ifps, ihs, sav_link);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, ihs, sav_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	if (ires != NULL)
		*ires = ihs;
	return (0);
errout:
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(ihs, M_IPSEC_MISC);
	if (ires != NULL)
		*ires = NULL;
	return (error);
}

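/*
 * Unlink one handle from the SA and global lists, wait for current epoch
 * readers to drain, then tell the driver to remove the SA and release the
 * handle's resources.  The ipsec_accel_sav_tmp mutex is dropped and
 * re-acquired around the sleepable parts.
 */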
static void
ipsec_accel_forget_handle_sav(struct ifp_handle_sav *i, bool freesav)
{
	struct ifnet *ifp;
	struct secasvar *sav;

	mtx_assert(&ipsec_accel_sav_tmp, MA_OWNED);

	CK_LIST_REMOVE(i, sav_link);
	CK_LIST_REMOVE(i, sav_allh_link);
	DRVSPI_SA_PCTRIE_REMOVE(&drv_spi_pctrie, i->drv_spi);
	mtx_unlock(&ipsec_accel_sav_tmp);
	NET_EPOCH_WAIT();
	ifp = i->ifp;
	sav = i->sav;
	if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
	    IFP_HS_HANDLED) {
		dprintf("sa deinstall %s %p spi %#x ifl %#x\n",
		    if_name(ifp), sav, be32toh(sav->spi), i->flags);
		ifp->if_ipsec_accel_m->if_sa_deinstall(ifp,
		    i->drv_spi, i->ifdata);
	}
	if_rele(ifp);
	free_unr(drv_spi_unr, i->drv_spi);
	free(i, M_IPSEC_MISC);
	if (freesav)
		key_freesav(&sav);
	mtx_lock(&ipsec_accel_sav_tmp);
}

static void
ipsec_accel_forget_sav_clear(struct secasvar *sav)
{
	struct ifp_handle_sav *i;

	for (;;) {
		i = CK_LIST_FIRST(&sav->accel_ifps);
		if (i == NULL)
			break;
		ipsec_accel_forget_handle_sav(i, false);
	}
}

static void
ipsec_accel_forget_sav_act(void *arg, int pending)
{
	struct ipsec_accel_forget_tq *tq;
	struct secasvar *sav;

	tq = arg;
	sav = tq->sav;
	CURVNET_SET(tq->forget_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	ipsec_accel_forget_sav_clear(sav);
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesav(&sav);
	CURVNET_RESTORE();
	free(tq, M_TEMP);
}

void
ipsec_accel_forget_sav_impl(struct secasvar *sav)
{
	struct ipsec_accel_forget_tq *tq;

	mtx_lock(&ipsec_accel_sav_tmp);
	sav->accel_flags |= SADB_KEY_ACCEL_DEINST;
	tq = (void *)atomic_load_ptr(&sav->accel_forget_tq);
	if (tq == NULL || !atomic_cmpset_ptr(&sav->accel_forget_tq,
	    (uintptr_t)tq, 0)) {
		mtx_unlock(&ipsec_accel_sav_tmp);
		return;
	}
	mtx_unlock(&ipsec_accel_sav_tmp);

	refcount_acquire(&sav->refcnt);
	TASK_INIT(&tq->forget_task, 0, ipsec_accel_forget_sav_act, tq);
	tq->forget_vnet = curthread->td_vnet;
	tq->sav = sav;
	taskqueue_enqueue(ipsec_accel_tq, &tq->forget_task);
}

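/*
 * Interface is going away: walk the global SA handle list using a marker
 * (the mutex is dropped inside ipsec_accel_forget_handle_sav()) and drop
 * every handle that points at the departing interface.
 */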
static void
ipsec_accel_on_ifdown_sav(struct ifnet *ifp)
{
	struct ifp_handle_sav *i, *marker;

	marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	marker->flags = IFP_HS_MARKER;

	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, marker,
	    sav_allh_link);
	for (;;) {
		i = CK_LIST_NEXT(marker, sav_allh_link);
		if (i == NULL)
			break;
		CK_LIST_REMOVE(marker, sav_allh_link);
		CK_LIST_INSERT_AFTER(i, marker, sav_allh_link);
		if (i->ifp == ifp) {
			refcount_acquire(&i->sav->refcnt); /* XXXKIB wrap ? */
			ipsec_accel_forget_handle_sav(i, true);
		}
	}
	CK_LIST_REMOVE(marker, sav_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(marker, M_IPSEC_MISC);
}

static struct ifp_handle_sav *
ipsec_accel_is_accel_sav_ptr_raw(struct secasvar *sav, struct ifnet *ifp)
{
	struct ifp_handle_sav *i;

	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
		return (NULL);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp)
			return (i);
	}
	return (NULL);
}

static struct ifp_handle_sav *
ipsec_accel_is_accel_sav_ptr(struct secasvar *sav, struct ifnet *ifp)
{
	NET_EPOCH_ASSERT();
	return (ipsec_accel_is_accel_sav_ptr_raw(sav, ifp));
}

static bool
ipsec_accel_is_accel_sav_impl(struct secasvar *sav)
{
	return (!CK_LIST_EMPTY(&sav->accel_ifps));
}

static struct secasvar *
ipsec_accel_drvspi_to_sa(u_int drv_spi)
{
	struct ifp_handle_sav *i;

	i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
	if (i == NULL)
		return (NULL);
	return (i->sav);
}

static struct ifp_handle_sp *
ipsec_accel_find_accel_sp(struct secpolicy *sp, if_t ifp)
{
	struct ifp_handle_sp *i;

	CK_LIST_FOREACH(i, &sp->accel_ifps, sp_link) {
		if (i->ifp == ifp)
			return (i);
	}
	return (NULL);
}

static bool
ipsec_accel_is_accel_sp(struct secpolicy *sp, if_t ifp)
{
	return (ipsec_accel_find_accel_sp(sp, ifp) != NULL);
}

static int
ipsec_accel_remember_sp(struct secpolicy *sp, if_t ifp,
    struct ifp_handle_sp **ip)
{
	struct ifp_handle_sp *i;

	i = malloc(sizeof(*i), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	i->sp = sp;
	i->ifp = ifp;
	if_ref(ifp);
	i->flags = IFP_HP_HANDLED;
	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_INSERT_HEAD(&sp->accel_ifps, i, sp_link);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, i, sp_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	*ip = i;
	return (0);
}

static bool
ipsec_accel_spdadd_match(if_t ifp, void *arg)
{
	struct secpolicy *sp;

	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0 ||
	    ifp->if_ipsec_accel_m->if_spdadd == NULL)
		return (false);
	sp = arg;
	if (sp->accel_ifname != NULL &&
	    strcmp(sp->accel_ifname, if_name(ifp)) != 0)
		return (false);
	if (ipsec_accel_is_accel_sp(sp, ifp))
		return (false);
	return (true);
}

static int
ipsec_accel_spdadd_cb(if_t ifp, void *arg)
{
	struct secpolicy *sp;
	struct inpcb *inp;
	struct ifp_handle_sp *i;
	int error;

	sp = arg;
	inp = sp->ipsec_accel_add_sp_inp;
	dprintf("ipsec_accel_spdadd_cb: ifp %s m %p sp %p inp %p\n",
	    if_name(ifp), ifp->if_ipsec_accel_m->if_spdadd, sp, inp);
	error = ipsec_accel_remember_sp(sp, ifp, &i);
	if (error != 0) {
		dprintf("ipsec_accel_spdadd: %s if_spdadd %p remember res %d\n",
		    if_name(ifp), sp, error);
		return (error);
	}
	error = ifp->if_ipsec_accel_m->if_spdadd(ifp, sp, inp, &i->ifdata);
	if (error != 0) {
		i->flags |= IFP_HP_REJECTED;
		dprintf("ipsec_accel_spdadd: %s if_spdadd %p res %d\n",
		    if_name(ifp), sp, error);
	}
	return (error);
}

static void
ipsec_accel_spdadd_act(void *arg, int pending)
{
	struct secpolicy *sp;
	struct inpcb *inp;

	sp = arg;
	CURVNET_SET(sp->accel_add_tq.adddel_vnet);
	if_foreach_sleep(ipsec_accel_spdadd_match, arg,
	    ipsec_accel_spdadd_cb, arg);
	inp = sp->ipsec_accel_add_sp_inp;
	if (inp != NULL) {
		INP_WLOCK(inp);
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);
		sp->ipsec_accel_add_sp_inp = NULL;
	}
	CURVNET_RESTORE();
	key_freesp(&sp);
}

void
ipsec_accel_spdadd_impl(struct secpolicy *sp, struct inpcb *inp)
{
	struct ipsec_accel_adddel_sp_tq *tq;

	if (sp == NULL)
		return;
	if (sp->tcount == 0 && inp == NULL)
		return;
	tq = &sp->accel_add_tq;
	if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
		return;
	tq->adddel_vnet = curthread->td_vnet;
	sp->ipsec_accel_add_sp_inp = inp;
	if (inp != NULL)
		in_pcbref(inp);
	TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spdadd_act, sp);
	key_addref(sp);
	taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
}

static void
ipsec_accel_spddel_act(void *arg, int pending)
{
	struct ifp_handle_sp *i;
	struct secpolicy *sp;
	int error;

	sp = arg;
	CURVNET_SET(sp->accel_del_tq.adddel_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	for (;;) {
		i = CK_LIST_FIRST(&sp->accel_ifps);
		if (i == NULL)
			break;
		CK_LIST_REMOVE(i, sp_link);
		CK_LIST_REMOVE(i, sp_allh_link);
		mtx_unlock(&ipsec_accel_sav_tmp);
		NET_EPOCH_WAIT();
		if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
		    IFP_HP_HANDLED) {
			dprintf("spd deinstall %s %p\n", if_name(i->ifp), sp);
			error = i->ifp->if_ipsec_accel_m->if_spddel(i->ifp,
			    sp, i->ifdata);
			if (error != 0) {
				dprintf(
				    "ipsec_accel_spddel: %s if_spddel %p res %d\n",
				    if_name(i->ifp), sp, error);
			}
		}
		if_rele(i->ifp);
		free(i, M_IPSEC_MISC);
		mtx_lock(&ipsec_accel_sav_tmp);
	}
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesp(&sp);
	CURVNET_RESTORE();
}

void
ipsec_accel_spddel_impl(struct secpolicy *sp)
{
	struct ipsec_accel_adddel_sp_tq *tq;

	if (sp == NULL)
		return;

	tq = &sp->accel_del_tq;
	if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
		return;
	tq->adddel_vnet = curthread->td_vnet;
	TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spddel_act, sp);
	key_addref(sp);
	taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
}

static void
ipsec_accel_on_ifdown_sp(struct ifnet *ifp)
{
	struct ifp_handle_sp *i, *marker;
	struct secpolicy *sp;
	int error;

	marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	marker->flags = IFP_HP_MARKER;

	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, marker,
	    sp_allh_link);
	for (;;) {
		i = CK_LIST_NEXT(marker, sp_allh_link);
		if (i == NULL)
			break;
		CK_LIST_REMOVE(marker, sp_allh_link);
		CK_LIST_INSERT_AFTER(i, marker, sp_allh_link);
		if (i->ifp != ifp)
			continue;

		sp = i->sp;
		key_addref(sp);
		CK_LIST_REMOVE(i, sp_link);
		CK_LIST_REMOVE(i, sp_allh_link);
		mtx_unlock(&ipsec_accel_sav_tmp);
		NET_EPOCH_WAIT();
		if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
		    IFP_HP_HANDLED) {
			dprintf("spd deinstall %s %p\n", if_name(ifp), sp);
			error = ifp->if_ipsec_accel_m->if_spddel(ifp,
			    sp, i->ifdata);
			if (error != 0) {
				dprintf(
				    "ipsec_accel_on_ifdown_sp: %s if_spddel %p res %d\n",
				    if_name(ifp), sp, error);
			}
		}
		key_freesp(&sp);
		if_rele(ifp);
		free(i, M_IPSEC_MISC);
		mtx_lock(&ipsec_accel_sav_tmp);
	}
	CK_LIST_REMOVE(marker, sp_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(marker, M_IPSEC_MISC);
}

static void
ipsec_accel_on_ifdown_impl(struct ifnet *ifp)
{
	ipsec_accel_on_ifdown_sp(ifp);
	ipsec_accel_on_ifdown_sav(ifp);
}

static void
ipsec_accel_ifdetach_event(void *arg __unused, struct ifnet *ifp)
{
	if ((ifp->if_flags & IFF_RENAMING) != 0)
		return;
	ipsec_accel_on_ifdown_impl(ifp);
}

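/*
 * Check that the packet, after ESP header, IV, padding, and ICV are
 * accounted for, still fits into the path MTU; this follows the length
 * computation used by the software ESP output path.
 */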
static bool
ipsec_accel_output_pad(struct mbuf *m, struct secasvar *sav, int skip, int mtu)
{
	int alen, blks, hlen, padding, rlen;

	rlen = m->m_pkthdr.len - skip;
	hlen = ((sav->flags & SADB_X_EXT_OLD) != 0 ? sizeof(struct esp) :
	    sizeof(struct newesp)) + sav->ivlen;
	blks = MAX(4, SAV_ISCTR(sav) && VNET(esp_ctr_compatibility) ?
	    sav->tdb_encalgxform->native_blocksize :
	    sav->tdb_encalgxform->blocksize);
	padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;
	alen = xform_ah_authsize(sav->tdb_authalgxform);

	return (skip + hlen + rlen + padding + alen <= mtu);
}

static bool
ipsec_accel_output_tag(struct mbuf *m, u_int drv_spi)
{
	struct ipsec_accel_out_tag *tag;

	tag = (struct ipsec_accel_out_tag *)m_tag_get(
	    PACKET_TAG_IPSEC_ACCEL_OUT, sizeof(*tag), M_NOWAIT);
	if (tag == NULL)
		return (false);
	tag->drv_spi = drv_spi;
	m_tag_prepend(m, &tag->tag);
	return (true);
}

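/*
 * Decide whether the outgoing packet can be handed to the NIC for inline
 * IPsec processing.  On success the mbuf is tagged with the driver SPI,
 * the SA/SP references are released, and *hwassist is set to the checksum
 * and TSO capabilities the driver offers for this SA.
 */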
bool
ipsec_accel_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp,
    struct secpolicy *sp, struct secasvar *sav, int af, int mtu, int *hwassist)
{
	struct ifp_handle_sav *i;
	struct ip *ip;
	struct tcpcb *tp;
	u_long ip_len, skip;
	bool res;

	*hwassist = 0;
	res = false;
	if (ifp == NULL)
		return (res);

	M_ASSERTPKTHDR(m);
	NET_EPOCH_ASSERT();

	if (sav == NULL) {
		res = ipsec_accel_output_tag(m, IPSEC_ACCEL_DRV_SPI_BYPASS);
		goto out;
	}

	i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
	if (i == NULL || (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
	    IFP_HS_HANDLED)
		goto out;

	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
		ip_len = m->m_pkthdr.len;
		if (ip_len + i->hdr_ext_size > mtu)
			goto out;
		switch (af) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			skip = ip->ip_hl << 2;
			break;
		case AF_INET6:
			skip = sizeof(struct ip6_hdr);
			break;
		default:
			__unreachable();
		}
		if (!ipsec_accel_output_pad(m, sav, skip, mtu))
			goto out;
	}

	if (!ipsec_accel_output_tag(m, i->drv_spi))
		goto out;

	ipsec_accel_sa_recordxfer(sav, m);
	key_freesav(&sav);
	if (sp != NULL)
		key_freesp(&sp);

	*hwassist = ifp->if_ipsec_accel_m->if_hwassist(ifp, sav,
	    i->drv_spi, i->ifdata);
	res = true;
out:
	if (inp != NULL && inp->inp_pcbinfo == &V_tcbinfo) {
		INP_WLOCK_ASSERT(inp);
		tp = (struct tcpcb *)inp;
		if (res && (*hwassist & (CSUM_TSO | CSUM_IP6_TSO)) != 0) {
			tp->t_flags2 |= TF2_IPSEC_TSO;
		} else {
			tp->t_flags2 &= ~TF2_IPSEC_TSO;
		}
	}
	return (res);
}

struct ipsec_accel_in_tag *
ipsec_accel_input_tag_lookup(const struct mbuf *m)
{
	struct ipsec_accel_in_tag *tag;
	struct m_tag *xtag;

	xtag = m_tag_find(__DECONST(struct mbuf *, m),
	    PACKET_TAG_IPSEC_ACCEL_IN, NULL);
	if (xtag == NULL)
		return (NULL);
	tag = __containerof(xtag, struct ipsec_accel_in_tag, tag);
	return (tag);
}

int
ipsec_accel_input(struct mbuf *m, int offset, int proto)
{
	struct secasvar *sav;
	struct ipsec_accel_in_tag *tag;

	tag = ipsec_accel_input_tag_lookup(m);
	if (tag == NULL)
		return (ENXIO);

	if (tag->drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
	    tag->drv_spi > IPSEC_ACCEL_DRV_SPI_MAX) {
		dprintf("if %s mbuf %p drv_spi %d invalid, packet dropped\n",
		    (m->m_flags & M_PKTHDR) != 0 ? if_name(m->m_pkthdr.rcvif) :
		    "<unknwn>", m, tag->drv_spi);
		m_freem(m);
		return (EINPROGRESS);
	}

	sav = ipsec_accel_drvspi_to_sa(tag->drv_spi);
	if (sav != NULL)
		ipsec_accel_sa_recordxfer(sav, m);
	return (0);
}

static void
ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m)
{
	counter_u64_add(sav->accel_lft_sw, 1);
	counter_u64_add(sav->accel_lft_sw + 1, m->m_pkthdr.len);
	if (sav->accel_firstused == 0)
		sav->accel_firstused = time_second;
}

static void
ipsec_accel_sa_lifetime_update(struct seclifetime *lft_c,
    const struct seclifetime *lft_l)
{
	lft_c->allocations += lft_l->allocations;
	lft_c->bytes += lft_l->bytes;
	lft_c->usetime = min(lft_c->usetime, lft_l->usetime);
}

static void
ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav, if_t ifp,
    u_int drv_spi, uint64_t octets, uint64_t allocs)
{
	struct epoch_tracker et;
	struct ifp_handle_sav *i;
	uint64_t odiff, adiff;

	NET_EPOCH_ENTER(et);
	mtx_lock(&ipsec_accel_cnt_lock);

	if (allocs != 0) {
		if (sav->firstused == 0)
			sav->firstused = time_second;
		if (sav->accel_firstused == 0)
			sav->accel_firstused = time_second;
	}

	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp && i->drv_spi == drv_spi)
			break;
	}
	if (i == NULL)
		goto out;

	odiff = octets - i->cnt_octets;
	adiff = allocs - i->cnt_allocs;

	if (sav->lft_c != NULL) {
		counter_u64_add(sav->lft_c_bytes, odiff);
		counter_u64_add(sav->lft_c_allocations, adiff);
	}

	i->cnt_octets = octets;
	i->cnt_allocs = allocs;
	sav->accel_hw_octets += odiff;
	sav->accel_hw_allocs += adiff;

out:
	mtx_unlock(&ipsec_accel_cnt_lock);
	NET_EPOCH_EXIT(et);
}

static int
ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
    if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs)
{
	struct ifp_handle_sav *i;
	int error;

	NET_EPOCH_ASSERT();
	error = 0;

	mtx_lock(&ipsec_accel_cnt_lock);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp && i->drv_spi == drv_spi) {
			*octets = i->cnt_octets;
			*allocs = i->cnt_allocs;
			break;
		}
	}
	if (i == NULL)
		error = ENOENT;
	mtx_unlock(&ipsec_accel_cnt_lock);
	return (error);
}

static void
ipsec_accel_sa_lifetime_hw(struct secasvar *sav, if_t ifp,
    struct seclifetime *lft)
{
	struct ifp_handle_sav *i;
	if_sa_cnt_fn_t p;

	IFNET_RLOCK_ASSERT();

	i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
	if (i != NULL && (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
	    IFP_HS_HANDLED) {
		p = ifp->if_ipsec_accel_m->if_sa_cnt;
		if (p != NULL)
			p(ifp, sav, i->drv_spi, i->ifdata, lft);
	}
}

static int
ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
    struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
    struct rm_priotracker *sahtree_trackerp)
{
	struct seclifetime lft_l, lft_s;
	struct ifp_handle_sav *i;
	if_t ifp1;
	if_sa_cnt_fn_t p;
	int error;

	error = 0;
	memset(&lft_l, 0, sizeof(lft_l));
	memset(&lft_s, 0, sizeof(lft_s));

	switch (op & ~IF_SA_CNT_UPD) {
	case IF_SA_CNT_IFP_HW_VAL:
		ipsec_accel_sa_lifetime_hw(sav, ifp, &lft_l);
		ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
		break;

	case IF_SA_CNT_TOTAL_SW_VAL:
		lft_l.allocations = (uint32_t)counter_u64_fetch(
		    sav->accel_lft_sw);
		lft_l.bytes = counter_u64_fetch(sav->accel_lft_sw + 1);
		lft_l.usetime = sav->accel_firstused;
		break;

	case IF_SA_CNT_TOTAL_HW_VAL:
		IFNET_RLOCK_ASSERT();
		CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
			if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
			    IFP_HS_HANDLED)
				continue;
			ifp1 = i->ifp;
			p = ifp1->if_ipsec_accel_m->if_sa_cnt;
			if (p == NULL)
				continue;
			memset(&lft_s, 0, sizeof(lft_s));
			if (sahtree_trackerp != NULL)
				ipsec_sahtree_runlock(sahtree_trackerp);
			error = p(ifp1, sav, i->drv_spi, i->ifdata, &lft_s);
			if (sahtree_trackerp != NULL)
				ipsec_sahtree_rlock(sahtree_trackerp);
			if (error == 0)
				ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
		}
		break;
	}

	if (error == 0) {
		if ((op & IF_SA_CNT_UPD) == 0)
			memset(lft_c, 0, sizeof(*lft_c));
		ipsec_accel_sa_lifetime_update(lft_c, &lft_l);
	}

	return (error);
}

static void
ipsec_accel_sync_imp(void)
{
	taskqueue_drain_all(ipsec_accel_tq);
}

static struct mbuf *
ipsec_accel_key_setaccelif_impl(struct secasvar *sav)
{
	struct mbuf *m, *m1;
	struct ifp_handle_sav *i;
	struct epoch_tracker et;

	if (sav->accel_ifname != NULL)
		return (key_setaccelif(sav->accel_ifname));

	m = m1 = NULL;

	NET_EPOCH_ENTER(et);
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
		    IFP_HS_HANDLED) {
			m1 = key_setaccelif(if_name(i->ifp));
			if (m == NULL)
				m = m1;
			else if (m1 != NULL)
				m_cat(m, m1);
		}
	}
	NET_EPOCH_EXIT(et);
	return (m);
}

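/*
 * Map a driver SPI back to the cached xform_history of the corresponding
 * SA handle, for use by the inbound offload path.
 */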
static bool
ipsec_accel_fill_xh_impl(if_t ifp, uint32_t drv_spi, struct xform_history *xh)
{
	struct ifp_handle_sav *i;

	if (drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
	    drv_spi > IPSEC_ACCEL_DRV_SPI_MAX)
		return (false);

	i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
	if (i == NULL)
		return (false);
	memcpy(xh, &i->xfh, sizeof(*xh));
	return (true);
}

#endif /* IPSEC_OFFLOAD */