1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #include <sys/param.h>
26 #include <sys/types.h>
27 #include <sys/stream.h>
28 #include <sys/strsubr.h>
29 #include <sys/strsun.h>
30 #include <sys/stropts.h>
31 #include <sys/zone.h>
32 #include <sys/vnode.h>
33 #include <sys/sysmacros.h>
34 #define _SUN_TPI_VERSION 2
35 #include <sys/tihdr.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/mkdev.h>
39 #include <sys/debug.h>
40 #include <sys/kmem.h>
41 #include <sys/cmn_err.h>
42 #include <sys/suntpi.h>
43 #include <sys/policy.h>
44 #include <sys/dls.h>
45
46 #include <sys/socket.h>
47 #include <netinet/in.h>
48 #include <net/pfkeyv2.h>
49 #include <net/pfpolicy.h>
50
51 #include <inet/common.h>
52 #include <netinet/ip6.h>
53 #include <inet/ip.h>
54 #include <inet/ip6.h>
55 #include <inet/mi.h>
56 #include <inet/proto_set.h>
57 #include <inet/nd.h>
58 #include <inet/ip_if.h>
59 #include <inet/optcom.h>
60 #include <inet/ipsec_impl.h>
61 #include <inet/spdsock.h>
62 #include <inet/sadb.h>
63 #include <inet/iptun.h>
64 #include <inet/iptun/iptun_impl.h>
65
66 #include <sys/isa_defs.h>
67
68 #include <c2/audit.h>
69
70 /*
71 * This is a transport provider for the PF_POLICY IPsec policy
72 * management socket, which provides a management interface into the
73 * SPD, allowing policy rules to be added, deleted, and queried.
74 *
75 * This effectively replaces the old private SIOC*IPSECONFIG ioctls
76 * with an extensible interface which will hopefully be public some
77 * day.
78 *
79 * See <net/pfpolicy.h> for more details on the protocol.
80 *
81 * We link against drv/ip and call directly into it to manipulate the
82 * SPD; see ipsec_impl.h for the policy data structures and spd.c for
83 * the code which maintains them.
84 *
85 * The MT model of this is QPAIR with the addition of some explicit
86 * locking to protect system-wide policy data structures.
87 */
88
static vmem_t *spdsock_vmem;		/* for minor numbers. */

/* True when (x) is 64-bit aligned, as PF_POLICY message parsing requires. */
#define	ALIGNED64(x) IS_P2ALIGNED((x), sizeof (uint64_t))

/* Default structure copied into T_INFO_ACK messages (from rts.c...) */
static struct T_info_ack spdsock_g_t_info_ack = {
	T_INFO_ACK,
	T_INFINITE,	/* TSDU_size. Maximum size messages. */
	T_INVALID,	/* ETSDU_size. No expedited data. */
	T_INVALID,	/* CDATA_size. No connect data. */
	T_INVALID,	/* DDATA_size. No disconnect data. */
	0,		/* ADDR_size. */
	0,		/* OPT_size. No user-settable options */
	64 * 1024,	/* TIDU_size. spdsock allows maximum size messages. */
	T_COTS,		/* SERV_type. spdsock supports connection oriented. */
	TS_UNBND,	/* CURRENT_state. This is set from spdsock_state. */
	(XPG4_1)	/* Provider flags */
};

/* Named Dispatch Parameter Management Structure */
typedef struct spdsockparam_s {
	uint_t	spdsock_param_min;	/* inclusive lower bound */
	uint_t	spdsock_param_max;	/* inclusive upper bound */
	uint_t	spdsock_param_value;	/* current value */
	char	*spdsock_param_name;	/* NDD variable name */
} spdsockparam_t;

/*
 * Table of NDD variables supported by spdsock. These are loaded into
 * spdsock_g_nd in spdsock_init_nd.
 * All of these are alterable, within the min/max values given, at run time.
 */
static spdsockparam_t lcl_param_arr[] = {
	/* min	max	value	name */
	{ 4096, 65536, 8192, "spdsock_xmit_hiwat"},
	{ 0, 65536, 1024, "spdsock_xmit_lowat"},
	{ 4096, 65536, 8192, "spdsock_recv_hiwat"},
	{ 65536, 1024*1024*1024, 256*1024, "spdsock_max_buf"},
	{ 0, 3, 0, "spdsock_debug"},
};

/* Shorthand accessors into a spd_stack_t's private copy of the table above. */
#define	spds_xmit_hiwat	spds_params[0].spdsock_param_value
#define	spds_xmit_lowat	spds_params[1].spdsock_param_value
#define	spds_recv_hiwat	spds_params[2].spdsock_param_value
#define	spds_max_buf	spds_params[3].spdsock_param_value
#define	spds_debug	spds_params[4].spdsock_param_value

/* Debug printf macros, gated by the spdsock_debug tunable (0..3). */
#define	ss0dbg(a)	printf a
/* NOTE: != 0 instead of > 0 so lint doesn't complain. */
#define	ss1dbg(spds, a)	if (spds->spds_debug != 0) printf a
#define	ss2dbg(spds, a)	if (spds->spds_debug > 1) printf a
#define	ss3dbg(spds, a)	if (spds->spds_debug > 2) printf a
140
/*
 * Reset the policy-dump cursor in *(ss) so a subsequent SPD_DUMP restarts
 * from the beginning of polhead (iph).  The caller must hold iph_lock as
 * reader so iph_gen is stable while we snapshot it.
 *
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and stays safe inside unbraced if/else bodies.
 */
#define	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph) do { \
	ASSERT(RW_READ_HELD(&(iph)->iph_lock)); \
	(ss)->spdsock_dump_head = (iph); \
	(ss)->spdsock_dump_gen = (iph)->iph_gen; \
	(ss)->spdsock_dump_cur_type = 0; \
	(ss)->spdsock_dump_cur_af = IPSEC_AF_V4; \
	(ss)->spdsock_dump_cur_rule = NULL; \
	(ss)->spdsock_dump_count = 0; \
	(ss)->spdsock_dump_cur_chain = 0; \
} while (0)
151
/* STREAMS entry points and stack-lifecycle callbacks, defined below. */
static int spdsock_close(queue_t *);
static int spdsock_open(queue_t *, dev_t *, int, int, cred_t *);
static void spdsock_wput(queue_t *, mblk_t *);
static void spdsock_wsrv(queue_t *);
static void spdsock_rsrv(queue_t *);
static void *spdsock_stack_init(netstackid_t stackid, netstack_t *ns);
static void spdsock_stack_shutdown(netstackid_t stackid, void *arg);
static void spdsock_stack_fini(netstackid_t stackid, void *arg);
static void spdsock_loadcheck(void *);
static void spdsock_merge_algs(spd_stack_t *);
static void spdsock_flush_one(ipsec_policy_head_t *, netstack_t *);
static mblk_t *spdsock_dump_next_record(spdsock_t *);
static void update_iptun_policy(ipsec_tun_pol_t *);

/* Module id 5138, name "spdsock", min/max packet sizes, hi/lo water marks. */
static struct module_info info = {
	5138, "spdsock", 1, INFPSZ, 512, 128
};

/* Read side: no put procedure; open/close plus a service routine. */
static struct qinit rinit = {
	NULL, (pfi_t)spdsock_rsrv, spdsock_open, spdsock_close,
	NULL, &info
};

/* Write side: put and service routines handle PF_POLICY requests. */
static struct qinit winit = {
	(pfi_t)spdsock_wput, (pfi_t)spdsock_wsrv, NULL, NULL, NULL, &info
};

/* Exported to the framework that plumbs this driver's streams. */
struct streamtab spdsockinfo = {
	&rinit, &winit
};
182
/* mapping from alg type to protocol number, as per RFC 2407 */
static const uint_t algproto[] = {
	PROTO_IPSEC_AH,
	PROTO_IPSEC_ESP,
};

#define	NALGPROTOS	(sizeof (algproto) / sizeof (algproto[0]))

/* mapping from kernel exec mode to spdsock exec mode */
static const uint_t execmodes[] = {
	SPD_ALG_EXEC_MODE_SYNC,
	SPD_ALG_EXEC_MODE_ASYNC
};

#define	NEXECMODES	(sizeof (execmodes) / sizeof (execmodes[0]))

/* Sentinel polhead values meaning "every active/inactive polhead". */
#define	ALL_ACTIVE_POLHEADS ((ipsec_policy_head_t *)-1)
#define	ALL_INACTIVE_POLHEADS ((ipsec_policy_head_t *)-2)

/* NULL-safe accessor for a tunnel policy's name (for audit records). */
#define	ITP_NAME(itp) (itp != NULL ? itp->itp_name : NULL)
203
204 /* ARGSUSED */
205 static int
spdsock_param_get(q,mp,cp,cr)206 spdsock_param_get(q, mp, cp, cr)
207 queue_t *q;
208 mblk_t *mp;
209 caddr_t cp;
210 cred_t *cr;
211 {
212 spdsockparam_t *spdsockpa = (spdsockparam_t *)cp;
213 uint_t value;
214 spdsock_t *ss = (spdsock_t *)q->q_ptr;
215 spd_stack_t *spds = ss->spdsock_spds;
216
217 mutex_enter(&spds->spds_param_lock);
218 value = spdsockpa->spdsock_param_value;
219 mutex_exit(&spds->spds_param_lock);
220
221 (void) mi_mpprintf(mp, "%u", value);
222 return (0);
223 }
224
225 /* This routine sets an NDD variable in a spdsockparam_t structure. */
226 /* ARGSUSED */
227 static int
spdsock_param_set(q,mp,value,cp,cr)228 spdsock_param_set(q, mp, value, cp, cr)
229 queue_t *q;
230 mblk_t *mp;
231 char *value;
232 caddr_t cp;
233 cred_t *cr;
234 {
235 ulong_t new_value;
236 spdsockparam_t *spdsockpa = (spdsockparam_t *)cp;
237 spdsock_t *ss = (spdsock_t *)q->q_ptr;
238 spd_stack_t *spds = ss->spdsock_spds;
239
240 /* Convert the value from a string into a long integer. */
241 if (ddi_strtoul(value, NULL, 10, &new_value) != 0)
242 return (EINVAL);
243
244 mutex_enter(&spds->spds_param_lock);
245 /*
246 * Fail the request if the new value does not lie within the
247 * required bounds.
248 */
249 if (new_value < spdsockpa->spdsock_param_min ||
250 new_value > spdsockpa->spdsock_param_max) {
251 mutex_exit(&spds->spds_param_lock);
252 return (EINVAL);
253 }
254
255 /* Set the new value */
256 spdsockpa->spdsock_param_value = new_value;
257 mutex_exit(&spds->spds_param_lock);
258
259 return (0);
260 }
261
/*
 * Initialize at module load time
 */
boolean_t
spdsock_ddi_init(void)
{
	/*
	 * Size the largest options buffer we'll ever need, from the
	 * option database (spdsock_opt_obj, declared elsewhere in this
	 * module).
	 */
	spdsock_max_optsize = optcom_max_optsize(
	    spdsock_opt_obj.odb_opt_des_arr, spdsock_opt_obj.odb_opt_arr_cnt);

	/* Arena handing out device minor numbers 1..MAXMIN, quantum 1. */
	spdsock_vmem = vmem_create("spdsock", (void *)1, MAXMIN, 1,
	    NULL, NULL, NULL, 1, VM_SLEEP | VMC_IDENTIFIER);

	/*
	 * We want to be informed each time a stack is created or
	 * destroyed in the kernel, so we can maintain the
	 * set of spd_stack_t's.
	 */
	netstack_register(NS_SPDSOCK, spdsock_stack_init,
	    spdsock_stack_shutdown, spdsock_stack_fini);

	return (B_TRUE);
}
284
285 /*
286 * Walk through the param array specified registering each element with the
287 * named dispatch handler.
288 */
289 static boolean_t
spdsock_param_register(IDP * ndp,spdsockparam_t * ssp,int cnt)290 spdsock_param_register(IDP *ndp, spdsockparam_t *ssp, int cnt)
291 {
292 for (; cnt-- > 0; ssp++) {
293 if (ssp->spdsock_param_name != NULL &&
294 ssp->spdsock_param_name[0]) {
295 if (!nd_load(ndp,
296 ssp->spdsock_param_name,
297 spdsock_param_get, spdsock_param_set,
298 (caddr_t)ssp)) {
299 nd_free(ndp);
300 return (B_FALSE);
301 }
302 }
303 }
304 return (B_TRUE);
305 }
306
/*
 * Initialize for each stack instance
 */
/* ARGSUSED */
static void *
spdsock_stack_init(netstackid_t stackid, netstack_t *ns)
{
	spd_stack_t *spds;
	spdsockparam_t *ssp;

	spds = (spd_stack_t *)kmem_zalloc(sizeof (*spds), KM_SLEEP);
	spds->spds_netstack = ns;

	ASSERT(spds->spds_g_nd == NULL);

	/* Each stack gets its own writable copy of the tunable table. */
	ssp = (spdsockparam_t *)kmem_alloc(sizeof (lcl_param_arr), KM_SLEEP);
	spds->spds_params = ssp;
	bcopy(lcl_param_arr, ssp, sizeof (lcl_param_arr));

	/*
	 * Registration failure only leaves the tunables invisible to NDD;
	 * it is deliberately non-fatal here.
	 */
	(void) spdsock_param_register(&spds->spds_g_nd, ssp,
	    A_CNT(lcl_param_arr));

	mutex_init(&spds->spds_param_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spds->spds_alg_lock, NULL, MUTEX_DEFAULT, NULL);

	return (spds);
}
334
/*
 * Module unload: release the minor-number arena and stop receiving
 * netstack lifecycle callbacks (per-stack state was freed via
 * spdsock_stack_fini).
 */
void
spdsock_ddi_destroy(void)
{
	vmem_destroy(spdsock_vmem);

	netstack_unregister(NS_SPDSOCK);
}
342
/*
 * Do pre-removal cleanup.
 */
/* ARGSUSED */
static void
spdsock_stack_shutdown(netstackid_t stackid, void *arg)
{
	spd_stack_t *spds = (spd_stack_t *)arg;

	/* Release any cached algorithm-table message for this stack. */
	if (spds->spds_mp_algs != NULL) {
		freemsg(spds->spds_mp_algs);
		spds->spds_mp_algs = NULL;
	}
}
357
/*
 * Final per-stack teardown: undo everything spdsock_stack_init set up.
 * spdsock_stack_shutdown must already have run (hence the ASSERT).
 */
/* ARGSUSED */
static void
spdsock_stack_fini(netstackid_t stackid, void *arg)
{
	spd_stack_t *spds = (spd_stack_t *)arg;

	ASSERT(spds->spds_mp_algs == NULL);
	mutex_destroy(&spds->spds_param_lock);
	mutex_destroy(&spds->spds_alg_lock);
	nd_free(&spds->spds_g_nd);
	kmem_free(spds->spds_params, sizeof (lcl_param_arr));
	spds->spds_params = NULL;

	kmem_free(spds, sizeof (*spds));
}
373
374 /*
375 * NOTE: large quantities of this should be shared with keysock.
376 * Would be nice to combine some of this into a common module, but
377 * not possible given time pressures.
378 */
379
/*
 * High-level reality checking of extensions.
 */
/*
 * Currently only SPD_EXT_TUN_NAME gets real checking: the tunnel-name
 * string is forcibly NUL-terminated in place at its last byte if the
 * sender did not terminate it.  Always returns B_TRUE for now.
 */
/* ARGSUSED */ /* XXX */
static boolean_t
ext_check(spd_ext_t *ext)
{
	spd_if_t *tunname = (spd_if_t *)ext;
	int i;
	char *idstr;

	if (ext->spd_ext_type == SPD_EXT_TUN_NAME) {
		/* (NOTE: Modified from SADB_EXT_IDENTITY..) */

		/*
		 * Make sure the strings in these identities are
		 * null-terminated.  Let's "proactively" null-terminate the
		 * string at the last byte if it's not terminated sooner.
		 */
		/*
		 * Bytes available for the name; spd_if_len was already
		 * bounds-checked by spdsock_get_ext() before we're called.
		 */
		i = SPD_64TO8(tunname->spd_if_len) - sizeof (spd_if_t);
		idstr = (char *)(tunname + 1);
		while (*idstr != '\0' && i > 0) {
			i--;
			idstr++;
		}
		if (i == 0) {
			/*
			 * I.e., if the bozo user didn't NULL-terminate the
			 * string...
			 */
			idstr--;
			*idstr = '\0';
		}
	}
	return (B_TRUE);	/* For now... */
}
416
417
418
/* Return values for spdsock_get_ext(). */
#define	KGE_OK	0	/* parsed clean */
#define	KGE_DUP	1	/* duplicate extension */
#define	KGE_UNK	2	/* unknown extension type */
#define	KGE_LEN	3	/* extension length error */
#define	KGE_CHK	4	/* failed ext_check() */

/*
 * Parse basic extension headers and return in the passed-in pointer vector.
 * Return values include:
 *
 *	KGE_OK	Everything's nice and parsed out.
 *		If there are no extensions, place NULL in extv[0].
 *	KGE_DUP	There is a duplicate extension.
 *		First instance in appropriate bin.  First duplicate in
 *		extv[0].
 *	KGE_UNK	Unknown extension type encountered.  extv[0] contains
 *		unknown header.
 *	KGE_LEN	Extension length error.
 *	KGE_CHK	High-level reality check failed on specific extension.
 *
 * My apologies for some of the pointer arithmetic in here.  I'm thinking
 * like an assembly programmer, yet trying to make the compiler happy.
 */
static int
spdsock_get_ext(spd_ext_t *extv[], spd_msg_t *basehdr, uint_t msgsize)
{
	/* extv must have SPD_EXT_MAX + 1 slots; slot 0 doubles as scratch. */
	bzero(extv, sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));

	/* Use extv[0] as the "current working pointer". */

	extv[0] = (spd_ext_t *)(basehdr + 1);

	while (extv[0] < (spd_ext_t *)(((uint8_t *)basehdr) + msgsize)) {
		/* Check for unknown headers. */
		if (extv[0]->spd_ext_type == 0 ||
		    extv[0]->spd_ext_type > SPD_EXT_MAX)
			return (KGE_UNK);

		/*
		 * Check length.  Use uint64_t because extlen is in units
		 * of 64-bit words.  If length goes beyond the msgsize,
		 * return an error.  (Zero length also qualifies here.)
		 */
		if (extv[0]->spd_ext_len == 0 ||
		    (void *)((uint64_t *)extv[0] + extv[0]->spd_ext_len) >
		    (void *)((uint8_t *)basehdr + msgsize))
			return (KGE_LEN);

		/* Check for redundant headers. */
		if (extv[extv[0]->spd_ext_type] != NULL)
			return (KGE_DUP);

		/*
		 * Reality check the extension if possible at the spdsock
		 * level.
		 */
		if (!ext_check(extv[0]))
			return (KGE_CHK);

		/* If I make it here, assign the appropriate bin. */
		extv[extv[0]->spd_ext_type] = extv[0];

		/* Advance pointer (See above for uint64_t ptr reasoning.) */
		extv[0] = (spd_ext_t *)
		    ((uint64_t *)extv[0] + extv[0]->spd_ext_len);
	}

	/* Everything's cool. */

	/*
	 * If extv[0] == NULL, then there are no extension headers in this
	 * message.  Ensure that this is the case.
	 */
	if (extv[0] == (spd_ext_t *)(basehdr + 1))
		extv[0] = NULL;

	return (KGE_OK);
}
498
/*
 * Diagnostic codes for malformed extensions.  NOTE(review): presumably
 * indexed by SPD_EXT_* extension type (minus an offset) -- confirm against
 * the callers before reordering either table.
 */
static const int bad_ext_diag[] = {
	SPD_DIAGNOSTIC_MALFORMED_LCLPORT,
	SPD_DIAGNOSTIC_MALFORMED_REMPORT,
	SPD_DIAGNOSTIC_MALFORMED_PROTO,
	SPD_DIAGNOSTIC_MALFORMED_LCLADDR,
	SPD_DIAGNOSTIC_MALFORMED_REMADDR,
	SPD_DIAGNOSTIC_MALFORMED_ACTION,
	SPD_DIAGNOSTIC_MALFORMED_RULE,
	SPD_DIAGNOSTIC_MALFORMED_RULESET,
	SPD_DIAGNOSTIC_MALFORMED_ICMP_TYPECODE
};

/* Companion table for duplicated extensions; same indexing as above. */
static const int dup_ext_diag[] = {
	SPD_DIAGNOSTIC_DUPLICATE_LCLPORT,
	SPD_DIAGNOSTIC_DUPLICATE_REMPORT,
	SPD_DIAGNOSTIC_DUPLICATE_PROTO,
	SPD_DIAGNOSTIC_DUPLICATE_LCLADDR,
	SPD_DIAGNOSTIC_DUPLICATE_REMADDR,
	SPD_DIAGNOSTIC_DUPLICATE_ACTION,
	SPD_DIAGNOSTIC_DUPLICATE_RULE,
	SPD_DIAGNOSTIC_DUPLICATE_RULESET,
	SPD_DIAGNOSTIC_DUPLICATE_ICMP_TYPECODE
};
522
/*
 * Transmit a PF_POLICY error message to the instance either pointed to
 * by ks, the instance with serial number serial, or more, depending.
 *
 * The faulty message (or a reasonable facsimile thereof) is in mp.
 * This function will free mp or recycle it for delivery, thereby causing
 * the stream head to free it.
 */
static void
spdsock_error(queue_t *q, mblk_t *mp, int error, int diagnostic)
{
	spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

	ASSERT(mp->b_datap->db_type == M_DATA);

	/* Normalize out-of-range message types before echoing them back. */
	if (spmsg->spd_msg_type < SPD_MIN ||
	    spmsg->spd_msg_type > SPD_MAX)
		spmsg->spd_msg_type = SPD_RESERVED;

	/*
	 * Strip out extension headers.
	 */
	ASSERT(mp->b_rptr + sizeof (*spmsg) <= mp->b_datap->db_lim);
	mp->b_wptr = mp->b_rptr + sizeof (*spmsg);
	spmsg->spd_msg_len = SPD_8TO64(sizeof (spd_msg_t));
	spmsg->spd_msg_errno = (uint8_t)error;
	spmsg->spd_msg_diagnostic = (uint16_t)diagnostic;

	/* Recycle the trimmed message back up the read side. */
	qreply(q, mp);
}
553
/* Report an EINVAL with the given PF_POLICY diagnostic code to the sender. */
static void
spdsock_diag(queue_t *q, mblk_t *mp, int diagnostic)
{
	spdsock_error(q, mp, EINVAL, diagnostic);
}
559
/* Echo the (successfully processed) request mp back up the read side. */
static void
spd_echo(queue_t *q, mblk_t *mp)
{
	qreply(q, mp);
}
565
/*
 * Do NOT consume a reference to itp.
 */
/*
 * itp_walk() callback for SPD_FLUSH: flush one tunnel's active or
 * inactive polhead (selected by cookie, which carries a boolean, not a
 * pointer) and clear the corresponding ITPF_* flag bits.
 */
/*ARGSUSED*/
static void
spdsock_flush_node(ipsec_tun_pol_t *itp, void *cookie, netstack_t *ns)
{
	boolean_t active = (boolean_t)cookie;	/* flag smuggled as void * */
	ipsec_policy_head_t *iph;

	iph = active ? itp->itp_policy : itp->itp_inactive;
	/* Hold iph across the flush; spdsock_flush_one consumes the hold. */
	IPPH_REFHOLD(iph);
	mutex_enter(&itp->itp_lock);
	spdsock_flush_one(iph, ns);	/* Releases iph refhold. */
	if (active)
		itp->itp_flags &= ~ITPF_PFLAGS;
	else
		itp->itp_flags &= ~ITPF_IFLAGS;
	mutex_exit(&itp->itp_lock);
	/* SPD_FLUSH is worth a tunnel MTU check. */
	update_iptun_policy(itp);
}
588
/*
 * Clear out one polhead.
 */
/* Consumes the hold the caller placed on iph. */
static void
spdsock_flush_one(ipsec_policy_head_t *iph, netstack_t *ns)
{
	/* Write-lock the head for the duration of the flush. */
	rw_enter(&iph->iph_lock, RW_WRITER);
	ipsec_polhead_flush(iph, ns);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ns);
}
600
/*
 * Handle an SPD_FLUSH request.  iph is either a specific polhead or one
 * of the ALL_{ACTIVE,INACTIVE}_POLHEADS sentinels; in the latter case we
 * flush the matching global polhead plus every tunnel's matching polhead.
 * Audit records are emitted when auditing is enabled, and mp is echoed
 * back to the sender on completion.
 */
static void
spdsock_flush(queue_t *q, ipsec_policy_head_t *iph, ipsec_tun_pol_t *itp,
    mblk_t *mp)
{
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	uint32_t auditing = AU_AUDITING();

	if (iph != ALL_ACTIVE_POLHEADS && iph != ALL_INACTIVE_POLHEADS) {
		/* Specific polhead: flush it (consumes caller's hold). */
		spdsock_flush_one(iph, ns);
		if (auditing) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLUSH, cr, ns,
			    ITP_NAME(itp), active, 0, cpid);
		}
	} else {
		active = (iph == ALL_ACTIVE_POLHEADS);

		/* First flush the global policy. */
		spdsock_flush_one(active ? ipsec_system_policy(ns) :
		    ipsec_inactive_policy(ns), ns);
		if (auditing) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_FLUSH, cr, ns, NULL,
			    active, 0, cpid);
		}
		/* Then flush every tunnel's appropriate one. */
		itp_walk(spdsock_flush_node, (void *)active, ns);
		if (auditing) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_FLUSH, cr, ns,
			    "all tunnels", active, 0, cpid);
		}
	}

	/* Success: bounce the request back to the sender. */
	spd_echo(q, mp);
}
650
/*
 * Convert the parsed PF_POLICY extensions in extv into an ipsec_selkey_t.
 * Only the extensions actually present contribute; each sets its IPSL_*
 * bit in ipsl_valid.  Returns B_FALSE with *diag set on a short address
 * extension or a mixed v4/v6 address-family selector.
 */
static boolean_t
spdsock_ext_to_sel(spd_ext_t **extv, ipsec_selkey_t *sel, int *diag)
{
	bzero(sel, sizeof (*sel));

	if (extv[SPD_EXT_PROTO] != NULL) {
		struct spd_proto *pr =
		    (struct spd_proto *)extv[SPD_EXT_PROTO];
		sel->ipsl_proto = pr->spd_proto_number;
		sel->ipsl_valid |= IPSL_PROTOCOL;
	}
	if (extv[SPD_EXT_LCLPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_LCLPORT];
		/* NOTE(review): only minport is consumed; no range support. */
		sel->ipsl_lport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_LOCAL_PORT;
	}
	if (extv[SPD_EXT_REMPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_REMPORT];
		sel->ipsl_rport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_REMOTE_PORT;
	}

	if (extv[SPD_EXT_ICMP_TYPECODE] != NULL) {
		struct spd_typecode *tc=
		    (struct spd_typecode *)extv[SPD_EXT_ICMP_TYPECODE];

		sel->ipsl_valid |= IPSL_ICMP_TYPE;
		sel->ipsl_icmp_type = tc->spd_typecode_type;
		/* Collapse an inverted type range to a single value. */
		if (tc->spd_typecode_type_end < tc->spd_typecode_type)
			sel->ipsl_icmp_type_end = tc->spd_typecode_type;
		else
			sel->ipsl_icmp_type_end = tc->spd_typecode_type_end;

		/* 255 apparently means "any code" -- confirm vs pfpolicy.h */
		if (tc->spd_typecode_code != 255) {
			sel->ipsl_valid |= IPSL_ICMP_CODE;
			sel->ipsl_icmp_code = tc->spd_typecode_code;
			if (tc->spd_typecode_code_end < tc->spd_typecode_code)
				sel->ipsl_icmp_code_end = tc->spd_typecode_code;
			else
				sel->ipsl_icmp_code_end =
				    tc->spd_typecode_code_end;
		}
	}
	/*
	 * Copy one address extension into sel: checks the extension is long
	 * enough for the address family's address, then copies the address
	 * and prefix length and records both the selector bit and the AF.
	 */
#define	ADDR2SEL(sel, extv, field, pfield, extn, bit) \
	if ((extv)[(extn)] != NULL) { \
		uint_t addrlen; \
		struct spd_address *ap = \
		    (struct spd_address *)((extv)[(extn)]); \
		addrlen = (ap->spd_address_af == AF_INET6) ? \
		    IPV6_ADDR_LEN : IP_ADDR_LEN; \
		if (SPD_64TO8(ap->spd_address_len) < \
		    (addrlen + sizeof (*ap))) { \
			*diag = SPD_DIAGNOSTIC_BAD_ADDR_LEN; \
			return (B_FALSE); \
		} \
		bcopy((ap+1), &((sel)->field), addrlen); \
		(sel)->pfield = ap->spd_address_prefixlen; \
		(sel)->ipsl_valid |= (bit); \
		(sel)->ipsl_valid |= (ap->spd_address_af == AF_INET6) ? \
		    IPSL_IPV6 : IPSL_IPV4; \
	}

	ADDR2SEL(sel, extv, ipsl_local, ipsl_local_pfxlen,
	    SPD_EXT_LCLADDR, IPSL_LOCAL_ADDR);
	ADDR2SEL(sel, extv, ipsl_remote, ipsl_remote_pfxlen,
	    SPD_EXT_REMADDR, IPSL_REMOTE_ADDR);

	/* Local and remote addresses must agree on address family. */
	if ((sel->ipsl_valid & (IPSL_IPV6|IPSL_IPV4)) ==
	    (IPSL_IPV6|IPSL_IPV4)) {
		*diag = SPD_DIAGNOSTIC_MIXED_AF;
		return (B_FALSE);
	}

#undef ADDR2SEL

	return (B_TRUE);
}
730
731 static boolean_t
spd_convert_type(uint32_t type,ipsec_act_t * act)732 spd_convert_type(uint32_t type, ipsec_act_t *act)
733 {
734 switch (type) {
735 case SPD_ACTTYPE_DROP:
736 act->ipa_type = IPSEC_ACT_DISCARD;
737 return (B_TRUE);
738
739 case SPD_ACTTYPE_PASS:
740 act->ipa_type = IPSEC_ACT_CLEAR;
741 return (B_TRUE);
742
743 case SPD_ACTTYPE_IPSEC:
744 act->ipa_type = IPSEC_ACT_APPLY;
745 return (B_TRUE);
746 }
747 return (B_FALSE);
748 }
749
750 static boolean_t
spd_convert_flags(uint32_t flags,ipsec_act_t * act)751 spd_convert_flags(uint32_t flags, ipsec_act_t *act)
752 {
753 /*
754 * Note use of !! for boolean canonicalization.
755 */
756 act->ipa_apply.ipp_use_ah = !!(flags & SPD_APPLY_AH);
757 act->ipa_apply.ipp_use_esp = !!(flags & SPD_APPLY_ESP);
758 act->ipa_apply.ipp_use_espa = !!(flags & SPD_APPLY_ESPA);
759 act->ipa_apply.ipp_use_se = !!(flags & SPD_APPLY_SE);
760 act->ipa_apply.ipp_use_unique = !!(flags & SPD_APPLY_UNIQUE);
761 return (B_TRUE);
762 }
763
764 static void
spdsock_reset_act(ipsec_act_t * act)765 spdsock_reset_act(ipsec_act_t *act)
766 {
767 bzero(act, sizeof (*act));
768 act->ipa_apply.ipp_espe_maxbits = IPSEC_MAX_KEYBITS;
769 act->ipa_apply.ipp_espa_maxbits = IPSEC_MAX_KEYBITS;
770 act->ipa_apply.ipp_ah_maxbits = IPSEC_MAX_KEYBITS;
771 }
772
773 /*
774 * Sanity check action against reality, and shrink-wrap key sizes..
775 */
776 static boolean_t
spdsock_check_action(ipsec_act_t * act,boolean_t tunnel_polhead,int * diag,spd_stack_t * spds)777 spdsock_check_action(ipsec_act_t *act, boolean_t tunnel_polhead, int *diag,
778 spd_stack_t *spds)
779 {
780 if (tunnel_polhead && act->ipa_apply.ipp_use_unique) {
781 *diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
782 return (B_FALSE);
783 }
784 if ((act->ipa_type != IPSEC_ACT_APPLY) &&
785 (act->ipa_apply.ipp_use_ah ||
786 act->ipa_apply.ipp_use_esp ||
787 act->ipa_apply.ipp_use_espa ||
788 act->ipa_apply.ipp_use_se ||
789 act->ipa_apply.ipp_use_unique)) {
790 *diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
791 return (B_FALSE);
792 }
793 if ((act->ipa_type == IPSEC_ACT_APPLY) &&
794 !act->ipa_apply.ipp_use_ah &&
795 !act->ipa_apply.ipp_use_esp) {
796 *diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
797 return (B_FALSE);
798 }
799 return (ipsec_check_action(act, diag, spds->spds_netstack));
800 }
801
/*
 * We may be short a few error checks here..
 */
/*
 * Convert the SPD_EXT_ACTION extension into a kernel action vector of
 * *nactp ipsec_act_t's, allocated here and returned via *actpp.  The
 * caller owns the vector (free with ipsec_actvec_free()).  Returns
 * B_FALSE with *diag set on any failure; no memory is left allocated
 * on failure.
 */
static boolean_t
spdsock_ext_to_actvec(spd_ext_t **extv, ipsec_act_t **actpp, uint_t *nactp,
    int *diag, spd_stack_t *spds)
{
	struct spd_ext_actions *sactp =
	    (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
	ipsec_act_t act, *actp, *endactp;
	struct spd_attribute *attrp, *endattrp;
	uint64_t *endp;
	int nact;
	boolean_t tunnel_polhead;

	/*
	 * NOTE(review): this dereferences extv[SPD_EXT_RULE] without a
	 * NULL check; callers appear to guarantee a rule extension is
	 * present -- confirm before adding a new call path.
	 */
	tunnel_polhead = (extv[SPD_EXT_TUN_NAME] != NULL &&
	    (((struct spd_rule *)extv[SPD_EXT_RULE])->spd_rule_flags &
	    SPD_RULE_FLAG_TUNNEL));

	*actpp = NULL;
	*nactp = 0;

	if (sactp == NULL) {
		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
		return (B_FALSE);
	}

	/*
	 * Parse the "action" extension and convert into an action chain.
	 */

	nact = sactp->spd_actions_count;

	/* End of the attribute array; spd_actions_len is in 64-bit words. */
	endp = (uint64_t *)sactp;
	endp += sactp->spd_actions_len;
	endattrp = (struct spd_attribute *)endp;

	actp = kmem_alloc(sizeof (*actp) * nact, KM_NOSLEEP);
	if (actp == NULL) {
		*diag = SPD_DIAGNOSTIC_ADD_NO_MEM;
		return (B_FALSE);
	}
	*actpp = actp;
	*nactp = nact;
	endactp = actp + nact;

	/* "act" accumulates attributes until NEXT/END commits it. */
	spdsock_reset_act(&act);
	attrp = (struct spd_attribute *)(&sactp[1]);

	for (; attrp < endattrp; attrp++) {
		switch (attrp->spd_attr_tag) {
		case SPD_ATTR_NOP:
			break;

		case SPD_ATTR_EMPTY:
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_END:
			attrp = endattrp;	/* terminate the loop */
			/* FALLTHRU */
		case SPD_ATTR_NEXT:
			if (actp >= endactp) {
				*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
				goto fail;
			}
			if (!spdsock_check_action(&act, tunnel_polhead,
			    diag, spds))
				goto fail;
			/* Commit the accumulated action and start afresh. */
			*actp++ = act;
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_TYPE:
			if (!spd_convert_type(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_TYPE;
				goto fail;
			}
			break;

		case SPD_ATTR_FLAGS:
			if (!tunnel_polhead && extv[SPD_EXT_TUN_NAME] != NULL) {
				/*
				 * Set "sa unique" for transport-mode
				 * tunnels whether we want to or not.
				 */
				attrp->spd_attr_value |= SPD_APPLY_UNIQUE;
			}
			if (!spd_convert_flags(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_FLAGS;
				goto fail;
			}
			break;

		case SPD_ATTR_AH_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_ENCR:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_encr_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_esp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MINBITS:
			act.ipa_apply.ipp_espe_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MAXBITS:
			act.ipa_apply.ipp_espe_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MINBITS:
			act.ipa_apply.ipp_ah_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MAXBITS:
			act.ipa_apply.ipp_ah_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MINBITS:
			act.ipa_apply.ipp_espa_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MAXBITS:
			act.ipa_apply.ipp_espa_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_LIFE_SOFT_TIME:
		case SPD_ATTR_LIFE_HARD_TIME:
		case SPD_ATTR_LIFE_SOFT_BYTES:
		case SPD_ATTR_LIFE_HARD_BYTES:
			/* Lifetime attributes are accepted but ignored. */
			break;

		case SPD_ATTR_KM_PROTO:
			act.ipa_apply.ipp_km_proto = attrp->spd_attr_value;
			break;

		case SPD_ATTR_KM_COOKIE:
			act.ipa_apply.ipp_km_cookie = attrp->spd_attr_value;
			break;

		case SPD_ATTR_REPLAY_DEPTH:
			act.ipa_apply.ipp_replay_depth = attrp->spd_attr_value;
			break;
		}
	}
	/* Exactly nact actions must have been committed. */
	if (actp != endactp) {
		*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
		goto fail;
	}

	return (B_TRUE);
fail:
	ipsec_actvec_free(*actpp, nact);
	*actpp = NULL;
	return (B_FALSE);
}
974
/*
 * Bookkeeping for policies created while processing a single request,
 * so partially-applied work can be unwound on failure.
 */
typedef struct
{
	ipsec_policy_t *pol;	/* the created policy */
	int dir;		/* direction it was created for */
} tmprule_t;
980
/*
 * Create one ipsec_policy_t for address family af and direction dir from
 * rule/sel/actp, record it via *rp, and check it into polhead iph.
 * Returns 0, ENOMEM (creation failed), or EEXIST (duplicate policy).
 */
static int
mkrule(ipsec_policy_head_t *iph, struct spd_rule *rule,
    ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t af,
    tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
{
	ipsec_policy_t *pol;

	/* Pin the selector to exactly the requested address family. */
	sel->ipsl_valid &= ~(IPSL_IPV6|IPSL_IPV4);
	sel->ipsl_valid |= af;

	pol = ipsec_policy_create(sel, actp, nact, rule->spd_rule_priority,
	    index, spds->spds_netstack);
	if (pol == NULL)
		return (ENOMEM);

	/*
	 * Record the policy in the tmprule array *before* the duplicate
	 * check, so the caller's unwind path can free it even when we
	 * return EEXIST below (NOTE(review): confirm in the caller's
	 * failure handling).
	 */
	(*rp)->pol = pol;
	(*rp)->dir = dir;
	(*rp)++;

	if (!ipsec_check_policy(iph, pol, dir))
		return (EEXIST);

	rule->spd_rule_index = pol->ipsp_index;
	return (0);
}
1006
1007 static int
mkrulepair(ipsec_policy_head_t * iph,struct spd_rule * rule,ipsec_selkey_t * sel,ipsec_act_t * actp,int nact,uint_t dir,uint_t afs,tmprule_t ** rp,uint64_t * index,spd_stack_t * spds)1008 mkrulepair(ipsec_policy_head_t *iph, struct spd_rule *rule,
1009 ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t afs,
1010 tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
1011 {
1012 int error;
1013
1014 if (afs & IPSL_IPV4) {
1015 error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV4, rp,
1016 index, spds);
1017 if (error != 0)
1018 return (error);
1019 }
1020 if (afs & IPSL_IPV6) {
1021 error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV6, rp,
1022 index, spds);
1023 if (error != 0)
1024 return (error);
1025 }
1026 return (0);
1027 }
1028
1029
/*
 * Handle an SPD_ADDRULE request: create one ipsec_policy_t per requested
 * (direction, address-family) combination and, only after all of them have
 * been built and validated, commit them to policy head "iph" atomically
 * under iph_lock.
 *
 * "extv" is the request's parsed extension vector (from "mp"); "itp" is
 * non-NULL when "iph" belongs to an IPsec tunnel, in which case tunnel
 * flags are updated under itp_lock (taken before iph_lock; see the lock
 * ordering comment in spdsock_deleterule()).  On success the request is
 * echoed back on "q"; on failure an error plus diagnostic is sent instead.
 * Both outcomes are reported to the audit subsystem when auditing is on.
 */
static void
spdsock_addrule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	ipsec_act_t *actp;
	uint_t nact;
	int diag = 0, error, afs;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	/* Worst case: 2 directions x 2 address families = 4 rules. */
	tmprule_t rules[4], *rulep = &rules[0];
	boolean_t tunnel_mode, empty_itp, active;
	uint64_t *index = (itp == NULL) ? NULL : &itp->itp_next_policy_index;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	uint32_t auditing = AU_AUDITING();

	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (auditing) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_ADDRULE, cr,
			    spds->spds_netstack, ITP_NAME(itp), active,
			    SPD_DIAGNOSTIC_NO_RULE_EXT, cpid);
		}
		return;
	}

	tunnel_mode = (rule->spd_rule_flags & SPD_RULE_FLAG_TUNNEL);

	if (itp != NULL) {
		mutex_enter(&itp->itp_lock);
		ASSERT(itp->itp_policy == iph || itp->itp_inactive == iph);
		active = (itp->itp_policy == iph);
		if (ITP_P_ISACTIVE(itp, iph)) {
			/* Check for mix-and-match of tunnel/transport. */
			if ((tunnel_mode && !ITP_P_ISTUNNEL(itp, iph)) ||
			    (!tunnel_mode && ITP_P_ISTUNNEL(itp, iph))) {
				mutex_exit(&itp->itp_lock);
				spdsock_error(q, mp, EBUSY, 0);
				return;
			}
			empty_itp = B_FALSE;
		} else {
			/*
			 * First rule on this polhead; remember that so the
			 * flags can be rolled back if the add fails below.
			 */
			empty_itp = B_TRUE;
			itp->itp_flags = active ? ITPF_P_ACTIVE : ITPF_I_ACTIVE;
			if (tunnel_mode)
				itp->itp_flags |= active ? ITPF_P_TUNNEL :
				    ITPF_I_TUNNEL;
		}
	} else {
		empty_itp = B_FALSE;
	}

	/* Callers may not pick their own rule index. */
	if (rule->spd_rule_index != 0) {
		diag = SPD_DIAGNOSTIC_INVALID_RULE_INDEX;
		error = EINVAL;
		goto fail2;
	}

	if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
		error = EINVAL;
		goto fail2;
	}

	if (itp != NULL) {
		if (tunnel_mode) {
			if (sel.ipsl_valid &
			    (IPSL_REMOTE_PORT | IPSL_LOCAL_PORT)) {
				itp->itp_flags |= active ?
				    ITPF_P_PER_PORT_SECURITY :
				    ITPF_I_PER_PORT_SECURITY;
			}
		} else {
			/*
			 * For now, we don't allow transport-mode on a tunnel
			 * with ANY specific selectors.  Bail if we have such
			 * a request.
			 */
			if (sel.ipsl_valid & IPSL_WILDCARD) {
				diag = SPD_DIAGNOSTIC_NO_TUNNEL_SELECTORS;
				error = EINVAL;
				goto fail2;
			}
		}
	}

	if (!spdsock_ext_to_actvec(extv, &actp, &nact, &diag, spds)) {
		error = EINVAL;
		goto fail2;
	}
	/*
	 * If no addresses were specified, add both.
	 */
	afs = sel.ipsl_valid & (IPSL_IPV6|IPSL_IPV4);
	if (afs == 0)
		afs = (IPSL_IPV6|IPSL_IPV4);

	rw_enter(&iph->iph_lock, RW_WRITER);

	if (rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_OUTBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	if (rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_INBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	/* All rules built OK; commit them to the policy head. */
	while ((--rulep) >= &rules[0]) {
		ipsec_enter_policy(iph, rulep->pol, rulep->dir,
		    spds->spds_netstack);
	}
	rw_exit(&iph->iph_lock);
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);

	ipsec_actvec_free(actp, nact);
	spd_echo(q, mp);
	if (auditing) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
		    ITP_NAME(itp), active, 0, cpid);
	}
	return;

fail:
	/* Release references on the rules built before the failure. */
	rw_exit(&iph->iph_lock);
	while ((--rulep) >= &rules[0])
		IPPOL_REFRELE(rulep->pol);
	ipsec_actvec_free(actp, nact);
fail2:
	if (itp != NULL) {
		/* Roll back the flags if this polhead was empty on entry. */
		if (empty_itp)
			itp->itp_flags = 0;
		mutex_exit(&itp->itp_lock);
	}
	spdsock_error(q, mp, error, diag);
	if (auditing) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
		    ITP_NAME(itp), active, error, cpid);
	}
}
1193
/*
 * Handle an SPD_DELETERULE request: remove a rule from "iph" either by
 * its index (spd_rule_index != 0) or by matching selector plus requested
 * direction(s).  If this empties a tunnel's policy head, the matching
 * ITPF_* flags are cleared.  Echoes the request on success, sends an
 * error otherwise; both outcomes are audited when auditing is enabled.
 */
void
spdsock_deleterule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	int err, diag = 0;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	uint32_t auditing = AU_AUDITING();

	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (auditing) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_DELETERULE, cr, ns,
			    ITP_NAME(itp), active, SPD_DIAGNOSTIC_NO_RULE_EXT,
			    cpid);
		}
		return;
	}

	/*
	 * Must enter itp_lock first to avoid deadlock.  See tun.c's
	 * set_sec_simple() for the other case of itp_lock and iph_lock.
	 */
	if (itp != NULL)
		mutex_enter(&itp->itp_lock);

	if (rule->spd_rule_index != 0) {
		/* Delete-by-index. */
		if (ipsec_policy_delete_index(iph, rule->spd_rule_index, ns) !=
		    0) {
			err = ESRCH;
			goto fail;
		}
	} else {
		/* Delete-by-selector, once per requested direction. */
		if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
			err = EINVAL;	/* diag already set... */
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_INBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_OUTBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}
	}

	if (itp != NULL) {
		ASSERT(iph == itp->itp_policy || iph == itp->itp_inactive);
		rw_enter(&iph->iph_lock, RW_READER);
		/* Clear the tunnel flags if the last rule just went away. */
		if (avl_numnodes(&iph->iph_rulebyid) == 0) {
			if (iph == itp->itp_policy)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
		}
		/* Can exit locks in any order. */
		rw_exit(&iph->iph_lock);
		mutex_exit(&itp->itp_lock);
	}
	spd_echo(q, mp);
	if (auditing) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, 0, cpid);
	}
	return;
fail:
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);
	spdsock_error(q, mp, err, diag);
	if (auditing) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, err, cpid);
	}
}
1296
/* Do NOT consume a reference to itp. */
/* ARGSUSED */
/*
 * Swap one tunnel's active and inactive policy heads (and the matching
 * ITPF_* flag bits) under itp_lock, then re-evaluate the tunnel's MTU
 * since the effective policy just changed.  "ignoreme" exists only to
 * match the itp_walk() callback signature.
 */
static void
spdsock_flip_node(ipsec_tun_pol_t *itp, void *ignoreme, netstack_t *ns)
{
	mutex_enter(&itp->itp_lock);
	ITPF_SWAP(itp->itp_flags);
	ipsec_swap_policy(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
	/* SPD_FLIP is worth a tunnel MTU check. */
	update_iptun_policy(itp);
}
1309
/*
 * Handle SPD_FLIP: swap active and inactive policy.  With an empty tunnel
 * name (or no spd_if extension at all) the global policy is swapped —
 * and in the empty-name case every tunnel polhead is flipped as well via
 * itp_walk().  With a specific tunnel name, only that tunnel is flipped.
 * Every outcome is audited when auditing is enabled.
 */
void
spdsock_flip(queue_t *q, mblk_t *mp, spd_if_t *tunname)
{
	char *tname;
	ipsec_tun_pol_t *itp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	uint32_t auditing = AU_AUDITING();

	if (tunname != NULL) {
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* can't fail */
			ipsec_swap_global_policy(ns);
			if (auditing) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    NULL, active, 0, cpid);
			}
			/* Empty name means "all tunnels", too. */
			itp_walk(spdsock_flip_node, NULL, ns);
			if (auditing) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    "all tunnels", active, 0, cpid);
			}
		} else {
			itp = get_tunnel_policy(tname, ns);
			if (itp == NULL) {
				/* Better idea for "tunnel not found"? */
				spdsock_error(q, mp, ESRCH, 0);
				if (auditing) {
					boolean_t active;
					spd_msg_t *spmsg =
					    (spd_msg_t *)mp->b_rptr;
					cred_t *cr;
					pid_t cpid;

					cr = msg_getcred(mp, &cpid);
					active = (spmsg->spd_msg_spdid ==
					    SPD_ACTIVE);
					audit_pf_policy(SPD_FLIP, cr, ns,
					    ITP_NAME(itp), active,
					    ESRCH, cpid);
				}
				return;
			}
			spdsock_flip_node(itp, NULL, ns);
			if (auditing) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    ITP_NAME(itp), active, 0, cpid);
			}
			/* Drop the hold from get_tunnel_policy(). */
			ITP_REFRELE(itp, ns);
		}
	} else {
		/* No interface extension at all: global policy only. */
		ipsec_swap_global_policy(ns);	/* can't fail */
		if (auditing) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLIP, cr,
			    ns, NULL, active, 0, cpid);
		}
	}
	spd_echo(q, mp);
}
1398
/*
 * Unimplemented feature
 */
/* ARGSUSED */
/*
 * SPD_LOOKUP is not supported; always reply with EINVAL.
 */
static void
spdsock_lookup(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	spdsock_error(q, mp, EINVAL, 0);
}
1409
1410
/*
 * Allocate and fill an SPD message holding a single SPD_EXT_RULESET
 * extension: it echoes the request header from "req" and reports "count"
 * rules, "error", and the generation number of "iph".  Used for both the
 * start-of-dump record (count == 0) and the end-of-dump record.
 * Returns NULL on allocation failure.  Caller holds iph_lock as reader.
 */
static mblk_t *
spdsock_dump_ruleset(mblk_t *req, ipsec_policy_head_t *iph,
    uint32_t count, uint16_t error)
{
	size_t len = sizeof (spd_ruleset_ext_t) + sizeof (spd_msg_t);
	spd_msg_t *msg;
	spd_ruleset_ext_t *ruleset;
	mblk_t *m = allocb(len, BPRI_HI);

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	if (m == NULL) {
		return (NULL);
	}
	msg = (spd_msg_t *)m->b_rptr;
	ruleset = (spd_ruleset_ext_t *)(&msg[1]);

	m->b_wptr = (uint8_t *)&ruleset[1];

	/* Echo the request header, then overwrite length and errno. */
	*msg = *(spd_msg_t *)(req->b_rptr);
	msg->spd_msg_len = SPD_8TO64(len);
	msg->spd_msg_errno = error;

	ruleset->spd_ruleset_len = SPD_8TO64(sizeof (*ruleset));
	ruleset->spd_ruleset_type = SPD_EXT_RULESET;
	ruleset->spd_ruleset_count = count;
	ruleset->spd_ruleset_version = iph->iph_gen;
	return (m);
}
1440
/*
 * Terminate an in-progress dump: build the final ruleset record (carrying
 * "error" and the total dumped-rule count), then release all dump state —
 * the policy head reference, any tunnel policy reference, and the saved
 * request mblk.  Returns the final record, or NULL if it could not be
 * allocated (dump state is torn down either way).
 */
static mblk_t *
spdsock_dump_finish(spdsock_t *ss, int error)
{
	mblk_t *m;
	ipsec_policy_head_t *iph = ss->spdsock_dump_head;
	mblk_t *req = ss->spdsock_dump_req;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	rw_enter(&iph->iph_lock, RW_READER);
	m = spdsock_dump_ruleset(req, iph, ss->spdsock_dump_count, error);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ns);
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ns);
		ss->spdsock_itp = NULL;
	}
	ss->spdsock_dump_req = NULL;
	freemsg(req);

	return (m);
}
1462
1463 /*
1464 * Rule encoding functions.
1465 * We do a two-pass encode.
1466 * If base != NULL, fill in encoded rule part starting at base+offset.
1467 * Always return "offset" plus length of to-be-encoded data.
1468 */
1469 static uint_t
spdsock_encode_typecode(uint8_t * base,uint_t offset,uint8_t type,uint8_t type_end,uint8_t code,uint8_t code_end)1470 spdsock_encode_typecode(uint8_t *base, uint_t offset, uint8_t type,
1471 uint8_t type_end, uint8_t code, uint8_t code_end)
1472 {
1473 struct spd_typecode *tcp;
1474
1475 ASSERT(ALIGNED64(offset));
1476
1477 if (base != NULL) {
1478 tcp = (struct spd_typecode *)(base + offset);
1479 tcp->spd_typecode_len = SPD_8TO64(sizeof (*tcp));
1480 tcp->spd_typecode_exttype = SPD_EXT_ICMP_TYPECODE;
1481 tcp->spd_typecode_code = code;
1482 tcp->spd_typecode_type = type;
1483 tcp->spd_typecode_type_end = type_end;
1484 tcp->spd_typecode_code_end = code_end;
1485 }
1486 offset += sizeof (*tcp);
1487
1488 ASSERT(ALIGNED64(offset));
1489
1490 return (offset);
1491 }
1492
1493 static uint_t
spdsock_encode_proto(uint8_t * base,uint_t offset,uint8_t proto)1494 spdsock_encode_proto(uint8_t *base, uint_t offset, uint8_t proto)
1495 {
1496 struct spd_proto *spp;
1497
1498 ASSERT(ALIGNED64(offset));
1499
1500 if (base != NULL) {
1501 spp = (struct spd_proto *)(base + offset);
1502 spp->spd_proto_len = SPD_8TO64(sizeof (*spp));
1503 spp->spd_proto_exttype = SPD_EXT_PROTO;
1504 spp->spd_proto_number = proto;
1505 spp->spd_proto_reserved1 = 0;
1506 spp->spd_proto_reserved2 = 0;
1507 }
1508 offset += sizeof (*spp);
1509
1510 ASSERT(ALIGNED64(offset));
1511
1512 return (offset);
1513 }
1514
1515 static uint_t
spdsock_encode_port(uint8_t * base,uint_t offset,uint16_t ext,uint16_t port)1516 spdsock_encode_port(uint8_t *base, uint_t offset, uint16_t ext, uint16_t port)
1517 {
1518 struct spd_portrange *spp;
1519
1520 ASSERT(ALIGNED64(offset));
1521
1522 if (base != NULL) {
1523 spp = (struct spd_portrange *)(base + offset);
1524 spp->spd_ports_len = SPD_8TO64(sizeof (*spp));
1525 spp->spd_ports_exttype = ext;
1526 spp->spd_ports_minport = port;
1527 spp->spd_ports_maxport = port;
1528 }
1529 offset += sizeof (*spp);
1530
1531 ASSERT(ALIGNED64(offset));
1532
1533 return (offset);
1534 }
1535
1536 static uint_t
spdsock_encode_addr(uint8_t * base,uint_t offset,uint16_t ext,const ipsec_selkey_t * sel,const ipsec_addr_t * addr,uint_t pfxlen)1537 spdsock_encode_addr(uint8_t *base, uint_t offset, uint16_t ext,
1538 const ipsec_selkey_t *sel, const ipsec_addr_t *addr, uint_t pfxlen)
1539 {
1540 struct spd_address *sae;
1541 ipsec_addr_t *spdaddr;
1542 uint_t start = offset;
1543 uint_t addrlen;
1544 uint_t af;
1545
1546 if (sel->ipsl_valid & IPSL_IPV4) {
1547 af = AF_INET;
1548 addrlen = IP_ADDR_LEN;
1549 } else {
1550 af = AF_INET6;
1551 addrlen = IPV6_ADDR_LEN;
1552 }
1553
1554 ASSERT(ALIGNED64(offset));
1555
1556 if (base != NULL) {
1557 sae = (struct spd_address *)(base + offset);
1558 sae->spd_address_exttype = ext;
1559 sae->spd_address_af = af;
1560 sae->spd_address_prefixlen = pfxlen;
1561 sae->spd_address_reserved2 = 0;
1562
1563 spdaddr = (ipsec_addr_t *)(&sae[1]);
1564 bcopy(addr, spdaddr, addrlen);
1565 }
1566 offset += sizeof (*sae);
1567 addrlen = roundup(addrlen, sizeof (uint64_t));
1568 offset += addrlen;
1569
1570 ASSERT(ALIGNED64(offset));
1571
1572 if (base != NULL)
1573 sae->spd_address_len = SPD_8TO64(offset - start);
1574 return (offset);
1575 }
1576
1577 static uint_t
spdsock_encode_sel(uint8_t * base,uint_t offset,const ipsec_sel_t * sel)1578 spdsock_encode_sel(uint8_t *base, uint_t offset, const ipsec_sel_t *sel)
1579 {
1580 const ipsec_selkey_t *selkey = &sel->ipsl_key;
1581
1582 if (selkey->ipsl_valid & IPSL_PROTOCOL)
1583 offset = spdsock_encode_proto(base, offset, selkey->ipsl_proto);
1584 if (selkey->ipsl_valid & IPSL_LOCAL_PORT)
1585 offset = spdsock_encode_port(base, offset, SPD_EXT_LCLPORT,
1586 selkey->ipsl_lport);
1587 if (selkey->ipsl_valid & IPSL_REMOTE_PORT)
1588 offset = spdsock_encode_port(base, offset, SPD_EXT_REMPORT,
1589 selkey->ipsl_rport);
1590 if (selkey->ipsl_valid & IPSL_REMOTE_ADDR)
1591 offset = spdsock_encode_addr(base, offset, SPD_EXT_REMADDR,
1592 selkey, &selkey->ipsl_remote, selkey->ipsl_remote_pfxlen);
1593 if (selkey->ipsl_valid & IPSL_LOCAL_ADDR)
1594 offset = spdsock_encode_addr(base, offset, SPD_EXT_LCLADDR,
1595 selkey, &selkey->ipsl_local, selkey->ipsl_local_pfxlen);
1596 if (selkey->ipsl_valid & IPSL_ICMP_TYPE) {
1597 offset = spdsock_encode_typecode(base, offset,
1598 selkey->ipsl_icmp_type, selkey->ipsl_icmp_type_end,
1599 (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1600 selkey->ipsl_icmp_code : 255,
1601 (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1602 selkey->ipsl_icmp_code_end : 255);
1603 }
1604 return (offset);
1605 }
1606
1607 static uint_t
spdsock_encode_actattr(uint8_t * base,uint_t offset,uint32_t tag,uint32_t value)1608 spdsock_encode_actattr(uint8_t *base, uint_t offset, uint32_t tag,
1609 uint32_t value)
1610 {
1611 struct spd_attribute *attr;
1612
1613 ASSERT(ALIGNED64(offset));
1614
1615 if (base != NULL) {
1616 attr = (struct spd_attribute *)(base + offset);
1617 attr->spd_attr_tag = tag;
1618 attr->spd_attr_value = value;
1619 }
1620 offset += sizeof (struct spd_attribute);
1621
1622 ASSERT(ALIGNED64(offset));
1623
1624 return (offset);
1625 }
1626
1627
1628 #define EMIT(t, v) offset = spdsock_encode_actattr(base, offset, (t), (v))
1629
1630 static uint_t
spdsock_encode_action(uint8_t * base,uint_t offset,const ipsec_action_t * ap)1631 spdsock_encode_action(uint8_t *base, uint_t offset, const ipsec_action_t *ap)
1632 {
1633 const struct ipsec_act *act = &(ap->ipa_act);
1634 uint_t flags;
1635
1636 EMIT(SPD_ATTR_EMPTY, 0);
1637 switch (act->ipa_type) {
1638 case IPSEC_ACT_DISCARD:
1639 case IPSEC_ACT_REJECT:
1640 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_DROP);
1641 break;
1642 case IPSEC_ACT_BYPASS:
1643 case IPSEC_ACT_CLEAR:
1644 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_PASS);
1645 break;
1646
1647 case IPSEC_ACT_APPLY:
1648 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_IPSEC);
1649 flags = 0;
1650 if (act->ipa_apply.ipp_use_ah)
1651 flags |= SPD_APPLY_AH;
1652 if (act->ipa_apply.ipp_use_esp)
1653 flags |= SPD_APPLY_ESP;
1654 if (act->ipa_apply.ipp_use_espa)
1655 flags |= SPD_APPLY_ESPA;
1656 if (act->ipa_apply.ipp_use_se)
1657 flags |= SPD_APPLY_SE;
1658 if (act->ipa_apply.ipp_use_unique)
1659 flags |= SPD_APPLY_UNIQUE;
1660 EMIT(SPD_ATTR_FLAGS, flags);
1661 if (flags & SPD_APPLY_AH) {
1662 EMIT(SPD_ATTR_AH_AUTH, act->ipa_apply.ipp_auth_alg);
1663 EMIT(SPD_ATTR_AH_MINBITS,
1664 act->ipa_apply.ipp_ah_minbits);
1665 EMIT(SPD_ATTR_AH_MAXBITS,
1666 act->ipa_apply.ipp_ah_maxbits);
1667 }
1668 if (flags & SPD_APPLY_ESP) {
1669 EMIT(SPD_ATTR_ESP_ENCR, act->ipa_apply.ipp_encr_alg);
1670 EMIT(SPD_ATTR_ENCR_MINBITS,
1671 act->ipa_apply.ipp_espe_minbits);
1672 EMIT(SPD_ATTR_ENCR_MAXBITS,
1673 act->ipa_apply.ipp_espe_maxbits);
1674 if (flags & SPD_APPLY_ESPA) {
1675 EMIT(SPD_ATTR_ESP_AUTH,
1676 act->ipa_apply.ipp_esp_auth_alg);
1677 EMIT(SPD_ATTR_ESPA_MINBITS,
1678 act->ipa_apply.ipp_espa_minbits);
1679 EMIT(SPD_ATTR_ESPA_MAXBITS,
1680 act->ipa_apply.ipp_espa_maxbits);
1681 }
1682 }
1683 if (act->ipa_apply.ipp_km_proto != 0)
1684 EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_proto);
1685 if (act->ipa_apply.ipp_km_cookie != 0)
1686 EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_cookie);
1687 if (act->ipa_apply.ipp_replay_depth != 0)
1688 EMIT(SPD_ATTR_REPLAY_DEPTH,
1689 act->ipa_apply.ipp_replay_depth);
1690 /* Add more here */
1691 break;
1692 }
1693
1694 return (offset);
1695 }
1696
1697 static uint_t
spdsock_encode_action_list(uint8_t * base,uint_t offset,const ipsec_action_t * ap)1698 spdsock_encode_action_list(uint8_t *base, uint_t offset,
1699 const ipsec_action_t *ap)
1700 {
1701 struct spd_ext_actions *act;
1702 uint_t nact = 0;
1703 uint_t start = offset;
1704
1705 ASSERT(ALIGNED64(offset));
1706
1707 if (base != NULL) {
1708 act = (struct spd_ext_actions *)(base + offset);
1709 act->spd_actions_len = 0;
1710 act->spd_actions_exttype = SPD_EXT_ACTION;
1711 act->spd_actions_count = 0;
1712 act->spd_actions_reserved = 0;
1713 }
1714
1715 offset += sizeof (*act);
1716
1717 ASSERT(ALIGNED64(offset));
1718
1719 while (ap != NULL) {
1720 offset = spdsock_encode_action(base, offset, ap);
1721 ap = ap->ipa_next;
1722 nact++;
1723 if (ap != NULL) {
1724 EMIT(SPD_ATTR_NEXT, 0);
1725 }
1726 }
1727 EMIT(SPD_ATTR_END, 0);
1728
1729 ASSERT(ALIGNED64(offset));
1730
1731 if (base != NULL) {
1732 act->spd_actions_count = nact;
1733 act->spd_actions_len = SPD_8TO64(offset - start);
1734 }
1735
1736 return (offset);
1737 }
1738
1739 #undef EMIT
1740
1741 /* ARGSUSED */
1742 static uint_t
spdsock_rule_flags(uint_t dir,uint_t af)1743 spdsock_rule_flags(uint_t dir, uint_t af)
1744 {
1745 uint_t flags = 0;
1746
1747 if (dir == IPSEC_TYPE_INBOUND)
1748 flags |= SPD_RULE_FLAG_INBOUND;
1749 if (dir == IPSEC_TYPE_OUTBOUND)
1750 flags |= SPD_RULE_FLAG_OUTBOUND;
1751
1752 return (flags);
1753 }
1754
1755
/*
 * Encode a complete SPD_DUMP record for one rule: spd_msg header,
 * spd_rule extension, an optional SPD_EXT_TUN_NAME (when the polhead
 * belongs to a tunnel), then the selector and action extensions.
 * Two-pass: with base == NULL only the total size is computed; otherwise
 * the record is written at base + offset.  Returns the updated
 * 64-bit-aligned offset.
 */
static uint_t
spdsock_encode_rule_head(uint8_t *base, uint_t offset, spd_msg_t *req,
    const ipsec_policy_t *rule, uint_t dir, uint_t af, char *name,
    boolean_t tunnel)
{
	struct spd_msg *spmsg;
	struct spd_rule *spr;
	spd_if_t *sid;

	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spmsg = (struct spd_msg *)(base + offset);
		bzero(spmsg, sizeof (*spmsg));
		spmsg->spd_msg_version = PF_POLICY_V1;
		spmsg->spd_msg_type = SPD_DUMP;
		/* Echo the requester's sequence number and pid. */
		spmsg->spd_msg_seq = req->spd_msg_seq;
		spmsg->spd_msg_pid = req->spd_msg_pid;
	}
	offset += sizeof (struct spd_msg);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spr = (struct spd_rule *)(base + offset);
		spr->spd_rule_type = SPD_EXT_RULE;
		spr->spd_rule_priority = rule->ipsp_prio;
		spr->spd_rule_flags = spdsock_rule_flags(dir, af);
		if (tunnel)
			spr->spd_rule_flags |= SPD_RULE_FLAG_TUNNEL;
		spr->spd_rule_unused = 0;
		spr->spd_rule_len = SPD_8TO64(sizeof (*spr));
		spr->spd_rule_index = rule->ipsp_index;
	}
	offset += sizeof (struct spd_rule);

	/*
	 * If we have an interface name (i.e. if this policy head came from
	 * a tunnel), add the SPD_EXT_TUN_NAME extension.
	 */
	if (name != NULL) {

		ASSERT(ALIGNED64(offset));

		if (base != NULL) {
			sid = (spd_if_t *)(base + offset);
			sid->spd_if_exttype = SPD_EXT_TUN_NAME;
			/*
			 * NOTE(review): the "- 4" appears to assume spd_if_t
			 * itself embeds the first 4 bytes of spd_if_name;
			 * confirm against the spd_if_t definition in
			 * net/pfpolicy.h.
			 */
			sid->spd_if_len = SPD_8TO64(sizeof (spd_if_t) +
			    roundup((strlen(name) - 4), 8));
			(void) strlcpy((char *)sid->spd_if_name, name,
			    LIFNAMSIZ);
		}

		offset += sizeof (spd_if_t) + roundup((strlen(name) - 4), 8);
	}

	offset = spdsock_encode_sel(base, offset, rule->ipsp_sel);
	offset = spdsock_encode_action_list(base, offset, rule->ipsp_act);

	ASSERT(ALIGNED64(offset));

	/* Back-patch the message length now that the total is known. */
	if (base != NULL) {
		spmsg->spd_msg_len = SPD_8TO64(offset - start);
	}
	return (offset);
}
1824
1825 /* ARGSUSED */
1826 static mblk_t *
spdsock_encode_rule(mblk_t * req,const ipsec_policy_t * rule,uint_t dir,uint_t af,char * name,boolean_t tunnel)1827 spdsock_encode_rule(mblk_t *req, const ipsec_policy_t *rule,
1828 uint_t dir, uint_t af, char *name, boolean_t tunnel)
1829 {
1830 mblk_t *m;
1831 uint_t len;
1832 spd_msg_t *mreq = (spd_msg_t *)req->b_rptr;
1833
1834 /*
1835 * Figure out how much space we'll need.
1836 */
1837 len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af, name,
1838 tunnel);
1839
1840 /*
1841 * Allocate mblk.
1842 */
1843 m = allocb(len, BPRI_HI);
1844 if (m == NULL)
1845 return (NULL);
1846
1847 /*
1848 * Fill it in..
1849 */
1850 m->b_wptr = m->b_rptr + len;
1851 bzero(m->b_rptr, len);
1852 (void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, dir, af,
1853 name, tunnel);
1854 return (m);
1855 }
1856
/*
 * Account for one dumped rule and advance the per-socket cursor to the
 * next rule on the same hash chain (NULL once the chain is exhausted).
 * Returns "cur" itself so the caller can encode it.
 */
static ipsec_policy_t *
spdsock_dump_next_in_chain(spdsock_t *ss, ipsec_policy_head_t *iph,
    ipsec_policy_t *cur)
{
	ASSERT(RW_READ_HELD(&iph->iph_lock));

	ss->spdsock_dump_count++;
	ss->spdsock_dump_cur_rule = cur->ipsp_hash.hash_next;
	return (cur);
}
1867
/*
 * Return the next rule to dump from "iph", advancing the per-socket
 * cursor state (current rule / hash chain / address family / type).
 * Iteration order: remaining rules on the current hash chain, then each
 * hash chain of the current type's root, then that root's per-AF
 * non-hashed lists, then the next policy type.  Returns NULL when the
 * whole policy head is exhausted.
 */
static ipsec_policy_t *
spdsock_dump_next_rule(spdsock_t *ss, ipsec_policy_head_t *iph)
{
	ipsec_policy_t *cur;
	ipsec_policy_root_t *ipr;
	int chain, nchains, type, af;

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	/* Fast path: still walking a hash chain from last time. */
	cur = ss->spdsock_dump_cur_rule;

	if (cur != NULL)
		return (spdsock_dump_next_in_chain(ss, iph, cur));

	type = ss->spdsock_dump_cur_type;

next:
	chain = ss->spdsock_dump_cur_chain;
	ipr = &iph->iph_root[type];
	nchains = ipr->ipr_nchains;

	while (chain < nchains) {
		cur = ipr->ipr_hash[chain].hash_head;
		chain++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_chain = chain;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}
	ss->spdsock_dump_cur_chain = nchains;

	/* Hash chains done; walk the non-hashed per-AF lists. */
	af = ss->spdsock_dump_cur_af;
	while (af < IPSEC_NAF) {
		cur = ipr->ipr_nonhash[af];
		af++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_af = af;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}

	/* This type is finished; move on to the next one, if any. */
	type++;
	if (type >= IPSEC_NTYPES)
		return (NULL);

	ss->spdsock_dump_cur_chain = 0;
	ss->spdsock_dump_cur_type = type;
	ss->spdsock_dump_cur_af = IPSEC_AF_V4;
	goto next;

}
1919
/*
 * If we're done with one policy head, but have more to go, we iterate through
 * another IPsec tunnel policy head (itp).  Return NULL if it is an error
 * worthy of returning EAGAIN via PF_POLICY (i.e. the set of tunnel
 * polheads changed underneath the dump).  On success the returned itp is
 * refheld and recorded in ss->spdsock_itp; the previously held one, if
 * any, is released.
 */
static ipsec_tun_pol_t *
spdsock_dump_iterate_next_tunnel(spdsock_t *ss, ipsec_stack_t *ipss)
{
	ipsec_tun_pol_t *itp;

	ASSERT(RW_READ_HELD(&ipss->ipsec_tunnel_policy_lock));
	if (ipss->ipsec_tunnel_policy_gen > ss->spdsock_dump_tun_gen) {
		/* Oops, state of the tunnel polheads changed. */
		itp = NULL;
	} else if (ss->spdsock_itp == NULL) {
		/* Just finished global, find first node. */
		itp = avl_first(&ipss->ipsec_tunnel_policies);
	} else {
		/* We just finished current polhead, find the next one. */
		itp = AVL_NEXT(&ipss->ipsec_tunnel_policies, ss->spdsock_itp);
	}
	if (itp != NULL) {
		ITP_REFHOLD(itp);
	}
	/* Drop the hold on the polhead we just finished with, if any. */
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ipss->ipsec_netstack);
	}
	ss->spdsock_itp = itp;
	return (itp);
}
1950
/*
 * Produce the next record for an in-progress dump: either an encoded
 * rule, or — when all polheads are exhausted or the policy generation
 * changed underneath us — a finish record (end-of-dump, EAGAIN, or
 * ENOMEM).  Returns NULL only if even the finish record cannot be
 * allocated.
 */
static mblk_t *
spdsock_dump_next_record(spdsock_t *ss)
{
	ipsec_policy_head_t *iph;
	ipsec_policy_t *rule;
	mblk_t *m;
	ipsec_tun_pol_t *itp;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	iph = ss->spdsock_dump_head;

	ASSERT(iph != NULL);

	rw_enter(&iph->iph_lock, RW_READER);

	/* Policy changed mid-dump: tell the client to retry. */
	if (iph->iph_gen != ss->spdsock_dump_gen) {
		rw_exit(&iph->iph_lock);
		return (spdsock_dump_finish(ss, EAGAIN));
	}

	while ((rule = spdsock_dump_next_rule(ss, iph)) == NULL) {
		rw_exit(&iph->iph_lock);
		if (--(ss->spdsock_dump_remaining_polheads) == 0)
			return (spdsock_dump_finish(ss, 0));

		/*
		 * If we reach here, we have more policy heads (tunnel
		 * entries) to dump.  Let's reset to a new policy head
		 * and get some more rules.
		 *
		 * An empty policy head will have spdsock_dump_next_rule()
		 * return NULL, and we loop (while dropping the number of
		 * remaining polheads).  If we loop to 0, we finish.  We
		 * keep looping until we hit 0 or until we have a rule to
		 * encode.
		 *
		 * NOTE: No need for ITP_REF*() macros here as we're only
		 * going after and refholding the policy head itself.
		 */
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		itp = spdsock_dump_iterate_next_tunnel(ss, ipss);
		if (itp == NULL) {
			rw_exit(&ipss->ipsec_tunnel_policy_lock);
			return (spdsock_dump_finish(ss, EAGAIN));
		}

		/* Reset other spdsock_dump thingies. */
		IPPH_REFRELE(ss->spdsock_dump_head, ns);
		if (ss->spdsock_dump_active) {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_P_TUNNEL;
			iph = itp->itp_policy;
		} else {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_I_TUNNEL;
			iph = itp->itp_inactive;
		}
		IPPH_REFHOLD(iph);
		rw_exit(&ipss->ipsec_tunnel_policy_lock);

		rw_enter(&iph->iph_lock, RW_READER);
		RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);
	}

	m = spdsock_encode_rule(ss->spdsock_dump_req, rule,
	    ss->spdsock_dump_cur_type, ss->spdsock_dump_cur_af,
	    (ss->spdsock_itp == NULL) ? NULL : ss->spdsock_itp->itp_name,
	    ss->spdsock_dump_tunnel);
	rw_exit(&iph->iph_lock);

	if (m == NULL)
		return (spdsock_dump_finish(ss, ENOMEM));
	return (m);
}
2027
/*
 * Dump records until we run into flow-control back-pressure.
 *
 * Each record is sent upstream wrapped in an M_PROTO T_DATA_IND.
 * NOTE(review): the wrapper is allocated and advanced using
 * sizeof (struct T_data_req) but filled in as a struct T_data_ind —
 * presumably the two TPI structures have the same size; confirm against
 * <sys/tihdr.h>.
 */
static void
spdsock_dump_some(queue_t *q, spdsock_t *ss)
{
	mblk_t *m, *dataind;

	while ((ss->spdsock_dump_req != NULL) && canputnext(q)) {
		m = spdsock_dump_next_record(ss);
		/* NULL here means allocation failed; give up for now. */
		if (m == NULL)
			return;
		dataind = allocb(sizeof (struct T_data_req), BPRI_HI);
		if (dataind == NULL) {
			freemsg(m);
			return;
		}
		dataind->b_cont = m;
		dataind->b_wptr += sizeof (struct T_data_req);
		((struct T_data_ind *)dataind->b_rptr)->PRIM_type = T_DATA_IND;
		((struct T_data_ind *)dataind->b_rptr)->MORE_flag = 0;
		dataind->b_datap->db_type = M_PROTO;
		putnext(q, dataind);
	}
}
2053
/*
 * Start dumping.
 * Format a start-of-dump record, and set up the stream and kick the rsrv
 * procedure to continue the job..
 *
 * "iph" may be a real policy head, or the ALL_ACTIVE_POLHEADS /
 * ALL_INACTIVE_POLHEADS sentinels, which request the global policy plus
 * every tunnel polhead.
 */
/* ARGSUSED */
static void
spdsock_dump(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp)
{
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;
	mblk_t *mr;

	/* spdsock_open() already set spdsock_itp to NULL. */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		/* Global policy plus one polhead per tunnel. */
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		ss->spdsock_dump_remaining_polheads = 1 +
		    avl_numnodes(&ipss->ipsec_tunnel_policies);
		ss->spdsock_dump_tun_gen = ipss->ipsec_tunnel_policy_gen;
		rw_exit(&ipss->ipsec_tunnel_policy_lock);
		if (iph == ALL_ACTIVE_POLHEADS) {
			iph = ipsec_system_policy(ns);
			ss->spdsock_dump_active = B_TRUE;
		} else {
			iph = ipsec_inactive_policy(ns);
			ss->spdsock_dump_active = B_FALSE;
		}
		ASSERT(ss->spdsock_itp == NULL);
	} else {
		ss->spdsock_dump_remaining_polheads = 1;
	}

	rw_enter(&iph->iph_lock, RW_READER);

	/* Start-of-dump record: count == 0, error == 0. */
	mr = spdsock_dump_ruleset(mp, iph, 0, 0);

	if (!mr) {
		rw_exit(&iph->iph_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	ss->spdsock_dump_req = mp;
	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);

	rw_exit(&iph->iph_lock);

	qreply(q, mr);
	/* Let the service procedure stream out the rule records. */
	qenable(OTHERQ(q));
}
2105
/* Do NOT consume a reference to ITP. */
/*
 * itp_walk() callback for SPD_CLONE: mark one tunnel's flags as cloned
 * and copy its active polhead over its inactive one, all under itp_lock.
 * "ep" points to an accumulated error; once any clone fails, subsequent
 * nodes are skipped.
 */
void
spdsock_clone_node(ipsec_tun_pol_t *itp, void *ep, netstack_t *ns)
{
	int *errptr = (int *)ep;

	if (*errptr != 0)
		return;	/* We've failed already for some reason. */
	mutex_enter(&itp->itp_lock);
	ITPF_CLONE(itp->itp_flags);
	*errptr = ipsec_copy_polhead(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
}
2119
2120 void
spdsock_clone(queue_t * q,mblk_t * mp,spd_if_t * tunname)2121 spdsock_clone(queue_t *q, mblk_t *mp, spd_if_t *tunname)
2122 {
2123 int error;
2124 char *tname;
2125 ipsec_tun_pol_t *itp;
2126 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2127 netstack_t *ns = ss->spdsock_spds->spds_netstack;
2128 uint32_t auditing = AU_AUDITING();
2129
2130 if (tunname != NULL) {
2131 tname = (char *)tunname->spd_if_name;
2132 if (*tname == '\0') {
2133 error = ipsec_clone_system_policy(ns);
2134 if (auditing) {
2135 boolean_t active;
2136 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2137 cred_t *cr;
2138 pid_t cpid;
2139
2140 cr = msg_getcred(mp, &cpid);
2141 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2142 audit_pf_policy(SPD_CLONE, cr, ns,
2143 NULL, active, error, cpid);
2144 }
2145 if (error == 0) {
2146 itp_walk(spdsock_clone_node, &error, ns);
2147 if (auditing) {
2148 boolean_t active;
2149 spd_msg_t *spmsg =
2150 (spd_msg_t *)mp->b_rptr;
2151 cred_t *cr;
2152 pid_t cpid;
2153
2154 cr = msg_getcred(mp, &cpid);
2155 active = (spmsg->spd_msg_spdid ==
2156 SPD_ACTIVE);
2157 audit_pf_policy(SPD_CLONE, cr,
2158 ns, "all tunnels", active, 0,
2159 cpid);
2160 }
2161 }
2162 } else {
2163 itp = get_tunnel_policy(tname, ns);
2164 if (itp == NULL) {
2165 spdsock_error(q, mp, ENOENT, 0);
2166 if (auditing) {
2167 boolean_t active;
2168 spd_msg_t *spmsg =
2169 (spd_msg_t *)mp->b_rptr;
2170 cred_t *cr;
2171 pid_t cpid;
2172
2173 cr = msg_getcred(mp, &cpid);
2174 active = (spmsg->spd_msg_spdid ==
2175 SPD_ACTIVE);
2176 audit_pf_policy(SPD_CLONE, cr,
2177 ns, NULL, active, ENOENT, cpid);
2178 }
2179 return;
2180 }
2181 spdsock_clone_node(itp, &error, NULL);
2182 if (auditing) {
2183 boolean_t active;
2184 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2185 cred_t *cr;
2186 pid_t cpid;
2187
2188 cr = msg_getcred(mp, &cpid);
2189 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2190 audit_pf_policy(SPD_CLONE, cr, ns,
2191 ITP_NAME(itp), active, error, cpid);
2192 }
2193 ITP_REFRELE(itp, ns);
2194 }
2195 } else {
2196 error = ipsec_clone_system_policy(ns);
2197 if (auditing) {
2198 boolean_t active;
2199 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2200 cred_t *cr;
2201 pid_t cpid;
2202
2203 cr = msg_getcred(mp, &cpid);
2204 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2205 audit_pf_policy(SPD_CLONE, cr, ns, NULL,
2206 active, error, cpid);
2207 }
2208 }
2209
2210 if (error != 0)
2211 spdsock_error(q, mp, error, 0);
2212 else
2213 spd_echo(q, mp);
2214 }
2215
2216 /*
2217 * Process a SPD_ALGLIST request. The caller expects separate alg entries
2218 * for AH authentication, ESP authentication, and ESP encryption.
2219 * The same distinction is then used when setting the min and max key
2220 * sizes when defining policies.
2221 */
2222
2223 #define SPDSOCK_AH_AUTH 0
2224 #define SPDSOCK_ESP_AUTH 1
2225 #define SPDSOCK_ESP_ENCR 2
2226 #define SPDSOCK_NTYPES 3
2227
2228 static const uint_t algattr[SPDSOCK_NTYPES] = {
2229 SPD_ATTR_AH_AUTH,
2230 SPD_ATTR_ESP_AUTH,
2231 SPD_ATTR_ESP_ENCR
2232 };
2233 static const uint_t minbitsattr[SPDSOCK_NTYPES] = {
2234 SPD_ATTR_AH_MINBITS,
2235 SPD_ATTR_ESPA_MINBITS,
2236 SPD_ATTR_ENCR_MINBITS
2237 };
2238 static const uint_t maxbitsattr[SPDSOCK_NTYPES] = {
2239 SPD_ATTR_AH_MAXBITS,
2240 SPD_ATTR_ESPA_MAXBITS,
2241 SPD_ATTR_ENCR_MAXBITS
2242 };
2243 static const uint_t defbitsattr[SPDSOCK_NTYPES] = {
2244 SPD_ATTR_AH_DEFBITS,
2245 SPD_ATTR_ESPA_DEFBITS,
2246 SPD_ATTR_ENCR_DEFBITS
2247 };
2248 static const uint_t incrbitsattr[SPDSOCK_NTYPES] = {
2249 SPD_ATTR_AH_INCRBITS,
2250 SPD_ATTR_ESPA_INCRBITS,
2251 SPD_ATTR_ENCR_INCRBITS
2252 };
2253
2254 #define ATTRPERALG 6 /* fixed attributes per algs */
2255
void
spdsock_alglist(queue_t *q, mblk_t *mp)
{
	/*
	 * Handle SPD_ALGLIST: build a single reply message listing every
	 * registered algorithm (auth algs reported twice: once as AH auth,
	 * once as ESP auth) with its key-size parameters, and send it
	 * upstream.  Consumes the request mblk.
	 */
	uint_t algtype;
	uint_t algidx;
	uint_t algcount;
	uint_t size;
	mblk_t *m;
	uint8_t *cur;
	spd_msg_t *msg;
	struct spd_ext_actions *act;
	struct spd_attribute *attr;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	mutex_enter(&ipss->ipsec_alg_lock);
	/*
	 * The SPD client expects to receive separate entries for
	 * AH authentication and ESP authentication supported algorithms.
	 *
	 * Don't return the "any" algorithms, if defined, as no
	 * kernel policies can be set for these algorithms.
	 */
	algcount = 2 * ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];

	if (ipss->ipsec_alglists[IPSEC_ALG_AUTH][SADB_AALG_NONE] != NULL)
		algcount--;
	if (ipss->ipsec_alglists[IPSEC_ALG_ENCR][SADB_EALG_NONE] != NULL)
		algcount--;

	/*
	 * For each algorithm, we encode:
	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
	 */

	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) +
	    ATTRPERALG * sizeof (struct spd_attribute) * algcount;

	ASSERT(ALIGNED64(size));

	m = allocb(size, BPRI_HI);
	if (m == NULL) {
		mutex_exit(&ipss->ipsec_alg_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	m->b_wptr = m->b_rptr + size;
	cur = m->b_rptr;

	/* Echo the request header, then overwrite length/status fields. */
	msg = (spd_msg_t *)cur;
	bcopy(mp->b_rptr, cur, sizeof (*msg));

	msg->spd_msg_len = SPD_8TO64(size);
	msg->spd_msg_errno = 0;
	msg->spd_msg_diagnostic = 0;

	cur += sizeof (*msg);

	act = (struct spd_ext_actions *)cur;
	cur += sizeof (*act);

	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
	act->spd_actions_exttype = SPD_EXT_ACTION;
	act->spd_actions_count = algcount;
	act->spd_actions_reserved = 0;

	attr = (struct spd_attribute *)cur;

/* Append one attribute at *attr and advance. */
#define	EMIT(tag, value) { \
		attr->spd_attr_tag = (tag); \
		attr->spd_attr_value = (value); \
		attr++; \
	}

	/*
	 * If you change the number of EMIT's here, change
	 * ATTRPERALG above to match
	 */
#define	EMITALGATTRS(_type) { \
		EMIT(algattr[_type], algid);		/* 1 */ \
		EMIT(minbitsattr[_type], minbits);	/* 2 */ \
		EMIT(maxbitsattr[_type], maxbits);	/* 3 */ \
		EMIT(defbitsattr[_type], defbits);	/* 4 */ \
		EMIT(incrbitsattr[_type], incr);	/* 5 */ \
		EMIT(SPD_ATTR_NEXT, 0);			/* 6 */ \
	}

	/* Walk algorithms in the kernel's sorted order. */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {
			int algid = ipss->ipsec_sortlist[algtype][algidx];
			ipsec_alginfo_t *alg =
			    ipss->ipsec_alglists[algtype][algid];
			uint_t minbits = alg->alg_minbits;
			uint_t maxbits = alg->alg_maxbits;
			uint_t defbits = alg->alg_default_bits;
			uint_t incr = alg->alg_increment;

			if (algtype == IPSEC_ALG_AUTH) {
				/* Skip the "any" alg; emit auth twice. */
				if (algid == SADB_AALG_NONE)
					continue;
				EMITALGATTRS(SPDSOCK_AH_AUTH);
				EMITALGATTRS(SPDSOCK_ESP_AUTH);
			} else {
				if (algid == SADB_EALG_NONE)
					continue;
				ASSERT(algtype == IPSEC_ALG_ENCR);
				EMITALGATTRS(SPDSOCK_ESP_ENCR);
			}
		}
	}

	mutex_exit(&ipss->ipsec_alg_lock);

#undef	EMITALGATTRS
#undef	EMIT
#undef	ATTRPERALG

	/* Rewrite the final NEXT attribute as the END marker. */
	attr--;
	attr->spd_attr_tag = SPD_ATTR_END;

	freemsg(mp);
	qreply(q, m);
}
2382
2383 /*
2384 * Process a SPD_DUMPALGS request.
2385 */
2386
2387 #define ATTRPERALG 9 /* fixed attributes per algs */
2388
2389 void
spdsock_dumpalgs(queue_t * q,mblk_t * mp)2390 spdsock_dumpalgs(queue_t *q, mblk_t *mp)
2391 {
2392 uint_t algtype;
2393 uint_t algidx;
2394 uint_t size;
2395 mblk_t *m;
2396 uint8_t *cur;
2397 spd_msg_t *msg;
2398 struct spd_ext_actions *act;
2399 struct spd_attribute *attr;
2400 ipsec_alginfo_t *alg;
2401 uint_t algid;
2402 uint_t i;
2403 uint_t alg_size;
2404 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2405 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
2406
2407 mutex_enter(&ipss->ipsec_alg_lock);
2408
2409 /*
2410 * For each algorithm, we encode:
2411 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
2412 *
2413 * ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE*
2414 * ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_NPARAMS / ALG_PARAMS* /
2415 * ALG_MECHNAME / ALG_FLAGS / {END, NEXT}
2416 */
2417
2418 /*
2419 * Compute the size of the SPD message.
2420 */
2421 size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions);
2422
2423 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2424 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
2425 algidx++) {
2426 algid = ipss->ipsec_sortlist[algtype][algidx];
2427 alg = ipss->ipsec_alglists[algtype][algid];
2428 alg_size = sizeof (struct spd_attribute) *
2429 (ATTRPERALG + alg->alg_nkey_sizes +
2430 alg->alg_nblock_sizes + alg->alg_nparams) +
2431 CRYPTO_MAX_MECH_NAME;
2432 size += alg_size;
2433 }
2434 }
2435
2436 ASSERT(ALIGNED64(size));
2437
2438 m = allocb(size, BPRI_HI);
2439 if (m == NULL) {
2440 mutex_exit(&ipss->ipsec_alg_lock);
2441 spdsock_error(q, mp, ENOMEM, 0);
2442 return;
2443 }
2444
2445 m->b_wptr = m->b_rptr + size;
2446 cur = m->b_rptr;
2447
2448 msg = (spd_msg_t *)cur;
2449 bcopy(mp->b_rptr, cur, sizeof (*msg));
2450
2451 msg->spd_msg_len = SPD_8TO64(size);
2452 msg->spd_msg_errno = 0;
2453 msg->spd_msg_type = SPD_ALGLIST;
2454
2455 msg->spd_msg_diagnostic = 0;
2456
2457 cur += sizeof (*msg);
2458
2459 act = (struct spd_ext_actions *)cur;
2460 cur += sizeof (*act);
2461
2462 act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
2463 act->spd_actions_exttype = SPD_EXT_ACTION;
2464 act->spd_actions_count = ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
2465 ipss->ipsec_nalgs[IPSEC_ALG_ENCR];
2466 act->spd_actions_reserved = 0;
2467
2468 /*
2469 * If there aren't any algorithms registered, return an empty message.
2470 * spdsock_get_ext() knows how to deal with this.
2471 */
2472 if (act->spd_actions_count == 0) {
2473 act->spd_actions_len = 0;
2474 mutex_exit(&ipss->ipsec_alg_lock);
2475 goto error;
2476 }
2477
2478 attr = (struct spd_attribute *)cur;
2479
2480 #define EMIT(tag, value) { \
2481 attr->spd_attr_tag = (tag); \
2482 attr->spd_attr_value = (value); \
2483 attr++; \
2484 }
2485
2486 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2487 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
2488 algidx++) {
2489
2490 algid = ipss->ipsec_sortlist[algtype][algidx];
2491 alg = ipss->ipsec_alglists[algtype][algid];
2492
2493 /*
2494 * If you change the number of EMIT's here, change
2495 * ATTRPERALG above to match
2496 */
2497 EMIT(SPD_ATTR_ALG_ID, algid);
2498 EMIT(SPD_ATTR_ALG_PROTO, algproto[algtype]);
2499 EMIT(SPD_ATTR_ALG_INCRBITS, alg->alg_increment);
2500 EMIT(SPD_ATTR_ALG_NKEYSIZES, alg->alg_nkey_sizes);
2501 for (i = 0; i < alg->alg_nkey_sizes; i++)
2502 EMIT(SPD_ATTR_ALG_KEYSIZE,
2503 alg->alg_key_sizes[i]);
2504
2505 EMIT(SPD_ATTR_ALG_NBLOCKSIZES, alg->alg_nblock_sizes);
2506 for (i = 0; i < alg->alg_nblock_sizes; i++)
2507 EMIT(SPD_ATTR_ALG_BLOCKSIZE,
2508 alg->alg_block_sizes[i]);
2509
2510 EMIT(SPD_ATTR_ALG_NPARAMS, alg->alg_nparams);
2511 for (i = 0; i < alg->alg_nparams; i++)
2512 EMIT(SPD_ATTR_ALG_PARAMS,
2513 alg->alg_params[i]);
2514
2515 EMIT(SPD_ATTR_ALG_FLAGS, alg->alg_flags);
2516
2517 EMIT(SPD_ATTR_ALG_MECHNAME, CRYPTO_MAX_MECH_NAME);
2518 bcopy(alg->alg_mech_name, attr, CRYPTO_MAX_MECH_NAME);
2519 attr = (struct spd_attribute *)((char *)attr +
2520 CRYPTO_MAX_MECH_NAME);
2521
2522 EMIT(SPD_ATTR_NEXT, 0);
2523 }
2524 }
2525
2526 mutex_exit(&ipss->ipsec_alg_lock);
2527
2528 #undef EMITALGATTRS
2529 #undef EMIT
2530 #undef ATTRPERALG
2531
2532 attr--;
2533 attr->spd_attr_tag = SPD_ATTR_END;
2534
2535 error:
2536 freemsg(mp);
2537 qreply(q, m);
2538 }
2539
2540 /*
2541 * Do the actual work of processing an SPD_UPDATEALGS request. Can
2542 * be invoked either once IPsec is loaded on a cached request, or
2543 * when a request is received while IPsec is loaded.
2544 */
2545 static int
spdsock_do_updatealg(spd_ext_t * extv[],spd_stack_t * spds)2546 spdsock_do_updatealg(spd_ext_t *extv[], spd_stack_t *spds)
2547 {
2548 struct spd_ext_actions *actp;
2549 struct spd_attribute *attr, *endattr;
2550 uint64_t *start, *end;
2551 ipsec_alginfo_t *alg = NULL;
2552 ipsec_algtype_t alg_type = 0;
2553 boolean_t skip_alg = B_TRUE, doing_proto = B_FALSE;
2554 uint_t i, cur_key, cur_block, algid;
2555 int diag = -1;
2556
2557 ASSERT(MUTEX_HELD(&spds->spds_alg_lock));
2558
2559 /* parse the message, building the list of algorithms */
2560
2561 actp = (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
2562 if (actp == NULL)
2563 return (SPD_DIAGNOSTIC_NO_ACTION_EXT);
2564
2565 start = (uint64_t *)actp;
2566 end = (start + actp->spd_actions_len);
2567 endattr = (struct spd_attribute *)end;
2568 attr = (struct spd_attribute *)&actp[1];
2569
2570 bzero(spds->spds_algs, IPSEC_NALGTYPES * IPSEC_MAX_ALGS *
2571 sizeof (ipsec_alginfo_t *));
2572
2573 alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
2574
2575 #define ALG_KEY_SIZES(a) (((a)->alg_nkey_sizes + 1) * sizeof (uint16_t))
2576 #define ALG_BLOCK_SIZES(a) (((a)->alg_nblock_sizes + 1) * sizeof (uint16_t))
2577 #define ALG_PARAM_SIZES(a) (((a)->alg_nparams + 1) * sizeof (uint16_t))
2578
2579 while (attr < endattr) {
2580 switch (attr->spd_attr_tag) {
2581 case SPD_ATTR_NOP:
2582 case SPD_ATTR_EMPTY:
2583 break;
2584 case SPD_ATTR_END:
2585 attr = endattr;
2586 /* FALLTHRU */
2587 case SPD_ATTR_NEXT:
2588 if (doing_proto) {
2589 doing_proto = B_FALSE;
2590 break;
2591 }
2592 if (skip_alg) {
2593 ipsec_alg_free(alg);
2594 } else {
2595 ipsec_alg_free(
2596 spds->spds_algs[alg_type][alg->alg_id]);
2597 spds->spds_algs[alg_type][alg->alg_id] =
2598 alg;
2599 }
2600 alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
2601 break;
2602
2603 case SPD_ATTR_ALG_ID:
2604 if (attr->spd_attr_value >= IPSEC_MAX_ALGS) {
2605 ss1dbg(spds, ("spdsock_do_updatealg: "
2606 "invalid alg id %d\n",
2607 attr->spd_attr_value));
2608 diag = SPD_DIAGNOSTIC_ALG_ID_RANGE;
2609 goto bail;
2610 }
2611 alg->alg_id = attr->spd_attr_value;
2612 break;
2613
2614 case SPD_ATTR_ALG_PROTO:
2615 /* find the alg type */
2616 for (i = 0; i < NALGPROTOS; i++)
2617 if (algproto[i] == attr->spd_attr_value)
2618 break;
2619 skip_alg = (i == NALGPROTOS);
2620 if (!skip_alg)
2621 alg_type = i;
2622 break;
2623
2624 case SPD_ATTR_ALG_INCRBITS:
2625 alg->alg_increment = attr->spd_attr_value;
2626 break;
2627
2628 case SPD_ATTR_ALG_NKEYSIZES:
2629 if (alg->alg_key_sizes != NULL) {
2630 kmem_free(alg->alg_key_sizes,
2631 ALG_KEY_SIZES(alg));
2632 }
2633 alg->alg_nkey_sizes = attr->spd_attr_value;
2634 /*
2635 * Allocate room for the trailing zero key size
2636 * value as well.
2637 */
2638 alg->alg_key_sizes = kmem_zalloc(ALG_KEY_SIZES(alg),
2639 KM_SLEEP);
2640 cur_key = 0;
2641 break;
2642
2643 case SPD_ATTR_ALG_KEYSIZE:
2644 if (alg->alg_key_sizes == NULL ||
2645 cur_key >= alg->alg_nkey_sizes) {
2646 ss1dbg(spds, ("spdsock_do_updatealg: "
2647 "too many key sizes\n"));
2648 diag = SPD_DIAGNOSTIC_ALG_NUM_KEY_SIZES;
2649 goto bail;
2650 }
2651 alg->alg_key_sizes[cur_key++] = attr->spd_attr_value;
2652 break;
2653
2654 case SPD_ATTR_ALG_FLAGS:
2655 /*
2656 * Flags (bit mask). The alg_flags element of
2657 * ipsecalg_flags_t is only 8 bits wide. The
2658 * user can set the VALID bit, but we will ignore it
2659 * and make the decision is the algorithm is valid.
2660 */
2661 alg->alg_flags |= (uint8_t)attr->spd_attr_value;
2662 break;
2663
2664 case SPD_ATTR_ALG_NBLOCKSIZES:
2665 if (alg->alg_block_sizes != NULL) {
2666 kmem_free(alg->alg_block_sizes,
2667 ALG_BLOCK_SIZES(alg));
2668 }
2669 alg->alg_nblock_sizes = attr->spd_attr_value;
2670 /*
2671 * Allocate room for the trailing zero block size
2672 * value as well.
2673 */
2674 alg->alg_block_sizes = kmem_zalloc(ALG_BLOCK_SIZES(alg),
2675 KM_SLEEP);
2676 cur_block = 0;
2677 break;
2678
2679 case SPD_ATTR_ALG_BLOCKSIZE:
2680 if (alg->alg_block_sizes == NULL ||
2681 cur_block >= alg->alg_nblock_sizes) {
2682 ss1dbg(spds, ("spdsock_do_updatealg: "
2683 "too many block sizes\n"));
2684 diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
2685 goto bail;
2686 }
2687 alg->alg_block_sizes[cur_block++] =
2688 attr->spd_attr_value;
2689 break;
2690
2691 case SPD_ATTR_ALG_NPARAMS:
2692 if (alg->alg_params != NULL) {
2693 kmem_free(alg->alg_params,
2694 ALG_PARAM_SIZES(alg));
2695 }
2696 alg->alg_nparams = attr->spd_attr_value;
2697 /*
2698 * Allocate room for the trailing zero block size
2699 * value as well.
2700 */
2701 alg->alg_params = kmem_zalloc(ALG_PARAM_SIZES(alg),
2702 KM_SLEEP);
2703 cur_block = 0;
2704 break;
2705
2706 case SPD_ATTR_ALG_PARAMS:
2707 if (alg->alg_params == NULL ||
2708 cur_block >= alg->alg_nparams) {
2709 ss1dbg(spds, ("spdsock_do_updatealg: "
2710 "too many params\n"));
2711 diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
2712 goto bail;
2713 }
2714 /*
2715 * Array contains: iv_len, icv_len, salt_len
2716 * Any additional parameters are currently ignored.
2717 */
2718 alg->alg_params[cur_block++] =
2719 attr->spd_attr_value;
2720 break;
2721
2722 case SPD_ATTR_ALG_MECHNAME: {
2723 char *mech_name;
2724
2725 if (attr->spd_attr_value > CRYPTO_MAX_MECH_NAME) {
2726 ss1dbg(spds, ("spdsock_do_updatealg: "
2727 "mech name too long\n"));
2728 diag = SPD_DIAGNOSTIC_ALG_MECH_NAME_LEN;
2729 goto bail;
2730 }
2731 mech_name = (char *)(attr + 1);
2732 bcopy(mech_name, alg->alg_mech_name,
2733 attr->spd_attr_value);
2734 alg->alg_mech_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
2735 attr = (struct spd_attribute *)((char *)attr +
2736 attr->spd_attr_value);
2737 break;
2738 }
2739
2740 case SPD_ATTR_PROTO_ID:
2741 doing_proto = B_TRUE;
2742 for (i = 0; i < NALGPROTOS; i++) {
2743 if (algproto[i] == attr->spd_attr_value) {
2744 alg_type = i;
2745 break;
2746 }
2747 }
2748 break;
2749
2750 case SPD_ATTR_PROTO_EXEC_MODE:
2751 if (!doing_proto)
2752 break;
2753 for (i = 0; i < NEXECMODES; i++) {
2754 if (execmodes[i] == attr->spd_attr_value) {
2755 spds->spds_algs_exec_mode[alg_type] = i;
2756 break;
2757 }
2758 }
2759 break;
2760 }
2761 attr++;
2762 }
2763
2764 #undef ALG_KEY_SIZES
2765 #undef ALG_BLOCK_SIZES
2766 #undef ALG_PARAM_SIZES
2767
2768 /* update the algorithm tables */
2769 spdsock_merge_algs(spds);
2770 bail:
2771 /* cleanup */
2772 ipsec_alg_free(alg);
2773 for (alg_type = 0; alg_type < IPSEC_NALGTYPES; alg_type++)
2774 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++)
2775 if (spds->spds_algs[alg_type][algid] != NULL)
2776 ipsec_alg_free(spds->spds_algs[alg_type][algid]);
2777 return (diag);
2778 }
2779
2780 /*
2781 * Process an SPD_UPDATEALGS request. If IPsec is not loaded, queue
2782 * the request until IPsec loads. If IPsec is loaded, act on it
2783 * immediately.
2784 */
2785
2786 static void
spdsock_updatealg(queue_t * q,mblk_t * mp,spd_ext_t * extv[])2787 spdsock_updatealg(queue_t *q, mblk_t *mp, spd_ext_t *extv[])
2788 {
2789 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2790 spd_stack_t *spds = ss->spdsock_spds;
2791 ipsec_stack_t *ipss = spds->spds_netstack->netstack_ipsec;
2792 uint32_t auditing = AU_AUDITING();
2793
2794 if (!ipsec_loaded(ipss)) {
2795 /*
2796 * IPsec is not loaded, save request and return nicely,
2797 * the message will be processed once IPsec loads.
2798 */
2799 mblk_t *new_mp;
2800
2801 /* last update message wins */
2802 if ((new_mp = copymsg(mp)) == NULL) {
2803 spdsock_error(q, mp, ENOMEM, 0);
2804 return;
2805 }
2806 mutex_enter(&spds->spds_alg_lock);
2807 bcopy(extv, spds->spds_extv_algs,
2808 sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));
2809 if (spds->spds_mp_algs != NULL)
2810 freemsg(spds->spds_mp_algs);
2811 spds->spds_mp_algs = mp;
2812 mutex_exit(&spds->spds_alg_lock);
2813 if (auditing) {
2814 cred_t *cr;
2815 pid_t cpid;
2816
2817 cr = msg_getcred(mp, &cpid);
2818 audit_pf_policy(SPD_UPDATEALGS, cr,
2819 spds->spds_netstack, NULL, B_TRUE, EAGAIN,
2820 cpid);
2821 }
2822 spd_echo(q, new_mp);
2823 } else {
2824 /*
2825 * IPsec is loaded, act on the message immediately.
2826 */
2827 int diag;
2828
2829 mutex_enter(&spds->spds_alg_lock);
2830 diag = spdsock_do_updatealg(extv, spds);
2831 if (diag == -1) {
2832 /* Keep the lock held while we walk the SA tables. */
2833 sadb_alg_update(IPSEC_ALG_ALL, 0, 0,
2834 spds->spds_netstack);
2835 mutex_exit(&spds->spds_alg_lock);
2836 spd_echo(q, mp);
2837 if (auditing) {
2838 cred_t *cr;
2839 pid_t cpid;
2840
2841 cr = msg_getcred(mp, &cpid);
2842 audit_pf_policy(SPD_UPDATEALGS, cr,
2843 spds->spds_netstack, NULL, B_TRUE, 0,
2844 cpid);
2845 }
2846 } else {
2847 mutex_exit(&spds->spds_alg_lock);
2848 spdsock_diag(q, mp, diag);
2849 if (auditing) {
2850 cred_t *cr;
2851 pid_t cpid;
2852
2853 cr = msg_getcred(mp, &cpid);
2854 audit_pf_policy(SPD_UPDATEALGS, cr,
2855 spds->spds_netstack, NULL, B_TRUE, diag,
2856 cpid);
2857 }
2858 }
2859 }
2860 }
2861
2862 /*
2863 * Find a tunnel instance (using the name to link ID mapping), and
2864 * update it after an IPsec change. We need to do this always in case
2865 * we add policy AFTER plumbing a tunnel. We also need to do this
2866 * because, as a side-effect, the tunnel's MTU is updated to reflect
2867 * any IPsec overhead in the itp's policy.
2868 */
2869 static void
update_iptun_policy(ipsec_tun_pol_t * itp)2870 update_iptun_policy(ipsec_tun_pol_t *itp)
2871 {
2872 datalink_id_t linkid;
2873
2874 if (dls_mgmt_get_linkid(itp->itp_name, &linkid) == 0)
2875 iptun_set_policy(linkid, itp);
2876 }
2877
2878 /*
2879 * Sort through the mess of polhead options to retrieve an appropriate one.
2880 * Returns NULL if we send an spdsock error. Returns a valid pointer if we
2881 * found a valid polhead. Returns ALL_ACTIVE_POLHEADS (aka. -1) or
2882 * ALL_INACTIVE_POLHEADS (aka. -2) if the operation calls for the operation to
2883 * act on ALL policy heads.
2884 */
2885 static ipsec_policy_head_t *
get_appropriate_polhead(queue_t * q,mblk_t * mp,spd_if_t * tunname,int spdid,int msgtype,ipsec_tun_pol_t ** itpp)2886 get_appropriate_polhead(queue_t *q, mblk_t *mp, spd_if_t *tunname, int spdid,
2887 int msgtype, ipsec_tun_pol_t **itpp)
2888 {
2889 ipsec_tun_pol_t *itp;
2890 ipsec_policy_head_t *iph;
2891 int errno;
2892 char *tname;
2893 boolean_t active;
2894 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2895 netstack_t *ns = ss->spdsock_spds->spds_netstack;
2896 uint64_t gen; /* Placeholder */
2897
2898 active = (spdid == SPD_ACTIVE);
2899 *itpp = NULL;
2900 if (!active && spdid != SPD_STANDBY) {
2901 spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_SPDID);
2902 return (NULL);
2903 }
2904
2905 if (tunname != NULL) {
2906 /* Acting on a tunnel's SPD. */
2907 tname = (char *)tunname->spd_if_name;
2908 if (*tname == '\0') {
2909 /* Handle all-polhead cases here. */
2910 if (msgtype != SPD_FLUSH && msgtype != SPD_DUMP) {
2911 spdsock_diag(q, mp,
2912 SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
2913 return (NULL);
2914 }
2915 return (active ? ALL_ACTIVE_POLHEADS :
2916 ALL_INACTIVE_POLHEADS);
2917 }
2918
2919 itp = get_tunnel_policy(tname, ns);
2920 if (itp == NULL) {
2921 if (msgtype != SPD_ADDRULE) {
2922 /* "Tunnel not found" */
2923 spdsock_error(q, mp, ENOENT, 0);
2924 return (NULL);
2925 }
2926
2927 errno = 0;
2928 itp = create_tunnel_policy(tname, &errno, &gen, ns);
2929 if (itp == NULL) {
2930 /*
2931 * Something very bad happened, most likely
2932 * ENOMEM. Return an indicator.
2933 */
2934 spdsock_error(q, mp, errno, 0);
2935 return (NULL);
2936 }
2937 }
2938
2939 /* Match up the itp to an iptun instance. */
2940 update_iptun_policy(itp);
2941
2942 *itpp = itp;
2943 /* For spdsock dump state, set the polhead's name. */
2944 if (msgtype == SPD_DUMP) {
2945 ITP_REFHOLD(itp);
2946 ss->spdsock_itp = itp;
2947 ss->spdsock_dump_tunnel = itp->itp_flags &
2948 (active ? ITPF_P_TUNNEL : ITPF_I_TUNNEL);
2949 }
2950 } else {
2951 itp = NULL;
2952 /* For spdsock dump state, indicate it's global policy. */
2953 if (msgtype == SPD_DUMP)
2954 ss->spdsock_itp = NULL;
2955 }
2956
2957 if (active)
2958 iph = (itp == NULL) ? ipsec_system_policy(ns) : itp->itp_policy;
2959 else
2960 iph = (itp == NULL) ? ipsec_inactive_policy(ns) :
2961 itp->itp_inactive;
2962
2963 ASSERT(iph != NULL);
2964 if (itp != NULL) {
2965 IPPH_REFHOLD(iph);
2966 }
2967
2968 return (iph);
2969 }
2970
static void
spdsock_parse(queue_t *q, mblk_t *mp)
{
	/*
	 * Central PF_POLICY request dispatcher: validate the message
	 * framing and extensions, then route the request to the handler
	 * appropriate for its message type and polhead requirements.
	 */
	spd_msg_t *spmsg;
	spd_ext_t *extv[SPD_EXT_MAX + 1];
	uint_t msgsize;
	ipsec_policy_head_t *iph;
	ipsec_tun_pol_t *itp;
	spd_if_t *tunname;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	netstack_t *ns = spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	/* Make sure nothing's below me. */
	ASSERT(WR(q)->q_next == NULL);

	spmsg = (spd_msg_t *)mp->b_rptr;

	msgsize = SPD_64TO8(spmsg->spd_msg_len);

	if (msgdsize(mp) != msgsize) {
		/*
		 * Message len incorrect w.r.t. actual size.  Send an error
		 * (EMSGSIZE).	It may be necessary to massage things a
		 * bit.  For example, if the spd_msg_type is hosed,
		 * I need to set it to SPD_RESERVED to get delivery to
		 * do the right thing.	Then again, maybe just letting
		 * the error delivery do the right thing.
		 */
		ss2dbg(spds,
		    ("mblk (%lu) and base (%d) message sizes don't jibe.\n",
		    msgdsize(mp), msgsize));
		spdsock_error(q, mp, EMSGSIZE, SPD_DIAGNOSTIC_NONE);
		return;
	}

	if (msgsize > (uint_t)(mp->b_wptr - mp->b_rptr)) {
		/* Get all message into one mblk. */
		if (pullupmsg(mp, -1) == 0) {
			/*
			 * Something screwy happened.
			 */
			ss3dbg(spds, ("spdsock_parse: pullupmsg() failed.\n"));
			return;
		} else {
			/* b_rptr may have moved; refresh the header ptr. */
			spmsg = (spd_msg_t *)mp->b_rptr;
		}
	}

	/* Split out and sanity-check the extensions into extv[]. */
	switch (spdsock_get_ext(extv, spmsg, msgsize)) {
	case KGE_DUP:
		/* Handle duplicate extension. */
		ss1dbg(spds, ("Got duplicate extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, dup_ext_diag[extv[0]->spd_ext_type]);
		return;
	case KGE_UNK:
		/* Handle unknown extension. */
		ss1dbg(spds, ("Got unknown extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_UNKNOWN_EXT);
		return;
	case KGE_LEN:
		/* Length error. */
		ss1dbg(spds, ("Length %d on extension type %d overrun or 0.\n",
		    extv[0]->spd_ext_len, extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_EXTLEN);
		return;
	case KGE_CHK:
		/* Reality check failed. */
		ss1dbg(spds, ("Reality check failed on extension type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, bad_ext_diag[extv[0]->spd_ext_type]);
		return;
	default:
		/* Default case is no errors. */
		break;
	}

	/*
	 * Special-case SPD_UPDATEALGS so as not to load IPsec.
	 */
	if (!ipsec_loaded(ipss) && spmsg->spd_msg_type != SPD_UPDATEALGS) {
		spdsock_t *ss = (spdsock_t *)q->q_ptr;

		ASSERT(ss != NULL);
		/*
		 * Kick off the IPsec loader and park this request; the
		 * qtimeout fires spdsock_loadcheck() to retry or fail it.
		 */
		ipsec_loader_loadnow(ipss);
		ss->spdsock_timeout_arg = mp;
		ss->spdsock_timeout = qtimeout(q, spdsock_loadcheck,
		    q, LOADCHECK_INTERVAL);
		return;
	}

	/* First check for messages that need no polheads at all. */
	switch (spmsg->spd_msg_type) {
	case SPD_UPDATEALGS:
		spdsock_updatealg(q, mp, extv);
		return;
	case SPD_ALGLIST:
		spdsock_alglist(q, mp);
		return;
	case SPD_DUMPALGS:
		spdsock_dumpalgs(q, mp);
		return;
	}

	/*
	 * Then check for ones that need both primary/secondary polheads,
	 * finding the appropriate tunnel policy if need be.
	 */
	tunname = (spd_if_t *)extv[SPD_EXT_TUN_NAME];
	switch (spmsg->spd_msg_type) {
	case SPD_FLIP:
		spdsock_flip(q, mp, tunname);
		return;
	case SPD_CLONE:
		spdsock_clone(q, mp, tunname);
		return;
	}

	/*
	 * Finally, find ones that operate on exactly one polhead, or
	 * "all polheads" of a given type (active/inactive).
	 */
	iph = get_appropriate_polhead(q, mp, tunname, spmsg->spd_msg_spdid,
	    spmsg->spd_msg_type, &itp);
	if (iph == NULL)
		return;

	/* All-polheads-ready operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_FLUSH:
		if (itp != NULL) {
			/* Clear this side's tunnel flags before flushing. */
			mutex_enter(&itp->itp_lock);
			if (spmsg->spd_msg_spdid == SPD_ACTIVE)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
			mutex_exit(&itp->itp_lock);
		}

		spdsock_flush(q, iph, itp, mp);

		if (itp != NULL) {
			/* SPD_FLUSH is worth a tunnel MTU check. */
			update_iptun_policy(itp);
			ITP_REFRELE(itp, ns);
		}
		return;
	case SPD_DUMP:
		if (itp != NULL)
			ITP_REFRELE(itp, ns);
		spdsock_dump(q, iph, mp);
		return;
	}

	/* Anything below here cannot act on "all polheads". */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
		return;
	}

	/* Single-polhead-only operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_ADDRULE:
		spdsock_addrule(q, iph, mp, extv, itp);
		break;
	case SPD_DELETERULE:
		spdsock_deleterule(q, iph, mp, extv, itp);
		break;
	case SPD_LOOKUP:
		spdsock_lookup(q, iph, mp, extv, itp);
		break;
	default:
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_MSG_TYPE);
		break;
	}

	/* Drop the holds taken by get_appropriate_polhead(). */
	IPPH_REFRELE(iph, ns);
	if (itp != NULL) {
		/* SPD_{ADD,DELETE}RULE are worth a tunnel MTU check. */
		if (spmsg->spd_msg_type == SPD_ADDRULE ||
		    spmsg->spd_msg_type == SPD_DELETERULE)
			update_iptun_policy(itp);
		ITP_REFRELE(itp, ns);
	}
}
3158
3159 /*
3160 * If an algorithm mapping was received before IPsec was loaded, process it.
3161 * Called from the IPsec loader.
3162 */
3163 void
spdsock_update_pending_algs(netstack_t * ns)3164 spdsock_update_pending_algs(netstack_t *ns)
3165 {
3166 spd_stack_t *spds = ns->netstack_spdsock;
3167
3168 mutex_enter(&spds->spds_alg_lock);
3169 if (spds->spds_mp_algs != NULL) {
3170 (void) spdsock_do_updatealg(spds->spds_extv_algs, spds);
3171 freemsg(spds->spds_mp_algs);
3172 spds->spds_mp_algs = NULL;
3173 }
3174 mutex_exit(&spds->spds_alg_lock);
3175 }
3176
3177 static void
spdsock_loadcheck(void * arg)3178 spdsock_loadcheck(void *arg)
3179 {
3180 queue_t *q = (queue_t *)arg;
3181 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3182 mblk_t *mp;
3183 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
3184
3185 ASSERT(ss != NULL);
3186
3187 ss->spdsock_timeout = 0;
3188 mp = ss->spdsock_timeout_arg;
3189 ASSERT(mp != NULL);
3190 ss->spdsock_timeout_arg = NULL;
3191 if (ipsec_failed(ipss))
3192 spdsock_error(q, mp, EPROTONOSUPPORT, 0);
3193 else
3194 spdsock_parse(q, mp);
3195 }
3196
3197 /*
3198 * Copy relevant state bits.
3199 */
3200 static void
spdsock_copy_info(struct T_info_ack * tap,spdsock_t * ss)3201 spdsock_copy_info(struct T_info_ack *tap, spdsock_t *ss)
3202 {
3203 *tap = spdsock_g_t_info_ack;
3204 tap->CURRENT_state = ss->spdsock_state;
3205 tap->OPT_size = spdsock_max_optsize;
3206 }
3207
3208 /*
3209 * This routine responds to T_CAPABILITY_REQ messages. It is called by
3210 * spdsock_wput. Much of the T_CAPABILITY_ACK information is copied from
3211 * spdsock_g_t_info_ack. The current state of the stream is copied from
3212 * spdsock_state.
3213 */
3214 static void
spdsock_capability_req(queue_t * q,mblk_t * mp)3215 spdsock_capability_req(queue_t *q, mblk_t *mp)
3216 {
3217 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3218 t_uscalar_t cap_bits1;
3219 struct T_capability_ack *tcap;
3220
3221 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
3222
3223 mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
3224 mp->b_datap->db_type, T_CAPABILITY_ACK);
3225 if (mp == NULL)
3226 return;
3227
3228 tcap = (struct T_capability_ack *)mp->b_rptr;
3229 tcap->CAP_bits1 = 0;
3230
3231 if (cap_bits1 & TC1_INFO) {
3232 spdsock_copy_info(&tcap->INFO_ack, ss);
3233 tcap->CAP_bits1 |= TC1_INFO;
3234 }
3235
3236 qreply(q, mp);
3237 }
3238
3239 /*
3240 * This routine responds to T_INFO_REQ messages. It is called by
3241 * spdsock_wput_other.
3242 * Most of the T_INFO_ACK information is copied from spdsock_g_t_info_ack.
3243 * The current state of the stream is copied from spdsock_state.
3244 */
3245 static void
spdsock_info_req(q,mp)3246 spdsock_info_req(q, mp)
3247 queue_t *q;
3248 mblk_t *mp;
3249 {
3250 mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
3251 T_INFO_ACK);
3252 if (mp == NULL)
3253 return;
3254 spdsock_copy_info((struct T_info_ack *)mp->b_rptr,
3255 (spdsock_t *)q->q_ptr);
3256 qreply(q, mp);
3257 }
3258
3259 /*
3260 * spdsock_err_ack. This routine creates a
3261 * T_ERROR_ACK message and passes it
3262 * upstream.
3263 */
3264 static void
spdsock_err_ack(q,mp,t_error,sys_error)3265 spdsock_err_ack(q, mp, t_error, sys_error)
3266 queue_t *q;
3267 mblk_t *mp;
3268 int t_error;
3269 int sys_error;
3270 {
3271 if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
3272 qreply(q, mp);
3273 }
3274
3275 /*
3276 * This routine retrieves the current status of socket options.
3277 * It returns the size of the option retrieved.
3278 */
3279 /* ARGSUSED */
3280 int
spdsock_opt_get(queue_t * q,int level,int name,uchar_t * ptr)3281 spdsock_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
3282 {
3283 int *i1 = (int *)ptr;
3284
3285 switch (level) {
3286 case SOL_SOCKET:
3287 switch (name) {
3288 case SO_TYPE:
3289 *i1 = SOCK_RAW;
3290 break;
3291 /*
3292 * The following two items can be manipulated,
3293 * but changing them should do nothing.
3294 */
3295 case SO_SNDBUF:
3296 *i1 = (int)q->q_hiwat;
3297 break;
3298 case SO_RCVBUF:
3299 *i1 = (int)(RD(q)->q_hiwat);
3300 break;
3301 }
3302 break;
3303 default:
3304 return (0);
3305 }
3306 return (sizeof (int));
3307 }
3308
3309 /*
3310 * This routine sets socket options.
3311 */
3312 /* ARGSUSED */
3313 int
spdsock_opt_set(queue_t * q,uint_t mgmt_flags,int level,int name,uint_t inlen,uchar_t * invalp,uint_t * outlenp,uchar_t * outvalp,void * thisdg_attrs,cred_t * cr)3314 spdsock_opt_set(queue_t *q, uint_t mgmt_flags, int level, int name,
3315 uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
3316 void *thisdg_attrs, cred_t *cr)
3317 {
3318 int *i1 = (int *)invalp;
3319 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3320 spd_stack_t *spds = ss->spdsock_spds;
3321
3322 switch (level) {
3323 case SOL_SOCKET:
3324 switch (name) {
3325 case SO_SNDBUF:
3326 if (*i1 > spds->spds_max_buf)
3327 return (ENOBUFS);
3328 q->q_hiwat = *i1;
3329 break;
3330 case SO_RCVBUF:
3331 if (*i1 > spds->spds_max_buf)
3332 return (ENOBUFS);
3333 RD(q)->q_hiwat = *i1;
3334 (void) proto_set_rx_hiwat(RD(q), NULL, *i1);
3335 break;
3336 }
3337 break;
3338 }
3339 return (0);
3340 }
3341
3342
/*
 * Handle STREAMS messages other than inline PF_POLICY data: TPI
 * primitives (M_PROTO/M_PCPROTO), ioctls (ndd get/set) and flushes.
 * Anything unrecognized is freed ("black-holed") at the bottom.
 */
static void
spdsock_wput_other(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	int error;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	cred_t *cr;

	switch (mp->b_datap->db_type) {
	case M_PROTO:
	case M_PCPROTO:
		/* Need at least a TPI primitive type field to look at. */
		if ((mp->b_wptr - mp->b_rptr) < sizeof (long)) {
			ss3dbg(spds, (
			    "spdsock_wput_other: Not big enough M_PROTO\n"));
			freemsg(mp);
			return;
		}
		switch (((union T_primitives *)mp->b_rptr)->type) {
		case T_CAPABILITY_REQ:
			spdsock_capability_req(q, mp);
			break;
		case T_INFO_REQ:
			spdsock_info_req(q, mp);
			break;
		case T_SVR4_OPTMGMT_REQ:
		case T_OPTMGMT_REQ:
			/*
			 * All Solaris components should pass a db_credp
			 * for this TPI message, hence we ASSERT.
			 * But in case there is some other M_PROTO that looks
			 * like a TPI message sent by some other kernel
			 * component, we check and return an error.
			 */
			cr = msg_getcred(mp, NULL);
			ASSERT(cr != NULL);
			if (cr == NULL) {
				spdsock_err_ack(q, mp, TSYSERR, EINVAL);
				return;
			}
			/* Dispatch to the matching option-management engine. */
			if (((union T_primitives *)mp->b_rptr)->type ==
			    T_SVR4_OPTMGMT_REQ) {
				svr4_optcom_req(q, mp, cr, &spdsock_opt_obj);
			} else {
				tpi_optcom_req(q, mp, cr, &spdsock_opt_obj);
			}
			break;
		case T_DATA_REQ:
		case T_EXDATA_REQ:
		case T_ORDREL_REQ:
			/* Illegal for spdsock; hit the stream with M_ERROR. */
			freemsg(mp);
			(void) putnextctl1(RD(q), M_ERROR, EPROTO);
			break;
		default:
			/* Not supported by spdsock. */
			spdsock_err_ack(q, mp, TNOTSUPPORT, 0);
			break;
		}
		return;
	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;
		error = EINVAL;

		switch (iocp->ioc_cmd) {
		case ND_SET:
		case ND_GET:
			/* ndd tunable access; nd_getset() builds the reply. */
			if (nd_getset(q, spds->spds_g_nd, mp)) {
				qreply(q, mp);
				return;
			} else
				error = ENOENT;
			/* FALLTHRU */
		default:
			miocnak(q, mp, 0, error);
			return;
		}
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			/* Let the read side see the write-cleared flush. */
			qreply(q, mp);
			return;
		}
		/* Else FALLTHRU */
	}

	/* If fell through, just black-hole the message. */
	freemsg(mp);
}
3438
/*
 * Write-side put procedure.  Unwraps T_DATA_REQ-framed PF_POLICY
 * messages and hands them to spdsock_parse(); while a policy dump is
 * in progress every message is deferred onto the queue for
 * spdsock_wsrv() to replay later.
 */
static void
spdsock_wput(queue_t *q, mblk_t *mp)
{
	uint8_t *rptr = mp->b_rptr;
	mblk_t *mp1;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;

	/*
	 * If we're dumping, defer processing other messages until the
	 * dump completes.
	 */
	if (ss->spdsock_dump_req != NULL) {
		if (!putq(q, mp))
			freemsg(mp);
		return;
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:
		/*
		 * Silently discard.  Only data wrapped in a T_DATA_REQ
		 * (below) is treated as a PF_POLICY message.
		 */
		ss2dbg(spds, ("raw M_DATA in spdsock.\n"));
		freemsg(mp);
		return;
	case M_PROTO:
	case M_PCPROTO:
		if ((mp->b_wptr - rptr) >= sizeof (struct T_data_req)) {
			if (((union T_primitives *)rptr)->type == T_DATA_REQ) {
				if ((mp1 = mp->b_cont) == NULL) {
					/* No data after T_DATA_REQ. */
					ss2dbg(spds,
					    ("No data after DATA_REQ.\n"));
					freemsg(mp);
					return;
				}
				/* Strip the T_DATA_REQ header block. */
				freeb(mp);
				mp = mp1;
				ss2dbg(spds, ("T_DATA_REQ\n"));
				break; /* Out of switch. */
			}
		}
		/* FALLTHRU */
	default:
		ss3dbg(spds, ("In default wput case (%d %d).\n",
		    mp->b_datap->db_type, ((union T_primitives *)rptr)->type));
		spdsock_wput_other(q, mp);
		return;
	}

	/* I now have a PF_POLICY message in an M_DATA block. */
	spdsock_parse(q, mp);
}
3493
/*
 * Device open procedure, called when new queue pair created.
 * We are passed the read-side queue.
 *
 * Fails with EPERM unless the caller passes the secpolicy_ip_config()
 * check.  On success a minor number and a per-instance spdsock_t are
 * allocated, and the netstack reference obtained here is held until
 * spdsock_close().
 */
/* ARGSUSED */
static int
spdsock_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	spdsock_t *ss;
	queue_t *oq = OTHERQ(q);	/* The write-side queue. */
	minor_t ssminor;
	netstack_t *ns;
	spd_stack_t *spds;

	if (secpolicy_ip_config(credp, B_FALSE) != 0)
		return (EPERM);

	if (q->q_ptr != NULL)
		return (0);  /* Re-open of an already open instance. */

	/* Only device opens; we are not a pushable module. */
	if (sflag & MODOPEN)
		return (EINVAL);

	/* Takes a netstack reference; released on failure or at close. */
	ns = netstack_find_by_cred(credp);
	ASSERT(ns != NULL);
	spds = ns->netstack_spdsock;
	ASSERT(spds != NULL);

	ss2dbg(spds, ("Made it into PF_POLICY socket open.\n"));

	/*
	 * A NULL return from vmem_alloc() shows up here as minor 0.
	 * NOTE(review): assumes the spdsock_vmem arena never hands out
	 * minor 0 for a real allocation -- confirm at arena creation.
	 */
	ssminor = (minor_t)(uintptr_t)vmem_alloc(spdsock_vmem, 1, VM_NOSLEEP);
	if (ssminor == 0) {
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}
	ss = kmem_zalloc(sizeof (spdsock_t), KM_NOSLEEP);
	if (ss == NULL) {
		/* Unwind the minor and the netstack hold. */
		vmem_free(spdsock_vmem, (void *)(uintptr_t)ssminor, 1);
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}

	ss->spdsock_minor = ssminor;
	ss->spdsock_state = TS_UNBND;	/* TPI: unbound. */
	ss->spdsock_dump_req = NULL;

	ss->spdsock_spds = spds;

	/* Both queue halves share the same instance pointer. */
	q->q_ptr = ss;
	oq->q_ptr = ss;

	q->q_hiwat = spds->spds_recv_hiwat;

	oq->q_hiwat = spds->spds_xmit_hiwat;
	oq->q_lowat = spds->spds_xmit_lowat;

	qprocson(q);
	(void) proto_set_rx_hiwat(q, NULL, spds->spds_recv_hiwat);

	/* Clone: report the minor we allocated back to the caller. */
	*devp = makedevice(getmajor(*devp), ss->spdsock_minor);
	return (0);
}
3556
/*
 * Read-side service procedure, invoked when we get back-enabled
 * when buffer space becomes available.
 *
 * Dump another chunk if we were dumping before; when we finish, kick
 * the write-side queue in case it's waiting for read queue space.
 */
void
spdsock_rsrv(queue_t *q)
{
	spdsock_t *ss = q->q_ptr;

	if (ss->spdsock_dump_req != NULL)
		spdsock_dump_some(q, ss);

	/*
	 * Deliberately a second "if", not an "else": spdsock_dump_some()
	 * presumably clears spdsock_dump_req when the dump completes, and
	 * in that case the write side should be re-enabled right away.
	 */
	if (ss->spdsock_dump_req == NULL)
		qenable(OTHERQ(q));
}
3575
/*
 * Write-side service procedure, invoked when we defer processing
 * if another message is received while a dump is in progress.
 */
void
spdsock_wsrv(queue_t *q)
{
	spdsock_t *ss = q->q_ptr;
	mblk_t *mp;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	/* Still dumping: poke the read side along and try again later. */
	if (ss->spdsock_dump_req != NULL) {
		qenable(OTHERQ(q));
		return;
	}

	while ((mp = getq(q)) != NULL) {
		if (ipsec_loaded(ipss)) {
			spdsock_wput(q, mp);
			/* wput may have started a new dump; stop draining. */
			if (ss->spdsock_dump_req != NULL)
				return;
		} else if (!ipsec_failed(ipss)) {
			/*
			 * IPsec is still loading; re-queue the message.
			 * NOTE(review): putq() appends to this same queue, so
			 * the loop can pick the message straight back up --
			 * presumably the load-state check breaks the cycle
			 * quickly; confirm this cannot spin.
			 */
			(void) putq(q, mp);
		} else {
			/* IPsec load failed permanently; bounce the message. */
			spdsock_error(q, mp, EPFNOSUPPORT, 0);
		}
	}
}
3604
3605 static int
spdsock_close(queue_t * q)3606 spdsock_close(queue_t *q)
3607 {
3608 spdsock_t *ss = q->q_ptr;
3609 spd_stack_t *spds = ss->spdsock_spds;
3610
3611 qprocsoff(q);
3612
3613 /* Safe assumption. */
3614 ASSERT(ss != NULL);
3615
3616 if (ss->spdsock_timeout != 0)
3617 (void) quntimeout(q, ss->spdsock_timeout);
3618
3619 ss3dbg(spds, ("Driver close, PF_POLICY socket is going away.\n"));
3620
3621 vmem_free(spdsock_vmem, (void *)(uintptr_t)ss->spdsock_minor, 1);
3622 netstack_rele(ss->spdsock_spds->spds_netstack);
3623
3624 kmem_free(ss, sizeof (spdsock_t));
3625 return (0);
3626 }
3627
/*
 * Merge the IPsec algorithms tables with the received algorithm information.
 * Called with spds_alg_lock held; acquires ipsec_alg_lock internally for
 * the table swap itself.
 */
void
spdsock_merge_algs(spd_stack_t *spds)
{
	ipsec_alginfo_t *alg, *oalg;
	ipsec_algtype_t algtype;
	uint_t algidx, algid, nalgs;
	crypto_mech_name_t *mechs;
	uint_t mech_count, mech_idx;
	netstack_t *ns = spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));

	/*
	 * Get the list of supported mechanisms from the crypto framework.
	 * If a mechanism is supported by KCF, resolve its mechanism
	 * id and mark it as being valid. This operation must be done
	 * without holding alg_lock, since it can cause a provider
	 * module to be loaded and the provider notification callback to
	 * be invoked.
	 */
	mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
			int algflags = 0;
			crypto_mech_type_t mt = CRYPTO_MECHANISM_INVALID;

			alg = spds->spds_algs[algtype][algid];
			if (alg == NULL)
				continue;

			/*
			 * The NULL encryption algorithm is a special
			 * case because there are no mechanisms, yet
			 * the algorithm is still valid.
			 */
			if (alg->alg_id == SADB_EALG_NULL) {
				alg->alg_mech_type = CRYPTO_MECHANISM_INVALID;
				alg->alg_flags |= ALG_FLAG_VALID;
				continue;
			}

			/* Match this algorithm's mechanism name against KCF. */
			for (mech_idx = 0; mech_idx < mech_count; mech_idx++) {
				if (strncmp(alg->alg_mech_name, mechs[mech_idx],
				    CRYPTO_MAX_MECH_NAME) == 0) {
					mt = crypto_mech2id(alg->alg_mech_name);
					ASSERT(mt != CRYPTO_MECHANISM_INVALID);
					algflags = ALG_FLAG_VALID;
					break;
				}
			}
			alg->alg_mech_type = mt;
			alg->alg_flags |= algflags;
		}
	}

	mutex_enter(&ipss->ipsec_alg_lock);

	/*
	 * For each algorithm currently defined, check if it is
	 * present in the new tables created from the SPD_UPDATEALGS
	 * message received from user-space.
	 * Delete the algorithm entries that are currently defined
	 * but not part of the new tables.
	 */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		nalgs = ipss->ipsec_nalgs[algtype];
		for (algidx = 0; algidx < nalgs; algidx++) {
			algid = ipss->ipsec_sortlist[algtype][algidx];
			if (spds->spds_algs[algtype][algid] == NULL)
				ipsec_alg_unreg(algtype, algid, ns);
		}
	}

	/*
	 * For each algorithm we just received, check if it is
	 * present in the currently defined tables. If it is, swap
	 * the entry with the one we just allocated.
	 * If the new algorithm is not in the current tables,
	 * add it.
	 */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
			alg = spds->spds_algs[algtype][algid];
			if (alg == NULL)
				continue;

			if ((oalg = ipss->ipsec_alglists[algtype][algid]) ==
			    NULL) {
				/*
				 * New algorithm, add it to the algorithm
				 * table.
				 */
				ipsec_alg_reg(algtype, alg, ns);
			} else {
				/*
				 * Algorithm is already in the table. Swap
				 * the existing entry with the new one.
				 */
				ipsec_alg_fix_min_max(alg, algtype, ns);
				ipss->ipsec_alglists[algtype][algid] = alg;
				ipsec_alg_free(oalg);
			}
			/* Entry handed off (or freed); clear our slot. */
			spds->spds_algs[algtype][algid] = NULL;
		}
	}

	/* Copy over the per-algorithm-type execution mode settings. */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		ipss->ipsec_algs_exec_mode[algtype] =
		    spds->spds_algs_exec_mode[algtype];
	}

	mutex_exit(&ipss->ipsec_alg_lock);

	crypto_free_mech_list(mechs, mech_count);

	/* Notify AH and ESP that the algorithm tables have changed. */
	ipsecah_algs_changed(ns);
	ipsecesp_algs_changed(ns);
}
3750