1 // SPDX-License-Identifier: GPL-2.0-only 2 /* xfrm_user.c: User interface to configure xfrm engine. 3 * 4 * Copyright (C) 2002 David S. Miller (davem@redhat.com) 5 * 6 * Changes: 7 * Mitsuru KANDA @USAGI 8 * Kazunori MIYAZAWA @USAGI 9 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 10 * IPv6 support 11 * 12 */ 13 14 #include <linux/compat.h> 15 #include <linux/crypto.h> 16 #include <linux/module.h> 17 #include <linux/kernel.h> 18 #include <linux/types.h> 19 #include <linux/slab.h> 20 #include <linux/socket.h> 21 #include <linux/string.h> 22 #include <linux/net.h> 23 #include <linux/skbuff.h> 24 #include <linux/pfkeyv2.h> 25 #include <linux/ipsec.h> 26 #include <linux/init.h> 27 #include <linux/security.h> 28 #include <net/sock.h> 29 #include <net/xfrm.h> 30 #include <net/netlink.h> 31 #include <net/ah.h> 32 #include <linux/uaccess.h> 33 #if IS_ENABLED(CONFIG_IPV6) 34 #include <linux/in6.h> 35 #endif 36 #include <linux/unaligned.h> 37 38 static struct sock *xfrm_net_nlsk(const struct net *net, const struct sk_buff *skb) 39 { 40 /* get the source of this request, see netlink_unicast_kernel */ 41 const struct sock *sk = NETLINK_CB(skb).sk; 42 43 /* sk is refcounted, the netns stays alive and nlsk with it */ 44 return rcu_dereference_protected(net->xfrm.nlsk, sk->sk_net_refcnt); 45 } 46 47 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type, 48 struct netlink_ext_ack *extack) 49 { 50 struct nlattr *rt = attrs[type]; 51 struct xfrm_algo *algp; 52 53 if (!rt) 54 return 0; 55 56 algp = nla_data(rt); 57 if (nla_len(rt) < (int)xfrm_alg_len(algp)) { 58 NL_SET_ERR_MSG(extack, "Invalid AUTH/CRYPT/COMP attribute length"); 59 return -EINVAL; 60 } 61 62 switch (type) { 63 case XFRMA_ALG_AUTH: 64 case XFRMA_ALG_CRYPT: 65 case XFRMA_ALG_COMP: 66 break; 67 68 default: 69 NL_SET_ERR_MSG(extack, "Invalid algorithm attribute type"); 70 return -EINVAL; 71 } 72 73 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; 74 return 0; 75 } 76 77 static int 
verify_auth_trunc(struct nlattr **attrs, 78 struct netlink_ext_ack *extack) 79 { 80 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC]; 81 struct xfrm_algo_auth *algp; 82 83 if (!rt) 84 return 0; 85 86 algp = nla_data(rt); 87 if (nla_len(rt) < (int)xfrm_alg_auth_len(algp)) { 88 NL_SET_ERR_MSG(extack, "Invalid AUTH_TRUNC attribute length"); 89 return -EINVAL; 90 } 91 92 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; 93 return 0; 94 } 95 96 static int verify_aead(struct nlattr **attrs, struct netlink_ext_ack *extack) 97 { 98 struct nlattr *rt = attrs[XFRMA_ALG_AEAD]; 99 struct xfrm_algo_aead *algp; 100 101 if (!rt) 102 return 0; 103 104 algp = nla_data(rt); 105 if (nla_len(rt) < (int)aead_len(algp)) { 106 NL_SET_ERR_MSG(extack, "Invalid AEAD attribute length"); 107 return -EINVAL; 108 } 109 110 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; 111 return 0; 112 } 113 114 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type, 115 xfrm_address_t **addrp) 116 { 117 struct nlattr *rt = attrs[type]; 118 119 if (rt && addrp) 120 *addrp = nla_data(rt); 121 } 122 123 static inline int verify_sec_ctx_len(struct nlattr **attrs, struct netlink_ext_ack *extack) 124 { 125 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 126 struct xfrm_user_sec_ctx *uctx; 127 128 if (!rt) 129 return 0; 130 131 uctx = nla_data(rt); 132 if (uctx->len > nla_len(rt) || 133 uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) { 134 NL_SET_ERR_MSG(extack, "Invalid security context length"); 135 return -EINVAL; 136 } 137 138 return 0; 139 } 140 141 static inline int verify_replay(struct xfrm_usersa_info *p, 142 struct nlattr **attrs, u8 sa_dir, 143 struct netlink_ext_ack *extack) 144 { 145 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; 146 struct xfrm_replay_state_esn *rs; 147 148 if (!rt) { 149 if (p->flags & XFRM_STATE_ESN) { 150 NL_SET_ERR_MSG(extack, "Missing required attribute for ESN"); 151 return -EINVAL; 152 } 153 return 0; 154 } 155 156 rs = nla_data(rt); 157 
158 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) { 159 NL_SET_ERR_MSG(extack, "ESN bitmap length must be <= 128"); 160 return -EINVAL; 161 } 162 163 if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) && 164 nla_len(rt) != sizeof(*rs)) { 165 NL_SET_ERR_MSG(extack, "ESN attribute is too short to fit the full bitmap length"); 166 return -EINVAL; 167 } 168 169 /* As only ESP and AH support ESN feature. */ 170 if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) { 171 NL_SET_ERR_MSG(extack, "ESN only supported for ESP and AH"); 172 return -EINVAL; 173 } 174 175 if (p->replay_window != 0) { 176 NL_SET_ERR_MSG(extack, "ESN not compatible with legacy replay_window"); 177 return -EINVAL; 178 } 179 180 if (sa_dir == XFRM_SA_DIR_OUT) { 181 if (rs->replay_window) { 182 NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA"); 183 return -EINVAL; 184 } 185 if (rs->seq || rs->seq_hi) { 186 NL_SET_ERR_MSG(extack, 187 "Replay seq and seq_hi should be 0 for output SA"); 188 return -EINVAL; 189 } 190 191 if (!(p->flags & XFRM_STATE_ESN)) { 192 if (rs->oseq_hi) { 193 NL_SET_ERR_MSG( 194 extack, 195 "Replay oseq_hi should be 0 in non-ESN mode for output SA"); 196 return -EINVAL; 197 } 198 if (rs->oseq == U32_MAX) { 199 NL_SET_ERR_MSG( 200 extack, 201 "Replay oseq should be less than 0xFFFFFFFF in non-ESN mode for output SA"); 202 return -EINVAL; 203 } 204 } else { 205 if (rs->oseq == U32_MAX && rs->oseq_hi == U32_MAX) { 206 NL_SET_ERR_MSG( 207 extack, 208 "Replay oseq and oseq_hi should be less than 0xFFFFFFFF for output SA"); 209 return -EINVAL; 210 } 211 } 212 if (rs->bmp_len) { 213 NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA"); 214 return -EINVAL; 215 } 216 } 217 218 if (sa_dir == XFRM_SA_DIR_IN) { 219 if (rs->oseq || rs->oseq_hi) { 220 NL_SET_ERR_MSG(extack, 221 "Replay oseq and oseq_hi should be 0 for input SA"); 222 return -EINVAL; 223 } 224 if (!(p->flags & XFRM_STATE_ESN)) { 225 if (rs->seq_hi) { 226 NL_SET_ERR_MSG( 
227 extack, 228 "Replay seq_hi should be 0 in non-ESN mode for input SA"); 229 return -EINVAL; 230 } 231 232 if (rs->seq == U32_MAX) { 233 NL_SET_ERR_MSG( 234 extack, 235 "Replay seq should be less than 0xFFFFFFFF in non-ESN mode for input SA"); 236 return -EINVAL; 237 } 238 } else { 239 if (rs->seq == U32_MAX && rs->seq_hi == U32_MAX) { 240 NL_SET_ERR_MSG( 241 extack, 242 "Replay seq and seq_hi should be less than 0xFFFFFFFF for input SA"); 243 return -EINVAL; 244 } 245 } 246 } 247 248 return 0; 249 } 250 251 static int verify_newsa_info(struct xfrm_usersa_info *p, 252 struct nlattr **attrs, 253 struct netlink_ext_ack *extack) 254 { 255 int err; 256 u8 sa_dir = nla_get_u8_default(attrs[XFRMA_SA_DIR], 0); 257 u16 family = p->sel.family; 258 259 err = -EINVAL; 260 switch (p->family) { 261 case AF_INET: 262 break; 263 264 case AF_INET6: 265 #if IS_ENABLED(CONFIG_IPV6) 266 break; 267 #else 268 err = -EAFNOSUPPORT; 269 NL_SET_ERR_MSG(extack, "IPv6 support disabled"); 270 goto out; 271 #endif 272 273 default: 274 NL_SET_ERR_MSG(extack, "Invalid address family"); 275 goto out; 276 } 277 278 if (!family && !(p->flags & XFRM_STATE_AF_UNSPEC)) 279 family = p->family; 280 281 switch (family) { 282 case AF_UNSPEC: 283 break; 284 285 case AF_INET: 286 if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) { 287 NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)"); 288 goto out; 289 } 290 291 break; 292 293 case AF_INET6: 294 #if IS_ENABLED(CONFIG_IPV6) 295 if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) { 296 NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)"); 297 goto out; 298 } 299 300 break; 301 #else 302 NL_SET_ERR_MSG(extack, "IPv6 support disabled"); 303 err = -EAFNOSUPPORT; 304 goto out; 305 #endif 306 307 default: 308 NL_SET_ERR_MSG(extack, "Invalid address family in selector"); 309 goto out; 310 } 311 312 err = -EINVAL; 313 switch (p->id.proto) { 314 case IPPROTO_AH: 315 if 
(!attrs[XFRMA_ALG_AUTH] && 316 !attrs[XFRMA_ALG_AUTH_TRUNC]) { 317 NL_SET_ERR_MSG(extack, "Missing required attribute for AH: AUTH_TRUNC or AUTH"); 318 goto out; 319 } 320 321 if (attrs[XFRMA_ALG_AEAD] || 322 attrs[XFRMA_ALG_CRYPT] || 323 attrs[XFRMA_ALG_COMP] || 324 attrs[XFRMA_TFCPAD]) { 325 NL_SET_ERR_MSG(extack, "Invalid attributes for AH: AEAD, CRYPT, COMP, TFCPAD"); 326 goto out; 327 } 328 break; 329 330 case IPPROTO_ESP: 331 if (attrs[XFRMA_ALG_COMP]) { 332 NL_SET_ERR_MSG(extack, "Invalid attribute for ESP: COMP"); 333 goto out; 334 } 335 336 if (!attrs[XFRMA_ALG_AUTH] && 337 !attrs[XFRMA_ALG_AUTH_TRUNC] && 338 !attrs[XFRMA_ALG_CRYPT] && 339 !attrs[XFRMA_ALG_AEAD]) { 340 NL_SET_ERR_MSG(extack, "Missing required attribute for ESP: at least one of AUTH, AUTH_TRUNC, CRYPT, AEAD"); 341 goto out; 342 } 343 344 if ((attrs[XFRMA_ALG_AUTH] || 345 attrs[XFRMA_ALG_AUTH_TRUNC] || 346 attrs[XFRMA_ALG_CRYPT]) && 347 attrs[XFRMA_ALG_AEAD]) { 348 NL_SET_ERR_MSG(extack, "Invalid attribute combination for ESP: AEAD can't be used with AUTH, AUTH_TRUNC, CRYPT"); 349 goto out; 350 } 351 352 if (attrs[XFRMA_TFCPAD] && 353 p->mode != XFRM_MODE_TUNNEL) { 354 NL_SET_ERR_MSG(extack, "TFC padding can only be used in tunnel mode"); 355 goto out; 356 } 357 if ((attrs[XFRMA_IPTFS_DROP_TIME] || 358 attrs[XFRMA_IPTFS_REORDER_WINDOW] || 359 attrs[XFRMA_IPTFS_DONT_FRAG] || 360 attrs[XFRMA_IPTFS_INIT_DELAY] || 361 attrs[XFRMA_IPTFS_MAX_QSIZE] || 362 attrs[XFRMA_IPTFS_PKT_SIZE]) && 363 p->mode != XFRM_MODE_IPTFS) { 364 NL_SET_ERR_MSG(extack, "IP-TFS options can only be used in IP-TFS mode"); 365 goto out; 366 } 367 break; 368 369 case IPPROTO_COMP: 370 if (!attrs[XFRMA_ALG_COMP]) { 371 NL_SET_ERR_MSG(extack, "Missing required attribute for COMP: COMP"); 372 goto out; 373 } 374 375 if (attrs[XFRMA_ALG_AEAD] || 376 attrs[XFRMA_ALG_AUTH] || 377 attrs[XFRMA_ALG_AUTH_TRUNC] || 378 attrs[XFRMA_ALG_CRYPT] || 379 attrs[XFRMA_TFCPAD]) { 380 NL_SET_ERR_MSG(extack, "Invalid attributes for COMP: AEAD, 
AUTH, AUTH_TRUNC, CRYPT, TFCPAD"); 381 goto out; 382 } 383 384 if (ntohl(p->id.spi) >= 0x10000) { 385 NL_SET_ERR_MSG(extack, "SPI is too large for COMP (must be < 0x10000)"); 386 goto out; 387 } 388 break; 389 390 #if IS_ENABLED(CONFIG_IPV6) 391 case IPPROTO_DSTOPTS: 392 case IPPROTO_ROUTING: 393 if (attrs[XFRMA_ALG_COMP] || 394 attrs[XFRMA_ALG_AUTH] || 395 attrs[XFRMA_ALG_AUTH_TRUNC] || 396 attrs[XFRMA_ALG_AEAD] || 397 attrs[XFRMA_ALG_CRYPT] || 398 attrs[XFRMA_ENCAP] || 399 attrs[XFRMA_SEC_CTX] || 400 attrs[XFRMA_TFCPAD]) { 401 NL_SET_ERR_MSG(extack, "Invalid attributes for DSTOPTS/ROUTING"); 402 goto out; 403 } 404 405 if (!attrs[XFRMA_COADDR]) { 406 NL_SET_ERR_MSG(extack, "Missing required COADDR attribute for DSTOPTS/ROUTING"); 407 goto out; 408 } 409 break; 410 #endif 411 412 default: 413 NL_SET_ERR_MSG(extack, "Unsupported protocol"); 414 goto out; 415 } 416 417 if ((err = verify_aead(attrs, extack))) 418 goto out; 419 if ((err = verify_auth_trunc(attrs, extack))) 420 goto out; 421 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH, extack))) 422 goto out; 423 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT, extack))) 424 goto out; 425 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP, extack))) 426 goto out; 427 if ((err = verify_sec_ctx_len(attrs, extack))) 428 goto out; 429 if ((err = verify_replay(p, attrs, sa_dir, extack))) 430 goto out; 431 432 err = -EINVAL; 433 switch (p->mode) { 434 case XFRM_MODE_TRANSPORT: 435 case XFRM_MODE_TUNNEL: 436 case XFRM_MODE_ROUTEOPTIMIZATION: 437 case XFRM_MODE_BEET: 438 break; 439 case XFRM_MODE_IPTFS: 440 if (p->id.proto != IPPROTO_ESP) { 441 NL_SET_ERR_MSG(extack, "IP-TFS mode only supported with ESP"); 442 goto out; 443 } 444 if (sa_dir == 0) { 445 NL_SET_ERR_MSG(extack, "IP-TFS mode requires in or out direction attribute"); 446 goto out; 447 } 448 break; 449 450 default: 451 NL_SET_ERR_MSG(extack, "Unsupported mode"); 452 goto out; 453 } 454 455 err = 0; 456 457 if (attrs[XFRMA_MTIMER_THRESH]) { 458 if 
(!attrs[XFRMA_ENCAP]) { 459 NL_SET_ERR_MSG(extack, "MTIMER_THRESH attribute can only be set on ENCAP states"); 460 err = -EINVAL; 461 goto out; 462 } 463 464 if (sa_dir == XFRM_SA_DIR_OUT) { 465 NL_SET_ERR_MSG(extack, 466 "MTIMER_THRESH attribute should not be set on output SA"); 467 err = -EINVAL; 468 goto out; 469 } 470 } 471 472 if (sa_dir == XFRM_SA_DIR_OUT) { 473 if (p->flags & XFRM_STATE_DECAP_DSCP) { 474 NL_SET_ERR_MSG(extack, "Flag DECAP_DSCP should not be set for output SA"); 475 err = -EINVAL; 476 goto out; 477 } 478 479 if (p->flags & XFRM_STATE_ICMP) { 480 NL_SET_ERR_MSG(extack, "Flag ICMP should not be set for output SA"); 481 err = -EINVAL; 482 goto out; 483 } 484 485 if (p->flags & XFRM_STATE_WILDRECV) { 486 NL_SET_ERR_MSG(extack, "Flag WILDRECV should not be set for output SA"); 487 err = -EINVAL; 488 goto out; 489 } 490 491 if (p->replay_window) { 492 NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA"); 493 err = -EINVAL; 494 goto out; 495 } 496 497 if (attrs[XFRMA_IPTFS_DROP_TIME]) { 498 NL_SET_ERR_MSG(extack, "IP-TFS drop time should not be set for output SA"); 499 err = -EINVAL; 500 goto out; 501 } 502 503 if (attrs[XFRMA_IPTFS_REORDER_WINDOW]) { 504 NL_SET_ERR_MSG(extack, "IP-TFS reorder window should not be set for output SA"); 505 err = -EINVAL; 506 goto out; 507 } 508 509 if (attrs[XFRMA_REPLAY_VAL]) { 510 struct xfrm_replay_state *replay; 511 512 replay = nla_data(attrs[XFRMA_REPLAY_VAL]); 513 514 if (replay->seq || replay->bitmap) { 515 NL_SET_ERR_MSG(extack, 516 "Replay seq and bitmap should be 0 for output SA"); 517 err = -EINVAL; 518 goto out; 519 } 520 } 521 } 522 523 if (sa_dir == XFRM_SA_DIR_IN) { 524 if (p->flags & XFRM_STATE_NOPMTUDISC) { 525 NL_SET_ERR_MSG(extack, "Flag NOPMTUDISC should not be set for input SA"); 526 err = -EINVAL; 527 goto out; 528 } 529 530 if (attrs[XFRMA_SA_EXTRA_FLAGS]) { 531 u32 xflags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); 532 533 if (xflags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP) { 534 
NL_SET_ERR_MSG(extack, "Flag DONT_ENCAP_DSCP should not be set for input SA"); 535 err = -EINVAL; 536 goto out; 537 } 538 539 if (xflags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP) { 540 NL_SET_ERR_MSG(extack, "Flag OSEQ_MAY_WRAP should not be set for input SA"); 541 err = -EINVAL; 542 goto out; 543 } 544 545 } 546 547 if (attrs[XFRMA_IPTFS_DONT_FRAG]) { 548 NL_SET_ERR_MSG(extack, "IP-TFS don't fragment should not be set for input SA"); 549 err = -EINVAL; 550 goto out; 551 } 552 553 if (attrs[XFRMA_IPTFS_INIT_DELAY]) { 554 NL_SET_ERR_MSG(extack, "IP-TFS initial delay should not be set for input SA"); 555 err = -EINVAL; 556 goto out; 557 } 558 559 if (attrs[XFRMA_IPTFS_MAX_QSIZE]) { 560 NL_SET_ERR_MSG(extack, "IP-TFS max queue size should not be set for input SA"); 561 err = -EINVAL; 562 goto out; 563 } 564 565 if (attrs[XFRMA_IPTFS_PKT_SIZE]) { 566 NL_SET_ERR_MSG(extack, "IP-TFS packet size should not be set for input SA"); 567 err = -EINVAL; 568 goto out; 569 } 570 } 571 572 if (!sa_dir && attrs[XFRMA_SA_PCPU]) { 573 NL_SET_ERR_MSG(extack, "SA_PCPU only supported with SA_DIR"); 574 err = -EINVAL; 575 goto out; 576 } 577 578 out: 579 return err; 580 } 581 582 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, 583 struct xfrm_algo_desc *(*get_byname)(const char *, int), 584 struct nlattr *rta, struct netlink_ext_ack *extack) 585 { 586 struct xfrm_algo *p, *ualg; 587 struct xfrm_algo_desc *algo; 588 589 if (!rta) 590 return 0; 591 592 ualg = nla_data(rta); 593 594 algo = get_byname(ualg->alg_name, 1); 595 if (!algo) { 596 NL_SET_ERR_MSG(extack, "Requested COMP algorithm not found"); 597 return -ENOSYS; 598 } 599 *props = algo->desc.sadb_alg_id; 600 601 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL); 602 if (!p) 603 return -ENOMEM; 604 605 strscpy(p->alg_name, algo->name); 606 *algpp = p; 607 return 0; 608 } 609 610 static int attach_crypt(struct xfrm_state *x, struct nlattr *rta, 611 struct netlink_ext_ack *extack) 612 { 613 struct xfrm_algo *p, *ualg; 614 struct 
xfrm_algo_desc *algo; 615 616 if (!rta) 617 return 0; 618 619 ualg = nla_data(rta); 620 621 algo = xfrm_ealg_get_byname(ualg->alg_name, 1); 622 if (!algo) { 623 NL_SET_ERR_MSG(extack, "Requested CRYPT algorithm not found"); 624 return -ENOSYS; 625 } 626 x->props.ealgo = algo->desc.sadb_alg_id; 627 628 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL); 629 if (!p) 630 return -ENOMEM; 631 632 strscpy(p->alg_name, algo->name); 633 x->ealg = p; 634 x->geniv = algo->uinfo.encr.geniv; 635 return 0; 636 } 637 638 static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props, 639 struct nlattr *rta, struct netlink_ext_ack *extack) 640 { 641 struct xfrm_algo *ualg; 642 struct xfrm_algo_auth *p; 643 struct xfrm_algo_desc *algo; 644 645 if (!rta) 646 return 0; 647 648 ualg = nla_data(rta); 649 650 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 651 if (!algo) { 652 NL_SET_ERR_MSG(extack, "Requested AUTH algorithm not found"); 653 return -ENOSYS; 654 } 655 *props = algo->desc.sadb_alg_id; 656 657 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL); 658 if (!p) 659 return -ENOMEM; 660 661 strscpy(p->alg_name, algo->name); 662 p->alg_key_len = ualg->alg_key_len; 663 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; 664 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8); 665 666 *algpp = p; 667 return 0; 668 } 669 670 static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props, 671 struct nlattr *rta, struct netlink_ext_ack *extack) 672 { 673 struct xfrm_algo_auth *p, *ualg; 674 struct xfrm_algo_desc *algo; 675 676 if (!rta) 677 return 0; 678 679 ualg = nla_data(rta); 680 681 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 682 if (!algo) { 683 NL_SET_ERR_MSG(extack, "Requested AUTH_TRUNC algorithm not found"); 684 return -ENOSYS; 685 } 686 if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) { 687 NL_SET_ERR_MSG(extack, "Invalid length requested for truncated ICV"); 688 return -EINVAL; 689 } 690 *props = algo->desc.sadb_alg_id; 691 692 p 
= kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL); 693 if (!p) 694 return -ENOMEM; 695 696 strscpy(p->alg_name, algo->name); 697 if (!p->alg_trunc_len) 698 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; 699 700 *algpp = p; 701 return 0; 702 } 703 704 static int attach_aead(struct xfrm_state *x, struct nlattr *rta, 705 struct netlink_ext_ack *extack) 706 { 707 struct xfrm_algo_aead *p, *ualg; 708 struct xfrm_algo_desc *algo; 709 710 if (!rta) 711 return 0; 712 713 ualg = nla_data(rta); 714 715 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1); 716 if (!algo) { 717 NL_SET_ERR_MSG(extack, "Requested AEAD algorithm not found"); 718 return -ENOSYS; 719 } 720 x->props.ealgo = algo->desc.sadb_alg_id; 721 722 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL); 723 if (!p) 724 return -ENOMEM; 725 726 strscpy(p->alg_name, algo->name); 727 x->aead = p; 728 x->geniv = algo->uinfo.aead.geniv; 729 return 0; 730 } 731 732 static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn, 733 struct nlattr *rp, 734 struct netlink_ext_ack *extack) 735 { 736 struct xfrm_replay_state_esn *up; 737 unsigned int ulen; 738 739 if (!replay_esn || !rp) 740 return 0; 741 742 up = nla_data(rp); 743 ulen = xfrm_replay_state_esn_len(up); 744 745 /* Check the overall length and the internal bitmap length to avoid 746 * potential overflow. 
*/ 747 if (nla_len(rp) < (int)ulen) { 748 NL_SET_ERR_MSG(extack, "ESN attribute is too short"); 749 return -EINVAL; 750 } 751 752 if (xfrm_replay_state_esn_len(replay_esn) != ulen) { 753 NL_SET_ERR_MSG(extack, "New ESN size doesn't match the existing SA's ESN size"); 754 return -EINVAL; 755 } 756 757 if (replay_esn->bmp_len != up->bmp_len) { 758 NL_SET_ERR_MSG(extack, "New ESN bitmap size doesn't match the existing SA's ESN bitmap"); 759 return -EINVAL; 760 } 761 762 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) { 763 NL_SET_ERR_MSG(extack, "ESN replay window is longer than the bitmap"); 764 return -EINVAL; 765 } 766 767 return 0; 768 } 769 770 static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn, 771 struct xfrm_replay_state_esn **preplay_esn, 772 struct nlattr *rta) 773 { 774 struct xfrm_replay_state_esn *p, *pp, *up; 775 unsigned int klen, ulen; 776 777 if (!rta) 778 return 0; 779 780 up = nla_data(rta); 781 klen = xfrm_replay_state_esn_len(up); 782 ulen = nla_len(rta) >= (int)klen ? 
klen : sizeof(*up); 783 784 p = kzalloc(klen, GFP_KERNEL); 785 if (!p) 786 return -ENOMEM; 787 788 pp = kzalloc(klen, GFP_KERNEL); 789 if (!pp) { 790 kfree(p); 791 return -ENOMEM; 792 } 793 794 memcpy(p, up, ulen); 795 memcpy(pp, up, ulen); 796 797 *replay_esn = p; 798 *preplay_esn = pp; 799 800 return 0; 801 } 802 803 static inline unsigned int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx) 804 { 805 unsigned int len = 0; 806 807 if (xfrm_ctx) { 808 len += sizeof(struct xfrm_user_sec_ctx); 809 len += xfrm_ctx->ctx_len; 810 } 811 return len; 812 } 813 814 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) 815 { 816 memcpy(&x->id, &p->id, sizeof(x->id)); 817 memcpy(&x->sel, &p->sel, sizeof(x->sel)); 818 memcpy(&x->lft, &p->lft, sizeof(x->lft)); 819 x->props.mode = p->mode; 820 x->props.replay_window = min_t(unsigned int, p->replay_window, 821 sizeof(x->replay.bitmap) * 8); 822 x->props.reqid = p->reqid; 823 x->props.family = p->family; 824 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); 825 x->props.flags = p->flags; 826 827 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC)) 828 x->sel.family = p->family; 829 } 830 831 /* 832 * someday when pfkey also has support, we could have the code 833 * somehow made shareable and move it to xfrm_state.c - JHS 834 * 835 */ 836 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, 837 int update_esn) 838 { 839 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; 840 struct nlattr *re = update_esn ? 
attrs[XFRMA_REPLAY_ESN_VAL] : NULL; 841 struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; 842 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; 843 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; 844 struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH]; 845 846 if (re && x->replay_esn && x->preplay_esn) { 847 struct xfrm_replay_state_esn *replay_esn; 848 replay_esn = nla_data(re); 849 memcpy(x->replay_esn, replay_esn, 850 xfrm_replay_state_esn_len(replay_esn)); 851 memcpy(x->preplay_esn, replay_esn, 852 xfrm_replay_state_esn_len(replay_esn)); 853 } 854 855 if (rp) { 856 struct xfrm_replay_state *replay; 857 replay = nla_data(rp); 858 memcpy(&x->replay, replay, sizeof(*replay)); 859 memcpy(&x->preplay, replay, sizeof(*replay)); 860 } 861 862 if (lt) { 863 struct xfrm_lifetime_cur *ltime; 864 ltime = nla_data(lt); 865 x->curlft.bytes = ltime->bytes; 866 x->curlft.packets = ltime->packets; 867 x->curlft.add_time = ltime->add_time; 868 x->curlft.use_time = ltime->use_time; 869 } 870 871 if (et) 872 x->replay_maxage = nla_get_u32(et); 873 874 if (rt) 875 x->replay_maxdiff = nla_get_u32(rt); 876 877 if (mt) 878 x->mapping_maxage = nla_get_u32(mt); 879 } 880 881 static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m) 882 { 883 if (attrs[XFRMA_SET_MARK]) { 884 m->v = nla_get_u32(attrs[XFRMA_SET_MARK]); 885 m->m = nla_get_u32_default(attrs[XFRMA_SET_MARK_MASK], 886 0xffffffff); 887 } else { 888 m->v = m->m = 0; 889 } 890 } 891 892 static struct xfrm_state *xfrm_state_construct(struct net *net, 893 struct xfrm_usersa_info *p, 894 struct nlattr **attrs, 895 int *errp, 896 struct netlink_ext_ack *extack) 897 { 898 struct xfrm_state *x = xfrm_state_alloc(net); 899 int err = -ENOMEM; 900 901 if (!x) 902 goto error_no_put; 903 904 copy_from_user_state(x, p); 905 906 if (attrs[XFRMA_ENCAP]) { 907 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), 908 sizeof(*x->encap), GFP_KERNEL); 909 if (x->encap == NULL) 910 goto error; 911 } 912 913 if (attrs[XFRMA_COADDR]) { 914 x->coaddr = 
kmemdup(nla_data(attrs[XFRMA_COADDR]), 915 sizeof(*x->coaddr), GFP_KERNEL); 916 if (x->coaddr == NULL) 917 goto error; 918 } 919 920 if (attrs[XFRMA_SA_EXTRA_FLAGS]) 921 x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); 922 923 if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD], extack))) 924 goto error; 925 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo, 926 attrs[XFRMA_ALG_AUTH_TRUNC], extack))) 927 goto error; 928 if (!x->props.aalgo) { 929 if ((err = attach_auth(&x->aalg, &x->props.aalgo, 930 attrs[XFRMA_ALG_AUTH], extack))) 931 goto error; 932 } 933 if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT], extack))) 934 goto error; 935 if ((err = attach_one_algo(&x->calg, &x->props.calgo, 936 xfrm_calg_get_byname, 937 attrs[XFRMA_ALG_COMP], extack))) 938 goto error; 939 940 if (attrs[XFRMA_TFCPAD]) 941 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]); 942 943 xfrm_mark_get(attrs, &x->mark); 944 945 xfrm_smark_init(attrs, &x->props.smark); 946 947 if (attrs[XFRMA_IF_ID]) 948 x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); 949 950 if (attrs[XFRMA_SA_DIR]) 951 x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]); 952 953 if (attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]) 954 x->nat_keepalive_interval = 955 nla_get_u32(attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]); 956 957 if (attrs[XFRMA_SA_PCPU]) { 958 x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]); 959 if (x->pcpu_num >= num_possible_cpus()) { 960 err = -ERANGE; 961 NL_SET_ERR_MSG(extack, "pCPU number too big"); 962 goto error; 963 } 964 } 965 966 err = __xfrm_init_state(x, extack); 967 if (err) 968 goto error; 969 970 if (attrs[XFRMA_SEC_CTX]) { 971 err = security_xfrm_state_alloc(x, 972 nla_data(attrs[XFRMA_SEC_CTX])); 973 if (err) 974 goto error; 975 } 976 977 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn, 978 attrs[XFRMA_REPLAY_ESN_VAL]))) 979 goto error; 980 981 x->km.seq = p->seq; 982 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth; 983 /* sysctl_xfrm_aevent_etime is in 100ms units */ 984 
x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M; 985 986 if ((err = xfrm_init_replay(x, extack))) 987 goto error; 988 989 /* override default values from above */ 990 xfrm_update_ae_params(x, attrs, 0); 991 992 xfrm_set_type_offload(x, attrs[XFRMA_OFFLOAD_DEV]); 993 /* configure the hardware if offload is requested */ 994 if (attrs[XFRMA_OFFLOAD_DEV]) { 995 err = xfrm_dev_state_add(net, x, 996 nla_data(attrs[XFRMA_OFFLOAD_DEV]), 997 extack); 998 if (err) 999 goto error; 1000 } 1001 1002 if (x->mode_cbs && x->mode_cbs->user_init) { 1003 err = x->mode_cbs->user_init(net, x, attrs, extack); 1004 if (err) 1005 goto error; 1006 } 1007 1008 return x; 1009 1010 error: 1011 x->km.state = XFRM_STATE_DEAD; 1012 xfrm_state_put(x); 1013 error_no_put: 1014 *errp = err; 1015 return NULL; 1016 } 1017 1018 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 1019 struct nlattr **attrs, struct netlink_ext_ack *extack) 1020 { 1021 struct net *net = sock_net(skb->sk); 1022 struct xfrm_usersa_info *p = nlmsg_data(nlh); 1023 struct xfrm_state *x; 1024 int err; 1025 struct km_event c; 1026 1027 err = verify_newsa_info(p, attrs, extack); 1028 if (err) 1029 return err; 1030 1031 x = xfrm_state_construct(net, p, attrs, &err, extack); 1032 if (!x) 1033 return err; 1034 1035 xfrm_state_hold(x); 1036 if (nlh->nlmsg_type == XFRM_MSG_NEWSA) 1037 err = xfrm_state_add(x); 1038 else 1039 err = xfrm_state_update(x); 1040 1041 xfrm_audit_state_add(x, err ? 
0 : 1, true); 1042 1043 if (err < 0) { 1044 x->km.state = XFRM_STATE_DEAD; 1045 xfrm_dev_state_delete(x); 1046 __xfrm_state_put(x); 1047 goto out; 1048 } 1049 1050 if (x->km.state == XFRM_STATE_VOID) 1051 x->km.state = XFRM_STATE_VALID; 1052 1053 c.seq = nlh->nlmsg_seq; 1054 c.portid = nlh->nlmsg_pid; 1055 c.event = nlh->nlmsg_type; 1056 1057 km_state_notify(x, &c); 1058 out: 1059 xfrm_state_put(x); 1060 return err; 1061 } 1062 1063 static struct xfrm_state *xfrm_user_state_lookup(struct net *net, 1064 struct xfrm_usersa_id *p, 1065 struct nlattr **attrs, 1066 int *errp) 1067 { 1068 struct xfrm_state *x = NULL; 1069 struct xfrm_mark m; 1070 int err; 1071 u32 mark = xfrm_mark_get(attrs, &m); 1072 1073 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) { 1074 err = -ESRCH; 1075 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family); 1076 } else { 1077 xfrm_address_t *saddr = NULL; 1078 1079 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr); 1080 if (!saddr) { 1081 err = -EINVAL; 1082 goto out; 1083 } 1084 1085 err = -ESRCH; 1086 x = xfrm_state_lookup_byaddr(net, mark, 1087 &p->daddr, saddr, 1088 p->proto, p->family); 1089 } 1090 1091 out: 1092 if (!x && errp) 1093 *errp = err; 1094 return x; 1095 } 1096 1097 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 1098 struct nlattr **attrs, struct netlink_ext_ack *extack) 1099 { 1100 struct net *net = sock_net(skb->sk); 1101 struct xfrm_state *x; 1102 int err = -ESRCH; 1103 struct km_event c; 1104 struct xfrm_usersa_id *p = nlmsg_data(nlh); 1105 1106 x = xfrm_user_state_lookup(net, p, attrs, &err); 1107 if (x == NULL) 1108 return err; 1109 1110 if ((err = security_xfrm_state_delete(x)) != 0) 1111 goto out; 1112 1113 if (xfrm_state_kern(x)) { 1114 NL_SET_ERR_MSG(extack, "SA is in use by tunnels"); 1115 err = -EPERM; 1116 goto out; 1117 } 1118 1119 err = xfrm_state_delete(x); 1120 if (err < 0) 1121 goto out; 1122 1123 c.seq = nlh->nlmsg_seq; 1124 c.portid = nlh->nlmsg_pid; 1125 c.event = 
nlh->nlmsg_type; 1126 km_state_notify(x, &c); 1127 1128 out: 1129 xfrm_audit_state_delete(x, err ? 0 : 1, true); 1130 xfrm_state_put(x); 1131 return err; 1132 } 1133 1134 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) 1135 { 1136 memset(p, 0, sizeof(*p)); 1137 memcpy(&p->id, &x->id, sizeof(p->id)); 1138 memcpy(&p->sel, &x->sel, sizeof(p->sel)); 1139 memcpy(&p->lft, &x->lft, sizeof(p->lft)); 1140 if (x->xso.dev) 1141 xfrm_dev_state_update_stats(x); 1142 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft)); 1143 put_unaligned(x->stats.replay_window, &p->stats.replay_window); 1144 put_unaligned(x->stats.replay, &p->stats.replay); 1145 put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed); 1146 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr)); 1147 p->mode = x->props.mode; 1148 p->replay_window = x->props.replay_window; 1149 p->reqid = x->props.reqid; 1150 p->family = x->props.family; 1151 p->flags = x->props.flags; 1152 p->seq = x->km.seq; 1153 } 1154 1155 struct xfrm_dump_info { 1156 struct sk_buff *in_skb; 1157 struct sk_buff *out_skb; 1158 u32 nlmsg_seq; 1159 u16 nlmsg_flags; 1160 }; 1161 1162 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) 1163 { 1164 struct xfrm_user_sec_ctx *uctx; 1165 struct nlattr *attr; 1166 int ctx_size = sizeof(*uctx) + s->ctx_len; 1167 1168 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size); 1169 if (attr == NULL) 1170 return -EMSGSIZE; 1171 1172 uctx = nla_data(attr); 1173 uctx->exttype = XFRMA_SEC_CTX; 1174 uctx->len = ctx_size; 1175 uctx->ctx_doi = s->ctx_doi; 1176 uctx->ctx_alg = s->ctx_alg; 1177 uctx->ctx_len = s->ctx_len; 1178 memcpy(uctx + 1, s->ctx_str, s->ctx_len); 1179 1180 return 0; 1181 } 1182 1183 static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb) 1184 { 1185 struct xfrm_user_offload *xuo; 1186 struct nlattr *attr; 1187 1188 attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo)); 1189 if (attr == NULL) 1190 return -EMSGSIZE; 
	xuo = nla_data(attr);
	memset(xuo, 0, sizeof(*xuo));
	xuo->ifindex = xso->dev->ifindex;
	if (xso->dir == XFRM_DEV_OFFLOAD_IN)
		xuo->flags = XFRM_OFFLOAD_INBOUND;
	if (xso->type == XFRM_DEV_OFFLOAD_PACKET)
		xuo->flags |= XFRM_OFFLOAD_PACKET;

	return 0;
}

/* True when key material must be hidden from userspace (lockdown mode). */
static bool xfrm_redact(void)
{
	return IS_ENABLED(CONFIG_SECURITY) &&
		security_locked_down(LOCKDOWN_XFRM_SECRET);
}

/* Emit both the legacy XFRMA_ALG_AUTH and the XFRMA_ALG_AUTH_TRUNC
 * attributes for an auth algorithm.  Keys are zeroed instead of copied
 * when xfrm_redact() says secrets must not leave the kernel.
 */
static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
	struct xfrm_algo *algo;
	struct xfrm_algo_auth *ap;
	struct nlattr *nla;
	bool redact_secret = xfrm_redact();

	nla = nla_reserve(skb, XFRMA_ALG_AUTH,
			  sizeof(*algo) + (auth->alg_key_len + 7) / 8);
	if (!nla)
		return -EMSGSIZE;
	algo = nla_data(nla);
	strscpy_pad(algo->alg_name, auth->alg_name);

	if (redact_secret && auth->alg_key_len)
		memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8);
	else
		memcpy(algo->alg_key, auth->alg_key,
		       (auth->alg_key_len + 7) / 8);
	algo->alg_key_len = auth->alg_key_len;

	nla = nla_reserve(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(auth));
	if (!nla)
		return -EMSGSIZE;
	ap = nla_data(nla);
	strscpy_pad(ap->alg_name, auth->alg_name);
	ap->alg_key_len = auth->alg_key_len;
	ap->alg_trunc_len = auth->alg_trunc_len;
	if (redact_secret && auth->alg_key_len)
		memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8);
	else
		memcpy(ap->alg_key, auth->alg_key,
		       (auth->alg_key_len + 7) / 8);
	return 0;
}

/* Emit XFRMA_ALG_AEAD; key redacted under lockdown, like the auth case. */
static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb)
{
	struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_AEAD, aead_len(aead));
	struct xfrm_algo_aead *ap;
	bool redact_secret = xfrm_redact();

	if (!nla)
		return -EMSGSIZE;

	ap = nla_data(nla);
	strscpy_pad(ap->alg_name, aead->alg_name);
	ap->alg_key_len = aead->alg_key_len;
	ap->alg_icv_len = aead->alg_icv_len;

	if (redact_secret && aead->alg_key_len)
		memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
	else
		memcpy(ap->alg_key, aead->alg_key,
		       (aead->alg_key_len + 7) / 8);
	return 0;
}

/* Emit XFRMA_ALG_CRYPT (encryption algorithm); key redacted under lockdown. */
static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
{
	struct xfrm_algo *ap;
	bool redact_secret = xfrm_redact();
	struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_CRYPT,
					 xfrm_alg_len(ealg));
	if (!nla)
		return -EMSGSIZE;

	ap = nla_data(nla);
	strscpy_pad(ap->alg_name, ealg->alg_name);
	ap->alg_key_len = ealg->alg_key_len;

	if (redact_secret && ealg->alg_key_len)
		memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
	else
		memcpy(ap->alg_key, ealg->alg_key,
		       (ealg->alg_key_len + 7) / 8);

	return 0;
}

/* Emit XFRMA_ALG_COMP.  Compression algorithms carry no key, so only the
 * name is reported and the key length is forced to zero.
 */
static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb)
{
	struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg));
	struct xfrm_algo *ap;

	if (!nla)
		return -EMSGSIZE;

	ap = nla_data(nla);
	strscpy_pad(ap->alg_name, calg->alg_name);
	ap->alg_key_len = 0;

	return 0;
}

/* Emit XFRMA_ENCAP (NAT-T encapsulation template). */
static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb)
{
	struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep));
	struct xfrm_encap_tmpl *uep;

	if (!nla)
		return -EMSGSIZE;

	uep = nla_data(nla);
	memset(uep, 0, sizeof(*uep));

	uep->encap_type = ep->encap_type;
	uep->encap_sport = ep->encap_sport;
	uep->encap_dport = ep->encap_dport;
	uep->encap_oa = ep->encap_oa;

	return 0;
}

/* Emit XFRMA_SET_MARK/XFRMA_SET_MARK_MASK, but only when either the value
 * or the mask is non-zero.
 */
static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
{
	int ret = 0;

	if (m->v | m->m) {
		ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v);
		if (!ret)
			ret =
nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m); 1331 } 1332 return ret; 1333 } 1334 1335 /* Don't change this without updating xfrm_sa_len! */ 1336 static int copy_to_user_state_extra(struct xfrm_state *x, 1337 struct xfrm_usersa_info *p, 1338 struct sk_buff *skb) 1339 { 1340 int ret = 0; 1341 1342 copy_to_user_state(x, p); 1343 1344 if (x->props.extra_flags) { 1345 ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS, 1346 x->props.extra_flags); 1347 if (ret) 1348 goto out; 1349 } 1350 1351 if (x->coaddr) { 1352 ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); 1353 if (ret) 1354 goto out; 1355 } 1356 if (x->lastused) { 1357 ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused, 1358 XFRMA_PAD); 1359 if (ret) 1360 goto out; 1361 } 1362 if (x->aead) { 1363 ret = copy_to_user_aead(x->aead, skb); 1364 if (ret) 1365 goto out; 1366 } 1367 if (x->aalg) { 1368 ret = copy_to_user_auth(x->aalg, skb); 1369 if (ret) 1370 goto out; 1371 } 1372 if (x->ealg) { 1373 ret = copy_to_user_ealg(x->ealg, skb); 1374 if (ret) 1375 goto out; 1376 } 1377 if (x->calg) { 1378 ret = copy_to_user_calg(x->calg, skb); 1379 if (ret) 1380 goto out; 1381 } 1382 if (x->encap) { 1383 ret = copy_to_user_encap(x->encap, skb); 1384 if (ret) 1385 goto out; 1386 } 1387 if (x->tfcpad) { 1388 ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad); 1389 if (ret) 1390 goto out; 1391 } 1392 ret = xfrm_mark_put(skb, &x->mark); 1393 if (ret) 1394 goto out; 1395 1396 ret = xfrm_smark_put(skb, &x->props.smark); 1397 if (ret) 1398 goto out; 1399 1400 if (x->replay_esn) 1401 ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL, 1402 xfrm_replay_state_esn_len(x->replay_esn), 1403 x->replay_esn); 1404 else 1405 ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), 1406 &x->replay); 1407 if (ret) 1408 goto out; 1409 if(x->xso.dev) 1410 ret = copy_user_offload(&x->xso, skb); 1411 if (ret) 1412 goto out; 1413 if (x->if_id) { 1414 ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id); 1415 if (ret) 1416 goto out; 1417 } 1418 if 
(x->security) { 1419 ret = copy_sec_ctx(x->security, skb); 1420 if (ret) 1421 goto out; 1422 } 1423 if (x->mode_cbs && x->mode_cbs->copy_to_user) 1424 ret = x->mode_cbs->copy_to_user(x, skb); 1425 if (ret) 1426 goto out; 1427 if (x->mapping_maxage) { 1428 ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage); 1429 if (ret) 1430 goto out; 1431 } 1432 if (x->pcpu_num != UINT_MAX) { 1433 ret = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num); 1434 if (ret) 1435 goto out; 1436 } 1437 if (x->dir) 1438 ret = nla_put_u8(skb, XFRMA_SA_DIR, x->dir); 1439 1440 if (x->nat_keepalive_interval) { 1441 ret = nla_put_u32(skb, XFRMA_NAT_KEEPALIVE_INTERVAL, 1442 x->nat_keepalive_interval); 1443 if (ret) 1444 goto out; 1445 } 1446 out: 1447 return ret; 1448 } 1449 1450 static int dump_one_state(struct xfrm_state *x, int count, void *ptr) 1451 { 1452 struct xfrm_dump_info *sp = ptr; 1453 struct sk_buff *in_skb = sp->in_skb; 1454 struct sk_buff *skb = sp->out_skb; 1455 struct xfrm_translator *xtr; 1456 struct xfrm_usersa_info *p; 1457 struct nlmsghdr *nlh; 1458 int err; 1459 1460 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq, 1461 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); 1462 if (nlh == NULL) 1463 return -EMSGSIZE; 1464 1465 p = nlmsg_data(nlh); 1466 1467 err = copy_to_user_state_extra(x, p, skb); 1468 if (err) { 1469 nlmsg_cancel(skb, nlh); 1470 return err; 1471 } 1472 nlmsg_end(skb, nlh); 1473 1474 xtr = xfrm_get_translator(); 1475 if (xtr) { 1476 err = xtr->alloc_compat(skb, nlh); 1477 1478 xfrm_put_translator(xtr); 1479 if (err) { 1480 nlmsg_cancel(skb, nlh); 1481 return err; 1482 } 1483 } 1484 1485 return 0; 1486 } 1487 1488 static int xfrm_dump_sa_done(struct netlink_callback *cb) 1489 { 1490 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; 1491 struct sock *sk = cb->skb->sk; 1492 struct net *net = sock_net(sk); 1493 1494 if (cb->args[0]) 1495 xfrm_state_walk_done(walk, net); 1496 return 0; 1497 } 1498 1499 static int 
xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	/* Walk state is persisted across dump continuations in cb->args[1..]. */
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	/* First invocation: parse optional filter attributes and start walk. */
	if (!cb->args[0]) {
		struct nlattr *attrs[XFRMA_MAX+1];
		struct xfrm_address_filter *filter = NULL;
		u8 proto = 0;
		int err;

		err = nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX,
					     xfrma_policy, cb->extack);
		if (err < 0)
			return err;

		if (attrs[XFRMA_ADDRESS_FILTER]) {
			filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
					 sizeof(*filter), GFP_KERNEL);
			if (filter == NULL)
				return -ENOMEM;

			/* see addr_match(), (prefix length >> 5) << 2
			 * will be used to compare xfrm_address_t
			 */
			if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
			    filter->dplen > (sizeof(xfrm_address_t) << 3)) {
				kfree(filter);
				return -EINVAL;
			}
		}

		if (attrs[XFRMA_PROTO])
			proto = nla_get_u8(attrs[XFRMA_PROTO]);

		/* Walk takes ownership of @filter. */
		xfrm_state_walk_init(walk, proto, filter);
		cb->args[0] = 1;
	}

	(void) xfrm_state_walk(net, walk, dump_one_state, &info);

	return skb->len;
}

/* Build a single-SA reply message (non-dump path, e.g. GETSA). */
static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
					  struct xfrm_state *x, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_state(x, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}

/* A wrapper for nlmsg_multicast() checking that nlsk is still available.
 * Must be called with RCU read lock.
 */
static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
				       u32 pid, unsigned int group)
{
	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
	struct xfrm_translator *xtr;

	if (!nlsk) {
		/* Consumes @skb on every error path. */
		kfree_skb(skb);
		return -EPIPE;
	}

	xtr = xfrm_get_translator();
	if (xtr) {
		int err = xtr->alloc_compat(skb, nlmsg_hdr(skb));

		xfrm_put_translator(xtr);
		if (err) {
			kfree_skb(skb);
			return err;
		}
	}

	return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
}

/* Worst-case payload size of an XFRM_MSG_NEWSPDINFO reply. */
static inline unsigned int xfrm_spdinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhthresh))
	       + nla_total_size(sizeof(struct xfrmu_spdhthresh));
}

/* Assemble an XFRM_MSG_NEWSPDINFO message with SPD counters and hash
 * thresholds for @net.
 */
static int build_spdinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_spdinfo si;
	struct xfrmu_spdinfo spc;
	struct xfrmu_spdhinfo sph;
	struct xfrmu_spdhthresh spt4, spt6;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;
	unsigned lseq;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ...
 */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_spd_getinfo(net, &si);
	spc.incnt = si.incnt;
	spc.outcnt = si.outcnt;
	spc.fwdcnt = si.fwdcnt;
	spc.inscnt = si.inscnt;
	spc.outscnt = si.outscnt;
	spc.fwdscnt = si.fwdscnt;
	sph.spdhcnt = si.spdhcnt;
	sph.spdhmcnt = si.spdhmcnt;

	/* Seqlock read loop: retry until a consistent threshold snapshot. */
	do {
		lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		spt4.lbits = net->xfrm.policy_hthresh.lbits4;
		spt4.rbits = net->xfrm.policy_hthresh.rbits4;
		spt6.lbits = net->xfrm.policy_hthresh.lbits6;
		spt6.rbits = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));

	err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}

/* XFRM_MSG_NEWSPDINFO handler: update the SPD hash thresholds and
 * trigger a policy hash rebuild when anything changed.
 */
static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrmu_spdhthresh *thresh4 = NULL;
	struct xfrmu_spdhthresh *thresh6 = NULL;

	/* selector prefixlen thresholds to hash policies */
	if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh4)) {
			NL_SET_ERR_MSG(extack, "Invalid SPD_IPV4_HTHRESH attribute length");
			return -EINVAL;
		}
		thresh4 = nla_data(rta);
		if (thresh4->lbits > 32 || thresh4->rbits > 32) {
			NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 32 for IPv4)");
			return -EINVAL;
		}
	}
	if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh6)) {
			NL_SET_ERR_MSG(extack, "Invalid SPD_IPV6_HTHRESH attribute length");
			return -EINVAL;
		}
		thresh6 = nla_data(rta);
		if (thresh6->lbits > 128 || thresh6->rbits > 128) {
			NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 128 for IPv6)");
			return -EINVAL;
		}
	}

	if (thresh4 || thresh6) {
		/* Writer side of the seqlock read in build_spdinfo(). */
		write_seqlock(&net->xfrm.policy_hthresh.lock);
		if (thresh4) {
			net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
			net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
		}
		if (thresh6) {
			net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
			net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
		}
		write_sequnlock(&net->xfrm.policy_hthresh.lock);

		xfrm_policy_hash_rebuild(net);
	}

	return 0;
}

/* XFRM_MSG_GETSPDINFO handler: reply with SPD counters via unicast. */
static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 sportid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;
	int err;

	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	err = build_spdinfo(r_skb, net, sportid, seq, *flags);
	/* r_skb was sized by xfrm_spdinfo_msgsize(), so this cannot fail. */
	BUG_ON(err < 0);

	return nlmsg_unicast(xfrm_net_nlsk(net, skb), r_skb, sportid);
}

/* Worst-case payload size of an XFRM_MSG_NEWSADINFO reply. */
static inline unsigned int xfrm_sadinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_sadhinfo))
	       + nla_total_size(4); /* XFRMA_SAD_CNT */
}

/* Assemble an XFRM_MSG_NEWSADINFO message with SAD counters for @net. */
static int build_sadinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_sadinfo si;
	struct xfrmu_sadhinfo sh;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO,
			sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_sad_getinfo(net, &si);

	sh.sadhmcnt = si.sadhmcnt;
	sh.sadhcnt = si.sadhcnt;

	err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
	if (!err)
		err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}

/* XFRM_MSG_GETSADINFO handler: reply with SAD counters via unicast. */
static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 sportid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;
	int err;

	r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	err = build_sadinfo(r_skb, net, sportid, seq, *flags);
	/* r_skb was sized by xfrm_sadinfo_msgsize(), so this cannot fail. */
	BUG_ON(err < 0);

	return nlmsg_unicast(xfrm_net_nlsk(net, skb), r_skb, sportid);
}

/* XFRM_MSG_GETSA handler: look up one SA and unicast it back. */
static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	struct sk_buff *resp_skb;
	int err = -ESRCH;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		goto out_noput;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
	} else {
		err = nlmsg_unicast(xfrm_net_nlsk(net, skb), resp_skb, NETLINK_CB(skb).portid);
	}
	/* Drop the reference taken by the lookup. */
	xfrm_state_put(x);
out_noput:
	return err;
}

/* XFRM_MSG_ALLOCSPI handler: find (or look up by seq) the matching ACQUIRE
 * state, allocate an SPI in [p->min, p->max] for it and unicast the
 * resulting SA back to the requester.
 */
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct xfrm_translator *xtr;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;
	int family;
	int err;
	u32 mark;
	struct xfrm_mark m;
	u32 if_id = 0;
	u32 pcpu_num = UINT_MAX;

	p = nlmsg_data(nlh);
	err = verify_spi_info(p->info.id.proto, p->min, p->max, extack);
	if (err)
		goto out_noput;

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = NULL;

	mark = xfrm_mark_get(attrs, &m);

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	if (attrs[XFRMA_SA_PCPU]) {
		pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
		if (pcpu_num >= num_possible_cpus()) {
			err = -EINVAL;
			NL_SET_ERR_MSG(extack, "pCPU number too big");
			goto out_noput;
		}
	}

	/* Prefer a lookup by the ACQUIRE sequence number, but only accept
	 * it if the destination address also matches the request.
	 */
	if (p->info.seq) {
		x = xfrm_find_acq_byseq(net, mark, p->info.seq, pcpu_num);
		if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	if (!x)
		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
				  if_id, pcpu_num, p->info.id.proto, daddr,
				  &p->info.saddr, 1,
				  family);
	err = -ENOENT;
	if (!x) {
		NL_SET_ERR_MSG(extack, "Target ACQUIRE not found");
		goto out_noput;
	}

	err = xfrm_alloc_spi(x, p->min, p->max, extack);
	if (err)
		goto out;

	if (attrs[XFRMA_SA_DIR])
		x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]);

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
		goto out;
	}

	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlmsg_hdr(skb));

		xfrm_put_translator(xtr);
		if (err) {
			kfree_skb(resp_skb);
			goto out;
		}
	}

	err = nlmsg_unicast(xfrm_net_nlsk(net, skb), resp_skb,
			    NETLINK_CB(skb).portid);

out:
	xfrm_state_put(x);
out_noput:
	return err;
}

/* Validate a userspace-supplied policy direction. */
static int verify_policy_dir(u8 dir, struct netlink_ext_ack *extack)
{
	switch (dir) {
	case XFRM_POLICY_IN:
	case XFRM_POLICY_OUT:
	case XFRM_POLICY_FWD:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy direction");
		return -EINVAL;
	}

	return 0;
}

/* Validate a userspace-supplied policy type (SUB only with sub-policy
 * support compiled in).
 */
static int verify_policy_type(u8 type, struct netlink_ext_ack *extack)
{
	switch (type) {
	case XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
	case XFRM_POLICY_TYPE_SUB:
#endif
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy type");
		return -EINVAL;
	}

	return 0;
}

/* Sanity-check a new policy request: share/action values, selector prefix
 * lengths per family, direction, and index/direction consistency.
 */
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p,
				 struct netlink_ext_ack *extack)
{
	int ret;

	switch (p->share) {
	case XFRM_SHARE_ANY:
	case XFRM_SHARE_SESSION:
	case XFRM_SHARE_USER:
	case XFRM_SHARE_UNIQUE:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy share");
		return -EINVAL;
	}

	switch (p->action) {
	case XFRM_POLICY_ALLOW:
	case XFRM_POLICY_BLOCK:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy action");
		return -EINVAL;
	}

	switch (p->sel.family) {
	case AF_INET:
		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
			return -EINVAL;
		}

		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
			return -EINVAL;
		}

		break;
#else
		NL_SET_ERR_MSG(extack, "IPv6 support disabled");
		return -EAFNOSUPPORT;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Invalid selector family");
		return -EINVAL;
	}

	ret = verify_policy_dir(p->dir, extack);
	if (ret)
		return ret;
	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir)) {
		NL_SET_ERR_MSG(extack, "Policy index doesn't match direction");
		return -EINVAL;
	}

	return 0;
}

/* Attach the optional XFRMA_SEC_CTX attribute to @pol via the LSM. */
static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
}

/* Copy @nr userspace templates into the policy's template vector.
 * Caller must have validated them (see validate_tmpl()).
 */
static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
			   int nr)
{
	int i;

	xp->xfrm_nr = nr;
	for (i = 0; i < nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];

		memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
		memcpy(&t->saddr, &ut->saddr,
		       sizeof(xfrm_address_t));
		t->reqid = ut->reqid;
		t->mode = ut->mode;
		t->share = ut->share;
		t->optional = ut->optional;
		t->aalgos = ut->aalgos;
		t->ealgos = ut->ealgos;
		t->calgos = ut->calgos;
		/* If all masks are ~0, then we allow all algorithms. */
		t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
		t->encap_family = ut->family;
	}
}

/* Validate userspace templates: depth limit, per-mode family-change rules,
 * mode range, address family and protocol.  May rewrite ut[i].family in
 * place when userspace left it zero.
 */
static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
			 int dir, struct netlink_ext_ack *extack)
{
	u16 prev_family;
	int i;

	if (nr > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Template count must be <= XFRM_MAX_DEPTH (" __stringify(XFRM_MAX_DEPTH) ")");
		return -EINVAL;
	}

	prev_family = family;

	for (i = 0; i < nr; i++) {
		/* We never validated the ut->family value, so many
		 * applications simply leave it at zero.
		 * The check was
		 * never made and ut->family was ignored because all
		 * templates could be assumed to have the same family as
		 * the policy itself. Now that we will have ipv4-in-ipv6
		 * and ipv6-in-ipv4 tunnels, this is no longer true.
		 */
		if (!ut[i].family)
			ut[i].family = family;

		switch (ut[i].mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (ut[i].optional && dir == XFRM_POLICY_OUT) {
				NL_SET_ERR_MSG(extack, "Mode in optional template not allowed in outbound policy");
				return -EINVAL;
			}
			break;
		case XFRM_MODE_IPTFS:
			break;
		default:
			/* Transport-like modes may not change address family. */
			if (ut[i].family != prev_family) {
				NL_SET_ERR_MSG(extack, "Mode in template doesn't support a family change");
				return -EINVAL;
			}
			break;
		}
		if (ut[i].mode >= XFRM_MODE_MAX) {
			NL_SET_ERR_MSG(extack, "Mode in template must be < XFRM_MODE_MAX (" __stringify(XFRM_MODE_MAX) ")");
			return -EINVAL;
		}

		prev_family = ut[i].family;

		switch (ut[i].family) {
		case AF_INET:
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			break;
#endif
		default:
			NL_SET_ERR_MSG(extack, "Invalid family in template");
			return -EINVAL;
		}

		if (!xfrm_id_proto_valid(ut[i].id.proto)) {
			NL_SET_ERR_MSG(extack, "Invalid XFRM protocol in template");
			return -EINVAL;
		}
	}

	return 0;
}

/* Parse the optional XFRMA_TMPL attribute into the policy's templates. */
static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
			       int dir, struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_TMPL];

	if (!rt) {
		pol->xfrm_nr = 0;
	} else {
		struct xfrm_user_tmpl *utmpl = nla_data(rt);
		int nr = nla_len(rt) / sizeof(*utmpl);
		int err;

		err = validate_tmpl(nr, utmpl, pol->family, dir, extack);
		if (err)
			return err;

		copy_templates(pol, utmpl, nr);
	}
	return 0;
}

/* Extract the policy type from XFRMA_POLICY_TYPE (default: MAIN). */
static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs,
				      struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
	struct xfrm_userpolicy_type *upt;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	if (rt) {
		upt = nla_data(rt);
		type = upt->type;
	}

	err = verify_policy_type(type, extack);
	if (err)
		return err;

	*tp = type;
	return 0;
}

/* Copy the fixed userspace policy header into a kernel policy. */
static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
	xp->priority = p->priority;
	xp->index = p->index;
	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
	xp->action = p->action;
	xp->flags = p->flags;
	xp->family = p->sel.family;
	/* XXX xp->share = p->share; */
}

/* Inverse of copy_from_user_policy(): fill the userspace header from @xp. */
static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
	memset(p, 0, sizeof(*p));
	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
	p->priority = xp->priority;
	p->index = xp->index;
	p->sel.family = xp->family;
	p->dir = dir;
	p->action = xp->action;
	p->flags = xp->flags;
	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}

/* Allocate and populate a policy from a netlink request.  On failure
 * returns NULL with *errp set; the partially built policy is destroyed.
 */
static struct xfrm_policy *xfrm_policy_construct(struct net *net,
						 struct xfrm_userpolicy_info *p,
						 struct nlattr **attrs,
						 int *errp,
						 struct netlink_ext_ack *extack)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);

	err = copy_from_user_policy_type(&xp->type, attrs, extack);
	if (err)
		goto error;

	if (!(err = copy_from_user_tmpl(xp, attrs, p->dir, extack)))
		err = copy_from_user_sec_ctx(xp, attrs);
	if
 (err)
		goto error;

	xfrm_mark_get(attrs, &xp->mark);

	if (attrs[XFRMA_IF_ID])
		xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	/* configure the hardware if offload is requested */
	if (attrs[XFRMA_OFFLOAD_DEV]) {
		err = xfrm_dev_policy_add(net, xp,
					  nla_data(attrs[XFRMA_OFFLOAD_DEV]),
					  p->dir, extack);
		if (err)
			goto error;
	}

	return xp;
 error:
	*errp = err;
	xp->walk.dead = 1;
	xfrm_policy_destroy(xp);
	return NULL;
}

/* XFRM_MSG_NEWPOLICY / XFRM_MSG_UPDPOLICY handler: build, insert, audit
 * and announce a policy.
 */
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
	struct xfrm_policy *xp;
	struct km_event c;
	int err;
	int excl;

	err = verify_newpolicy_info(p, extack);
	if (err)
		return err;
	err = verify_sec_ctx_len(attrs, extack);
	if (err)
		return err;

	xp = xfrm_policy_construct(net, p, attrs, &err, extack);
	if (!xp)
		return err;

	/* shouldn't excl be based on nlh flags??
	 * Aha! this is anti-netlink really i.e more pfkey derived
	 * in netlink excl is a flag and you wouldn't need
	 * a type XFRM_MSG_UPDPOLICY - JHS */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);
	xfrm_audit_policy_add(xp, err ? 0 : 1, true);

	if (err) {
		/* Insert failed: unwind offload, LSM context and the policy. */
		xfrm_dev_policy_delete(xp);
		xfrm_dev_policy_free(xp);
		security_xfrm_policy_free(xp->security);
		kfree(xp);
		return err;
	}

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);

	xfrm_pol_put(xp);

	return 0;
}

/* Emit the XFRMA_TMPL attribute holding all of the policy's templates. */
static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
	int i;

	if (xp->xfrm_nr == 0)
		return 0;

	if (xp->xfrm_nr > XFRM_MAX_DEPTH)
		return -ENOBUFS;

	for (i = 0; i < xp->xfrm_nr; i++) {
		struct xfrm_user_tmpl *up = &vec[i];
		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

		memset(up, 0, sizeof(*up));
		memcpy(&up->id, &kp->id, sizeof(up->id));
		up->family = kp->encap_family;
		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
		up->reqid = kp->reqid;
		up->mode = kp->mode;
		up->share = kp->share;
		up->optional = kp->optional;
		up->aalgos = kp->aalgos;
		up->ealgos = kp->ealgos;
		up->calgos = kp->calgos;
	}

	return nla_put(skb, XFRMA_TMPL,
		       sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}

/* Emit the SA's security context attribute, if any. */
static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
{
	if (x->security) {
		return copy_sec_ctx(x->security, skb);
	}
	return 0;
}

/* Emit the policy's security context attribute, if any. */
static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
	if (xp->security)
		return copy_sec_ctx(xp->security, skb);
	return 0;
}
/* Size of the XFRMA_POLICY_TYPE attribute (0 without sub-policy support). */
static inline unsigned int userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
	return 0;
#endif
}

#ifdef CONFIG_XFRM_SUB_POLICY
/* Emit XFRMA_POLICY_TYPE describing the policy type. */
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	struct xfrm_userpolicy_type upt;

	/* Sadly there are two holes in struct xfrm_userpolicy_type */
	memset(&upt, 0, sizeof(upt));
	upt.type = type;

	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}

#else
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	return 0;
}
#endif

/* Walk callback: serialize one policy as an XFRM_MSG_NEWPOLICY message. */
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_translator *xtr;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);
	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (!err && xp->xdo.dev)
		err = copy_user_offload(&xp->xdo, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);

	/* Give the compat translator a chance to build a 32-bit view. */
	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlh);

		xfrm_put_translator(xtr);
		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}

	return 0;
}

/* Netlink dump teardown: finish the policy walk kept in cb->args. */
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
	struct net *net = sock_net(cb->skb->sk);

	xfrm_policy_walk_done(walk, net);
	return 0;
}

/* Netlink dump setup: start a walk over all policy types. */
static int xfrm_dump_policy_start(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;

	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));

	xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
	return 0;
}

/* Netlink dump step: emit as many policies as fit into @skb. */
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
	struct xfrm_dump_info info;

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);

	return skb->len;
}

/* Build a single-policy reply message (non-dump path). */
static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
					   struct xfrm_policy *xp,
					   int dir, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_policy(xp, dir, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}

/* Broadcast the current default policies to XFRMNLGRP_POLICY listeners. */
static int xfrm_notify_userpolicy(struct net *net)
{
	struct xfrm_userpolicy_default *up;
	int len = NLMSG_ALIGN(sizeof(*up));
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	up = nlmsg_data(nlh);
	up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];

	nlmsg_end(skb, nlh);

	/* xfrm_nlmsg_multicast() must run under the RCU read lock. */
	rcu_read_lock();
	err =
xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
	rcu_read_unlock();

	return err;
}

/* Only BLOCK and ACCEPT are meaningful default-policy values coming
 * from userspace; anything else is ignored by xfrm_set_default().
 */
static bool xfrm_userpolicy_is_valid(__u8 policy)
{
	return policy == XFRM_USERPOLICY_BLOCK ||
	       policy == XFRM_USERPOLICY_ACCEPT;
}

/* XFRM_MSG_SETDEFAULT: update the per-netns default policy for each
 * direction that carries a valid value, flush cached routes, and notify
 * listeners of the new defaults.
 */
static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *up = nlmsg_data(nlh);

	if (xfrm_userpolicy_is_valid(up->in))
		net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;

	if (xfrm_userpolicy_is_valid(up->fwd))
		net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;

	if (xfrm_userpolicy_is_valid(up->out))
		net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;

	/* Cached dsts may now be stale; bump the generation id. */
	rt_genid_bump_all(net);

	xfrm_notify_userpolicy(net);
	return 0;
}

/* XFRM_MSG_GETDEFAULT: unicast the current default policies back to
 * the requesting socket.
 */
static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct sk_buff *r_skb;
	struct nlmsghdr *r_nlh;
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *r_up;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
	u32 portid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(len, GFP_ATOMIC);
	if (!r_skb)
		return -ENOMEM;

	r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0);
	if (!r_nlh) {
		kfree_skb(r_skb);
		return -EMSGSIZE;
	}

	r_up = nlmsg_data(r_nlh);
	r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
	nlmsg_end(r_skb, r_nlh);

	return nlmsg_unicast(xfrm_net_nlsk(net, skb), r_skb, portid);
}

/* Shared handler for XFRM_MSG_GETPOLICY and XFRM_MSG_DELPOLICY (the
 * dispatch table points both here).  Looks the policy up by index or by
 * selector+security context; for GET it unicasts the policy back, for
 * DEL it additionally audits and notifies key managers.
 */
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct km_event c;
	int delete;
	struct xfrm_mark m;
	u32 if_id = 0;

	p = nlmsg_data(nlh);
	/* The message type decides whether lookup also removes the policy. */
	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = verify_policy_dir(p->dir, extack);
	if (err)
		return err;

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	xfrm_mark_get(attrs, &m);

	if (p->index)
		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
				      p->index, delete, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs, extack);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
					   &p->sel, ctx, delete, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (!delete) {
		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);
		} else {
			err = nlmsg_unicast(xfrm_net_nlsk(net, skb), resp_skb,
					    NETLINK_CB(skb).portid);
		}
	} else {
		/* err reflects whether the lookup-with-delete succeeded. */
		xfrm_audit_policy_delete(xp, err ? 0 : 1, true);

		if (err != 0)
			goto out;

		c.data.byid = p->index;
		c.event = nlh->nlmsg_type;
		c.seq = nlh->nlmsg_seq;
		c.portid = nlh->nlmsg_pid;
		km_policy_notify(xp, p->dir, &c);
	}

out:
	xfrm_pol_put(xp);
	return err;
}

/* XFRM_MSG_FLUSHSA: drop all SAs of the given proto, then notify key
 * managers.  An already-empty table (-ESRCH) is reported as success.
 */
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct nlattr **attrs,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
	int err;

	err = xfrm_state_flush(net, p->proto, true);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}
	c.data.proto = p->proto;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_state_notify(NULL, &c);

	return 0;
}

/* Worst-case message size for an async-event (AE) notification for
 * state x, accounting for the ESN or legacy replay representation.
 */
static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x)
{
	unsigned int replay_size = x->replay_esn ?
				   xfrm_replay_state_esn_len(x->replay_esn) :
				   sizeof(struct xfrm_replay_state);

	return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
	       + nla_total_size(replay_size)
	       + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + nla_total_size(4) /* XFRM_AE_RTHR */
	       + nla_total_size(4) /* XFRM_AE_ETHR */
	       + nla_total_size(sizeof(x->dir)) /* XFRMA_SA_DIR */
	       + nla_total_size(4) /* XFRMA_SA_PCPU */
	       + nla_total_size(sizeof(x->if_id)); /* XFRMA_IF_ID */
}

/* Fill skb with an XFRM_MSG_NEWAE message describing the replay and
 * lifetime status of state x; c carries portid/seq and the AE flags.
 */
static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_aevent_id *id;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	id = nlmsg_data(nlh);
	memset(&id->sa_id, 0, sizeof(id->sa_id));
	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
	id->sa_id.spi = x->id.spi;
	id->sa_id.family = x->props.family;
	id->sa_id.proto = x->id.proto;
	memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
	id->reqid = x->props.reqid;
	id->flags = c->data.aevent;

	/* Emit the replay state in whichever format the SA uses. */
	if (x->replay_esn) {
		err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
			      xfrm_replay_state_esn_len(x->replay_esn),
			      x->replay_esn);
	} else {
		err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
			      &x->replay);
	}
	if (err)
		goto out_cancel;
	err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
			    XFRMA_PAD);
	if (err)
		goto out_cancel;

	/* Thresholds are only included when the requester asked for them. */
	if (id->flags & XFRM_AE_RTHR) {
		err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
		if (err)
			goto out_cancel;
	}
	if (id->flags & XFRM_AE_ETHR) {
		err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
				  x->replay_maxage * 10 / HZ);
		if (err)
			goto out_cancel;
	}
	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		goto out_cancel;

	err = xfrm_if_id_put(skb, x->if_id);
	if (err)
		goto out_cancel;
	/* UINT_MAX means "no pcpu binding" — omit the attribute then. */
	if (x->pcpu_num != UINT_MAX) {
		err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
		if (err)
			goto out_cancel;
	}

	if (x->dir) {
		err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
		if (err)
			goto out_cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}

/* XFRM_MSG_GETAE: look up an SA by (mark, daddr, spi, proto, family)
 * and unicast its AE status back to the requester.
 */
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct sk_buff *r_skb;
	int err;
	struct km_event c;
	u32 mark;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct xfrm_usersa_id *id = &p->sa_id;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
	if (x == NULL)
		return -ESRCH;

	r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (r_skb == NULL) {
		xfrm_state_put(x);
		return -ENOMEM;
	}

	/*
	 * XXX: is this lock really needed - none of the other
	 * gets lock (the concern is things getting updated
	 * while we are still reading) - jhs
	 */
	spin_lock_bh(&x->lock);
	c.data.aevent = p->flags;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;

	err = build_aevent(r_skb, x, &c);
	if (err < 0) {
		spin_unlock_bh(&x->lock);
		xfrm_state_put(x);
		kfree_skb(r_skb);
		return err;
	}

	err = nlmsg_unicast(xfrm_net_nlsk(net, skb), r_skb, NETLINK_CB(skb).portid);
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

/* XFRM_MSG_NEWAE: update replay/lifetime state of an existing SA.
 * Requires NLM_F_REPLACE and at least one of the AE attributes.
 */
static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct km_event c;
	int err = -EINVAL;
	u32 mark = 0;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];

	if (!lt && !rp && !re && !et && !rt) {
		NL_SET_ERR_MSG(extack, "Missing required attribute for AE");
		return err;
	}

	/* pedantic mode - thou shalt sayeth replaceth */
	if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG(extack, "NLM_F_REPLACE flag is required");
		return err;
	}

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
	if (x == NULL)
		return -ESRCH;

	if (x->km.state != XFRM_STATE_VALID) {
		NL_SET_ERR_MSG(extack, "SA must be in VALID state");
		goto out;
	}

	err = xfrm_replay_verify_len(x->replay_esn, re, extack);
	if (err)
		goto out;

	/* Apply the updates under the SA lock. */
	spin_lock_bh(&x->lock);
	xfrm_update_ae_params(x, attrs, 1);
	spin_unlock_bh(&x->lock);

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.data.aevent = XFRM_AE_CU;
	km_state_notify(x, &c);
	err = 0;
out:
	xfrm_state_put(x);
	return err;
}

/* XFRM_MSG_FLUSHPOLICY: remove all policies of the given type and
 * notify key managers.  An already-empty table counts as success.
 */
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = xfrm_policy_flush(net, type, true);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}

	c.data.type = type;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_policy_notify(NULL, 0, &c);
	return 0;
}

/* XFRM_MSG_POLEXPIRE: userspace announces a policy expiry.  A hard
 * expiry deletes the policy; either way key managers are notified.
 */
static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct nlattr **attrs,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_polexpire *up = nlmsg_data(nlh);
	struct xfrm_userpolicy_info *p = &up->pol;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err = -ENOENT;
	struct xfrm_mark m;
	u32 if_id = 0;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = verify_policy_dir(p->dir, extack);
	if (err)
		return err;

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	xfrm_mark_get(attrs, &m);

	/* Same by-index / by-selector lookup split as xfrm_get_policy(),
	 * but without the delete-on-lookup flag.
	 */
	if (p->index)
		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
				      0, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs, extack);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
					   &p->sel, ctx, 0, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (unlikely(xp->walk.dead))
		goto out;

	err = 0;
	if (up->hard) {
		xfrm_policy_delete(xp, p->dir);
		xfrm_audit_policy_delete(xp, 1, true);
	}
	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);

out:
	xfrm_pol_put(xp);
	return err;
}

/* XFRM_MSG_EXPIRE: userspace announces an SA expiry.  Key managers are
 * told; a hard expiry additionally deletes and audits the SA.
 */
static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err;
	struct xfrm_user_expire *ue = nlmsg_data(nlh);
	struct xfrm_usersa_info *p = &ue->state;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);

	err = -ENOENT;
	if (x == NULL)
		return err;

	spin_lock_bh(&x->lock);
	err = -EINVAL;
	if (x->km.state != XFRM_STATE_VALID) {
		NL_SET_ERR_MSG(extack, "SA must be in VALID state");
		goto out;
	}

	km_state_expired(x, ue->hard, nlh->nlmsg_pid);

	if (ue->hard) {
		__xfrm_state_delete(x);
		xfrm_audit_state_delete(x, 1, true);
	}
	err = 0;
out:
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

/* XFRM_MSG_ACQUIRE: userspace injects an acquire.  A temporary state
 * and policy are built only to drive km_query() per template; both are
 * freed before returning (nothing is inserted into the SAD/SPD).
 */
static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_tmpl *ut;
	int i;
	struct nlattr *rt = attrs[XFRMA_TMPL];
	struct xfrm_mark mark;

	struct xfrm_user_acquire *ua = nlmsg_data(nlh);
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto nomem;

	xfrm_mark_get(attrs, &mark);

	if (attrs[XFRMA_SA_PCPU]) {
		x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
		err = -EINVAL;
		if (x->pcpu_num >= num_possible_cpus()) {
			NL_SET_ERR_MSG(extack, "pCPU number too big");
			goto free_state;
		}
	}

	err = verify_newpolicy_info(&ua->policy, extack);
	if (err)
		goto free_state;
	err = verify_sec_ctx_len(attrs, extack);
	if (err)
		goto free_state;

	/* build an XP */
	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err, extack);
	if (!xp)
		goto free_state;

	memcpy(&x->id, &ua->id, sizeof(ua->id));
	memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
	memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
	xp->mark.m = x->mark.m = mark.m;
	xp->mark.v = x->mark.v = mark.v;
	ut = nla_data(rt);
	/* extract the templates and for each call km_key */
	for (i = 0; i < xp->xfrm_nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
		memcpy(&x->id, &t->id, sizeof(x->id));
		x->props.mode = t->mode;
		x->props.reqid = t->reqid;
		x->props.family = ut->family;
		t->aalgos = ua->aalgos;
		t->ealgos = ua->ealgos;
		t->calgos = ua->calgos;
		err = km_query(x, t, xp);

	}

	/* The state and policy were only scaffolding for km_query(). */
	xfrm_state_free(x);
	xfrm_dev_policy_delete(xp);
	xfrm_dev_policy_free(xp);
	security_xfrm_policy_free(xp->security);
	kfree(xp);

	return 0;

free_state:
	xfrm_state_free(x);
nomem:
	return err;
}

#ifdef CONFIG_XFRM_MIGRATE
/* Parse the XFRMA_MIGRATE array (and optional XFRMA_KMADDRESS when k is
 * non-NULL) into kernel structures; *num receives the entry count.
 */
static int copy_from_user_migrate(struct xfrm_migrate *ma,
				  struct xfrm_kmaddress *k,
				  struct nlattr **attrs, int *num,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_MIGRATE];
	struct xfrm_user_migrate *um;
	int i, num_migrate;

	if (k != NULL) {
		struct xfrm_user_kmaddress *uk;

		uk = nla_data(attrs[XFRMA_KMADDRESS]);
		memcpy(&k->local, &uk->local, sizeof(k->local));
		memcpy(&k->remote, &uk->remote, sizeof(k->remote));
		k->family = uk->family;
		k->reserved = uk->reserved;
	}

	um = nla_data(rt);
	num_migrate = nla_len(rt) / sizeof(*um);

	if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
		return -EINVAL;
	}

	for (i = 0; i < num_migrate; i++, um++, ma++) {
		memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
		memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
		memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
		memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));

		ma->proto = um->proto;
		ma->mode = um->mode;
		ma->reqid = um->reqid;

		ma->old_family = um->old_family;
		ma->new_family = um->new_family;
	}

	*num = i;
	return 0;
}

/* XFRM_MSG_MIGRATE: move SAs/policies to new endpoints per RFC 4555
 * style migration.  Optional ENCAP and OFFLOAD_DEV attributes are
 * duplicated and freed on all paths.
 */
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
	struct xfrm_migrate m[XFRM_MAX_DEPTH];
	struct xfrm_kmaddress km, *kmp;
	u8 type;
	int err;
	int n = 0;
	struct net *net = sock_net(skb->sk);
	struct xfrm_encap_tmpl *encap = NULL;
	struct xfrm_user_offload *xuo = NULL;
	u32 if_id = 0;

	if (!attrs[XFRMA_MIGRATE]) {
		NL_SET_ERR_MSG(extack, "Missing required MIGRATE attribute");
		return -EINVAL;
	}

	kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = copy_from_user_migrate(m, kmp, attrs, &n, extack);
	if (err)
		return err;

	if (!n)
		return 0;

	if (attrs[XFRMA_ENCAP]) {
		encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				sizeof(*encap), GFP_KERNEL);
		if (!encap)
			return -ENOMEM;
	}

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	if (attrs[XFRMA_OFFLOAD_DEV]) {
		xuo = kmemdup(nla_data(attrs[XFRMA_OFFLOAD_DEV]),
			      sizeof(*xuo), GFP_KERNEL);
		if (!xuo) {
			err = -ENOMEM;
			goto error;
		}
	}
	err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap,
			   if_id, extack, xuo);
error:
	kfree(encap);
	kfree(xuo);
	return err;
}
#else
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	return -ENOPROTOOPT;
}
#endif

#ifdef CONFIG_XFRM_MIGRATE
/* Emit one migrate entry as an XFRMA_MIGRATE attribute. */
static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
{
	struct xfrm_user_migrate um;

	memset(&um, 0, sizeof(um));
	um.proto = m->proto;
	um.mode = m->mode;
	um.reqid = m->reqid;
	um.old_family = m->old_family;
	memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
	memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
	um.new_family = m->new_family;
	memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
	memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));

	return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}

/* Emit the key-manager address pair as an XFRMA_KMADDRESS attribute. */
static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
{
	struct xfrm_user_kmaddress uk;

	memset(&uk, 0, sizeof(uk));
	uk.family = k->family;
	uk.reserved = k->reserved;
	memcpy(&uk.local, &k->local, sizeof(uk.local));
	memcpy(&uk.remote, &k->remote, sizeof(uk.remote));

	return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
}

/* Message size for an XFRM_MSG_MIGRATE notification with num_migrate
 * entries, optionally carrying kmaddress and encap attributes.
 */
static inline unsigned int xfrm_migrate_msgsize(int num_migrate, int with_kma,
						int with_encp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
	       + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
	       + (with_encp ? nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0)
	       + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
	       + userpolicy_type_attrsize();
}

/* Build an XFRM_MSG_MIGRATE message: selector/dir header followed by
 * optional kmaddress and encap, the policy type, and one attribute per
 * migrate entry.
 */
static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
			 int num_migrate, const struct xfrm_kmaddress *k,
			 const struct xfrm_selector *sel,
			 const struct xfrm_encap_tmpl *encap, u8 dir, u8 type)
{
	const struct xfrm_migrate *mp;
	struct xfrm_userpolicy_id *pol_id;
	struct nlmsghdr *nlh;
	int i, err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	pol_id = nlmsg_data(nlh);
	/* copy data from selector, dir, and type to the pol_id */
	memset(pol_id, 0, sizeof(*pol_id));
	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
	pol_id->dir = dir;

	if (k != NULL) {
		err = copy_to_user_kmaddress(k, skb);
		if (err)
			goto out_cancel;
	}
	if (encap) {
		err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap);
		if (err)
			goto out_cancel;
	}
	err = copy_to_user_policy_type(type, skb);
	if (err)
		goto out_cancel;
	for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
		err = copy_to_user_migrate(mp, skb);
		if (err)
			goto out_cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}

/* Multicast an XFRM_MSG_MIGRATE notification to XFRMNLGRP_MIGRATE. */
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int
num_migrate,
			     const struct xfrm_kmaddress *k,
			     const struct xfrm_encap_tmpl *encap)
{
	struct net *net = &init_net;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap),
			GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* build migrate */
	err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type);
	/* skb was sized by xfrm_migrate_msgsize(); overflow is a bug. */
	BUG_ON(err < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
}
#else
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k,
			     const struct xfrm_encap_tmpl *encap)
{
	return -ENOPROTOOPT;
}
#endif

#define XMSGSIZE(type) sizeof(struct type)

/* Minimum fixed-header payload length for each user message type;
 * indexed by (msg type - XFRM_MSG_BASE).  Part of the UAPI contract.
 */
const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_REPORT      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_NEWSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_SETDEFAULT  - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
	[XFRM_MSG_GETDEFAULT  - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
};
EXPORT_SYMBOL_GPL(xfrm_msg_min);

#undef XMSGSIZE

/* Netlink attribute validation policy for XFRMA_* attributes.  Strict
 * validation starts at XFRMA_SA_DIR; older attributes keep legacy
 * (deprecated) parsing for ABI compatibility.
 */
const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_UNSPEC]         = { .strict_start_type = XFRMA_SA_DIR },
	[XFRMA_SA]             = { .len = sizeof(struct xfrm_usersa_info)},
	[XFRMA_POLICY]         = { .len = sizeof(struct xfrm_userpolicy_info)},
	[XFRMA_LASTUSED]       = { .type = NLA_U64},
	[XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
	[XFRMA_ALG_AEAD]       = { .len = sizeof(struct xfrm_algo_aead) },
	[XFRMA_ALG_AUTH]       = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_CRYPT]      = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_COMP]       = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ENCAP]          = { .len = sizeof(struct xfrm_encap_tmpl) },
	[XFRMA_TMPL]           = { .len = sizeof(struct xfrm_user_tmpl) },
	[XFRMA_SEC_CTX]        = { .len = sizeof(struct xfrm_user_sec_ctx) },
	[XFRMA_LTIME_VAL]      = { .len = sizeof(struct xfrm_lifetime_cur) },
	[XFRMA_REPLAY_VAL]     = { .len = sizeof(struct xfrm_replay_state) },
	[XFRMA_REPLAY_THRESH]  = { .type = NLA_U32 },
	[XFRMA_ETIMER_THRESH]  = { .type = NLA_U32 },
	[XFRMA_SRCADDR]        = { .len = sizeof(xfrm_address_t) },
	[XFRMA_COADDR]         = { .len = sizeof(xfrm_address_t) },
	[XFRMA_POLICY_TYPE]    = { .len = sizeof(struct xfrm_userpolicy_type)},
	[XFRMA_MIGRATE]        = { .len = sizeof(struct xfrm_user_migrate) },
	[XFRMA_KMADDRESS]      = { .len = sizeof(struct xfrm_user_kmaddress) },
	[XFRMA_MARK]           = { .len = sizeof(struct xfrm_mark) },
	[XFRMA_TFCPAD]         = { .type = NLA_U32 },
	[XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
	[XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
	[XFRMA_PROTO]          = { .type = NLA_U8 },
	[XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
	[XFRMA_OFFLOAD_DEV]    = { .len = sizeof(struct xfrm_user_offload) },
	[XFRMA_SET_MARK]       = { .type = NLA_U32 },
	[XFRMA_SET_MARK_MASK]  = { .type = NLA_U32 },
	[XFRMA_IF_ID]          = { .type = NLA_U32 },
	[XFRMA_MTIMER_THRESH]  = { .type = NLA_U32 },
	[XFRMA_SA_DIR]         = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT),
	[XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 },
	[XFRMA_SA_PCPU]        = { .type = NLA_U32 },
	[XFRMA_IPTFS_DROP_TIME]		= { .type = NLA_U32 },
	[XFRMA_IPTFS_REORDER_WINDOW]	= { .type = NLA_U16 },
	[XFRMA_IPTFS_DONT_FRAG]		= { .type = NLA_FLAG },
	[XFRMA_IPTFS_INIT_DELAY]	= { .type = NLA_U32 },
	[XFRMA_IPTFS_MAX_QSIZE]		= { .type = NLA_U32 },
	[XFRMA_IPTFS_PKT_SIZE]		= { .type = NLA_U32 },
};
EXPORT_SYMBOL_GPL(xfrma_policy);

/* Separate attribute policy for XFRM_MSG_NEWSPDINFO. */
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
	[XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
	[XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
};

/* Per-message-type handlers.  Note XFRM_MSG_DELPOLICY is routed to
 * xfrm_get_policy(), which distinguishes GET from DEL via nlmsg_type.
 * nla_pol/nla_max override the default xfrma_policy when set.
 */
static const struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **,
		    struct netlink_ext_ack *);
	int (*start)(struct netlink_callback *);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
	const struct nla_policy *nla_pol;
	int nla_max;
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
						   .dump = xfrm_dump_sa,
						   .done = xfrm_dump_sa_done },
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						   .start = xfrm_dump_policy_start,
						   .dump = xfrm_dump_policy,
						   .done = xfrm_dump_policy_done },
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = { .doit = xfrm_new_ae  },
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = { .doit = xfrm_get_ae  },
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate    },
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo   },
	[XFRM_MSG_NEWSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo,
						   .nla_pol = xfrma_spd_policy,
						   .nla_max = XFRMA_SPD_MAX },
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo   },
	[XFRM_MSG_SETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_set_default   },
	[XFRM_MSG_GETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_get_default   },
};

/* Reject attributes that are only meaningful for a subset of message
 * types when they appear elsewhere.
 */
static int xfrm_reject_unused_attr(int type, struct nlattr **attrs,
				   struct netlink_ext_ack *extack)
{
	if (attrs[XFRMA_SA_DIR]) {
		switch (type) {
		case XFRM_MSG_NEWSA:
		case XFRM_MSG_UPDSA:
		case XFRM_MSG_ALLOCSPI:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid attribute SA_DIR");
			return -EINVAL;
		}
	}

	if (attrs[XFRMA_SA_PCPU]) {
		switch
 (type) {
		case XFRM_MSG_NEWSA:
		case XFRM_MSG_UPDSA:
		case XFRM_MSG_ALLOCSPI:
		case XFRM_MSG_ACQUIRE:

			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid attribute SA_PCPU");
			return -EINVAL;
		}
	}

	return 0;
}

/* Top-level dispatcher for one xfrm netlink message: capability check,
 * optional 32-bit compat translation, dump start for GETSA/GETPOLICY
 * with NLM_F_DUMP, attribute parsing, then the per-type doit handler.
 */
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[XFRMA_MAX+1];
	const struct xfrm_link *link;
	struct nlmsghdr *nlh64 = NULL;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;

	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (in_compat_syscall()) {
		struct xfrm_translator *xtr = xfrm_get_translator();

		if (!xtr)
			return -EOPNOTSUPP;

		/* Translate the 32-bit message into a native one. */
		nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
					    link->nla_pol, extack);
		xfrm_put_translator(xtr);
		if (IS_ERR(nlh64))
			return PTR_ERR(nlh64);
		if (nlh64)
			nlh = nlh64;
	}

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct netlink_dump_control c = {
			.start = link->start,
			.dump = link->dump,
			.done = link->done,
		};

		if (link->dump == NULL) {
			err = -EINVAL;
			goto err;
		}

		err = netlink_dump_start(xfrm_net_nlsk(net, skb), skb, nlh, &c);
		goto err;
	}

	err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs,
				     link->nla_max ? : XFRMA_MAX,
				     link->nla_pol ? : xfrma_policy, extack);
	if (err < 0)
		goto err;

	/* Only apply the unused-attr filter for the default policy. */
	if (!link->nla_pol || link->nla_pol == xfrma_policy) {
		err = xfrm_reject_unused_attr((type + XFRM_MSG_BASE), attrs, extack);
		if (err < 0)
			goto err;
	}

	if (link->doit == NULL) {
		err = -EINVAL;
		goto err;
	}

	err = link->doit(skb, nlh, attrs, extack);

	/* We need to free skb allocated in xfrm_alloc_compat() before
	 * returning from this function, because consume_skb() won't take
	 * care of frag_list since netlink destructor sets
	 * sbk->head to NULL. (see netlink_skb_destructor())
	 */
	if (skb_has_frag_list(skb)) {
		kfree_skb(skb_shinfo(skb)->frag_list);
		skb_shinfo(skb)->frag_list = NULL;
	}

err:
	kvfree(nlh64);
	return err;
}

/* Netlink input callback; xfrm_cfg_mutex serialises all configuration. */
static void xfrm_netlink_rcv(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);

	mutex_lock(&net->xfrm.xfrm_cfg_mutex);
	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
	mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
}

/* Worst-case size of an XFRM_MSG_EXPIRE notification. */
static inline unsigned int xfrm_expire_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)) +
	       nla_total_size(sizeof(struct xfrm_mark)) +
	       nla_total_size(sizeof_field(struct xfrm_state, dir)) +
	       nla_total_size(4); /* XFRMA_SA_PCPU */
}

/* Fill skb with an XFRM_MSG_EXPIRE message for state x; c->data.hard
 * distinguishes hard from soft expiry.
 */
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ? 1 : 0;
	/* clear the padding bytes */
	memset_after(ue, 0, hard);

	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		return err;

	err = xfrm_if_id_put(skb, x->if_id);
	if (err)
		return err;
	/* UINT_MAX means "no pcpu binding" — omit the attribute then. */
	if (x->pcpu_num != UINT_MAX) {
		err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
		if (err)
			return err;
	}

	if (x->dir) {
		err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
		if (err)
			return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}

/* Multicast an SA expire event to XFRMNLGRP_EXPIRE. */
static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_expire(skb, x, c) < 0) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
}

/* Multicast an async (replay/lifetime) event to XFRMNLGRP_AEVENTS. */
static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	err = build_aevent(skb, x, c);
	/* skb was sized by xfrm_aevent_msgsize(); overflow is a bug. */
	BUG_ON(err < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
}

/* Multicast an XFRM_MSG_FLUSHSA notification to XFRMNLGRP_SA. */
static int xfrm_notify_sa_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct xfrm_usersa_flush *p;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	p = nlmsg_data(nlh);
	p->proto = c->data.proto;

	nlmsg_end(skb, nlh);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
}

/* Worst-case attribute payload needed to describe SA x in a NEWSA-style
 * message; each optional attribute is counted only when present.
 */
static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
{
	unsigned int l = 0;
	if (x->aead)
		l += nla_total_size(aead_len(x->aead));
	if (x->aalg) {
		l += nla_total_size(sizeof(struct xfrm_algo) +
				    (x->aalg->alg_key_len + 7) / 8);
		l += nla_total_size(xfrm_alg_auth_len(x->aalg));
	}
	if (x->ealg)
		l += nla_total_size(xfrm_alg_len(x->ealg));
	if (x->calg)
		l += nla_total_size(sizeof(*x->calg));
	if (x->encap)
		l += nla_total_size(sizeof(*x->encap));
	if (x->tfcpad)
		l += nla_total_size(sizeof(x->tfcpad));
	if (x->replay_esn)
		l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
	else
		l += nla_total_size(sizeof(struct xfrm_replay_state));
	if (x->security)
		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
				    x->security->ctx_len);
	if (x->coaddr)
		l += nla_total_size(sizeof(*x->coaddr));
	if (x->props.extra_flags)
		l += nla_total_size(sizeof(x->props.extra_flags));
	if (x->xso.dev)
		l += nla_total_size(sizeof(struct xfrm_user_offload));
	if (x->props.smark.v | x->props.smark.m) {
		l += nla_total_size(sizeof(x->props.smark.v));
		l += nla_total_size(sizeof(x->props.smark.m));
	}
	if (x->if_id)
		l += nla_total_size(sizeof(x->if_id));
	if (x->pcpu_num != UINT_MAX)
		l += nla_total_size(sizeof(x->pcpu_num));

	/* Must count x->lastused as it may become non-zero behind our back.
*/ 3698 l += nla_total_size_64bit(sizeof(u64)); 3699 3700 if (x->mapping_maxage) 3701 l += nla_total_size(sizeof(x->mapping_maxage)); 3702 3703 if (x->dir) 3704 l += nla_total_size(sizeof(x->dir)); 3705 3706 if (x->nat_keepalive_interval) 3707 l += nla_total_size(sizeof(x->nat_keepalive_interval)); 3708 3709 if (x->mode_cbs && x->mode_cbs->sa_len) 3710 l += x->mode_cbs->sa_len(x); 3711 3712 return l; 3713 } 3714 3715 static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c) 3716 { 3717 struct net *net = xs_net(x); 3718 struct xfrm_usersa_info *p; 3719 struct xfrm_usersa_id *id; 3720 struct nlmsghdr *nlh; 3721 struct sk_buff *skb; 3722 unsigned int len = xfrm_sa_len(x); 3723 unsigned int headlen; 3724 int err; 3725 3726 headlen = sizeof(*p); 3727 if (c->event == XFRM_MSG_DELSA) { 3728 len += nla_total_size(headlen); 3729 headlen = sizeof(*id); 3730 len += nla_total_size(sizeof(struct xfrm_mark)); 3731 } 3732 len += NLMSG_ALIGN(headlen); 3733 3734 skb = nlmsg_new(len, GFP_ATOMIC); 3735 if (skb == NULL) 3736 return -ENOMEM; 3737 3738 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); 3739 err = -EMSGSIZE; 3740 if (nlh == NULL) 3741 goto out_free_skb; 3742 3743 p = nlmsg_data(nlh); 3744 if (c->event == XFRM_MSG_DELSA) { 3745 struct nlattr *attr; 3746 3747 id = nlmsg_data(nlh); 3748 memset(id, 0, sizeof(*id)); 3749 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr)); 3750 id->spi = x->id.spi; 3751 id->family = x->props.family; 3752 id->proto = x->id.proto; 3753 3754 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p)); 3755 err = -EMSGSIZE; 3756 if (attr == NULL) 3757 goto out_free_skb; 3758 3759 p = nla_data(attr); 3760 } 3761 err = copy_to_user_state_extra(x, p, skb); 3762 if (err) 3763 goto out_free_skb; 3764 3765 nlmsg_end(skb, nlh); 3766 3767 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA); 3768 3769 out_free_skb: 3770 kfree_skb(skb); 3771 return err; 3772 } 3773 3774 static int xfrm_send_state_notify(struct xfrm_state *x, const struct 
km_event *c) 3775 { 3776 3777 switch (c->event) { 3778 case XFRM_MSG_EXPIRE: 3779 return xfrm_exp_state_notify(x, c); 3780 case XFRM_MSG_NEWAE: 3781 return xfrm_aevent_state_notify(x, c); 3782 case XFRM_MSG_DELSA: 3783 case XFRM_MSG_UPDSA: 3784 case XFRM_MSG_NEWSA: 3785 return xfrm_notify_sa(x, c); 3786 case XFRM_MSG_FLUSHSA: 3787 return xfrm_notify_sa_flush(c); 3788 default: 3789 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n", 3790 c->event); 3791 break; 3792 } 3793 3794 return 0; 3795 3796 } 3797 3798 static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x, 3799 struct xfrm_policy *xp) 3800 { 3801 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire)) 3802 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) 3803 + nla_total_size(sizeof(struct xfrm_mark)) 3804 + nla_total_size(xfrm_user_sec_ctx_size(x->security)) 3805 + nla_total_size(4) /* XFRMA_SA_PCPU */ 3806 + userpolicy_type_attrsize(); 3807 } 3808 3809 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, 3810 struct xfrm_tmpl *xt, struct xfrm_policy *xp) 3811 { 3812 __u32 seq = xfrm_get_acqseq(); 3813 struct xfrm_user_acquire *ua; 3814 struct nlmsghdr *nlh; 3815 int err; 3816 3817 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0); 3818 if (nlh == NULL) 3819 return -EMSGSIZE; 3820 3821 ua = nlmsg_data(nlh); 3822 memcpy(&ua->id, &x->id, sizeof(ua->id)); 3823 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr)); 3824 memcpy(&ua->sel, &x->sel, sizeof(ua->sel)); 3825 copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT); 3826 ua->aalgos = xt->aalgos; 3827 ua->ealgos = xt->ealgos; 3828 ua->calgos = xt->calgos; 3829 ua->seq = x->km.seq = seq; 3830 3831 err = copy_to_user_tmpl(xp, skb); 3832 if (!err) 3833 err = copy_to_user_state_sec_ctx(x, skb); 3834 if (!err) 3835 err = copy_to_user_policy_type(xp->type, skb); 3836 if (!err) 3837 err = xfrm_mark_put(skb, &xp->mark); 3838 if (!err) 3839 err = xfrm_if_id_put(skb, xp->if_id); 3840 if (!err && xp->xdo.dev) 3841 
err = copy_user_offload(&xp->xdo, skb); 3842 if (!err && x->pcpu_num != UINT_MAX) 3843 err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num); 3844 if (err) { 3845 nlmsg_cancel(skb, nlh); 3846 return err; 3847 } 3848 3849 nlmsg_end(skb, nlh); 3850 return 0; 3851 } 3852 3853 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, 3854 struct xfrm_policy *xp) 3855 { 3856 struct net *net = xs_net(x); 3857 struct sk_buff *skb; 3858 int err; 3859 3860 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC); 3861 if (skb == NULL) 3862 return -ENOMEM; 3863 3864 err = build_acquire(skb, x, xt, xp); 3865 BUG_ON(err < 0); 3866 3867 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE); 3868 } 3869 3870 /* User gives us xfrm_user_policy_info followed by an array of 0 3871 * or more templates. 3872 */ 3873 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt, 3874 u8 *data, int len, int *dir) 3875 { 3876 struct net *net = sock_net(sk); 3877 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data; 3878 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1); 3879 struct xfrm_policy *xp; 3880 int nr; 3881 3882 switch (sk->sk_family) { 3883 case AF_INET: 3884 if (opt != IP_XFRM_POLICY) { 3885 *dir = -EOPNOTSUPP; 3886 return NULL; 3887 } 3888 break; 3889 #if IS_ENABLED(CONFIG_IPV6) 3890 case AF_INET6: 3891 if (opt != IPV6_XFRM_POLICY) { 3892 *dir = -EOPNOTSUPP; 3893 return NULL; 3894 } 3895 break; 3896 #endif 3897 default: 3898 *dir = -EINVAL; 3899 return NULL; 3900 } 3901 3902 *dir = -EINVAL; 3903 3904 if (len < sizeof(*p) || 3905 verify_newpolicy_info(p, NULL)) 3906 return NULL; 3907 3908 nr = ((len - sizeof(*p)) / sizeof(*ut)); 3909 if (validate_tmpl(nr, ut, p->sel.family, p->dir, NULL)) 3910 return NULL; 3911 3912 if (p->dir > XFRM_POLICY_OUT) 3913 return NULL; 3914 3915 xp = xfrm_policy_alloc(net, GFP_ATOMIC); 3916 if (xp == NULL) { 3917 *dir = -ENOBUFS; 3918 return NULL; 3919 } 3920 3921 copy_from_user_policy(xp, p); 
3922 xp->type = XFRM_POLICY_TYPE_MAIN; 3923 copy_templates(xp, ut, nr); 3924 3925 *dir = p->dir; 3926 3927 return xp; 3928 } 3929 3930 static inline unsigned int xfrm_polexpire_msgsize(struct xfrm_policy *xp) 3931 { 3932 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire)) 3933 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) 3934 + nla_total_size(xfrm_user_sec_ctx_size(xp->security)) 3935 + nla_total_size(sizeof(struct xfrm_mark)) 3936 + userpolicy_type_attrsize(); 3937 } 3938 3939 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, 3940 int dir, const struct km_event *c) 3941 { 3942 struct xfrm_user_polexpire *upe; 3943 int hard = c->data.hard; 3944 struct nlmsghdr *nlh; 3945 int err; 3946 3947 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); 3948 if (nlh == NULL) 3949 return -EMSGSIZE; 3950 3951 upe = nlmsg_data(nlh); 3952 copy_to_user_policy(xp, &upe->pol, dir); 3953 err = copy_to_user_tmpl(xp, skb); 3954 if (!err) 3955 err = copy_to_user_sec_ctx(xp, skb); 3956 if (!err) 3957 err = copy_to_user_policy_type(xp->type, skb); 3958 if (!err) 3959 err = xfrm_mark_put(skb, &xp->mark); 3960 if (!err) 3961 err = xfrm_if_id_put(skb, xp->if_id); 3962 if (!err && xp->xdo.dev) 3963 err = copy_user_offload(&xp->xdo, skb); 3964 if (err) { 3965 nlmsg_cancel(skb, nlh); 3966 return err; 3967 } 3968 upe->hard = !!hard; 3969 /* clear the padding bytes */ 3970 memset_after(upe, 0, hard); 3971 3972 nlmsg_end(skb, nlh); 3973 return 0; 3974 } 3975 3976 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 3977 { 3978 struct net *net = xp_net(xp); 3979 struct sk_buff *skb; 3980 int err; 3981 3982 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC); 3983 if (skb == NULL) 3984 return -ENOMEM; 3985 3986 err = build_polexpire(skb, xp, dir, c); 3987 BUG_ON(err < 0); 3988 3989 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE); 3990 } 3991 3992 static int xfrm_notify_policy(struct 
xfrm_policy *xp, int dir, const struct km_event *c) 3993 { 3994 unsigned int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 3995 struct net *net = xp_net(xp); 3996 struct xfrm_userpolicy_info *p; 3997 struct xfrm_userpolicy_id *id; 3998 struct nlmsghdr *nlh; 3999 struct sk_buff *skb; 4000 unsigned int headlen; 4001 int err; 4002 4003 headlen = sizeof(*p); 4004 if (c->event == XFRM_MSG_DELPOLICY) { 4005 len += nla_total_size(headlen); 4006 headlen = sizeof(*id); 4007 } 4008 len += userpolicy_type_attrsize(); 4009 len += nla_total_size(sizeof(struct xfrm_mark)); 4010 len += NLMSG_ALIGN(headlen); 4011 4012 skb = nlmsg_new(len, GFP_ATOMIC); 4013 if (skb == NULL) 4014 return -ENOMEM; 4015 4016 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); 4017 err = -EMSGSIZE; 4018 if (nlh == NULL) 4019 goto out_free_skb; 4020 4021 p = nlmsg_data(nlh); 4022 if (c->event == XFRM_MSG_DELPOLICY) { 4023 struct nlattr *attr; 4024 4025 id = nlmsg_data(nlh); 4026 memset(id, 0, sizeof(*id)); 4027 id->dir = dir; 4028 if (c->data.byid) 4029 id->index = xp->index; 4030 else 4031 memcpy(&id->sel, &xp->selector, sizeof(id->sel)); 4032 4033 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p)); 4034 err = -EMSGSIZE; 4035 if (attr == NULL) 4036 goto out_free_skb; 4037 4038 p = nla_data(attr); 4039 } 4040 4041 copy_to_user_policy(xp, p, dir); 4042 err = copy_to_user_tmpl(xp, skb); 4043 if (!err) 4044 err = copy_to_user_policy_type(xp->type, skb); 4045 if (!err) 4046 err = xfrm_mark_put(skb, &xp->mark); 4047 if (!err) 4048 err = xfrm_if_id_put(skb, xp->if_id); 4049 if (!err && xp->xdo.dev) 4050 err = copy_user_offload(&xp->xdo, skb); 4051 if (err) 4052 goto out_free_skb; 4053 4054 nlmsg_end(skb, nlh); 4055 4056 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); 4057 4058 out_free_skb: 4059 kfree_skb(skb); 4060 return err; 4061 } 4062 4063 static int xfrm_notify_policy_flush(const struct km_event *c) 4064 { 4065 struct net *net = c->net; 4066 struct nlmsghdr 
*nlh; 4067 struct sk_buff *skb; 4068 int err; 4069 4070 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC); 4071 if (skb == NULL) 4072 return -ENOMEM; 4073 4074 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); 4075 err = -EMSGSIZE; 4076 if (nlh == NULL) 4077 goto out_free_skb; 4078 err = copy_to_user_policy_type(c->data.type, skb); 4079 if (err) 4080 goto out_free_skb; 4081 4082 nlmsg_end(skb, nlh); 4083 4084 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); 4085 4086 out_free_skb: 4087 kfree_skb(skb); 4088 return err; 4089 } 4090 4091 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 4092 { 4093 4094 switch (c->event) { 4095 case XFRM_MSG_NEWPOLICY: 4096 case XFRM_MSG_UPDPOLICY: 4097 case XFRM_MSG_DELPOLICY: 4098 return xfrm_notify_policy(xp, dir, c); 4099 case XFRM_MSG_FLUSHPOLICY: 4100 return xfrm_notify_policy_flush(c); 4101 case XFRM_MSG_POLEXPIRE: 4102 return xfrm_exp_policy_notify(xp, dir, c); 4103 default: 4104 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", 4105 c->event); 4106 } 4107 4108 return 0; 4109 4110 } 4111 4112 static inline unsigned int xfrm_report_msgsize(void) 4113 { 4114 return NLMSG_ALIGN(sizeof(struct xfrm_user_report)); 4115 } 4116 4117 static int build_report(struct sk_buff *skb, u8 proto, 4118 struct xfrm_selector *sel, xfrm_address_t *addr) 4119 { 4120 struct xfrm_user_report *ur; 4121 struct nlmsghdr *nlh; 4122 4123 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0); 4124 if (nlh == NULL) 4125 return -EMSGSIZE; 4126 4127 ur = nlmsg_data(nlh); 4128 memset(ur, 0, sizeof(*ur)); 4129 ur->proto = proto; 4130 memcpy(&ur->sel, sel, sizeof(ur->sel)); 4131 4132 if (addr) { 4133 int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr); 4134 if (err) { 4135 nlmsg_cancel(skb, nlh); 4136 return err; 4137 } 4138 } 4139 nlmsg_end(skb, nlh); 4140 return 0; 4141 } 4142 4143 static int xfrm_send_report(struct net *net, u8 proto, 4144 struct xfrm_selector 
*sel, xfrm_address_t *addr) 4145 { 4146 struct sk_buff *skb; 4147 int err; 4148 4149 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC); 4150 if (skb == NULL) 4151 return -ENOMEM; 4152 4153 err = build_report(skb, proto, sel, addr); 4154 BUG_ON(err < 0); 4155 4156 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT); 4157 } 4158 4159 static inline unsigned int xfrm_mapping_msgsize(void) 4160 { 4161 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping)); 4162 } 4163 4164 static int build_mapping(struct sk_buff *skb, struct xfrm_state *x, 4165 xfrm_address_t *new_saddr, __be16 new_sport) 4166 { 4167 struct xfrm_user_mapping *um; 4168 struct nlmsghdr *nlh; 4169 4170 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0); 4171 if (nlh == NULL) 4172 return -EMSGSIZE; 4173 4174 um = nlmsg_data(nlh); 4175 4176 memset(&um->id, 0, sizeof(um->id)); 4177 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr)); 4178 um->id.spi = x->id.spi; 4179 um->id.family = x->props.family; 4180 um->id.proto = x->id.proto; 4181 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr)); 4182 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr)); 4183 um->new_sport = new_sport; 4184 um->old_sport = x->encap->encap_sport; 4185 um->reqid = x->props.reqid; 4186 4187 nlmsg_end(skb, nlh); 4188 return 0; 4189 } 4190 4191 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, 4192 __be16 sport) 4193 { 4194 struct net *net = xs_net(x); 4195 struct sk_buff *skb; 4196 int err; 4197 4198 if (x->id.proto != IPPROTO_ESP) 4199 return -EINVAL; 4200 4201 if (!x->encap) 4202 return -EINVAL; 4203 4204 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC); 4205 if (skb == NULL) 4206 return -ENOMEM; 4207 4208 err = build_mapping(skb, x, ipaddr, sport); 4209 BUG_ON(err < 0); 4210 4211 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING); 4212 } 4213 4214 static bool xfrm_is_alive(const struct km_event *c) 4215 { 4216 return (bool)xfrm_acquire_is_on(c->net); 4217 } 4218 
/* Key-manager registration: routes XFRM core events to netlink userspace. */
static struct xfrm_mgr netlink_mgr = {
	.notify		= xfrm_send_state_notify,
	.acquire	= xfrm_send_acquire,
	.compile_policy	= xfrm_compile_policy,
	.notify_policy	= xfrm_send_policy_notify,
	.report		= xfrm_send_report,
	.migrate	= xfrm_send_migrate,
	.new_mapping	= xfrm_send_mapping,
	.is_alive	= xfrm_is_alive,
};

/* Per-netns setup: create the NETLINK_XFRM kernel socket and publish it
 * via RCU; a second copy is stashed for release at netns teardown.
 */
static int __net_init xfrm_user_net_init(struct net *net)
{
	struct sock *nlsk;
	struct netlink_kernel_cfg cfg = {
		.groups	= XFRMNLGRP_MAX,
		.input	= xfrm_netlink_rcv,
	};

	nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
	if (nlsk == NULL)
		return -ENOMEM;
	net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
	rcu_assign_pointer(net->xfrm.nlsk, nlsk);
	return 0;
}

/* First teardown phase: unpublish the socket so new senders see NULL
 * while in-flight RCU readers may still use it.
 */
static void __net_exit xfrm_user_net_pre_exit(struct net *net)
{
	RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
}

/* Final teardown phase: release the stashed socket for each exiting netns
 * (safe now — pre_exit already hid it behind an RCU grace period).
 */
static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->xfrm.nlsk_stash);
}

static struct pernet_operations xfrm_user_net_ops = {
	.init	    = xfrm_user_net_init,
	.pre_exit   = xfrm_user_net_pre_exit,
	.exit_batch = xfrm_user_net_exit,
};

/* Module init: register per-netns sockets, then plug into the XFRM core
 * as a key manager.
 */
static int __init xfrm_user_init(void)
{
	int rv;

	printk(KERN_INFO "Initializing XFRM netlink socket\n");

	rv = register_pernet_subsys(&xfrm_user_net_ops);
	if (rv < 0)
		return rv;
	xfrm_register_km(&netlink_mgr);
	return 0;
}

/* Module exit: unwind in reverse registration order. */
static void __exit xfrm_user_exit(void)
{
	xfrm_unregister_km(&netlink_mgr);
	unregister_pernet_subsys(&xfrm_user_net_ops);
}

module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_DESCRIPTION("XFRM User interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);