1 // SPDX-License-Identifier: GPL-2.0-only 2 /* xfrm_user.c: User interface to configure xfrm engine. 3 * 4 * Copyright (C) 2002 David S. Miller (davem@redhat.com) 5 * 6 * Changes: 7 * Mitsuru KANDA @USAGI 8 * Kazunori MIYAZAWA @USAGI 9 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 10 * IPv6 support 11 * 12 */ 13 14 #include <linux/compat.h> 15 #include <linux/crypto.h> 16 #include <linux/module.h> 17 #include <linux/kernel.h> 18 #include <linux/types.h> 19 #include <linux/slab.h> 20 #include <linux/socket.h> 21 #include <linux/string.h> 22 #include <linux/net.h> 23 #include <linux/skbuff.h> 24 #include <linux/pfkeyv2.h> 25 #include <linux/ipsec.h> 26 #include <linux/init.h> 27 #include <linux/security.h> 28 #include <net/sock.h> 29 #include <net/xfrm.h> 30 #include <net/netlink.h> 31 #include <net/ah.h> 32 #include <linux/uaccess.h> 33 #if IS_ENABLED(CONFIG_IPV6) 34 #include <linux/in6.h> 35 #endif 36 #include <linux/unaligned.h> 37 38 static struct sock *xfrm_net_nlsk(const struct net *net, const struct sk_buff *skb) 39 { 40 /* get the source of this request, see netlink_unicast_kernel */ 41 const struct sock *sk = NETLINK_CB(skb).sk; 42 43 /* sk is refcounted, the netns stays alive and nlsk with it */ 44 return rcu_dereference_protected(net->xfrm.nlsk, sk->sk_net_refcnt); 45 } 46 47 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type, 48 struct netlink_ext_ack *extack) 49 { 50 struct nlattr *rt = attrs[type]; 51 struct xfrm_algo *algp; 52 53 if (!rt) 54 return 0; 55 56 algp = nla_data(rt); 57 if (nla_len(rt) < (int)xfrm_alg_len(algp)) { 58 NL_SET_ERR_MSG(extack, "Invalid AUTH/CRYPT/COMP attribute length"); 59 return -EINVAL; 60 } 61 62 switch (type) { 63 case XFRMA_ALG_AUTH: 64 case XFRMA_ALG_CRYPT: 65 case XFRMA_ALG_COMP: 66 break; 67 68 default: 69 NL_SET_ERR_MSG(extack, "Invalid algorithm attribute type"); 70 return -EINVAL; 71 } 72 73 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; 74 return 0; 75 } 76 77 static int verify_auth_trunc(struct nlattr **attrs, 78 struct netlink_ext_ack *extack) 79 { 80 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC]; 81 struct xfrm_algo_auth *algp; 82 83 if (!rt) 84 return 0; 85 86 algp = nla_data(rt); 87 if (nla_len(rt) < (int)xfrm_alg_auth_len(algp)) { 88 NL_SET_ERR_MSG(extack, "Invalid AUTH_TRUNC attribute length"); 89 return -EINVAL; 90 } 91 92 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; 93 return 0; 94 } 95 96 static int verify_aead(struct nlattr **attrs, struct netlink_ext_ack *extack) 97 { 98 struct nlattr *rt = attrs[XFRMA_ALG_AEAD]; 99 struct xfrm_algo_aead *algp; 100 101 if (!rt) 102 return 0; 103 104 algp = nla_data(rt); 105 if (nla_len(rt) < (int)aead_len(algp)) { 106 NL_SET_ERR_MSG(extack, "Invalid AEAD attribute length"); 107 return -EINVAL; 108 } 109 110 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; 111 return 0; 112 } 113 114 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type, 115 xfrm_address_t **addrp) 116 { 117 struct nlattr *rt = attrs[type]; 118 119 if (rt && addrp) 120 *addrp = nla_data(rt); 121 } 122 123 static inline int verify_sec_ctx_len(struct nlattr **attrs, struct netlink_ext_ack *extack) 124 { 125 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 126 struct xfrm_user_sec_ctx *uctx; 127 128 if (!rt) 129 return 0; 130 131 uctx = nla_data(rt); 132 if (uctx->len > nla_len(rt) || 133 uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) { 134 NL_SET_ERR_MSG(extack, "Invalid security context length"); 135 return -EINVAL; 136 } 137 138 return 0; 139 } 140 141 static inline int verify_replay(struct xfrm_usersa_info *p, 142 struct nlattr **attrs, u8 sa_dir, 143 struct netlink_ext_ack *extack) 144 { 145 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; 146 struct xfrm_replay_state_esn *rs; 147 148 if (!rt) { 149 if (p->flags & XFRM_STATE_ESN) { 150 NL_SET_ERR_MSG(extack, "Missing required attribute for ESN"); 151 return -EINVAL; 152 } 153 return 0; 154 } 155 156 rs = nla_data(rt); 157 158 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) { 159 NL_SET_ERR_MSG(extack, "ESN bitmap length must be <= 128"); 160 return -EINVAL; 161 } 162 163 if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) && 164 nla_len(rt) != sizeof(*rs)) { 165 NL_SET_ERR_MSG(extack, "ESN attribute is too short to fit the full bitmap length"); 166 return -EINVAL; 167 } 168 169 /* As only ESP and AH support ESN feature. */ 170 if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) { 171 NL_SET_ERR_MSG(extack, "ESN only supported for ESP and AH"); 172 return -EINVAL; 173 } 174 175 if (p->replay_window != 0) { 176 NL_SET_ERR_MSG(extack, "ESN not compatible with legacy replay_window"); 177 return -EINVAL; 178 } 179 180 if (sa_dir == XFRM_SA_DIR_OUT) { 181 if (rs->replay_window) { 182 NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA"); 183 return -EINVAL; 184 } 185 if (rs->seq || rs->seq_hi) { 186 NL_SET_ERR_MSG(extack, 187 "Replay seq and seq_hi should be 0 for output SA"); 188 return -EINVAL; 189 } 190 191 if (!(p->flags & XFRM_STATE_ESN)) { 192 if (rs->oseq_hi) { 193 NL_SET_ERR_MSG( 194 extack, 195 "Replay oseq_hi should be 0 in non-ESN mode for output SA"); 196 return -EINVAL; 197 } 198 if (rs->oseq == U32_MAX) { 199 NL_SET_ERR_MSG( 200 extack, 201 "Replay oseq should be less than 0xFFFFFFFF in non-ESN mode for output SA"); 202 return -EINVAL; 203 } 204 } else { 205 if (rs->oseq == U32_MAX && rs->oseq_hi == U32_MAX) { 206 NL_SET_ERR_MSG( 207 extack, 208 "Replay oseq and oseq_hi should be less than 0xFFFFFFFF for output SA"); 209 return -EINVAL; 210 } 211 } 212 if (rs->bmp_len) { 213 NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA"); 214 return -EINVAL; 215 } 216 } 217 218 if (sa_dir == XFRM_SA_DIR_IN) { 219 if (rs->oseq || rs->oseq_hi) { 220 NL_SET_ERR_MSG(extack, 221 "Replay oseq and oseq_hi should be 0 for input SA"); 222 return -EINVAL; 223 } 224 if (!(p->flags & XFRM_STATE_ESN)) { 225 if (rs->seq_hi) { 226 NL_SET_ERR_MSG( 227 extack, 228 "Replay seq_hi should be 0 in non-ESN mode for input SA"); 229 return -EINVAL; 230 } 231 232 if (rs->seq == U32_MAX) { 233 NL_SET_ERR_MSG( 234 extack, 235 "Replay seq should be less than 0xFFFFFFFF in non-ESN mode for input SA"); 236 return -EINVAL; 237 } 238 } else { 239 if (rs->seq == U32_MAX && rs->seq_hi == U32_MAX) { 240 NL_SET_ERR_MSG( 241 extack, 242 "Replay seq and seq_hi should be less than 0xFFFFFFFF for input SA"); 243 return -EINVAL; 244 } 245 } 246 } 247 248 return 0; 249 } 250 251 static int verify_newsa_info(struct xfrm_usersa_info *p, 252 struct nlattr **attrs, 253 struct netlink_ext_ack *extack) 254 { 255 int err; 256 u8 sa_dir = nla_get_u8_default(attrs[XFRMA_SA_DIR], 0); 257 u16 family = p->sel.family; 258 259 err = -EINVAL; 260 switch (p->family) { 261 case AF_INET: 262 break; 263 264 case AF_INET6: 265 #if IS_ENABLED(CONFIG_IPV6) 266 break; 267 #else 268 err = -EAFNOSUPPORT; 269 NL_SET_ERR_MSG(extack, "IPv6 support disabled"); 270 goto out; 271 #endif 272 273 default: 274 NL_SET_ERR_MSG(extack, "Invalid address family"); 275 goto out; 276 } 277 278 if (!family && !(p->flags & XFRM_STATE_AF_UNSPEC)) 279 family = p->family; 280 281 switch (family) { 282 case AF_UNSPEC: 283 break; 284 285 case AF_INET: 286 if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) { 287 NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)"); 288 goto out; 289 } 290 291 break; 292 293 case AF_INET6: 294 #if IS_ENABLED(CONFIG_IPV6) 295 if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) { 296 NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)"); 297 goto out; 298 } 299 300 break; 301 #else 302 NL_SET_ERR_MSG(extack, "IPv6 support disabled"); 303 err = -EAFNOSUPPORT; 304 goto out; 305 #endif 306 307 default: 308 NL_SET_ERR_MSG(extack, "Invalid address family in selector"); 309 goto out; 310 } 311 312 err = -EINVAL; 313 switch (p->id.proto) { 314 case IPPROTO_AH: 315 if (!attrs[XFRMA_ALG_AUTH] && 316 !attrs[XFRMA_ALG_AUTH_TRUNC]) { 317 NL_SET_ERR_MSG(extack, "Missing required attribute for AH: AUTH_TRUNC or AUTH"); 318 goto out; 319 } 320 321 if (attrs[XFRMA_ALG_AEAD] || 322 attrs[XFRMA_ALG_CRYPT] || 323 attrs[XFRMA_ALG_COMP] || 324 attrs[XFRMA_TFCPAD]) { 325 NL_SET_ERR_MSG(extack, "Invalid attributes for AH: AEAD, CRYPT, COMP, TFCPAD"); 326 goto out; 327 } 328 break; 329 330 case IPPROTO_ESP: 331 if (attrs[XFRMA_ALG_COMP]) { 332 NL_SET_ERR_MSG(extack, "Invalid attribute for ESP: COMP"); 333 goto out; 334 } 335 336 if (!attrs[XFRMA_ALG_AUTH] && 337 !attrs[XFRMA_ALG_AUTH_TRUNC] && 338 !attrs[XFRMA_ALG_CRYPT] && 339 !attrs[XFRMA_ALG_AEAD]) { 340 NL_SET_ERR_MSG(extack, "Missing required attribute for ESP: at least one of AUTH, AUTH_TRUNC, CRYPT, AEAD"); 341 goto out; 342 } 343 344 if ((attrs[XFRMA_ALG_AUTH] || 345 attrs[XFRMA_ALG_AUTH_TRUNC] || 346 attrs[XFRMA_ALG_CRYPT]) && 347 attrs[XFRMA_ALG_AEAD]) { 348 NL_SET_ERR_MSG(extack, "Invalid attribute combination for ESP: AEAD can't be used with AUTH, AUTH_TRUNC, CRYPT"); 349 goto out; 350 } 351 352 if (attrs[XFRMA_TFCPAD] && 353 p->mode != XFRM_MODE_TUNNEL) { 354 NL_SET_ERR_MSG(extack, "TFC padding can only be used in tunnel mode"); 355 goto out; 356 } 357 if ((attrs[XFRMA_IPTFS_DROP_TIME] || 358 attrs[XFRMA_IPTFS_REORDER_WINDOW] || 359 attrs[XFRMA_IPTFS_DONT_FRAG] || 360 attrs[XFRMA_IPTFS_INIT_DELAY] || 361 attrs[XFRMA_IPTFS_MAX_QSIZE] || 362 attrs[XFRMA_IPTFS_PKT_SIZE]) && 363 p->mode != XFRM_MODE_IPTFS) { 364 NL_SET_ERR_MSG(extack, "IP-TFS options can only be used in IP-TFS mode"); 365 goto out; 366 } 367 break; 368 369 case IPPROTO_COMP: 370 if (!attrs[XFRMA_ALG_COMP]) { 371 NL_SET_ERR_MSG(extack, "Missing required attribute for COMP: COMP"); 372 goto out; 373 } 374 375 if (attrs[XFRMA_ALG_AEAD] || 376 attrs[XFRMA_ALG_AUTH] || 377 attrs[XFRMA_ALG_AUTH_TRUNC] || 378 attrs[XFRMA_ALG_CRYPT] || 379 attrs[XFRMA_TFCPAD]) { 380 NL_SET_ERR_MSG(extack, "Invalid attributes for COMP: AEAD, AUTH, AUTH_TRUNC, CRYPT, TFCPAD"); 381 goto out; 382 } 383 384 if (ntohl(p->id.spi) >= 0x10000) { 385 NL_SET_ERR_MSG(extack, "SPI is too large for COMP (must be < 0x10000)"); 386 goto out; 387 } 388 break; 389 390 #if IS_ENABLED(CONFIG_IPV6) 391 case IPPROTO_DSTOPTS: 392 case IPPROTO_ROUTING: 393 if (attrs[XFRMA_ALG_COMP] || 394 attrs[XFRMA_ALG_AUTH] || 395 attrs[XFRMA_ALG_AUTH_TRUNC] || 396 attrs[XFRMA_ALG_AEAD] || 397 attrs[XFRMA_ALG_CRYPT] || 398 attrs[XFRMA_ENCAP] || 399 attrs[XFRMA_SEC_CTX] || 400 attrs[XFRMA_TFCPAD]) { 401 NL_SET_ERR_MSG(extack, "Invalid attributes for DSTOPTS/ROUTING"); 402 goto out; 403 } 404 405 if (!attrs[XFRMA_COADDR]) { 406 NL_SET_ERR_MSG(extack, "Missing required COADDR attribute for DSTOPTS/ROUTING"); 407 goto out; 408 } 409 break; 410 #endif 411 412 default: 413 NL_SET_ERR_MSG(extack, "Unsupported protocol"); 414 goto out; 415 } 416 417 if ((err = verify_aead(attrs, extack))) 418 goto out; 419 if ((err = verify_auth_trunc(attrs, extack))) 420 goto out; 421 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH, extack))) 422 goto out; 423 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT, extack))) 424 goto out; 425 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP, extack))) 426 goto out; 427 if ((err = verify_sec_ctx_len(attrs, extack))) 428 goto out; 429 if ((err = verify_replay(p, attrs, sa_dir, extack))) 430 goto out; 431 432 err = -EINVAL; 433 switch (p->mode) { 434 case XFRM_MODE_TRANSPORT: 435 case XFRM_MODE_TUNNEL: 436 case XFRM_MODE_ROUTEOPTIMIZATION: 437 case XFRM_MODE_BEET: 438 break; 439 case XFRM_MODE_IPTFS: 440 if (p->id.proto != IPPROTO_ESP) { 441 NL_SET_ERR_MSG(extack, "IP-TFS mode only supported with ESP"); 442 goto out; 443 } 444 if (sa_dir == 0) { 445 NL_SET_ERR_MSG(extack, "IP-TFS mode requires in or out direction attribute"); 446 goto out; 447 } 448 break; 449 450 default: 451 NL_SET_ERR_MSG(extack, "Unsupported mode"); 452 goto out; 453 } 454 455 err = 0; 456 457 if (attrs[XFRMA_MTIMER_THRESH]) { 458 if (!attrs[XFRMA_ENCAP]) { 459 NL_SET_ERR_MSG(extack, "MTIMER_THRESH attribute can only be set on ENCAP states"); 460 err = -EINVAL; 461 goto out; 462 } 463 464 if (sa_dir == XFRM_SA_DIR_OUT) { 465 NL_SET_ERR_MSG(extack, 466 "MTIMER_THRESH attribute should not be set on output SA"); 467 err = -EINVAL; 468 goto out; 469 } 470 } 471 472 if (sa_dir == XFRM_SA_DIR_OUT) { 473 if (p->flags & XFRM_STATE_DECAP_DSCP) { 474 NL_SET_ERR_MSG(extack, "Flag DECAP_DSCP should not be set for output SA"); 475 err = -EINVAL; 476 goto out; 477 } 478 479 if (p->flags & XFRM_STATE_ICMP) { 480 NL_SET_ERR_MSG(extack, "Flag ICMP should not be set for output SA"); 481 err = -EINVAL; 482 goto out; 483 } 484 485 if (p->flags & XFRM_STATE_WILDRECV) { 486 NL_SET_ERR_MSG(extack, "Flag WILDRECV should not be set for output SA"); 487 err = -EINVAL; 488 goto out; 489 } 490 491 if (p->replay_window) { 492 NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA"); 493 err = -EINVAL; 494 goto out; 495 } 496 497 if (attrs[XFRMA_IPTFS_DROP_TIME]) { 498 NL_SET_ERR_MSG(extack, "IP-TFS drop time should not be set for output SA"); 499 err = -EINVAL; 500 goto out; 501 } 502 503 if (attrs[XFRMA_IPTFS_REORDER_WINDOW]) { 504 NL_SET_ERR_MSG(extack, "IP-TFS reorder window should not be set for output SA"); 505 err = -EINVAL; 506 goto out; 507 } 508 509 if (attrs[XFRMA_REPLAY_VAL]) { 510 struct xfrm_replay_state *replay; 511 512 replay = nla_data(attrs[XFRMA_REPLAY_VAL]); 513 514 if (replay->seq || replay->bitmap) { 515 NL_SET_ERR_MSG(extack, 516 "Replay seq and bitmap should be 0 for output SA"); 517 err = -EINVAL; 518 goto out; 519 } 520 } 521 } 522 523 if (sa_dir == XFRM_SA_DIR_IN) { 524 if (p->flags & XFRM_STATE_NOPMTUDISC) { 525 NL_SET_ERR_MSG(extack, "Flag NOPMTUDISC should not be set for input SA"); 526 err = -EINVAL; 527 goto out; 528 } 529 530 if (attrs[XFRMA_SA_EXTRA_FLAGS]) { 531 u32 xflags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); 532 533 if (xflags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP) { 534 NL_SET_ERR_MSG(extack, "Flag DONT_ENCAP_DSCP should not be set for input SA"); 535 err = -EINVAL; 536 goto out; 537 } 538 539 if (xflags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP) { 540 NL_SET_ERR_MSG(extack, "Flag OSEQ_MAY_WRAP should not be set for input SA"); 541 err = -EINVAL; 542 goto out; 543 } 544 545 } 546 547 if (attrs[XFRMA_IPTFS_DONT_FRAG]) { 548 NL_SET_ERR_MSG(extack, "IP-TFS don't fragment should not be set for input SA"); 549 err = -EINVAL; 550 goto out; 551 } 552 553 if (attrs[XFRMA_IPTFS_INIT_DELAY]) { 554 NL_SET_ERR_MSG(extack, "IP-TFS initial delay should not be set for input SA"); 555 err = -EINVAL; 556 goto out; 557 } 558 559 if (attrs[XFRMA_IPTFS_MAX_QSIZE]) { 560 NL_SET_ERR_MSG(extack, "IP-TFS max queue size should not be set for input SA"); 561 err = -EINVAL; 562 goto out; 563 } 564 565 if (attrs[XFRMA_IPTFS_PKT_SIZE]) { 566 NL_SET_ERR_MSG(extack, "IP-TFS packet size should not be set for input SA"); 567 err = -EINVAL; 568 goto out; 569 } 570 } 571 572 if (!sa_dir && attrs[XFRMA_SA_PCPU]) { 573 NL_SET_ERR_MSG(extack, "SA_PCPU only supported with SA_DIR"); 574 err = -EINVAL; 575 goto out; 576 } 577 578 out: 579 return err; 580 } 581 582 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, 583 struct xfrm_algo_desc *(*get_byname)(const char *, int), 584 struct nlattr *rta, struct netlink_ext_ack *extack) 585 { 586 struct xfrm_algo *p, *ualg; 587 struct xfrm_algo_desc *algo; 588 589 if (!rta) 590 return 0; 591 592 ualg = nla_data(rta); 593 594 algo = get_byname(ualg->alg_name, 1); 595 if (!algo) { 596 NL_SET_ERR_MSG(extack, "Requested COMP algorithm not found"); 597 return -ENOSYS; 598 } 599 *props = algo->desc.sadb_alg_id; 600 601 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL); 602 if (!p) 603 return -ENOMEM; 604 605 strscpy(p->alg_name, algo->name); 606 *algpp = p; 607 return 0; 608 } 609 610 static int attach_crypt(struct xfrm_state *x, struct nlattr *rta, 611 struct netlink_ext_ack *extack) 612 { 613 struct xfrm_algo *p, *ualg; 614 struct xfrm_algo_desc *algo; 615 616 if (!rta) 617 return 0; 618 619 ualg = nla_data(rta); 620 621 algo = xfrm_ealg_get_byname(ualg->alg_name, 1); 622 if (!algo) { 623 NL_SET_ERR_MSG(extack, "Requested CRYPT algorithm not found"); 624 return -ENOSYS; 625 } 626 x->props.ealgo = algo->desc.sadb_alg_id; 627 628 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL); 629 if (!p) 630 return -ENOMEM; 631 632 strscpy(p->alg_name, algo->name); 633 x->ealg = p; 634 x->geniv = algo->uinfo.encr.geniv; 635 return 0; 636 } 637 638 static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props, 639 struct nlattr *rta, struct netlink_ext_ack *extack) 640 { 641 struct xfrm_algo *ualg; 642 struct xfrm_algo_auth *p; 643 struct xfrm_algo_desc *algo; 644 645 if (!rta) 646 return 0; 647 648 ualg = nla_data(rta); 649 650 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 651 if (!algo) { 652 NL_SET_ERR_MSG(extack, "Requested AUTH algorithm not found"); 653 return -ENOSYS; 654 } 655 *props = algo->desc.sadb_alg_id; 656 657 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL); 658 if (!p) 659 return -ENOMEM; 660 661 strscpy(p->alg_name, algo->name); 662 p->alg_key_len = ualg->alg_key_len; 663 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; 664 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8); 665 666 *algpp = p; 667 return 0; 668 } 669 670 static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props, 671 struct nlattr *rta, struct netlink_ext_ack *extack) 672 { 673 struct xfrm_algo_auth *p, *ualg; 674 struct xfrm_algo_desc *algo; 675 676 if (!rta) 677 return 0; 678 679 ualg = nla_data(rta); 680 681 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 682 if (!algo) { 683 NL_SET_ERR_MSG(extack, "Requested AUTH_TRUNC algorithm not found"); 684 return -ENOSYS; 685 } 686 if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) { 687 NL_SET_ERR_MSG(extack, "Invalid length requested for truncated ICV"); 688 return -EINVAL; 689 } 690 *props = algo->desc.sadb_alg_id; 691 692 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL); 693 if (!p) 694 return -ENOMEM; 695 696 strscpy(p->alg_name, algo->name); 697 if (!p->alg_trunc_len) 698 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; 699 700 *algpp = p; 701 return 0; 702 } 703 704 static int attach_aead(struct xfrm_state *x, struct nlattr *rta, 705 struct netlink_ext_ack *extack) 706 { 707 struct xfrm_algo_aead *p, *ualg; 708 struct xfrm_algo_desc *algo; 709 710 if (!rta) 711 return 0; 712 713 ualg = nla_data(rta); 714 715 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1); 716 if (!algo) { 717 NL_SET_ERR_MSG(extack, "Requested AEAD algorithm not found"); 718 return -ENOSYS; 719 } 720 x->props.ealgo = algo->desc.sadb_alg_id; 721 722 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL); 723 if (!p) 724 return -ENOMEM; 725 726 strscpy(p->alg_name, algo->name); 727 x->aead = p; 728 x->geniv = algo->uinfo.aead.geniv; 729 return 0; 730 } 731 732 static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn, 733 struct nlattr *rp, 734 struct netlink_ext_ack *extack) 735 { 736 struct xfrm_replay_state_esn *up; 737 unsigned int ulen; 738 739 if (!replay_esn || !rp) 740 return 0; 741 742 up = nla_data(rp); 743 ulen = xfrm_replay_state_esn_len(up); 744 745 /* Check the overall length and the internal bitmap length to avoid 746 * potential overflow. */ 747 if (nla_len(rp) < (int)ulen) { 748 NL_SET_ERR_MSG(extack, "ESN attribute is too short"); 749 return -EINVAL; 750 } 751 752 if (xfrm_replay_state_esn_len(replay_esn) != ulen) { 753 NL_SET_ERR_MSG(extack, "New ESN size doesn't match the existing SA's ESN size"); 754 return -EINVAL; 755 } 756 757 if (replay_esn->bmp_len != up->bmp_len) { 758 NL_SET_ERR_MSG(extack, "New ESN bitmap size doesn't match the existing SA's ESN bitmap"); 759 return -EINVAL; 760 } 761 762 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) { 763 NL_SET_ERR_MSG(extack, "ESN replay window is longer than the bitmap"); 764 return -EINVAL; 765 } 766 767 return 0; 768 } 769 770 static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn, 771 struct xfrm_replay_state_esn **preplay_esn, 772 struct nlattr *rta) 773 { 774 struct xfrm_replay_state_esn *p, *pp, *up; 775 unsigned int klen, ulen; 776 777 if (!rta) 778 return 0; 779 780 up = nla_data(rta); 781 klen = xfrm_replay_state_esn_len(up); 782 ulen = nla_len(rta) >= (int)klen ? klen : sizeof(*up); 783 784 p = kzalloc(klen, GFP_KERNEL); 785 if (!p) 786 return -ENOMEM; 787 788 pp = kzalloc(klen, GFP_KERNEL); 789 if (!pp) { 790 kfree(p); 791 return -ENOMEM; 792 } 793 794 memcpy(p, up, ulen); 795 memcpy(pp, up, ulen); 796 797 *replay_esn = p; 798 *preplay_esn = pp; 799 800 return 0; 801 } 802 803 static inline unsigned int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx) 804 { 805 unsigned int len = 0; 806 807 if (xfrm_ctx) { 808 len += sizeof(struct xfrm_user_sec_ctx); 809 len += xfrm_ctx->ctx_len; 810 } 811 return len; 812 } 813 814 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) 815 { 816 memcpy(&x->id, &p->id, sizeof(x->id)); 817 memcpy(&x->sel, &p->sel, sizeof(x->sel)); 818 memcpy(&x->lft, &p->lft, sizeof(x->lft)); 819 x->props.mode = p->mode; 820 x->props.replay_window = min_t(unsigned int, p->replay_window, 821 sizeof(x->replay.bitmap) * 8); 822 x->props.reqid = p->reqid; 823 x->props.family = p->family; 824 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); 825 x->props.flags = p->flags; 826 827 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC)) 828 x->sel.family = p->family; 829 } 830 831 /* 832 * someday when pfkey also has support, we could have the code 833 * somehow made shareable and move it to xfrm_state.c - JHS 834 * 835 */ 836 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, 837 int update_esn) 838 { 839 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; 840 struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL; 841 struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; 842 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; 843 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; 844 struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH]; 845 846 if (re && x->replay_esn && x->preplay_esn) { 847 struct xfrm_replay_state_esn *replay_esn; 848 replay_esn = nla_data(re); 849 memcpy(x->replay_esn, replay_esn, 850 xfrm_replay_state_esn_len(replay_esn)); 851 memcpy(x->preplay_esn, replay_esn, 852 xfrm_replay_state_esn_len(replay_esn)); 853 } 854 855 if (rp) { 856 struct xfrm_replay_state *replay; 857 replay = nla_data(rp); 858 memcpy(&x->replay, replay, sizeof(*replay)); 859 memcpy(&x->preplay, replay, sizeof(*replay)); 860 } 861 862 if (lt) { 863 struct xfrm_lifetime_cur *ltime; 864 ltime = nla_data(lt); 865 x->curlft.bytes = ltime->bytes; 866 x->curlft.packets = ltime->packets; 867 x->curlft.add_time = ltime->add_time; 868 x->curlft.use_time = ltime->use_time; 869 } 870 871 if (et) 872 x->replay_maxage = nla_get_u32(et); 873 874 if (rt) 875 x->replay_maxdiff = nla_get_u32(rt); 876 877 if (mt) 878 x->mapping_maxage = nla_get_u32(mt); 879 } 880 881 static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m) 882 { 883 if (attrs[XFRMA_SET_MARK]) { 884 m->v = nla_get_u32(attrs[XFRMA_SET_MARK]); 885 m->m = nla_get_u32_default(attrs[XFRMA_SET_MARK_MASK], 886 0xffffffff); 887 } else { 888 m->v = m->m = 0; 889 } 890 } 891 892 static struct xfrm_state *xfrm_state_construct(struct net *net, 893 struct xfrm_usersa_info *p, 894 struct nlattr **attrs, 895 int *errp, 896 struct netlink_ext_ack *extack) 897 { 898 struct xfrm_state *x = xfrm_state_alloc(net); 899 int err = -ENOMEM; 900 901 if (!x) 902 goto error_no_put; 903 904 copy_from_user_state(x, p); 905 906 if (attrs[XFRMA_ENCAP]) { 907 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), 908 sizeof(*x->encap), GFP_KERNEL); 909 if (x->encap == NULL) 910 goto error; 911 } 912 913 if (attrs[XFRMA_COADDR]) { 914 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), 915 sizeof(*x->coaddr), GFP_KERNEL); 916 if (x->coaddr == NULL) 917 goto error; 918 } 919 920 if (attrs[XFRMA_SA_EXTRA_FLAGS]) 921 x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); 922 923 if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD], extack))) 924 goto error; 925 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo, 926 attrs[XFRMA_ALG_AUTH_TRUNC], extack))) 927 goto error; 928 if (!x->props.aalgo) { 929 if ((err = attach_auth(&x->aalg, &x->props.aalgo, 930 attrs[XFRMA_ALG_AUTH], extack))) 931 goto error; 932 } 933 if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT], extack))) 934 goto error; 935 if ((err = attach_one_algo(&x->calg, &x->props.calgo, 936 xfrm_calg_get_byname, 937 attrs[XFRMA_ALG_COMP], extack))) 938 goto error; 939 940 if (attrs[XFRMA_TFCPAD]) 941 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]); 942 943 xfrm_mark_get(attrs, &x->mark); 944 945 xfrm_smark_init(attrs, &x->props.smark); 946 947 if (attrs[XFRMA_IF_ID]) 948 x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); 949 950 if (attrs[XFRMA_SA_DIR]) 951 x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]); 952 953 if (attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]) 954 x->nat_keepalive_interval = 955 nla_get_u32(attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]); 956 957 if (attrs[XFRMA_SA_PCPU]) { 958 x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]); 959 if (x->pcpu_num >= num_possible_cpus()) { 960 err = -ERANGE; 961 NL_SET_ERR_MSG(extack, "pCPU number too big"); 962 goto error; 963 } 964 } 965 966 err = __xfrm_init_state(x, extack); 967 if (err) 968 goto error; 969 970 if (attrs[XFRMA_SEC_CTX]) { 971 err = security_xfrm_state_alloc(x, 972 nla_data(attrs[XFRMA_SEC_CTX])); 973 if (err) 974 goto error; 975 } 976 977 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn, 978 attrs[XFRMA_REPLAY_ESN_VAL]))) 979 goto error; 980 981 x->km.seq = p->seq; 982 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth; 983 /* sysctl_xfrm_aevent_etime is in 100ms units */ 984 x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M; 985 986 if ((err = xfrm_init_replay(x, extack))) 987 goto error; 988 989 /* override default values from above */ 990 xfrm_update_ae_params(x, attrs, 0); 991 992 xfrm_set_type_offload(x, attrs[XFRMA_OFFLOAD_DEV]); 993 /* configure the hardware if offload is requested */ 994 if (attrs[XFRMA_OFFLOAD_DEV]) { 995 err = xfrm_dev_state_add(net, x, 996 nla_data(attrs[XFRMA_OFFLOAD_DEV]), 997 extack); 998 if (err) 999 goto error; 1000 } 1001 1002 if (x->mode_cbs && x->mode_cbs->user_init) { 1003 err = x->mode_cbs->user_init(net, x, attrs, extack); 1004 if (err) 1005 goto error; 1006 } 1007 1008 return x; 1009 1010 error: 1011 x->km.state = XFRM_STATE_DEAD; 1012 xfrm_state_put(x); 1013 error_no_put: 1014 *errp = err; 1015 return NULL; 1016 } 1017 1018 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 1019 struct nlattr **attrs, struct netlink_ext_ack *extack) 1020 { 1021 struct net *net = sock_net(skb->sk); 1022 struct xfrm_usersa_info *p = nlmsg_data(nlh); 1023 struct xfrm_state *x; 1024 int err; 1025 struct km_event c; 1026 1027 err = verify_newsa_info(p, attrs, extack); 1028 if (err) 1029 return err; 1030 1031 x = xfrm_state_construct(net, p, attrs, &err, extack); 1032 if (!x) 1033 return err; 1034 1035 xfrm_state_hold(x); 1036 if (nlh->nlmsg_type == XFRM_MSG_NEWSA) 1037 err = xfrm_state_add(x); 1038 else 1039 err = xfrm_state_update(x); 1040 1041 xfrm_audit_state_add(x, err ? 0 : 1, true); 1042 1043 if (err < 0) { 1044 x->km.state = XFRM_STATE_DEAD; 1045 xfrm_dev_state_delete(x); 1046 __xfrm_state_put(x); 1047 goto out; 1048 } 1049 1050 if (x->km.state == XFRM_STATE_VOID) 1051 x->km.state = XFRM_STATE_VALID; 1052 1053 c.seq = nlh->nlmsg_seq; 1054 c.portid = nlh->nlmsg_pid; 1055 c.event = nlh->nlmsg_type; 1056 1057 km_state_notify(x, &c); 1058 out: 1059 xfrm_state_put(x); 1060 return err; 1061 } 1062 1063 static struct xfrm_state *xfrm_user_state_lookup(struct net *net, 1064 struct xfrm_usersa_id *p, 1065 struct nlattr **attrs, 1066 int *errp) 1067 { 1068 struct xfrm_state *x = NULL; 1069 struct xfrm_mark m; 1070 int err; 1071 u32 mark = xfrm_mark_get(attrs, &m); 1072 1073 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) { 1074 err = -ESRCH; 1075 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family); 1076 } else { 1077 xfrm_address_t *saddr = NULL; 1078 1079 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr); 1080 if (!saddr) { 1081 err = -EINVAL; 1082 goto out; 1083 } 1084 1085 err = -ESRCH; 1086 x = xfrm_state_lookup_byaddr(net, mark, 1087 &p->daddr, saddr, 1088 p->proto, p->family); 1089 } 1090 1091 out: 1092 if (!x && errp) 1093 *errp = err; 1094 return x; 1095 } 1096 1097 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 1098 struct nlattr **attrs, struct netlink_ext_ack *extack) 1099 { 1100 struct net *net = sock_net(skb->sk); 1101 struct xfrm_state *x; 1102 int err = -ESRCH; 1103 struct km_event c; 1104 struct xfrm_usersa_id *p = nlmsg_data(nlh); 1105 1106 x = xfrm_user_state_lookup(net, p, attrs, &err); 1107 if (x == NULL) 1108 return err; 1109 1110 if ((err = security_xfrm_state_delete(x)) != 0) 1111 goto out; 1112 1113 if (xfrm_state_kern(x)) { 1114 NL_SET_ERR_MSG(extack, "SA is in use by tunnels"); 1115 err = -EPERM; 1116 goto out; 1117 } 1118 1119 err = xfrm_state_delete(x); 1120 if (err < 0) 1121 goto out; 1122 1123 c.seq = nlh->nlmsg_seq; 1124 c.portid = nlh->nlmsg_pid; 1125 c.event = nlh->nlmsg_type; 1126 km_state_notify(x, &c); 1127 1128 out: 1129 xfrm_audit_state_delete(x, err ? 0 : 1, true); 1130 xfrm_state_put(x); 1131 return err; 1132 } 1133 1134 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) 1135 { 1136 memset(p, 0, sizeof(*p)); 1137 memcpy(&p->id, &x->id, sizeof(p->id)); 1138 memcpy(&p->sel, &x->sel, sizeof(p->sel)); 1139 memcpy(&p->lft, &x->lft, sizeof(p->lft)); 1140 if (x->xso.dev) 1141 xfrm_dev_state_update_stats(x); 1142 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft)); 1143 put_unaligned(x->stats.replay_window, &p->stats.replay_window); 1144 put_unaligned(x->stats.replay, &p->stats.replay); 1145 put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed); 1146 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr)); 1147 p->mode = x->props.mode; 1148 p->replay_window = x->props.replay_window; 1149 p->reqid = x->props.reqid; 1150 p->family = x->props.family; 1151 p->flags = x->props.flags; 1152 p->seq = x->km.seq; 1153 } 1154 1155 struct xfrm_dump_info { 1156 struct sk_buff *in_skb; 1157 struct sk_buff *out_skb; 1158 u32 nlmsg_seq; 1159 u16 nlmsg_flags; 1160 }; 1161 1162 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) 1163 { 1164 struct xfrm_user_sec_ctx *uctx; 1165 struct nlattr *attr; 1166 int ctx_size = sizeof(*uctx) + s->ctx_len; 1167 1168 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size); 1169 if (attr == NULL) 1170 return -EMSGSIZE; 1171 1172 uctx = nla_data(attr); 1173 uctx->exttype = XFRMA_SEC_CTX; 1174 uctx->len = ctx_size; 1175 uctx->ctx_doi = s->ctx_doi; 1176 uctx->ctx_alg = s->ctx_alg; 1177 uctx->ctx_len = s->ctx_len; 1178 memcpy(uctx + 1, s->ctx_str, s->ctx_len); 1179 1180 return 0; 1181 } 1182 1183 static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb) 1184 { 1185 struct xfrm_user_offload *xuo; 1186 struct nlattr *attr; 1187 1188 attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo)); 1189 if (attr == NULL) 1190 return -EMSGSIZE; 1191 1192 xuo = nla_data(attr); 1193 memset(xuo, 0, sizeof(*xuo)); 1194 xuo->ifindex = xso->dev->ifindex; 1195 if (xso->dir == XFRM_DEV_OFFLOAD_IN) 1196 xuo->flags = XFRM_OFFLOAD_INBOUND; 1197 if (xso->type == XFRM_DEV_OFFLOAD_PACKET) 1198 xuo->flags |= XFRM_OFFLOAD_PACKET; 1199 1200 return 0; 1201 } 1202 1203 static bool xfrm_redact(void) 1204 { 1205 return IS_ENABLED(CONFIG_SECURITY) && 1206 security_locked_down(LOCKDOWN_XFRM_SECRET); 1207 } 1208 1209 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) 1210 { 1211 struct xfrm_algo *algo; 1212 struct xfrm_algo_auth *ap; 1213 struct nlattr *nla; 1214 bool redact_secret = xfrm_redact(); 1215 1216 nla = nla_reserve(skb, XFRMA_ALG_AUTH, 1217 sizeof(*algo) + (auth->alg_key_len + 7) / 8); 1218 if (!nla) 1219 return -EMSGSIZE; 1220 algo = nla_data(nla); 1221 strscpy_pad(algo->alg_name, auth->alg_name); 1222 1223 if (redact_secret && auth->alg_key_len) 1224 memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8); 1225 else 1226 memcpy(algo->alg_key, auth->alg_key, 1227 (auth->alg_key_len + 7) / 8); 1228 algo->alg_key_len = auth->alg_key_len; 1229 1230 nla = nla_reserve(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(auth)); 1231 if (!nla) 1232 return -EMSGSIZE; 1233 ap = nla_data(nla); 1234 strscpy_pad(ap->alg_name, auth->alg_name); 1235 ap->alg_key_len = auth->alg_key_len; 1236 ap->alg_trunc_len = auth->alg_trunc_len; 1237 if (redact_secret && auth->alg_key_len) 1238 memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8); 1239 else 1240 memcpy(ap->alg_key, auth->alg_key, 1241 (auth->alg_key_len + 7) / 8); 1242 return 0; 1243 } 1244 1245 static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb) 1246 { 1247 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_AEAD, aead_len(aead)); 1248 struct xfrm_algo_aead *ap; 1249 bool redact_secret = xfrm_redact(); 1250 1251 if (!nla) 1252 return -EMSGSIZE; 1253 1254 ap = nla_data(nla); 1255 strscpy_pad(ap->alg_name, aead->alg_name); 1256 ap->alg_key_len = aead->alg_key_len; 1257 ap->alg_icv_len = aead->alg_icv_len; 1258 1259 if (redact_secret && aead->alg_key_len) 1260 memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8); 1261 else 1262 memcpy(ap->alg_key, aead->alg_key, 1263 (aead->alg_key_len + 7) / 8); 1264 return 0; 1265 } 1266 1267 static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb) 1268 { 1269 struct xfrm_algo *ap; 1270 bool redact_secret = xfrm_redact(); 1271 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_CRYPT, 1272 xfrm_alg_len(ealg)); 1273 if (!nla) 1274 return -EMSGSIZE; 1275 1276 ap = nla_data(nla); 1277 strscpy_pad(ap->alg_name, ealg->alg_name); 1278 ap->alg_key_len = ealg->alg_key_len; 1279 1280 if (redact_secret && ealg->alg_key_len) 1281 memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8); 1282 else 1283 memcpy(ap->alg_key, ealg->alg_key, 1284 (ealg->alg_key_len + 7) / 8); 1285 1286 return 0; 1287 } 1288 1289 static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb) 1290 { 1291 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg)); 1292 struct xfrm_algo *ap; 1293 1294 if (!nla) 1295 return -EMSGSIZE; 1296 1297 ap = nla_data(nla); 1298 strscpy_pad(ap->alg_name, calg->alg_name); 1299 ap->alg_key_len = 0; 1300 1301 return 0; 1302 } 1303 1304 static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb) 1305 { 1306 struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep)); 1307 struct xfrm_encap_tmpl *uep; 1308 1309 if (!nla) 1310 return -EMSGSIZE; 1311 1312 uep = nla_data(nla); 1313 memset(uep, 0, sizeof(*uep)); 1314 1315 uep->encap_type = ep->encap_type; 1316 uep->encap_sport = ep->encap_sport; 1317 uep->encap_dport = ep->encap_dport; 1318 uep->encap_oa = ep->encap_oa; 1319 1320 return 0; 1321 } 1322 1323 static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m) 1324 { 1325 int ret = 0; 1326 1327 if (m->v | m->m) { 1328 ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v); 1329 if (!ret) 1330 ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m); 1331 } 1332 return ret; 1333 } 1334 1335 /* Don't change this without updating xfrm_sa_len! */ 1336 static int copy_to_user_state_extra(struct xfrm_state *x, 1337 struct xfrm_usersa_info *p, 1338 struct sk_buff *skb) 1339 { 1340 int ret = 0; 1341 1342 copy_to_user_state(x, p); 1343 1344 if (x->props.extra_flags) { 1345 ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS, 1346 x->props.extra_flags); 1347 if (ret) 1348 goto out; 1349 } 1350 1351 if (x->coaddr) { 1352 ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); 1353 if (ret) 1354 goto out; 1355 } 1356 if (x->lastused) { 1357 ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused, 1358 XFRMA_PAD); 1359 if (ret) 1360 goto out; 1361 } 1362 if (x->aead) { 1363 ret = copy_to_user_aead(x->aead, skb); 1364 if (ret) 1365 goto out; 1366 } 1367 if (x->aalg) { 1368 ret = copy_to_user_auth(x->aalg, skb); 1369 if (ret) 1370 goto out; 1371 } 1372 if (x->ealg) { 1373 ret = copy_to_user_ealg(x->ealg, skb); 1374 if (ret) 1375 goto out; 1376 } 1377 if (x->calg) { 1378 ret = copy_to_user_calg(x->calg, skb); 1379 if (ret) 1380 goto out; 1381 } 1382 if (x->encap) { 1383 ret = copy_to_user_encap(x->encap, skb); 1384 if (ret) 1385 goto out; 1386 } 1387 if (x->tfcpad) { 1388 ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad); 1389 if (ret) 1390 goto out; 1391 } 1392 ret = xfrm_mark_put(skb, &x->mark); 1393 if (ret) 1394 goto out; 1395 1396 ret = xfrm_smark_put(skb, &x->props.smark); 1397 if (ret) 1398 goto out; 1399 1400 if (x->replay_esn) 1401 ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL, 1402 xfrm_replay_state_esn_len(x->replay_esn), 1403 x->replay_esn); 1404 else 1405 ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), 1406 &x->replay); 1407 if (ret) 1408 goto out; 1409 if(x->xso.dev) 1410 ret = copy_user_offload(&x->xso, skb); 1411 if (ret) 1412 goto out; 1413 if (x->if_id) { 1414 ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id); 1415 if (ret) 1416 goto out; 1417 } 1418 if (x->security) { 1419 ret = copy_sec_ctx(x->security, skb); 1420 if (ret) 1421 goto out; 1422 } 1423 if (x->mode_cbs && x->mode_cbs->copy_to_user) 1424 ret = x->mode_cbs->copy_to_user(x, skb); 1425 if (ret) 1426 goto out; 1427 if (x->mapping_maxage) { 1428 ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage); 1429 if (ret) 1430 goto out; 1431 } 1432 if (x->pcpu_num != UINT_MAX) { 1433 ret = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num); 1434 if (ret) 1435 goto out; 1436 } 1437 if (x->dir) 1438 ret = nla_put_u8(skb, XFRMA_SA_DIR, x->dir); 1439 1440 if (x->nat_keepalive_interval) { 1441 ret = nla_put_u32(skb, XFRMA_NAT_KEEPALIVE_INTERVAL, 1442 x->nat_keepalive_interval); 1443 if (ret) 1444 goto out; 1445 } 1446 out: 1447 return ret; 1448 } 1449 1450 static int dump_one_state(struct xfrm_state *x, int count, void *ptr) 1451 { 1452 struct xfrm_dump_info *sp = ptr; 1453 struct sk_buff *in_skb = sp->in_skb; 1454 struct sk_buff *skb = sp->out_skb; 1455 struct xfrm_translator *xtr; 1456 struct xfrm_usersa_info *p; 1457 struct nlmsghdr *nlh; 1458 int err; 1459 1460 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq, 1461 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); 1462 if (nlh == NULL) 1463 return -EMSGSIZE; 1464 1465 p = nlmsg_data(nlh); 1466 1467 err = copy_to_user_state_extra(x, p, skb); 1468 if (err) { 1469 nlmsg_cancel(skb, nlh); 1470 return err; 1471 } 1472 nlmsg_end(skb, nlh); 1473 1474 xtr = xfrm_get_translator(); 1475 if (xtr) { 1476 err = xtr->alloc_compat(skb, nlh); 1477 1478 xfrm_put_translator(xtr); 1479 if (err) { 1480 nlmsg_cancel(skb, nlh); 1481 return err; 1482 } 1483 } 1484 1485 return 0; 1486 } 1487 1488 static int xfrm_dump_sa_done(struct netlink_callback *cb) 1489 { 1490 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; 1491 struct sock *sk = cb->skb->sk; 1492 struct net *net = sock_net(sk); 1493 1494 if (cb->args[0]) 1495 xfrm_state_walk_done(walk, net); 1496 return 0; 1497 } 1498 1499 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) 1500 { 1501 struct net *net = sock_net(skb->sk); 1502 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; 1503 struct xfrm_dump_info info; 1504 1505 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) > 1506 sizeof(cb->args) - sizeof(cb->args[0])); 1507 1508 info.in_skb = cb->skb; 1509 info.out_skb = skb; 1510 info.nlmsg_seq = cb->nlh->nlmsg_seq; 1511 info.nlmsg_flags = NLM_F_MULTI; 1512 1513 if (!cb->args[0]) { 1514 struct nlattr *attrs[XFRMA_MAX+1]; 1515 struct xfrm_address_filter *filter = NULL; 1516 u8 proto = 0; 1517 int err; 1518 1519 err = nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX, 1520 xfrma_policy, cb->extack); 1521 if (err < 0) 1522 return err; 1523 1524 if (attrs[XFRMA_ADDRESS_FILTER]) { 1525 filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]), 1526 sizeof(*filter), GFP_KERNEL); 1527 if (filter == NULL) 1528 return -ENOMEM; 1529 1530 /* see addr_match(), (prefix length >> 5) << 2 1531 * will be used to compare xfrm_address_t 1532 */ 1533 if (filter->splen > (sizeof(xfrm_address_t) << 3) || 1534 filter->dplen > (sizeof(xfrm_address_t) << 3)) { 1535 kfree(filter); 1536 return -EINVAL; 1537 } 1538 } 1539 1540 if (attrs[XFRMA_PROTO]) 1541 proto = nla_get_u8(attrs[XFRMA_PROTO]); 1542 1543 xfrm_state_walk_init(walk, proto, filter); 1544 cb->args[0] = 1; 1545 } 1546 1547 (void) xfrm_state_walk(net, walk, dump_one_state, &info); 1548 1549 return skb->len; 1550 } 1551 1552 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, 1553 struct xfrm_state *x, u32 seq) 1554 { 1555 struct xfrm_dump_info info; 1556 struct sk_buff *skb; 1557 int err; 1558 1559 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 1560 if (!skb) 1561 return ERR_PTR(-ENOMEM); 1562 1563 info.in_skb = in_skb; 1564 info.out_skb = skb; 1565 info.nlmsg_seq = seq; 1566 info.nlmsg_flags = 0; 1567 1568 err = dump_one_state(x, 0, &info); 1569 if (err) { 1570 kfree_skb(skb); 1571 return ERR_PTR(err); 1572 } 1573 1574 return skb; 1575 } 1576 1577 /* A wrapper for nlmsg_multicast() checking that nlsk is still available. 1578 * Must be called with RCU read lock. 1579 */ 1580 static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb, 1581 u32 pid, unsigned int group) 1582 { 1583 struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); 1584 struct xfrm_translator *xtr; 1585 1586 if (!nlsk) { 1587 kfree_skb(skb); 1588 return -EPIPE; 1589 } 1590 1591 xtr = xfrm_get_translator(); 1592 if (xtr) { 1593 int err = xtr->alloc_compat(skb, nlmsg_hdr(skb)); 1594 1595 xfrm_put_translator(xtr); 1596 if (err) { 1597 kfree_skb(skb); 1598 return err; 1599 } 1600 } 1601 1602 return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); 1603 } 1604 1605 static inline unsigned int xfrm_spdinfo_msgsize(void) 1606 { 1607 return NLMSG_ALIGN(4) 1608 + nla_total_size(sizeof(struct xfrmu_spdinfo)) 1609 + nla_total_size(sizeof(struct xfrmu_spdhinfo)) 1610 + nla_total_size(sizeof(struct xfrmu_spdhthresh)) 1611 + nla_total_size(sizeof(struct xfrmu_spdhthresh)); 1612 } 1613 1614 static int build_spdinfo(struct sk_buff *skb, struct net *net, 1615 u32 portid, u32 seq, u32 flags) 1616 { 1617 struct xfrmk_spdinfo si; 1618 struct xfrmu_spdinfo spc; 1619 struct xfrmu_spdhinfo sph; 1620 struct xfrmu_spdhthresh spt4, spt6; 1621 struct nlmsghdr *nlh; 1622 int err; 1623 u32 *f; 1624 unsigned lseq; 1625 1626 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); 1627 if (nlh == NULL) /* shouldn't really happen ... */ 1628 return -EMSGSIZE; 1629 1630 f = nlmsg_data(nlh); 1631 *f = flags; 1632 xfrm_spd_getinfo(net, &si); 1633 spc.incnt = si.incnt; 1634 spc.outcnt = si.outcnt; 1635 spc.fwdcnt = si.fwdcnt; 1636 spc.inscnt = si.inscnt; 1637 spc.outscnt = si.outscnt; 1638 spc.fwdscnt = si.fwdscnt; 1639 sph.spdhcnt = si.spdhcnt; 1640 sph.spdhmcnt = si.spdhmcnt; 1641 1642 do { 1643 lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock); 1644 1645 spt4.lbits = net->xfrm.policy_hthresh.lbits4; 1646 spt4.rbits = net->xfrm.policy_hthresh.rbits4; 1647 spt6.lbits = net->xfrm.policy_hthresh.lbits6; 1648 spt6.rbits = net->xfrm.policy_hthresh.rbits6; 1649 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq)); 1650 1651 err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc); 1652 if (!err) 1653 err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph); 1654 if (!err) 1655 err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4); 1656 if (!err) 1657 err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6); 1658 if (err) { 1659 nlmsg_cancel(skb, nlh); 1660 return err; 1661 } 1662 1663 nlmsg_end(skb, nlh); 1664 return 0; 1665 } 1666 1667 static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, 1668 struct nlattr **attrs, 1669 struct netlink_ext_ack *extack) 1670 { 1671 struct net *net = sock_net(skb->sk); 1672 struct xfrmu_spdhthresh *thresh4 = NULL; 1673 struct xfrmu_spdhthresh *thresh6 = NULL; 1674 1675 /* selector prefixlen thresholds to hash policies */ 1676 if (attrs[XFRMA_SPD_IPV4_HTHRESH]) { 1677 struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH]; 1678 1679 if (nla_len(rta) < sizeof(*thresh4)) { 1680 NL_SET_ERR_MSG(extack, "Invalid SPD_IPV4_HTHRESH attribute length"); 1681 return -EINVAL; 1682 } 1683 thresh4 = nla_data(rta); 1684 if (thresh4->lbits > 32 || thresh4->rbits > 32) { 1685 NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 32 for IPv4)"); 1686 return -EINVAL; 1687 } 1688 } 1689 if (attrs[XFRMA_SPD_IPV6_HTHRESH]) { 1690 struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH]; 1691 1692 if (nla_len(rta) < sizeof(*thresh6)) { 1693 NL_SET_ERR_MSG(extack, "Invalid SPD_IPV6_HTHRESH attribute length"); 1694 return -EINVAL; 1695 } 1696 thresh6 = nla_data(rta); 1697 if (thresh6->lbits > 128 || thresh6->rbits > 128) { 1698 NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 128 for IPv6)"); 1699 return -EINVAL; 1700 } 1701 } 1702 1703 if (thresh4 || thresh6) { 1704 write_seqlock(&net->xfrm.policy_hthresh.lock); 1705 if (thresh4) { 1706 net->xfrm.policy_hthresh.lbits4 = thresh4->lbits; 1707 net->xfrm.policy_hthresh.rbits4 = thresh4->rbits; 1708 } 1709 if (thresh6) { 1710 net->xfrm.policy_hthresh.lbits6 = thresh6->lbits; 1711 net->xfrm.policy_hthresh.rbits6 = thresh6->rbits; 1712 } 1713 write_sequnlock(&net->xfrm.policy_hthresh.lock); 1714 1715 xfrm_policy_hash_rebuild(net); 1716 } 1717 1718 return 0; 1719 } 1720 1721 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, 1722 struct nlattr **attrs, 1723 struct netlink_ext_ack *extack) 1724 { 1725 struct net *net = sock_net(skb->sk); 1726 struct sk_buff *r_skb; 1727 u32 *flags = nlmsg_data(nlh); 1728 u32 sportid = NETLINK_CB(skb).portid; 1729 u32 seq = nlh->nlmsg_seq; 1730 int err; 1731 1732 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC); 1733 if (r_skb == NULL) 1734 return -ENOMEM; 1735 1736 err = build_spdinfo(r_skb, net, sportid, seq, *flags); 1737 BUG_ON(err < 0); 1738 1739 return nlmsg_unicast(xfrm_net_nlsk(net, skb), r_skb, sportid); 1740 } 1741 1742 static inline unsigned int xfrm_sadinfo_msgsize(void) 1743 { 1744 return NLMSG_ALIGN(4) 1745 + nla_total_size(sizeof(struct xfrmu_sadhinfo)) 1746 + nla_total_size(4); /* XFRMA_SAD_CNT */ 1747 } 1748 1749 static int build_sadinfo(struct sk_buff *skb, struct net *net, 1750 u32 portid, u32 seq, u32 flags) 1751 { 1752 struct xfrmk_sadinfo si; 1753 struct xfrmu_sadhinfo sh; 1754 struct nlmsghdr *nlh; 1755 int err; 1756 u32 *f; 1757 1758 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); 1759 if (nlh == NULL) /* shouldn't really happen ... */ 1760 return -EMSGSIZE; 1761 1762 f = nlmsg_data(nlh); 1763 *f = flags; 1764 xfrm_sad_getinfo(net, &si); 1765 1766 sh.sadhmcnt = si.sadhmcnt; 1767 sh.sadhcnt = si.sadhcnt; 1768 1769 err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt); 1770 if (!err) 1771 err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh); 1772 if (err) { 1773 nlmsg_cancel(skb, nlh); 1774 return err; 1775 } 1776 1777 nlmsg_end(skb, nlh); 1778 return 0; 1779 } 1780 1781 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, 1782 struct nlattr **attrs, 1783 struct netlink_ext_ack *extack) 1784 { 1785 struct net *net = sock_net(skb->sk); 1786 struct sk_buff *r_skb; 1787 u32 *flags = nlmsg_data(nlh); 1788 u32 sportid = NETLINK_CB(skb).portid; 1789 u32 seq = nlh->nlmsg_seq; 1790 int err; 1791 1792 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC); 1793 if (r_skb == NULL) 1794 return -ENOMEM; 1795 1796 err = build_sadinfo(r_skb, net, sportid, seq, *flags); 1797 BUG_ON(err < 0); 1798 1799 return nlmsg_unicast(xfrm_net_nlsk(net, skb), r_skb, sportid); 1800 } 1801 1802 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 1803 struct nlattr **attrs, struct netlink_ext_ack *extack) 1804 { 1805 struct net *net = sock_net(skb->sk); 1806 struct xfrm_usersa_id *p = nlmsg_data(nlh); 1807 struct xfrm_state *x; 1808 struct sk_buff *resp_skb; 1809 int err = -ESRCH; 1810 1811 x = xfrm_user_state_lookup(net, p, attrs, &err); 1812 if (x == NULL) 1813 goto out_noput; 1814 1815 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); 1816 if (IS_ERR(resp_skb)) { 1817 err = PTR_ERR(resp_skb); 1818 } else { 1819 err = nlmsg_unicast(xfrm_net_nlsk(net, skb), resp_skb, NETLINK_CB(skb).portid); 1820 } 1821 xfrm_state_put(x); 1822 out_noput: 1823 return err; 1824 } 1825 1826 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, 1827 struct nlattr **attrs, 1828 struct netlink_ext_ack *extack) 1829 { 1830 struct net *net = sock_net(skb->sk); 1831 struct xfrm_state *x; 1832 struct xfrm_userspi_info *p; 1833 struct xfrm_translator *xtr; 1834 struct sk_buff *resp_skb; 1835 xfrm_address_t *daddr; 1836 int family; 1837 int err; 1838 u32 mark; 1839 struct xfrm_mark m; 1840 u32 if_id = 0; 1841 u32 pcpu_num = UINT_MAX; 1842 1843 p = nlmsg_data(nlh); 1844 err = verify_spi_info(p->info.id.proto, p->min, p->max, extack); 1845 if (err) 1846 goto out_noput; 1847 1848 family = p->info.family; 1849 daddr = &p->info.id.daddr; 1850 1851 x = NULL; 1852 1853 mark = xfrm_mark_get(attrs, &m); 1854 1855 if (attrs[XFRMA_IF_ID]) 1856 if_id = nla_get_u32(attrs[XFRMA_IF_ID]); 1857 1858 if (attrs[XFRMA_SA_PCPU]) { 1859 pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]); 1860 if (pcpu_num >= num_possible_cpus()) { 1861 err = -EINVAL; 1862 NL_SET_ERR_MSG(extack, "pCPU number too big"); 1863 goto out_noput; 1864 } 1865 } 1866 1867 if (p->info.seq) { 1868 x = xfrm_find_acq_byseq(net, mark, p->info.seq, pcpu_num); 1869 if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) { 1870 xfrm_state_put(x); 1871 x = NULL; 1872 } 1873 } 1874 1875 if (!x) 1876 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid, 1877 if_id, pcpu_num, p->info.id.proto, daddr, 1878 &p->info.saddr, 1, 1879 family); 1880 err = -ENOENT; 1881 if (!x) { 1882 NL_SET_ERR_MSG(extack, "Target ACQUIRE not found"); 1883 goto out_noput; 1884 } 1885 1886 err = xfrm_alloc_spi(x, p->min, p->max, extack); 1887 if (err) 1888 goto out; 1889 1890 if (attrs[XFRMA_SA_DIR]) 1891 x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]); 1892 1893 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); 1894 if (IS_ERR(resp_skb)) { 1895 err = PTR_ERR(resp_skb); 1896 goto out; 1897 } 1898 1899 xtr = xfrm_get_translator(); 1900 if (xtr) { 1901 err = xtr->alloc_compat(skb, nlmsg_hdr(skb)); 1902 1903 xfrm_put_translator(xtr); 1904 if (err) { 1905 kfree_skb(resp_skb); 1906 goto out; 1907 } 1908 } 1909 1910 err = nlmsg_unicast(xfrm_net_nlsk(net, skb), resp_skb, NETLINK_CB(skb).portid); 1911 1912 out: 1913 xfrm_state_put(x); 1914 out_noput: 1915 return err; 1916 } 1917 1918 static int verify_policy_dir(u8 dir, struct netlink_ext_ack *extack) 1919 { 1920 switch (dir) { 1921 case XFRM_POLICY_IN: 1922 case XFRM_POLICY_OUT: 1923 case XFRM_POLICY_FWD: 1924 break; 1925 1926 default: 1927 NL_SET_ERR_MSG(extack, "Invalid policy direction"); 1928 return -EINVAL; 1929 } 1930 1931 return 0; 1932 } 1933 1934 static int verify_policy_type(u8 type, struct netlink_ext_ack *extack) 1935 { 1936 switch (type) { 1937 case XFRM_POLICY_TYPE_MAIN: 1938 #ifdef CONFIG_XFRM_SUB_POLICY 1939 case XFRM_POLICY_TYPE_SUB: 1940 #endif 1941 break; 1942 1943 default: 1944 NL_SET_ERR_MSG(extack, "Invalid policy type"); 1945 return -EINVAL; 1946 } 1947 1948 return 0; 1949 } 1950 1951 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p, 1952 struct netlink_ext_ack *extack) 1953 { 1954 int ret; 1955 1956 switch (p->share) { 1957 case XFRM_SHARE_ANY: 1958 case XFRM_SHARE_SESSION: 1959 case XFRM_SHARE_USER: 1960 case XFRM_SHARE_UNIQUE: 1961 break; 1962 1963 default: 1964 NL_SET_ERR_MSG(extack, "Invalid policy share"); 1965 return -EINVAL; 1966 } 1967 1968 switch (p->action) { 1969 case XFRM_POLICY_ALLOW: 1970 case XFRM_POLICY_BLOCK: 1971 break; 1972 1973 default: 1974 NL_SET_ERR_MSG(extack, "Invalid policy action"); 1975 return -EINVAL; 1976 } 1977 1978 switch (p->sel.family) { 1979 case AF_INET: 1980 if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) { 1981 NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)"); 1982 return -EINVAL; 1983 } 1984 1985 break; 1986 1987 case AF_INET6: 1988 #if IS_ENABLED(CONFIG_IPV6) 1989 if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) { 1990 NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)"); 1991 return -EINVAL; 1992 } 1993 1994 break; 1995 #else 1996 NL_SET_ERR_MSG(extack, "IPv6 support disabled"); 1997 return -EAFNOSUPPORT; 1998 #endif 1999 2000 default: 2001 NL_SET_ERR_MSG(extack, "Invalid selector family"); 2002 return -EINVAL; 2003 } 2004 2005 ret = verify_policy_dir(p->dir, extack); 2006 if (ret) 2007 return ret; 2008 if (p->index && (xfrm_policy_id2dir(p->index) != p->dir)) { 2009 NL_SET_ERR_MSG(extack, "Policy index doesn't match direction"); 2010 return -EINVAL; 2011 } 2012 2013 return 0; 2014 } 2015 2016 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs) 2017 { 2018 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 2019 struct xfrm_user_sec_ctx *uctx; 2020 2021 if (!rt) 2022 return 0; 2023 2024 uctx = nla_data(rt); 2025 return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL); 2026 } 2027 2028 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, 2029 int nr) 2030 { 2031 int i; 2032 2033 xp->xfrm_nr = nr; 2034 for (i = 0; i < nr; i++, ut++) { 2035 struct xfrm_tmpl *t = &xp->xfrm_vec[i]; 2036 2037 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id)); 2038 memcpy(&t->saddr, &ut->saddr, 2039 sizeof(xfrm_address_t)); 2040 t->reqid = ut->reqid; 2041 t->mode = ut->mode; 2042 t->share = ut->share; 2043 t->optional = ut->optional; 2044 t->aalgos = ut->aalgos; 2045 t->ealgos = ut->ealgos; 2046 t->calgos = ut->calgos; 2047 /* If all masks are ~0, then we allow all algorithms. */ 2048 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos); 2049 t->encap_family = ut->family; 2050 } 2051 } 2052 2053 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family, 2054 int dir, struct netlink_ext_ack *extack) 2055 { 2056 u16 prev_family; 2057 int i; 2058 2059 if (nr > XFRM_MAX_DEPTH) { 2060 NL_SET_ERR_MSG(extack, "Template count must be <= XFRM_MAX_DEPTH (" __stringify(XFRM_MAX_DEPTH) ")"); 2061 return -EINVAL; 2062 } 2063 2064 prev_family = family; 2065 2066 for (i = 0; i < nr; i++) { 2067 /* We never validated the ut->family value, so many 2068 * applications simply leave it at zero. The check was 2069 * never made and ut->family was ignored because all 2070 * templates could be assumed to have the same family as 2071 * the policy itself. Now that we will have ipv4-in-ipv6 2072 * and ipv6-in-ipv4 tunnels, this is no longer true. 2073 */ 2074 if (!ut[i].family) 2075 ut[i].family = family; 2076 2077 switch (ut[i].mode) { 2078 case XFRM_MODE_TUNNEL: 2079 case XFRM_MODE_BEET: 2080 if (ut[i].optional && dir == XFRM_POLICY_OUT) { 2081 NL_SET_ERR_MSG(extack, "Mode in optional template not allowed in outbound policy"); 2082 return -EINVAL; 2083 } 2084 break; 2085 case XFRM_MODE_IPTFS: 2086 break; 2087 default: 2088 if (ut[i].family != prev_family) { 2089 NL_SET_ERR_MSG(extack, "Mode in template doesn't support a family change"); 2090 return -EINVAL; 2091 } 2092 break; 2093 } 2094 if (ut[i].mode >= XFRM_MODE_MAX) { 2095 NL_SET_ERR_MSG(extack, "Mode in template must be < XFRM_MODE_MAX (" __stringify(XFRM_MODE_MAX) ")"); 2096 return -EINVAL; 2097 } 2098 2099 prev_family = ut[i].family; 2100 2101 switch (ut[i].family) { 2102 case AF_INET: 2103 break; 2104 #if IS_ENABLED(CONFIG_IPV6) 2105 case AF_INET6: 2106 break; 2107 #endif 2108 default: 2109 NL_SET_ERR_MSG(extack, "Invalid family in template"); 2110 return -EINVAL; 2111 } 2112 2113 if (!xfrm_id_proto_valid(ut[i].id.proto)) { 2114 NL_SET_ERR_MSG(extack, "Invalid XFRM protocol in template"); 2115 return -EINVAL; 2116 } 2117 } 2118 2119 return 0; 2120 } 2121 2122 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs, 2123 int dir, struct netlink_ext_ack *extack) 2124 { 2125 struct nlattr *rt = attrs[XFRMA_TMPL]; 2126 2127 if (!rt) { 2128 pol->xfrm_nr = 0; 2129 } else { 2130 struct xfrm_user_tmpl *utmpl = nla_data(rt); 2131 int nr = nla_len(rt) / sizeof(*utmpl); 2132 int err; 2133 2134 err = validate_tmpl(nr, utmpl, pol->family, dir, extack); 2135 if (err) 2136 return err; 2137 2138 copy_templates(pol, utmpl, nr); 2139 } 2140 return 0; 2141 } 2142 2143 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs, 2144 struct netlink_ext_ack *extack) 2145 { 2146 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE]; 2147 struct xfrm_userpolicy_type *upt; 2148 u8 type = XFRM_POLICY_TYPE_MAIN; 2149 int err; 2150 2151 if (rt) { 2152 upt = nla_data(rt); 2153 type = upt->type; 2154 } 2155 2156 err = verify_policy_type(type, extack); 2157 if (err) 2158 return err; 2159 2160 *tp = type; 2161 return 0; 2162 } 2163 2164 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p) 2165 { 2166 xp->priority = p->priority; 2167 xp->index = p->index; 2168 memcpy(&xp->selector, &p->sel, sizeof(xp->selector)); 2169 memcpy(&xp->lft, &p->lft, sizeof(xp->lft)); 2170 xp->action = p->action; 2171 xp->flags = p->flags; 2172 xp->family = p->sel.family; 2173 /* XXX xp->share = p->share; */ 2174 } 2175 2176 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) 2177 { 2178 memset(p, 0, sizeof(*p)); 2179 memcpy(&p->sel, &xp->selector, sizeof(p->sel)); 2180 memcpy(&p->lft, &xp->lft, sizeof(p->lft)); 2181 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); 2182 p->priority = xp->priority; 2183 p->index = xp->index; 2184 p->sel.family = xp->family; 2185 p->dir = dir; 2186 p->action = xp->action; 2187 p->flags = xp->flags; 2188 p->share = XFRM_SHARE_ANY; /* XXX xp->share */ 2189 } 2190 2191 static struct xfrm_policy *xfrm_policy_construct(struct net *net, 2192 struct xfrm_userpolicy_info *p, 2193 struct nlattr **attrs, 2194 int *errp, 2195 struct netlink_ext_ack *extack) 2196 { 2197 struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL); 2198 int err; 2199 2200 if (!xp) { 2201 *errp = -ENOMEM; 2202 return NULL; 2203 } 2204 2205 copy_from_user_policy(xp, p); 2206 2207 err = copy_from_user_policy_type(&xp->type, attrs, extack); 2208 if (err) 2209 goto error; 2210 2211 if (!(err = copy_from_user_tmpl(xp, attrs, p->dir, extack))) 2212 err = copy_from_user_sec_ctx(xp, attrs); 2213 if (err) 2214 goto error; 2215 2216 xfrm_mark_get(attrs, &xp->mark); 2217 2218 if (attrs[XFRMA_IF_ID]) 2219 xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); 2220 2221 /* configure the hardware if offload is requested */ 2222 if (attrs[XFRMA_OFFLOAD_DEV]) { 2223 err = xfrm_dev_policy_add(net, xp, 2224 nla_data(attrs[XFRMA_OFFLOAD_DEV]), 2225 p->dir, extack); 2226 if (err) 2227 goto error; 2228 } 2229 2230 return xp; 2231 error: 2232 *errp = err; 2233 xp->walk.dead = 1; 2234 xfrm_policy_destroy(xp); 2235 return NULL; 2236 } 2237 2238 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 2239 struct nlattr **attrs, 2240 struct netlink_ext_ack *extack) 2241 { 2242 struct net *net = sock_net(skb->sk); 2243 struct xfrm_userpolicy_info *p = nlmsg_data(nlh); 2244 struct xfrm_policy *xp; 2245 struct km_event c; 2246 int err; 2247 int excl; 2248 2249 err = verify_newpolicy_info(p, extack); 2250 if (err) 2251 return err; 2252 err = verify_sec_ctx_len(attrs, extack); 2253 if (err) 2254 return err; 2255 2256 xp = xfrm_policy_construct(net, p, attrs, &err, extack); 2257 if (!xp) 2258 return err; 2259 2260 /* shouldn't excl be based on nlh flags?? 2261 * Aha! this is anti-netlink really i.e more pfkey derived 2262 * in netlink excl is a flag and you wouldn't need 2263 * a type XFRM_MSG_UPDPOLICY - JHS */ 2264 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY; 2265 err = xfrm_policy_insert(p->dir, xp, excl); 2266 xfrm_audit_policy_add(xp, err ? 0 : 1, true); 2267 2268 if (err) { 2269 xfrm_dev_policy_delete(xp); 2270 xfrm_dev_policy_free(xp); 2271 security_xfrm_policy_free(xp->security); 2272 kfree(xp); 2273 return err; 2274 } 2275 2276 c.event = nlh->nlmsg_type; 2277 c.seq = nlh->nlmsg_seq; 2278 c.portid = nlh->nlmsg_pid; 2279 km_policy_notify(xp, p->dir, &c); 2280 2281 xfrm_pol_put(xp); 2282 2283 return 0; 2284 } 2285 2286 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) 2287 { 2288 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH]; 2289 int i; 2290 2291 if (xp->xfrm_nr == 0) 2292 return 0; 2293 2294 if (xp->xfrm_nr > XFRM_MAX_DEPTH) 2295 return -ENOBUFS; 2296 2297 for (i = 0; i < xp->xfrm_nr; i++) { 2298 struct xfrm_user_tmpl *up = &vec[i]; 2299 struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; 2300 2301 memset(up, 0, sizeof(*up)); 2302 memcpy(&up->id, &kp->id, sizeof(up->id)); 2303 up->family = kp->encap_family; 2304 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); 2305 up->reqid = kp->reqid; 2306 up->mode = kp->mode; 2307 up->share = kp->share; 2308 up->optional = kp->optional; 2309 up->aalgos = kp->aalgos; 2310 up->ealgos = kp->ealgos; 2311 up->calgos = kp->calgos; 2312 } 2313 2314 return nla_put(skb, XFRMA_TMPL, 2315 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec); 2316 } 2317 2318 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb) 2319 { 2320 if (x->security) { 2321 return copy_sec_ctx(x->security, skb); 2322 } 2323 return 0; 2324 } 2325 2326 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb) 2327 { 2328 if (xp->security) 2329 return copy_sec_ctx(xp->security, skb); 2330 return 0; 2331 } 2332 static inline unsigned int userpolicy_type_attrsize(void) 2333 { 2334 #ifdef CONFIG_XFRM_SUB_POLICY 2335 return nla_total_size(sizeof(struct xfrm_userpolicy_type)); 2336 #else 2337 return 0; 2338 #endif 2339 } 2340 2341 #ifdef CONFIG_XFRM_SUB_POLICY 2342 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) 2343 { 2344 struct xfrm_userpolicy_type upt; 2345 2346 /* Sadly there are two holes in struct xfrm_userpolicy_type */ 2347 memset(&upt, 0, sizeof(upt)); 2348 upt.type = type; 2349 2350 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); 2351 } 2352 2353 #else 2354 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb) 2355 { 2356 return 0; 2357 } 2358 #endif 2359 2360 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr) 2361 { 2362 struct xfrm_dump_info *sp = ptr; 2363 struct xfrm_userpolicy_info *p; 2364 struct sk_buff *in_skb = sp->in_skb; 2365 struct sk_buff *skb = sp->out_skb; 2366 struct xfrm_translator *xtr; 2367 struct nlmsghdr *nlh; 2368 int err; 2369 2370 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq, 2371 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); 2372 if (nlh == NULL) 2373 return -EMSGSIZE; 2374 2375 p = nlmsg_data(nlh); 2376 copy_to_user_policy(xp, p, dir); 2377 err = copy_to_user_tmpl(xp, skb); 2378 if (!err) 2379 err = copy_to_user_sec_ctx(xp, skb); 2380 if (!err) 2381 err = copy_to_user_policy_type(xp->type, skb); 2382 if (!err) 2383 err = xfrm_mark_put(skb, &xp->mark); 2384 if (!err) 2385 err = xfrm_if_id_put(skb, xp->if_id); 2386 if (!err && xp->xdo.dev) 2387 err = copy_user_offload(&xp->xdo, skb); 2388 if (err) { 2389 nlmsg_cancel(skb, nlh); 2390 return err; 2391 } 2392 nlmsg_end(skb, nlh); 2393 2394 xtr = xfrm_get_translator(); 2395 if (xtr) { 2396 err = xtr->alloc_compat(skb, nlh); 2397 2398 xfrm_put_translator(xtr); 2399 if (err) { 2400 nlmsg_cancel(skb, nlh); 2401 return err; 2402 } 2403 } 2404 2405 return 0; 2406 } 2407 2408 static int xfrm_dump_policy_done(struct netlink_callback *cb) 2409 { 2410 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; 2411 struct net *net = sock_net(cb->skb->sk); 2412 2413 xfrm_policy_walk_done(walk, net); 2414 return 0; 2415 } 2416 2417 static int xfrm_dump_policy_start(struct netlink_callback *cb) 2418 { 2419 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; 2420 2421 BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args)); 2422 2423 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); 2424 return 0; 2425 } 2426 2427 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) 2428 { 2429 struct net *net = sock_net(skb->sk); 2430 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; 2431 struct xfrm_dump_info info; 2432 2433 info.in_skb = cb->skb; 2434 info.out_skb = skb; 2435 info.nlmsg_seq = cb->nlh->nlmsg_seq; 2436 info.nlmsg_flags = NLM_F_MULTI; 2437 2438 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info); 2439 2440 return skb->len; 2441 } 2442 2443 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, 2444 struct xfrm_policy *xp, 2445 int dir, u32 seq) 2446 { 2447 struct xfrm_dump_info info; 2448 struct sk_buff *skb; 2449 int err; 2450 2451 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2452 if (!skb) 2453 return ERR_PTR(-ENOMEM); 2454 2455 info.in_skb = in_skb; 2456 info.out_skb = skb; 2457 info.nlmsg_seq = seq; 2458 info.nlmsg_flags = 0; 2459 2460 err = dump_one_policy(xp, dir, 0, &info); 2461 if (err) { 2462 kfree_skb(skb); 2463 return ERR_PTR(err); 2464 } 2465 2466 return skb; 2467 } 2468 2469 static int xfrm_notify_userpolicy(struct net *net) 2470 { 2471 struct xfrm_userpolicy_default *up; 2472 int len = NLMSG_ALIGN(sizeof(*up)); 2473 struct nlmsghdr *nlh; 2474 struct sk_buff *skb; 2475 int err; 2476 2477 skb = nlmsg_new(len, GFP_ATOMIC); 2478 if (skb == NULL) 2479 return -ENOMEM; 2480 2481 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0); 2482 if (nlh == NULL) { 2483 kfree_skb(skb); 2484 return -EMSGSIZE; 2485 } 2486 2487 up = nlmsg_data(nlh); 2488 up->in = net->xfrm.policy_default[XFRM_POLICY_IN]; 2489 up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD]; 2490 up->out = net->xfrm.policy_default[XFRM_POLICY_OUT]; 2491 2492 nlmsg_end(skb, nlh); 2493 2494 rcu_read_lock(); 2495 err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); 2496 rcu_read_unlock(); 2497 2498 return err; 2499 } 2500 2501 static bool xfrm_userpolicy_is_valid(__u8 policy) 2502 { 2503 return policy == XFRM_USERPOLICY_BLOCK || 2504 policy == XFRM_USERPOLICY_ACCEPT; 2505 } 2506 2507 static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh, 2508 struct nlattr **attrs, struct netlink_ext_ack *extack) 2509 { 2510 struct net *net = sock_net(skb->sk); 2511 struct xfrm_userpolicy_default *up = nlmsg_data(nlh); 2512 2513 if (xfrm_userpolicy_is_valid(up->in)) 2514 net->xfrm.policy_default[XFRM_POLICY_IN] = up->in; 2515 2516 if (xfrm_userpolicy_is_valid(up->fwd)) 2517 net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd; 2518 2519 if (xfrm_userpolicy_is_valid(up->out)) 2520 net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out; 2521 2522 rt_genid_bump_all(net); 2523 2524 xfrm_notify_userpolicy(net); 2525 return 0; 2526 } 2527 2528 static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh, 2529 struct nlattr **attrs, struct netlink_ext_ack *extack) 2530 { 2531 struct sk_buff *r_skb; 2532 struct nlmsghdr *r_nlh; 2533 struct net *net = sock_net(skb->sk); 2534 struct xfrm_userpolicy_default *r_up; 2535 int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default)); 2536 u32 portid = NETLINK_CB(skb).portid; 2537 u32 seq = nlh->nlmsg_seq; 2538 2539 r_skb = nlmsg_new(len, GFP_ATOMIC); 2540 if (!r_skb) 2541 return -ENOMEM; 2542 2543 r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0); 2544 if (!r_nlh) { 2545 kfree_skb(r_skb); 2546 return -EMSGSIZE; 2547 } 2548 2549 r_up = nlmsg_data(r_nlh); 2550 r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN]; 2551 r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD]; 2552 r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT]; 2553 nlmsg_end(r_skb, r_nlh); 2554 2555 return nlmsg_unicast(xfrm_net_nlsk(net, skb), r_skb, portid); 2556 } 2557 2558 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 2559 struct nlattr **attrs, 2560 struct netlink_ext_ack *extack) 2561 { 2562 struct net *net = sock_net(skb->sk); 2563 struct xfrm_policy *xp; 2564 struct xfrm_userpolicy_id *p; 2565 u8 type = XFRM_POLICY_TYPE_MAIN; 2566 int err; 2567 struct km_event c; 2568 int delete; 2569 struct xfrm_mark m; 2570 u32 if_id = 0; 2571 2572 p = nlmsg_data(nlh); 2573 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; 2574 2575 err = copy_from_user_policy_type(&type, attrs, extack); 2576 if (err) 2577 return err; 2578 2579 err = verify_policy_dir(p->dir, extack); 2580 if (err) 2581 return err; 2582 2583 if (attrs[XFRMA_IF_ID]) 2584 if_id = nla_get_u32(attrs[XFRMA_IF_ID]); 2585 2586 xfrm_mark_get(attrs, &m); 2587 2588 if (p->index) 2589 xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, 2590 p->index, delete, &err); 2591 else { 2592 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 2593 struct xfrm_sec_ctx *ctx; 2594 2595 err = verify_sec_ctx_len(attrs, extack); 2596 if (err) 2597 return err; 2598 2599 ctx = NULL; 2600 if (rt) { 2601 struct xfrm_user_sec_ctx *uctx = nla_data(rt); 2602 2603 err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL); 2604 if (err) 2605 return err; 2606 } 2607 xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, 2608 &p->sel, ctx, delete, &err); 2609 security_xfrm_policy_free(ctx); 2610 } 2611 if (xp == NULL) 2612 return -ENOENT; 2613 2614 if (!delete) { 2615 struct sk_buff *resp_skb; 2616 2617 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq); 2618 if (IS_ERR(resp_skb)) { 2619 err = PTR_ERR(resp_skb); 2620 } else { 2621 err = nlmsg_unicast(xfrm_net_nlsk(net, skb), resp_skb, 2622 NETLINK_CB(skb).portid); 2623 } 2624 } else { 2625 xfrm_audit_policy_delete(xp, err ? 0 : 1, true); 2626 2627 if (err != 0) 2628 goto out; 2629 2630 c.data.byid = p->index; 2631 c.event = nlh->nlmsg_type; 2632 c.seq = nlh->nlmsg_seq; 2633 c.portid = nlh->nlmsg_pid; 2634 km_policy_notify(xp, p->dir, &c); 2635 } 2636 2637 out: 2638 xfrm_pol_put(xp); 2639 return err; 2640 } 2641 2642 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 2643 struct nlattr **attrs, 2644 struct netlink_ext_ack *extack) 2645 { 2646 struct net *net = sock_net(skb->sk); 2647 struct km_event c; 2648 struct xfrm_usersa_flush *p = nlmsg_data(nlh); 2649 int err; 2650 2651 err = xfrm_state_flush(net, p->proto, true); 2652 if (err) { 2653 if (err == -ESRCH) /* empty table */ 2654 return 0; 2655 return err; 2656 } 2657 c.data.proto = p->proto; 2658 c.event = nlh->nlmsg_type; 2659 c.seq = nlh->nlmsg_seq; 2660 c.portid = nlh->nlmsg_pid; 2661 c.net = net; 2662 km_state_notify(NULL, &c); 2663 2664 return 0; 2665 } 2666 2667 static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x) 2668 { 2669 unsigned int replay_size = x->replay_esn ? 2670 xfrm_replay_state_esn_len(x->replay_esn) : 2671 sizeof(struct xfrm_replay_state); 2672 2673 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id)) 2674 + nla_total_size(replay_size) 2675 + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur)) 2676 + nla_total_size(sizeof(struct xfrm_mark)) 2677 + nla_total_size(4) /* XFRM_AE_RTHR */ 2678 + nla_total_size(4) /* XFRM_AE_ETHR */ 2679 + nla_total_size(sizeof(x->dir)) /* XFRMA_SA_DIR */ 2680 + nla_total_size(4); /* XFRMA_SA_PCPU */ 2681 } 2682 2683 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c) 2684 { 2685 struct xfrm_aevent_id *id; 2686 struct nlmsghdr *nlh; 2687 int err; 2688 2689 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); 2690 if (nlh == NULL) 2691 return -EMSGSIZE; 2692 2693 id = nlmsg_data(nlh); 2694 memset(&id->sa_id, 0, sizeof(id->sa_id)); 2695 memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr)); 2696 id->sa_id.spi = x->id.spi; 2697 id->sa_id.family = x->props.family; 2698 id->sa_id.proto = x->id.proto; 2699 memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr)); 2700 id->reqid = x->props.reqid; 2701 id->flags = c->data.aevent; 2702 2703 if (x->replay_esn) { 2704 err = nla_put(skb, XFRMA_REPLAY_ESN_VAL, 2705 xfrm_replay_state_esn_len(x->replay_esn), 2706 x->replay_esn); 2707 } else { 2708 err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), 2709 &x->replay); 2710 } 2711 if (err) 2712 goto out_cancel; 2713 err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft, 2714 XFRMA_PAD); 2715 if (err) 2716 goto out_cancel; 2717 2718 if (id->flags & XFRM_AE_RTHR) { 2719 err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff); 2720 if (err) 2721 goto out_cancel; 2722 } 2723 if (id->flags & XFRM_AE_ETHR) { 2724 err = nla_put_u32(skb, XFRMA_ETIMER_THRESH, 2725 x->replay_maxage * 10 / HZ); 2726 if (err) 2727 goto out_cancel; 2728 } 2729 err = xfrm_mark_put(skb, &x->mark); 2730 if (err) 2731 goto out_cancel; 2732 2733 err = xfrm_if_id_put(skb, x->if_id); 2734 if (err) 2735 goto out_cancel; 2736 if (x->pcpu_num != UINT_MAX) { 2737 err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num); 2738 if (err) 2739 goto out_cancel; 2740 } 2741 2742 if (x->dir) { 2743 err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir); 2744 if (err) 2745 goto out_cancel; 2746 } 2747 2748 nlmsg_end(skb, nlh); 2749 return 0; 2750 2751 out_cancel: 2752 nlmsg_cancel(skb, nlh); 2753 return err; 2754 } 2755 2756 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 2757 struct nlattr **attrs, struct netlink_ext_ack *extack) 2758 { 2759 struct net *net = sock_net(skb->sk); 2760 struct xfrm_state *x; 2761 struct sk_buff *r_skb; 2762 int err; 2763 struct km_event c; 2764 u32 mark; 2765 struct xfrm_mark m; 2766 struct xfrm_aevent_id *p = nlmsg_data(nlh); 2767 struct xfrm_usersa_id *id = &p->sa_id; 2768 2769 mark = xfrm_mark_get(attrs, &m); 2770 2771 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family); 2772 if (x == NULL) 2773 return -ESRCH; 2774 2775 r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC); 2776 if (r_skb == NULL) { 2777 xfrm_state_put(x); 2778 return -ENOMEM; 2779 } 2780 2781 /* 2782 * XXX: is this lock really needed - none of the other 2783 * gets lock (the concern is things getting updated 2784 * while we are still reading) - jhs 2785 */ 2786 spin_lock_bh(&x->lock); 2787 c.data.aevent = p->flags; 2788 c.seq = nlh->nlmsg_seq; 2789 c.portid = nlh->nlmsg_pid; 2790 2791 err = build_aevent(r_skb, x, &c); 2792 BUG_ON(err < 0); 2793 2794 err = nlmsg_unicast(xfrm_net_nlsk(net, skb), r_skb, NETLINK_CB(skb).portid); 2795 spin_unlock_bh(&x->lock); 2796 xfrm_state_put(x); 2797 return err; 2798 } 2799 2800 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 2801 struct nlattr **attrs, struct netlink_ext_ack *extack) 2802 { 2803 struct net *net = sock_net(skb->sk); 2804 struct xfrm_state *x; 2805 struct km_event c; 2806 int err = -EINVAL; 2807 u32 mark = 0; 2808 struct xfrm_mark m; 2809 struct xfrm_aevent_id *p = nlmsg_data(nlh); 2810 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; 2811 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; 2812 struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; 2813 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; 2814 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; 2815 2816 if (!lt && !rp && !re && !et && !rt) { 2817 NL_SET_ERR_MSG(extack, "Missing required attribute for AE"); 2818 return err; 2819 } 2820 2821 /* pedantic mode - thou shalt sayeth replaceth */ 2822 if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) { 2823 NL_SET_ERR_MSG(extack, "NLM_F_REPLACE flag is required"); 2824 return err; 2825 } 2826 2827 mark = xfrm_mark_get(attrs, &m); 2828 2829 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family); 2830 if (x == NULL) 2831 return -ESRCH; 2832 2833 if (x->km.state != XFRM_STATE_VALID) { 2834 NL_SET_ERR_MSG(extack, "SA must be in VALID state"); 2835 goto out; 2836 } 2837 2838 err = xfrm_replay_verify_len(x->replay_esn, re, extack); 2839 if (err) 2840 goto out; 2841 2842 spin_lock_bh(&x->lock); 2843 xfrm_update_ae_params(x, attrs, 1); 2844 spin_unlock_bh(&x->lock); 2845 2846 c.event = nlh->nlmsg_type; 2847 c.seq = nlh->nlmsg_seq; 2848 c.portid = nlh->nlmsg_pid; 2849 c.data.aevent = XFRM_AE_CU; 2850 km_state_notify(x, &c); 2851 err = 0; 2852 out: 2853 xfrm_state_put(x); 2854 return err; 2855 } 2856 2857 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 2858 struct nlattr **attrs, 2859 struct netlink_ext_ack *extack) 2860 { 2861 struct net *net = sock_net(skb->sk); 2862 struct km_event c; 2863 u8 type = XFRM_POLICY_TYPE_MAIN; 2864 int err; 2865 2866 err = copy_from_user_policy_type(&type, attrs, extack); 2867 if (err) 2868 return err; 2869 2870 err = xfrm_policy_flush(net, type, true); 2871 if (err) { 2872 if (err == -ESRCH) /* empty table */ 2873 return 0; 2874 return err; 2875 } 2876 2877 c.data.type = type; 2878 c.event = nlh->nlmsg_type; 2879 c.seq = nlh->nlmsg_seq; 2880 c.portid = nlh->nlmsg_pid; 2881 c.net = net; 2882 km_policy_notify(NULL, 0, &c); 2883 return 0; 2884 } 2885 2886 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, 2887 struct nlattr **attrs, 2888 struct netlink_ext_ack *extack) 2889 { 2890 struct net *net = sock_net(skb->sk); 2891 struct xfrm_policy *xp; 2892 struct xfrm_user_polexpire *up = nlmsg_data(nlh); 2893 struct xfrm_userpolicy_info *p = &up->pol; 2894 u8 type = XFRM_POLICY_TYPE_MAIN; 2895 int err = -ENOENT; 2896 struct xfrm_mark m; 2897 u32 if_id = 0; 2898 2899 err = copy_from_user_policy_type(&type, attrs, extack); 2900 if (err) 2901 return err; 2902 2903 err = verify_policy_dir(p->dir, extack); 2904 if (err) 2905 return err; 2906 2907 if (attrs[XFRMA_IF_ID]) 2908 if_id = nla_get_u32(attrs[XFRMA_IF_ID]); 2909 2910 xfrm_mark_get(attrs, &m); 2911 2912 if (p->index) 2913 xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index, 2914 0, &err); 2915 else { 2916 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 2917 struct xfrm_sec_ctx *ctx; 2918 2919 err = verify_sec_ctx_len(attrs, extack); 2920 if (err) 2921 return err; 2922 2923 ctx = NULL; 2924 if (rt) { 2925 struct xfrm_user_sec_ctx *uctx = nla_data(rt); 2926 2927 err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL); 2928 if (err) 2929 return err; 2930 } 2931 xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, 2932 &p->sel, ctx, 0, &err); 2933 security_xfrm_policy_free(ctx); 2934 } 2935 if (xp == NULL) 2936 return -ENOENT; 2937 2938 if (unlikely(xp->walk.dead)) 2939 goto out; 2940 2941 err = 0; 2942 if (up->hard) { 2943 xfrm_policy_delete(xp, p->dir); 2944 xfrm_audit_policy_delete(xp, 1, true); 2945 } 2946 km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid); 2947 2948 out: 2949 xfrm_pol_put(xp); 2950 return err; 2951 } 2952 2953 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, 2954 struct nlattr **attrs, 2955 struct netlink_ext_ack *extack) 2956 { 2957 struct net *net = sock_net(skb->sk); 2958 struct xfrm_state *x; 2959 int err; 2960 struct xfrm_user_expire *ue = nlmsg_data(nlh); 2961 struct xfrm_usersa_info *p = &ue->state; 2962 struct xfrm_mark m; 2963 u32 mark = xfrm_mark_get(attrs, &m); 2964 2965 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); 2966 2967 err = -ENOENT; 2968 if (x == NULL) 2969 return err; 2970 2971 spin_lock_bh(&x->lock); 2972 err = -EINVAL; 2973 if (x->km.state != XFRM_STATE_VALID) { 2974 NL_SET_ERR_MSG(extack, "SA must be in VALID state"); 2975 goto out; 2976 } 2977 2978 km_state_expired(x, ue->hard, nlh->nlmsg_pid); 2979 2980 if (ue->hard) { 2981 __xfrm_state_delete(x); 2982 xfrm_audit_state_delete(x, 1, true); 2983 } 2984 err = 0; 2985 out: 2986 spin_unlock_bh(&x->lock); 2987 xfrm_state_put(x); 2988 return err; 2989 } 2990 2991 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, 2992 struct nlattr **attrs, 2993 struct netlink_ext_ack *extack) 2994 { 2995 struct net *net = sock_net(skb->sk); 2996 struct xfrm_policy *xp; 2997 struct xfrm_user_tmpl *ut; 2998 int i; 2999 struct nlattr *rt = attrs[XFRMA_TMPL]; 3000 struct xfrm_mark mark; 3001 3002 struct xfrm_user_acquire *ua = nlmsg_data(nlh); 3003 struct xfrm_state *x = xfrm_state_alloc(net); 3004 int err = -ENOMEM; 3005 3006 if (!x) 3007 goto nomem; 3008 3009 xfrm_mark_get(attrs, &mark); 3010 3011 if (attrs[XFRMA_SA_PCPU]) { 3012 x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]); 3013 err = -EINVAL; 3014 if (x->pcpu_num >= num_possible_cpus()) { 3015 NL_SET_ERR_MSG(extack, "pCPU number too big"); 3016 goto free_state; 3017 } 3018 } 3019 3020 err = verify_newpolicy_info(&ua->policy, extack); 3021 if (err) 3022 goto free_state; 3023 err = verify_sec_ctx_len(attrs, extack); 3024 if (err) 3025 goto free_state; 3026 3027 /* build an XP */ 3028 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err, extack); 3029 if (!xp) 3030 goto free_state; 3031 3032 memcpy(&x->id, &ua->id, sizeof(ua->id)); 3033 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr)); 3034 memcpy(&x->sel, &ua->sel, sizeof(ua->sel)); 3035 xp->mark.m = x->mark.m = mark.m; 3036 xp->mark.v = x->mark.v = mark.v; 3037 ut = nla_data(rt); 3038 /* extract the templates and for each call km_key */ 3039 for (i = 0; i < xp->xfrm_nr; i++, ut++) { 3040 struct xfrm_tmpl *t = &xp->xfrm_vec[i]; 3041 memcpy(&x->id, &t->id, sizeof(x->id)); 3042 x->props.mode = t->mode; 3043 x->props.reqid = t->reqid; 3044 x->props.family = ut->family; 3045 t->aalgos = ua->aalgos; 3046 t->ealgos = ua->ealgos; 3047 t->calgos = ua->calgos; 3048 err = km_query(x, t, xp); 3049 3050 } 3051 3052 xfrm_state_free(x); 3053 xfrm_dev_policy_delete(xp); 3054 xfrm_dev_policy_free(xp); 3055 security_xfrm_policy_free(xp->security); 3056 kfree(xp); 3057 3058 return 0; 3059 3060 free_state: 3061 xfrm_state_free(x); 3062 nomem: 3063 return err; 3064 } 3065 3066 #ifdef CONFIG_XFRM_MIGRATE 3067 static int copy_from_user_migrate(struct xfrm_migrate *ma, 3068 struct xfrm_kmaddress *k, 3069 struct nlattr **attrs, int *num, 3070 struct netlink_ext_ack *extack) 3071 { 3072 struct nlattr *rt = attrs[XFRMA_MIGRATE]; 3073 struct xfrm_user_migrate *um; 3074 int i, num_migrate; 3075 3076 if (k != NULL) { 3077 struct xfrm_user_kmaddress *uk; 3078 3079 uk = nla_data(attrs[XFRMA_KMADDRESS]); 3080 memcpy(&k->local, &uk->local, sizeof(k->local)); 3081 memcpy(&k->remote, &uk->remote, sizeof(k->remote)); 3082 k->family = uk->family; 3083 k->reserved = uk->reserved; 3084 } 3085 3086 um = nla_data(rt); 3087 num_migrate = nla_len(rt) / sizeof(*um); 3088 3089 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) { 3090 NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)"); 3091 return -EINVAL; 3092 } 3093 3094 for (i = 0; i < num_migrate; i++, um++, ma++) { 3095 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr)); 3096 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr)); 3097 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr)); 3098 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr)); 3099 3100 ma->proto = um->proto; 3101 ma->mode = um->mode; 3102 ma->reqid = um->reqid; 3103 3104 ma->old_family = um->old_family; 3105 ma->new_family = um->new_family; 3106 } 3107 3108 *num = i; 3109 return 0; 3110 } 3111 3112 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, 3113 struct nlattr **attrs, struct netlink_ext_ack *extack) 3114 { 3115 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh); 3116 struct xfrm_migrate m[XFRM_MAX_DEPTH]; 3117 struct xfrm_kmaddress km, *kmp; 3118 u8 type; 3119 int err; 3120 int n = 0; 3121 struct net *net = sock_net(skb->sk); 3122 struct xfrm_encap_tmpl *encap = NULL; 3123 struct xfrm_user_offload *xuo = NULL; 3124 u32 if_id = 0; 3125 3126 if (!attrs[XFRMA_MIGRATE]) { 3127 NL_SET_ERR_MSG(extack, "Missing required MIGRATE attribute"); 3128 return -EINVAL; 3129 } 3130 3131 kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL; 3132 3133 err = copy_from_user_policy_type(&type, attrs, extack); 3134 if (err) 3135 return err; 3136 3137 err = copy_from_user_migrate(m, kmp, attrs, &n, extack); 3138 if (err) 3139 return err; 3140 3141 if (!n) 3142 return 0; 3143 3144 if (attrs[XFRMA_ENCAP]) { 3145 encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), 3146 sizeof(*encap), GFP_KERNEL); 3147 if (!encap) 3148 return -ENOMEM; 3149 } 3150 3151 if (attrs[XFRMA_IF_ID]) 3152 if_id = nla_get_u32(attrs[XFRMA_IF_ID]); 3153 3154 if (attrs[XFRMA_OFFLOAD_DEV]) { 3155 xuo = kmemdup(nla_data(attrs[XFRMA_OFFLOAD_DEV]), 3156 sizeof(*xuo), GFP_KERNEL); 3157 if (!xuo) { 3158 err = -ENOMEM; 3159 goto error; 3160 } 3161 } 3162 err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, 3163 if_id, extack, xuo); 3164 error: 3165 kfree(encap); 3166 kfree(xuo); 3167 return err; 3168 } 3169 #else 3170 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, 3171 struct nlattr **attrs, struct netlink_ext_ack *extack) 3172 { 3173 return -ENOPROTOOPT; 3174 } 3175 #endif 3176 3177 #ifdef CONFIG_XFRM_MIGRATE 3178 static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb) 3179 { 3180 struct xfrm_user_migrate um; 3181 3182 memset(&um, 0, sizeof(um)); 3183 um.proto = m->proto; 3184 um.mode = m->mode; 3185 um.reqid = m->reqid; 3186 um.old_family = m->old_family; 3187 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr)); 3188 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr)); 3189 um.new_family = m->new_family; 3190 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr)); 3191 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr)); 3192 3193 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um); 3194 } 3195 3196 static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb) 3197 { 3198 struct xfrm_user_kmaddress uk; 3199 3200 memset(&uk, 0, sizeof(uk)); 3201 uk.family = k->family; 3202 uk.reserved = k->reserved; 3203 memcpy(&uk.local, &k->local, sizeof(uk.local)); 3204 memcpy(&uk.remote, &k->remote, sizeof(uk.remote)); 3205 3206 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk); 3207 } 3208 3209 static inline unsigned int xfrm_migrate_msgsize(int num_migrate, int with_kma, 3210 int with_encp) 3211 { 3212 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id)) 3213 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0) 3214 + (with_encp ? nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0) 3215 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate) 3216 + userpolicy_type_attrsize(); 3217 } 3218 3219 static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m, 3220 int num_migrate, const struct xfrm_kmaddress *k, 3221 const struct xfrm_selector *sel, 3222 const struct xfrm_encap_tmpl *encap, u8 dir, u8 type) 3223 { 3224 const struct xfrm_migrate *mp; 3225 struct xfrm_userpolicy_id *pol_id; 3226 struct nlmsghdr *nlh; 3227 int i, err; 3228 3229 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0); 3230 if (nlh == NULL) 3231 return -EMSGSIZE; 3232 3233 pol_id = nlmsg_data(nlh); 3234 /* copy data from selector, dir, and type to the pol_id */ 3235 memset(pol_id, 0, sizeof(*pol_id)); 3236 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel)); 3237 pol_id->dir = dir; 3238 3239 if (k != NULL) { 3240 err = copy_to_user_kmaddress(k, skb); 3241 if (err) 3242 goto out_cancel; 3243 } 3244 if (encap) { 3245 err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap); 3246 if (err) 3247 goto out_cancel; 3248 } 3249 err = copy_to_user_policy_type(type, skb); 3250 if (err) 3251 goto out_cancel; 3252 for (i = 0, mp = m ; i < num_migrate; i++, mp++) { 3253 err = copy_to_user_migrate(mp, skb); 3254 if (err) 3255 goto out_cancel; 3256 } 3257 3258 nlmsg_end(skb, nlh); 3259 return 0; 3260 3261 out_cancel: 3262 nlmsg_cancel(skb, nlh); 3263 return err; 3264 } 3265 3266 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 3267 const struct xfrm_migrate *m, int num_migrate, 3268 const struct xfrm_kmaddress *k, 3269 const struct xfrm_encap_tmpl *encap) 3270 { 3271 struct net *net = &init_net; 3272 struct sk_buff *skb; 3273 int err; 3274 3275 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap), 3276 GFP_ATOMIC); 3277 if (skb == NULL) 3278 return -ENOMEM; 3279 3280 /* build migrate */ 3281 err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type); 3282 BUG_ON(err < 0); 3283 3284 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE); 3285 } 3286 #else 3287 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 3288 const struct xfrm_migrate *m, int num_migrate, 3289 const struct xfrm_kmaddress *k, 3290 const struct xfrm_encap_tmpl *encap) 3291 { 3292 return -ENOPROTOOPT; 3293 } 3294 #endif 3295 3296 #define XMSGSIZE(type) sizeof(struct type) 3297 3298 const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { 3299 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), 3300 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 3301 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 3302 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), 3303 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 3304 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 3305 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info), 3306 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire), 3307 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire), 3308 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), 3309 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), 3310 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), 3311 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), 3312 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0, 3313 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), 3314 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), 3315 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), 3316 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 3317 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32), 3318 [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32), 3319 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32), 3320 [XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default), 3321 [XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default), 3322 }; 3323 EXPORT_SYMBOL_GPL(xfrm_msg_min); 3324 3325 #undef XMSGSIZE 3326 3327 const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { 3328 [XFRMA_UNSPEC] = { .strict_start_type = XFRMA_SA_DIR }, 3329 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)}, 3330 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)}, 3331 [XFRMA_LASTUSED] = { .type = NLA_U64}, 3332 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)}, 3333 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) }, 3334 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) }, 3335 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) }, 3336 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, 3337 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, 3338 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, 3339 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, 3340 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, 3341 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, 3342 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, 3343 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 }, 3344 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) }, 3345 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) }, 3346 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)}, 3347 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) }, 3348 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) }, 3349 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) }, 3350 [XFRMA_TFCPAD] = { .type = NLA_U32 }, 3351 [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) }, 3352 [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 }, 3353 [XFRMA_PROTO] = { .type = NLA_U8 }, 3354 [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) }, 3355 [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) }, 3356 [XFRMA_SET_MARK] = { .type = NLA_U32 }, 3357 [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, 3358 [XFRMA_IF_ID] = { .type = NLA_U32 }, 3359 [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 }, 3360 [XFRMA_SA_DIR] = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT), 3361 [XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 }, 3362 [XFRMA_SA_PCPU] = { .type = NLA_U32 }, 3363 [XFRMA_IPTFS_DROP_TIME] = { .type = NLA_U32 }, 3364 [XFRMA_IPTFS_REORDER_WINDOW] = { .type = NLA_U16 }, 3365 [XFRMA_IPTFS_DONT_FRAG] = { .type = NLA_FLAG }, 3366 [XFRMA_IPTFS_INIT_DELAY] = { .type = NLA_U32 }, 3367 [XFRMA_IPTFS_MAX_QSIZE] = { .type = NLA_U32 }, 3368 [XFRMA_IPTFS_PKT_SIZE] = { .type = NLA_U32 }, 3369 }; 3370 EXPORT_SYMBOL_GPL(xfrma_policy); 3371 3372 static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { 3373 [XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) }, 3374 [XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) }, 3375 }; 3376 3377 static const struct xfrm_link { 3378 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **, 3379 struct netlink_ext_ack *); 3380 int (*start)(struct netlink_callback *); 3381 int (*dump)(struct sk_buff *, struct netlink_callback *); 3382 int (*done)(struct netlink_callback *); 3383 const struct nla_policy *nla_pol; 3384 int nla_max; 3385 } xfrm_dispatch[XFRM_NR_MSGTYPES] = { 3386 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, 3387 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, 3388 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa, 3389 .dump = xfrm_dump_sa, 3390 .done = xfrm_dump_sa_done }, 3391 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 3392 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, 3393 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, 3394 .start = xfrm_dump_policy_start, 3395 .dump = xfrm_dump_policy, 3396 .done = xfrm_dump_policy_done }, 3397 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, 3398 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire }, 3399 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire }, 3400 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 3401 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, 3402 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire}, 3403 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa }, 3404 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy }, 3405 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae }, 3406 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae }, 3407 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate }, 3408 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo }, 3409 [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo, 3410 .nla_pol = xfrma_spd_policy, 3411 .nla_max = XFRMA_SPD_MAX }, 3412 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo }, 3413 [XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_set_default }, 3414 [XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_get_default }, 3415 }; 3416 3417 static int xfrm_reject_unused_attr(int type, struct nlattr **attrs, 3418 struct netlink_ext_ack *extack) 3419 { 3420 if (attrs[XFRMA_SA_DIR]) { 3421 switch (type) { 3422 case XFRM_MSG_NEWSA: 3423 case XFRM_MSG_UPDSA: 3424 case XFRM_MSG_ALLOCSPI: 3425 break; 3426 default: 3427 NL_SET_ERR_MSG(extack, "Invalid attribute SA_DIR"); 3428 return -EINVAL; 3429 } 3430 } 3431 3432 if (attrs[XFRMA_SA_PCPU]) { 3433 switch (type) { 3434 case XFRM_MSG_NEWSA: 3435 case XFRM_MSG_UPDSA: 3436 case XFRM_MSG_ALLOCSPI: 3437 case XFRM_MSG_ACQUIRE: 3438 3439 break; 3440 default: 3441 NL_SET_ERR_MSG(extack, "Invalid attribute SA_PCPU"); 3442 return -EINVAL; 3443 } 3444 } 3445 3446 return 0; 3447 } 3448 3449 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, 3450 struct netlink_ext_ack *extack) 3451 { 3452 struct net *net = sock_net(skb->sk); 3453 struct nlattr *attrs[XFRMA_MAX+1]; 3454 const struct xfrm_link *link; 3455 struct nlmsghdr *nlh64 = NULL; 3456 int type, err; 3457 3458 type = nlh->nlmsg_type; 3459 if (type > XFRM_MSG_MAX) 3460 return -EINVAL; 3461 3462 type -= XFRM_MSG_BASE; 3463 link = &xfrm_dispatch[type]; 3464 3465 /* All operations require privileges, even GET */ 3466 if (!netlink_net_capable(skb, CAP_NET_ADMIN)) 3467 return -EPERM; 3468 3469 if (in_compat_syscall()) { 3470 struct xfrm_translator *xtr = xfrm_get_translator(); 3471 3472 if (!xtr) 3473 return -EOPNOTSUPP; 3474 3475 nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max, 3476 link->nla_pol, extack); 3477 xfrm_put_translator(xtr); 3478 if (IS_ERR(nlh64)) 3479 return PTR_ERR(nlh64); 3480 if (nlh64) 3481 nlh = nlh64; 3482 } 3483 3484 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || 3485 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && 3486 (nlh->nlmsg_flags & NLM_F_DUMP)) { 3487 struct netlink_dump_control c = { 3488 .start = link->start, 3489 .dump = link->dump, 3490 .done = link->done, 3491 }; 3492 3493 if (link->dump == NULL) { 3494 err = -EINVAL; 3495 goto err; 3496 } 3497 3498 err = netlink_dump_start(xfrm_net_nlsk(net, skb), skb, nlh, &c); 3499 goto err; 3500 } 3501 3502 err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs, 3503 link->nla_max ? : XFRMA_MAX, 3504 link->nla_pol ? : xfrma_policy, extack); 3505 if (err < 0) 3506 goto err; 3507 3508 if (!link->nla_pol || link->nla_pol == xfrma_policy) { 3509 err = xfrm_reject_unused_attr((type + XFRM_MSG_BASE), attrs, extack); 3510 if (err < 0) 3511 goto err; 3512 } 3513 3514 if (link->doit == NULL) { 3515 err = -EINVAL; 3516 goto err; 3517 } 3518 3519 err = link->doit(skb, nlh, attrs, extack); 3520 3521 /* We need to free skb allocated in xfrm_alloc_compat() before 3522 * returning from this function, because consume_skb() won't take 3523 * care of frag_list since netlink destructor sets 3524 * sbk->head to NULL. (see netlink_skb_destructor()) 3525 */ 3526 if (skb_has_frag_list(skb)) { 3527 kfree_skb(skb_shinfo(skb)->frag_list); 3528 skb_shinfo(skb)->frag_list = NULL; 3529 } 3530 3531 err: 3532 kvfree(nlh64); 3533 return err; 3534 } 3535 3536 static void xfrm_netlink_rcv(struct sk_buff *skb) 3537 { 3538 struct net *net = sock_net(skb->sk); 3539 3540 mutex_lock(&net->xfrm.xfrm_cfg_mutex); 3541 netlink_rcv_skb(skb, &xfrm_user_rcv_msg); 3542 mutex_unlock(&net->xfrm.xfrm_cfg_mutex); 3543 } 3544 3545 static inline unsigned int xfrm_expire_msgsize(void) 3546 { 3547 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)) + 3548 nla_total_size(sizeof(struct xfrm_mark)) + 3549 nla_total_size(sizeof_field(struct xfrm_state, dir)) + 3550 nla_total_size(4); /* XFRMA_SA_PCPU */ 3551 } 3552 3553 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c) 3554 { 3555 struct xfrm_user_expire *ue; 3556 struct nlmsghdr *nlh; 3557 int err; 3558 3559 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); 3560 if (nlh == NULL) 3561 return -EMSGSIZE; 3562 3563 ue = nlmsg_data(nlh); 3564 copy_to_user_state(x, &ue->state); 3565 ue->hard = (c->data.hard != 0) ? 1 : 0; 3566 /* clear the padding bytes */ 3567 memset_after(ue, 0, hard); 3568 3569 err = xfrm_mark_put(skb, &x->mark); 3570 if (err) 3571 return err; 3572 3573 err = xfrm_if_id_put(skb, x->if_id); 3574 if (err) 3575 return err; 3576 if (x->pcpu_num != UINT_MAX) { 3577 err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num); 3578 if (err) 3579 return err; 3580 } 3581 3582 if (x->dir) { 3583 err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir); 3584 if (err) 3585 return err; 3586 } 3587 3588 nlmsg_end(skb, nlh); 3589 return 0; 3590 } 3591 3592 static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c) 3593 { 3594 struct net *net = xs_net(x); 3595 struct sk_buff *skb; 3596 3597 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC); 3598 if (skb == NULL) 3599 return -ENOMEM; 3600 3601 if (build_expire(skb, x, c) < 0) { 3602 kfree_skb(skb); 3603 return -EMSGSIZE; 3604 } 3605 3606 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE); 3607 } 3608 3609 static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c) 3610 { 3611 struct net *net = xs_net(x); 3612 struct sk_buff *skb; 3613 int err; 3614 3615 skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC); 3616 if (skb == NULL) 3617 return -ENOMEM; 3618 3619 err = build_aevent(skb, x, c); 3620 BUG_ON(err < 0); 3621 3622 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS); 3623 } 3624 3625 static int xfrm_notify_sa_flush(const struct km_event *c) 3626 { 3627 struct net *net = c->net; 3628 struct xfrm_usersa_flush *p; 3629 struct nlmsghdr *nlh; 3630 struct sk_buff *skb; 3631 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush)); 3632 3633 skb = nlmsg_new(len, GFP_ATOMIC); 3634 if (skb == NULL) 3635 return -ENOMEM; 3636 3637 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0); 3638 if (nlh == NULL) { 3639 kfree_skb(skb); 3640 return -EMSGSIZE; 3641 } 3642 3643 p = nlmsg_data(nlh); 3644 p->proto = c->data.proto; 3645 3646 nlmsg_end(skb, nlh); 3647 3648 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA); 3649 } 3650 3651 static inline unsigned int xfrm_sa_len(struct xfrm_state *x) 3652 { 3653 unsigned int l = 0; 3654 if (x->aead) 3655 l += nla_total_size(aead_len(x->aead)); 3656 if (x->aalg) { 3657 l += nla_total_size(sizeof(struct xfrm_algo) + 3658 (x->aalg->alg_key_len + 7) / 8); 3659 l += nla_total_size(xfrm_alg_auth_len(x->aalg)); 3660 } 3661 if (x->ealg) 3662 l += nla_total_size(xfrm_alg_len(x->ealg)); 3663 if (x->calg) 3664 l += nla_total_size(sizeof(*x->calg)); 3665 if (x->encap) 3666 l += nla_total_size(sizeof(*x->encap)); 3667 if (x->tfcpad) 3668 l += nla_total_size(sizeof(x->tfcpad)); 3669 if (x->replay_esn) 3670 l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn)); 3671 else 3672 l += nla_total_size(sizeof(struct xfrm_replay_state)); 3673 if (x->security) 3674 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) + 3675 x->security->ctx_len); 3676 if (x->coaddr) 3677 l += nla_total_size(sizeof(*x->coaddr)); 3678 if (x->props.extra_flags) 3679 l += nla_total_size(sizeof(x->props.extra_flags)); 3680 if (x->xso.dev) 3681 l += nla_total_size(sizeof(struct xfrm_user_offload)); 3682 if (x->props.smark.v | x->props.smark.m) { 3683 l += nla_total_size(sizeof(x->props.smark.v)); 3684 l += nla_total_size(sizeof(x->props.smark.m)); 3685 } 3686 if (x->if_id) 3687 l += nla_total_size(sizeof(x->if_id)); 3688 if (x->pcpu_num != UINT_MAX) 3689 l += nla_total_size(sizeof(x->pcpu_num)); 3690 3691 /* Must count x->lastused as it may become non-zero behind our back. */ 3692 l += nla_total_size_64bit(sizeof(u64)); 3693 3694 if (x->mapping_maxage) 3695 l += nla_total_size(sizeof(x->mapping_maxage)); 3696 3697 if (x->dir) 3698 l += nla_total_size(sizeof(x->dir)); 3699 3700 if (x->nat_keepalive_interval) 3701 l += nla_total_size(sizeof(x->nat_keepalive_interval)); 3702 3703 if (x->mode_cbs && x->mode_cbs->sa_len) 3704 l += x->mode_cbs->sa_len(x); 3705 3706 return l; 3707 } 3708 3709 static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c) 3710 { 3711 struct net *net = xs_net(x); 3712 struct xfrm_usersa_info *p; 3713 struct xfrm_usersa_id *id; 3714 struct nlmsghdr *nlh; 3715 struct sk_buff *skb; 3716 unsigned int len = xfrm_sa_len(x); 3717 unsigned int headlen; 3718 int err; 3719 3720 headlen = sizeof(*p); 3721 if (c->event == XFRM_MSG_DELSA) { 3722 len += nla_total_size(headlen); 3723 headlen = sizeof(*id); 3724 len += nla_total_size(sizeof(struct xfrm_mark)); 3725 } 3726 len += NLMSG_ALIGN(headlen); 3727 3728 skb = nlmsg_new(len, GFP_ATOMIC); 3729 if (skb == NULL) 3730 return -ENOMEM; 3731 3732 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); 3733 err = -EMSGSIZE; 3734 if (nlh == NULL) 3735 goto out_free_skb; 3736 3737 p = nlmsg_data(nlh); 3738 if (c->event == XFRM_MSG_DELSA) { 3739 struct nlattr *attr; 3740 3741 id = nlmsg_data(nlh); 3742 memset(id, 0, sizeof(*id)); 3743 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr)); 3744 id->spi = x->id.spi; 3745 id->family = x->props.family; 3746 id->proto = x->id.proto; 3747 3748 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p)); 3749 err = -EMSGSIZE; 3750 if (attr == NULL) 3751 goto out_free_skb; 3752 3753 p = nla_data(attr); 3754 } 3755 err = copy_to_user_state_extra(x, p, skb); 3756 if (err) 3757 goto out_free_skb; 3758 3759 nlmsg_end(skb, nlh); 3760 3761 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA); 3762 3763 out_free_skb: 3764 kfree_skb(skb); 3765 return err; 3766 } 3767 3768 static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c) 3769 { 3770 3771 switch (c->event) { 3772 case XFRM_MSG_EXPIRE: 3773 return xfrm_exp_state_notify(x, c); 3774 case XFRM_MSG_NEWAE: 3775 return xfrm_aevent_state_notify(x, c); 3776 case XFRM_MSG_DELSA: 3777 case XFRM_MSG_UPDSA: 3778 case XFRM_MSG_NEWSA: 3779 return xfrm_notify_sa(x, c); 3780 case XFRM_MSG_FLUSHSA: 3781 return xfrm_notify_sa_flush(c); 3782 default: 3783 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n", 3784 c->event); 3785 break; 3786 } 3787 3788 return 0; 3789 3790 } 3791 3792 static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x, 3793 struct xfrm_policy *xp) 3794 { 3795 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire)) 3796 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) 3797 + nla_total_size(sizeof(struct xfrm_mark)) 3798 + nla_total_size(xfrm_user_sec_ctx_size(x->security)) 3799 + nla_total_size(4) /* XFRMA_SA_PCPU */ 3800 + userpolicy_type_attrsize(); 3801 } 3802 3803 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, 3804 struct xfrm_tmpl *xt, struct xfrm_policy *xp) 3805 { 3806 __u32 seq = xfrm_get_acqseq(); 3807 struct xfrm_user_acquire *ua; 3808 struct nlmsghdr *nlh; 3809 int err; 3810 3811 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0); 3812 if (nlh == NULL) 3813 return -EMSGSIZE; 3814 3815 ua = nlmsg_data(nlh); 3816 memcpy(&ua->id, &x->id, sizeof(ua->id)); 3817 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr)); 3818 memcpy(&ua->sel, &x->sel, sizeof(ua->sel)); 3819 copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT); 3820 ua->aalgos = xt->aalgos; 3821 ua->ealgos = xt->ealgos; 3822 ua->calgos = xt->calgos; 3823 ua->seq = x->km.seq = seq; 3824 3825 err = copy_to_user_tmpl(xp, skb); 3826 if (!err) 3827 err = copy_to_user_state_sec_ctx(x, skb); 3828 if (!err) 3829 err = copy_to_user_policy_type(xp->type, skb); 3830 if (!err) 3831 err = xfrm_mark_put(skb, &xp->mark); 3832 if (!err) 3833 err = xfrm_if_id_put(skb, xp->if_id); 3834 if (!err && xp->xdo.dev) 3835 err = copy_user_offload(&xp->xdo, skb); 3836 if (!err && x->pcpu_num != UINT_MAX) 3837 err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num); 3838 if (err) { 3839 nlmsg_cancel(skb, nlh); 3840 return err; 3841 } 3842 3843 nlmsg_end(skb, nlh); 3844 return 0; 3845 } 3846 3847 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, 3848 struct xfrm_policy *xp) 3849 { 3850 struct net *net = xs_net(x); 3851 struct sk_buff *skb; 3852 int err; 3853 3854 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC); 3855 if (skb == NULL) 3856 return -ENOMEM; 3857 3858 err = build_acquire(skb, x, xt, xp); 3859 BUG_ON(err < 0); 3860 3861 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE); 3862 } 3863 3864 /* User gives us xfrm_user_policy_info followed by an array of 0 3865 * or more templates. 3866 */ 3867 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt, 3868 u8 *data, int len, int *dir) 3869 { 3870 struct net *net = sock_net(sk); 3871 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data; 3872 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1); 3873 struct xfrm_policy *xp; 3874 int nr; 3875 3876 switch (sk->sk_family) { 3877 case AF_INET: 3878 if (opt != IP_XFRM_POLICY) { 3879 *dir = -EOPNOTSUPP; 3880 return NULL; 3881 } 3882 break; 3883 #if IS_ENABLED(CONFIG_IPV6) 3884 case AF_INET6: 3885 if (opt != IPV6_XFRM_POLICY) { 3886 *dir = -EOPNOTSUPP; 3887 return NULL; 3888 } 3889 break; 3890 #endif 3891 default: 3892 *dir = -EINVAL; 3893 return NULL; 3894 } 3895 3896 *dir = -EINVAL; 3897 3898 if (len < sizeof(*p) || 3899 verify_newpolicy_info(p, NULL)) 3900 return NULL; 3901 3902 nr = ((len - sizeof(*p)) / sizeof(*ut)); 3903 if (validate_tmpl(nr, ut, p->sel.family, p->dir, NULL)) 3904 return NULL; 3905 3906 if (p->dir > XFRM_POLICY_OUT) 3907 return NULL; 3908 3909 xp = xfrm_policy_alloc(net, GFP_ATOMIC); 3910 if (xp == NULL) { 3911 *dir = -ENOBUFS; 3912 return NULL; 3913 } 3914 3915 copy_from_user_policy(xp, p); 3916 xp->type = XFRM_POLICY_TYPE_MAIN; 3917 copy_templates(xp, ut, nr); 3918 3919 *dir = p->dir; 3920 3921 return xp; 3922 } 3923 3924 static inline unsigned int xfrm_polexpire_msgsize(struct xfrm_policy *xp) 3925 { 3926 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire)) 3927 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) 3928 + nla_total_size(xfrm_user_sec_ctx_size(xp->security)) 3929 + nla_total_size(sizeof(struct xfrm_mark)) 3930 + userpolicy_type_attrsize(); 3931 } 3932 3933 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, 3934 int dir, const struct km_event *c) 3935 { 3936 struct xfrm_user_polexpire *upe; 3937 int hard = c->data.hard; 3938 struct nlmsghdr *nlh; 3939 int err; 3940 3941 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); 3942 if (nlh == NULL) 3943 return -EMSGSIZE; 3944 3945 upe = nlmsg_data(nlh); 3946 copy_to_user_policy(xp, &upe->pol, dir); 3947 err = copy_to_user_tmpl(xp, skb); 3948 if (!err) 3949 err = copy_to_user_sec_ctx(xp, skb); 3950 if (!err) 3951 err = copy_to_user_policy_type(xp->type, skb); 3952 if (!err) 3953 err = xfrm_mark_put(skb, &xp->mark); 3954 if (!err) 3955 err = xfrm_if_id_put(skb, xp->if_id); 3956 if (!err && xp->xdo.dev) 3957 err = copy_user_offload(&xp->xdo, skb); 3958 if (err) { 3959 nlmsg_cancel(skb, nlh); 3960 return err; 3961 } 3962 upe->hard = !!hard; 3963 3964 nlmsg_end(skb, nlh); 3965 return 0; 3966 } 3967 3968 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 3969 { 3970 struct net *net = xp_net(xp); 3971 struct sk_buff *skb; 3972 int err; 3973 3974 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC); 3975 if (skb == NULL) 3976 return -ENOMEM; 3977 3978 err = build_polexpire(skb, xp, dir, c); 3979 BUG_ON(err < 0); 3980 3981 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE); 3982 } 3983 3984 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c) 3985 { 3986 unsigned int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 3987 struct net *net = xp_net(xp); 3988 struct xfrm_userpolicy_info *p; 3989 struct xfrm_userpolicy_id *id; 3990 struct nlmsghdr *nlh; 3991 struct sk_buff *skb; 3992 unsigned int headlen; 3993 int err; 3994 3995 headlen = sizeof(*p); 3996 if (c->event == XFRM_MSG_DELPOLICY) { 3997 len += nla_total_size(headlen); 3998 headlen = sizeof(*id); 3999 } 4000 len += userpolicy_type_attrsize(); 4001 len += nla_total_size(sizeof(struct xfrm_mark)); 4002 len += NLMSG_ALIGN(headlen); 4003 4004 skb = nlmsg_new(len, GFP_ATOMIC); 4005 if (skb == NULL) 4006 return -ENOMEM; 4007 4008 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); 4009 err = -EMSGSIZE; 4010 if (nlh == NULL) 4011 goto out_free_skb; 4012 4013 p = nlmsg_data(nlh); 4014 if (c->event == XFRM_MSG_DELPOLICY) { 4015 struct nlattr *attr; 4016 4017 id = nlmsg_data(nlh); 4018 memset(id, 0, sizeof(*id)); 4019 id->dir = dir; 4020 if (c->data.byid) 4021 id->index = xp->index; 4022 else 4023 memcpy(&id->sel, &xp->selector, sizeof(id->sel)); 4024 4025 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p)); 4026 err = -EMSGSIZE; 4027 if (attr == NULL) 4028 goto out_free_skb; 4029 4030 p = nla_data(attr); 4031 } 4032 4033 copy_to_user_policy(xp, p, dir); 4034 err = copy_to_user_tmpl(xp, skb); 4035 if (!err) 4036 err = copy_to_user_policy_type(xp->type, skb); 4037 if (!err) 4038 err = xfrm_mark_put(skb, &xp->mark); 4039 if (!err) 4040 err = xfrm_if_id_put(skb, xp->if_id); 4041 if (!err && xp->xdo.dev) 4042 err = copy_user_offload(&xp->xdo, skb); 4043 if (err) 4044 goto out_free_skb; 4045 4046 nlmsg_end(skb, nlh); 4047 4048 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); 4049 4050 out_free_skb: 4051 kfree_skb(skb); 4052 return err; 4053 } 4054 4055 static int xfrm_notify_policy_flush(const struct km_event *c) 4056 { 4057 struct net *net = c->net; 4058 struct nlmsghdr *nlh; 4059 struct sk_buff *skb; 4060 int err; 4061 4062 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC); 4063 if (skb == NULL) 4064 return -ENOMEM; 4065 4066 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); 4067 err = -EMSGSIZE; 4068 if (nlh == NULL) 4069 goto out_free_skb; 4070 err = copy_to_user_policy_type(c->data.type, skb); 4071 if (err) 4072 goto out_free_skb; 4073 4074 nlmsg_end(skb, nlh); 4075 4076 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); 4077 4078 out_free_skb: 4079 kfree_skb(skb); 4080 return err; 4081 } 4082 4083 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 4084 { 4085 4086 switch (c->event) { 4087 case XFRM_MSG_NEWPOLICY: 4088 case XFRM_MSG_UPDPOLICY: 4089 case XFRM_MSG_DELPOLICY: 4090 return xfrm_notify_policy(xp, dir, c); 4091 case XFRM_MSG_FLUSHPOLICY: 4092 return xfrm_notify_policy_flush(c); 4093 case XFRM_MSG_POLEXPIRE: 4094 return xfrm_exp_policy_notify(xp, dir, c); 4095 default: 4096 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", 4097 c->event); 4098 } 4099 4100 return 0; 4101 4102 } 4103 4104 static inline unsigned int xfrm_report_msgsize(void) 4105 { 4106 return NLMSG_ALIGN(sizeof(struct xfrm_user_report)); 4107 } 4108 4109 static int build_report(struct sk_buff *skb, u8 proto, 4110 struct xfrm_selector *sel, xfrm_address_t *addr) 4111 { 4112 struct xfrm_user_report *ur; 4113 struct nlmsghdr *nlh; 4114 4115 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0); 4116 if (nlh == NULL) 4117 return -EMSGSIZE; 4118 4119 ur = nlmsg_data(nlh); 4120 ur->proto = proto; 4121 memcpy(&ur->sel, sel, sizeof(ur->sel)); 4122 4123 if (addr) { 4124 int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr); 4125 if (err) { 4126 nlmsg_cancel(skb, nlh); 4127 return err; 4128 } 4129 } 4130 nlmsg_end(skb, nlh); 4131 return 0; 4132 } 4133 4134 static int xfrm_send_report(struct net *net, u8 proto, 4135 struct xfrm_selector *sel, xfrm_address_t *addr) 4136 { 4137 struct sk_buff *skb; 4138 int err; 4139 4140 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC); 4141 if (skb == NULL) 4142 return -ENOMEM; 4143 4144 err = build_report(skb, proto, sel, addr); 4145 BUG_ON(err < 0); 4146 4147 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT); 4148 } 4149 4150 static inline unsigned int xfrm_mapping_msgsize(void) 4151 { 4152 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping)); 4153 } 4154 4155 static int build_mapping(struct sk_buff *skb, struct xfrm_state *x, 4156 xfrm_address_t *new_saddr, __be16 new_sport) 4157 { 4158 struct xfrm_user_mapping *um; 4159 struct nlmsghdr *nlh; 4160 4161 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0); 4162 if (nlh == NULL) 4163 return -EMSGSIZE; 4164 4165 um = nlmsg_data(nlh); 4166 4167 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr)); 4168 um->id.spi = x->id.spi; 4169 um->id.family = x->props.family; 4170 um->id.proto = x->id.proto; 4171 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr)); 4172 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr)); 4173 um->new_sport = new_sport; 4174 um->old_sport = x->encap->encap_sport; 4175 um->reqid = x->props.reqid; 4176 4177 nlmsg_end(skb, nlh); 4178 return 0; 4179 } 4180 4181 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, 4182 __be16 sport) 4183 { 4184 struct net *net = xs_net(x); 4185 struct sk_buff *skb; 4186 int err; 4187 4188 if (x->id.proto != IPPROTO_ESP) 4189 return -EINVAL; 4190 4191 if (!x->encap) 4192 return -EINVAL; 4193 4194 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC); 4195 if (skb == NULL) 4196 return -ENOMEM; 4197 4198 err = build_mapping(skb, x, ipaddr, sport); 4199 BUG_ON(err < 0); 4200 4201 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING); 4202 } 4203 4204 static bool xfrm_is_alive(const struct km_event *c) 4205 { 4206 return (bool)xfrm_acquire_is_on(c->net); 4207 } 4208 4209 static struct xfrm_mgr netlink_mgr = { 4210 .notify = xfrm_send_state_notify, 4211 .acquire = xfrm_send_acquire, 4212 .compile_policy = xfrm_compile_policy, 4213 .notify_policy = xfrm_send_policy_notify, 4214 .report = xfrm_send_report, 4215 .migrate = xfrm_send_migrate, 4216 .new_mapping = xfrm_send_mapping, 4217 .is_alive = xfrm_is_alive, 4218 }; 4219 4220 static int __net_init xfrm_user_net_init(struct net *net) 4221 { 4222 struct sock *nlsk; 4223 struct netlink_kernel_cfg cfg = { 4224 .groups = XFRMNLGRP_MAX, 4225 .input = xfrm_netlink_rcv, 4226 }; 4227 4228 nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg); 4229 if (nlsk == NULL) 4230 return -ENOMEM; 4231 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */ 4232 rcu_assign_pointer(net->xfrm.nlsk, nlsk); 4233 return 0; 4234 } 4235 4236 static void __net_exit xfrm_user_net_pre_exit(struct net *net) 4237 { 4238 RCU_INIT_POINTER(net->xfrm.nlsk, NULL); 4239 } 4240 4241 static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list) 4242 { 4243 struct net *net; 4244 4245 list_for_each_entry(net, net_exit_list, exit_list) 4246 netlink_kernel_release(net->xfrm.nlsk_stash); 4247 } 4248 4249 static struct pernet_operations xfrm_user_net_ops = { 4250 .init = xfrm_user_net_init, 4251 .pre_exit = xfrm_user_net_pre_exit, 4252 .exit_batch = xfrm_user_net_exit, 4253 }; 4254 4255 static int __init xfrm_user_init(void) 4256 { 4257 int rv; 4258 4259 printk(KERN_INFO "Initializing XFRM netlink socket\n"); 4260 4261 rv = register_pernet_subsys(&xfrm_user_net_ops); 4262 if (rv < 0) 4263 return rv; 4264 xfrm_register_km(&netlink_mgr); 4265 return 0; 4266 } 4267 4268 static void __exit xfrm_user_exit(void) 4269 { 4270 xfrm_unregister_km(&netlink_mgr); 4271 unregister_pernet_subsys(&xfrm_user_net_ops); 4272 } 4273 4274 module_init(xfrm_user_init); 4275 module_exit(xfrm_user_exit); 4276 MODULE_DESCRIPTION("XFRM User interface"); 4277 MODULE_LICENSE("GPL"); 4278 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM); 4279