1 /* xfrm_user.c: User interface to configure xfrm engine. 2 * 3 * Copyright (C) 2002 David S. Miller (davem@redhat.com) 4 * 5 * Changes: 6 * Mitsuru KANDA @USAGI 7 * Kazunori MIYAZAWA @USAGI 8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 9 * IPv6 support 10 * 11 */ 12 13 #include <linux/crypto.h> 14 #include <linux/module.h> 15 #include <linux/kernel.h> 16 #include <linux/types.h> 17 #include <linux/slab.h> 18 #include <linux/socket.h> 19 #include <linux/string.h> 20 #include <linux/net.h> 21 #include <linux/skbuff.h> 22 #include <linux/rtnetlink.h> 23 #include <linux/pfkeyv2.h> 24 #include <linux/ipsec.h> 25 #include <linux/init.h> 26 #include <linux/security.h> 27 #include <net/sock.h> 28 #include <net/xfrm.h> 29 #include <net/netlink.h> 30 #include <asm/uaccess.h> 31 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 32 #include <linux/in6.h> 33 #endif 34 #include <linux/audit.h> 35 36 static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type) 37 { 38 struct rtattr *rt = xfrma[type - 1]; 39 struct xfrm_algo *algp; 40 int len; 41 42 if (!rt) 43 return 0; 44 45 len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp); 46 if (len < 0) 47 return -EINVAL; 48 49 algp = RTA_DATA(rt); 50 51 len -= (algp->alg_key_len + 7U) / 8; 52 if (len < 0) 53 return -EINVAL; 54 55 switch (type) { 56 case XFRMA_ALG_AUTH: 57 if (!algp->alg_key_len && 58 strcmp(algp->alg_name, "digest_null") != 0) 59 return -EINVAL; 60 break; 61 62 case XFRMA_ALG_CRYPT: 63 if (!algp->alg_key_len && 64 strcmp(algp->alg_name, "cipher_null") != 0) 65 return -EINVAL; 66 break; 67 68 case XFRMA_ALG_COMP: 69 /* Zero length keys are legal. */ 70 break; 71 72 default: 73 return -EINVAL; 74 }; 75 76 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0'; 77 return 0; 78 } 79 80 static int verify_encap_tmpl(struct rtattr **xfrma) 81 { 82 struct rtattr *rt = xfrma[XFRMA_ENCAP - 1]; 83 struct xfrm_encap_tmpl *encap; 84 85 if (!rt) 86 return 0; 87 88 if ((rt->rta_len - sizeof(*rt)) < sizeof(*encap)) 89 return -EINVAL; 90 91 return 0; 92 } 93 94 static int verify_one_addr(struct rtattr **xfrma, enum xfrm_attr_type_t type, 95 xfrm_address_t **addrp) 96 { 97 struct rtattr *rt = xfrma[type - 1]; 98 99 if (!rt) 100 return 0; 101 102 if ((rt->rta_len - sizeof(*rt)) < sizeof(**addrp)) 103 return -EINVAL; 104 105 if (addrp) 106 *addrp = RTA_DATA(rt); 107 108 return 0; 109 } 110 111 static inline int verify_sec_ctx_len(struct rtattr **xfrma) 112 { 113 struct rtattr *rt = xfrma[XFRMA_SEC_CTX - 1]; 114 struct xfrm_user_sec_ctx *uctx; 115 int len = 0; 116 117 if (!rt) 118 return 0; 119 120 if (rt->rta_len < sizeof(*uctx)) 121 return -EINVAL; 122 123 uctx = RTA_DATA(rt); 124 125 len += sizeof(struct xfrm_user_sec_ctx); 126 len += uctx->ctx_len; 127 128 if (uctx->len != len) 129 return -EINVAL; 130 131 return 0; 132 } 133 134 135 static int verify_newsa_info(struct xfrm_usersa_info *p, 136 struct rtattr **xfrma) 137 { 138 int err; 139 140 err = -EINVAL; 141 switch (p->family) { 142 case AF_INET: 143 break; 144 145 case AF_INET6: 146 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 147 break; 148 #else 149 err = -EAFNOSUPPORT; 150 goto out; 151 #endif 152 153 default: 154 goto out; 155 }; 156 157 err = -EINVAL; 158 switch (p->id.proto) { 159 case IPPROTO_AH: 160 if (!xfrma[XFRMA_ALG_AUTH-1] || 161 xfrma[XFRMA_ALG_CRYPT-1] || 162 xfrma[XFRMA_ALG_COMP-1]) 163 goto out; 164 break; 165 166 case IPPROTO_ESP: 167 if ((!xfrma[XFRMA_ALG_AUTH-1] && 168 !xfrma[XFRMA_ALG_CRYPT-1]) || 169 xfrma[XFRMA_ALG_COMP-1]) 170 goto out; 171 break; 172 173 case 
IPPROTO_COMP: 174 if (!xfrma[XFRMA_ALG_COMP-1] || 175 xfrma[XFRMA_ALG_AUTH-1] || 176 xfrma[XFRMA_ALG_CRYPT-1]) 177 goto out; 178 break; 179 180 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 181 case IPPROTO_DSTOPTS: 182 case IPPROTO_ROUTING: 183 if (xfrma[XFRMA_ALG_COMP-1] || 184 xfrma[XFRMA_ALG_AUTH-1] || 185 xfrma[XFRMA_ALG_CRYPT-1] || 186 xfrma[XFRMA_ENCAP-1] || 187 xfrma[XFRMA_SEC_CTX-1] || 188 !xfrma[XFRMA_COADDR-1]) 189 goto out; 190 break; 191 #endif 192 193 default: 194 goto out; 195 }; 196 197 if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH))) 198 goto out; 199 if ((err = verify_one_alg(xfrma, XFRMA_ALG_CRYPT))) 200 goto out; 201 if ((err = verify_one_alg(xfrma, XFRMA_ALG_COMP))) 202 goto out; 203 if ((err = verify_encap_tmpl(xfrma))) 204 goto out; 205 if ((err = verify_sec_ctx_len(xfrma))) 206 goto out; 207 if ((err = verify_one_addr(xfrma, XFRMA_COADDR, NULL))) 208 goto out; 209 210 err = -EINVAL; 211 switch (p->mode) { 212 case XFRM_MODE_TRANSPORT: 213 case XFRM_MODE_TUNNEL: 214 case XFRM_MODE_ROUTEOPTIMIZATION: 215 case XFRM_MODE_BEET: 216 break; 217 218 default: 219 goto out; 220 }; 221 222 err = 0; 223 224 out: 225 return err; 226 } 227 228 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, 229 struct xfrm_algo_desc *(*get_byname)(char *, int), 230 struct rtattr *u_arg) 231 { 232 struct rtattr *rta = u_arg; 233 struct xfrm_algo *p, *ualg; 234 struct xfrm_algo_desc *algo; 235 int len; 236 237 if (!rta) 238 return 0; 239 240 ualg = RTA_DATA(rta); 241 242 algo = get_byname(ualg->alg_name, 1); 243 if (!algo) 244 return -ENOSYS; 245 *props = algo->desc.sadb_alg_id; 246 247 len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8; 248 p = kmemdup(ualg, len, GFP_KERNEL); 249 if (!p) 250 return -ENOMEM; 251 252 strcpy(p->alg_name, algo->name); 253 *algpp = p; 254 return 0; 255 } 256 257 static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg) 258 { 259 struct rtattr *rta = u_arg; 260 struct xfrm_encap_tmpl *p, *uencap; 261 262 if (!rta) 263 return 0; 264 265 uencap = RTA_DATA(rta); 266 p = kmemdup(uencap, sizeof(*p), GFP_KERNEL); 267 if (!p) 268 return -ENOMEM; 269 270 *encapp = p; 271 return 0; 272 } 273 274 275 static inline int xfrm_user_sec_ctx_size(struct xfrm_policy *xp) 276 { 277 struct xfrm_sec_ctx *xfrm_ctx = xp->security; 278 int len = 0; 279 280 if (xfrm_ctx) { 281 len += sizeof(struct xfrm_user_sec_ctx); 282 len += xfrm_ctx->ctx_len; 283 } 284 return len; 285 } 286 287 static int attach_sec_ctx(struct xfrm_state *x, struct rtattr *u_arg) 288 { 289 struct xfrm_user_sec_ctx *uctx; 290 291 if (!u_arg) 292 return 0; 293 294 uctx = RTA_DATA(u_arg); 295 return security_xfrm_state_alloc(x, uctx); 296 } 297 298 static int attach_one_addr(xfrm_address_t **addrpp, struct rtattr *u_arg) 299 { 300 struct rtattr *rta = u_arg; 301 xfrm_address_t *p, *uaddrp; 302 303 if (!rta) 304 return 0; 305 306 uaddrp = RTA_DATA(rta); 307 p = kmemdup(uaddrp, sizeof(*p), GFP_KERNEL); 308 if (!p) 309 return -ENOMEM; 310 311 *addrpp = p; 312 return 0; 313 } 314 315 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) 316 { 317 memcpy(&x->id, &p->id, sizeof(x->id)); 318 memcpy(&x->sel, &p->sel, sizeof(x->sel)); 319 memcpy(&x->lft, &p->lft, sizeof(x->lft)); 320 x->props.mode = p->mode; 321 x->props.replay_window = p->replay_window; 322 x->props.reqid = p->reqid; 323 x->props.family = p->family; 324 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); 325 x->props.flags = p->flags; 326 } 327 328 /* 329 * someday when pfkey 
also has support, we could have the code 330 * somehow made shareable and move it to xfrm_state.c - JHS 331 * 332 */ 333 static int xfrm_update_ae_params(struct xfrm_state *x, struct rtattr **xfrma) 334 { 335 int err = - EINVAL; 336 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1]; 337 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1]; 338 struct rtattr *et = xfrma[XFRMA_ETIMER_THRESH-1]; 339 struct rtattr *rt = xfrma[XFRMA_REPLAY_THRESH-1]; 340 341 if (rp) { 342 struct xfrm_replay_state *replay; 343 if (RTA_PAYLOAD(rp) < sizeof(*replay)) 344 goto error; 345 replay = RTA_DATA(rp); 346 memcpy(&x->replay, replay, sizeof(*replay)); 347 memcpy(&x->preplay, replay, sizeof(*replay)); 348 } 349 350 if (lt) { 351 struct xfrm_lifetime_cur *ltime; 352 if (RTA_PAYLOAD(lt) < sizeof(*ltime)) 353 goto error; 354 ltime = RTA_DATA(lt); 355 x->curlft.bytes = ltime->bytes; 356 x->curlft.packets = ltime->packets; 357 x->curlft.add_time = ltime->add_time; 358 x->curlft.use_time = ltime->use_time; 359 } 360 361 if (et) { 362 if (RTA_PAYLOAD(et) < sizeof(u32)) 363 goto error; 364 x->replay_maxage = *(u32*)RTA_DATA(et); 365 } 366 367 if (rt) { 368 if (RTA_PAYLOAD(rt) < sizeof(u32)) 369 goto error; 370 x->replay_maxdiff = *(u32*)RTA_DATA(rt); 371 } 372 373 return 0; 374 error: 375 return err; 376 } 377 378 static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p, 379 struct rtattr **xfrma, 380 int *errp) 381 { 382 struct xfrm_state *x = xfrm_state_alloc(); 383 int err = -ENOMEM; 384 385 if (!x) 386 goto error_no_put; 387 388 copy_from_user_state(x, p); 389 390 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo, 391 xfrm_aalg_get_byname, 392 xfrma[XFRMA_ALG_AUTH-1]))) 393 goto error; 394 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo, 395 xfrm_ealg_get_byname, 396 xfrma[XFRMA_ALG_CRYPT-1]))) 397 goto error; 398 if ((err = attach_one_algo(&x->calg, &x->props.calgo, 399 xfrm_calg_get_byname, 400 xfrma[XFRMA_ALG_COMP-1]))) 401 goto error; 402 if ((err = attach_encap_tmpl(&x->encap, xfrma[XFRMA_ENCAP-1]))) 403 goto error; 404 if ((err = attach_one_addr(&x->coaddr, xfrma[XFRMA_COADDR-1]))) 405 goto error; 406 err = xfrm_init_state(x); 407 if (err) 408 goto error; 409 410 if ((err = attach_sec_ctx(x, xfrma[XFRMA_SEC_CTX-1]))) 411 goto error; 412 413 x->km.seq = p->seq; 414 x->replay_maxdiff = sysctl_xfrm_aevent_rseqth; 415 /* sysctl_xfrm_aevent_etime is in 100ms units */ 416 x->replay_maxage = (sysctl_xfrm_aevent_etime*HZ)/XFRM_AE_ETH_M; 417 x->preplay.bitmap = 0; 418 x->preplay.seq = x->replay.seq+x->replay_maxdiff; 419 x->preplay.oseq = x->replay.oseq +x->replay_maxdiff; 420 421 /* override default values from above */ 422 423 err = xfrm_update_ae_params(x, (struct rtattr **)xfrma); 424 if (err < 0) 425 goto error; 426 427 return x; 428 429 error: 430 x->km.state = XFRM_STATE_DEAD; 431 xfrm_state_put(x); 432 error_no_put: 433 *errp = err; 434 return NULL; 435 } 436 437 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 438 struct rtattr **xfrma) 439 { 440 struct xfrm_usersa_info *p = NLMSG_DATA(nlh); 441 struct xfrm_state *x; 442 int err; 443 struct km_event c; 444 445 err = verify_newsa_info(p, xfrma); 446 if (err) 447 return err; 448 449 x = xfrm_state_construct(p, xfrma, &err); 450 if (!x) 451 return err; 452 453 xfrm_state_hold(x); 454 if (nlh->nlmsg_type == XFRM_MSG_NEWSA) 455 err = xfrm_state_add(x); 456 else 457 err = xfrm_state_update(x); 458 459 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 460 AUDIT_MAC_IPSEC_ADDSA, err ? 
0 : 1, NULL, x); 461 462 if (err < 0) { 463 x->km.state = XFRM_STATE_DEAD; 464 __xfrm_state_put(x); 465 goto out; 466 } 467 468 c.seq = nlh->nlmsg_seq; 469 c.pid = nlh->nlmsg_pid; 470 c.event = nlh->nlmsg_type; 471 472 km_state_notify(x, &c); 473 out: 474 xfrm_state_put(x); 475 return err; 476 } 477 478 static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p, 479 struct rtattr **xfrma, 480 int *errp) 481 { 482 struct xfrm_state *x = NULL; 483 int err; 484 485 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) { 486 err = -ESRCH; 487 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family); 488 } else { 489 xfrm_address_t *saddr = NULL; 490 491 err = verify_one_addr(xfrma, XFRMA_SRCADDR, &saddr); 492 if (err) 493 goto out; 494 495 if (!saddr) { 496 err = -EINVAL; 497 goto out; 498 } 499 500 err = -ESRCH; 501 x = xfrm_state_lookup_byaddr(&p->daddr, saddr, p->proto, 502 p->family); 503 } 504 505 out: 506 if (!x && errp) 507 *errp = err; 508 return x; 509 } 510 511 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 512 struct rtattr **xfrma) 513 { 514 struct xfrm_state *x; 515 int err = -ESRCH; 516 struct km_event c; 517 struct xfrm_usersa_id *p = NLMSG_DATA(nlh); 518 519 x = xfrm_user_state_lookup(p, xfrma, &err); 520 if (x == NULL) 521 return err; 522 523 if ((err = security_xfrm_state_delete(x)) != 0) 524 goto out; 525 526 if (xfrm_state_kern(x)) { 527 err = -EPERM; 528 goto out; 529 } 530 531 err = xfrm_state_delete(x); 532 533 if (err < 0) 534 goto out; 535 536 c.seq = nlh->nlmsg_seq; 537 c.pid = nlh->nlmsg_pid; 538 c.event = nlh->nlmsg_type; 539 km_state_notify(x, &c); 540 541 out: 542 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 543 AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x); 544 xfrm_state_put(x); 545 return err; 546 } 547 548 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) 549 { 550 memcpy(&p->id, &x->id, sizeof(p->id)); 551 memcpy(&p->sel, &x->sel, sizeof(p->sel)); 552 memcpy(&p->lft, &x->lft, sizeof(p->lft)); 553 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft)); 554 memcpy(&p->stats, &x->stats, sizeof(p->stats)); 555 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr)); 556 p->mode = x->props.mode; 557 p->replay_window = x->props.replay_window; 558 p->reqid = x->props.reqid; 559 p->family = x->props.family; 560 p->flags = x->props.flags; 561 p->seq = x->km.seq; 562 } 563 564 struct xfrm_dump_info { 565 struct sk_buff *in_skb; 566 struct sk_buff *out_skb; 567 u32 nlmsg_seq; 568 u16 nlmsg_flags; 569 int start_idx; 570 int this_idx; 571 }; 572 573 static int dump_one_state(struct xfrm_state *x, int count, void *ptr) 574 { 575 struct xfrm_dump_info *sp = ptr; 576 struct sk_buff *in_skb = sp->in_skb; 577 struct sk_buff *skb = sp->out_skb; 578 struct xfrm_usersa_info *p; 579 struct nlmsghdr *nlh; 580 unsigned char *b = skb->tail; 581 582 if (sp->this_idx < sp->start_idx) 583 goto out; 584 585 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid, 586 sp->nlmsg_seq, 587 XFRM_MSG_NEWSA, sizeof(*p)); 588 nlh->nlmsg_flags = sp->nlmsg_flags; 589 590 p = NLMSG_DATA(nlh); 591 copy_to_user_state(x, p); 592 593 if (x->aalg) 594 RTA_PUT(skb, XFRMA_ALG_AUTH, 595 sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg); 596 if (x->ealg) 597 RTA_PUT(skb, XFRMA_ALG_CRYPT, 598 sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg); 599 if (x->calg) 600 RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg); 601 602 if (x->encap) 603 RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); 604 605 if (x->security) { 606 
int ctx_size = sizeof(struct xfrm_sec_ctx) + 607 x->security->ctx_len; 608 struct rtattr *rt = __RTA_PUT(skb, XFRMA_SEC_CTX, ctx_size); 609 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt); 610 611 uctx->exttype = XFRMA_SEC_CTX; 612 uctx->len = ctx_size; 613 uctx->ctx_doi = x->security->ctx_doi; 614 uctx->ctx_alg = x->security->ctx_alg; 615 uctx->ctx_len = x->security->ctx_len; 616 memcpy(uctx + 1, x->security->ctx_str, x->security->ctx_len); 617 } 618 619 if (x->coaddr) 620 RTA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); 621 622 if (x->lastused) 623 RTA_PUT(skb, XFRMA_LASTUSED, sizeof(x->lastused), &x->lastused); 624 625 nlh->nlmsg_len = skb->tail - b; 626 out: 627 sp->this_idx++; 628 return 0; 629 630 nlmsg_failure: 631 rtattr_failure: 632 skb_trim(skb, b - skb->data); 633 return -1; 634 } 635 636 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) 637 { 638 struct xfrm_dump_info info; 639 640 info.in_skb = cb->skb; 641 info.out_skb = skb; 642 info.nlmsg_seq = cb->nlh->nlmsg_seq; 643 info.nlmsg_flags = NLM_F_MULTI; 644 info.this_idx = 0; 645 info.start_idx = cb->args[0]; 646 (void) xfrm_state_walk(0, dump_one_state, &info); 647 cb->args[0] = info.this_idx; 648 649 return skb->len; 650 } 651 652 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, 653 struct xfrm_state *x, u32 seq) 654 { 655 struct xfrm_dump_info info; 656 struct sk_buff *skb; 657 658 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); 659 if (!skb) 660 return ERR_PTR(-ENOMEM); 661 662 info.in_skb = in_skb; 663 info.out_skb = skb; 664 info.nlmsg_seq = seq; 665 info.nlmsg_flags = 0; 666 info.this_idx = info.start_idx = 0; 667 668 if (dump_one_state(x, 0, &info)) { 669 kfree_skb(skb); 670 return NULL; 671 } 672 673 return skb; 674 } 675 676 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 677 struct rtattr **xfrma) 678 { 679 struct xfrm_usersa_id *p = NLMSG_DATA(nlh); 680 struct xfrm_state *x; 681 struct sk_buff *resp_skb; 682 int err = -ESRCH; 683 684 x = xfrm_user_state_lookup(p, xfrma, &err); 685 if (x == NULL) 686 goto out_noput; 687 688 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); 689 if (IS_ERR(resp_skb)) { 690 err = PTR_ERR(resp_skb); 691 } else { 692 err = netlink_unicast(xfrm_nl, resp_skb, 693 NETLINK_CB(skb).pid, MSG_DONTWAIT); 694 } 695 xfrm_state_put(x); 696 out_noput: 697 return err; 698 } 699 700 static int verify_userspi_info(struct xfrm_userspi_info *p) 701 { 702 switch (p->info.id.proto) { 703 case IPPROTO_AH: 704 case IPPROTO_ESP: 705 break; 706 707 case IPPROTO_COMP: 708 /* IPCOMP spi is 16-bits. 
*/ 709 if (p->max >= 0x10000) 710 return -EINVAL; 711 break; 712 713 default: 714 return -EINVAL; 715 }; 716 717 if (p->min > p->max) 718 return -EINVAL; 719 720 return 0; 721 } 722 723 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, 724 struct rtattr **xfrma) 725 { 726 struct xfrm_state *x; 727 struct xfrm_userspi_info *p; 728 struct sk_buff *resp_skb; 729 xfrm_address_t *daddr; 730 int family; 731 int err; 732 733 p = NLMSG_DATA(nlh); 734 err = verify_userspi_info(p); 735 if (err) 736 goto out_noput; 737 738 family = p->info.family; 739 daddr = &p->info.id.daddr; 740 741 x = NULL; 742 if (p->info.seq) { 743 x = xfrm_find_acq_byseq(p->info.seq); 744 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) { 745 xfrm_state_put(x); 746 x = NULL; 747 } 748 } 749 750 if (!x) 751 x = xfrm_find_acq(p->info.mode, p->info.reqid, 752 p->info.id.proto, daddr, 753 &p->info.saddr, 1, 754 family); 755 err = -ENOENT; 756 if (x == NULL) 757 goto out_noput; 758 759 resp_skb = ERR_PTR(-ENOENT); 760 761 spin_lock_bh(&x->lock); 762 if (x->km.state != XFRM_STATE_DEAD) { 763 xfrm_alloc_spi(x, htonl(p->min), htonl(p->max)); 764 if (x->id.spi) 765 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); 766 } 767 spin_unlock_bh(&x->lock); 768 769 if (IS_ERR(resp_skb)) { 770 err = PTR_ERR(resp_skb); 771 goto out; 772 } 773 774 err = netlink_unicast(xfrm_nl, resp_skb, 775 NETLINK_CB(skb).pid, MSG_DONTWAIT); 776 777 out: 778 xfrm_state_put(x); 779 out_noput: 780 return err; 781 } 782 783 static int verify_policy_dir(u8 dir) 784 { 785 switch (dir) { 786 case XFRM_POLICY_IN: 787 case XFRM_POLICY_OUT: 788 case XFRM_POLICY_FWD: 789 break; 790 791 default: 792 return -EINVAL; 793 }; 794 795 return 0; 796 } 797 798 static int verify_policy_type(u8 type) 799 { 800 switch (type) { 801 case XFRM_POLICY_TYPE_MAIN: 802 #ifdef CONFIG_XFRM_SUB_POLICY 803 case XFRM_POLICY_TYPE_SUB: 804 #endif 805 break; 806 807 default: 808 return -EINVAL; 809 }; 810 811 return 0; 812 } 813 814 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p) 815 { 816 switch (p->share) { 817 case XFRM_SHARE_ANY: 818 case XFRM_SHARE_SESSION: 819 case XFRM_SHARE_USER: 820 case XFRM_SHARE_UNIQUE: 821 break; 822 823 default: 824 return -EINVAL; 825 }; 826 827 switch (p->action) { 828 case XFRM_POLICY_ALLOW: 829 case XFRM_POLICY_BLOCK: 830 break; 831 832 default: 833 return -EINVAL; 834 }; 835 836 switch (p->sel.family) { 837 case AF_INET: 838 break; 839 840 case AF_INET6: 841 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 842 break; 843 #else 844 return -EAFNOSUPPORT; 845 #endif 846 847 default: 848 return -EINVAL; 849 }; 850 851 return verify_policy_dir(p->dir); 852 } 853 854 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct rtattr **xfrma) 855 { 856 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1]; 857 struct xfrm_user_sec_ctx *uctx; 858 859 if (!rt) 860 return 0; 861 862 uctx = RTA_DATA(rt); 863 return security_xfrm_policy_alloc(pol, uctx); 864 } 865 866 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, 867 int nr) 868 { 869 int i; 870 871 xp->xfrm_nr = nr; 872 for (i = 0; i < nr; i++, ut++) { 873 struct xfrm_tmpl *t = &xp->xfrm_vec[i]; 874 875 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id)); 876 memcpy(&t->saddr, &ut->saddr, 877 sizeof(xfrm_address_t)); 878 t->reqid = ut->reqid; 879 t->mode = ut->mode; 880 t->share = ut->share; 881 t->optional = ut->optional; 882 t->aalgos = ut->aalgos; 883 t->ealgos = ut->ealgos; 884 t->calgos = ut->calgos; 885 t->encap_family = ut->family; 886 
} 887 } 888 889 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) 890 { 891 int i; 892 893 if (nr > XFRM_MAX_DEPTH) 894 return -EINVAL; 895 896 for (i = 0; i < nr; i++) { 897 /* We never validated the ut->family value, so many 898 * applications simply leave it at zero. The check was 899 * never made and ut->family was ignored because all 900 * templates could be assumed to have the same family as 901 * the policy itself. Now that we will have ipv4-in-ipv6 902 * and ipv6-in-ipv4 tunnels, this is no longer true. 903 */ 904 if (!ut[i].family) 905 ut[i].family = family; 906 907 switch (ut[i].family) { 908 case AF_INET: 909 break; 910 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 911 case AF_INET6: 912 break; 913 #endif 914 default: 915 return -EINVAL; 916 }; 917 } 918 919 return 0; 920 } 921 922 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma) 923 { 924 struct rtattr *rt = xfrma[XFRMA_TMPL-1]; 925 926 if (!rt) { 927 pol->xfrm_nr = 0; 928 } else { 929 struct xfrm_user_tmpl *utmpl = RTA_DATA(rt); 930 int nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl); 931 int err; 932 933 err = validate_tmpl(nr, utmpl, pol->family); 934 if (err) 935 return err; 936 937 copy_templates(pol, RTA_DATA(rt), nr); 938 } 939 return 0; 940 } 941 942 static int copy_from_user_policy_type(u8 *tp, struct rtattr **xfrma) 943 { 944 struct rtattr *rt = xfrma[XFRMA_POLICY_TYPE-1]; 945 struct xfrm_userpolicy_type *upt; 946 u8 type = XFRM_POLICY_TYPE_MAIN; 947 int err; 948 949 if (rt) { 950 if (rt->rta_len < sizeof(*upt)) 951 return -EINVAL; 952 953 upt = RTA_DATA(rt); 954 type = upt->type; 955 } 956 957 err = verify_policy_type(type); 958 if (err) 959 return err; 960 961 *tp = type; 962 return 0; 963 } 964 965 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p) 966 { 967 xp->priority = p->priority; 968 xp->index = p->index; 969 memcpy(&xp->selector, &p->sel, sizeof(xp->selector)); 970 memcpy(&xp->lft, &p->lft, sizeof(xp->lft)); 971 xp->action = p->action; 972 xp->flags = p->flags; 973 xp->family = p->sel.family; 974 /* XXX xp->share = p->share; */ 975 } 976 977 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) 978 { 979 memcpy(&p->sel, &xp->selector, sizeof(p->sel)); 980 memcpy(&p->lft, &xp->lft, sizeof(p->lft)); 981 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); 982 p->priority = xp->priority; 983 p->index = xp->index; 984 p->sel.family = xp->family; 985 p->dir = dir; 986 p->action = xp->action; 987 p->flags = xp->flags; 988 p->share = XFRM_SHARE_ANY; /* XXX xp->share */ 989 } 990 991 static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **xfrma, int *errp) 992 { 993 struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL); 994 int err; 995 996 if (!xp) { 997 *errp = -ENOMEM; 998 return NULL; 999 } 1000 1001 copy_from_user_policy(xp, p); 1002 1003 err = copy_from_user_policy_type(&xp->type, xfrma); 1004 if (err) 1005 goto error; 1006 1007 if (!(err = copy_from_user_tmpl(xp, xfrma))) 1008 err = copy_from_user_sec_ctx(xp, xfrma); 1009 if (err) 1010 goto error; 1011 1012 return xp; 1013 error: 1014 *errp = err; 1015 kfree(xp); 1016 return NULL; 1017 } 1018 1019 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 1020 struct rtattr **xfrma) 1021 { 1022 struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh); 1023 struct xfrm_policy *xp; 1024 struct km_event c; 1025 int err; 1026 int excl; 1027 1028 err = verify_newpolicy_info(p); 
1029 if (err) 1030 return err; 1031 err = verify_sec_ctx_len(xfrma); 1032 if (err) 1033 return err; 1034 1035 xp = xfrm_policy_construct(p, xfrma, &err); 1036 if (!xp) 1037 return err; 1038 1039 /* shouldnt excl be based on nlh flags?? 1040 * Aha! this is anti-netlink really i.e more pfkey derived 1041 * in netlink excl is a flag and you wouldnt need 1042 * a type XFRM_MSG_UPDPOLICY - JHS */ 1043 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY; 1044 err = xfrm_policy_insert(p->dir, xp, excl); 1045 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 1046 AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL); 1047 1048 if (err) { 1049 security_xfrm_policy_free(xp); 1050 kfree(xp); 1051 return err; 1052 } 1053 1054 c.event = nlh->nlmsg_type; 1055 c.seq = nlh->nlmsg_seq; 1056 c.pid = nlh->nlmsg_pid; 1057 km_policy_notify(xp, p->dir, &c); 1058 1059 xfrm_pol_put(xp); 1060 1061 return 0; 1062 } 1063 1064 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) 1065 { 1066 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH]; 1067 int i; 1068 1069 if (xp->xfrm_nr == 0) 1070 return 0; 1071 1072 for (i = 0; i < xp->xfrm_nr; i++) { 1073 struct xfrm_user_tmpl *up = &vec[i]; 1074 struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; 1075 1076 memcpy(&up->id, &kp->id, sizeof(up->id)); 1077 up->family = kp->encap_family; 1078 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); 1079 up->reqid = kp->reqid; 1080 up->mode = kp->mode; 1081 up->share = kp->share; 1082 up->optional = kp->optional; 1083 up->aalgos = kp->aalgos; 1084 up->ealgos = kp->ealgos; 1085 up->calgos = kp->calgos; 1086 } 1087 RTA_PUT(skb, XFRMA_TMPL, 1088 (sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr), 1089 vec); 1090 1091 return 0; 1092 1093 rtattr_failure: 1094 return -1; 1095 } 1096 1097 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) 1098 { 1099 int ctx_size = sizeof(struct xfrm_sec_ctx) + s->ctx_len; 1100 struct rtattr *rt = __RTA_PUT(skb, XFRMA_SEC_CTX, ctx_size); 1101 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt); 1102 1103 uctx->exttype = XFRMA_SEC_CTX; 1104 uctx->len = ctx_size; 1105 uctx->ctx_doi = s->ctx_doi; 1106 uctx->ctx_alg = s->ctx_alg; 1107 uctx->ctx_len = s->ctx_len; 1108 memcpy(uctx + 1, s->ctx_str, s->ctx_len); 1109 return 0; 1110 1111 rtattr_failure: 1112 return -1; 1113 } 1114 1115 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb) 1116 { 1117 if (x->security) { 1118 return copy_sec_ctx(x->security, skb); 1119 } 1120 return 0; 1121 } 1122 1123 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb) 1124 { 1125 if (xp->security) { 1126 return copy_sec_ctx(xp->security, skb); 1127 } 1128 return 0; 1129 } 1130 1131 #ifdef CONFIG_XFRM_SUB_POLICY 1132 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) 1133 { 1134 struct xfrm_userpolicy_type upt; 1135 1136 memset(&upt, 0, sizeof(upt)); 1137 upt.type = type; 1138 1139 RTA_PUT(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); 1140 1141 return 0; 1142 1143 rtattr_failure: 1144 return -1; 1145 } 1146 1147 #else 1148 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb) 1149 { 1150 return 0; 1151 } 1152 #endif 1153 1154 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr) 1155 { 1156 struct xfrm_dump_info *sp = ptr; 1157 struct xfrm_userpolicy_info *p; 1158 struct sk_buff *in_skb = sp->in_skb; 1159 struct sk_buff *skb = sp->out_skb; 1160 struct nlmsghdr *nlh; 1161 unsigned char *b = skb->tail; 1162 1163 if (sp->this_idx < sp->start_idx) 1164 goto 
out; 1165 1166 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid, 1167 sp->nlmsg_seq, 1168 XFRM_MSG_NEWPOLICY, sizeof(*p)); 1169 p = NLMSG_DATA(nlh); 1170 nlh->nlmsg_flags = sp->nlmsg_flags; 1171 1172 copy_to_user_policy(xp, p, dir); 1173 if (copy_to_user_tmpl(xp, skb) < 0) 1174 goto nlmsg_failure; 1175 if (copy_to_user_sec_ctx(xp, skb)) 1176 goto nlmsg_failure; 1177 if (copy_to_user_policy_type(xp->type, skb) < 0) 1178 goto nlmsg_failure; 1179 1180 nlh->nlmsg_len = skb->tail - b; 1181 out: 1182 sp->this_idx++; 1183 return 0; 1184 1185 nlmsg_failure: 1186 skb_trim(skb, b - skb->data); 1187 return -1; 1188 } 1189 1190 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) 1191 { 1192 struct xfrm_dump_info info; 1193 1194 info.in_skb = cb->skb; 1195 info.out_skb = skb; 1196 info.nlmsg_seq = cb->nlh->nlmsg_seq; 1197 info.nlmsg_flags = NLM_F_MULTI; 1198 info.this_idx = 0; 1199 info.start_idx = cb->args[0]; 1200 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_one_policy, &info); 1201 #ifdef CONFIG_XFRM_SUB_POLICY 1202 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_SUB, dump_one_policy, &info); 1203 #endif 1204 cb->args[0] = info.this_idx; 1205 1206 return skb->len; 1207 } 1208 1209 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, 1210 struct xfrm_policy *xp, 1211 int dir, u32 seq) 1212 { 1213 struct xfrm_dump_info info; 1214 struct sk_buff *skb; 1215 1216 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1217 if (!skb) 1218 return ERR_PTR(-ENOMEM); 1219 1220 info.in_skb = in_skb; 1221 info.out_skb = skb; 1222 info.nlmsg_seq = seq; 1223 info.nlmsg_flags = 0; 1224 info.this_idx = info.start_idx = 0; 1225 1226 if (dump_one_policy(xp, dir, 0, &info) < 0) { 1227 kfree_skb(skb); 1228 return NULL; 1229 } 1230 1231 return skb; 1232 } 1233 1234 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 1235 struct rtattr **xfrma) 1236 { 1237 struct xfrm_policy *xp; 1238 struct xfrm_userpolicy_id *p; 1239 u8 type = XFRM_POLICY_TYPE_MAIN; 1240 int err; 1241 struct km_event c; 1242 int delete; 1243 1244 p = NLMSG_DATA(nlh); 1245 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; 1246 1247 err = copy_from_user_policy_type(&type, xfrma); 1248 if (err) 1249 return err; 1250 1251 err = verify_policy_dir(p->dir); 1252 if (err) 1253 return err; 1254 1255 if (p->index) 1256 xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err); 1257 else { 1258 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1]; 1259 struct xfrm_policy tmp; 1260 1261 err = verify_sec_ctx_len(xfrma); 1262 if (err) 1263 return err; 1264 1265 memset(&tmp, 0, sizeof(struct xfrm_policy)); 1266 if (rt) { 1267 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt); 1268 1269 if ((err = security_xfrm_policy_alloc(&tmp, uctx))) 1270 return err; 1271 } 1272 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, 1273 delete, &err); 1274 security_xfrm_policy_free(&tmp); 1275 } 1276 if (xp == NULL) 1277 return -ENOENT; 1278 1279 if (!delete) { 1280 struct sk_buff *resp_skb; 1281 1282 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq); 1283 if (IS_ERR(resp_skb)) { 1284 err = PTR_ERR(resp_skb); 1285 } else { 1286 err = netlink_unicast(xfrm_nl, resp_skb, 1287 NETLINK_CB(skb).pid, 1288 MSG_DONTWAIT); 1289 } 1290 } else { 1291 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 1292 AUDIT_MAC_IPSEC_DELSPD, err ? 
0 : 1, xp, NULL); 1293 1294 if (err != 0) 1295 goto out; 1296 1297 c.data.byid = p->index; 1298 c.event = nlh->nlmsg_type; 1299 c.seq = nlh->nlmsg_seq; 1300 c.pid = nlh->nlmsg_pid; 1301 km_policy_notify(xp, p->dir, &c); 1302 } 1303 1304 out: 1305 xfrm_pol_put(xp); 1306 return err; 1307 } 1308 1309 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 1310 struct rtattr **xfrma) 1311 { 1312 struct km_event c; 1313 struct xfrm_usersa_flush *p = NLMSG_DATA(nlh); 1314 struct xfrm_audit audit_info; 1315 1316 audit_info.loginuid = NETLINK_CB(skb).loginuid; 1317 audit_info.secid = NETLINK_CB(skb).sid; 1318 xfrm_state_flush(p->proto, &audit_info); 1319 c.data.proto = p->proto; 1320 c.event = nlh->nlmsg_type; 1321 c.seq = nlh->nlmsg_seq; 1322 c.pid = nlh->nlmsg_pid; 1323 km_state_notify(NULL, &c); 1324 1325 return 0; 1326 } 1327 1328 1329 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c) 1330 { 1331 struct xfrm_aevent_id *id; 1332 struct nlmsghdr *nlh; 1333 struct xfrm_lifetime_cur ltime; 1334 unsigned char *b = skb->tail; 1335 1336 nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id)); 1337 id = NLMSG_DATA(nlh); 1338 nlh->nlmsg_flags = 0; 1339 1340 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr)); 1341 id->sa_id.spi = x->id.spi; 1342 id->sa_id.family = x->props.family; 1343 id->sa_id.proto = x->id.proto; 1344 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr)); 1345 id->reqid = x->props.reqid; 1346 id->flags = c->data.aevent; 1347 1348 RTA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); 1349 1350 ltime.bytes = x->curlft.bytes; 1351 ltime.packets = x->curlft.packets; 1352 ltime.add_time = x->curlft.add_time; 1353 ltime.use_time = x->curlft.use_time; 1354 1355 RTA_PUT(skb, XFRMA_LTIME_VAL, sizeof(struct xfrm_lifetime_cur), <ime); 1356 1357 if (id->flags&XFRM_AE_RTHR) { 1358 RTA_PUT(skb,XFRMA_REPLAY_THRESH,sizeof(u32),&x->replay_maxdiff); 1359 } 1360 1361 if (id->flags&XFRM_AE_ETHR) { 1362 u32 etimer = x->replay_maxage*10/HZ; 1363 RTA_PUT(skb,XFRMA_ETIMER_THRESH,sizeof(u32),&etimer); 1364 } 1365 1366 nlh->nlmsg_len = skb->tail - b; 1367 return skb->len; 1368 1369 rtattr_failure: 1370 nlmsg_failure: 1371 skb_trim(skb, b - skb->data); 1372 return -1; 1373 } 1374 1375 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 1376 struct rtattr **xfrma) 1377 { 1378 struct xfrm_state *x; 1379 struct sk_buff *r_skb; 1380 int err; 1381 struct km_event c; 1382 struct xfrm_aevent_id *p = NLMSG_DATA(nlh); 1383 int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id)); 1384 struct xfrm_usersa_id *id = &p->sa_id; 1385 1386 len += RTA_SPACE(sizeof(struct xfrm_replay_state)); 1387 len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur)); 1388 1389 if (p->flags&XFRM_AE_RTHR) 1390 len+=RTA_SPACE(sizeof(u32)); 1391 1392 if (p->flags&XFRM_AE_ETHR) 1393 len+=RTA_SPACE(sizeof(u32)); 1394 1395 r_skb = alloc_skb(len, GFP_ATOMIC); 1396 if (r_skb == NULL) 1397 return -ENOMEM; 1398 1399 x = xfrm_state_lookup(&id->daddr, id->spi, id->proto, id->family); 1400 if (x == NULL) { 1401 kfree_skb(r_skb); 1402 return -ESRCH; 1403 } 1404 1405 /* 1406 * XXX: is this lock really needed - none of the other 1407 * gets lock (the concern is things getting updated 1408 * while we are still reading) - jhs 1409 */ 1410 spin_lock_bh(&x->lock); 1411 c.data.aevent = p->flags; 1412 c.seq = nlh->nlmsg_seq; 1413 c.pid = nlh->nlmsg_pid; 1414 1415 if (build_aevent(r_skb, x, &c) < 0) 1416 BUG(); 1417 err = netlink_unicast(xfrm_nl, r_skb, 1418 NETLINK_CB(skb).pid, 
MSG_DONTWAIT); 1419 spin_unlock_bh(&x->lock); 1420 xfrm_state_put(x); 1421 return err; 1422 } 1423 1424 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 1425 struct rtattr **xfrma) 1426 { 1427 struct xfrm_state *x; 1428 struct km_event c; 1429 int err = - EINVAL; 1430 struct xfrm_aevent_id *p = NLMSG_DATA(nlh); 1431 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1]; 1432 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1]; 1433 1434 if (!lt && !rp) 1435 return err; 1436 1437 /* pedantic mode - thou shalt sayeth replaceth */ 1438 if (!(nlh->nlmsg_flags&NLM_F_REPLACE)) 1439 return err; 1440 1441 x = xfrm_state_lookup(&p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family); 1442 if (x == NULL) 1443 return -ESRCH; 1444 1445 if (x->km.state != XFRM_STATE_VALID) 1446 goto out; 1447 1448 spin_lock_bh(&x->lock); 1449 err = xfrm_update_ae_params(x, xfrma); 1450 spin_unlock_bh(&x->lock); 1451 if (err < 0) 1452 goto out; 1453 1454 c.event = nlh->nlmsg_type; 1455 c.seq = nlh->nlmsg_seq; 1456 c.pid = nlh->nlmsg_pid; 1457 c.data.aevent = XFRM_AE_CU; 1458 km_state_notify(x, &c); 1459 err = 0; 1460 out: 1461 xfrm_state_put(x); 1462 return err; 1463 } 1464 1465 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 1466 struct rtattr **xfrma) 1467 { 1468 struct km_event c; 1469 u8 type = XFRM_POLICY_TYPE_MAIN; 1470 int err; 1471 struct xfrm_audit audit_info; 1472 1473 err = copy_from_user_policy_type(&type, xfrma); 1474 if (err) 1475 return err; 1476 1477 audit_info.loginuid = NETLINK_CB(skb).loginuid; 1478 audit_info.secid = NETLINK_CB(skb).sid; 1479 xfrm_policy_flush(type, &audit_info); 1480 c.data.type = type; 1481 c.event = nlh->nlmsg_type; 1482 c.seq = nlh->nlmsg_seq; 1483 c.pid = nlh->nlmsg_pid; 1484 km_policy_notify(NULL, 0, &c); 1485 return 0; 1486 } 1487 1488 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, 1489 struct rtattr **xfrma) 1490 { 1491 struct xfrm_policy *xp; 1492 struct xfrm_user_polexpire *up = NLMSG_DATA(nlh); 1493 struct xfrm_userpolicy_info *p = &up->pol; 1494 u8 type = XFRM_POLICY_TYPE_MAIN; 1495 int err = -ENOENT; 1496 1497 err = copy_from_user_policy_type(&type, xfrma); 1498 if (err) 1499 return err; 1500 1501 if (p->index) 1502 xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err); 1503 else { 1504 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1]; 1505 struct xfrm_policy tmp; 1506 1507 err = verify_sec_ctx_len(xfrma); 1508 if (err) 1509 return err; 1510 1511 memset(&tmp, 0, sizeof(struct xfrm_policy)); 1512 if (rt) { 1513 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt); 1514 1515 if ((err = security_xfrm_policy_alloc(&tmp, uctx))) 1516 return err; 1517 } 1518 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, 1519 0, &err); 1520 security_xfrm_policy_free(&tmp); 1521 } 1522 1523 if (xp == NULL) 1524 return -ENOENT; 1525 read_lock(&xp->lock); 1526 if (xp->dead) { 1527 read_unlock(&xp->lock); 1528 goto out; 1529 } 1530 1531 read_unlock(&xp->lock); 1532 err = 0; 1533 if (up->hard) { 1534 xfrm_policy_delete(xp, p->dir); 1535 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 1536 AUDIT_MAC_IPSEC_DELSPD, 1, xp, NULL); 1537 1538 } else { 1539 // reset the timers here? 
1540 printk("Dont know what to do with soft policy expire\n"); 1541 } 1542 km_policy_expired(xp, p->dir, up->hard, current->pid); 1543 1544 out: 1545 xfrm_pol_put(xp); 1546 return err; 1547 } 1548 1549 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, 1550 struct rtattr **xfrma) 1551 { 1552 struct xfrm_state *x; 1553 int err; 1554 struct xfrm_user_expire *ue = NLMSG_DATA(nlh); 1555 struct xfrm_usersa_info *p = &ue->state; 1556 1557 x = xfrm_state_lookup(&p->id.daddr, p->id.spi, p->id.proto, p->family); 1558 1559 err = -ENOENT; 1560 if (x == NULL) 1561 return err; 1562 1563 spin_lock_bh(&x->lock); 1564 err = -EINVAL; 1565 if (x->km.state != XFRM_STATE_VALID) 1566 goto out; 1567 km_state_expired(x, ue->hard, current->pid); 1568 1569 if (ue->hard) { 1570 __xfrm_state_delete(x); 1571 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 1572 AUDIT_MAC_IPSEC_DELSA, 1, NULL, x); 1573 } 1574 err = 0; 1575 out: 1576 spin_unlock_bh(&x->lock); 1577 xfrm_state_put(x); 1578 return err; 1579 } 1580 1581 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, 1582 struct rtattr **xfrma) 1583 { 1584 struct xfrm_policy *xp; 1585 struct xfrm_user_tmpl *ut; 1586 int i; 1587 struct rtattr *rt = xfrma[XFRMA_TMPL-1]; 1588 1589 struct xfrm_user_acquire *ua = NLMSG_DATA(nlh); 1590 struct xfrm_state *x = xfrm_state_alloc(); 1591 int err = -ENOMEM; 1592 1593 if (!x) 1594 return err; 1595 1596 err = verify_newpolicy_info(&ua->policy); 1597 if (err) { 1598 printk("BAD policy passed\n"); 1599 kfree(x); 1600 return err; 1601 } 1602 1603 /* build an XP */ 1604 xp = xfrm_policy_construct(&ua->policy, (struct rtattr **) xfrma, &err); 1605 if (!xp) { 1606 kfree(x); 1607 return err; 1608 } 1609 1610 memcpy(&x->id, &ua->id, sizeof(ua->id)); 1611 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr)); 1612 memcpy(&x->sel, &ua->sel, sizeof(ua->sel)); 1613 1614 ut = RTA_DATA(rt); 1615 /* extract the templates and for each call km_key */ 1616 for (i = 0; i < xp->xfrm_nr; i++, ut++) { 1617 struct xfrm_tmpl *t = &xp->xfrm_vec[i]; 1618 memcpy(&x->id, &t->id, sizeof(x->id)); 1619 x->props.mode = t->mode; 1620 x->props.reqid = t->reqid; 1621 x->props.family = ut->family; 1622 t->aalgos = ua->aalgos; 1623 t->ealgos = ua->ealgos; 1624 t->calgos = ua->calgos; 1625 err = km_query(x, t, xp); 1626 1627 } 1628 1629 kfree(x); 1630 kfree(xp); 1631 1632 return 0; 1633 } 1634 1635 #ifdef CONFIG_XFRM_MIGRATE 1636 static int verify_user_migrate(struct rtattr **xfrma) 1637 { 1638 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1]; 1639 struct xfrm_user_migrate *um; 1640 1641 if (!rt) 1642 return -EINVAL; 1643 1644 if ((rt->rta_len - sizeof(*rt)) < sizeof(*um)) 1645 return -EINVAL; 1646 1647 return 0; 1648 } 1649 1650 static int copy_from_user_migrate(struct xfrm_migrate *ma, 1651 struct rtattr **xfrma, int *num) 1652 { 1653 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1]; 1654 struct xfrm_user_migrate *um; 1655 int i, num_migrate; 1656 1657 um = RTA_DATA(rt); 1658 num_migrate = (rt->rta_len - sizeof(*rt)) / sizeof(*um); 1659 1660 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) 1661 return -EINVAL; 1662 1663 for (i = 0; i < num_migrate; i++, um++, ma++) { 1664 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr)); 1665 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr)); 1666 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr)); 1667 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr)); 1668 1669 ma->proto = um->proto; 1670 ma->mode = um->mode; 1671 ma->reqid = um->reqid; 
1672 1673 ma->old_family = um->old_family; 1674 ma->new_family = um->new_family; 1675 } 1676 1677 *num = i; 1678 return 0; 1679 } 1680 1681 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, 1682 struct rtattr **xfrma) 1683 { 1684 struct xfrm_userpolicy_id *pi = NLMSG_DATA(nlh); 1685 struct xfrm_migrate m[XFRM_MAX_DEPTH]; 1686 u8 type; 1687 int err; 1688 int n = 0; 1689 1690 err = verify_user_migrate((struct rtattr **)xfrma); 1691 if (err) 1692 return err; 1693 1694 err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); 1695 if (err) 1696 return err; 1697 1698 err = copy_from_user_migrate((struct xfrm_migrate *)m, 1699 (struct rtattr **)xfrma, &n); 1700 if (err) 1701 return err; 1702 1703 if (!n) 1704 return 0; 1705 1706 xfrm_migrate(&pi->sel, pi->dir, type, m, n); 1707 1708 return 0; 1709 } 1710 #else 1711 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, 1712 struct rtattr **xfrma) 1713 { 1714 return -ENOPROTOOPT; 1715 } 1716 #endif 1717 1718 #ifdef CONFIG_XFRM_MIGRATE 1719 static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb) 1720 { 1721 struct xfrm_user_migrate um; 1722 1723 memset(&um, 0, sizeof(um)); 1724 um.proto = m->proto; 1725 um.mode = m->mode; 1726 um.reqid = m->reqid; 1727 um.old_family = m->old_family; 1728 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr)); 1729 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr)); 1730 um.new_family = m->new_family; 1731 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr)); 1732 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr)); 1733 1734 RTA_PUT(skb, XFRMA_MIGRATE, sizeof(um), &um); 1735 return 0; 1736 1737 rtattr_failure: 1738 return -1; 1739 } 1740 1741 static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m, 1742 int num_migrate, struct xfrm_selector *sel, 1743 u8 dir, u8 type) 1744 { 1745 struct xfrm_migrate *mp; 1746 struct xfrm_userpolicy_id *pol_id; 1747 struct nlmsghdr *nlh; 1748 unsigned char *b = skb->tail; 1749 int i; 1750 1751 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id)); 1752 pol_id = NLMSG_DATA(nlh); 1753 nlh->nlmsg_flags = 0; 1754 1755 /* copy data from selector, dir, and type to the pol_id */ 1756 memset(pol_id, 0, sizeof(*pol_id)); 1757 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel)); 1758 pol_id->dir = dir; 1759 1760 if (copy_to_user_policy_type(type, skb) < 0) 1761 goto nlmsg_failure; 1762 1763 for (i = 0, mp = m ; i < num_migrate; i++, mp++) { 1764 if (copy_to_user_migrate(mp, skb) < 0) 1765 goto nlmsg_failure; 1766 } 1767 1768 nlh->nlmsg_len = skb->tail - b; 1769 return skb->len; 1770 nlmsg_failure: 1771 skb_trim(skb, b - skb->data); 1772 return -1; 1773 } 1774 1775 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type, 1776 struct xfrm_migrate *m, int num_migrate) 1777 { 1778 struct sk_buff *skb; 1779 size_t len; 1780 1781 len = RTA_SPACE(sizeof(struct xfrm_user_migrate) * num_migrate); 1782 len += NLMSG_SPACE(sizeof(struct xfrm_userpolicy_id)); 1783 #ifdef CONFIG_XFRM_SUB_POLICY 1784 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); 1785 #endif 1786 skb = alloc_skb(len, GFP_ATOMIC); 1787 if (skb == NULL) 1788 return -ENOMEM; 1789 1790 /* build migrate */ 1791 if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0) 1792 BUG(); 1793 1794 NETLINK_CB(skb).dst_group = XFRMNLGRP_MIGRATE; 1795 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE, 1796 GFP_ATOMIC); 1797 } 1798 #else 1799 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type, 
1800 struct xfrm_migrate *m, int num_migrate) 1801 { 1802 return -ENOPROTOOPT; 1803 } 1804 #endif 1805 1806 #define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type)) 1807 1808 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { 1809 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), 1810 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 1811 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 1812 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), 1813 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 1814 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 1815 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info), 1816 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire), 1817 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire), 1818 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), 1819 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), 1820 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), 1821 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), 1822 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = NLMSG_LENGTH(0), 1823 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), 1824 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), 1825 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), 1826 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 1827 }; 1828 1829 #undef XMSGSIZE 1830 1831 static struct xfrm_link { 1832 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct rtattr **); 1833 int (*dump)(struct sk_buff *, struct netlink_callback *); 1834 } xfrm_dispatch[XFRM_NR_MSGTYPES] = { 1835 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, 1836 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, 1837 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa, 1838 .dump = xfrm_dump_sa }, 1839 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 1840 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, 1841 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, 1842 .dump = xfrm_dump_policy }, 1843 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, 1844 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire }, 1845 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire }, 1846 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 1847 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, 1848 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire}, 1849 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa }, 1850 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy }, 1851 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae }, 1852 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae }, 1853 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate }, 1854 }; 1855 1856 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) 1857 { 1858 struct rtattr *xfrma[XFRMA_MAX]; 1859 struct xfrm_link *link; 1860 int type, min_len; 1861 1862 if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) 1863 return 0; 1864 1865 type = nlh->nlmsg_type; 1866 1867 /* A control message: ignore them */ 1868 if (type < XFRM_MSG_BASE) 1869 return 0; 1870 1871 /* Unknown message: reply with EINVAL */ 1872 if (type > XFRM_MSG_MAX) 1873 goto err_einval; 1874 1875 type -= XFRM_MSG_BASE; 1876 link = &xfrm_dispatch[type]; 
1877 1878 /* All operations require privileges, even GET */ 1879 if (security_netlink_recv(skb, CAP_NET_ADMIN)) { 1880 *errp = -EPERM; 1881 return -1; 1882 } 1883 1884 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || 1885 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && 1886 (nlh->nlmsg_flags & NLM_F_DUMP)) { 1887 if (link->dump == NULL) 1888 goto err_einval; 1889 1890 if ((*errp = netlink_dump_start(xfrm_nl, skb, nlh, 1891 link->dump, NULL)) != 0) { 1892 return -1; 1893 } 1894 1895 netlink_queue_skip(nlh, skb); 1896 return -1; 1897 } 1898 1899 memset(xfrma, 0, sizeof(xfrma)); 1900 1901 if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type])) 1902 goto err_einval; 1903 1904 if (nlh->nlmsg_len > min_len) { 1905 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); 1906 struct rtattr *attr = (void *) nlh + NLMSG_ALIGN(min_len); 1907 1908 while (RTA_OK(attr, attrlen)) { 1909 unsigned short flavor = attr->rta_type; 1910 if (flavor) { 1911 if (flavor > XFRMA_MAX) 1912 goto err_einval; 1913 xfrma[flavor - 1] = attr; 1914 } 1915 attr = RTA_NEXT(attr, attrlen); 1916 } 1917 } 1918 1919 if (link->doit == NULL) 1920 goto err_einval; 1921 *errp = link->doit(skb, nlh, xfrma); 1922 1923 return *errp; 1924 1925 err_einval: 1926 *errp = -EINVAL; 1927 return -1; 1928 } 1929 1930 static void xfrm_netlink_rcv(struct sock *sk, int len) 1931 { 1932 unsigned int qlen = 0; 1933 1934 do { 1935 mutex_lock(&xfrm_cfg_mutex); 1936 netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg); 1937 mutex_unlock(&xfrm_cfg_mutex); 1938 1939 } while (qlen); 1940 } 1941 1942 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c) 1943 { 1944 struct xfrm_user_expire *ue; 1945 struct nlmsghdr *nlh; 1946 unsigned char *b = skb->tail; 1947 1948 nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_EXPIRE, 1949 sizeof(*ue)); 1950 ue = NLMSG_DATA(nlh); 1951 nlh->nlmsg_flags = 0; 1952 1953 copy_to_user_state(x, &ue->state); 1954 ue->hard = (c->data.hard != 0) ? 
1 : 0; 1955 1956 nlh->nlmsg_len = skb->tail - b; 1957 return skb->len; 1958 1959 nlmsg_failure: 1960 skb_trim(skb, b - skb->data); 1961 return -1; 1962 } 1963 1964 static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c) 1965 { 1966 struct sk_buff *skb; 1967 int len = NLMSG_LENGTH(sizeof(struct xfrm_user_expire)); 1968 1969 skb = alloc_skb(len, GFP_ATOMIC); 1970 if (skb == NULL) 1971 return -ENOMEM; 1972 1973 if (build_expire(skb, x, c) < 0) 1974 BUG(); 1975 1976 NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE; 1977 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); 1978 } 1979 1980 static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c) 1981 { 1982 struct sk_buff *skb; 1983 int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id)); 1984 1985 len += RTA_SPACE(sizeof(struct xfrm_replay_state)); 1986 len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur)); 1987 skb = alloc_skb(len, GFP_ATOMIC); 1988 if (skb == NULL) 1989 return -ENOMEM; 1990 1991 if (build_aevent(skb, x, c) < 0) 1992 BUG(); 1993 1994 NETLINK_CB(skb).dst_group = XFRMNLGRP_AEVENTS; 1995 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC); 1996 } 1997 1998 static int xfrm_notify_sa_flush(struct km_event *c) 1999 { 2000 struct xfrm_usersa_flush *p; 2001 struct nlmsghdr *nlh; 2002 struct sk_buff *skb; 2003 unsigned char *b; 2004 int len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)); 2005 2006 skb = alloc_skb(len, GFP_ATOMIC); 2007 if (skb == NULL) 2008 return -ENOMEM; 2009 b = skb->tail; 2010 2011 nlh = NLMSG_PUT(skb, c->pid, c->seq, 2012 XFRM_MSG_FLUSHSA, sizeof(*p)); 2013 nlh->nlmsg_flags = 0; 2014 2015 p = NLMSG_DATA(nlh); 2016 p->proto = c->data.proto; 2017 2018 nlh->nlmsg_len = skb->tail - b; 2019 2020 NETLINK_CB(skb).dst_group = XFRMNLGRP_SA; 2021 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); 2022 2023 nlmsg_failure: 2024 kfree_skb(skb); 2025 return -1; 2026 } 2027 2028 static int inline xfrm_sa_len(struct xfrm_state *x) 2029 { 2030 int l = 0; 2031 if (x->aalg) 2032 l += RTA_SPACE(sizeof(*x->aalg) + (x->aalg->alg_key_len+7)/8); 2033 if (x->ealg) 2034 l += RTA_SPACE(sizeof(*x->ealg) + (x->ealg->alg_key_len+7)/8); 2035 if (x->calg) 2036 l += RTA_SPACE(sizeof(*x->calg)); 2037 if (x->encap) 2038 l += RTA_SPACE(sizeof(*x->encap)); 2039 2040 return l; 2041 } 2042 2043 static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c) 2044 { 2045 struct xfrm_usersa_info *p; 2046 struct xfrm_usersa_id *id; 2047 struct nlmsghdr *nlh; 2048 struct sk_buff *skb; 2049 unsigned char *b; 2050 int len = xfrm_sa_len(x); 2051 int headlen; 2052 2053 headlen = sizeof(*p); 2054 if (c->event == XFRM_MSG_DELSA) { 2055 len += RTA_SPACE(headlen); 2056 headlen = sizeof(*id); 2057 } 2058 len += NLMSG_SPACE(headlen); 2059 2060 skb = alloc_skb(len, GFP_ATOMIC); 2061 if (skb == NULL) 2062 return -ENOMEM; 2063 b = skb->tail; 2064 2065 nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen); 2066 nlh->nlmsg_flags = 0; 2067 2068 p = NLMSG_DATA(nlh); 2069 if (c->event == XFRM_MSG_DELSA) { 2070 id = NLMSG_DATA(nlh); 2071 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr)); 2072 id->spi = x->id.spi; 2073 id->family = x->props.family; 2074 id->proto = x->id.proto; 2075 2076 p = RTA_DATA(__RTA_PUT(skb, XFRMA_SA, sizeof(*p))); 2077 } 2078 2079 copy_to_user_state(x, p); 2080 2081 if (x->aalg) 2082 RTA_PUT(skb, XFRMA_ALG_AUTH, 2083 sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg); 2084 if (x->ealg) 2085 RTA_PUT(skb, XFRMA_ALG_CRYPT, 2086 
sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg); 2087 if (x->calg) 2088 RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg); 2089 2090 if (x->encap) 2091 RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); 2092 2093 nlh->nlmsg_len = skb->tail - b; 2094 2095 NETLINK_CB(skb).dst_group = XFRMNLGRP_SA; 2096 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); 2097 2098 nlmsg_failure: 2099 rtattr_failure: 2100 kfree_skb(skb); 2101 return -1; 2102 } 2103 2104 static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c) 2105 { 2106 2107 switch (c->event) { 2108 case XFRM_MSG_EXPIRE: 2109 return xfrm_exp_state_notify(x, c); 2110 case XFRM_MSG_NEWAE: 2111 return xfrm_aevent_state_notify(x, c); 2112 case XFRM_MSG_DELSA: 2113 case XFRM_MSG_UPDSA: 2114 case XFRM_MSG_NEWSA: 2115 return xfrm_notify_sa(x, c); 2116 case XFRM_MSG_FLUSHSA: 2117 return xfrm_notify_sa_flush(c); 2118 default: 2119 printk("xfrm_user: Unknown SA event %d\n", c->event); 2120 break; 2121 } 2122 2123 return 0; 2124 2125 } 2126 2127 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, 2128 struct xfrm_tmpl *xt, struct xfrm_policy *xp, 2129 int dir) 2130 { 2131 struct xfrm_user_acquire *ua; 2132 struct nlmsghdr *nlh; 2133 unsigned char *b = skb->tail; 2134 __u32 seq = xfrm_get_acqseq(); 2135 2136 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE, 2137 sizeof(*ua)); 2138 ua = NLMSG_DATA(nlh); 2139 nlh->nlmsg_flags = 0; 2140 2141 memcpy(&ua->id, &x->id, sizeof(ua->id)); 2142 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr)); 2143 memcpy(&ua->sel, &x->sel, sizeof(ua->sel)); 2144 copy_to_user_policy(xp, &ua->policy, dir); 2145 ua->aalgos = xt->aalgos; 2146 ua->ealgos = xt->ealgos; 2147 ua->calgos = xt->calgos; 2148 ua->seq = x->km.seq = seq; 2149 2150 if (copy_to_user_tmpl(xp, skb) < 0) 2151 goto nlmsg_failure; 2152 if (copy_to_user_state_sec_ctx(x, skb)) 2153 goto nlmsg_failure; 2154 if (copy_to_user_policy_type(xp->type, skb) < 0) 2155 goto nlmsg_failure; 2156 2157 nlh->nlmsg_len = skb->tail - b; 2158 return skb->len; 2159 2160 nlmsg_failure: 2161 skb_trim(skb, b - skb->data); 2162 return -1; 2163 } 2164 2165 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, 2166 struct xfrm_policy *xp, int dir) 2167 { 2168 struct sk_buff *skb; 2169 size_t len; 2170 2171 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 2172 len += NLMSG_SPACE(sizeof(struct xfrm_user_acquire)); 2173 len += RTA_SPACE(xfrm_user_sec_ctx_size(xp)); 2174 #ifdef CONFIG_XFRM_SUB_POLICY 2175 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); 2176 #endif 2177 skb = alloc_skb(len, GFP_ATOMIC); 2178 if (skb == NULL) 2179 return -ENOMEM; 2180 2181 if (build_acquire(skb, x, xt, xp, dir) < 0) 2182 BUG(); 2183 2184 NETLINK_CB(skb).dst_group = XFRMNLGRP_ACQUIRE; 2185 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC); 2186 } 2187 2188 /* User gives us xfrm_user_policy_info followed by an array of 0 2189 * or more templates. 
2190 */ 2191 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt, 2192 u8 *data, int len, int *dir) 2193 { 2194 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data; 2195 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1); 2196 struct xfrm_policy *xp; 2197 int nr; 2198 2199 switch (sk->sk_family) { 2200 case AF_INET: 2201 if (opt != IP_XFRM_POLICY) { 2202 *dir = -EOPNOTSUPP; 2203 return NULL; 2204 } 2205 break; 2206 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 2207 case AF_INET6: 2208 if (opt != IPV6_XFRM_POLICY) { 2209 *dir = -EOPNOTSUPP; 2210 return NULL; 2211 } 2212 break; 2213 #endif 2214 default: 2215 *dir = -EINVAL; 2216 return NULL; 2217 } 2218 2219 *dir = -EINVAL; 2220 2221 if (len < sizeof(*p) || 2222 verify_newpolicy_info(p)) 2223 return NULL; 2224 2225 nr = ((len - sizeof(*p)) / sizeof(*ut)); 2226 if (validate_tmpl(nr, ut, p->sel.family)) 2227 return NULL; 2228 2229 if (p->dir > XFRM_POLICY_OUT) 2230 return NULL; 2231 2232 xp = xfrm_policy_alloc(GFP_KERNEL); 2233 if (xp == NULL) { 2234 *dir = -ENOBUFS; 2235 return NULL; 2236 } 2237 2238 copy_from_user_policy(xp, p); 2239 xp->type = XFRM_POLICY_TYPE_MAIN; 2240 copy_templates(xp, ut, nr); 2241 2242 *dir = p->dir; 2243 2244 return xp; 2245 } 2246 2247 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, 2248 int dir, struct km_event *c) 2249 { 2250 struct xfrm_user_polexpire *upe; 2251 struct nlmsghdr *nlh; 2252 int hard = c->data.hard; 2253 unsigned char *b = skb->tail; 2254 2255 nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe)); 2256 upe = NLMSG_DATA(nlh); 2257 nlh->nlmsg_flags = 0; 2258 2259 copy_to_user_policy(xp, &upe->pol, dir); 2260 if (copy_to_user_tmpl(xp, skb) < 0) 2261 goto nlmsg_failure; 2262 if (copy_to_user_sec_ctx(xp, skb)) 2263 goto nlmsg_failure; 2264 if (copy_to_user_policy_type(xp->type, skb) < 0) 2265 goto nlmsg_failure; 2266 upe->hard = !!hard; 2267 2268 nlh->nlmsg_len = skb->tail - b; 2269 return skb->len; 2270 2271 nlmsg_failure: 2272 skb_trim(skb, b - skb->data); 2273 return -1; 2274 } 2275 2276 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c) 2277 { 2278 struct sk_buff *skb; 2279 size_t len; 2280 2281 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 2282 len += NLMSG_SPACE(sizeof(struct xfrm_user_polexpire)); 2283 len += RTA_SPACE(xfrm_user_sec_ctx_size(xp)); 2284 #ifdef CONFIG_XFRM_SUB_POLICY 2285 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); 2286 #endif 2287 skb = alloc_skb(len, GFP_ATOMIC); 2288 if (skb == NULL) 2289 return -ENOMEM; 2290 2291 if (build_polexpire(skb, xp, dir, c) < 0) 2292 BUG(); 2293 2294 NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE; 2295 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); 2296 } 2297 2298 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c) 2299 { 2300 struct xfrm_userpolicy_info *p; 2301 struct xfrm_userpolicy_id *id; 2302 struct nlmsghdr *nlh; 2303 struct sk_buff *skb; 2304 unsigned char *b; 2305 int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 2306 int headlen; 2307 2308 headlen = sizeof(*p); 2309 if (c->event == XFRM_MSG_DELPOLICY) { 2310 len += RTA_SPACE(headlen); 2311 headlen = sizeof(*id); 2312 } 2313 #ifdef CONFIG_XFRM_SUB_POLICY 2314 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); 2315 #endif 2316 len += NLMSG_SPACE(headlen); 2317 2318 skb = alloc_skb(len, GFP_ATOMIC); 2319 if (skb == NULL) 2320 return -ENOMEM; 2321 b = skb->tail; 2322 
static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
			   int dir, struct km_event *c)
{
	struct xfrm_user_polexpire *upe;
	struct nlmsghdr *nlh;
	int hard = c->data.hard;
	unsigned char *b = skb->tail;

	nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe));
	upe = NLMSG_DATA(nlh);
	nlh->nlmsg_flags = 0;

	copy_to_user_policy(xp, &upe->pol, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_sec_ctx(xp, skb))
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;
	upe->hard = !!hard;

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct sk_buff *skb;
	size_t len;

	len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	len += NLMSG_SPACE(sizeof(struct xfrm_user_polexpire));
	len += RTA_SPACE(xfrm_user_sec_ctx_size(xp));
#ifdef CONFIG_XFRM_SUB_POLICY
	len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
#endif
	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_polexpire(skb, xp, dir, c) < 0)
		BUG();

	NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE;
	return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_userpolicy_info *p;
	struct xfrm_userpolicy_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	unsigned char *b;
	int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	int headlen;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELPOLICY) {
		len += RTA_SPACE(headlen);
		headlen = sizeof(*id);
	}
#ifdef CONFIG_XFRM_SUB_POLICY
	len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
#endif
	len += NLMSG_SPACE(headlen);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;
	b = skb->tail;

	nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen);

	p = NLMSG_DATA(nlh);
	if (c->event == XFRM_MSG_DELPOLICY) {
		/* Delete notifications carry xfrm_userpolicy_id in the
		 * header; the full policy follows as an XFRMA_POLICY
		 * attribute. */
		id = NLMSG_DATA(nlh);
		memset(id, 0, sizeof(*id));
		id->dir = dir;
		if (c->data.byid)
			id->index = xp->index;
		else
			memcpy(&id->sel, &xp->selector, sizeof(id->sel));

		p = RTA_DATA(__RTA_PUT(skb, XFRMA_POLICY, sizeof(*p)));
	}

	nlh->nlmsg_flags = 0;

	copy_to_user_policy(xp, p, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;

	nlh->nlmsg_len = skb->tail - b;

	NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY;
	return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

nlmsg_failure:
rtattr_failure:
	kfree_skb(skb);
	return -1;
}

static int xfrm_notify_policy_flush(struct km_event *c)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	unsigned char *b;
	int len = 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
#endif
	len += NLMSG_LENGTH(0);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;
	b = skb->tail;

	nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0);
	nlh->nlmsg_flags = 0;
	if (copy_to_user_policy_type(c->data.type, skb) < 0)
		goto nlmsg_failure;

	nlh->nlmsg_len = skb->tail - b;

	NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY;
	return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	switch (c->event) {
	case XFRM_MSG_NEWPOLICY:
	case XFRM_MSG_UPDPOLICY:
	case XFRM_MSG_DELPOLICY:
		return xfrm_notify_policy(xp, dir, c);
	case XFRM_MSG_FLUSHPOLICY:
		return xfrm_notify_policy_flush(c);
	case XFRM_MSG_POLEXPIRE:
		return xfrm_exp_policy_notify(xp, dir, c);
	default:
		printk("xfrm_user: Unknown Policy event %d\n", c->event);
	}

	return 0;
}

static int build_report(struct sk_buff *skb, u8 proto,
			struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct xfrm_user_report *ur;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;

	nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur));
	ur = NLMSG_DATA(nlh);
	nlh->nlmsg_flags = 0;

	ur->proto = proto;
	memcpy(&ur->sel, sel, sizeof(ur->sel));

	if (addr)
		RTA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int xfrm_send_report(u8 proto, struct xfrm_selector *sel,
			    xfrm_address_t *addr)
{
	struct sk_buff *skb;
	size_t len;

	len = NLMSG_ALIGN(NLMSG_LENGTH(sizeof(struct xfrm_user_report)));
	/* leave room for the optional XFRMA_COADDR attribute, otherwise
	 * RTA_PUT() in build_report() fails and we hit the BUG() below */
	if (addr)
		len += RTA_SPACE(sizeof(*addr));
	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_report(skb, proto, sel, addr) < 0)
		BUG();

	NETLINK_CB(skb).dst_group = XFRMNLGRP_REPORT;
	return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
}
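
/*
 * One more illustrative userspace fragment (under "#if 0"): decoding
 * the policy notifications emitted by xfrm_notify_policy() above.
 * XFRM_MSG_NEWPOLICY/UPDPOLICY carry xfrm_userpolicy_info directly in
 * the message payload, while XFRM_MSG_DELPOLICY carries an
 * xfrm_userpolicy_id and supplies the full policy as an XFRMA_POLICY
 * attribute.  The function name decode_policy_event() is a placeholder.
 */
#if 0
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/xfrm.h>

static void decode_policy_event(struct nlmsghdr *nlh)
{
	struct xfrm_userpolicy_info *pol = NULL;
	struct xfrm_userpolicy_id *id = NULL;
	struct rtattr *rta;
	int len;

	if (nlh->nlmsg_type == XFRM_MSG_DELPOLICY) {
		id = NLMSG_DATA(nlh);

		/* Attributes start after the aligned fixed header. */
		rta = (struct rtattr *)((char *)NLMSG_DATA(nlh) +
					NLMSG_ALIGN(sizeof(*id)));
		len = NLMSG_PAYLOAD(nlh, sizeof(*id));
		for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len))
			if (rta->rta_type == XFRMA_POLICY)
				pol = RTA_DATA(rta);
	} else {
		pol = NLMSG_DATA(nlh);
	}

	/* ... update local state from id/pol, walk XFRMA_TMPL, etc. ... */
	(void)id;
	(void)pol;
}
#endif
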
static struct xfrm_mgr netlink_mgr = {
	.id		= "netlink",
	.notify		= xfrm_send_state_notify,
	.acquire	= xfrm_send_acquire,
	.compile_policy	= xfrm_compile_policy,
	.notify_policy	= xfrm_send_policy_notify,
	.report		= xfrm_send_report,
	.migrate	= xfrm_send_migrate,
};

static int __init xfrm_user_init(void)
{
	struct sock *nlsk;

	printk(KERN_INFO "Initializing XFRM netlink socket\n");

	nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX,
				     xfrm_netlink_rcv, THIS_MODULE);
	if (nlsk == NULL)
		return -ENOMEM;
	rcu_assign_pointer(xfrm_nl, nlsk);

	xfrm_register_km(&netlink_mgr);

	return 0;
}

static void __exit xfrm_user_exit(void)
{
	struct sock *nlsk = xfrm_nl;

	xfrm_unregister_km(&netlink_mgr);
	rcu_assign_pointer(xfrm_nl, NULL);
	synchronize_rcu();
	sock_release(nlsk->sk_socket);
}

module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);