/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>
#include <net/xdp.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev; /* NULL when bound-only */
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
					     struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_free;
	}

	if (offdev)
		list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	return 0;

err_free:
	kfree(ondev);
	return err;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

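	/* ndo_bpf is dispatched under RTNL; the assertion below documents
	 * that every caller (map alloc/free, netdev unregister) already
	 * holds it.
	 */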
	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
						struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev = NULL;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		return;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));

	/* Try to move the objects to another netdev of the device */
	if (offdev) {
		list_del(&ondev->offdev_netdevs);
		altdev = list_first_entry_or_null(&offdev->netdevs,
						  struct bpf_offload_netdev,
						  offdev_netdevs);
	}

	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
}

static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;
	offload->netdev = netdev;

	ondev = bpf_offload_find_netdev(offload->netdev);
	/* When program is offloaded require presence of "true"
	 * bpf_offload_netdev, avoid the one created for !ondev case below.
	 */
	if (bpf_prog_is_offloaded(prog->aux) && (!ondev || !ondev->offdev)) {
		err = -EINVAL;
		goto err_free;
	}
	if (!ondev) {
		/* When only binding to the device, explicitly
		 * create an entry in the hashtable.
		 */
		err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
		if (err)
			goto err_free;
		ondev = bpf_offload_find_netdev(offload->netdev);
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);

	return 0;
err_free:
	kfree(offload);
	return err;
}

int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct net_device *netdev;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags & ~(BPF_F_XDP_DEV_BOUND_ONLY | BPF_F_XDP_HAS_FRAGS))
		return -EINVAL;

	/* Frags are allowed only if program is dev-bound-only, but not
	 * if it is requesting bpf offload.
	 */
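	/* Illustrative flag combinations, following the checks around this
	 * comment (not exhaustive):
	 *
	 *	XDP + BPF_F_XDP_DEV_BOUND_ONLY			-> dev-bound
	 *	XDP + BPF_F_XDP_DEV_BOUND_ONLY |
	 *	      BPF_F_XDP_HAS_FRAGS			-> dev-bound, frags ok
	 *	XDP + BPF_F_XDP_HAS_FRAGS (offload requested)	-> -EINVAL
	 *	SCHED_CLS + BPF_F_XDP_DEV_BOUND_ONLY		-> -EINVAL
	 */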
	if (attr->prog_flags & BPF_F_XDP_HAS_FRAGS &&
	    !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY))
		return -EINVAL;

	if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
	if (!netdev)
		return -EINVAL;

	err = bpf_dev_offload_check(netdev);
	if (err)
		goto out;

	prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

	down_write(&bpf_devs_lock);
	err = __bpf_prog_dev_bound_init(prog, netdev);
	up_write(&bpf_devs_lock);

out:
	dev_put(netdev);
	return err;
}

int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
{
	int err;

	if (!bpf_prog_is_dev_bound(old_prog->aux))
		return 0;

	if (bpf_prog_is_offloaded(old_prog->aux))
		return -EINVAL;

	new_prog->aux->dev_bound = old_prog->aux->dev_bound;
	new_prog->aux->offload_requested = old_prog->aux->offload_requested;

	down_write(&bpf_devs_lock);
	if (!old_prog->aux->offload) {
		err = -EINVAL;
		goto out;
	}

	err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);

out:
	up_write(&bpf_devs_lock);
	return err;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

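/* Detach @prog from its netdev; if this was the last program on a bound-only
 * (no offdev) entry, also drop the implicit bpf_offload_netdev that
 * __bpf_prog_dev_bound_init() created for it.
 */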
void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
	struct bpf_offload_netdev *ondev;
	struct net_device *netdev;

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload) {
		list_del_init(&prog->aux->offload->offloads);

		netdev = prog->aux->offload->netdev;
		__bpf_prog_offload_destroy(prog);

		ondev = bpf_offload_find_netdev(netdev);
		if (!ondev->offdev && list_empty(&ondev->progs))
			__bpf_offload_dev_netdev_unregister(NULL, netdev);
	}
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

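/* Offloaded maps are created on and live on the target device; the host
 * keeps only the bpf_offloaded_map shim, so the element accessors below
 * all bounce through the driver's offmap->dev_ops.
 */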
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock_rtnl;

	netdev_lock_ops(offmap->netdev);
	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	netdev_unlock_ops(offmap->netdev);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	netdev_unlock_ops(offmap->netdev);
err_unlock_rtnl:
	rtnl_unlock();
	bpf_map_area_free(offmap);
	return ERR_PTR(err);
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	bpf_map_area_free(offmap);
}

u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
{
	/* The memory dynamically allocated in netdev dev_ops is not counted */
	return sizeof(struct bpf_offloaded_map);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

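/* ns_get_path_cb() callback: report the map's ifindex and pin its netns
 * while RTNL and bpf_devs_lock keep the netdev from going away under us.
 */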
static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
	bool ret;

	if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
		return false;

	down_read(&bpf_devs_lock);
	ret = lhs->aux->offload && rhs->aux->offload &&
	      lhs->aux->offload->netdev &&
	      lhs->aux->offload->netdev == rhs->aux->offload->netdev;
	up_read(&bpf_devs_lock);

	return ret;
}

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_offloaded(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	int err;

	down_write(&bpf_devs_lock);
	err = __bpf_offload_dev_netdev_register(offdev, netdev);
	up_write(&bpf_devs_lock);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	down_write(&bpf_devs_lock);
	__bpf_offload_dev_netdev_unregister(offdev, netdev);
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

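/* Typical driver usage of the API above and below (a sketch; error handling
 * is omitted and the identifiers are illustrative, not from a real driver):
 *
 *	bdev = bpf_offload_dev_create(&my_prog_offload_ops, priv);
 *	err = bpf_offload_dev_netdev_register(bdev, netdev);
 *	...
 *	bpf_offload_dev_netdev_unregister(bdev, netdev);
 *	bpf_offload_dev_destroy(bdev);
 */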
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);

void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
	struct bpf_offload_netdev *ondev;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(dev);
	if (ondev && !ondev->offdev)
		__bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
	up_write(&bpf_devs_lock);
}

int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
			      struct bpf_prog_aux *prog_aux)
{
	if (!bpf_prog_is_dev_bound(prog_aux)) {
		bpf_log(log, "metadata kfuncs require device-bound program\n");
		return -EINVAL;
	}

	if (bpf_prog_is_offloaded(prog_aux)) {
		bpf_log(log, "metadata kfuncs can't be offloaded\n");
		return -EINVAL;
	}

	return 0;
}

void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
{
	const struct xdp_metadata_ops *ops;
	void *p = NULL;

	/* We don't hold bpf_devs_lock while resolving several
	 * kfuncs and can race with the unregister_netdevice().
	 * We rely on bpf_dev_bound_match() check at attach
	 * to render this program unusable.
	 */
	down_read(&bpf_devs_lock);
	if (!prog->aux->offload)
		goto out;

	ops = prog->aux->offload->netdev->xdp_metadata_ops;
	if (!ops)
		goto out;

#define XDP_METADATA_KFUNC(name, _, __, xmo) \
	if (func_id == bpf_xdp_metadata_kfunc_id(name)) p = ops->xmo;
	XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

out:
	up_read(&bpf_devs_lock);

	return p;
}

static int __init bpf_offload_init(void)
{
	return rhashtable_init(&offdevs, &offdevs_params);
}

core_initcall(bpf_offload_init);