/* xref: /linux/kernel/bpf/offload.c (revision 8ccd54fe45713cd458015b5b08d6098545e70543) */
/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev, and the offload members
 * of all progs.
 * The RTNL lock cannot be taken while holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev; /* NULL when bound-only */
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
					     struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_free;
	}

	if (offdev)
		list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	return 0;

err_free:
	kfree(ondev);
	return err;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}
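
/* Map commands passed through bpf_map_offload_ndo() above reach the driver
 * via its ->ndo_bpf() callback.  A minimal sketch of the driver side,
 * assuming a hypothetical "foo" driver (in-tree, nfp handles these
 * commands along the same lines):
 *
 *	static int foo_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case BPF_OFFLOAD_MAP_ALLOC:
 *			// Allocate device state, then install the per-map
 *			// ops that bpf_map_offload_{lookup,update,...}_elem
 *			// below will call.
 *			bpf->offmap->dev_ops = &foo_map_dev_ops;
 *			return foo_map_alloc_hw(dev, bpf->offmap);
 *		case BPF_OFFLOAD_MAP_FREE:
 *			foo_map_free_hw(dev, bpf->offmap);
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */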

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
						struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev = NULL;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		return;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));

	/* Try to move the objects to another netdev of the device */
	if (offdev) {
		list_del(&ondev->offdev_netdevs);
		altdev = list_first_entry_or_null(&offdev->netdevs,
						  struct bpf_offload_netdev,
						  offdev_netdevs);
	}

	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
}

static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;
	offload->netdev = netdev;

	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		if (bpf_prog_is_offloaded(prog->aux)) {
			err = -EINVAL;
			goto err_free;
		}

		/* When only binding to the device, explicitly
		 * create an entry in the hashtable.
		 */
		err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
		if (err)
			goto err_free;
		ondev = bpf_offload_find_netdev(offload->netdev);
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);

	return 0;
err_free:
	kfree(offload);
	return err;
}

int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct net_device *netdev;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
	if (!netdev)
		return -EINVAL;

	err = bpf_dev_offload_check(netdev);
	if (err)
		goto out;

	prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

	down_write(&bpf_devs_lock);
	err = __bpf_prog_dev_bound_init(prog, netdev);
	up_write(&bpf_devs_lock);

out:
	dev_put(netdev);
	return err;
}
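
/* From userspace, the checks above correspond to load-time attributes: a
 * non-zero prog_ifindex makes the program dev-bound and, unless
 * BPF_F_XDP_DEV_BOUND_ONLY is also set, requests full offload.  A minimal
 * libbpf sketch, assuming a recent libbpf and ifindex/insns/insn_cnt
 * prepared by the caller:
 *
 *	LIBBPF_OPTS(bpf_prog_load_opts, opts,
 *		    .prog_ifindex = ifindex,
 *		    .prog_flags = BPF_F_XDP_DEV_BOUND_ONLY);
 *	int fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "xdp_db", "GPL",
 *			       insns, insn_cnt, &opts);
 */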

int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
{
	int err;

	if (!bpf_prog_is_dev_bound(old_prog->aux))
		return 0;

	if (bpf_prog_is_offloaded(old_prog->aux))
		return -EINVAL;

	new_prog->aux->dev_bound = old_prog->aux->dev_bound;
	new_prog->aux->offload_requested = old_prog->aux->offload_requested;

	down_write(&bpf_devs_lock);
	if (!old_prog->aux->offload) {
		err = -EINVAL;
		goto out;
	}

	err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);

out:
	up_write(&bpf_devs_lock);
	return err;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}
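
/* The verifier hooks above all dispatch to the device driver's struct
 * bpf_prog_offload_ops.  A sketch of a minimal ops table with
 * hypothetical "foo" callbacks (nfp's nfp_bpf_dev_ops is the in-tree
 * reference); only the callbacks marked optional may be left NULL:
 *
 *	static const struct bpf_prog_offload_ops foo_bpf_dev_ops = {
 *		.prepare	= foo_verifier_prep,	// before verification
 *		.insn_hook	= foo_verify_insn,	// per-instruction check
 *		.finalize	= foo_finalize,		// optional
 *		.replace_insn	= foo_replace_insn,	// optional optimization
 *		.remove_insns	= foo_remove_insns,	// optional optimization
 *		.translate	= foo_translate,	// JIT to device code
 *		.destroy	= foo_destroy,		// free device state
 *	};
 */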

void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
	struct bpf_offload_netdev *ondev;
	struct net_device *netdev;

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload) {
		list_del_init(&prog->aux->offload->offloads);

		netdev = prog->aux->offload->netdev;
		__bpf_prog_offload_destroy(prog);

		ondev = bpf_offload_find_netdev(netdev);
		if (!ondev->offdev && list_empty(&ondev->progs))
			__bpf_offload_dev_netdev_unregister(NULL, netdev);
	}
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}
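
/* Userspace reads this through BPF_OBJ_GET_INFO_BY_FD, typically in two
 * calls: one to learn jited_prog_len, one to fetch the image.  A hedged
 * sketch using libbpf's bpf_obj_get_info_by_fd() (error handling
 * trimmed):
 *
 *	struct bpf_prog_info info = {};
 *	__u32 len = sizeof(info);
 *	__u32 jited_len;
 *
 *	bpf_obj_get_info_by_fd(prog_fd, &info, &len);	// query lengths
 *	jited_len = info.jited_prog_len;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.jited_prog_len = jited_len;
 *	info.jited_prog_insns = (__u64)(uintptr_t)malloc(jited_len);
 *	bpf_obj_get_info_by_fd(prog_fd, &info, &len);	// fetch image
 */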

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	bpf_map_area_free(offmap);
	return ERR_PTR(err);
}
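
/* Offloaded maps are requested the same way as offloaded programs: by
 * passing the target ifindex at creation time.  A minimal libbpf sketch
 * (only BPF_MAP_TYPE_ARRAY and BPF_MAP_TYPE_HASH pass the check above;
 * ifindex is assumed to come from the caller):
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_ifindex = ifindex);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_HASH, "offl_map",
 *				sizeof(__u32), sizeof(__u64), 64, &opts);
 */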

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	bpf_map_area_free(offmap);
}

u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
{
	/* Memory dynamically allocated by the netdev's dev_ops is not counted */
	return sizeof(struct bpf_offloaded_map);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}
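
/* All four element accessors above go through the struct bpf_map_dev_ops
 * that the driver installed in offmap->dev_ops while handling
 * BPF_OFFLOAD_MAP_ALLOC.  A sketch with hypothetical "foo" callbacks,
 * each talking to the device and returning 0 or -errno:
 *
 *	static const struct bpf_map_dev_ops foo_map_dev_ops = {
 *		.map_get_next_key	= foo_map_get_next_key,
 *		.map_lookup_elem	= foo_map_lookup_entry,
 *		.map_update_elem	= foo_map_update_entry,
 *		.map_delete_elem	= foo_map_delete_entry,
 *	};
 */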

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
	bool ret;

	if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
		return false;

	down_read(&bpf_devs_lock);
	ret = lhs->aux->offload && rhs->aux->offload &&
	      lhs->aux->offload->netdev &&
	      lhs->aux->offload->netdev == rhs->aux->offload->netdev;
	up_read(&bpf_devs_lock);

	return ret;
}

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_offloaded(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	int err;

	down_write(&bpf_devs_lock);
	err = __bpf_offload_dev_netdev_register(offdev, netdev);
	up_write(&bpf_devs_lock);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	down_write(&bpf_devs_lock);
	__bpf_offload_dev_netdev_unregister(offdev, netdev);
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);
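
/* The exported helpers above form the driver-facing API.  A hedged
 * probe/remove sketch for a hypothetical "foo" driver (error handling
 * trimmed; unregister must precede destroy):
 *
 *	// probe:
 *	foo->bpf_dev = bpf_offload_dev_create(&foo_bpf_dev_ops, foo);
 *	if (IS_ERR(foo->bpf_dev))
 *		return PTR_ERR(foo->bpf_dev);
 *	err = bpf_offload_dev_netdev_register(foo->bpf_dev, netdev);
 *
 *	// remove:
 *	bpf_offload_dev_netdev_unregister(foo->bpf_dev, netdev);
 *	bpf_offload_dev_destroy(foo->bpf_dev);
 */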

void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
	struct bpf_offload_netdev *ondev;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(dev);
	if (ondev && !ondev->offdev)
		__bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
	up_write(&bpf_devs_lock);
}

int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
			      struct bpf_prog_aux *prog_aux)
{
	if (!bpf_prog_is_dev_bound(prog_aux)) {
		bpf_log(log, "metadata kfuncs require device-bound program\n");
		return -EINVAL;
	}

	if (bpf_prog_is_offloaded(prog_aux)) {
		bpf_log(log, "metadata kfuncs can't be offloaded\n");
		return -EINVAL;
	}

	return 0;
}

void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
{
	const struct xdp_metadata_ops *ops;
	void *p = NULL;
	/* We don't hold bpf_devs_lock across resolving multiple kfuncs, so
	 * we can race with unregister_netdevice(). We rely on the
	 * bpf_dev_bound_match() check at attach time to render such a
	 * program unusable.
	 */
	down_read(&bpf_devs_lock);
	if (!prog->aux->offload)
		goto out;

	ops = prog->aux->offload->netdev->xdp_metadata_ops;
	if (!ops)
		goto out;

	if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP))
		p = ops->xmo_rx_timestamp;
	else if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH))
		p = ops->xmo_rx_hash;
out:
	up_read(&bpf_devs_lock);

	return p;
}
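
/* From the BPF program side these kfuncs are plain externs; the
 * resolution above patches the call site to the driver's xmo_* handler.
 * A sketch of a dev-bound XDP program, assuming the kfunc signature at
 * this revision:
 *
 *	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
 *						 __u64 *timestamp) __ksym;
 *
 *	SEC("xdp")
 *	int rx_prog(struct xdp_md *ctx)
 *	{
 *		__u64 ts;
 *
 *		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
 *			bpf_printk("hw rx timestamp: %llu", ts);
 *		return XDP_PASS;
 *	}
 */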

static int __init bpf_offload_init(void)
{
	return rhashtable_init(&offdevs, &offdevs_params);
}

late_initcall(bpf_offload_init);
863