xref: /linux/kernel/bpf/devmap.c (revision 0ad53fe3ae82443c74ff8cfd7bd13377cc1134a3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
3  */
4 
5 /* The devmap's primary use is as a backend map for the XDP BPF helper call
6  * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
7  * spent some effort to ensure the datapath with redirect maps does not use
8  * any locking. This is a quick note on the details.
9  *
10  * We have three possible paths into the devmap control plane: bpf syscalls,
11  * bpf programs, and driver side xmit/flush operations. A bpf syscall will
12  * invoke an update, delete, or lookup operation. To ensure updates and
13  * deletes appear atomic from the datapath side, xchg() is used to modify the
14  * netdev_map array. Then, because the datapath does a lookup into the netdev_map
15  * array (read-only) from an RCU critical section, we use call_rcu() to wait for
16  * an RCU grace period before freeing the old data structures. This ensures the
17  * datapath always has a valid copy. However, the datapath does a "flush"
18  * operation that pushes any pending packets in the driver outside the RCU
19  * critical section. Each bpf_dtab_netdev tracks these pending operations using
20  * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
21  * this list is empty, indicating all outstanding flush operations have completed.
22  *
23  * BPF syscalls may race with BPF program calls on any of the update, delete
24  * or lookup operations. As noted above, the xchg() operation also keeps the
25  * netdev_map consistent in this case. From the devmap's point of view, BPF
26  * programs calling into these operations are no different from multiple user
27  * space threads making system calls.
28  *
29  * Finally, any of the above may race with a netdev_unregister notifier. The
30  * unregister notifier must search the map structure for entries that contain
31  * a reference to the net device being removed and delete them. This is a
32  * two-step process: (a) dereference the bpf_dtab_netdev object in netdev_map
33  * and (b) check whether its ifindex matches the net_device being removed.
34  * When removing the dev, a cmpxchg() is used to ensure the correct dev is
35  * removed, because with a concurrent update or delete operation it is
36  * possible that the initially referenced dev is no longer in the map. As the
37  * notifier hook walks the map, we know that new dev references cannot be
38  * added by the user because core infrastructure ensures dev_get_by_index()
39  * calls will fail at this point.
40  *
41  * The devmap_hash type is a map type which interprets keys as ifindexes and
42  * indexes these using a hashmap. This allows maps that use ifindex as key to be
43  * densely packed instead of having holes in the lookup array for unused
44  * ifindexes. The setup and packet enqueue/send code is shared between the two
45  * types of devmap; only the lookup and insertion are different.
46  */
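/* A minimal usage sketch (illustrative only, not part of this file): user
 * space creates and populates a DEVMAP, and an XDP program redirects into it
 * with bpf_redirect_map(). Map and program names are made up; the third
 * helper argument is the action returned if the lookup fails.
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_DEVMAP);
 *           __uint(key_size, sizeof(__u32));
 *           __uint(value_size, sizeof(struct bpf_devmap_val));
 *           __uint(max_entries, 64);
 *   } tx_ports SEC(".maps");
 *
 *   SEC("xdp")
 *   int xdp_redirect_example(struct xdp_md *ctx)
 *   {
 *           return bpf_redirect_map(&tx_ports, 0, XDP_DROP);
 *   }
 */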
47 #include <linux/bpf.h>
48 #include <net/xdp.h>
49 #include <linux/filter.h>
50 #include <trace/events/xdp.h>
51 
52 #define DEV_CREATE_FLAG_MASK \
53 	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
54 
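/* Per-CPU, per-device bulk queue used to batch redirected frames before they
 * are handed to the target driver's ndo_xdp_xmit(). While frames are pending,
 * the queue sits on this CPU's dev_flush_list via flush_node and is drained
 * by __dev_flush() / bq_xmit_all().
 */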
55 struct xdp_dev_bulk_queue {
56 	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
57 	struct list_head flush_node;
58 	struct net_device *dev;
59 	struct net_device *dev_rx;
60 	struct bpf_prog *xdp_prog;
61 	unsigned int count;
62 };
63 
64 struct bpf_dtab_netdev {
65 	struct net_device *dev; /* must be first member, due to tracepoint */
66 	struct hlist_node index_hlist;
67 	struct bpf_dtab *dtab;
68 	struct bpf_prog *xdp_prog;
69 	struct rcu_head rcu;
70 	unsigned int idx;
71 	struct bpf_devmap_val val;
72 };
73 
74 struct bpf_dtab {
75 	struct bpf_map map;
76 	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
77 	struct list_head list;
78 
79 	/* these are only used for DEVMAP_HASH type maps */
80 	struct hlist_head *dev_index_head;
81 	spinlock_t index_lock;
82 	unsigned int items;
83 	u32 n_buckets;
84 };
85 
86 static DEFINE_PER_CPU(struct list_head, dev_flush_list);
87 static DEFINE_SPINLOCK(dev_map_lock);
88 static LIST_HEAD(dev_map_list);
89 
90 static struct hlist_head *dev_map_create_hash(unsigned int entries,
91 					      int numa_node)
92 {
93 	int i;
94 	struct hlist_head *hash;
95 
96 	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
97 	if (hash != NULL)
98 		for (i = 0; i < entries; i++)
99 			INIT_HLIST_HEAD(&hash[i]);
100 
101 	return hash;
102 }
103 
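/* n_buckets is always a power of two (see dev_map_init_map()), so masking
 * with n_buckets - 1 below is equivalent to a modulo and selects the bucket.
 */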
104 static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
105 						    int idx)
106 {
107 	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
108 }
109 
110 static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
111 {
112 	u32 valsize = attr->value_size;
113 
114 	/* Check sanity of attributes. Two value sizes are supported:
115 	 * 4 bytes: ifindex
116 	 * 8 bytes: ifindex + prog fd
117 	 */
118 	if (attr->max_entries == 0 || attr->key_size != 4 ||
119 	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
120 	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
121 	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
122 		return -EINVAL;
123 
124 	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
125 	 * verifier prevents writes from the BPF side
126 	 */
127 	attr->map_flags |= BPF_F_RDONLY_PROG;
128 
129 
130 	bpf_map_init_from_attr(&dtab->map, attr);
131 
132 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
133 		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
134 
135 		if (!dtab->n_buckets) /* Overflow check */
136 			return -EINVAL;
137 	}
138 
139 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
140 		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
141 							   dtab->map.numa_node);
142 		if (!dtab->dev_index_head)
143 			return -ENOMEM;
144 
145 		spin_lock_init(&dtab->index_lock);
146 	} else {
147 		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
148 						      sizeof(struct bpf_dtab_netdev *),
149 						      dtab->map.numa_node);
150 		if (!dtab->netdev_map)
151 			return -ENOMEM;
152 	}
153 
154 	return 0;
155 }
156 
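/* Illustrative user-space counterpart (a sketch using libbpf, not part of
 * this file): the attributes validated by dev_map_init_map() could come from
 * a call such as
 *
 *   int fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, "tx_ports",
 *                           sizeof(__u32), sizeof(struct bpf_devmap_val),
 *                           64, NULL);
 *
 * A 4-byte value (bare ifindex) is accepted as well as the full
 * struct bpf_devmap_val used here.
 */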
157 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
158 {
159 	struct bpf_dtab *dtab;
160 	int err;
161 
162 	if (!capable(CAP_NET_ADMIN))
163 		return ERR_PTR(-EPERM);
164 
165 	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
166 	if (!dtab)
167 		return ERR_PTR(-ENOMEM);
168 
169 	err = dev_map_init_map(dtab, attr);
170 	if (err) {
171 		kfree(dtab);
172 		return ERR_PTR(err);
173 	}
174 
175 	spin_lock(&dev_map_lock);
176 	list_add_tail_rcu(&dtab->list, &dev_map_list);
177 	spin_unlock(&dev_map_lock);
178 
179 	return &dtab->map;
180 }
181 
182 static void dev_map_free(struct bpf_map *map)
183 {
184 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
185 	int i;
186 
187 	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
188 	 * so the programs (there can be more than one that used this map) have
189 	 * been disconnected from events. The following synchronize_rcu() guarantees
190 	 * that both rcu read critical sections complete and waits for
191 	 * preempt-disable regions (NAPI being the relevant context here), so we
192 	 * are certain there will be no further reads against the netdev_map and
193 	 * all flush operations are complete. Flush operations can only be done
194 	 * from NAPI context for this reason.
195 	 */
196 
197 	spin_lock(&dev_map_lock);
198 	list_del_rcu(&dtab->list);
199 	spin_unlock(&dev_map_lock);
200 
201 	bpf_clear_redirect_map(map);
202 	synchronize_rcu();
203 
204 	/* Make sure prior __dev_map_entry_free() calls have completed. */
205 	rcu_barrier();
206 
207 	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
208 		for (i = 0; i < dtab->n_buckets; i++) {
209 			struct bpf_dtab_netdev *dev;
210 			struct hlist_head *head;
211 			struct hlist_node *next;
212 
213 			head = dev_map_index_hash(dtab, i);
214 
215 			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
216 				hlist_del_rcu(&dev->index_hlist);
217 				if (dev->xdp_prog)
218 					bpf_prog_put(dev->xdp_prog);
219 				dev_put(dev->dev);
220 				kfree(dev);
221 			}
222 		}
223 
224 		bpf_map_area_free(dtab->dev_index_head);
225 	} else {
226 		for (i = 0; i < dtab->map.max_entries; i++) {
227 			struct bpf_dtab_netdev *dev;
228 
229 			dev = rcu_dereference_raw(dtab->netdev_map[i]);
230 			if (!dev)
231 				continue;
232 
233 			if (dev->xdp_prog)
234 				bpf_prog_put(dev->xdp_prog);
235 			dev_put(dev->dev);
236 			kfree(dev);
237 		}
238 
239 		bpf_map_area_free(dtab->netdev_map);
240 	}
241 
242 	kfree(dtab);
243 }
244 
245 static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
246 {
247 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
248 	u32 index = key ? *(u32 *)key : U32_MAX;
249 	u32 *next = next_key;
250 
251 	if (index >= dtab->map.max_entries) {
252 		*next = 0;
253 		return 0;
254 	}
255 
256 	if (index == dtab->map.max_entries - 1)
257 		return -ENOENT;
258 	*next = index + 1;
259 	return 0;
260 }
261 
262 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
263  * by local_bh_disable() (from XDP calls inside NAPI). The
264  * rcu_read_lock_bh_held() below makes lockdep accept both.
265  */
266 static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
267 {
268 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
269 	struct hlist_head *head = dev_map_index_hash(dtab, key);
270 	struct bpf_dtab_netdev *dev;
271 
272 	hlist_for_each_entry_rcu(dev, head, index_hlist,
273 				 lockdep_is_held(&dtab->index_lock))
274 		if (dev->idx == key)
275 			return dev;
276 
277 	return NULL;
278 }
279 
280 static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
281 				    void *next_key)
282 {
283 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
284 	u32 idx, *next = next_key;
285 	struct bpf_dtab_netdev *dev, *next_dev;
286 	struct hlist_head *head;
287 	int i = 0;
288 
289 	if (!key)
290 		goto find_first;
291 
292 	idx = *(u32 *)key;
293 
294 	dev = __dev_map_hash_lookup_elem(map, idx);
295 	if (!dev)
296 		goto find_first;
297 
298 	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
299 				    struct bpf_dtab_netdev, index_hlist);
300 
301 	if (next_dev) {
302 		*next = next_dev->idx;
303 		return 0;
304 	}
305 
306 	i = idx & (dtab->n_buckets - 1);
307 	i++;
308 
309  find_first:
310 	for (; i < dtab->n_buckets; i++) {
311 		head = dev_map_index_hash(dtab, i);
312 
313 		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
314 					    struct bpf_dtab_netdev,
315 					    index_hlist);
316 		if (next_dev) {
317 			*next = next_dev->idx;
318 			return 0;
319 		}
320 	}
321 
322 	return -ENOENT;
323 }
324 
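/* Run the devmap entry's second XDP program (BPF_XDP_DEVMAP attach type) on
 * each frame in the bulk queue before transmission. Frames that return
 * XDP_PASS are compacted to the front of @frames; everything else is freed
 * here. Returns the number of frames left to send.
 */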
325 static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
326 				struct xdp_frame **frames, int n,
327 				struct net_device *dev)
328 {
329 	struct xdp_txq_info txq = { .dev = dev };
330 	struct xdp_buff xdp;
331 	int i, nframes = 0;
332 
333 	for (i = 0; i < n; i++) {
334 		struct xdp_frame *xdpf = frames[i];
335 		u32 act;
336 		int err;
337 
338 		xdp_convert_frame_to_buff(xdpf, &xdp);
339 		xdp.txq = &txq;
340 
341 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
342 		switch (act) {
343 		case XDP_PASS:
344 			err = xdp_update_frame_from_buff(&xdp, xdpf);
345 			if (unlikely(err < 0))
346 				xdp_return_frame_rx_napi(xdpf);
347 			else
348 				frames[nframes++] = xdpf;
349 			break;
350 		default:
351 			bpf_warn_invalid_xdp_action(act);
352 			fallthrough;
353 		case XDP_ABORTED:
354 			trace_xdp_exception(dev, xdp_prog, act);
355 			fallthrough;
356 		case XDP_DROP:
357 			xdp_return_frame_rx_napi(xdpf);
358 			break;
359 		}
360 	}
361 	return nframes; /* sent frames count */
362 }
363 
364 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
365 {
366 	struct net_device *dev = bq->dev;
367 	unsigned int cnt = bq->count;
368 	int sent = 0, err = 0;
369 	int to_send = cnt;
370 	int i;
371 
372 	if (unlikely(!cnt))
373 		return;
374 
375 	for (i = 0; i < cnt; i++) {
376 		struct xdp_frame *xdpf = bq->q[i];
377 
378 		prefetch(xdpf);
379 	}
380 
381 	if (bq->xdp_prog) {
382 		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
383 		if (!to_send)
384 			goto out;
385 	}
386 
387 	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
388 	if (sent < 0) {
389 		/* If ndo_xdp_xmit fails with an errno, no frames have
390 		 * been transmitted.
391 		 */
392 		err = sent;
393 		sent = 0;
394 	}
395 
396 	/* If not all frames have been transmitted, it is our
397 	 * responsibility to free them
398 	 */
399 	for (i = sent; unlikely(i < to_send); i++)
400 		xdp_return_frame_rx_napi(bq->q[i]);
401 
402 out:
403 	bq->count = 0;
404 	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
405 }
406 
407 /* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
408  * driver before returning from its napi->poll() routine. See the comment above
409  * xdp_do_flush() in filter.c.
410  */
411 void __dev_flush(void)
412 {
413 	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
414 	struct xdp_dev_bulk_queue *bq, *tmp;
415 
416 	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
417 		bq_xmit_all(bq, XDP_XMIT_FLUSH);
418 		bq->dev_rx = NULL;
419 		bq->xdp_prog = NULL;
420 		__list_del_clearprev(&bq->flush_node);
421 	}
422 }
423 
424 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
425  * by local_bh_disable() (from XDP calls inside NAPI). The
426  * rcu_read_lock_bh_held() below makes lockdep accept both.
427  */
428 static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
429 {
430 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
431 	struct bpf_dtab_netdev *obj;
432 
433 	if (key >= map->max_entries)
434 		return NULL;
435 
436 	obj = rcu_dereference_check(dtab->netdev_map[key],
437 				    rcu_read_lock_bh_held());
438 	return obj;
439 }
440 
441 /* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, percpu
442  * variable access is safe and map elements stick around. See comment above
443  * xdp_do_flush() in filter.c.
444  */
445 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
446 		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
447 {
448 	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
449 	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
450 
451 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
452 		bq_xmit_all(bq, 0);
453 
454 	/* Ingress dev_rx will be the same for all xdp_frames in the
455 	 * bulk_queue, because the bq is stored per-CPU and must be flushed
456 	 * at the end of the net_device driver's NAPI function.
457 	 *
458 	 * Do the same with xdp_prog and flush_list since these fields
459 	 * are only ever modified together.
460 	 */
461 	if (!bq->dev_rx) {
462 		bq->dev_rx = dev_rx;
463 		bq->xdp_prog = xdp_prog;
464 		list_add(&bq->flush_node, flush_list);
465 	}
466 
467 	bq->q[bq->count++] = xdpf;
468 }
469 
470 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
471 				struct net_device *dev_rx,
472 				struct bpf_prog *xdp_prog)
473 {
474 	struct xdp_frame *xdpf;
475 	int err;
476 
477 	if (!dev->netdev_ops->ndo_xdp_xmit)
478 		return -EOPNOTSUPP;
479 
480 	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
481 	if (unlikely(err))
482 		return err;
483 
484 	xdpf = xdp_convert_buff_to_frame(xdp);
485 	if (unlikely(!xdpf))
486 		return -EOVERFLOW;
487 
488 	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
489 	return 0;
490 }
491 
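/* Generic-XDP (skb) counterpart of dev_map_bpf_prog_run(): run the devmap
 * entry's second XDP program on a single skb. The caller only forwards the
 * skb when this returns XDP_PASS; on any other verdict the skb has already
 * been freed here.
 */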
492 static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
493 {
494 	struct xdp_txq_info txq = { .dev = dst->dev };
495 	struct xdp_buff xdp;
496 	u32 act;
497 
498 	if (!dst->xdp_prog)
499 		return XDP_PASS;
500 
501 	__skb_pull(skb, skb->mac_len);
502 	xdp.txq = &txq;
503 
504 	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
505 	switch (act) {
506 	case XDP_PASS:
507 		__skb_push(skb, skb->mac_len);
508 		break;
509 	default:
510 		bpf_warn_invalid_xdp_action(act);
511 		fallthrough;
512 	case XDP_ABORTED:
513 		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
514 		fallthrough;
515 	case XDP_DROP:
516 		kfree_skb(skb);
517 		break;
518 	}
519 
520 	return act;
521 }
522 
523 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
524 		    struct net_device *dev_rx)
525 {
526 	return __xdp_enqueue(dev, xdp, dev_rx, NULL);
527 }
528 
529 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
530 		    struct net_device *dev_rx)
531 {
532 	struct net_device *dev = dst->dev;
533 
534 	return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
535 }
536 
537 static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp)
538 {
539 	if (!obj ||
540 	    !obj->dev->netdev_ops->ndo_xdp_xmit)
541 		return false;
542 
543 	if (xdp_ok_fwd_dev(obj->dev, xdp->data_end - xdp->data))
544 		return false;
545 
546 	return true;
547 }
548 
549 static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
550 				 struct net_device *dev_rx,
551 				 struct xdp_frame *xdpf)
552 {
553 	struct xdp_frame *nxdpf;
554 
555 	nxdpf = xdpf_clone(xdpf);
556 	if (!nxdpf)
557 		return -ENOMEM;
558 
559 	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
560 
561 	return 0;
562 }
563 
564 static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
565 {
566 	while (num_excluded--) {
567 		if (ifindex == excluded[num_excluded])
568 			return true;
569 	}
570 	return false;
571 }
572 
573 /* Get ifindex of each upper device. 'indexes' must be able to hold at
574  * least MAX_NEST_DEV elements.
575  * Returns the number of ifindexes added.
576  */
577 static int get_upper_ifindexes(struct net_device *dev, int *indexes)
578 {
579 	struct net_device *upper;
580 	struct list_head *iter;
581 	int n = 0;
582 
583 	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
584 		indexes[n++] = upper->ifindex;
585 	}
586 	return n;
587 }
588 
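/* Broadcast path, used when an XDP program calls bpf_redirect_map() with the
 * BPF_F_BROADCAST flag, e.g. (illustrative):
 *
 *   bpf_redirect_map(&tx_ports, 0, BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 *
 * The frame is cloned once per valid destination except the last one, which
 * consumes the original; with BPF_F_EXCLUDE_INGRESS the ingress device and
 * its upper devices are skipped.
 */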
589 int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
590 			  struct bpf_map *map, bool exclude_ingress)
591 {
592 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
593 	struct bpf_dtab_netdev *dst, *last_dst = NULL;
594 	int excluded_devices[1+MAX_NEST_DEV];
595 	struct hlist_head *head;
596 	struct xdp_frame *xdpf;
597 	int num_excluded = 0;
598 	unsigned int i;
599 	int err;
600 
601 	if (exclude_ingress) {
602 		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
603 		excluded_devices[num_excluded++] = dev_rx->ifindex;
604 	}
605 
606 	xdpf = xdp_convert_buff_to_frame(xdp);
607 	if (unlikely(!xdpf))
608 		return -EOVERFLOW;
609 
610 	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
611 		for (i = 0; i < map->max_entries; i++) {
612 			dst = rcu_dereference_check(dtab->netdev_map[i],
613 						    rcu_read_lock_bh_held());
614 			if (!is_valid_dst(dst, xdp))
615 				continue;
616 
617 			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
618 				continue;
619 
620 			/* we only need n-1 clones; last_dst enqueued below */
621 			if (!last_dst) {
622 				last_dst = dst;
623 				continue;
624 			}
625 
626 			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
627 			if (err)
628 				return err;
629 
630 			last_dst = dst;
631 		}
632 	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
633 		for (i = 0; i < dtab->n_buckets; i++) {
634 			head = dev_map_index_hash(dtab, i);
635 			hlist_for_each_entry_rcu(dst, head, index_hlist,
636 						 lockdep_is_held(&dtab->index_lock)) {
637 				if (!is_valid_dst(dst, xdp))
638 					continue;
639 
640 				if (is_ifindex_excluded(excluded_devices, num_excluded,
641 							dst->dev->ifindex))
642 					continue;
643 
644 				/* we only need n-1 clones; last_dst enqueued below */
645 				if (!last_dst) {
646 					last_dst = dst;
647 					continue;
648 				}
649 
650 				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
651 				if (err)
652 					return err;
653 
654 				last_dst = dst;
655 			}
656 		}
657 	}
658 
659 	/* consume the last copy of the frame */
660 	if (last_dst)
661 		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
662 	else
663 		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
664 
665 	return 0;
666 }
667 
668 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
669 			     struct bpf_prog *xdp_prog)
670 {
671 	int err;
672 
673 	err = xdp_ok_fwd_dev(dst->dev, skb->len);
674 	if (unlikely(err))
675 		return err;
676 
677 	/* Redirect has already succeeded semantically at this point, so we just
678 	 * return 0 even if the packet is dropped. The helper below takes care
679 	 * of freeing the skb.
680 	 */
681 	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
682 		return 0;
683 
684 	skb->dev = dst->dev;
685 	generic_xdp_tx(skb, xdp_prog);
686 
687 	return 0;
688 }
689 
690 static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
691 				  struct sk_buff *skb,
692 				  struct bpf_prog *xdp_prog)
693 {
694 	struct sk_buff *nskb;
695 	int err;
696 
697 	nskb = skb_clone(skb, GFP_ATOMIC);
698 	if (!nskb)
699 		return -ENOMEM;
700 
701 	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
702 	if (unlikely(err)) {
703 		consume_skb(nskb);
704 		return err;
705 	}
706 
707 	return 0;
708 }
709 
710 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
711 			   struct bpf_prog *xdp_prog, struct bpf_map *map,
712 			   bool exclude_ingress)
713 {
714 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
715 	struct bpf_dtab_netdev *dst, *last_dst = NULL;
716 	int excluded_devices[1+MAX_NEST_DEV];
717 	struct hlist_head *head;
718 	struct hlist_node *next;
719 	int num_excluded = 0;
720 	unsigned int i;
721 	int err;
722 
723 	if (exclude_ingress) {
724 		num_excluded = get_upper_ifindexes(dev, excluded_devices);
725 		excluded_devices[num_excluded++] = dev->ifindex;
726 	}
727 
728 	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
729 		for (i = 0; i < map->max_entries; i++) {
730 			dst = rcu_dereference_check(dtab->netdev_map[i],
731 						    rcu_read_lock_bh_held());
732 			if (!dst)
733 				continue;
734 
735 			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
736 				continue;
737 
738 			/* we only need n-1 clones; last_dst enqueued below */
739 			if (!last_dst) {
740 				last_dst = dst;
741 				continue;
742 			}
743 
744 			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
745 			if (err)
746 				return err;
747 
748 			last_dst = dst;
749 
750 		}
751 	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
752 		for (i = 0; i < dtab->n_buckets; i++) {
753 			head = dev_map_index_hash(dtab, i);
754 			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
755 				if (!dst)
756 					continue;
757 
758 				if (is_ifindex_excluded(excluded_devices, num_excluded,
759 							dst->dev->ifindex))
760 					continue;
761 
762 				/* we only need n-1 clones; last_dst enqueued below */
763 				if (!last_dst) {
764 					last_dst = dst;
765 					continue;
766 				}
767 
768 				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
769 				if (err)
770 					return err;
771 
772 				last_dst = dst;
773 			}
774 		}
775 	}
776 
777 	/* consume the first skb and return */
778 	if (last_dst)
779 		return dev_map_generic_redirect(last_dst, skb, xdp_prog);
780 
781 	/* dtab is empty */
782 	consume_skb(skb);
783 	return 0;
784 }
785 
786 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
787 {
788 	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
789 
790 	return obj ? &obj->val : NULL;
791 }
792 
793 static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
794 {
795 	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
796 								*(u32 *)key);
797 	return obj ? &obj->val : NULL;
798 }
799 
800 static void __dev_map_entry_free(struct rcu_head *rcu)
801 {
802 	struct bpf_dtab_netdev *dev;
803 
804 	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
805 	if (dev->xdp_prog)
806 		bpf_prog_put(dev->xdp_prog);
807 	dev_put(dev->dev);
808 	kfree(dev);
809 }
810 
811 static int dev_map_delete_elem(struct bpf_map *map, void *key)
812 {
813 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
814 	struct bpf_dtab_netdev *old_dev;
815 	int k = *(u32 *)key;
816 
817 	if (k >= map->max_entries)
818 		return -EINVAL;
819 
820 	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
821 	if (old_dev)
822 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
823 	return 0;
824 }
825 
826 static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
827 {
828 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
829 	struct bpf_dtab_netdev *old_dev;
830 	int k = *(u32 *)key;
831 	unsigned long flags;
832 	int ret = -ENOENT;
833 
834 	spin_lock_irqsave(&dtab->index_lock, flags);
835 
836 	old_dev = __dev_map_hash_lookup_elem(map, k);
837 	if (old_dev) {
838 		dtab->items--;
839 		hlist_del_init_rcu(&old_dev->index_hlist);
840 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
841 		ret = 0;
842 	}
843 	spin_unlock_irqrestore(&dtab->index_lock, flags);
844 
845 	return ret;
846 }
847 
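/* Allocate and initialise one devmap entry: take a reference on the target
 * net_device and, if an fd was supplied in the value, a reference on the
 * BPF_XDP_DEVMAP program that will run on egress.
 */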
848 static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
849 						    struct bpf_dtab *dtab,
850 						    struct bpf_devmap_val *val,
851 						    unsigned int idx)
852 {
853 	struct bpf_prog *prog = NULL;
854 	struct bpf_dtab_netdev *dev;
855 
856 	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
857 				   GFP_ATOMIC | __GFP_NOWARN,
858 				   dtab->map.numa_node);
859 	if (!dev)
860 		return ERR_PTR(-ENOMEM);
861 
862 	dev->dev = dev_get_by_index(net, val->ifindex);
863 	if (!dev->dev)
864 		goto err_out;
865 
866 	if (val->bpf_prog.fd > 0) {
867 		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
868 					     BPF_PROG_TYPE_XDP, false);
869 		if (IS_ERR(prog))
870 			goto err_put_dev;
871 		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
872 			goto err_put_prog;
873 	}
874 
875 	dev->idx = idx;
876 	dev->dtab = dtab;
877 	if (prog) {
878 		dev->xdp_prog = prog;
879 		dev->val.bpf_prog.id = prog->aux->id;
880 	} else {
881 		dev->xdp_prog = NULL;
882 		dev->val.bpf_prog.id = 0;
883 	}
884 	dev->val.ifindex = val->ifindex;
885 
886 	return dev;
887 err_put_prog:
888 	bpf_prog_put(prog);
889 err_put_dev:
890 	dev_put(dev->dev);
891 err_out:
892 	kfree(dev);
893 	return ERR_PTR(-EINVAL);
894 }
895 
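/* Illustrative user-space update (a sketch, not part of this file): the
 * 8-byte value form attaches a second XDP program, loaded with expected
 * attach type BPF_XDP_DEVMAP, that runs on the egress device:
 *
 *   struct bpf_devmap_val val = {
 *           .ifindex = ifindex,
 *           .bpf_prog.fd = prog_fd,
 *   };
 *   bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */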
896 static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
897 				 void *key, void *value, u64 map_flags)
898 {
899 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
900 	struct bpf_dtab_netdev *dev, *old_dev;
901 	struct bpf_devmap_val val = {};
902 	u32 i = *(u32 *)key;
903 
904 	if (unlikely(map_flags > BPF_EXIST))
905 		return -EINVAL;
906 	if (unlikely(i >= dtab->map.max_entries))
907 		return -E2BIG;
908 	if (unlikely(map_flags == BPF_NOEXIST))
909 		return -EEXIST;
910 
911 	/* already verified value_size <= sizeof val */
912 	memcpy(&val, value, map->value_size);
913 
914 	if (!val.ifindex) {
915 		dev = NULL;
916 		/* cannot specify fd if ifindex is 0 */
917 		if (val.bpf_prog.fd > 0)
918 			return -EINVAL;
919 	} else {
920 		dev = __dev_map_alloc_node(net, dtab, &val, i);
921 		if (IS_ERR(dev))
922 			return PTR_ERR(dev);
923 	}
924 
925 	/* Use call_rcu() here to ensure rcu critical sections have completed,
926 	 * remembering that the driver side flush operation will happen before the
927 	 * net device is removed.
928 	 */
929 	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
930 	if (old_dev)
931 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
932 
933 	return 0;
934 }
935 
936 static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
937 			       u64 map_flags)
938 {
939 	return __dev_map_update_elem(current->nsproxy->net_ns,
940 				     map, key, value, map_flags);
941 }
942 
943 static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
944 				     void *key, void *value, u64 map_flags)
945 {
946 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
947 	struct bpf_dtab_netdev *dev, *old_dev;
948 	struct bpf_devmap_val val = {};
949 	u32 idx = *(u32 *)key;
950 	unsigned long flags;
951 	int err = -EEXIST;
952 
953 	/* already verified value_size <= sizeof val */
954 	memcpy(&val, value, map->value_size);
955 
956 	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
957 		return -EINVAL;
958 
959 	spin_lock_irqsave(&dtab->index_lock, flags);
960 
961 	old_dev = __dev_map_hash_lookup_elem(map, idx);
962 	if (old_dev && (map_flags & BPF_NOEXIST))
963 		goto out_err;
964 
965 	dev = __dev_map_alloc_node(net, dtab, &val, idx);
966 	if (IS_ERR(dev)) {
967 		err = PTR_ERR(dev);
968 		goto out_err;
969 	}
970 
971 	if (old_dev) {
972 		hlist_del_rcu(&old_dev->index_hlist);
973 	} else {
974 		if (dtab->items >= dtab->map.max_entries) {
975 			spin_unlock_irqrestore(&dtab->index_lock, flags);
976 			call_rcu(&dev->rcu, __dev_map_entry_free);
977 			return -E2BIG;
978 		}
979 		dtab->items++;
980 	}
981 
982 	hlist_add_head_rcu(&dev->index_hlist,
983 			   dev_map_index_hash(dtab, idx));
984 	spin_unlock_irqrestore(&dtab->index_lock, flags);
985 
986 	if (old_dev)
987 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
988 
989 	return 0;
990 
991 out_err:
992 	spin_unlock_irqrestore(&dtab->index_lock, flags);
993 	return err;
994 }
995 
996 static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
997 				   u64 map_flags)
998 {
999 	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
1000 					 map, key, value, map_flags);
1001 }
1002 
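/* These map_redirect ops back the bpf_redirect_map() helper for the two
 * devmap types. __bpf_xdp_redirect_map() (in filter.c) records the looked-up
 * destination in the per-CPU bpf_redirect_info and returns XDP_REDIRECT; the
 * frame is then enqueued from xdp_do_redirect().
 */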
1003 static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
1004 {
1005 	return __bpf_xdp_redirect_map(map, ifindex, flags,
1006 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1007 				      __dev_map_lookup_elem);
1008 }
1009 
1010 static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
1011 {
1012 	return __bpf_xdp_redirect_map(map, ifindex, flags,
1013 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1014 				      __dev_map_hash_lookup_elem);
1015 }
1016 
1017 static int dev_map_btf_id;
1018 const struct bpf_map_ops dev_map_ops = {
1019 	.map_meta_equal = bpf_map_meta_equal,
1020 	.map_alloc = dev_map_alloc,
1021 	.map_free = dev_map_free,
1022 	.map_get_next_key = dev_map_get_next_key,
1023 	.map_lookup_elem = dev_map_lookup_elem,
1024 	.map_update_elem = dev_map_update_elem,
1025 	.map_delete_elem = dev_map_delete_elem,
1026 	.map_check_btf = map_check_no_btf,
1027 	.map_btf_name = "bpf_dtab",
1028 	.map_btf_id = &dev_map_btf_id,
1029 	.map_redirect = dev_map_redirect,
1030 };
1031 
1032 static int dev_map_hash_map_btf_id;
1033 const struct bpf_map_ops dev_map_hash_ops = {
1034 	.map_meta_equal = bpf_map_meta_equal,
1035 	.map_alloc = dev_map_alloc,
1036 	.map_free = dev_map_free,
1037 	.map_get_next_key = dev_map_hash_get_next_key,
1038 	.map_lookup_elem = dev_map_hash_lookup_elem,
1039 	.map_update_elem = dev_map_hash_update_elem,
1040 	.map_delete_elem = dev_map_hash_delete_elem,
1041 	.map_check_btf = map_check_no_btf,
1042 	.map_btf_name = "bpf_dtab",
1043 	.map_btf_id = &dev_map_hash_map_btf_id,
1044 	.map_redirect = dev_hash_map_redirect,
1045 };
1046 
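/* Remove all entries in a DEVMAP_HASH map that reference @netdev. Called from
 * the NETDEV_UNREGISTER notifier below while holding the map's index_lock, so
 * it cannot race with concurrent hash updates or deletes.
 */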
1047 static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
1048 				       struct net_device *netdev)
1049 {
1050 	unsigned long flags;
1051 	u32 i;
1052 
1053 	spin_lock_irqsave(&dtab->index_lock, flags);
1054 	for (i = 0; i < dtab->n_buckets; i++) {
1055 		struct bpf_dtab_netdev *dev;
1056 		struct hlist_head *head;
1057 		struct hlist_node *next;
1058 
1059 		head = dev_map_index_hash(dtab, i);
1060 
1061 		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
1062 			if (netdev != dev->dev)
1063 				continue;
1064 
1065 			dtab->items--;
1066 			hlist_del_rcu(&dev->index_hlist);
1067 			call_rcu(&dev->rcu, __dev_map_entry_free);
1068 		}
1069 	}
1070 	spin_unlock_irqrestore(&dtab->index_lock, flags);
1071 }
1072 
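/* Netdevice notifier: on NETDEV_REGISTER, allocate the per-CPU xdp_bulkq used
 * by bq_enqueue() for any device that supports ndo_xdp_xmit(); on
 * NETDEV_UNREGISTER, purge every devmap entry that still references the
 * departing device (see the locking discussion at the top of this file).
 */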
1073 static int dev_map_notification(struct notifier_block *notifier,
1074 				ulong event, void *ptr)
1075 {
1076 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1077 	struct bpf_dtab *dtab;
1078 	int i, cpu;
1079 
1080 	switch (event) {
1081 	case NETDEV_REGISTER:
1082 		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
1083 			break;
1084 
1085 		/* will be freed in free_netdev() */
1086 		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
1087 		if (!netdev->xdp_bulkq)
1088 			return NOTIFY_BAD;
1089 
1090 		for_each_possible_cpu(cpu)
1091 			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
1092 		break;
1093 	case NETDEV_UNREGISTER:
1094 		/* This rcu_read_lock/unlock pair is needed because
1095 		 * dev_map_list is an RCU list AND to ensure a delete
1096 		 * operation does not free a netdev_map entry while we
1097 		 * are comparing it against the netdev being unregistered.
1098 		 */
1099 		rcu_read_lock();
1100 		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
1101 			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
1102 				dev_map_hash_remove_netdev(dtab, netdev);
1103 				continue;
1104 			}
1105 
1106 			for (i = 0; i < dtab->map.max_entries; i++) {
1107 				struct bpf_dtab_netdev *dev, *odev;
1108 
1109 				dev = rcu_dereference(dtab->netdev_map[i]);
1110 				if (!dev || netdev != dev->dev)
1111 					continue;
1112 				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1113 				if (dev == odev)
1114 					call_rcu(&dev->rcu,
1115 						 __dev_map_entry_free);
1116 			}
1117 		}
1118 		rcu_read_unlock();
1119 		break;
1120 	default:
1121 		break;
1122 	}
1123 	return NOTIFY_OK;
1124 }
1125 
1126 static struct notifier_block dev_map_notifier = {
1127 	.notifier_call = dev_map_notification,
1128 };
1129 
1130 static int __init dev_map_init(void)
1131 {
1132 	int cpu;
1133 
1134 	/* Ensure the tracepoint shadow struct _bpf_dtab_netdev stays in sync */
1135 	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1136 		     offsetof(struct _bpf_dtab_netdev, dev));
1137 	register_netdevice_notifier(&dev_map_notifier);
1138 
1139 	for_each_possible_cpu(cpu)
1140 		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
1141 	return 0;
1142 }
1143 
1144 subsys_initcall(dev_map_init);
1145